| column | dtype | stats |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 3 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–972 |
| max_stars_repo_name | string | lengths 6–130 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1 – 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24, nullable |
| max_issues_repo_path | string | lengths 3–972 |
| max_issues_repo_name | string | lengths 6–130 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1 – 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24, nullable |
| max_forks_repo_path | string | lengths 3–972 |
| max_forks_repo_name | string | lengths 6–130 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1 – 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24, nullable |
| content | string | lengths 3 – 1.03M |
| avg_line_length | float64 | 1.13 – 941k |
| max_line_length | int64 | 2 – 941k |
| alphanum_fraction | float64 | 0 – 1 |
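The records below follow this schema: per-file metadata (hashes, repository, licenses, star/issue/fork activity), the raw file content, and simple text statistics. Purely as an illustration, assuming each row is available as a plain Python dict keyed by the column names above (the loading mechanism itself is not specified here), records could be filtered like this:

```python
# Illustrative sketch only, not part of any dataset tooling shown here:
# keep Python files from permissively licensed repos with a minimum star count.
ALLOWED_LICENSES = {"MIT", "Apache-2.0", "Artistic-2.0"}

def select_rows(rows, min_stars=1):
    """Yield (path, content) pairs for rows passing simple schema-based filters."""
    for row in rows:
        licenses = set(row["max_stars_repo_licenses"] or [])
        stars = row["max_stars_count"] or 0
        if row["ext"] == "py" and stars >= min_stars and licenses & ALLOWED_LICENSES:
            yield row["max_stars_repo_path"], row["content"]
```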
hexsha: 6a5843ff4e8dfc97c59826299582dc31013c7a62 | size: 732 | ext: py | lang: Python
max_stars: pycroft/model/webstorage.py @ agdsn/pycroft (head ea771141d59c88fdb8a782eafbe106240550a33a) | licenses ["Apache-2.0"] | count 18 | 2016-04-20T19:00:56.000Z – 2021-12-19T16:43:57.000Z
max_issues: pycroft/model/webstorage.py @ agdsn/pycroft (head ea771141d59c88fdb8a782eafbe106240550a33a) | licenses ["Apache-2.0"] | count 461 | 2016-07-20T00:42:59.000Z – 2022-03-25T17:03:07.000Z
max_forks: pycroft/model/webstorage.py @ agdsn/pycroft (head ea771141d59c88fdb8a782eafbe106240550a33a) | licenses ["Apache-2.0"] | count 15 | 2016-07-15T18:46:43.000Z – 2021-03-17T20:08:39.000Z
content:
# Copyright (c) 2017 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
from pycroft.model.base import IntegerIdModel
from sqlalchemy import Column, func, LargeBinary
from pycroft.model.session import session
from pycroft.model.types import DateTimeTz
class WebStorage(IntegerIdModel):
data = Column(LargeBinary, nullable=False)
expiry = Column(DateTimeTz, nullable=False)
@staticmethod
def auto_expire():
"""Delete all expired items from the database"""
WebStorage.q.filter(WebStorage.expiry <= func.current_timestamp()).delete(False)
session.commit()
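A minimal usage sketch for the model above (the payload, expiry value, and calling context are assumptions; only `WebStorage`, `session`, and `auto_expire` come from the file itself):

```python
# Hypothetical caller: store a blob that expires in one hour, then let some
# periodic job purge anything whose expiry has already passed.
from datetime import datetime, timedelta, timezone

blob = WebStorage(data=b"cached payload",
                  expiry=datetime.now(timezone.utc) + timedelta(hours=1))
session.add(blob)
session.commit()

WebStorage.auto_expire()  # deletes rows with expiry <= current_timestamp(), then commits
```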
avg_line_length: 38.526316 | max_line_length: 88 | alphanum_fraction: 0.752732

hexsha: 75638ed0cf8f842aaa865081fd26d8dc0226adb1 | size: 130 | ext: py | lang: Python
max_stars: experiments/__init__.py @ progwriter/mininet-helper (head 7237fa963f04ceb79e602a5fa3bb1ebdc1d83648) | licenses ["MIT"] | count 1 | 2019-04-17T21:02:11.000Z – 2019-04-17T21:02:11.000Z
max_issues: experiments/__init__.py @ progwriter/mininet-helper (head 7237fa963f04ceb79e602a5fa3bb1ebdc1d83648) | licenses ["MIT"] | count null | dates null
max_forks: experiments/__init__.py @ progwriter/mininet-helper (head 7237fa963f04ceb79e602a5fa3bb1ebdc1d83648) | licenses ["MIT"] | count null | dates null
content:
from .inject_ditg import *
from .test2 import *
# TODO: scan and import functions from all files in this package automatically
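The TODO above asks for automatic discovery of the package's modules; a minimal sketch of one standard-library approach (pkgutil plus importlib), offered only as an illustration and not taken from this repository:

```python
# Hypothetical replacement for the explicit imports above: import every module
# in this package and re-export its public names, mimicking `from mod import *`.
import importlib
import pkgutil

for _finder, _mod_name, _is_pkg in pkgutil.iter_modules(__path__):
    _mod = importlib.import_module(f"{__name__}.{_mod_name}")
    _public = getattr(_mod, "__all__", [n for n in dir(_mod) if not n.startswith("_")])
    for _name in _public:
        globals()[_name] = getattr(_mod, _name)
```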
avg_line_length: 18.571429 | max_line_length: 78 | alphanum_fraction: 0.769231

hexsha: b6688495df55b23ad2e1fc13f1fc2a75ddf81a8f | size: 59,125 | ext: py | lang: Python
max_stars: tests/providers/github/test_provider.py @ KakeruMizuno/RDM-waterbutler (head 58ecd801385a7572d1ed56568a31f701291c4e3e) | licenses ["Apache-2.0"] | count 1 | 2019-05-08T02:32:17.000Z – 2019-05-08T02:32:17.000Z
max_issues: tests/providers/github/test_provider.py @ KakeruMizuno/RDM-waterbutler (head 58ecd801385a7572d1ed56568a31f701291c4e3e) | licenses ["Apache-2.0"] | count null | dates null
max_forks: tests/providers/github/test_provider.py @ KakeruMizuno/RDM-waterbutler (head 58ecd801385a7572d1ed56568a31f701291c4e3e) | licenses ["Apache-2.0"] | count null | dates null
content:
import io
import os
import copy
import json
import base64
import hashlib
from http import client
import furl
import pytest
import aiohttpretty
from waterbutler.core import streams
from waterbutler.core import exceptions
from waterbutler.providers.github import GitHubProvider
from waterbutler.providers.github.path import GitHubPath
from waterbutler.providers.github.metadata import (GitHubRevision,
GitHubFileTreeMetadata,
GitHubFolderTreeMetadata,
GitHubFileContentMetadata,
GitHubFolderContentMetadata)
from waterbutler.providers.github import settings as github_settings
from waterbutler.providers.github.exceptions import GitHubUnsupportedRepoError
from tests.providers.github.fixtures import (crud_fixtures,
revision_fixtures,
provider_fixtures)
@pytest.fixture
def auth():
return {
'name': 'cat',
'email': 'cat@cat.com',
}
@pytest.fixture
def other_auth():
return {
'name': 'notcat',
'email': 'notcat@notcat.com',
}
@pytest.fixture
def credentials():
return {'token': 'naps'}
@pytest.fixture
def other_credentials():
return {'token': 'i\'ll have you know that I don\'t take naps. I was just resting my eyes'}
@pytest.fixture
def settings():
return {
'owner': 'cat',
'repo': 'food',
}
@pytest.fixture
def other_settings():
return {
'owner': 'notcat',
'repo': 'might be food',
}
@pytest.fixture
def file_content():
return b'hungry'
@pytest.fixture
def file_like(file_content):
return io.BytesIO(file_content)
@pytest.fixture
def file_stream(file_like):
return streams.FileStreamReader(file_like)
@pytest.fixture
def provider(auth, credentials, settings, provider_fixtures):
provider = GitHubProvider(auth, credentials, settings)
provider._repo = provider_fixtures['repo_metadata']
provider.default_branch = provider_fixtures['repo_metadata']['default_branch']
return provider
@pytest.fixture
def other_provider(other_auth, other_credentials, other_settings, provider_fixtures):
provider = GitHubProvider(other_auth, other_credentials, other_settings)
provider._repo = provider_fixtures['repo_metadata']
provider.default_branch = provider_fixtures['repo_metadata']['default_branch']
return provider
class TestHelpers:
def test_build_repo_url(self, provider, settings):
expected = provider.build_url('repos', settings['owner'], settings['repo'], 'contents')
assert provider.build_repo_url('contents') == expected
def test_committer(self, auth, provider):
expected = {
'name': auth['name'],
'email': auth['email'],
}
assert provider.committer == expected
class TestValidatePath:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_file(self, provider, provider_fixtures):
branch_url = provider.build_repo_url('branches', provider.default_branch)
tree_url = provider.build_repo_url(
'git', 'trees',
provider_fixtures['branch_metadata']['commit']['commit']['tree']['sha'],
recursive=1
)
aiohttpretty.register_json_uri('GET', branch_url, body=provider_fixtures['branch_metadata'])
aiohttpretty.register_json_uri(
'GET', tree_url, body=provider_fixtures['repo_tree_metadata_root']
)
blob_path = 'file.txt'
result = await provider.validate_v1_path('/' + blob_path)
with pytest.raises(exceptions.NotFoundError) as exc:
await provider.validate_v1_path('/' + blob_path + '/')
expected = GitHubPath('/' + blob_path, _ids=[(provider.default_branch, '')])
assert exc.value.code == client.NOT_FOUND
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_root(self, provider):
path = '/'
result = await provider.validate_v1_path(path, branch=provider.default_branch)
no_branch_result = await provider.validate_v1_path(path)
expected = GitHubPath(path, _ids=[(provider.default_branch, '')])
assert result == expected
assert expected == no_branch_result
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_folder(self, provider, provider_fixtures):
branch_url = provider.build_repo_url('branches', provider.default_branch)
tree_url = provider.build_repo_url(
'git', 'trees',
provider_fixtures['branch_metadata']['commit']['commit']['tree']['sha'],
recursive=1
)
aiohttpretty.register_json_uri(
'GET', branch_url, body=provider_fixtures['branch_metadata']
)
aiohttpretty.register_json_uri(
'GET', tree_url, body=provider_fixtures['repo_tree_metadata_root']
)
tree_path = 'level1'
expected = GitHubPath(
'/' + tree_path + '/', _ids=[(provider.default_branch, ''),
(provider.default_branch, None)]
)
result = await provider.validate_v1_path('/' + tree_path + '/')
with pytest.raises(exceptions.NotFoundError) as exc:
await provider.validate_v1_path('/' + tree_path)
assert exc.value.code == client.NOT_FOUND
assert result == expected
assert result.extra == expected.extra
@pytest.mark.asyncio
async def test_reject_multiargs(self, provider):
with pytest.raises(exceptions.InvalidParameters) as exc:
await provider.validate_v1_path('/foo', ref=['bar', 'baz'])
assert exc.value.code == client.BAD_REQUEST
with pytest.raises(exceptions.InvalidParameters) as exc:
await provider.validate_path('/foo', ref=['bar', 'baz'])
assert exc.value.code == client.BAD_REQUEST
@pytest.mark.asyncio
async def test_validate_path(self, provider):
path = await provider.validate_path('/this/is/my/path')
assert path.is_dir is False
assert path.is_file is True
assert path.name == 'path'
assert isinstance(path.identifier, tuple)
assert path.identifier == (provider.default_branch, None)
assert path.parts[0].identifier == (provider.default_branch, None)
@pytest.mark.asyncio
async def test_validate_path_passes_branch(self, provider):
path = await provider.validate_path('/this/is/my/path', branch='NotMaster')
assert path.is_dir is False
assert path.is_file is True
assert path.name == 'path'
assert isinstance(path.identifier, tuple)
assert path.identifier == ('NotMaster', None)
assert path.parts[0].identifier == ('NotMaster', None)
@pytest.mark.asyncio
async def test_validate_path_passes_ref(self, provider):
path = await provider.validate_path('/this/is/my/path', ref='NotMaster')
assert path.is_dir is False
assert path.is_file is True
assert path.name == 'path'
assert isinstance(path.identifier, tuple)
assert path.identifier == ('NotMaster', None)
assert path.parts[0].identifier == ('NotMaster', None)
@pytest.mark.asyncio
async def test_validate_path_passes_file_sha(self, provider):
path = await provider.validate_path('/this/is/my/path', fileSha='Thisisasha')
assert path.is_dir is False
assert path.is_file is True
assert path.name == 'path'
assert isinstance(path.identifier, tuple)
assert path.identifier == (provider.default_branch, 'Thisisasha')
assert path.parts[0].identifier == (provider.default_branch, None)
@pytest.mark.asyncio
async def test_revalidate_path(self, provider):
path = '/'
child_path = 'grass.txt'
github_path = GitHubPath(path, _ids=[(provider.default_branch, '')])
result = await provider.revalidate_path(github_path, child_path)
assert result.path == child_path
class TestCRUD:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download_by_path(self, provider, provider_fixtures):
ref = hashlib.sha1().hexdigest()
file_sha = provider_fixtures['repo_tree_metadata_root']['tree'][0]['sha']
path = GitHubPath(
'/file.txt', _ids=[(provider.default_branch, ''), (provider.default_branch, '')]
)
url = provider.build_repo_url('git', 'blobs', file_sha)
tree_url = provider.build_repo_url('git', 'trees', ref, recursive=1)
latest_sha_url = provider.build_repo_url('git', 'refs', 'heads', path.identifier[0])
commit_url = provider.build_repo_url(
'commits', path=path.path.lstrip('/'), sha=path.identifier[0]
)
aiohttpretty.register_uri('GET', url, body=b'delicious')
aiohttpretty.register_json_uri(
'GET', tree_url, body=provider_fixtures['repo_tree_metadata_root']
)
aiohttpretty.register_json_uri('GET', commit_url, body=[{'commit': {'tree': {'sha': ref}}}])
result = await provider.download(path)
content = await result.read()
assert content == b'delicious'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download_by_path_ref_branch(self, provider, provider_fixtures):
ref = hashlib.sha1().hexdigest()
file_sha = provider_fixtures['repo_tree_metadata_root']['tree'][0]['sha']
path = GitHubPath('/file.txt', _ids=[(provider.default_branch, ''), ('other_branch', '')])
url = provider.build_repo_url('git', 'blobs', file_sha)
tree_url = provider.build_repo_url('git', 'trees', ref, recursive=1)
commit_url = provider.build_repo_url(
'commits', path=path.path.lstrip('/'), sha=path.identifier[0]
)
aiohttpretty.register_uri('GET', url, body=b'delicious')
aiohttpretty.register_json_uri(
'GET', tree_url, body=provider_fixtures['repo_tree_metadata_root']
)
aiohttpretty.register_json_uri('GET', commit_url, body=[{'commit': {'tree': {'sha': ref}}}])
result = await provider.download(path)
content = await result.read()
assert content == b'delicious'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download_by_path_revision(self, provider, provider_fixtures):
ref = hashlib.sha1().hexdigest()
file_sha = provider_fixtures['repo_tree_metadata_root']['tree'][0]['sha']
path = GitHubPath('/file.txt', _ids=[(provider.default_branch, ''), ('other_branch', '')])
url = provider.build_repo_url('git', 'blobs', file_sha)
tree_url = provider.build_repo_url('git', 'trees', ref, recursive=1)
commit_url = provider.build_repo_url(
'commits', path=path.path.lstrip('/'), sha='Just a test'
)
aiohttpretty.register_uri('GET', url, body=b'delicious')
aiohttpretty.register_json_uri(
'GET', tree_url, body=provider_fixtures['repo_tree_metadata_root']
)
aiohttpretty.register_json_uri('GET', commit_url, body=[{'commit': {'tree': {'sha': ref}}}])
result = await provider.download(path, revision='Just a test')
content = await result.read()
assert content == b'delicious'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_upload_create(self, provider, provider_fixtures, crud_fixtures, file_content,
file_stream):
message = 'so hungry'
item = provider_fixtures['upload_response']
path = GitHubPath(
'/' + item['content']['path'],
_ids=[(provider.default_branch, ''), ('master', ''), ('master', '')]
)
commit_url = provider.build_repo_url('commits', path=path.path, sha=path.branch_ref)
sha_url = provider.build_repo_url('git', 'refs', 'heads', path.branch_ref)
blob_url = provider.build_repo_url('git', 'blobs')
create_tree_url = provider.build_repo_url('git', 'trees')
create_commit_url = provider.build_repo_url('git', 'commits')
aiohttpretty.register_json_uri('GET', commit_url, status=404)
aiohttpretty.register_json_uri('GET', sha_url, body=crud_fixtures['latest_sha_metadata'])
aiohttpretty.register_json_uri(
'POST', blob_url, body=crud_fixtures['blob_data'], status=201
)
aiohttpretty.register_json_uri(
'POST', sha_url, body=crud_fixtures['latest_sha_metadata'], status=200
)
aiohttpretty.register_json_uri(
'POST', create_tree_url,
body=provider_fixtures['repo_tree_metadata_root'], status=201
)
aiohttpretty.register_json_uri(
'POST', create_commit_url, status=201,
body=provider_fixtures['new_head_commit_metadata']
)
result = await provider.upload(file_stream, path, message)
expected = GitHubFileTreeMetadata({
'path': path.path,
'sha': crud_fixtures['blob_data']['sha'],
'size': file_stream.size,
}, commit=provider_fixtures['new_head_commit_metadata'], ref=path.branch_ref), True
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_upload_update(self, provider, provider_fixtures, crud_fixtures, file_content,
file_stream):
message = 'so hungry'
path = GitHubPath('/file.txt', _ids=[(provider.default_branch, ''), ('master', '')])
tree_meta = provider_fixtures['repo_tree_metadata_root']
commit_meta = crud_fixtures['all_commits_metadata']
tree_url = furl.furl(
provider.build_repo_url('git', 'trees', commit_meta[0]['commit']['tree']['sha'])
)
tree_url.args.update({'recursive': 1})
commit_url = provider.build_repo_url('commits', path=path.path, sha=path.branch_ref)
sha_url = provider.build_repo_url('git', 'refs', 'heads', path.branch_ref)
blob_url = provider.build_repo_url('git', 'blobs')
create_tree_url = provider.build_repo_url('git', 'trees')
blob_tree_url = provider.build_repo_url(
'git', 'trees') + '/{}:?recursive=99999'.format(path.branch_ref)
aiohttpretty.register_json_uri(
'GET', commit_url, body=crud_fixtures['all_commits_metadata'], status=200
)
aiohttpretty.register_json_uri('GET', tree_url, body=tree_meta)
aiohttpretty.register_json_uri('GET', sha_url, body=crud_fixtures['latest_sha_metadata'])
aiohttpretty.register_json_uri(
'POST', blob_url, body=crud_fixtures['blob_data'], status=201
)
aiohttpretty.register_json_uri(
'GET', blob_tree_url, body=crud_fixtures['crud_repo_tree_metadata_root']
)
aiohttpretty.register_json_uri(
'POST', create_tree_url,
body=crud_fixtures['crud_repo_tree_metadata_root'], status=201
)
result = await provider.upload(file_stream, path, message)
expected = GitHubFileTreeMetadata({
'path': path.path,
'sha': crud_fixtures['blob_data']['sha'],
'size': file_stream.size,
}, ref=path.branch_ref), False
assert result[0].serialized() == expected[0].serialized()
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_upload_empty_repo(self, provider, provider_fixtures,
crud_fixtures, file_content, file_stream):
message = 'so hungry'
item = provider_fixtures['upload_response']
path = GitHubPath(
'/' + item['content']['path'],
_ids=[(provider.default_branch, ''), ('master', ''), ('master', '')]
)
commit_url = provider.build_repo_url('commits', path=path.path, sha=path.branch_ref)
sha_url = provider.build_repo_url('git', 'refs', 'heads', path.branch_ref)
blob_url = provider.build_repo_url('git', 'blobs')
create_tree_url = provider.build_repo_url('git', 'trees')
create_commit_url = provider.build_repo_url('git', 'commits')
git_keep_url = provider.build_repo_url('contents', '.gitkeep')
aiohttpretty.register_json_uri(
'GET', commit_url,
body={
"message": "Git Repository is empty.",
"documentation_url": "https://developer.github.com/v3"
},
status=409
)
aiohttpretty.register_json_uri('GET', sha_url, body=crud_fixtures['latest_sha_metadata'])
aiohttpretty.register_json_uri(
'POST', blob_url, body=crud_fixtures['blob_data'], status=201
)
aiohttpretty.register_json_uri(
'POST', create_tree_url,
body=provider_fixtures['repo_tree_metadata_root'], status=201
)
aiohttpretty.register_json_uri(
'POST', create_commit_url,
body=provider_fixtures['new_head_commit_metadata'], status=201
)
aiohttpretty.register_json_uri(
'POST', sha_url, body=crud_fixtures['latest_sha_metadata'], status=200
)
aiohttpretty.register_json_uri(
'PUT', git_keep_url, body=provider_fixtures['branch_metadata'], status=201
)
result = await provider.upload(file_stream, path, message)
expected = GitHubFileTreeMetadata({
'path': path.path,
'sha': crud_fixtures['blob_data']['sha'],
'size': file_stream.size,
}, commit=provider_fixtures['new_head_commit_metadata'], ref=path.branch_ref), True
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_upload_checksum_mismatch(self, provider, provider_fixtures,
crud_fixtures, file_content, file_stream):
item = provider_fixtures['upload_response']
path = GitHubPath(
'/' + item['content']['path'],
_ids=[(provider.default_branch, ''), ('master', ''), ('master', '')]
)
commit_url = provider.build_repo_url('commits', path=path.path, sha=path.branch_ref)
sha_url = provider.build_repo_url('git', 'refs', 'heads', path.branch_ref)
blob_url = provider.build_repo_url('git', 'blobs')
aiohttpretty.register_json_uri('GET', commit_url, status=404)
aiohttpretty.register_json_uri('GET', sha_url, body=crud_fixtures['latest_sha_metadata'])
aiohttpretty.register_json_uri(
'POST', blob_url, body=crud_fixtures['checksum_mismatch_blob_data'], status=201
)
with pytest.raises(exceptions.UploadChecksumMismatchError) as exc:
await provider.upload(file_stream, path)
assert aiohttpretty.has_call(method='GET', uri=commit_url)
assert aiohttpretty.has_call(method='GET', uri=sha_url)
assert aiohttpretty.has_call(method='POST', uri=blob_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_delete_root_no_confirm(self, provider, provider_fixtures):
path = GitHubPath('/', _ids=[('master', '')])
with pytest.raises(exceptions.DeleteError) as e:
await provider.delete(path)
assert e.value.code == 400
assert e.value.message == 'confirm_delete=1 is required for deleting root provider folder'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_delete_root(self, provider, crud_fixtures):
path = GitHubPath('/', _ids=[('master', '')])
branch_url = provider.build_repo_url('branches', 'master')
commit_url = provider.build_repo_url('git', 'commits')
patch_url = provider.build_repo_url('git', 'refs', 'heads', path.branch_ref)
aiohttpretty.register_json_uri(
'GET', branch_url, body=crud_fixtures['deleted_branch_metadata']
)
aiohttpretty.register_json_uri(
'POST', commit_url, body=crud_fixtures['deleted_commit_metadata'], status=201
)
aiohttpretty.register_json_uri('PATCH', patch_url)
await provider.delete(path, confirm_delete=1)
assert aiohttpretty.has_call(method='PATCH', uri=patch_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_delete_file_with_sha(self, provider, provider_fixtures):
item = provider_fixtures['upload_response']
sha = item['content']['sha']
path = GitHubPath('/file.txt', _ids=[('master', sha), ('master', sha)])
branch = 'master'
message = 'deleted'
url = provider.build_repo_url('contents', path.path)
aiohttpretty.register_json_uri('DELETE', url)
await provider.delete(path, sha, message, branch=branch)
assert aiohttpretty.has_call(method='DELETE', uri=url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_delete_file_no_sha(self, provider, provider_fixtures, crud_fixtures):
item = provider_fixtures['upload_response']
sha = item['content']['sha']
path = GitHubPath('/file.txt', _ids=[('master', ''), ('master', '')])
branch = 'master'
message = 'deleted'
url = provider.build_repo_url('contents', path.path)
commit_url = provider.build_repo_url('commits', path=path.path, sha=path.branch_ref)
tree_url = furl.furl(provider.build_repo_url(
'git', 'trees', crud_fixtures['all_commits_metadata'][0]['commit']['tree']['sha'])
)
tree_url.args.update({'recursive': 1})
aiohttpretty.register_json_uri(
'GET', tree_url, body=provider_fixtures['repo_tree_metadata_root']
)
aiohttpretty.register_json_uri('DELETE', url)
aiohttpretty.register_json_uri(
'GET', commit_url, body=crud_fixtures['all_commits_metadata']
)
await provider.delete(path, sha, message, branch=branch)
assert aiohttpretty.has_call(method='DELETE', uri=url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_delete_folder(self, provider, provider_fixtures, crud_fixtures):
sha = crud_fixtures['deleted_tree_metadata']['tree'][2]['sha']
path = GitHubPath('/deletedfolder/', _ids=[('master', sha), ('master', sha)])
branch = 'master'
message = 'deleted'
metadata_url = provider.build_repo_url('contents', ref='master')
aiohttpretty.register_json_uri('GET', metadata_url,
body=provider_fixtures['content_repo_metadata_root'])
branch_url = provider.build_repo_url('branches', 'master')
tree_url = furl.furl(
provider.build_repo_url(
'git', 'trees',
crud_fixtures['deleted_branch_metadata']['commit']['commit']['tree']['sha']
)
)
create_tree_url = provider.build_repo_url('git', 'trees')
commit_url = provider.build_repo_url('git', 'commits')
patch_url = provider.build_repo_url('git', 'refs', 'heads', path.branch_ref)
aiohttpretty.register_json_uri(
'GET', branch_url, body=crud_fixtures['deleted_branch_metadata']
)
aiohttpretty.register_json_uri('GET', tree_url, body=crud_fixtures['deleted_tree_metadata'])
aiohttpretty.register_json_uri(
'POST', create_tree_url, body=crud_fixtures['deleted_tree_metadata_2'], status=201
)
aiohttpretty.register_json_uri(
'POST', commit_url, body=crud_fixtures['deleted_commit_metadata'], status=201
)
aiohttpretty.register_json_uri('PATCH', patch_url)
await provider.delete(path, sha, message, branch=branch)
assert aiohttpretty.has_call(method='PATCH', uri=patch_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_delete_folder_error_case(self, provider, provider_fixtures, crud_fixtures):
sha = crud_fixtures['deleted_tree_metadata']['tree'][2]['sha']
path = GitHubPath('/deletedfolder/', _ids=[('master', sha), ('master', sha)])
branch = 'master'
message = 'deleted'
metadata_url = provider.build_repo_url('contents', ref='master')
aiohttpretty.register_json_uri('GET', metadata_url,
body=provider_fixtures['content_repo_metadata_root'])
branch_url = provider.build_repo_url('branches', 'master')
tree_url = furl.furl(provider.build_repo_url(
'git', 'trees',
crud_fixtures['deleted_branch_metadata']['commit']['commit']['tree']['sha'])
)
create_tree_url = provider.build_repo_url('git', 'trees')
commit_url = provider.build_repo_url('git', 'commits')
patch_url = provider.build_repo_url('git', 'refs', 'heads', path.branch_ref)
aiohttpretty.register_json_uri(
'GET', branch_url, body=crud_fixtures['deleted_branch_metadata']
)
aiohttpretty.register_json_uri(
'GET', tree_url, body=crud_fixtures['deleted_tree_metadata_2']
)
aiohttpretty.register_json_uri(
'POST', create_tree_url, body=crud_fixtures['deleted_tree_metadata_2'], status=201
)
with pytest.raises(exceptions.NotFoundError) as e:
await provider.delete(path, sha, message, branch=branch)
assert e.value.code == 404
assert e.value.message == "Could not retrieve file or directory /deletedfolder/"
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_delete_folder_last_item_in_repo(self, provider, provider_fixtures,
crud_fixtures):
sha = crud_fixtures['deleted_tree_metadata']['tree'][2]['sha']
path = GitHubPath('/deletedfolder/', _ids=[('master', sha), ('master', sha)])
branch = 'master'
message = 'deleted'
metadata_url = provider.build_repo_url('contents', ref='master')
aiohttpretty.register_json_uri(
'GET', metadata_url, body=provider_fixtures['content_repo_metadata_root_one_folder']
)
branch_url = provider.build_repo_url('branches', 'master')
aiohttpretty.register_json_uri(
'GET', branch_url, body=crud_fixtures['deleted_branch_metadata']
)
commit_url = provider.build_repo_url('git', 'commits')
aiohttpretty.register_json_uri(
'POST', commit_url, body=crud_fixtures['deleted_commit_metadata'], status=201
)
patch_url = provider.build_repo_url('git', 'refs', 'heads', path.branch_ref)
aiohttpretty.register_json_uri('PATCH', patch_url)
await provider.delete(path, sha, message, branch=branch)
assert aiohttpretty.has_call(method='PATCH', uri=patch_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_delete_subfolder(self, provider, crud_fixtures):
sha = crud_fixtures['deleted_tree_metadata']['tree'][2]['sha']
path = GitHubPath(
'/folder1/deletedfolder/', _ids=[('master', sha), ('master', sha), ('master', sha)]
)
branch = 'master'
message = 'deleted'
branch_url = provider.build_repo_url('branches', 'master')
branch_metadata = crud_fixtures['deleted_subfolder_branch_metadata']
aiohttpretty.register_json_uri('GET', branch_url, body=branch_metadata)
tree_url = furl.furl(
provider.build_repo_url(
'git', 'trees', branch_metadata['commit']['commit']['tree']['sha']
)
)
main_tree_metadata = crud_fixtures['deleted_subfolder_main_tree_metadata']
aiohttpretty.register_json_uri('GET', tree_url, body=main_tree_metadata)
idx_tree_url = furl.furl(
provider.build_repo_url('git', 'trees', main_tree_metadata['tree'][3]['sha'])
)
aiohttpretty.register_json_uri(
'GET', idx_tree_url, body=crud_fixtures['deleted_subfolder_idx_tree_metadata']
)
create_tree_url = provider.build_repo_url('git', 'trees')
aiohttpretty.register_json_uri('POST', create_tree_url, **{
"responses": [
{'body': json.dumps(
crud_fixtures['deleted_subfolder_tree_data_1']).encode('utf-8'), 'status': 201},
{'body': json.dumps(
crud_fixtures['deleted_subfolder_tree_data_2']).encode('utf-8'), 'status': 201},
]})
commit_url = provider.build_repo_url('git', 'commits')
aiohttpretty.register_json_uri(
'POST', commit_url, body=crud_fixtures['deleted_subfolder_commit_metadata'], status=201
)
patch_url = provider.build_repo_url('git', 'refs', 'heads', path.branch_ref)
aiohttpretty.register_json_uri('PATCH', patch_url)
await provider.delete(path, sha, message, branch=branch)
assert aiohttpretty.has_call(method='PATCH', uri=patch_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_delete_subfolder_stop_iteration_error(self, provider, crud_fixtures):
sha = crud_fixtures['deleted_tree_metadata']['tree'][2]['sha']
path = GitHubPath(
'/folder1/deletedfolder/', _ids=[('master', sha), ('master', sha), ('master', sha)]
)
branch = 'master'
message = 'deleted'
branch_url = provider.build_repo_url('branches', 'master')
branch_metadata = crud_fixtures['deleted_subfolder_branch_metadata']
aiohttpretty.register_json_uri('GET', branch_url, body=branch_metadata)
tree_url = furl.furl(
provider.build_repo_url(
'git', 'trees', branch_metadata['commit']['commit']['tree']['sha']
)
)
aiohttpretty.register_json_uri(
'GET', tree_url, body=crud_fixtures['deleted_subfolder_bad_tree_metadata']
)
with pytest.raises(exceptions.MetadataError) as e:
await provider.delete(path, sha, message, branch=branch)
assert e.value.code == 404
assert e.value.message == 'Could not delete folder \'{0}\''.format(path)
class TestMetadata:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_file(self, provider, provider_fixtures):
ref = hashlib.sha1().hexdigest()
path = GitHubPath('/file.txt', _ids=[(provider.default_branch, ''), ('other_branch', '')])
tree_url = provider.build_repo_url('git', 'trees', ref, recursive=1)
commit_url = provider.build_repo_url(
'commits', path=path.path.lstrip('/'), sha=path.identifier[0]
)
aiohttpretty.register_json_uri(
'GET', tree_url, body=provider_fixtures['repo_tree_metadata_root']
)
aiohttpretty.register_json_uri('GET', commit_url, body=[{
'commit': {
'tree': {'sha': ref},
'author': {'date': '1970-01-02T03:04:05Z'}
},
}])
result = await provider.metadata(path)
item = provider_fixtures['repo_tree_metadata_root']['tree'][0]
web_view = provider._web_view(path=path)
assert result == GitHubFileTreeMetadata(item, web_view=web_view, commit={
'tree': {'sha': ref}, 'author': {'date': '1970-01-02T03:04:05Z'}
}, ref=path.identifier[0])
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_file_error(self, provider, provider_fixtures):
path = GitHubPath(
'/file.txt', _ids=[(provider.default_branch, ''), (provider.default_branch, '')]
)
tree_url = provider.build_repo_url('git', 'trees', path.branch_ref, recursive=1)
commit_url = provider.build_repo_url(
'commits', path=path.path.lstrip('/'), sha=path.identifier[0]
)
aiohttpretty.register_json_uri('GET', tree_url, body={'tree': [], 'truncated': False})
aiohttpretty.register_json_uri('GET', commit_url, body=[{
'commit': {
'tree': {'sha': path.branch_ref},
'author': {'date': '1970-01-02T03:04:05Z'}
},
}])
with pytest.raises(exceptions.NotFoundError) as e:
await provider.metadata(path)
assert e.value.code == 404
assert e.value.message == 'Could not retrieve file or directory {0}'.format(str(path))
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_doesnt_exist(self, provider, provider_fixtures):
ref = hashlib.sha1().hexdigest()
path = GitHubPath('/file.txt', _ids=[(provider.default_branch, ''), ('other_branch', '')])
tree_url = provider.build_repo_url('git', 'trees', ref, recursive=1)
commit_url = provider.build_repo_url(
'commits', path=path.path.lstrip('/'), sha=path.identifier[0]
)
aiohttpretty.register_json_uri(
'GET', tree_url, body=provider_fixtures['repo_tree_metadata_root']
)
aiohttpretty.register_json_uri('GET', commit_url, body=[])
with pytest.raises(exceptions.NotFoundError):
await provider.metadata(path)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_folder_root(self, provider, provider_fixtures):
path = GitHubPath('/', _ids=[(provider.default_branch, '')])
url = provider.build_repo_url('contents', path.path, ref=provider.default_branch)
aiohttpretty.register_json_uri(
'GET', url, body=provider_fixtures['content_repo_metadata_root']
)
result = await provider.metadata(path)
ret = []
for item in provider_fixtures['content_repo_metadata_root']:
if item['type'] == 'dir':
ret.append(GitHubFolderContentMetadata(item, ref=provider.default_branch))
else:
ret.append(
GitHubFileContentMetadata(
item, web_view=item['html_url'], ref=provider.default_branch
)
)
assert result == ret
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_path_from_metadata(self, provider, provider_fixtures):
path = GitHubPath('/', _ids=[(provider.default_branch, '')])
item = provider_fixtures['content_repo_metadata_root'][0]
metadata = GitHubFileContentMetadata(
item, web_view=item['html_url'], ref=provider.default_branch
)
result = provider.path_from_metadata(path, metadata)
assert result.path == item['path']
        # note: more asserts here?
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_folder_fetch_error(self, provider, provider_fixtures):
path = GitHubPath(
'/test/', _ids=[(provider.default_branch, ''), (provider.default_branch, '')]
)
url = furl.furl(provider.build_repo_url('contents', '/test/'))
url.args.update({'ref': path.branch_ref})
message = 'This repository is not empty.'
aiohttpretty.register_json_uri('GET', url, body={
'message': message
}, status=404)
with pytest.raises(exceptions.MetadataError) as e:
await provider.metadata(path)
assert e.value.code == 404
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_folder_error_empty_repo(self, provider, provider_fixtures):
path = GitHubPath(
'/test/', _ids=[(provider.default_branch, ''), (provider.default_branch, '')]
)
url = furl.furl(provider.build_repo_url('contents', '/test/'))
url.args.update({'ref': path.branch_ref})
message = 'This repository is empty.'
aiohttpretty.register_json_uri('GET', url, body={
'message': message
}, status=404)
result = await provider.metadata(path)
expected = []
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_folder_error_dict_return(self, provider, provider_fixtures):
path = GitHubPath(
'/test/', _ids=[(provider.default_branch, ''), (provider.default_branch, '')]
)
url = furl.furl(provider.build_repo_url('contents', '/test/'))
url.args.update({'ref': path.branch_ref})
message = 'This repository is empty.'
aiohttpretty.register_json_uri('GET', url, body={})
with pytest.raises(exceptions.MetadataError) as e:
await provider.metadata(path)
assert e.value.code == 404
assert e.value.message == 'Could not retrieve folder "{0}"'.format(str(path))
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_revision_metadata(self, provider, revision_fixtures):
metadata = revision_fixtures['revision_metadata']
path = GitHubPath(
'/file.txt', _ids=[("master", metadata[0]['sha']), ('master', metadata[0]['sha'])]
)
url = provider.build_repo_url('commits', path=path.path, sha=path.file_sha)
aiohttpretty.register_json_uri('GET', url, body=metadata)
result = await provider.revisions(path)
expected = [
GitHubRevision(item)
for item in metadata
]
assert result == expected
class TestIntra:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_intra_copy_file(self, provider, provider_fixtures):
branch_meta = provider_fixtures['branch_metadata']
tree_meta = provider_fixtures['repo_tree_metadata_root']
new_tree_meta = provider_fixtures['repo_tree_metadata_root_updated']
src_path = GitHubPath('/file.txt', _ids=[("master", ''), (branch_meta['name'], '')])
dest_path = GitHubPath(
'/truefacts/file.txt',
_ids=[
("master", ''), (branch_meta['name'], ''), (branch_meta['name'], '')
])
tree_url = furl.furl(
provider.build_repo_url('git', 'trees', branch_meta['commit']['commit']['tree']['sha'])
)
tree_url.args.update({'recursive': 1})
branch_url = provider.build_repo_url('branches', branch_meta['name'])
create_tree_url = provider.build_repo_url('git', 'trees')
commit_url = provider.build_repo_url('git', 'commits')
headers = {'Content-Type': 'application/json'}
update_ref_url = provider.build_repo_url('git', 'refs', 'heads', src_path.branch_ref)
aiohttpretty.register_json_uri(
'POST', commit_url, headers=headers,
body=provider_fixtures['new_head_commit_metadata'], status=201
)
aiohttpretty.register_json_uri(
'POST', create_tree_url, headers=headers, body=new_tree_meta, status=201
)
aiohttpretty.register_json_uri('POST', update_ref_url)
aiohttpretty.register_json_uri('GET', branch_url, body=branch_meta)
aiohttpretty.register_json_uri('GET', tree_url, body=tree_meta)
result = await provider.intra_copy(provider, src_path, dest_path)
blobs = [new_tree_meta['tree'][0]]
blobs[0]['path'] = dest_path.path
commit = provider_fixtures['new_head_commit_metadata']
expected = (GitHubFileTreeMetadata(
blobs[0], commit=commit, ref=dest_path.branch_ref
), True)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_intra_copy_file_no_commit(self, provider, provider_fixtures):
branch_meta = provider_fixtures['branch_metadata']
tree_meta = provider_fixtures['repo_tree_metadata_root']
src_path = GitHubPath('/file.txt', _ids=[("master", ''), (branch_meta['name'], '')])
dest_path = GitHubPath(
'/truefacts/file.txt',
_ids=[
("master", ''), (branch_meta['name'], ''), (branch_meta['name'], '')
])
tree_url = furl.furl(
provider.build_repo_url('git', 'trees', branch_meta['commit']['commit']['tree']['sha'])
)
tree_url.args.update({'recursive': 1})
branch_url = provider.build_repo_url('branches', branch_meta['name'])
create_tree_url = provider.build_repo_url('git', 'trees')
commit_url = provider.build_repo_url('git', 'commits')
headers = {'Content-Type': 'application/json'}
update_ref_url = provider.build_repo_url('git', 'refs', 'heads', src_path.branch_ref)
aiohttpretty.register_json_uri(
'POST', commit_url, headers=headers,
body=provider_fixtures['new_head_commit_metadata'], status=201
)
aiohttpretty.register_json_uri(
'POST', create_tree_url, headers=headers, body=tree_meta, status=201
)
aiohttpretty.register_json_uri('POST', update_ref_url)
aiohttpretty.register_json_uri('GET', branch_url, body=branch_meta)
aiohttpretty.register_json_uri('GET', tree_url, body=tree_meta)
result = await provider.intra_copy(provider, src_path, dest_path)
blobs = [tree_meta['tree'][0]]
blobs[0]['path'] = dest_path.path
commit = None
expected = (GitHubFileTreeMetadata(
blobs[0], commit=commit, ref=dest_path.branch_ref
), True)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_intra_copy_and_move_folder(self, provider, provider_fixtures):
branch_meta = provider_fixtures['branch_metadata']
tree_meta = provider_fixtures['repo_tree_metadata_root_with_folder']
new_tree_meta = provider_fixtures['repo_tree_metadata_root_with_folder_updated']
src_path = GitHubPath('/file/', _ids=[("master", ''), (branch_meta['name'], '')])
dest_path = GitHubPath(
'/truefacts/file/',
_ids=[
("master", ''), (branch_meta['name'], ''), (branch_meta['name'], '')
])
tree_url = furl.furl(
provider.build_repo_url('git', 'trees', branch_meta['commit']['commit']['tree']['sha'])
)
tree_url.args.update({'recursive': 1})
branch_url = provider.build_repo_url('branches', branch_meta['name'])
create_tree_url = provider.build_repo_url('git', 'trees')
commit_url = provider.build_repo_url('git', 'commits')
headers = {'Content-Type': 'application/json'}
update_ref_url = provider.build_repo_url('git', 'refs', 'heads', src_path.branch_ref)
aiohttpretty.register_json_uri(
'POST', commit_url, headers=headers,
body=provider_fixtures['new_head_commit_metadata'], status=201
)
aiohttpretty.register_json_uri(
'POST', create_tree_url, headers=headers, body=new_tree_meta, status=201
)
aiohttpretty.register_json_uri('POST', update_ref_url)
aiohttpretty.register_json_uri('GET', branch_url, body=branch_meta)
aiohttpretty.register_json_uri('GET', tree_url, body=tree_meta)
result = await provider.intra_copy(provider, src_path, dest_path)
other_result = await provider.intra_move(provider, src_path, dest_path)
blobs = new_tree_meta['tree'][:3]
provider._reparent_blobs(blobs, src_path, dest_path)
commit = provider_fixtures['new_head_commit_metadata']
expected = GitHubFolderTreeMetadata({
'path': dest_path.path.strip('/')
}, commit=commit, ref=dest_path.branch_ref)
expected.children = []
for item in blobs:
if item['path'] == dest_path.path.rstrip('/'):
continue
if item['type'] == 'tree':
expected.children.append(GitHubFolderTreeMetadata(item, ref=dest_path.branch_ref))
else:
expected.children.append(GitHubFileTreeMetadata(item, ref=dest_path.branch_ref))
assert result == (expected, True) == other_result
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_intra_copy_not_found_error(self, provider, provider_fixtures):
branch_meta = provider_fixtures['branch_metadata']
tree_meta = provider_fixtures['repo_tree_metadata_root_with_folder']
src_path = GitHubPath('/filenotfound.txt', _ids=[("master", ''), (branch_meta['name'], '')])
dest_path = GitHubPath(
'/truefacts/filenotfound.txt',
_ids=[
("master", ''), (branch_meta['name'], ''), (branch_meta['name'], '')
])
tree_url = furl.furl(
provider.build_repo_url('git', 'trees', branch_meta['commit']['commit']['tree']['sha'])
)
tree_url.args.update({'recursive': 1})
branch_url = provider.build_repo_url('branches', branch_meta['name'])
aiohttpretty.register_json_uri('GET', branch_url, body=branch_meta)
aiohttpretty.register_json_uri('GET', tree_url, body=tree_meta)
with pytest.raises(exceptions.NotFoundError) as e:
await provider.intra_copy(provider, src_path, dest_path)
assert e.value.code == 404
assert e.value.message == 'Could not retrieve file or directory ' + '/' + src_path.path
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_intra_move_file(self, provider, provider_fixtures):
branch_meta = provider_fixtures['branch_metadata']
tree_meta = provider_fixtures['repo_tree_metadata_root']
new_tree_meta = provider_fixtures['repo_tree_metadata_root_updated']
src_path = GitHubPath('/file.txt', _ids=[("master", ''), (branch_meta['name'], '')])
dest_path = GitHubPath(
'/truefacts/file.txt',
_ids=[
("master", ''), (branch_meta['name'], ''), (branch_meta['name'], '')
])
tree_url = furl.furl(
provider.build_repo_url('git', 'trees', branch_meta['commit']['commit']['tree']['sha'])
)
tree_url.args.update({'recursive': 1})
branch_url = provider.build_repo_url('branches', branch_meta['name'])
create_tree_url = provider.build_repo_url('git', 'trees')
commit_url = provider.build_repo_url('git', 'commits')
headers = {'Content-Type': 'application/json'}
update_ref_url = provider.build_repo_url('git', 'refs', 'heads', src_path.branch_ref)
aiohttpretty.register_json_uri(
'POST', commit_url, headers=headers,
body=provider_fixtures['new_head_commit_metadata'], status=201
)
aiohttpretty.register_json_uri(
'POST', create_tree_url, headers=headers, body=new_tree_meta, status=201
)
aiohttpretty.register_json_uri('POST', update_ref_url)
aiohttpretty.register_json_uri('GET', branch_url, body=branch_meta)
aiohttpretty.register_json_uri('GET', tree_url, body=tree_meta)
result = await provider.intra_move(provider, src_path, dest_path)
blobs = [new_tree_meta['tree'][0]]
blobs[0]['path'] = dest_path.path
commit = provider_fixtures['new_head_commit_metadata']
expected = (GitHubFileTreeMetadata(
blobs[0], commit=commit, ref=dest_path.branch_ref
), True)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_intra_move_file_different_branch(self, provider, provider_fixtures):
branch_meta = provider_fixtures['branch_metadata']
tree_meta = provider_fixtures['repo_tree_metadata_root']
new_tree_meta = provider_fixtures['repo_tree_metadata_root_updated']
src_path = GitHubPath('/file.txt', _ids=[("master", ''), (branch_meta['name'], '')])
dest_path = GitHubPath(
'/truefacts/file.txt',
_ids=[
("master", ''), (branch_meta['name'] + '2', ''), (branch_meta['name'] + '2', '')
])
tree_url = furl.furl(
provider.build_repo_url('git', 'trees', branch_meta['commit']['commit']['tree']['sha'])
)
tree_url.args.update({'recursive': 1})
branch_url = provider.build_repo_url('branches', branch_meta['name'])
branch_url_2 = provider.build_repo_url('branches', branch_meta['name'] + '2')
create_tree_url = provider.build_repo_url('git', 'trees')
commit_url = provider.build_repo_url('git', 'commits')
headers = {'Content-Type': 'application/json'}
update_ref_url = provider.build_repo_url('git', 'refs', 'heads', src_path.branch_ref)
update_ref_url_2 = provider.build_repo_url('git', 'refs', 'heads', dest_path.branch_ref)
aiohttpretty.register_json_uri(
'POST', commit_url, headers=headers,
body=provider_fixtures['new_head_commit_metadata'], status=201
)
aiohttpretty.register_json_uri(
'POST', create_tree_url, headers=headers, body=new_tree_meta, status=201
)
aiohttpretty.register_json_uri('POST', update_ref_url)
aiohttpretty.register_json_uri('POST', update_ref_url_2)
aiohttpretty.register_json_uri('GET', branch_url, body=branch_meta)
aiohttpretty.register_json_uri('GET', branch_url_2, body=branch_meta)
aiohttpretty.register_json_uri('GET', tree_url, body=tree_meta)
result = await provider.intra_move(provider, src_path, dest_path)
blobs = [new_tree_meta['tree'][0]]
blobs[0]['path'] = dest_path.path
commit = provider_fixtures['new_head_commit_metadata']
expected = (GitHubFileTreeMetadata(
blobs[0], commit=commit, ref=dest_path.branch_ref
), True)
assert result == expected
class TestCreateFolder:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_errors_out(self, provider):
path = GitHubPath(
'/Imarealboy/', _ids=[(provider.default_branch, ''), ('other_branch', '')]
)
url = provider.build_repo_url('contents', path.child('.gitkeep').path)
aiohttpretty.register_uri('PUT', url, status=400)
with pytest.raises(exceptions.CreateFolderError) as e:
await provider.create_folder(path)
assert e.value.code == 400
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_must_be_folder(self, provider):
path = GitHubPath('/Imarealboy', _ids=[(provider.default_branch, ''), ('other_branch', '')])
with pytest.raises(exceptions.CreateFolderError) as e:
await provider.create_folder(path)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_already_exists(self, provider):
path = GitHubPath(
'/Imarealboy/', _ids=[(provider.default_branch, ''), ('other_branch', '')]
)
url = provider.build_repo_url('contents', os.path.join(path.path, '.gitkeep'))
aiohttpretty.register_json_uri('PUT', url, status=422, body={
'message': 'Invalid request.\n\n"sha" wasn\'t supplied.'
})
with pytest.raises(exceptions.FolderNamingConflict) as e:
await provider.create_folder(path)
assert e.value.code == 409
assert e.value.message == ('Cannot create folder "Imarealboy", because a file or folder '
'already exists with that name')
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_raises_other_422(self, provider):
path = GitHubPath(
'/Imarealboy/', _ids=[(provider.default_branch, ''), ('other_branch', '')]
)
url = provider.build_repo_url('contents', os.path.join(path.path, '.gitkeep'))
aiohttpretty.register_json_uri('PUT', url, status=422, body={
'message': 'github no likey'
})
with pytest.raises(exceptions.CreateFolderError) as e:
await provider.create_folder(path)
assert e.value.code == 422
assert e.value.data == {'message': 'github no likey'}
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_returns_metadata(self, provider, provider_fixtures):
path = GitHubPath(
'/i/like/trains/', _ids=[(provider.default_branch, ''),
('other_branch', ''), ('other_branch', ''), ('other_branch', '')]
)
url = provider.build_repo_url('contents', os.path.join(path.path, '.gitkeep'))
aiohttpretty.register_json_uri(
'PUT', url, status=201, body=provider_fixtures['create_folder_response']
)
metadata = await provider.create_folder(path)
assert metadata.kind == 'folder'
assert metadata.name == 'trains'
assert metadata.path == '/i/like/trains/'
class TestOperations:
def test_can_duplicate_names(self, provider):
assert provider.can_duplicate_names() is False
def test_can_intra_move(self, provider, other_provider):
assert provider.can_intra_move(other_provider) is False
assert provider.can_intra_move(provider) is True
def test_can_intra_copy(self, provider, other_provider):
assert provider.can_intra_copy(other_provider) is False
assert provider.can_intra_copy(provider) is True
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test__fetch_branch_error(self, provider):
url = provider.build_repo_url('branches', 'master')
aiohttpretty.register_json_uri('GET', url, status=404)
with pytest.raises(exceptions.NotFoundError) as e:
await provider._fetch_branch('master')
assert e.value.code == 404
assert e.value.message == 'Could not retrieve file or directory . No such branch \'master\''
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test__fetch_tree_truncated_error(self, provider):
sha = 'TotallyASha'
url = furl.furl(provider.build_repo_url('git', 'trees', sha))
aiohttpretty.register_json_uri('GET', url, body={'truncated': True})
with pytest.raises(GitHubUnsupportedRepoError) as e:
await provider._fetch_tree(sha)
assert e.value.code == 501
assert e.value.message == (
'Some folder operations on large GitHub repositories cannot be supported without'
' data loss. To carry out this operation, please perform it in a local git'
' repository, then push to the target repository on GitHub.'
)
class TestUtilities:
def test__path_exists_in_tree(self, provider, provider_fixtures):
_ids = [('master', '')]
assert provider._path_exists_in_tree(
provider_fixtures['nested_tree_metadata']['tree'],
GitHubPath('/alpha.txt', _ids=_ids)
)
assert provider._path_exists_in_tree(
provider_fixtures['nested_tree_metadata']['tree'],
GitHubPath('/beta/', _ids=_ids)
)
assert not provider._path_exists_in_tree(
provider_fixtures['nested_tree_metadata']['tree'],
GitHubPath('/gaw-gai.txt', _ids=_ids)
)
assert not provider._path_exists_in_tree(
provider_fixtures['nested_tree_metadata']['tree'],
GitHubPath('/kaw-kai/', _ids=_ids)
)
def test__remove_path_from_tree(self, provider, provider_fixtures):
_ids = [('master', '')]
simple_file_tree = provider._remove_path_from_tree(
provider_fixtures['nested_tree_metadata']['tree'],
GitHubPath('/alpha.txt', _ids=_ids)
)
assert len(simple_file_tree) == (
len(provider_fixtures['nested_tree_metadata']['tree']) - 1
)
assert 'alpha.txt' not in [x['path'] for x in simple_file_tree]
simple_folder_tree = provider._remove_path_from_tree(
provider_fixtures['nested_tree_metadata']['tree'], GitHubPath('/beta/', _ids=_ids)
)
assert len(simple_folder_tree) == 1
assert simple_folder_tree[0]['path'] == 'alpha.txt'
nested_file_tree = provider._remove_path_from_tree(
provider_fixtures['nested_tree_metadata']['tree'],
GitHubPath('/beta/gamma.txt', _ids=_ids)
)
assert len(nested_file_tree) == (
len(provider_fixtures['nested_tree_metadata']['tree']) - 1
)
assert 'beta/gamma.txt' not in [x['path'] for x in nested_file_tree]
nested_folder_tree = provider._remove_path_from_tree(
provider_fixtures['nested_tree_metadata']['tree'],
GitHubPath('/beta/delta/', _ids=_ids)
)
assert len(nested_folder_tree) == 3
assert len([x for x in nested_folder_tree if x['path'].startswith('beta/delta')]) == 0
missing_file_tree = provider._remove_path_from_tree(
provider_fixtures['nested_tree_metadata']['tree'],
GitHubPath('/bet', _ids=_ids)
)
assert missing_file_tree == provider_fixtures['nested_tree_metadata']['tree']
missing_folder_tree = provider._remove_path_from_tree(
provider_fixtures['nested_tree_metadata']['tree'],
GitHubPath('/beta/gam/', _ids=_ids)
)
        assert missing_folder_tree == provider_fixtures['nested_tree_metadata']['tree']
def test__reparent_blobs(self, provider, provider_fixtures):
_ids = [('master', '')]
file_rename_blobs = copy.deepcopy(
[x for x in
provider_fixtures['nested_tree_metadata']['tree'] if x['path'] == 'alpha.txt']
)
provider._reparent_blobs(
file_rename_blobs, GitHubPath('/alpha.txt', _ids=_ids),
GitHubPath('/zeta.txt', _ids=_ids)
)
assert len(file_rename_blobs) == 1
assert file_rename_blobs[0]['path'] == 'zeta.txt'
folder_rename_blobs = copy.deepcopy(
[x for x in provider_fixtures['nested_tree_metadata']['tree']
if x['path'].startswith('beta')]
)
provider._reparent_blobs(
folder_rename_blobs, GitHubPath('/beta/', _ids=_ids),
GitHubPath('/theta/', _ids=_ids)
)
assert len(folder_rename_blobs) == 4 # beta/, gamma.txt, delta/, epsilon.txt
assert len(
[x for x in folder_rename_blobs if x['path'].startswith('theta/')]
) == 3 # gamma.txt, delta/, epsilon.txt
assert len([x for x in folder_rename_blobs if x['path'] == 'theta']) == 1 # theta/
def test__prune_subtrees(self, provider, provider_fixtures):
pruned_tree = provider._prune_subtrees(provider_fixtures['nested_tree_metadata']['tree'])
assert len(pruned_tree) == 3 # alpha.txt, gamma.txt, epsilon.txt
assert len([x for x in pruned_tree if x['type'] == 'tree']) == 0
avg_line_length: 38.593342 | max_line_length: 100 | alphanum_fraction: 0.637294

hexsha: c10c9e10bbece0c1392a244ba64156372883f8b6 | size: 1,351 | ext: py | lang: Python
max_stars: Fun/Rain_alphabet.py @ kii-chan-iine/MyCode (head be7dbd1f806a6874bad96e127f73f788c0a11849) | licenses ["Artistic-2.0"] | count null | dates null
max_issues: Fun/Rain_alphabet.py @ kii-chan-iine/MyCode (head be7dbd1f806a6874bad96e127f73f788c0a11849) | licenses ["Artistic-2.0"] | count null | dates null
max_forks: Fun/Rain_alphabet.py @ kii-chan-iine/MyCode (head be7dbd1f806a6874bad96e127f73f788c0a11849) | licenses ["Artistic-2.0"] | count null | dates null
content:
import random, pygame
PANEL_width = 400
PANEL_highly = 500
FONT_PX = 15
pygame.init()
# create a window
winSur = pygame.display.set_mode((PANEL_width, PANEL_highly))
font = pygame.font.SysFont('123.ttf', 22)
bg_suface = pygame.Surface((PANEL_width, PANEL_highly), flags=pygame.SRCALPHA)
pygame.Surface.convert(bg_suface)
bg_suface.fill(pygame.Color(0, 0, 0, 28))
winSur.fill((0, 0, 0))
letter = ['q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p', 'a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'z', 'x', 'c',
'v', 'b', 'n', 'm']
texts = [
font.render(str(letter[i]), True, (0, 255, 0)) for i in range(26)
]
# based on the panel width, compute how many columns fit and build a list of drop positions
column = int(PANEL_width / FONT_PX)
drops = [0 for i in range(column)]
while True:
    # fetch events from the event queue
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit()
elif event.type == pygame.KEYDOWN:
chang = pygame.key.get_pressed()
if (chang[32]):
exit()
    # pause for the given number of milliseconds
pygame.time.delay(30)
    # redraw the frame (the translucent background fades previously drawn glyphs)
winSur.blit(bg_suface, (0, 0))
for i in range(len(drops)):
text = random.choice(texts)
        # redraw the glyph at each column's current position
winSur.blit(text, (i * FONT_PX, drops[i] * FONT_PX))
drops[i] += 1
if drops[i] * 10 > PANEL_highly or random.random() > 0.95:
drops[i] = 0
pygame.display.flip()
avg_line_length: 32.166667 | max_line_length: 119 | alphanum_fraction: 0.57809

hexsha: 5430e13a9256c20efd3277d16b29e0749d863843 | size: 4,976 | ext: py | lang: Python
max_stars: COMMON/multiprocessing_env.py @ abcdcamey/RL-learning (head 84e3be15a22bc05fec063b4c3dd56c4836c5981a) | licenses ["MIT"] | count 1 | 2021-12-08T15:18:20.000Z – 2021-12-08T15:18:20.000Z
max_issues: COMMON/multiprocessing_env.py @ abcdcamey/RL-learning (head 84e3be15a22bc05fec063b4c3dd56c4836c5981a) | licenses ["MIT"] | count null | dates null
max_forks: COMMON/multiprocessing_env.py @ abcdcamey/RL-learning (head 84e3be15a22bc05fec063b4c3dd56c4836c5981a) | licenses ["MIT"] | count null | dates null
content:
# -*- coding: utf-8 -*-
"""
# Author : Camey
# DateTime : 2022/4/19 10:25 PM
# Description :
"""
# This code comes from OpenAI baselines and is used for running environments in multiple processes
# https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
| 30.157576
| 102
| 0.586616
|
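A minimal usage sketch for the SubprocVecEnv defined in the record above. It assumes the gym package is installed and that the module is importable as multiprocessing_env; the environment id and worker count are illustrative only.

# Illustrative only: run several CartPole environments in worker processes.
import gym

from multiprocessing_env import SubprocVecEnv  # assumed import path


def make_env(env_id):
    # Each worker needs a zero-argument callable that builds a fresh env.
    def _thunk():
        return gym.make(env_id)
    return _thunk


if __name__ == '__main__':
    num_envs = 4
    envs = SubprocVecEnv([make_env('CartPole-v1') for _ in range(num_envs)])
    obs = envs.reset()  # stacked observations, shape (num_envs, obs_dim)
    for _ in range(10):
        actions = [envs.action_space.sample() for _ in range(num_envs)]
        obs, rewards, dones, infos = envs.step(actions)
    envs.close()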
0e694cb77f47181d61567c493d860ae361ed78f9
| 154
|
py
|
Python
|
loguru-example/main.py
|
t-igu/vscode-remote-container-example
|
7022cd6fe662eb0422f23e55fc4f6d1a49f3f722
|
[
"MIT"
] | null | null | null |
loguru-example/main.py
|
t-igu/vscode-remote-container-example
|
7022cd6fe662eb0422f23e55fc4f6d1a49f3f722
|
[
"MIT"
] | null | null | null |
loguru-example/main.py
|
t-igu/vscode-remote-container-example
|
7022cd6fe662eb0422f23e55fc4f6d1a49f3f722
|
[
"MIT"
] | null | null | null |
from config import setup_logger
from loguru import logger
def main():
setup_logger()
logger.info("aaaaa")
if __name__ == "__main__":
main()
| 17.111111
| 31
| 0.688312
|
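The config.setup_logger helper imported in the record above is not part of that record; the following is a hypothetical sketch of what such a config.py might contain, using only documented loguru calls (logger.remove, logger.add).

# Hypothetical config.py companion for the loguru example above.
import sys

from loguru import logger


def setup_logger():
    # Replace the default handler with one using an explicit level and format.
    logger.remove()
    logger.add(
        sys.stderr,
        level="INFO",
        format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}",
    )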
f87e012b5c5b6460e24f1a30164d97eb27de1a7b
| 3,978
|
py
|
Python
|
PopulationInInadequateHousing/PopulationInInadequateHousing.py
|
UPTechMX/UPT-Calculus-Modules
|
1568244f0d65121f3b7637cdc8cf9f6deca9765f
|
[
"MIT"
] | null | null | null |
PopulationInInadequateHousing/PopulationInInadequateHousing.py
|
UPTechMX/UPT-Calculus-Modules
|
1568244f0d65121f3b7637cdc8cf9f6deca9765f
|
[
"MIT"
] | 3
|
2021-04-05T08:09:47.000Z
|
2021-04-05T08:14:00.000Z
|
PopulationInInadequateHousing/PopulationInInadequateHousing.py
|
UPTechMX/UPT-Calculus-Modules
|
1568244f0d65121f3b7637cdc8cf9f6deca9765f
|
[
"MIT"
] | 2
|
2020-04-13T16:50:28.000Z
|
2020-05-04T15:17:27.000Z
|
# -*- coding: utf-8 -*-
import sys
import os
import multiprocessing
import threading
import _thread as thread
import time
import gc
from random import randint
import json
import math
from plup.indicators.Indicator import Indicator
from plup.Helpers.Vacuum import vacuum
from plup.Helpers.LogEvents import LogEvents
from django.db import transaction
from plup.models import assumptions,classification
class Module:
def __init__(self, user, scenario, extra_dict_arguments=None):
self.__user = user
self.__scenario = scenario
def run(self):
try:
self.__Indicator = Indicator(self.__user)
inadequate_classes=self.__getClassess("inadequate_hu")
inadequate_array="'{"+",".join(inadequate_classes)+"}'"
error = True
count = 0
while error and count < 3:
self.__Indicator = Indicator(self.__user)
db = self.__Indicator.get_up_calculator_connection()
try:
query = """
select urbper_indicator_population_inadequate_housing({scenario},{inadequate})
""".format(
scenario=self.__scenario,
inadequate=inadequate_array
)
LogEvents(
"urbper_indicator population in inadequate housing",
"population in inadequate housing started: " + query,
self.__scenario,
self.__user
)
with transaction.atomic():
db.execute(query)
except Exception as e:
error = True
count += 1
time.sleep(randint(1, 3))
db.close()
LogEvents(
"population in inadequate housing",
"population in inadequate housing module failed " +
str(count) + ": " + str(e),
self.__scenario,
self.__user
)
else:
error = False
db.close()
LogEvents(
"population in inadequate housing",
"population in inadequate housing module finished",
self.__scenario,
self.__user
)
except Exception as e:
LogEvents(
"population in inadequate housing",
"unknown error " +
str(e),
self.__scenario,
self.__user
)
def __getClassess(self,fclass):
try:
query="""select distinct classification.name
from classification
where classification.category='risk'
and classification.fclass='{fclass}'
""".format(fclass=fclass)
results = classification.objects.filter(category='risk',fclass=fclass).distinct().values_list('name',flat=True)
LogEvents(
"classes",
"classes finished: " + query,
self.__scenario,
self.__user
)
results_set=[row for row in results]
results=results_set
except Exception as e:
error = True
time.sleep(randint(1, 3))
LogEvents(
"classes",
"classes failed: " + str(e),
self.__scenario,
self.__user
)
else:
error = False
LogEvents(
"classes",
"classes finished",
self.__scenario,
self.__user
)
return results
| 33.711864
| 123
| 0.477627
|
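A minimal invocation sketch for the Module class in the record above. The user and scenario values are placeholders; in the real project they presumably come from the Django request context and the plup scenario tables.

# Illustrative only: how the indicator module above appears to be driven.
from PopulationInInadequateHousing import Module  # assumed import path

module = Module(user="analyst", scenario=42)  # placeholder user and scenario id
module.run()  # logs progress via LogEvents and retries the SQL call up to 3 times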
e9918c649118986c358e62817c5561145aacfb19
| 3,943
|
py
|
Python
|
tensorflow/python/keras/layers/image_preprocessing_test.py
|
guoyichan/tensorflow
|
0f0b2cd1d59192267d551ba91cb4554e7143e083
|
[
"Apache-2.0"
] | 1
|
2019-12-28T06:25:37.000Z
|
2019-12-28T06:25:37.000Z
|
tensorflow/python/keras/layers/image_preprocessing_test.py
|
yechens/tensorflow
|
382261952391abea73884374fb8abbc294a53596
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/keras/layers/image_preprocessing_test.py
|
yechens/tensorflow
|
382261952391abea73884374fb8abbc294a53596
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image preprocessing layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import image_preprocessing
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.platform import test
class ResizingTest(keras_parameterized.TestCase):
def _run_test(self, kwargs, expected_height, expected_width):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs.update({'height': expected_height, 'width': expected_width})
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
image_preprocessing.Resizing,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, expected_height, expected_width,
channels))
@parameterized.named_parameters(
('down_sample_bilinear_2_by_2', {'interpolation': 'bilinear'}, 2, 2),
('down_sample_bilinear_3_by_2', {'interpolation': 'bilinear'}, 3, 2),
('down_sample_nearest_2_by_2', {'interpolation': 'nearest'}, 2, 2),
('down_sample_nearest_3_by_2', {'interpolation': 'nearest'}, 3, 2),
('down_sample_area_2_by_2', {'interpolation': 'area'}, 2, 2),
('down_sample_area_3_by_2', {'interpolation': 'area'}, 3, 2))
def test_down_sampling(self, kwargs, expected_height, expected_width):
with CustomObjectScope({'Resizing': image_preprocessing.Resizing}):
self._run_test(kwargs, expected_height, expected_width)
@parameterized.named_parameters(
('up_sample_bilinear_10_by_12', {'interpolation': 'bilinear'}, 10, 12),
('up_sample_bilinear_12_by_12', {'interpolation': 'bilinear'}, 12, 12),
('up_sample_nearest_10_by_12', {'interpolation': 'nearest'}, 10, 12),
('up_sample_nearest_12_by_12', {'interpolation': 'nearest'}, 12, 12),
('up_sample_area_10_by_12', {'interpolation': 'area'}, 10, 12),
('up_sample_area_12_by_12', {'interpolation': 'area'}, 12, 12))
def test_up_sampling(self, kwargs, expected_height, expected_width):
with CustomObjectScope({'Resizing': image_preprocessing.Resizing}):
self._run_test(kwargs, expected_height, expected_width)
@parameterized.named_parameters(
('reshape_bilinear_10_by_4', {'interpolation': 'bilinear'}, 10, 4))
def test_reshaping(self, kwargs, expected_height, expected_width):
with CustomObjectScope({'Resizing': image_preprocessing.Resizing}):
self._run_test(kwargs, expected_height, expected_width)
def test_invalid_interpolation(self):
with self.assertRaises(NotImplementedError):
image_preprocessing.Resizing(5, 5, 'invalid_interpolation')
def test_config_with_custom_name(self):
layer = image_preprocessing.Resizing(5, 5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.Resizing.from_config(config)
self.assertEqual(layer_1.name, layer.name)
if __name__ == '__main__':
test.main()
| 44.303371
| 80
| 0.722039
|
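A small usage sketch of the Resizing layer exercised by the tests above. It goes through the public Keras path that shipped around this TensorFlow version (tf.keras.layers.experimental.preprocessing.Resizing); the exact module path may differ between releases.

# Illustrative only: resize a batch of images with the preprocessing layer.
import numpy as np
import tensorflow as tf

images = np.random.rand(2, 5, 8, 3).astype("float32")  # (batch, height, width, channels)
resize = tf.keras.layers.experimental.preprocessing.Resizing(
    height=10, width=12, interpolation="bilinear")
resized = resize(images)
print(resized.shape)  # expected: (2, 10, 12, 3)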
a2822978c46da524e2b40c4143ccbdaffcfe3c51
| 1,739
|
py
|
Python
|
python/lsst/qa/explorer/consolidateQATable.py
|
lsst-dm/qa_explorer
|
f5997b3236f8abfc646295b63ecdd4abe8274164
|
[
"MIT"
] | 3
|
2018-03-21T01:21:49.000Z
|
2019-07-24T13:30:41.000Z
|
python/lsst/qa/explorer/consolidateQATable.py
|
lsst-dm/qa_explorer
|
f5997b3236f8abfc646295b63ecdd4abe8274164
|
[
"MIT"
] | 5
|
2018-03-22T19:20:33.000Z
|
2018-11-19T17:02:08.000Z
|
python/lsst/qa/explorer/consolidateQATable.py
|
lsst-dm/qa_explorer
|
f5997b3236f8abfc646295b63ecdd4abe8274164
|
[
"MIT"
] | null | null | null |
"""Command-line task and associated config for consolidating QA tables.
The deepCoadd_qa table is a table with QA columns of interest computed
for all filters for which the deepCoadd_obj tables are written.
"""
import os
import pandas as pd
from lsst.pex.config import Config, Field
from lsst.pipe.base import CmdLineTask, ArgumentParser
from lsst.pipe.tasks.parquetTable import ParquetTable
from lsst.pipe.tasks.postprocess import (ConsolidateObjectTableConfig, ConsolidateObjectTableTask,
TractObjectDataIdContainer)
# Question: is there a way that LSST packages store data files?
ROOT = os.path.abspath(os.path.dirname(__file__))
class ConsolidateQATableConfig(ConsolidateObjectTableConfig):
coaddName = Field(dtype=str, default="deep", doc="Name of coadd")
class ConsolidateQATableTask(ConsolidateObjectTableTask):
"""Write patch-merged source tables to a tract-level parquet file
"""
_DefaultName = "consolidateQATable"
ConfigClass = ConsolidateQATableConfig
inputDataset = 'deepCoadd_qa'
outputDataset = 'deepCoadd_qa_tract'
@classmethod
def _makeArgumentParser(cls):
parser = ArgumentParser(name=cls._DefaultName)
parser.add_id_argument("--id", cls.inputDataset,
help="data ID, e.g. --id tract=12345",
ContainerClass=TractObjectDataIdContainer)
return parser
def runDataRef(self, patchRefList):
df = pd.concat([patchRef.get().toDataFrame() for patchRef in patchRefList])
patchRefList[0].put(ParquetTable(dataFrame=df), self.outputDataset)
def writeMetadata(self, dataRef):
"""No metadata to write.
"""
pass
| 34.098039
| 98
| 0.707878
|
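A sketch of how the command-line task in the record above would typically be invoked, following the --id help string it defines; the repository path and tract number are placeholders.

# Illustrative only: drive the task through the standard CmdLineTask entry point,
# equivalent to a shell invocation such as
#   consolidateQATable.py /path/to/repo --id tract=12345
from lsst.qa.explorer.consolidateQATable import ConsolidateQATableTask

ConsolidateQATableTask.parseAndRun(args=['/path/to/repo', '--id', 'tract=12345'])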
e43062f9dcb219051f4147e98d2b7d7ea21fa182
| 1,943
|
py
|
Python
|
cra_helper/asset_manifest.py
|
10686142/django-cra-helper
|
295ced7e971878d34f4853e09ec730aebaa50ff2
|
[
"MIT"
] | null | null | null |
cra_helper/asset_manifest.py
|
10686142/django-cra-helper
|
295ced7e971878d34f4853e09ec730aebaa50ff2
|
[
"MIT"
] | null | null | null |
cra_helper/asset_manifest.py
|
10686142/django-cra-helper
|
295ced7e971878d34f4853e09ec730aebaa50ff2
|
[
"MIT"
] | null | null | null |
import os
import logging
import json
from django.conf import settings
logger = logging.getLogger(__name__)
_asset_filename = 'asset-manifest.json'
def generate_manifest(is_server_live: bool, bundle_path: str, app_dir: str) -> dict:
    # Prepare references to the various frontend files
if is_server_live:
return {
'bundle_js': bundle_path,
}
else:
_manifest = {}
build_dir = os.path.join(app_dir, 'build')
# Add the CRA static directory to STATICFILES_DIRS so collectstatic can grab files in there
static_dir = os.path.join(build_dir, 'static')
settings.STATICFILES_DIRS += [static_dir]
# CRA generates a JSON file that maps typical filenames to their hashed filenames
manifest_path = os.path.join(build_dir, _asset_filename)
# Try to load the JSON manifest from the React build directory first
try:
with open(manifest_path) as data_file:
logger.info('found manifest in React build files')
data = json.load(data_file)
except Exception as e:
# If that doesn't work, try to load it from the Django project's static files directory
try:
static_manifest_path = os.path.join(settings.STATIC_ROOT, _asset_filename)
with open(static_manifest_path) as data_file:
logger.info('found manifest in static files')
data = json.load(data_file)
except Exception as e:
logger.error('can\'t load static asset manifest: {}'.format(e))
return {}
# Generate relative paths to our bundled assets
for filename, path in data.get('files').items():
asset_key = filename.replace('.', '_')
asset_key = asset_key.replace('/', '_')
_manifest[asset_key] = os.path.relpath(path, 'static/')
return _manifest
| 36.660377
| 99
| 0.627895
|
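A sketch of how generate_manifest above might be called. The bundle URL and app directory are placeholders, and Django settings must already be configured before taking the non-live branch (it reads settings.STATICFILES_DIRS and settings.STATIC_ROOT).

# Illustrative only: resolve CRA asset names for template use.
from cra_helper.asset_manifest import generate_manifest

# Live CRA dev server: only the bundle URL is needed.
live_assets = generate_manifest(True, 'http://localhost:3000/static/js/bundle.js', '')

# Production build: read build/asset-manifest.json under the CRA app directory.
built_assets = generate_manifest(False, '', '/path/to/frontend')  # placeholder path
main_js = built_assets.get('main_js')  # keys have '.' and '/' replaced with '_'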
eeec87daf5d4e38ffbbc5445d3f952bd153b2c60
| 4,592
|
py
|
Python
|
synthetic_test/Plots/plot_precision.py
|
sritejakv/eval_CG
|
259b2d0ee39cbfe3c6961d7bbdeff8c135ec5712
|
[
"MIT"
] | null | null | null |
synthetic_test/Plots/plot_precision.py
|
sritejakv/eval_CG
|
259b2d0ee39cbfe3c6961d7bbdeff8c135ec5712
|
[
"MIT"
] | null | null | null |
synthetic_test/Plots/plot_precision.py
|
sritejakv/eval_CG
|
259b2d0ee39cbfe3c6961d7bbdeff8c135ec5712
|
[
"MIT"
] | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.style as style
def autolabel(rects, plot_axes):
"""
    Attach a text label next to each horizontal bar displaying its width
"""
totals = []
for i in rects:
totals.append(i.get_width())
total = sum(totals)
for rect in rects[:-1]:
height = rect.get_height()
if rect.get_width() > 0:
plot_axes.text(rect.get_width(), rect.get_y() + height/2, "%.2f" % rect.get_width(), fontsize=7, color='black', alpha=0.8, ha='center', va='bottom')
plot_axes.text(rects[-1].get_width(), rects[-1].get_y() + height/2, "%.2f" % rects[-1].get_width(), fontsize=7, ha='center', va='bottom', weight='bold', style='italic')
def get_geometric_mean(dataset, metric):
"""
    Habib's geometric mean
"""
import numpy as np
import math
zeroes = []
non_zeroes = []
sum_of_logs = 0.0
for index, row in dataset.iterrows():
if row[metric] > 0:
non_zeroes.append(row[metric])
sum_of_logs += np.log2(row[metric])
else:
zeroes.append(row[metric])
m = len(zeroes)
n = len(non_zeroes)
nbynplusm = n/(n + m)
right_side_of_exp = (1/(n + m)) * sum_of_logs
exp_value = math.exp(right_side_of_exp)
geometric_mean = nbynplusm * exp_value
return geometric_mean
style.use(['ggplot', 'fivethirtyeight'])
colors = ['#DA7C30', '#396AB1', '#CC2529', '#47CDDA']
c2f_main = pd.read_csv('../docker_reports/Code2flow.csv')
c2f = c2f_main[['Category', 'Precision']]
pyan_main = pd.read_csv('../docker_reports/Pyan.csv')
pyan = pyan_main[['Category', 'Precision']]
walaNCFA_main = pd.read_csv('../docker_reports/WalaNCFA.csv')
walaNCFA = walaNCFA_main[['Category', 'Precision']]
c2f_mean = c2f.groupby(['Category'], as_index=False).mean()
# c2f_mean.loc[len(c2f_mean)] = ['Weighted Average', get_weighted_geometric_mean(c2f_main)]
c2f_mean.loc[len(c2f_mean)] = ['Average', get_geometric_mean(c2f_main, "Precision")]
pyan_mean = pyan.groupby(['Category'], as_index=False).mean()
# pyan_mean.loc[len(pyan_mean)] = ['Weighted Average', get_weighted_geometric_mean(pyan_main)]
pyan_mean.loc[len(pyan_mean)] = ['Average', get_geometric_mean(pyan_main, "Precision")]
walaNCFA_mean = walaNCFA.groupby(['Category'], as_index=False).mean()
# walaNCFA_mean.loc[len(walaNCFA_mean)] = ['Weighted Average', get_weighted_geometric_mean(walaNCFA_main)]
walaNCFA_mean.loc[len(walaNCFA_mean)] = ['Average', get_geometric_mean(walaNCFA_main, "Precision")]
c2f_precision = c2f_mean[['Category', 'Precision']].copy()
c2f_precision.replace({"code_generation": "run_time_code_generation"}, inplace=True)
pyan_precision = pyan_mean[['Category', 'Precision']].copy()
pyan_precision.replace({"code_generation": "run_time_code_generation"}, inplace=True)
walaNCFA_precision = walaNCFA_mean[['Category', 'Precision']].copy()
walaNCFA_precision.replace({"code_generation": "run_time_code_generation"}, inplace=True)
label_fontsize = 10
title_fontsize = 11
fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, sharey=True, figsize=(9, 4))
c2f_precision.plot(kind='barh', y='Precision', x='Category', color=colors[0], alpha=0.6, ax=ax0)
ax0.set_title('Code2flow', fontsize=title_fontsize)
ax0.set_xlabel('Precision', fontsize=label_fontsize)
ax0.set_ylabel('Benchmark Category', fontsize=label_fontsize)
ax0.set_xlim([0, 1])
pyan_precision.plot(kind='barh', y='Precision', x='Category', color=colors[1], alpha=0.6, ax=ax1)
ax1.set_title('Pyan', fontsize=title_fontsize)
ax1.set_xlabel('Precision', fontsize=label_fontsize)
ax1.set_xlim([0, 1])
walaNCFA_precision.plot(kind='barh', y='Precision', x='Category', color=colors[2], alpha=0.6, ax=ax2)
ax2.set_title('Wala NCFA', fontsize=title_fontsize)
ax2.set_xlabel('Precision', fontsize=label_fontsize)
ax2.set_xlim([0, 1])
ax0.legend().set_visible(False)
ax1.legend().set_visible(False)
ax2.legend().set_visible(False)
tick_label_size = 8
ax0.tick_params(labelsize=tick_label_size)
ax1.tick_params(labelsize=tick_label_size)
ax2.tick_params(labelsize=tick_label_size)
#Setting weight for Average row
ylabels = ax0.get_yticklabels()
modified_ylabels = []
for i in ylabels:
if 'Average' in i.get_text():
i.set_weight("bold")
i.set_style("italic")
modified_ylabels.append(i)
ax0.set_yticklabels(modified_ylabels)
#Adding values next to the bars
autolabel(ax0.patches, ax0)
autolabel(ax1.patches, ax1)
autolabel(ax2.patches, ax2)
# autolabel(ax3.patches, ax3)
fig.savefig('precision_synthetic_test.png', transparent=False, dpi=150, bbox_inches="tight")
| 38.588235
| 172
| 0.714068
|
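A worked numeric check of get_geometric_mean in the record above. It mirrors exactly what that function computes for a small 'Precision' column: the non-zero fraction times e raised to the mean of the base-2 logs of the non-zero values (the function sums base-2 logarithms but exponentiates with e, and the sketch reproduces that as written).

# Worked example mirroring get_geometric_mean on a tiny 'Precision' column.
import math

import numpy as np

precisions = [0.5, 1.0, 0.0, 0.25]                      # one zero, three non-zero values
non_zeroes = [x for x in precisions if x > 0]
n, m = len(non_zeroes), len(precisions) - len(non_zeroes)
sum_of_logs = sum(np.log2(x) for x in non_zeroes)       # -1 + 0 - 2 = -3
value = (n / (n + m)) * math.exp((1 / (n + m)) * sum_of_logs)
print(round(value, 4))  # 0.75 * exp(-0.75) ≈ 0.3543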
7ae18eb6670e3a65f1225fb538ab2e80123c65f0
| 534
|
py
|
Python
|
ros_ws/build/gripper_pkg/catkin_generated/pkg.develspace.context.pc.py
|
isuru-m/ROSbot_Gripper_Project
|
c3d8f46461612a52137ff3f63db45cac20b5364f
|
[
"MIT"
] | null | null | null |
ros_ws/build/gripper_pkg/catkin_generated/pkg.develspace.context.pc.py
|
isuru-m/ROSbot_Gripper_Project
|
c3d8f46461612a52137ff3f63db45cac20b5364f
|
[
"MIT"
] | null | null | null |
ros_ws/build/gripper_pkg/catkin_generated/pkg.develspace.context.pc.py
|
isuru-m/ROSbot_Gripper_Project
|
c3d8f46461612a52137ff3f63db45cac20b5364f
|
[
"MIT"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/husarion/ros_ws/devel/include;/home/husarion/ros_ws/src/gripper_pkg/include".split(';') if "/home/husarion/ros_ws/devel/include;/home/husarion/ros_ws/src/gripper_pkg/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "gripper_pkg"
PROJECT_SPACE_DIR = "/home/husarion/ros_ws/devel"
PROJECT_VERSION = "0.0.0"
| 59.333333
| 229
| 0.749064
|
466863f028b69152b1f8e9621b91d2bca161cbb2
| 1,172
|
py
|
Python
|
setup.py
|
petarmaric/metated
|
5c7007ffb4aac7968d947e6f9edcd488fdea0eb3
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
petarmaric/metated
|
5c7007ffb4aac7968d947e6f9edcd488fdea0eb3
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
petarmaric/metated
|
5c7007ffb4aac7968d947e6f9edcd488fdea0eb3
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup, find_packages
import metaTED
setup(
name='metaTED',
version=metaTED.__version__,
url='https://github.com/petarmaric/metated',
download_url='http://pypi.python.org/pypi/metaTED',
license='BSD',
author='Petar Maric',
author_email='petar.maric@gmail.com',
description='Creates metalink files of TED talks for easier downloading',
long_description=open('README.rst').read(),
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Utilities',
],
keywords='TED metalink download video',
platforms='any',
packages=find_packages(),
include_package_data=True,
entry_points={
'console_scripts': ['metaTED=metaTED:main']
},
install_requires=open('requirements.txt').read().splitlines()
)
| 31.675676
| 77
| 0.645904
|
85b610545d748034647d7cea0915910dbacca7a9
| 18,072
|
py
|
Python
|
src/ydata_quality/labelling/engine.py
|
poga/ydata-quality
|
0cdda2774b05101c5f4f773b5e946f2a6544da09
|
[
"MIT"
] | 242
|
2021-09-22T17:16:49.000Z
|
2022-03-30T10:26:25.000Z
|
src/ydata_quality/labelling/engine.py
|
poga/ydata-quality
|
0cdda2774b05101c5f4f773b5e946f2a6544da09
|
[
"MIT"
] | 13
|
2021-09-23T00:15:10.000Z
|
2022-02-04T16:33:42.000Z
|
src/ydata_quality/labelling/engine.py
|
poga/ydata-quality
|
0cdda2774b05101c5f4f773b5e946f2a6544da09
|
[
"MIT"
] | 21
|
2021-09-24T09:59:30.000Z
|
2022-03-16T02:48:11.000Z
|
"""
Implementation of LabelInspector engine class to run label quality analysis.
"""
from typing import Optional, Union
from pandas import DataFrame, Series
from src.ydata_quality.core.warnings import Priority
from ..core import QualityEngine, QualityWarning
from ..utils.auxiliary import infer_dtypes
from ..utils.modelling import (estimate_centroid, estimate_sd, gmm_clustering,
normality_test, performance_one_vs_rest,
standard_transform)
def label_inspector_dispatch(df, label, random_state: Optional[int] = None, severity: Optional[str] = None):
"""Runs a label type inference to instantiate the correct label inspector.
Instantiate this label inspector method to create a Label Inspector.
Arguments:
df (DataFrame): reference DataFrame used to run the label analysis.
label (str, optional): target feature to be predicted.
random_state (int, optional): Integer seed for random reproducibility. Default is None.
Set to None for fully random behavior, no reproducibility.
severity (str, optional): Sets the logger warning threshold to one of the valid levels
[DEBUG, INFO, WARNING, ERROR, CRITICAL]
"""
label_dtype = infer_dtypes(df[label])[label] # Label column dtype inferral
if label_dtype == 'categorical':
return CategoricalLabelInspector(df, label, random_state=random_state, severity=severity)
return NumericalLabelInspector(df, label, random_state=random_state, severity=severity)
class SharedLabelInspector(QualityEngine):
"""Shared structure for Numerical/Categorical Label Inspector"""
def __init__(self, df: DataFrame, label: str,
random_state: Optional[int] = None, severity: Optional[str] = None):
super().__init__(df=df, label=label, random_state=random_state, severity=severity)
self._tdf = None
@property
def tdf(self):
"Property that returns the transformed dataset centroids for all (not nan) classes."
if self._tdf is None:
self._tdf = self._transform_df()
return self._tdf
@staticmethod
def __get_missing_labels(df: DataFrame, label: str):
return df[df[label].isna()]
def _transform_df(self):
"""Selects all observations with a label feature and applies preprocessing transformations.
Index and column names are preserved.
Observations with missing label are disregarded."""
df = self.df[~self.df[self.label].isna()]
dtypes = self.dtypes
skip = [self.label] if self.dtypes[self.label] == 'categorical' else []
tdf, _ = standard_transform(df, dtypes, skip=skip, robust=True)
return tdf
def missing_labels(self):
"""Returns observations with missing labels"""
missing_labels = self.__get_missing_labels(self.df, self.label)
if len(missing_labels) > 0:
self.store_warning(
QualityWarning(
test=QualityWarning.Test.MISSING_LABELS, category=QualityWarning.Category.LABELS,
priority=Priority.P1, data=missing_labels,
description=f"Found {len(missing_labels)} instances with missing labels."
))
else:
self._logger.info("No missing labels were found.")
missing_labels = None
return missing_labels
class CategoricalLabelInspector(SharedLabelInspector):
"""Engine for running analysis on categorical labels.
Ordinal labels can be handled if passed as categorical."""
def __init__(self, df: DataFrame, label: str, random_state: Optional[int], severity: Optional[str] = None):
super().__init__(df=df, label=label, random_state=random_state, severity=severity)
self._centroids = None
self._tests = ["missing_labels", "few_labels", "unbalanced_classes",
"one_vs_rest_performance", "outlier_detection"]
@property
def centroids(self):
"Property that returns estimated centroids for all (not nan) classes."
if self._centroids is None:
self._centroids = self._get_centroids()
return self._centroids
def __get_few_labels(self, th=1):
counts = self._get_label_counts(dropna=True)
return counts[counts <= th]
def _get_label_counts(self, dropna=False):
"""Returns a series with unique values of the label column and observation counts
Args:
dropna: Controls if NaN (empty) values can be considered as a class of their own"""
return Series(
self.df[self.label].value_counts(dropna=dropna).sort_values(ascending=False),
name='Label counts')
def few_labels(self, count_th: Union[int, float] = 1):
"""Retrieves labels with a few observations.
By default returns labels with only one record.
When a float is passed it is treated as a fraction of the total records."""
assert count_th > 0 and (isinstance(count_th, int) or 0 < count_th < 1), "\
count_th must be positive integer or float in the ]0,1[ interval."
if isinstance(count_th, float):
count_th = int(count_th * self.df.shape[0])
few_labels = self.__get_few_labels(count_th)
if len(few_labels) > 0:
self.store_warning(
QualityWarning(
test=QualityWarning.Test.FEW_LABELS, category=QualityWarning.Category.LABELS,
priority=Priority.P2, data=few_labels,
description=f"Found {len(few_labels)} labels with {count_th} or less records."
))
else:
self._logger.info("No labels with %d or less records were found.", count_th)
few_labels = None
return few_labels
def unbalanced_classes(self, slack: float = 0.3):
"""Reports majority/minority classes (above/below a relative count threshold).
Arguments:
slack: Margin for alert triggers based on label representativity.
Slack is linearly adjusted for n>2 classes.
"""
# TODO: Plot bar chart with observation counts for each class and respective thresholds
if slack < 0 or slack > 0.5:
raise ValueError('Slack must be part of the open interval ]0, 0.5[')
label_counts = self._get_label_counts(dropna=True) # No label records are not considered
n_classes = len(label_counts)
labeled_records = label_counts.sum() # Total labelled records
label_ratio = label_counts / labeled_records
fair_share = 1 / n_classes
adj_slack = slack * (2 / n_classes) # Adjust slack depending on number of classes
label_excess = (label_ratio - fair_share)[abs(label_ratio - fair_share) > adj_slack]
data = {}
if len(label_excess) != 0:
for _class, excess in label_excess.items():
folder = 'Under-represented' # By default
if excess > 0:
folder = 'Over-represented'
data.setdefault(folder, {})[_class] = self.df[self.df[self.label] == _class]
self.store_warning(
QualityWarning(
test=QualityWarning.Test.UNBALANCED_CLASSES, category=QualityWarning.Category.LABELS,
priority=Priority.P2, data=data,
description=f"""
Classes {set(data['Under-represented'].keys())} \
are under-represented each having less than {fair_share-adj_slack:.1%} of total instances. \
Classes {set(data['Over-represented'].keys())} \
are over-represented each having more than {fair_share+adj_slack:.1%} of total instances
"""))
else:
self._logger.info("No unbalanced classes were found.")
return None
return label_excess.index
def one_vs_rest_performance(self, slack: float = 0):
"""Performs one vs rest classification over each label class.
Returns a series with Area Under Curve for each label class.
Slack defines a proportion for the record weighted average of performances as a tolerance.
Any binary classifier that scores below the average minus tolerance triggers a warning.
"""
# TODO: Plot ROC curve
        assert 0 <= slack <= 1, "Argument slack is expected to be a float in the [0,1] interval"
_class_counts = self._get_label_counts(dropna=True)
_class_counts = _class_counts[_class_counts > 1]
results = {
_class: performance_one_vs_rest(df=self.tdf, label_feat=self.label,
_class=_class, dtypes=self.dtypes)
for _class in _class_counts.index
}
record_weighted_avg = sum([perf * _class_counts[_class] for _class, perf in results.items()])
record_weighted_avg = (1 / _class_counts.sum()) * record_weighted_avg
threshold = (1 - slack) * record_weighted_avg
poor_performers = {_class: perf for _class, perf in results.items() if perf < threshold}
if len(poor_performers) > 0:
self.store_warning(
QualityWarning(
test=QualityWarning.Test.ONE_REST_PERFORMANCE, category=QualityWarning.Category.LABELS,
priority=Priority.P2, data=Series(poor_performers),
description=f"Classes {set(poor_performers.keys())} performed under the {threshold:.1%} AUROC \
threshold. The threshold was defined as an average of all classifiers with {slack:.0%} slack."
))
return Series(results)
def _get_centroids(self):
"""Produces a centroid estimation for observations grouped by label value.
Centroids are estimated using the normalized dataset."""
label_counts = self._get_label_counts(dropna=True)
centroids = DataFrame(self.tdf.iloc[:len(label_counts)],
columns=self.tdf.columns, index=label_counts.index)
for count, _class in enumerate(label_counts.index):
records = self.tdf[self.tdf[self.label] == _class]
centroids.iloc[count] = estimate_centroid(records, self.dtypes)
return centroids
def _get_class_sds(self):
"""Estimates the STDev of intra cluster distances to the centroid over each class.
Returns:
sds: A series with the intra cluster distances of each class
sd_distances: A dictionary with the distances of each point to its centroid (key).
Distances are scaled by the corresponding stdev of the intra cluster distances"""
sd_distances = {}
for _class in self.centroids.index:
sds = sd_distances.setdefault(_class, {})
records = self.tdf[self.tdf[self.label] == _class].drop(self.label, axis=1)
centroid = self.centroids.loc[_class].drop(self.label).values.flatten()
sds['SD'], sds['Scaled Distances'] = estimate_sd(records, centroid, dtypes=self.dtypes)
return sd_distances
def outlier_detection(self, th=3):
"""Provides a dictionary ordered by label values and identifying potential outliers.
Outliers are defined as points with distance to group centroid bigger than a threshold.
The threshold is defined in Standard Deviations of the intra-cluster distances."""
sd_distances = self._get_class_sds()
potential_outliers = 0
data = {}
for _class, sds in sd_distances.items():
sd_distances = sds['Scaled Distances'][sds['Scaled Distances'] > th]
new_outliers = len(sd_distances)
if new_outliers > 0:
potential_outliers += new_outliers
data.setdefault(_class, self.df.loc[sd_distances.index])
if potential_outliers > 0:
self.store_warning(
QualityWarning(
test=QualityWarning.Test.OUTLIER_DETECTION, category=QualityWarning.Category.LABELS,
priority=Priority.P2, data=data,
description=f"""
Found {potential_outliers} potential outliers across {len(data.keys())} classes. \
A distance bigger than {th} standard deviations of intra-cluster distances \
to the respective centroids was used to define the potential outliers.
"""
))
return data
class NumericalLabelInspector(SharedLabelInspector):
"Engine for running analyis on numerical labels."
def __init__(self, df: DataFrame, label: str, random_state, severity: Optional[str] = None):
super().__init__(df=df, label=label, random_state=random_state, severity=severity)
self._tests = ["missing_labels", "test_normality", "outlier_detection"]
def _gmm_clusters(self, max_clusters):
"""Separates the dataset into a Gaussian Mixture Model cluster optimized nbins.
Clustering is done only with the label column values."""
sorted_vals = self.tdf[self.label].sort_values().copy()
search_space = range(1, max_clusters)
aic = [None for k in search_space]
labels = {k: None for k in search_space}
for count, total_clusters in enumerate(search_space):
labels[total_clusters], aic[count] = gmm_clustering(sorted_vals.values.reshape(-1, 1), total_clusters)
ideal_k = list(labels.keys())[aic.index(min(aic))]
return Series(labels[ideal_k], index=sorted_vals.index)
def outlier_detection(self, th: float = 3., use_clusters=False, max_clusters: int = 5):
"""Detects outliers based on standard deviation of the label feature.
Estimates the median value and standard deviation for the label.
Signals all values beyond th standard deviations from the median as potential outliers.
Arguments:
th: threshold measured in cluster standard deviations
use_clusters: Set to True in order to detect outliers inside each proposed cluster.
Set to False to use a unimodal outlier detection strategy (default)
max_clusters: To take effect must be used with use_clusters passed as True.
Defines search space upper bound for number of clusters."""
if use_clusters:
cluster_labels = self._gmm_clusters(max_clusters)
else:
cluster_labels = Series('full_dataset', index=self.tdf.index)
clusters = cluster_labels.unique()
potential_outliers = {}
for cluster in clusters:
values = self.tdf[self.label][cluster_labels == cluster].copy()
if len(values) == 1: # Single element clusters are automatically flagged as potential outliers
potential_outliers[cluster] = self.df.loc[values.index]
continue
median = values.median()
std = values.std()
abs_deviations = ((values - median) / std).abs()
cluster_outliers = self.df.loc[abs_deviations[abs_deviations > th].index]
if len(cluster_outliers) > 0:
potential_outliers[cluster] = cluster_outliers
if len(potential_outliers) > 0:
total_outliers = sum([cluster_outliers.shape[0] for cluster_outliers in potential_outliers.values()])
coverage_string = f"{len(clusters)} clusters" if use_clusters else "the full dataset"
self.store_warning(
QualityWarning(
test=QualityWarning.Test.OUTLIER_DETECTION, category=QualityWarning.Category.LABELS,
priority=Priority.P2, data=potential_outliers,
description=f"""
Found {total_outliers} potential outliers across {coverage_string}. \
A distance bigger than {th} standard deviations of intra-cluster distances \
to the respective centroids was used to define the potential outliers."""
))
return potential_outliers
def test_normality(self, p_th=5e-3):
"""Runs a normal distribution test on the label column.
If passes data is normally distributed.
If it fails, retries with a battery of transformations.
"""
vals = self.tdf[self.label].copy()
test_result, transform, pstat = normality_test(vals, p_th=p_th)
if test_result:
if transform is None:
self._logger.info("The label values appears to be normally distributed.")
else:
self._logger.info("The %s transform appears to be able to normalize the label values.", transform)
self.store_warning(
QualityWarning(
test=QualityWarning.Test.TEST_NORMALITY, category=QualityWarning.Category.LABELS,
priority=Priority.P2, data=vals,
description=f"The label distribution as-is failed a normality test. \
Using the {transform} transform provided a positive normality test with a p-value statistic of {pstat:.2f}"
))
else:
self._logger.warning("""
It was not possible to normalize the label values.
See the data quality warning message for additional context.
""")
self.store_warning(
QualityWarning(
test=QualityWarning.Test.TEST_NORMALITY, category=QualityWarning.Category.LABELS,
priority=Priority.P1, data=vals,
description="""
The label distribution failed to pass a normality test as-is and following a battery of transforms.
It is possible that the data originates from an exotic distribution, there is heavy outlier presence or it is \
multimodal. Addressing this issue might prove critical for regressor performance.
"""
))
| 52.382609
| 119
| 0.642486
|
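A minimal usage sketch for the label_inspector_dispatch factory above. The DataFrame and label column are illustrative, the import path mirrors the module header shown, and only methods defined in this record are called.

# Illustrative only: run a few of the categorical label checks defined above.
import numpy as np
import pandas as pd

from src.ydata_quality.labelling.engine import label_inspector_dispatch  # assumed path

df = pd.DataFrame({
    "feature": np.random.rand(100),
    "label": np.random.choice(["a", "b", "c"], size=100),
})

inspector = label_inspector_dispatch(df, label="label", random_state=42)
inspector.missing_labels()               # warns if any label is NaN
rare = inspector.few_labels(count_th=1)  # classes with a single record (categorical path)
outliers = inspector.outlier_detection(th=3)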
74c25ea2dffff1370ed4bf6b821ebf39ae3f6fe7
| 3,583
|
py
|
Python
|
cloudferry/lib/stage.py
|
SVilgelm/CloudFerry
|
4459c0d21ba7ccffe51176932197b352e426ba63
|
[
"Apache-2.0"
] | 6
|
2017-04-20T00:49:49.000Z
|
2020-12-20T16:27:10.000Z
|
cloudferry/lib/stage.py
|
SVilgelm/CloudFerry
|
4459c0d21ba7ccffe51176932197b352e426ba63
|
[
"Apache-2.0"
] | 3
|
2017-04-08T15:47:16.000Z
|
2017-05-18T17:40:59.000Z
|
cloudferry/lib/stage.py
|
SVilgelm/CloudFerry
|
4459c0d21ba7ccffe51176932197b352e426ba63
|
[
"Apache-2.0"
] | 8
|
2017-04-07T23:42:36.000Z
|
2021-08-10T11:05:10.000Z
|
# Copyright 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logging
from oslo_utils import importutils
from cloudferry.lib.utils import local_db
LOG = logging.getLogger(__name__)
local_db.execute_once("""
CREATE TABLE IF NOT EXISTS stages (
stage TEXT,
signature JSON,
PRIMARY KEY (stage)
)
""")
class Stage(object):
__metaclass__ = abc.ABCMeta
dependencies = []
def __init__(self, config):
"""
Stage constructor
:param config: cloudferry.lib.config.Configuration instance
:return:
"""
self.config = config
@abc.abstractmethod
def signature(self):
"""
        Returns the signature for the data that will be produced during this
        stage. If the signature differs from the one stored in the database,
        the invalidate method will be called.
:return:
"""
return
@abc.abstractmethod
def execute(self):
"""
Should contain any code that is required to be executed during this
stage.
"""
return
@abc.abstractmethod
def invalidate(self, old_signature, new_signature, force=False):
"""
Should destroy any stale data based on signature difference.
:param old_signature: old signature stored in DB
:param new_signature: new signature
"""
return
def execute_stage(class_name, config, force=False):
"""
Execute stage specified by `class_name` argument.
:param class_name: fully qualified stage class name
:param config: config.Configuration instance
"""
# Create stage object
cls = importutils.import_class(class_name)
assert issubclass(cls, Stage)
stage = cls(config)
# Execute dependency stages
for dependency in stage.dependencies:
execute_stage(dependency, config)
# Check if there is data from this stage in local DB
new_signature = stage.signature()
old_signature = None
need_invalidate = False
need_execute = False
with local_db.Transaction() as tx:
row = tx.query_one('SELECT signature FROM stages WHERE stage=:stage',
stage=class_name)
if row is None:
need_execute = True
else:
old_signature = row['signature'].data
need_invalidate = (old_signature != new_signature)
# Run invalidate and execute if needed
with local_db.Transaction() as tx:
if need_invalidate or force:
stage.invalidate(old_signature, new_signature, force=force)
tx.execute('DELETE FROM stages WHERE stage=:stage',
stage=class_name)
need_execute = True
if need_execute:
stage.execute()
tx.execute('INSERT INTO stages VALUES (:stage, :signature)',
stage=class_name,
signature=local_db.Json(new_signature))
LOG.info('Stage %s executed successfully', class_name)
else:
LOG.info('Skipping stage %s', class_name)
| 30.623932
| 78
| 0.651689
|
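A minimal sketch of a concrete Stage as the module above expects one: it implements the three abstract methods and is meant to be run through execute_stage by its fully qualified class name. The class name and signature contents are placeholders.

# Illustrative only: a no-op stage that follows the contract described above.
from cloudferry.lib import stage


class DiscoverTenantsStage(stage.Stage):  # hypothetical stage
    dependencies = []  # fully qualified names of prerequisite stages

    def signature(self):
        # Anything JSON-serializable; a changed signature triggers invalidate().
        return {'clouds': ['src', 'dst']}

    def execute(self):
        # A real stage would collect data and persist it to the local DB here.
        pass

    def invalidate(self, old_signature, new_signature, force=False):
        # Drop whatever execute() produced for the old signature.
        pass


# stage.execute_stage('mypackage.stages.DiscoverTenantsStage', config)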
502f077fc24fc6221846d116adca1fc7cf02130a
| 1,679
|
py
|
Python
|
actionnetwork_activist_sync/debug.py
|
afitts/actionnetwork_activist_sync
|
40a0cabf36adca92ca17e7087792dc1829289d0a
|
[
"MIT"
] | null | null | null |
actionnetwork_activist_sync/debug.py
|
afitts/actionnetwork_activist_sync
|
40a0cabf36adca92ca17e7087792dc1829289d0a
|
[
"MIT"
] | null | null | null |
actionnetwork_activist_sync/debug.py
|
afitts/actionnetwork_activist_sync
|
40a0cabf36adca92ca17e7087792dc1829289d0a
|
[
"MIT"
] | null | null | null |
from colored import attr, fg
from dictdiffer import diff
from pprint import pprint
class PersonCompare:
def __init__(self, existing, updated):
self.existing = existing
self.existing.merge_primary_email()
self.existing.merge_primary_address()
self.updated = updated
def print_diff(self):
ignore = [
'identifiers',
'_links',
'created_date',
'modified_date',
'languages_spoken',
'email_addresses',
'postal_addresses',
'country',
'person_id'
]
for difference in diff(self.existing.__dict__, self.updated, ignore=ignore):
if difference[0] == 'add':
fmt_str = '{}{:>50}{} | {}{:<50}{}'
left = ''
left_color = fg('white')
right = '{!r} {!r}'.format(difference[1], difference[2])
right_color = fg('green')
elif difference[0] == 'remove':
fmt_str = '{}{:>50}{} | {}{:<50}{}'
left = '{!r} {!r}'.format(difference[1], difference[2])
left_color = fg('red')
right = ''
right_color = fg('white')
elif difference[0] == 'change':
fmt_str = '{}{:>50}{}'
left = '{!r} {!r}'.format(difference[1], difference[2])
left_color = fg('yellow')
right = None
right_color = None
else:
                raise NotImplementedError
print(fmt_str.format(
left_color, left, attr('reset'), right_color, right, attr('reset')))
| 32.921569
| 84
| 0.481834
|
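A runnable sketch of PersonCompare above. The FakePerson stand-in is hypothetical: it only mimics the two merge methods and the __dict__ access that print_diff relies on, in place of the package's real Person model.

# Illustrative only: diff an 'existing' person against an updated field dict.
from actionnetwork_activist_sync.debug import PersonCompare


class FakePerson:
    """Hypothetical stand-in for the package's Person model."""
    def __init__(self, **fields):
        self.__dict__.update(fields)

    def merge_primary_email(self):
        pass

    def merge_primary_address(self):
        pass


existing = FakePerson(given_name='Ada', family_name='Lovelace', phone='555-0100')
updated = {'given_name': 'Ada', 'family_name': 'Lovelace', 'phone': '555-0199'}
PersonCompare(existing, updated).print_diff()  # 'phone' is reported as a change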
3944cfa2625949c5d492df9576df8fb52f423251
| 5,143
|
py
|
Python
|
desktop/libs/indexer/src/indexer/management/commands/indexer_setup.py
|
vsosrc/hue
|
d8bc236d8d622759fa5988ff32246e4c750e7503
|
[
"Apache-2.0"
] | null | null | null |
desktop/libs/indexer/src/indexer/management/commands/indexer_setup.py
|
vsosrc/hue
|
d8bc236d8d622759fa5988ff32246e4c750e7503
|
[
"Apache-2.0"
] | null | null | null |
desktop/libs/indexer/src/indexer/management/commands/indexer_setup.py
|
vsosrc/hue
|
d8bc236d8d622759fa5988ff32246e4c750e7503
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
import uuid
import os
from django.core.management.base import NoArgsCommand
from django.utils.translation import ugettext as _
from hadoop import cluster
from useradmin.models import install_sample_user
from indexer import utils, controller
LOG = logging.getLogger(__name__)
class Command(NoArgsCommand):
"""
Install examples but do not overwrite them.
"""
def handle_noargs(self, **options):
self.user = install_sample_user()
self.fs = cluster.get_hdfs()
self.searcher = controller.CollectionManagerController(self.user)
LOG.info(_("Installing twitter collection"))
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../../../../apps/search/examples/collections/solr_configs_twitter_demo/index_data.csv'))
self._setup_collection_from_csv({
'name': 'twitter_example',
'fields': self._parse_fields(path),
'uniqueKeyField': 'id'
}, path)
LOG.info(_("Twitter collection successfully installed"))
LOG.info(_("Installing yelp collection"))
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../../../../apps/search/examples/collections/solr_configs_yelp_demo/index_data.csv'))
self._setup_collection_from_csv({
'name': 'yelp_example',
'fields': self._parse_fields(path),
'uniqueKeyField': 'id'
}, path)
LOG.info(_("Yelp collection successfully installed"))
LOG.info(_("Installing jobs collection"))
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../../../../apps/search/examples/collections/solr_configs_jobs_demo/index_data.csv'))
self._setup_collection_from_csv({
'name': 'jobs_example',
'fields': self._parse_fields(path),
'uniqueKeyField': 'id'
}, path)
LOG.info(_("Jobs collection successfully installed"))
LOG.info(_("Installing logs collection"))
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../../../../apps/search/examples/collections/solr_configs_log_analytics_demo/index_data.csv'))
self._setup_collection_from_csv({
'name': 'logs_example',
'fields': self._parse_fields(path, fieldtypes={
'region_code': 'string',
'referer': 'string'
}),
'uniqueKeyField': 'id'
}, path)
LOG.info(_("Logs collection successfully installed"))
def _setup_collection_from_csv(self, collection, path, separator=',', quote_character='"'):
if self.searcher.collection_exists(collection['name']):
self.searcher.delete_collection(collection['name'])
# Create instance directory, collection, and add fields
self.searcher.create_collection(collection['name'], collection['fields'], collection['uniqueKeyField'])
try:
hdfs_path = '/tmp/%s' % uuid.uuid4()
# Put in HDFS
with open(path) as fh:
if self.fs.do_as_user(self.user.username, self.fs.exists, hdfs_path):
overwrite = True
else:
overwrite = False
self.fs.do_as_user(self.user.username, self.fs.create, hdfs_path, data=fh.read(), overwrite=overwrite)
# Index data
self.searcher.update_data_from_hdfs(self.fs,
collection['name'],
collection['fields'],
hdfs_path,
'separated',
separator=separator,
quote_character=quote_character)
except:
self.searcher.delete_collection(collection['name'])
raise
finally:
# Remove HDFS file
if self.fs.do_as_user(self.user.username, self.fs.exists, hdfs_path):
self.fs.do_as_user(self.user.username, self.fs.remove, hdfs_path, skip_trash=True)
def _parse_fields(self, path, separator=',', quote_character='"', fieldtypes={}):
with open(path) as fh:
field_generator = utils.field_values_from_separated_file(fh, separator, quote_character)
row = next(field_generator)
field_names = row.keys()
field_types = utils.get_field_types((row.values() for row in itertools.chain([row], field_generator)), iterations=51)
return [{'name': field[0], 'type': field[0] in fieldtypes and fieldtypes[field[0]] or field[1]} for field in zip(field_names, field_types)]
| 41.144
| 171
| 0.675287
|
08813d7fd436da173f3063fa63e383c85f5a7897
| 56
|
py
|
Python
|
smartystreets_python_sdk/us_street/match_type.py
|
jasonrfarkas/smartystreets-python-sdk
|
bcb94efc09c795222eb1bd85544073a6cc063a46
|
[
"Apache-2.0"
] | null | null | null |
smartystreets_python_sdk/us_street/match_type.py
|
jasonrfarkas/smartystreets-python-sdk
|
bcb94efc09c795222eb1bd85544073a6cc063a46
|
[
"Apache-2.0"
] | null | null | null |
smartystreets_python_sdk/us_street/match_type.py
|
jasonrfarkas/smartystreets-python-sdk
|
bcb94efc09c795222eb1bd85544073a6cc063a46
|
[
"Apache-2.0"
] | null | null | null |
STRICT = 'strict'
RANGE = 'range'
INVALID = 'invalid'
| 9.333333
| 19
| 0.642857
|
5a3c3e853e14e817415eac556ba636959faaa401
| 4,640
|
py
|
Python
|
captain_hook.py
|
charlesreid1/b-captain-hook
|
361f59c21a733a484f48e9bd60bce2d94dbf7b1b
|
[
"Apache-2.0"
] | 1
|
2019-03-20T02:08:06.000Z
|
2019-03-20T02:08:06.000Z
|
captain_hook.py
|
charlesreid1/b-captain-hook
|
361f59c21a733a484f48e9bd60bce2d94dbf7b1b
|
[
"Apache-2.0"
] | null | null | null |
captain_hook.py
|
charlesreid1/b-captain-hook
|
361f59c21a733a484f48e9bd60bce2d94dbf7b1b
|
[
"Apache-2.0"
] | 1
|
2019-07-31T00:56:25.000Z
|
2019-07-31T00:56:25.000Z
|
import os
import logging
import subprocess
from tempfile import mkstemp
from os import access, X_OK, remove, fdopen
import requests
import json
from flask import Flask, request, abort
app = Flask(__name__)
logging.basicConfig(filename='/tmp/captain_hook.log',
filemode='a',
level=logging.DEBUG)
@app.route('/webhook', methods=['GET', 'POST'])
def index():
"""
Main WSGI application entry.
"""
path = os.path.dirname(os.path.abspath(__file__))
# Only POST is implemented
if request.method != 'POST':
logging.error('ERROR: GET request received, only POST method is implemented')
#abort(501)
return "<h2>Captain Hook</h2><p>Unfortunately, no GET method is implemented for Captain Hook. Try a POST method.</p>"
# Load config
with open(os.path.join(path, 'config.json'), 'r') as cfg:
config = json.loads(cfg.read())
hooks_path = config.get('hooks_path', os.path.join(path, 'hooks'))
logging.info("Hooks path: %s"%(hooks_path))
# Implement ping/pong
event = request.headers.get('X-GitHub-Event', 'ping')
if event == 'ping':
return json.dumps({'msg': 'pong'})
# Gather data
try:
payload = request.get_json()
except Exception:
logging.warning('Request parsing failed')
abort(400)
# Enforce secret
secret = config.get('enforce_secret', '')
if secret!='':
try:
if payload['secret'] != secret:
logging.error('Invalid secret %s.'%(payload['secret']))
abort(403)
except:
abort(501)
# Determining the branch is tricky, as it only appears for certain event
    # types and at different levels
branch = None
try:
# Case 1: a ref_type indicates the type of ref.
        # This is true for create and delete events.
if 'ref_type' in payload:
if payload['ref_type'] == 'branch':
branch = payload['ref']
        # Case 2: a pull_request object is involved. This covers pull_request and
# pull_request_review_comment events.
elif 'pull_request' in payload:
# This is the TARGET branch for the pull-request, not the source
# branch
branch = payload['pull_request']['base']['ref']
elif event in ['push']:
# Push events provide a full Git ref in 'ref' and not a 'ref_type'.
branch = payload['ref'].split('/', 2)[2]
except KeyError:
# If the payload structure isn't what we expect, we'll live without
# the branch name
pass
# All current events have a repository, but some legacy events do not,
# so let's be safe
name = payload['repository']['name'] if 'repository' in payload else None
meta = {
'name': name,
'branch': branch,
'event': event
}
# Possible hooks
hooks = []
if branch and name:
hooks.append(os.path.join(hooks_path, '{event}-{name}-{branch}'.format(**meta)))
if name:
hooks.append(os.path.join(hooks_path, '{event}-{name}'.format(**meta)))
hooks.append(os.path.join(hooks_path, '{event}'.format(**meta)))
hooks.append(os.path.join(hooks_path, 'all'))
#######################################################
# Check permissions
scripts = []
for h in hooks:
if os.path.isfile(h) and access(h,X_OK):
scripts.append(h)
    if len(scripts) == 0:
        logging.warning('No executable hook scripts found')
return json.dumps({'status': 'nop'})
#######################################################
# Save payload to temporal file
osfd, tmpfile = mkstemp()
with fdopen(osfd, 'w') as pf:
pf.write(json.dumps(payload))
# Run scripts
logging.info("%s"%(scripts))
ran = {}
for s in scripts:
proc = subprocess.Popen(
[s, tmpfile, event],
stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = proc.communicate()
ran[os.path.basename(s)] = {
'returncode': proc.returncode,
'stdout': stdout.decode('utf-8'),
'stderr': stderr.decode('utf-8'),
}
# Log errors if a hook failed
if proc.returncode != 0:
logging.error('{} : {} \n{}'.format(
s, proc.returncode, stderr
))
# Clean up
remove(tmpfile)
return json.dumps({'status': 'done'})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
| 28.466258
| 125
| 0.57069
|
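A sketch of exercising the webhook endpoint above from a test client. The URL, secret and repository name are placeholders; the X-GitHub-Event header and payload shape follow what index() reads for a push event.

# Illustrative only: simulate a GitHub push delivery against a local instance.
import requests

payload = {
    'secret': 'placeholder-secret',      # only checked when enforce_secret is configured
    'ref': 'refs/heads/main',            # push events carry a full Git ref
    'repository': {'name': 'example-repo'},
}
resp = requests.post(
    'http://localhost:5000/webhook',
    json=payload,
    headers={'X-GitHub-Event': 'push'},
)
print(resp.status_code, resp.text)  # {'status': 'done'} if a hook ran, else {'status': 'nop'}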
4b2eab52d544fbcce9d35c3b77e231f3033d9e9c
| 14,843
|
py
|
Python
|
tasks/dyn_raffle_handler.py
|
Ayouuuu/bili2.0
|
1108e39208e56f129fb5eb6605a5b3f1aadc0d8f
|
[
"MIT"
] | 4
|
2020-03-05T02:07:55.000Z
|
2020-06-01T02:20:03.000Z
|
tasks/dyn_raffle_handler.py
|
Ayouuuu/bili2.0
|
1108e39208e56f129fb5eb6605a5b3f1aadc0d8f
|
[
"MIT"
] | null | null | null |
tasks/dyn_raffle_handler.py
|
Ayouuuu/bili2.0
|
1108e39208e56f129fb5eb6605a5b3f1aadc0d8f
|
[
"MIT"
] | 2
|
2020-03-07T20:34:46.000Z
|
2020-04-05T10:03:13.000Z
|
import json
import asyncio
import random
from typing import Optional
import utils
from dyn import dyn_raffle_sql
from dyn.bili_data_types import DynRaffleStatus, DynRaffleJoined, DynRaffleResults, DynRaffleLuckydog
from reqs.dyn_raffle_handler import DynRaffleHandlerReq
from .utils import UtilsTask
from .base_class import Forced, Wait, Multi
class DynRaffleUtilsTask:
@staticmethod
async def create_dyn(user):
json_rsp = await user.req_s(DynRaffleHandlerReq.create_dyn, user)
user.info(f'用户生成动态 {json_rsp}')
return int(json_rsp['data']['doc_id'])
@staticmethod
async def del_dyn_by_docid(user, doc_id):
json_rsp = await user.req_s(DynRaffleHandlerReq.del_dyn_by_docid, user, doc_id)
code = json_rsp['code']
        # 0 (success) / -1 (generic "system hiccup" server error)
if not code:
user.info(f'用户删除动态{doc_id}(doc_id)成功')
return True
user.info(f'用户删除动态{doc_id}(doc_id)失败,可能系统错误或重复删除请求, {json_rsp}')
return False
@staticmethod
async def del_dyn_by_dynid(user, dyn_id):
json_rsp = await user.req_s(DynRaffleHandlerReq.del_dyn_by_dynid, user, dyn_id)
code = json_rsp['code']
        # logged-out response: {'code': -6, 'msg': '', 'message': '', 'data': {}}
# {'code': 2200013, 'msg': '不能删除别人的动态', 'message': '不能删除别人的动态', 'data': {}}
# {'code': 0, 'msg': 'succ', 'message': 'succ', 'data': {'result': 0, 'errmsg': '删除成功', '_gt_': 0}}
# {'code': 1100404, 'msg': '不能重复删除', 'message': '不能重复删除', 'data': {}}
# {'code': 1100405, 'msg': '', 'message': '', 'data': {}}
if not code:
user.info(f'用户删除动态{dyn_id}(dyn_id)成功')
return True
user.info(f'用户删除动态{dyn_id}(dyn_id)失败,可能系统错误或重复删除请求, {json_rsp}')
return False
@staticmethod
async def check_and_fetch_raffle(user, doc_id, handle_status=-1, feed_limit=False) -> tuple:
json_rsp = await user.req_s(DynRaffleHandlerReq.is_dyn_raffle, user, doc_id)
code = json_rsp['code']
print('_____________________________________')
print('is_dyn_raffle:', doc_id, 'code:', code)
if not code:
data = json_rsp['data']
item = data['item']
str_ext = item['extension']
print(doc_id, str_ext)
if str_ext:
try:
dict_ext = json.loads(str_ext.replace('\'', ''))
except json.JSONDecodeError:
print(f'dict_extension 解析失败,可能是b站api已知问题')
if len(str_ext) != 1024:
                        # TODO there may also be cases like doc_id=21426429 "extension":"{\"emoji_type\":1}{\"emoji_type\":1}"
user.warn(f'动态抽奖{doc_id}dict_extension 解析失败', str_ext)
return 1, None
            # Lottery check: non-matching cases may be {}, an empty lott_cfg, or a few other shapes
if 'lott_cfg' in dict_ext and dict_ext['lott_cfg']:
lott_cfg_x = dict_ext['lott_cfg']
if isinstance(lott_cfg_x, dict):
lott_cfg = lott_cfg_x
elif isinstance(lott_cfg_x, str):
lott_cfg = json.loads(lott_cfg_x)
else:
return -1, None
print('lott_cfg', lott_cfg)
if 'lottery_id' in lott_cfg:
uid = data['user']['uid']
post_time = int(item['upload_timestamp'])
describe = item['description']
else:
return 1, None
else:
return 1, None
else:
return 1, None
elif code == 110001:
if 'user' not in json_rsp['data']:
return 404, None
return 1, None
else:
# 目前未发现其他code
user.warn(f'互动抽奖初步查询 {json_rsp}')
return -1, None
json_rsp = await user.req_s(DynRaffleHandlerReq.fetch_dyn_raffle, user, doc_id)
code = json_rsp['code']
if not code:
# print('check raffle_status', json_rsp)
data = json_rsp['data']
dyn_id = data['business_id']
            # draw (winner announcement) time
lottery_time = data['lottery_time']
            # number of users that must be @-mentioned
at_num = data['lottery_at_num']
            # follow required; 1 means true
feed_limit = bool(data['lottery_feed_limit']) or feed_limit
first_prize_cmt = data['first_prize_cmt']
second_prize_cmt = data.get('second_prize_cmt', '')
third_prize_cmt = data.get('third_prize_cmt', '')
first_prize = data['first_prize']
second_prize = data.get('second_prize', 0)
third_prize = data.get('third_prize', 0)
            # needs postal delivery??????? uncertain
# post = data['need_post']
dyn_raffle_status = DynRaffleStatus(
dyn_id=dyn_id,
doc_id=doc_id,
describe=describe,
uid=uid,
post_time=post_time,
lottery_time=lottery_time,
at_num=at_num,
feed_limit=feed_limit,
handle_status=handle_status,
prize_cmt_1st=first_prize_cmt,
prize_cmt_2nd=second_prize_cmt,
prize_cmt_3rd=third_prize_cmt,
prize_num_1st=first_prize,
prize_num_2nd=second_prize,
prize_num_3rd=third_prize
)
print('获取到的抽奖信息为', dyn_raffle_status)
return 0, dyn_raffle_status
elif code == -9999:
print(f'抽奖动态{doc_id}已经删除')
return 404, None
user.warn(f'互动抽奖初步查询 {json_rsp}')
return -1, None
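    # --- Illustrative sketch (hypothetical helper, not part of the original file) ---
    # The extension field handled above can carry 'lott_cfg' either as a dict or as a
    # doubly JSON-encoded string; this sketch shows one tolerant way to normalise both
    # shapes before reading 'lottery_id'.
    @staticmethod
    def _parse_lott_cfg_sketch(str_ext: str) -> Optional[dict]:
        try:
            dict_ext = json.loads(str_ext.replace('\'', ''))
        except json.JSONDecodeError:
            return None
        lott_cfg = dict_ext.get('lott_cfg')
        if isinstance(lott_cfg, str):
            try:
                lott_cfg = json.loads(lott_cfg)
            except json.JSONDecodeError:
                return None
        return lott_cfg if isinstance(lott_cfg, dict) else None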
@staticmethod
async def fetch_dyn_raffle_results(
user, dyn_raffle_status: DynRaffleStatus) -> Optional[DynRaffleResults]:
json_rsp = await user.req_s(DynRaffleHandlerReq.fetch_dyn_raffle, user, dyn_raffle_status.doc_id)
code = json_rsp['code']
if not code:
print('check raffle_status', json_rsp)
data = json_rsp['data']
if 'lottery_result' not in data:
return None
lottery_result = data['lottery_result']
first_prize_result = lottery_result['first_prize_result']
second_prize_result = lottery_result.get('second_prize_result', [])
third_prize_result = lottery_result.get('third_prize_result', [])
list_first_prize_result = [int(lucky_dog['uid']) for lucky_dog in first_prize_result]
list_second_prize_result = [int(lucky_dog['uid']) for lucky_dog in second_prize_result]
list_third_prize_result = [int(lucky_dog['uid']) for lucky_dog in third_prize_result]
dyn_raffle_results = DynRaffleResults(
dyn_id=dyn_raffle_status.dyn_id,
doc_id=dyn_raffle_status.doc_id,
describe=dyn_raffle_status.describe,
uid=dyn_raffle_status.uid,
post_time=dyn_raffle_status.post_time,
lottery_time=dyn_raffle_status.lottery_time,
prize_cmt_1st=dyn_raffle_status.prize_cmt_1st,
prize_cmt_2nd=dyn_raffle_status.prize_cmt_2nd,
prize_cmt_3rd=dyn_raffle_status.prize_cmt_3rd,
prize_list_1st=list_first_prize_result,
prize_list_2nd=list_second_prize_result,
prize_list_3rd=list_third_prize_result
)
print('获取到的抽奖信息为', dyn_raffle_results)
return dyn_raffle_results
elif code == -9999:
print(f'抽奖动态{dyn_raffle_status.doc_id}已经删除')
return None
@staticmethod
async def check(user, doc_id: int):
        # confirm that the dynamic still exists
json_rsp = await user.req_s(DynRaffleHandlerReq.fetch_dyn_raffle, user, doc_id)
code = json_rsp['code']
if not code:
return True
user.info(f'{doc_id}的动态抽奖不存在')
return False
class DynRaffleJoinTask(Forced, Wait, Multi):
TASK_NAME = 'join_dyn_raffle'
@staticmethod
async def check(_, *args):
        return (-2, None, *args),  # see the notifier's special handling: None entries are processed in order, and the whole flow is awaitable
@staticmethod
async def follow_raffle_organizer(user, uid):
is_following, group_ids = await UtilsTask.check_follow(user, uid)
if is_following:
print('已经关注,不再处理')
return
print('未关注,即将弄到抽奖分组')
await UtilsTask.follow_user(user, uid)
group_id = await UtilsTask.fetch_group_id(user, '抽奖关注')
await UtilsTask.move2follow_group(user, uid, group_id)
return
@staticmethod
async def repost_dyn_raffle(user, orig_dynid, at_num):
if len(user.dyn_lottery_friends) < at_num:
return False
print('开始转发动态: ', orig_dynid)
at_users = [(str(uid), name) for uid, name in random.sample(user.dyn_lottery_friends, at_num)]
location = 0
ctrl = []
content = ''
for uid, name in at_users:
ulength = len(name)
ctrl_piece = {
'data': uid,
'location': location,
                'length': ulength + 1,  # including the '@' sign
'type': 1,
}
ctrl.append(ctrl_piece)
            location += ulength + 1 + 1  # plus the trailing space
            # mentions are separated by a single space
content += f'@{name} '
message = ["emmmm...", "中奖吧!", "啊~~", "抽奖玩", "拉低中奖率2333", "反正先转了再说", "先转为敬", "大佬大佬抽奖带我.jpg", "我是非酋", "欧皇驾到"]
content += random.choice(message)
at_uids = ','.join([uid for uid, _ in at_users])
str_ctrl = json.dumps(ctrl)
json_rsp = await user.req_s(DynRaffleHandlerReq.repost_dyn, user, orig_dynid, content, at_uids, str_ctrl)
data = json_rsp['data']
print(json_rsp)
return not json_rsp['code'] and data['errmsg'] == '符合条件,允许发布'
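    # --- Illustrative sketch (hypothetical helper, not part of the original file) ---
    # Shows how the at-mention control list built above lines up with the repost text:
    # each entry records the character offset ('location') and span length (name plus
    # the '@' sign) of one mention, and mentions are separated by single spaces.
    # _build_at_ctrl_sketch([('1', 'alice'), ('2', 'bob')]) ->
    #   ('@alice @bob ', [{'data': '1', 'location': 0, 'length': 6, 'type': 1},
    #                     {'data': '2', 'location': 7, 'length': 4, 'type': 1}])
    @staticmethod
    def _build_at_ctrl_sketch(at_users):
        location, ctrl, content = 0, [], ''
        for uid, name in at_users:
            ctrl.append({'data': uid, 'location': location, 'length': len(name) + 1, 'type': 1})
            location += len(name) + 2  # '@' + name + trailing space
            content += f'@{name} '
        return content, ctrl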
@staticmethod
async def fetch_reposted_dynid(user, uid, orig_dynid):
offset = 0
while True:
json_rsp = await user.req_s(DynRaffleHandlerReq.fetch_dyns, user, uid, offset)
if 'cards' not in json_rsp['data']:
return None
cards = json_rsp['data']['cards']
assert cards
for dyn in cards:
desc = dyn['desc']
print(desc['orig_dy_id'], desc['dynamic_id'])
if int(orig_dynid) == int(desc['orig_dy_id']):
return int(desc['dynamic_id'])
offset = cards[-1]['desc']['dynamic_id']
@staticmethod
async def work(user, dyn_raffle_status: DynRaffleStatus):
        if dyn_raffle_status.lottery_time - utils.curr_time() < 15:
            user.info(f'动态{dyn_raffle_status.dyn_id}马上或已经开奖,放弃参与')
            return
async with user.repost_del_lock:
if dyn_raffle_status.feed_limit: # 关注
await DynRaffleJoinTask.follow_raffle_organizer(user, dyn_raffle_status.uid)
            # create the repost dynamic and record it in the database
if await DynRaffleJoinTask.repost_dyn_raffle(user, dyn_raffle_status.dyn_id, dyn_raffle_status.at_num):
user.info(f'转发参与动态{dyn_raffle_status.dyn_id}成功')
                for i in range(5):  # the listing often lags, so retry a few times
await asyncio.sleep(3)
dyn_id = await DynRaffleJoinTask.fetch_reposted_dynid(
user, user.dict_bili['uid'], dyn_raffle_status.dyn_id)
if dyn_id is not None:
user.info(f'查找转发动态{dyn_raffle_status.dyn_id}生成{dyn_id}')
dyn_raffle_joined = DynRaffleJoined(
dyn_id=dyn_id, orig_dynid=dyn_raffle_status.dyn_id, uid=user.dict_bili['uid'])
print(dyn_raffle_joined)
dyn_raffle_sql.insert_dynraffle_joined_table(dyn_raffle_joined)
return
user.warn(f'查找转发动态{dyn_raffle_status.dyn_id}生成失败')
else:
user.warn(f'转发参与动态{dyn_raffle_status.dyn_id}失败')
return
class DynRaffleNoticeTask(Forced, Wait, Multi):
TASK_NAME = 'null'
@staticmethod
async def check(_, *args):
return (-2, None, *args),
@staticmethod
async def unfollow_raffle_organizer(user, uid):
user.info(f'正在处理动态抽奖的取关问题')
group_id = await UtilsTask.fetch_group_id(user, '抽奖关注')
is_following, group_ids = await UtilsTask.check_follow(user, uid)
if group_id in group_ids:
await UtilsTask.unfollow(user, uid)
@staticmethod
async def work(user, dyn_raffle_status: DynRaffleStatus, dyn_raffle_results: Optional[DynRaffleResults]):
int_user_uid = int(user.dict_bili['uid'])
async with user.repost_del_lock:
dyn_raffle_joined = dyn_raffle_sql.select_by_primary_key_from_dynraffle_joined_table(
uid=int_user_uid, orig_dynid=dyn_raffle_status.dyn_id)
if dyn_raffle_joined is None:
user.info('未从数据库中查阅到动态抽奖,可能是之前已经删除了')
elif dyn_raffle_results is None or \
int_user_uid not in dyn_raffle_results.prize_list_1st and \
int_user_uid not in dyn_raffle_results.prize_list_2nd and \
int_user_uid not in dyn_raffle_results.prize_list_3rd:
                # delete the repost dynamic and sync the database
await DynRaffleUtilsTask.del_dyn_by_dynid(user, dyn_raffle_joined.dyn_id)
dyn_raffle_sql.del_from_dynraffle_joind_table(
uid=int_user_uid,
orig_dynid=dyn_raffle_status.dyn_id
)
                # if this raffle required following and none of the uploader's other raffles still require it, run unfollow_raffle_organizer
if dyn_raffle_status.feed_limit and dyn_raffle_sql.should_unfollowed(
uid=int_user_uid, orig_uid=dyn_raffle_status.uid):
await DynRaffleNoticeTask.unfollow_raffle_organizer(user, dyn_raffle_status.uid)
else:
dyn_raffle_sql.del_from_dynraffle_joind_table(
uid=int_user_uid,
orig_dynid=dyn_raffle_status.dyn_id
)
following_uid = dyn_raffle_status.uid if dyn_raffle_status.feed_limit else 0
dyn_raffle_sql.insert_dynraffle_luckydog_table(DynRaffleLuckydog(
uid=dyn_raffle_joined.uid,
dyn_id=dyn_raffle_joined.dyn_id,
orig_dynid=dyn_raffle_joined.orig_dynid,
following_uid=following_uid
))
| 43.148256
| 117
| 0.571919
|
37938d7270a76bf648bf31c94cc51e602ce01f5d
| 1,059
|
gyp
|
Python
|
Dependencies/gyp-master/test/mac/xctest/test.gyp
|
knight666/exlibris
|
b21b46e0c84e5c4f81f8048022cda88e7bb3dca2
|
[
"MIT"
] | null | null | null |
Dependencies/gyp-master/test/mac/xctest/test.gyp
|
knight666/exlibris
|
b21b46e0c84e5c4f81f8048022cda88e7bb3dca2
|
[
"MIT"
] | null | null | null |
Dependencies/gyp-master/test/mac/xctest/test.gyp
|
knight666/exlibris
|
b21b46e0c84e5c4f81f8048022cda88e7bb3dca2
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'classes',
'type': 'static_library',
'sources': [
'MyClass.h',
'MyClass.m',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
],
},
},
{
'target_name': 'tests',
'type': 'loadable_module',
'mac_xctest_bundle': 1,
'sources': [
'TestCase.m',
],
'dependencies': [
'classes',
],
'mac_bundle_resources': [
'resource.txt',
],
'xcode_settings': {
'WRAPPER_EXTENSION': 'xctest',
'FRAMEWORK_SEARCH_PATHS': [
'$(inherited)',
'$(DEVELOPER_FRAMEWORKS_DIR)',
],
'OTHER_LDFLAGS': [
'$(inherited)',
'-ObjC',
],
},
},
],
}
| 22.0625
| 73
| 0.463645
|
9a2cfb723ea5ef1b54e0b9cf71b50a2d748b0888
| 127,692
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/storage/_params.py
|
phcooper/azure-cli
|
1e7f7962df34f3104db4f2eefdcbdfe46ccd4bc1
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/storage/_params.py
|
phcooper/azure-cli
|
1e7f7962df34f3104db4f2eefdcbdfe46ccd4bc1
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/storage/_params.py
|
phcooper/azure-cli
|
1e7f7962df34f3104db4f2eefdcbdfe46ccd4bc1
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from azure.cli.core.commands.parameters import (tags_type, file_type, get_location_type, get_enum_type,
get_three_state_flag, edge_zone_type)
from azure.cli.core.local_context import LocalContextAttribute, LocalContextAction, ALL
from ._validators import (get_datetime_type, validate_metadata, get_permission_validator, get_permission_help_string,
resource_type_type, services_type, validate_entity, validate_select, validate_blob_type,
validate_included_datasets_validator, validate_custom_domain,
validate_container_public_access,
validate_table_payload_format, add_progress_callback, process_resource_group,
storage_account_key_options, process_file_download_namespace, process_metric_update_namespace,
get_char_options_validator, validate_bypass, validate_encryption_source, validate_marker,
validate_storage_data_plane_list, validate_azcopy_upload_destination_url,
validate_azcopy_remove_arguments, as_user_validator, parse_storage_account,
validate_delete_retention_days, validate_container_delete_retention_days,
validate_file_delete_retention_days, validator_change_feed_retention_days,
validate_fs_public_access, validate_logging_version, validate_or_policy, validate_policy,
get_api_version_type, blob_download_file_path_validator, blob_tier_validator, validate_subnet)
def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statements, too-many-lines, too-many-branches
from argcomplete.completers import FilesCompleter
from six import u as unicode_string
from knack.arguments import ignore_type, CLIArgumentType
from azure.cli.core.commands.parameters import get_resource_name_completion_list
from .sdkutil import get_table_data_type
from .completers import get_storage_name_completion_list
t_base_blob_service = self.get_sdk('blob.baseblobservice#BaseBlobService')
t_file_service = self.get_sdk('file#FileService')
t_queue_service = self.get_sdk('queue#QueueService')
t_table_service = get_table_data_type(self.cli_ctx, 'table', 'TableService')
storage_account_type = CLIArgumentType(options_list='--storage-account',
help='The name or ID of the storage account.',
validator=parse_storage_account, id_part='name')
acct_name_type = CLIArgumentType(options_list=['--account-name', '-n'], help='The storage account name.',
id_part='name',
completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'),
local_context_attribute=LocalContextAttribute(
name='storage_account_name', actions=[LocalContextAction.GET]))
blob_name_type = CLIArgumentType(options_list=['--blob-name', '-b'], help='The blob name.',
completer=get_storage_name_completion_list(t_base_blob_service, 'list_blobs',
parent='container_name'))
container_name_type = CLIArgumentType(options_list=['--container-name', '-c'], help='The container name.',
completer=get_storage_name_completion_list(t_base_blob_service,
'list_containers'))
directory_type = CLIArgumentType(options_list=['--directory-name', '-d'], help='The directory name.',
completer=get_storage_name_completion_list(t_file_service,
'list_directories_and_files',
parent='share_name'))
file_name_type = CLIArgumentType(options_list=['--file-name', '-f'],
completer=get_storage_name_completion_list(t_file_service,
'list_directories_and_files',
parent='share_name'))
share_name_type = CLIArgumentType(options_list=['--share-name', '-s'], help='The file share name.',
completer=get_storage_name_completion_list(t_file_service, 'list_shares'))
table_name_type = CLIArgumentType(options_list=['--table-name', '-t'],
completer=get_storage_name_completion_list(t_table_service, 'list_tables'))
queue_name_type = CLIArgumentType(options_list=['--queue-name', '-q'], help='The queue name.',
completer=get_storage_name_completion_list(t_queue_service, 'list_queues'))
progress_type = CLIArgumentType(help='Include this flag to disable progress reporting for the command.',
action='store_true', validator=add_progress_callback)
socket_timeout_type = CLIArgumentType(help='The socket timeout(secs), used by the service to regulate data flow.',
type=int)
large_file_share_type = CLIArgumentType(
action='store_true', min_api='2019-04-01',
        help='Enable the capability to support large file shares with more than 5 TiB capacity for storage account. '
'Once the property is enabled, the feature cannot be disabled. Currently only supported for LRS and '
'ZRS replication types, hence account conversions to geo-redundant accounts would not be possible. '
'For more information, please refer to https://go.microsoft.com/fwlink/?linkid=2086047.')
adds_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2019-04-01',
arg_group='Azure Files Identity Based Authentication',
help='Enable Azure Files Active Directory Domain Service Authentication for '
'storage account. When --enable-files-adds is set to true, Azure Active '
'Directory Properties arguments must be provided.')
aadds_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2018-11-01',
arg_group='Azure Files Identity Based Authentication',
help='Enable Azure Active Directory Domain Services authentication for Azure Files')
domain_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the primary domain that the AD DNS server is authoritative for. "
"Required when --enable-files-adds is set to True")
net_bios_domain_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the NetBIOS domain name. "
"Required when --enable-files-adds is set to True")
forest_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the Active Directory forest to get. "
"Required when --enable-files-adds is set to True")
domain_guid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the domain GUID. Required when --enable-files-adds is set to True")
domain_sid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the security identifier (SID). Required when --enable-files-adds "
"is set to True")
azure_storage_sid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the security identifier (SID) for Azure Storage. "
"Required when --enable-files-adds is set to True")
exclude_pattern_type = CLIArgumentType(arg_group='Additional Flags', help='Exclude these files where the name '
'matches the pattern list. For example: *.jpg;*.pdf;exactName. This '
'option supports wildcard characters (*)')
include_pattern_type = CLIArgumentType(arg_group='Additional Flags', help='Include only these files where the name '
'matches the pattern list. For example: *.jpg;*.pdf;exactName. This '
'option supports wildcard characters (*)')
exclude_path_type = CLIArgumentType(arg_group='Additional Flags', help='Exclude these paths. This option does not '
'support wildcard characters (*). Checks relative path prefix. For example: '
'myFolder;myFolder/subDirName/file.pdf.')
include_path_type = CLIArgumentType(arg_group='Additional Flags', help='Include only these paths. This option does '
                                        'not support wildcard characters (*). Checks relative path prefix. For example: '
'myFolder;myFolder/subDirName/file.pdf')
recursive_type = CLIArgumentType(options_list=['--recursive', '-r'], action='store_true',
help='Look into sub-directories recursively.')
sas_help = 'The permissions the SAS grants. Allowed values: {}. Do not use if a stored access policy is ' \
'referenced with --id that specifies this value. Can be combined.'
t_routing_choice = self.get_models('RoutingChoice', resource_type=ResourceType.MGMT_STORAGE)
routing_choice_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_enum_type(t_routing_choice),
help='Routing Choice defines the kind of network routing opted by the user.',
min_api='2019-06-01')
publish_microsoft_endpoints_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_three_state_flag(), min_api='2019-06-01',
help='A boolean flag which indicates whether microsoft routing storage endpoints are to be published.')
publish_internet_endpoints_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_three_state_flag(), min_api='2019-06-01',
help='A boolean flag which indicates whether internet routing storage endpoints are to be published.')
umask_type = CLIArgumentType(
help='When creating a file or directory and the parent folder does not have a default ACL, the umask restricts '
'the permissions of the file or directory to be created. The resulting permission is given by p & ^u, '
'where p is the permission and u is the umask. For more information, please refer to '
'https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control#umask.')
permissions_type = CLIArgumentType(
help='POSIX access permissions for the file owner, the file owning group, and others. Each class may be '
'granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) '
'and 4-digit octal notation (e.g. 0766) are supported. For more information, please refer to https://'
'docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control#levels-of-permission.')
timeout_type = CLIArgumentType(
help='Request timeout in seconds. Applies to each call to the service.', type=int
)
marker_type = CLIArgumentType(
help='A string value that identifies the portion of the list of containers to be '
'returned with the next listing operation. The operation returns the NextMarker value within '
'the response body if the listing operation did not return all containers remaining to be listed '
'with the current page. If specified, this generator will begin returning results from the point '
'where the previous generator stopped.')
num_results_type = CLIArgumentType(
default=5000, validator=validate_storage_data_plane_list,
help='Specify the maximum number to return. If the request does not specify '
'num_results, or specifies a value greater than 5000, the server will return up to 5000 items. Note that '
'if the listing operation crosses a partition boundary, then the service will return a continuation token '
'for retrieving the remaining of the results. Provide "*" to return all.'
)
if_modified_since_type = CLIArgumentType(
help='Commence only if modified since supplied UTC datetime (Y-m-d\'T\'H:M\'Z\')',
type=get_datetime_type(False))
if_unmodified_since_type = CLIArgumentType(
help='Commence only if unmodified since supplied UTC datetime (Y-m-d\'T\'H:M\'Z\')',
type=get_datetime_type(False))
allow_shared_key_access_type = CLIArgumentType(
arg_type=get_three_state_flag(), options_list=['--allow-shared-key-access', '-k'], min_api='2019-04-01',
help='Indicate whether the storage account permits requests to be authorized with the account access key via '
'Shared Key. If false, then all requests, including shared access signatures, must be authorized with '
'Azure Active Directory (Azure AD). The default value is null, which is equivalent to true.')
sas_expiration_period_type = CLIArgumentType(
options_list=['--sas-expiration-period', '--sas-exp'], min_api='2021-02-01',
help='Expiration period of the SAS Policy assigned to the storage account, DD.HH:MM:SS.'
)
key_expiration_period_in_days_type = CLIArgumentType(
options_list=['--key-expiration-period-in-days', '--key-exp-days'], min_api='2021-02-01', type=int,
help='Expiration period in days of the Key Policy assigned to the storage account'
)
allow_cross_tenant_replication_type = CLIArgumentType(
arg_type=get_three_state_flag(), options_list=['--allow-cross-tenant-replication', '-r'], min_api='2021-04-01',
help='Allow or disallow cross AAD tenant object replication. The default interpretation is true for this '
'property.')
default_share_permission_type = CLIArgumentType(
options_list=['--default-share-permission', '-d'],
arg_type=get_enum_type(['None', 'StorageFileDataSmbShareContributor',
'StorageFileDataSmbShareElevatedContributor',
'StorageFileDataSmbShareReader']),
min_api='2020-08-01-preview',
arg_group='Azure Files Identity Based Authentication',
help='Default share permission for users using Kerberos authentication if RBAC role is not assigned.')
t_blob_tier = self.get_sdk('_generated.models._azure_blob_storage_enums#AccessTierOptional',
resource_type=ResourceType.DATA_STORAGE_BLOB)
t_rehydrate_priority = self.get_sdk('_generated.models._azure_blob_storage_enums#RehydratePriority',
resource_type=ResourceType.DATA_STORAGE_BLOB)
tier_type = CLIArgumentType(
arg_type=get_enum_type(t_blob_tier), min_api='2019-02-02',
help='The tier value to set the blob to. For page blob, the tier correlates to the size of the blob '
'and number of allowed IOPS. Possible values are P10, P15, P20, P30, P4, P40, P50, P6, P60, P70, P80 '
'and this is only applicable to page blobs on premium storage accounts; For block blob, possible '
'values are Archive, Cool and Hot. This is only applicable to block blobs on standard storage accounts.'
)
rehydrate_priority_type = CLIArgumentType(
arg_type=get_enum_type(t_rehydrate_priority), options_list=('--rehydrate-priority', '-r'),
min_api='2019-02-02',
help='Indicate the priority with which to rehydrate an archived blob.')
action_type = CLIArgumentType(
help='The action of virtual network rule. Possible value is Allow.'
)
with self.argument_context('storage') as c:
c.argument('container_name', container_name_type)
c.argument('directory_name', directory_type)
c.argument('share_name', share_name_type)
c.argument('table_name', table_name_type)
c.argument('retry_wait', options_list=('--retry-interval',))
c.ignore('progress_callback')
c.argument('metadata', nargs='+',
help='Metadata in space-separated key=value pairs. This overwrites any existing metadata.',
validator=validate_metadata)
c.argument('timeout', help='Request timeout in seconds. Applies to each call to the service.', type=int)
with self.argument_context('storage', arg_group='Precondition') as c:
c.argument('if_modified_since', if_modified_since_type)
c.argument('if_unmodified_since', if_unmodified_since_type)
c.argument('if_match')
c.argument('if_none_match')
for item in ['delete', 'show', 'update', 'show-connection-string', 'keys', 'network-rule', 'revoke-delegation-keys', 'failover']: # pylint: disable=line-too-long
with self.argument_context('storage account {}'.format(item)) as c:
c.argument('account_name', acct_name_type, options_list=['--name', '-n'])
c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage account blob-inventory-policy') as c:
c.ignore('blob_inventory_policy_name')
c.argument('resource_group_name', required=False, validator=process_resource_group)
c.argument('account_name',
help='The name of the storage account within the specified resource group. Storage account names '
'must be between 3 and 24 characters in length and use numbers and lower-case letters only.')
with self.argument_context('storage account blob-inventory-policy create') as c:
c.argument('policy', type=file_type, completer=FilesCompleter(),
help='The Storage Account Blob Inventory Policy, string in JSON format or json file path. See more '
'details in https://review.docs.microsoft.com/en-us/azure/storage/blobs/blob-inventory#'
'inventory-policy.')
with self.argument_context('storage account check-name') as c:
c.argument('name', options_list=['--name', '-n'],
help='The name of the storage account within the specified resource group')
with self.argument_context('storage account delete') as c:
c.argument('account_name', acct_name_type, options_list=['--name', '-n'], local_context_attribute=None)
with self.argument_context('storage account create', resource_type=ResourceType.MGMT_STORAGE) as c:
t_account_type, t_sku_name, t_kind, t_tls_version = \
self.get_models('AccountType', 'SkuName', 'Kind', 'MinimumTlsVersion',
resource_type=ResourceType.MGMT_STORAGE)
t_identity_type = self.get_models('IdentityType', resource_type=ResourceType.MGMT_STORAGE)
c.register_common_storage_account_options()
c.argument('location', get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('account_type', help='The storage account type', arg_type=get_enum_type(t_account_type))
c.argument('account_name', acct_name_type, options_list=['--name', '-n'], completer=None,
local_context_attribute=LocalContextAttribute(
name='storage_account_name', actions=[LocalContextAction.SET], scopes=[ALL]))
c.argument('kind', help='Indicate the type of storage account.',
arg_type=get_enum_type(t_kind),
default='StorageV2' if self.cli_ctx.cloud.profile == 'latest' else 'Storage')
c.argument('https_only', arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Allow https traffic only to storage service if set to true. The default value is true.')
c.argument('https_only', arg_type=get_three_state_flag(), max_api='2018-11-01',
help='Allow https traffic only to storage service if set to true. The default value is false.')
c.argument('tags', tags_type)
c.argument('custom_domain', help='User domain assigned to the storage account. Name is the CNAME source.')
c.argument('sku', help='The storage account SKU.', arg_type=get_enum_type(t_sku_name, default='standard_ragrs'))
c.argument('enable_files_aadds', aadds_type)
c.argument('enable_files_adds', adds_type)
c.argument('enable_large_file_share', arg_type=large_file_share_type)
c.argument('domain_name', domain_name_type)
c.argument('net_bios_domain_name', net_bios_domain_name_type)
c.argument('forest_name', forest_name_type)
c.argument('domain_guid', domain_guid_type)
c.argument('domain_sid', domain_sid_type)
c.argument('azure_storage_sid', azure_storage_sid_type)
c.argument('enable_hierarchical_namespace', arg_type=get_three_state_flag(),
options_list=['--enable-hierarchical-namespace', '--hns',
c.deprecate(target='--hierarchical-namespace', redirect='--hns', hide=True)],
help=" Allow the blob service to exhibit filesystem semantics. This property can be enabled only "
"when storage account kind is StorageV2.",
min_api='2018-02-01')
c.argument('encryption_key_type_for_table', arg_type=get_enum_type(['Account', 'Service']),
help='Set the encryption key type for Table service. "Account": Table will be encrypted '
'with account-scoped encryption key. "Service": Table will always be encrypted with '
'service-scoped keys. Currently the default encryption key type is "Service".',
min_api='2019-06-01', options_list=['--encryption-key-type-for-table', '-t'])
c.argument('encryption_key_type_for_queue', arg_type=get_enum_type(['Account', 'Service']),
help='Set the encryption key type for Queue service. "Account": Queue will be encrypted '
'with account-scoped encryption key. "Service": Queue will always be encrypted with '
'service-scoped keys. Currently the default encryption key type is "Service".',
min_api='2019-06-01', options_list=['--encryption-key-type-for-queue', '-q'])
c.argument('routing_choice', routing_choice_type)
c.argument('publish_microsoft_endpoints', publish_microsoft_endpoints_type)
c.argument('publish_internet_endpoints', publish_internet_endpoints_type)
c.argument('require_infrastructure_encryption', options_list=['--require-infrastructure-encryption', '-i'],
arg_type=get_three_state_flag(),
help='A boolean indicating whether or not the service applies a secondary layer of encryption with '
'platform managed keys for data at rest.')
c.argument('allow_blob_public_access', arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Allow or disallow public access to all blobs or containers in the storage account. '
'The default value for this property is null, which is equivalent to true. When true, containers '
'in the account may be configured for public access. Note that setting this property to true does '
'not enable anonymous access to any data in the account. The additional step of configuring the '
'public access setting for a container is required to enable anonymous access.')
c.argument('min_tls_version', arg_type=get_enum_type(t_tls_version),
help='The minimum TLS version to be permitted on requests to storage. '
'The default interpretation is TLS 1.0 for this property')
c.argument('allow_shared_key_access', allow_shared_key_access_type)
c.argument('edge_zone', edge_zone_type, min_api='2020-08-01-preview')
c.argument('identity_type', arg_type=get_enum_type(t_identity_type), arg_group='Identity',
help='The identity type.')
c.argument('user_identity_id', arg_group='Identity',
help='The key is the ARM resource identifier of the identity. Only 1 User Assigned identity is '
'permitted here.')
c.argument('key_expiration_period_in_days', key_expiration_period_in_days_type, is_preview=True)
c.argument('sas_expiration_period', sas_expiration_period_type, is_preview=True)
c.argument('allow_cross_tenant_replication', allow_cross_tenant_replication_type)
c.argument('default_share_permission', default_share_permission_type)
c.argument('enable_nfs_v3', arg_type=get_three_state_flag(), is_preview=True, min_api='2021-01-01',
                   help='NFS 3.0 protocol support enabled if set to true.')
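    # Illustrative note (not part of the original file): the Azure Files AD DS arguments
    # registered above are meant to be supplied together. A hypothetical invocation, with
    # flag names assumed from the parameter names, might look like:
    #   az storage account create -n mystorageacct -g mygroup --enable-files-adds true \
    #     --domain-name corp.contoso.com --net-bios-domain-name corp --forest-name corp.contoso.com \
    #     --domain-guid <guid> --domain-sid <sid> --azure-storage-sid <sid>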
with self.argument_context('storage account private-endpoint-connection',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('private_endpoint_connection_name', options_list=['--name', '-n'],
help='The name of the private endpoint connection associated with the Storage Account.')
for item in ['approve', 'reject', 'show', 'delete']:
with self.argument_context('storage account private-endpoint-connection {}'.format(item),
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('private_endpoint_connection_name', options_list=['--name', '-n'], required=False,
help='The name of the private endpoint connection associated with the Storage Account.')
c.extra('connection_id', options_list=['--id'],
help='The ID of the private endpoint connection associated with the Storage Account. You can get '
'it using `az storage account show`.')
c.argument('account_name', help='The storage account name.', required=False)
c.argument('resource_group_name', help='The resource group name of specified storage account.',
required=False)
c.argument('description', help='Comments for {} operation.'.format(item))
with self.argument_context('storage account update', resource_type=ResourceType.MGMT_STORAGE) as c:
t_tls_version = self.get_models('MinimumTlsVersion', resource_type=ResourceType.MGMT_STORAGE)
t_identity_type = self.get_models('IdentityType', resource_type=ResourceType.MGMT_STORAGE)
c.register_common_storage_account_options()
c.argument('sku', arg_type=get_enum_type(t_sku_name),
help='Note that the SKU name cannot be updated to Standard_ZRS, Premium_LRS or Premium_ZRS, '
'nor can accounts of those SKU names be updated to any other value')
c.argument('custom_domain',
help='User domain assigned to the storage account. Name is the CNAME source. Use "" to clear '
'existing value.',
validator=validate_custom_domain)
c.argument('use_subdomain', help='Specify whether to use indirect CNAME validation.',
arg_type=get_enum_type(['true', 'false']))
c.argument('tags', tags_type, default=None)
c.argument('enable_files_aadds', aadds_type)
c.argument('enable_files_adds', adds_type)
c.argument('enable_large_file_share', arg_type=large_file_share_type)
c.argument('domain_name', domain_name_type)
c.argument('net_bios_domain_name', net_bios_domain_name_type)
c.argument('forest_name', forest_name_type)
c.argument('domain_guid', domain_guid_type)
c.argument('domain_sid', domain_sid_type)
c.argument('azure_storage_sid', azure_storage_sid_type)
c.argument('routing_choice', routing_choice_type)
c.argument('publish_microsoft_endpoints', publish_microsoft_endpoints_type)
c.argument('publish_internet_endpoints', publish_internet_endpoints_type)
c.argument('allow_blob_public_access', arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Allow or disallow public access to all blobs or containers in the storage account. '
'The default value for this property is null, which is equivalent to true. When true, containers '
'in the account may be configured for public access. Note that setting this property to true does '
'not enable anonymous access to any data in the account. The additional step of configuring the '
'public access setting for a container is required to enable anonymous access.')
c.argument('min_tls_version', arg_type=get_enum_type(t_tls_version),
help='The minimum TLS version to be permitted on requests to storage. '
'The default interpretation is TLS 1.0 for this property')
c.argument('allow_shared_key_access', allow_shared_key_access_type)
c.argument('identity_type', arg_type=get_enum_type(t_identity_type), arg_group='Identity',
help='The identity type.')
c.argument('user_identity_id', arg_group='Identity',
help='The key is the ARM resource identifier of the identity. Only 1 User Assigned identity is '
'permitted here.')
c.argument('key_expiration_period_in_days', key_expiration_period_in_days_type, is_preview=True)
c.argument('sas_expiration_period', sas_expiration_period_type, is_preview=True)
c.argument('allow_cross_tenant_replication', allow_cross_tenant_replication_type)
c.argument('default_share_permission', default_share_permission_type)
for scope in ['storage account create', 'storage account update']:
with self.argument_context(scope, arg_group='Customer managed key', min_api='2017-06-01',
resource_type=ResourceType.MGMT_STORAGE) as c:
t_key_source = self.get_models('KeySource', resource_type=ResourceType.MGMT_STORAGE)
c.argument('encryption_key_name', help='The name of the KeyVault key.', )
c.argument('encryption_key_vault', help='The Uri of the KeyVault.')
c.argument('encryption_key_version',
help='The version of the KeyVault key to use, which will opt out of implicit key rotation. '
'Please use "" to opt in key auto-rotation again.')
c.argument('encryption_key_source',
arg_type=get_enum_type(t_key_source),
help='The default encryption key source',
validator=validate_encryption_source)
c.argument('key_vault_user_identity_id', options_list=['--key-vault-user-identity-id', '-u'],
min_api='2021-01-01',
help='Resource identifier of the UserAssigned identity to be associated with server-side '
'encryption on the storage account.')
for scope in ['storage account create', 'storage account update']:
with self.argument_context(scope, resource_type=ResourceType.MGMT_STORAGE, min_api='2017-06-01',
arg_group='Network Rule') as c:
t_bypass, t_default_action = self.get_models('Bypass', 'DefaultAction',
resource_type=ResourceType.MGMT_STORAGE)
c.argument('bypass', nargs='+', validator=validate_bypass, arg_type=get_enum_type(t_bypass),
help='Bypass traffic for space-separated uses.')
c.argument('default_action', arg_type=get_enum_type(t_default_action),
help='Default action to apply when no rule matches.')
c.argument('subnet', help='Name or ID of subnet. If name is supplied, `--vnet-name` must be supplied.')
c.argument('vnet_name', help='Name of a virtual network.', validator=validate_subnet)
c.argument('action', action_type)
with self.argument_context('storage account show-connection-string') as c:
c.argument('protocol', help='The default endpoint protocol.', arg_type=get_enum_type(['http', 'https']))
c.argument('sas_token', help='The SAS token to be used in the connection-string.')
c.argument('key_name', options_list=['--key'], help='The key to use.',
arg_type=get_enum_type(list(storage_account_key_options.keys())))
for item in ['blob', 'file', 'queue', 'table']:
c.argument('{}_endpoint'.format(item), help='Custom endpoint for {}s.'.format(item))
with self.argument_context('storage account encryption-scope') as c:
c.argument('account_name', help='The storage account name.')
c.argument('resource_group_name', validator=process_resource_group, required=False)
c.argument('encryption_scope_name', options_list=['--name', '-n'],
help='The name of the encryption scope within the specified storage account.')
for scope in ['storage account encryption-scope create', 'storage account encryption-scope update']:
with self.argument_context(scope, resource_type=ResourceType.MGMT_STORAGE) as c:
from ._validators import validate_encryption_key
t_encryption_key_source = self.get_models('EncryptionScopeSource', resource_type=ResourceType.MGMT_STORAGE)
c.argument('key_source', options_list=['-s', '--key-source'],
arg_type=get_enum_type(t_encryption_key_source, default="Microsoft.Storage"),
help='The provider for the encryption scope.', validator=validate_encryption_key)
c.argument('key_uri', options_list=['-u', '--key-uri'],
help='The object identifier for a key vault key object. When applied, the encryption scope will '
'use the key referenced by the identifier to enable customer-managed key support on this '
'encryption scope.')
c.argument('require_infrastructure_encryption', options_list=['--require-infrastructure-encryption', '-i'],
arg_type=get_three_state_flag(), min_api='2021-01-01',
help='A boolean indicating whether or not the service applies a secondary layer of encryption '
'with platform managed keys for data at rest.')
with self.argument_context('storage account encryption-scope update') as c:
t_state = self.get_models("EncryptionScopeState", resource_type=ResourceType.MGMT_STORAGE)
c.argument('key_source', options_list=['-s', '--key-source'],
arg_type=get_enum_type(t_encryption_key_source),
help='The provider for the encryption scope.', validator=validate_encryption_key)
c.argument('state', arg_type=get_enum_type(t_state),
help='Change the state the encryption scope. When disabled, '
'all blob read/write operations using this encryption scope will fail.')
with self.argument_context('storage account keys list', resource_type=ResourceType.MGMT_STORAGE) as c:
t_expand_key_type = self.get_models('ListKeyExpand', resource_type=ResourceType.MGMT_STORAGE)
c.argument("expand", options_list=['--expand-key-type'], help='Specify the expanded key types to be listed.',
arg_type=get_enum_type(t_expand_key_type), min_api='2019-04-01', is_preview=True)
with self.argument_context('storage account keys renew', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('key_name', options_list=['--key'], help='The key options to regenerate.',
arg_type=get_enum_type(list(storage_account_key_options.keys())))
c.extra('key_type', help='The key type to regenerate. If --key-type is not specified, one of access keys will '
'be regenerated by default.', arg_type=get_enum_type(['kerb']), min_api='2019-04-01')
c.argument('account_name', acct_name_type, id_part=None)
with self.argument_context('storage account management-policy create') as c:
c.argument('policy', type=file_type, completer=FilesCompleter(),
help='The Storage Account ManagementPolicies Rules, in JSON format. See more details in: '
'https://docs.microsoft.com/azure/storage/common/storage-lifecycle-managment-concepts.')
for item in ['create', 'update', 'show', 'delete']:
with self.argument_context('storage account management-policy {}'.format(item)) as c:
c.argument('account_name', help='The name of the storage account within the specified resource group.')
with self.argument_context('storage account keys list') as c:
c.argument('account_name', acct_name_type, id_part=None)
with self.argument_context('storage account network-rule', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', acct_name_type, id_part=None)
c.argument('ip_address', help='IPv4 address or CIDR range.')
c.argument('subnet', help='Name or ID of subnet. If name is supplied, `--vnet-name` must be supplied.')
c.argument('vnet_name', help='Name of a virtual network.', validator=validate_subnet)
c.argument('action', action_type)
c.argument('resource_id', help='The resource id to add in network rule.', arg_group='Resource Access Rule',
min_api='2020-08-01-preview')
c.argument('tenant_id', help='The tenant id to add in network rule.', arg_group='Resource Access Rule',
min_api='2020-08-01-preview')
with self.argument_context('storage account blob-service-properties show',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage account blob-service-properties update',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
c.argument('enable_change_feed', arg_type=get_three_state_flag(), min_api='2019-04-01',
arg_group='Change Feed Policy')
c.argument('change_feed_retention_days', is_preview=True,
options_list=['--change-feed-retention-days', '--change-feed-days'],
type=int, min_api='2019-06-01', arg_group='Change Feed Policy',
validator=validator_change_feed_retention_days,
help='Indicate the duration of changeFeed retention in days. '
'Minimum value is 1 day and maximum value is 146000 days (400 years). '
'A null value indicates an infinite retention of the change feed.'
'(Use `--enable-change-feed` without `--change-feed-days` to indicate null)')
c.argument('enable_container_delete_retention',
arg_type=get_three_state_flag(),
options_list=['--enable-container-delete-retention', '--container-retention'],
arg_group='Container Delete Retention Policy', min_api='2019-06-01',
help='Enable container delete retention policy for container soft delete when set to true. '
'Disable container delete retention policy when set to false.')
c.argument('container_delete_retention_days',
options_list=['--container-delete-retention-days', '--container-days'],
type=int, arg_group='Container Delete Retention Policy',
min_api='2019-06-01', validator=validate_container_delete_retention_days,
help='Indicate the number of days that the deleted container should be retained. The minimum '
'specified value can be 1 and the maximum value can be 365.')
c.argument('enable_delete_retention', arg_type=get_three_state_flag(), arg_group='Delete Retention Policy',
min_api='2018-07-01')
c.argument('delete_retention_days', type=int, arg_group='Delete Retention Policy',
validator=validate_delete_retention_days, min_api='2018-07-01')
c.argument('enable_restore_policy', arg_type=get_three_state_flag(), arg_group='Restore Policy',
min_api='2019-06-01', help="Enable blob restore policy when it set to true.")
c.argument('restore_days', type=int, arg_group='Restore Policy',
min_api='2019-06-01', help="The number of days for the blob can be restored. It should be greater "
"than zero and less than Delete Retention Days.")
c.argument('enable_versioning', arg_type=get_three_state_flag(), help='Versioning is enabled if set to true.',
min_api='2019-06-01')
c.argument('default_service_version', options_list=['--default-service-version', '-d'],
type=get_api_version_type(), min_api='2018-07-01',
help="Indicate the default version to use for requests to the Blob service if an incoming request's "
"version is not specified.")
with self.argument_context('storage account file-service-properties show',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage account file-service-properties update',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
c.argument('enable_delete_retention', arg_type=get_three_state_flag(), arg_group='Delete Retention Policy',
min_api='2019-06-01', help='Enable file service properties for share soft delete.')
c.argument('delete_retention_days', type=int, arg_group='Delete Retention Policy',
validator=validate_file_delete_retention_days, min_api='2019-06-01',
help='Indicate the number of days that the deleted item should be retained. The minimum specified '
'value can be 1 and the maximum value can be 365.')
c.argument('enable_smb_multichannel', options_list=['--enable-smb-multichannel', '--mc'],
arg_type=get_three_state_flag(), min_api='2020-08-01-preview', arg_group='SMB Setting',
help='Set SMB Multichannel setting for file service. Applies to Premium FileStorage only.')
c.argument('versions', arg_group='SMB Setting', min_api='2020-08-01-preview',
help="SMB protocol versions supported by server. Valid values are SMB2.1, SMB3.0, "
"SMB3.1.1. Should be passed as a string with delimiter ';'.")
c.argument('authentication_methods', options_list='--auth-methods', arg_group='SMB Setting',
min_api='2020-08-01-preview',
help="SMB authentication methods supported by server. Valid values are NTLMv2, Kerberos. "
"Should be passed as a string with delimiter ';'.")
c.argument('kerberos_ticket_encryption', options_list=['--kerb-ticket-encryption', '-k'],
arg_group='SMB Setting', min_api='2020-08-01-preview',
help="Kerberos ticket encryption supported by server. Valid values are RC4-HMAC, AES-256. "
"Should be passed as a string with delimiter ';'.")
c.argument('channel_encryption', arg_group='SMB Setting', min_api='2020-08-01-preview',
help="SMB channel encryption supported by server. Valid values are AES-128-CCM, AES-128-GCM, "
"AES-256-GCM. Should be passed as a string with delimiter ';' ")
with self.argument_context('storage account generate-sas') as c:
t_account_permissions = self.get_sdk('common.models#AccountPermissions')
c.register_sas_arguments()
c.argument('services', type=services_type(self))
c.argument('resource_types', type=resource_type_type(self))
c.argument('expiry', type=get_datetime_type(True))
c.argument('start', type=get_datetime_type(True))
c.argument('account_name', acct_name_type, options_list=['--account-name'])
c.argument('permission', options_list=('--permissions',),
help='The permissions the SAS grants. Allowed values: {}. Can be combined.'.format(
get_permission_help_string(t_account_permissions)),
validator=get_permission_validator(t_account_permissions))
c.ignore('sas_token')
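    # Illustrative note (not part of the original file): the arguments registered above
    # translate into an account-level SAS request along these lines (values are
    # placeholders, not a definitive example):
    #   az storage account generate-sas --account-name mystorageacct --services b \
    #     --resource-types co --permissions rl --expiry 2030-01-01T00:00Z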
or_policy_type = CLIArgumentType(
options_list=['--policy', '-p'],
help='The object replication policy definition between two storage accounts, in JSON format. '
'Multiple rules can be defined in one policy.'
)
policy_id_type = CLIArgumentType(
options_list=['--policy-id'],
help='The ID of object replication policy or "default" if the policy ID is unknown. Policy Id will be '
'auto-generated when setting on destination account. Required when setting on source account.'
)
rule_id_type = CLIArgumentType(
options_list=['--rule-id', '-r'],
help='Rule Id is auto-generated for each new rule on destination account. It is required '
'for put policy on source account.'
)
prefix_math_type = CLIArgumentType(
nargs='+', arg_group='Filters', options_list=['--prefix-match', '--prefix'],
help='Optional. Filter the results to replicate only blobs whose names begin with the specified '
'prefix.'
)
min_creation_time_type = CLIArgumentType(
options_list=['--min-creation-time', '-t'], arg_group='Filters', type=get_datetime_type(True),
help="Blobs created after the time will be replicated to the destination. It must be in datetime format "
"'yyyy-MM-ddTHH:mm:ssZ'. Example: 2020-02-19T16:05:00Z")
with self.argument_context('storage account or-policy') as c:
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
c.argument('object_replication_policy_id', policy_id_type)
c.argument('policy_id', policy_id_type)
c.argument('source_account', options_list=['--source-account', '-s'],
help='The source storage account name or resource Id. Required when no --policy provided.')
c.argument('destination_account', options_list=['--destination-account', '-d'],
help='The destination storage account name or resource Id. Apply --account-name value as '
'destination account when there is no destination account provided in --policy and '
'--destination-account.')
c.argument('properties', or_policy_type)
c.argument('prefix_match', prefix_math_type)
c.argument('min_creation_time', min_creation_time_type)
for item in ['create', 'update']:
with self.argument_context('storage account or-policy {}'.format(item),
arg_group="Object Replication Policy Rule") as c:
c.argument('rule_id', help='Rule Id is auto-generated for each new rule on destination account. It is '
'required for put policy on source account.')
c.argument('source_container', options_list=['--source-container', '--scont'],
help='The source storage container name. Required when no --policy provided.')
c.argument('destination_container', options_list=['--destination-container', '--dcont'],
help='The destination storage container name. Required when no --policy provided.')
with self.argument_context('storage account or-policy create') as c:
c.argument('properties', or_policy_type, validator=validate_or_policy)
with self.argument_context('storage account or-policy rule') as c:
c.argument('policy_id', policy_id_type)
c.argument('source_container', options_list=['--source-container', '-s'],
help='The source storage container name.')
c.argument('destination_container', options_list=['--destination-container', '-d'],
help='The destination storage container name.')
c.argument('rule_id', rule_id_type)
for item in ['show', 'off']:
with self.argument_context('storage logging {}'.format(item)) as c:
c.extra('services', validator=get_char_options_validator('bqt', 'services'), default='bqt')
with self.argument_context('storage logging update') as c:
c.extra('services', validator=get_char_options_validator('bqt', 'services'), options_list='--services',
required=True)
c.argument('log', validator=get_char_options_validator('rwd', 'log'))
c.argument('retention', type=int)
c.argument('version', type=float, validator=validate_logging_version)
with self.argument_context('storage metrics show') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), default='bfqt')
c.argument('interval', arg_type=get_enum_type(['hour', 'minute', 'both']))
with self.argument_context('storage metrics update') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), options_list='--services',
required=True)
c.argument('hour', validator=process_metric_update_namespace, arg_type=get_enum_type(['true', 'false']))
c.argument('minute', arg_type=get_enum_type(['true', 'false']))
c.argument('api', arg_type=get_enum_type(['true', 'false']))
c.argument('retention', type=int)
with self.argument_context('storage blob') as c:
c.argument('blob_name', options_list=('--name', '-n'), arg_type=blob_name_type)
c.argument('destination_path', help='The destination path that will be prepended to the blob name.')
with self.argument_context('storage blob list') as c:
from ._validators import get_include_help_string
t_blob_include = self.get_sdk('_generated.models._azure_blob_storage_enums#ListBlobsIncludeItem',
resource_type=ResourceType.DATA_STORAGE_BLOB)
c.register_container_arguments()
c.argument('delimiter',
help='When the request includes this parameter, the operation returns a BlobPrefix element in the '
'result list that acts as a placeholder for all blobs whose names begin with the same substring '
'up to the appearance of the delimiter character. The delimiter may be a single character or a '
'string.')
c.argument('include', help="Specify one or more additional datasets to include in the response. "
"Options include: {}. Can be combined.".format(get_include_help_string(t_blob_include)),
validator=validate_included_datasets_validator(include_class=t_blob_include))
c.argument('marker', arg_type=marker_type)
c.argument('num_results', arg_type=num_results_type)
c.argument('prefix',
help='Filter the results to return only blobs whose name begins with the specified prefix.')
c.argument('show_next_marker', action='store_true',
help='Show nextMarker in result when specified.')
with self.argument_context('storage blob generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_blob_permissions = self.get_sdk('blob.models#BlobPermissions')
c.register_sas_arguments()
c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed '
'using this shared access signature.')
c.argument('content_disposition', help='Response header value for Content-Disposition when resource is '
'accessed using this shared access signature.')
c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed '
'using this shared access signature.')
c.argument('content_language', help='Response header value for Content-Language when resource is accessed '
'using this shared access signature.')
c.argument('content_type', help='Response header value for Content-Type when resource is accessed '
'using this shared access signature.')
c.argument('full_uri', action='store_true',
                   help='Indicates that this command returns the full blob URI and the shared access signature token.')
c.argument('as_user', min_api='2018-11-09', action='store_true',
validator=as_user_validator,
help="Indicates that this command return the SAS signed with the user delegation key. "
"The expiry parameter and '--auth-mode login' are required if this argument is specified. ")
c.argument('id', options_list='--policy-name', validator=validate_policy,
help='The name of a stored access policy within the container\'s ACL.',
completer=get_storage_acl_name_completion_list(t_base_blob_service, 'container_name',
'get_container_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_blob_permissions)),
validator=get_permission_validator(t_blob_permissions))
c.ignore('sas_token')
with self.argument_context('storage blob restore', resource_type=ResourceType.MGMT_STORAGE) as c:
from ._validators import BlobRangeAddAction
c.argument('blob_ranges', options_list=['--blob-range', '-r'], action=BlobRangeAddAction, nargs='+',
                   help='Blob ranges to restore. You need two values to specify start_range and end_range for each '
'blob range, e.g. -r blob1 blob2. Note: Empty means account start as start range value, and '
'means account end for end range.')
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
c.argument('time_to_restore', type=get_datetime_type(True), options_list=['--time-to-restore', '-t'],
help='Restore blob to the specified time, which should be UTC datetime in (Y-m-d\'T\'H:M:S\'Z\').')
with self.argument_context('storage blob rewrite', resource_type=ResourceType.DATA_STORAGE_BLOB,
min_api='2020-04-08') as c:
c.register_blob_arguments()
c.register_precondition_options()
c.argument('source_url', options_list=['--source-uri', '-u'],
help='A URL of up to 2 KB in length that specifies a file or blob. The value should be URL-encoded '
'as it would appear in a request URI. If the source is in another account, the source must either '
'be public or must be authenticated via a shared access signature. If the source is public, no '
'authentication is required.')
c.extra('lease', options_list='--lease-id',
help='Required if the blob has an active lease. Value can be a BlobLeaseClient object '
'or the lease ID as a string.')
c.extra('standard_blob_tier', arg_type=get_enum_type(t_blob_tier), options_list='--tier',
help='A standard blob tier value to set the blob to. For this version of the library, '
'this is only applicable to block blobs on standard storage accounts.')
c.extra('encryption_scope',
help='A predefined encryption scope used to encrypt the data on the service. An encryption scope '
'can be created using the Management API and referenced here by name. If a default encryption scope '
'has been defined at the container, this value will override it if the container-level scope is '
'configured to allow overrides. Otherwise an error will be raised.')
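# Illustrative invocation of `storage blob rewrite` using the arguments above
# (names and the source SAS URL are placeholders):
#   az storage blob rewrite --account-name mystorageaccount -c mycontainer -n myblob \
#       -u "https://srcaccount.blob.core.windows.net/srccontainer/srcblob?<sas>" --tier Cool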
with self.argument_context('storage blob update') as c:
t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
c.register_content_settings_argument(t_blob_content_settings, update=True)
with self.argument_context('storage blob exists') as c:
c.argument('blob_name', required=True)
with self.argument_context('storage blob url') as c:
c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
c.argument('snapshot', help='A string value that uniquely identifies the snapshot. The value of '
'this query parameter indicates the snapshot version.')
with self.argument_context('storage blob set-tier') as c:
from azure.cli.command_modules.storage._validators import (blob_rehydrate_priority_validator)
c.register_blob_arguments()
c.argument('blob_type', options_list=('--type', '-t'), arg_type=get_enum_type(('block', 'page')))
c.argument('tier', validator=blob_tier_validator)
c.argument('rehydrate_priority', options_list=('--rehydrate-priority', '-r'),
arg_type=get_enum_type(('High', 'Standard')), validator=blob_rehydrate_priority_validator,
is_preview=True, help="Indicate the priority with which to rehydrate an archived blob. "
"The priority can be set on a blob only once, default value is Standard.")
with self.argument_context('storage blob service-properties delete-policy update') as c:
c.argument('enable', arg_type=get_enum_type(['true', 'false']), help='Enables/disables soft-delete.')
c.argument('days_retained', type=int,
help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].')
with self.argument_context('storage blob service-properties update', min_api='2018-03-28') as c:
c.argument('delete_retention', arg_type=get_three_state_flag(), arg_group='Soft Delete',
help='Enables soft-delete.')
c.argument('delete_retention_period', type=int, arg_group='Soft Delete',
help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].')
c.argument('static_website', arg_group='Static Website', arg_type=get_three_state_flag(),
help='Enables static-website.')
c.argument('index_document', help='Represents the name of the index document. This is commonly "index.html".',
arg_group='Static Website')
c.argument('error_document_404_path', options_list=['--404-document'], arg_group='Static Website',
help='Represents the path to the error document that should be shown when an error 404 is issued,'
' in other words, when a browser requests a page that does not exist.')
with self.argument_context('storage blob show') as c:
c.register_blob_arguments()
c.register_precondition_options()
c.extra('snapshot', help='The snapshot parameter is an opaque DateTime value that, when present, '
'specifies the blob snapshot to retrieve.')
c.argument('lease_id', help='Required if the blob has an active lease.')
with self.argument_context('storage blob upload') as c:
from ._validators import page_blob_tier_validator, validate_encryption_scope_client_params
from .sdkutil import get_blob_types, get_blob_tier_names
t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
c.register_content_settings_argument(t_blob_content_settings, update=False)
c.register_blob_arguments()
c.argument('file_path', options_list=('--file', '-f'), type=file_type, completer=FilesCompleter())
c.argument('max_connections', type=int)
c.argument('blob_type', options_list=('--type', '-t'), validator=validate_blob_type,
arg_type=get_enum_type(get_blob_types()))
c.argument('validate_content', action='store_true', min_api='2016-05-31')
c.extra('no_progress', progress_type)
c.extra('socket_timeout', socket_timeout_type)
# TODO: Remove once #807 is complete. Smart Create Generation requires this parameter.
# register_extra_cli_argument('storage blob upload', '_subscription_id', options_list=('--subscription',),
# help=argparse.SUPPRESS)
c.argument('tier', validator=page_blob_tier_validator,
arg_type=get_enum_type(get_blob_tier_names(self.cli_ctx, 'PremiumPageBlobTier')),
min_api='2017-04-17')
c.argument('encryption_scope', validator=validate_encryption_scope_client_params,
help='A predefined encryption scope used to encrypt the data on the service.')
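# Illustrative invocation of `storage blob upload` using the arguments above
# (account, container and file names are placeholders):
#   az storage blob upload --account-name mystorageaccount -c mycontainer -n data.bin -f ./data.bin -t block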
with self.argument_context('storage blob upload-batch') as c:
from .sdkutil import get_blob_types
t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
c.register_content_settings_argument(t_blob_content_settings, update=False, arg_group='Content Control')
c.ignore('source_files', 'destination_container_name')
c.argument('source', options_list=('--source', '-s'))
c.argument('destination', options_list=('--destination', '-d'))
c.argument('max_connections', type=int,
help='Maximum number of parallel connections to use when the blob size exceeds 64MB.')
c.argument('maxsize_condition', arg_group='Content Control')
c.argument('validate_content', action='store_true', min_api='2016-05-31', arg_group='Content Control')
c.argument('blob_type', options_list=('--type', '-t'), arg_type=get_enum_type(get_blob_types()))
c.extra('no_progress', progress_type)
c.extra('socket_timeout', socket_timeout_type)
with self.argument_context('storage blob download') as c:
c.argument('file_path', options_list=('--file', '-f'), type=file_type,
completer=FilesCompleter(), validator=blob_download_file_path_validator)
c.argument('max_connections', type=int)
c.argument('start_range', type=int)
c.argument('end_range', type=int)
c.argument('validate_content', action='store_true', min_api='2016-05-31')
c.extra('no_progress', progress_type)
c.extra('socket_timeout', socket_timeout_type)
with self.argument_context('storage blob download-batch') as c:
c.ignore('source_container_name')
c.argument('destination', options_list=('--destination', '-d'))
c.argument('source', options_list=('--source', '-s'))
c.extra('no_progress', progress_type)
c.extra('socket_timeout', socket_timeout_type)
c.argument('max_connections', type=int,
help='Maximum number of parallel connections to use when the blob size exceeds 64MB.')
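# Illustrative invocation of `storage blob download-batch` using the arguments above
# (account and container names are placeholders):
#   az storage blob download-batch --account-name mystorageaccount -s mycontainer -d ./downloads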
with self.argument_context('storage blob delete') as c:
from .sdkutil import get_delete_blob_snapshot_type_names
c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_blob_snapshot_type_names()))
with self.argument_context('storage blob delete-batch') as c:
c.ignore('source_container_name')
c.argument('source', options_list=('--source', '-s'))
c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_blob_snapshot_type_names()),
help='Required if the blob has associated snapshots.')
c.argument('lease_id', help='The active lease id for the blob.')
with self.argument_context('storage blob lease') as c:
c.argument('blob_name', arg_type=blob_name_type)
with self.argument_context('storage blob lease acquire') as c:
c.register_precondition_options()
c.register_blob_arguments()
c.extra('lease_id', options_list='--proposed-lease-id', help='Proposed lease ID, in a GUID string format. '
'The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format.')
c.argument('lease_duration', help='Specify the duration of the lease, in seconds, or negative one (-1) for '
'a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease '
'duration cannot be changed using renew or change. Default is -1 (infinite lease)', type=int)
with self.argument_context('storage blob lease break') as c:
c.register_precondition_options()
c.register_blob_arguments()
c.argument('lease_break_period', type=int,
help="This is the proposed duration of seconds that the lease should continue before it is broken, "
"between 0 and 60 seconds. This break period is only used if it is shorter than the time remaining "
"on the lease. If longer, the time remaining on the lease is used. A new lease will not be "
"available before the break period has expired, but the lease may be held for longer than the break "
"period. If this header does not appear with a break operation, a fixed-duration lease breaks after "
"the remaining lease period elapses, and an infinite lease breaks immediately.")
with self.argument_context('storage blob lease change') as c:
c.register_precondition_options()
c.register_blob_arguments()
c.extra('proposed_lease_id', help='Proposed lease ID, in a GUID string format. The Blob service returns 400 '
'(Invalid request) if the proposed lease ID is not in the correct format.', required=True)
c.extra('lease_id', help='Required if the blob has an active lease.', required=True)
for item in ['release', 'renew']:
with self.argument_context('storage blob lease {}'.format(item)) as c:
c.register_precondition_options()
c.register_blob_arguments()
c.extra('lease_id', help='Required if the blob has an active lease.', required=True)
with self.argument_context('storage copy') as c:
c.argument('destination',
options_list=['--destination', '-d',
c.deprecate(target='--destination-local-path', redirect='--destination')],
help="The path/url of copy destination. "
"It can be a local path, an url to azure storage server. If you provide destination parameter "
"here, you do not need to provide arguments in copy destination arguments group and copy "
"destination arguments will be deprecated in future.", required=False)
c.argument('source',
options_list=['--source', '-s',
c.deprecate(target='--source-local-path', redirect='--source')],
help="The path/url of copy source. It can be a local"
" path, an url to azure storage server or AWS S3 buckets. If you provide source parameter here,"
" you do not need to provide arguments in copy source arguments group and copy source arguments"
" will be deprecated in future.", required=False)
for item in ['destination', 'source']:
c.extra('{}_container'.format(item), arg_group='Copy {}'.format(item),
help='Container name of copy {} storage account'.format(item))
c.extra('{}_blob'.format(item), arg_group='Copy {}'.format(item),
help='Blob name in blob container of copy {} storage account'.format(item))
c.extra('{}_share'.format(item), arg_group='Copy {}'.format(item),
help='File share name of copy {} storage account'.format(item))
c.extra('{}_file_path'.format(item), arg_group='Copy {}'.format(item),
help='File path in file share of copy {} storage account'.format(item))
c.argument('account_name', acct_name_type, arg_group='Storage Account', id_part=None,
options_list=['--account-name',
c.deprecate(target='--destination-account-name', redirect='--account-name')],
help='Storage account name of copy destination')
c.extra('source_account_name', arg_group='Copy source',
help='Account name of copy source storage account.')
c.extra('source_account_key', arg_group='Copy source',
help='Account key of copy source storage account. Must be used in conjunction with source storage '
'account name.')
c.extra('source_connection_string', arg_group='Copy source',
options_list=['--source-connection-string', '--src-conn'],
help='Connection string of source storage account.')
c.extra('source_sas', arg_group='Copy source',
help='Shared Access Signature (SAS) token of copy source. Must be used in conjunction with source '
'storage account name.')
c.argument('put_md5', arg_group='Additional Flags', action='store_true',
help='Create an MD5 hash of each file, and save the hash as the Content-MD5 property of the '
'destination blob/file. Only available when uploading.')
c.argument('blob_type', arg_group='Additional Flags',
arg_type=get_enum_type(["BlockBlob", "PageBlob", "AppendBlob"]),
help='The type of blob at the destination.')
c.argument('preserve_s2s_access_tier', arg_group='Additional Flags', arg_type=get_three_state_flag(),
help='Preserve access tier during service to service copy. '
'Please refer to https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers '
'to ensure the destination storage account supports setting the access tier. In cases where setting the '
'access tier is not supported, please use `--preserve-s2s-access-tier false` to bypass copying '
'access tier. (Default true)')
c.argument('exclude_pattern', exclude_pattern_type)
c.argument('include_pattern', include_pattern_type)
c.argument('exclude_path', exclude_path_type)
c.argument('include_path', include_path_type)
c.argument('recursive', recursive_type)
c.argument('content_type', arg_group='Additional Flags', help="Specify content type of the file. ")
c.argument('follow_symlinks', arg_group='Additional Flags', action='store_true',
help='Follow symbolic links when uploading from local file system.')
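# Illustrative invocation of `storage copy` using the arguments above
# (the destination container URL is a placeholder):
#   az storage copy -s ./localdir -d "https://mystorageaccount.blob.core.windows.net/mycontainer" --recursive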
with self.argument_context('storage blob copy') as c:
for item in ['destination', 'source']:
c.argument('{}_if_modified_since'.format(item), arg_group='Pre-condition', arg_type=if_modified_since_type)
c.argument('{}_if_unmodified_since'.format(item), arg_group='Pre-condition',
arg_type=if_unmodified_since_type)
c.argument('{}_if_match'.format(item), arg_group='Pre-condition')
c.argument('{}_if_none_match'.format(item), arg_group='Pre-condition')
c.argument('container_name', container_name_type, options_list=('--destination-container', '-c'))
c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'),
help='Name of the destination blob. If it exists, it will be overwritten.')
c.argument('source_lease_id', arg_group='Copy Source')
with self.argument_context('storage blob copy start', resource_type=ResourceType.DATA_STORAGE_BLOB) as c:
from ._validators import validate_source_url
c.register_blob_arguments()
c.register_precondition_options()
c.register_precondition_options(prefix='source_')
c.register_source_uri_arguments(validator=validate_source_url)
c.ignore('incremental_copy')
c.argument('if_match', options_list=['--destination-if-match'])
c.argument('if_modified_since', options_list=['--destination-if-modified-since'])
c.argument('if_none_match', options_list=['--destination-if-none-match'])
c.argument('if_unmodified_since', options_list=['--destination-if-unmodified-since'])
c.argument('if_tags_match_condition', options_list=['--destination-tags-condition'])
c.argument('blob_name', options_list=['--destination-blob', '-b'], required=True,
help='Name of the destination blob. If it exists, it will be overwritten.')
c.argument('container_name', options_list=['--destination-container', '-c'], required=True,
help='The container name.')
c.extra('destination_lease', options_list='--destination-lease-id',
help='The lease ID specified for this header must match the lease ID of the destination blob. '
'If the request does not include the lease ID or it is not valid, the operation fails with status '
'code 412 (Precondition Failed).')
c.extra('source_lease', options_list='--source-lease-id', arg_group='Copy Source',
help='Specify this to perform the Copy Blob operation only if the lease ID given matches the '
'active lease ID of the source blob.')
c.extra('rehydrate_priority', rehydrate_priority_type)
c.extra('requires_sync', arg_type=get_three_state_flag(),
help='Enforce that the service will not return a response until the copy is complete.')
c.extra('tier', tier_type)
c.extra('tags', tags_type)
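# Illustrative invocation of `storage blob copy start` using the arguments above
# (names and the source SAS URL are placeholders):
#   az storage blob copy start --account-name mystorageaccount -c destcontainer -b destblob \
#       --source-uri "https://srcaccount.blob.core.windows.net/srccontainer/srcblob?<sas>"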
with self.argument_context('storage blob copy start-batch', arg_group='Copy Source') as c:
from azure.cli.command_modules.storage._validators import get_source_file_or_blob_service_client
c.argument('source_client', ignore_type, validator=get_source_file_or_blob_service_client)
c.extra('source_account_name')
c.extra('source_account_key')
c.extra('source_uri')
c.argument('source_sas')
c.argument('source_container')
c.argument('source_share')
with self.argument_context('storage blob incremental-copy start') as c:
from azure.cli.command_modules.storage._validators import process_blob_source_uri
c.register_source_uri_arguments(validator=process_blob_source_uri, blob_only=True)
c.argument('destination_if_modified_since', arg_group='Pre-condition', arg_type=if_modified_since_type)
c.argument('destination_if_unmodified_since', arg_group='Pre-condition', arg_type=if_unmodified_since_type)
c.argument('destination_if_match', arg_group='Pre-condition')
c.argument('destination_if_none_match', arg_group='Pre-condition')
c.argument('container_name', container_name_type, options_list=('--destination-container', '-c'))
c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'),
help='Name of the destination blob. If it exists, it will be overwritten.')
c.argument('source_lease_id', arg_group='Copy Source')
with self.argument_context('storage blob query') as c:
from ._validators import validate_text_configuration
c.register_blob_arguments()
c.register_precondition_options()
line_separator = CLIArgumentType(help="The string used to separate records.", default='\n')
column_separator = CLIArgumentType(help="The string used to separate columns.", default=',')
quote_char = CLIArgumentType(help="The string used to quote a specific field.", default='"')
record_separator = CLIArgumentType(help="The string used to separate records.", default='\n')
escape_char = CLIArgumentType(help="The string used as an escape character. Default to empty.", default="")
has_header = CLIArgumentType(
arg_type=get_three_state_flag(),
help="Whether the blob data includes headers in the first line. "
"The default value is False, meaning that the data will be returned inclusive of the first line. "
"If set to True, the data will be returned exclusive of the first line.", default=False)
c.extra('lease', options_list='--lease-id',
help='Required if the blob has an active lease.')
c.argument('query_expression', help='The query expression in SQL. The maximum size of the query expression '
'is 256KiB. For more information about the expression syntax, please see '
'https://docs.microsoft.com/azure/storage/blobs/query-acceleration-sql-reference')
c.extra('input_format', arg_type=get_enum_type(['csv', 'json']), validator=validate_text_configuration,
help='Serialization type of the data currently stored in the blob. '
'The default is to treat the blob data as CSV data formatted in the default dialect. '
'The blob data will be reformatted according to that profile when blob format is specified. '
'If you choose `json`, please specify `Output Json Text Configuration Arguments` accordingly; '
'If you choose `csv`, please specify `Output Delimited Text Configuration Arguments`.')
c.extra('output_format', arg_type=get_enum_type(['csv', 'json']),
help='Output serialization type for the data stream. '
'By default the data will be returned as it is represented in the blob. '
'By providing an output format, the blob data will be reformatted according to that profile. '
'If you choose `json`, please specify `Output Json Text Configuration Arguments` accordingly; '
'If you choose `csv`, please specify `Output Delimited Text Configuration Arguments`.')
c.extra('in_line_separator',
arg_group='Input Json Text Configuration',
arg_type=line_separator)
c.extra('in_column_separator', arg_group='Input Delimited Text Configuration',
arg_type=column_separator)
c.extra('in_quote_char', arg_group='Input Delimited Text Configuration',
arg_type=quote_char)
c.extra('in_record_separator', arg_group='Input Delimited Text Configuration',
arg_type=record_separator)
c.extra('in_escape_char', arg_group='Input Delimited Text Configuration',
arg_type=escape_char)
c.extra('in_has_header', arg_group='Input Delimited Text Configuration',
arg_type=has_header)
c.extra('out_line_separator',
arg_group='Output Json Text Configuration',
arg_type=line_separator)
c.extra('out_column_separator', arg_group='Output Delimited Text Configuration',
arg_type=column_separator)
c.extra('out_quote_char', arg_group='Output Delimited Text Configuration',
arg_type=quote_char)
c.extra('out_record_separator', arg_group='Output Delimited Text Configuration',
arg_type=record_separator)
c.extra('out_escape_char', arg_group='Output Delimited Text Configuration',
arg_type=escape_char)
c.extra('out_has_header', arg_group='Output Delimited Text Configuration',
arg_type=has_header)
c.extra('result_file', help='Specify the file path to save result.')
c.ignore('input_config')
c.ignore('output_config')
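# Illustrative invocation of `storage blob query` using the arguments above
# (names are placeholders; the expression uses the query acceleration SQL dialect):
#   az storage blob query --account-name mystorageaccount -c mycontainer -n data.csv \
#       --query-expression "SELECT _1 FROM BlobStorage" --result-file ./result.csv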
with self.argument_context('storage blob sync') as c:
c.extra('destination_container', options_list=['--container', '-c'], required=True,
help='The sync destination container.')
c.extra('destination_path', options_list=['--destination', '-d'],
validator=validate_azcopy_upload_destination_url,
help='The sync destination path.')
c.argument('source', options_list=['--source', '-s'],
help='The source file path to sync from.')
c.ignore('destination')
c.argument('exclude_pattern', exclude_pattern_type)
c.argument('include_pattern', include_pattern_type)
c.argument('exclude_path', exclude_path_type)
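# Illustrative invocation of `storage blob sync` using the arguments above
# (account, container and path names are placeholders):
#   az storage blob sync --account-name mystorageaccount -c mycontainer -s ./localdir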
with self.argument_context('storage container') as c:
from .sdkutil import get_container_access_type_names
c.argument('container_name', container_name_type, options_list=('--name', '-n'))
c.argument('public_access', validator=validate_container_public_access,
arg_type=get_enum_type(get_container_access_type_names()),
help='Specifies whether data in the container may be accessed publicly.')
with self.argument_context('storage container create') as c:
c.argument('container_name', container_name_type, options_list=('--name', '-n'), completer=None)
c.argument('fail_on_exist', help='Throw an exception if the container already exists.')
c.argument('account_name', help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.')
c.argument('default_encryption_scope', options_list=['--default-encryption-scope', '-d'],
arg_group='Encryption Policy', is_preview=True,
help='Default the container to use specified encryption scope for all writes.')
c.argument('prevent_encryption_scope_override', options_list=['--prevent-encryption-scope-override', '-p'],
arg_type=get_three_state_flag(), arg_group='Encryption Policy', is_preview=True,
help='Block override of encryption scope from the container default.')
with self.argument_context('storage container delete') as c:
c.argument('fail_not_exist', help='Throw an exception if the container does not exist.')
c.argument('bypass_immutability_policy', action='store_true', help='Bypasses upcoming service behavior that '
'will block a container from being deleted if it has an immutability policy. Specifying this will '
'ignore arguments aside from those used to identify the container ("--name", "--account-name").')
c.argument('lease_id', help="If specified, delete_container only succeeds if the container's lease is active "
"and matches this ID. Required if the container has an active lease.")
c.ignore('processed_resource_group')
c.ignore('processed_account_name')
c.ignore('mgmt_client')
with self.argument_context('storage container exists') as c:
c.ignore('blob_name', 'snapshot')
for item in ['create', 'extend']:
with self.argument_context('storage container immutability-policy {}'.format(item)) as c:
c.argument('account_name',
help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.')
c.argument('if_match', help="An ETag value, or the wildcard character (*). Specify this header to perform "
"the operation only if the resource's ETag matches the value specified.")
c.extra('allow_protected_append_writes', options_list=['--allow-protected-append-writes', '-w'],
arg_type=get_three_state_flag(), help='This property can only be changed for unlocked time-based '
'retention policies. When enabled, new blocks can be '
'written to an append blob while maintaining immutability '
'protection and compliance. Only new blocks can be added '
'and any existing blocks cannot be modified or deleted. '
'This property cannot be changed with '
'ExtendImmutabilityPolicy API.')
c.extra('period', type=int, help='The immutability period for the blobs in the container since the policy '
'creation, in days.')
c.ignore('parameters')
with self.argument_context('storage container list') as c:
c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage container set-permission') as c:
c.ignore('signed_identifiers')
with self.argument_context('storage container lease') as c:
c.argument('container_name', container_name_type)
with self.argument_context('storage container') as c:
c.argument('account_name', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage container immutability-policy') as c:
c.argument('immutability_period_since_creation_in_days', options_list='--period')
c.argument('container_name', container_name_type)
with self.argument_context('storage container legal-hold') as c:
c.argument('container_name', container_name_type)
c.argument('account_name',
help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.')
c.argument('tags', nargs='+',
help='Space-separated tags. Each tag should be 3 to 23 alphanumeric characters and is normalized '
'to lower case.')
with self.argument_context('storage container policy') as c:
from .completers import get_storage_acl_name_completion_list
t_container_permissions = self.get_sdk('blob.models#ContainerPermissions')
c.argument('container_name', container_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_base_blob_service, 'container_name',
'get_container_acl'))
help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_container_permissions))
c.argument('permission', options_list='--permissions', help=help_str,
validator=get_permission_validator(t_container_permissions))
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
for item in ['create', 'delete', 'list', 'show', 'update']:
with self.argument_context('storage container policy {}'.format(item)) as c:
c.extra('lease_id', options_list='--lease-id', help='The container lease ID.')
with self.argument_context('storage container generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_container_permissions = self.get_sdk('blob.models#ContainerPermissions')
c.register_sas_arguments()
c.argument('id', options_list='--policy-name', validator=validate_policy,
help='The name of a stored access policy within the container\'s ACL.',
completer=get_storage_acl_name_completion_list(t_container_permissions, 'container_name',
'get_container_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_container_permissions)),
validator=get_permission_validator(t_container_permissions))
c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed '
'using this shared access signature.')
c.argument('content_disposition', help='Response header value for Content-Disposition when resource is '
'accessed using this shared access signature.')
c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed '
'using this shared access signature.')
c.argument('content_language', help='Response header value for Content-Language when resource is accessed '
'using this shared access signature.')
c.argument('content_type', help='Response header value for Content-Type when resource is accessed '
'using this shared access signature.')
c.argument('as_user', min_api='2018-11-09', action='store_true',
validator=as_user_validator,
help="Indicates that this command return the SAS signed with the user delegation key. "
"The expiry parameter and '--auth-mode login' are required if this argument is specified. ")
c.ignore('sas_token')
with self.argument_context('storage container lease') as c:
c.argument('lease_duration', type=int)
c.argument('lease_break_period', type=int)
with self.argument_context('storage container-rm', resource_type=ResourceType.MGMT_STORAGE) as c:
from .sdkutil import get_container_access_type_names
c.argument('container_name', container_name_type, options_list=('--name', '-n'), id_part='child_name_2')
c.argument('account_name', storage_account_type)
c.argument('resource_group_name', required=False)
c.argument('public_access', validator=validate_container_public_access,
arg_type=get_enum_type(get_container_access_type_names()),
help='Specify whether data in the container may be accessed publicly.')
c.ignore('filter', 'maxpagesize')
with self.argument_context('storage container-rm create', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('fail_on_exist', help='Throw an exception if the container already exists.')
for item in ['create', 'update']:
with self.argument_context('storage container-rm {}'.format(item),
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('default_encryption_scope', options_list=['--default-encryption-scope', '-d'],
arg_group='Encryption Policy', min_api='2019-06-01',
help='Default the container to use specified encryption scope for all writes.')
c.argument('deny_encryption_scope_override',
options_list=['--deny-encryption-scope-override', '--deny-override'],
arg_type=get_three_state_flag(), arg_group='Encryption Policy', min_api='2019-06-01',
help='Block override of encryption scope from the container default.')
with self.argument_context('storage container-rm list', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', storage_account_type, id_part=None)
c.argument('include_deleted', action='store_true',
help='Include soft deleted containers when specified.')
with self.argument_context('storage share') as c:
c.argument('share_name', share_name_type, options_list=('--name', '-n'))
with self.argument_context('storage share-rm', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('resource_group_name', required=False)
c.argument('account_name', storage_account_type)
c.argument('share_name', share_name_type, options_list=('--name', '-n'), id_part='child_name_2')
c.argument('expand', default=None)
c.argument('x_ms_snapshot', options_list=['--snapshot'], is_preview=True,
help='The DateTime value that specifies the share snapshot to retrieve.')
c.ignore('filter', 'maxpagesize')
with self.argument_context('storage share-rm delete', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('include', default='none')
with self.argument_context('storage share-rm update', resource_type=ResourceType.MGMT_STORAGE) as c:
c.ignore('x_ms_snapshot')
for item in ['create', 'update', 'snapshot']:
with self.argument_context('storage share-rm {}'.format(item), resource_type=ResourceType.MGMT_STORAGE) as c:
t_enabled_protocols, t_root_squash, t_access_tier = \
self.get_models('EnabledProtocols', 'RootSquashType', 'ShareAccessTier',
resource_type=ResourceType.MGMT_STORAGE)
c.argument('share_quota', type=int, options_list=['--quota', '-q'],
help='The maximum size of the share in gigabytes. Must be greater than 0, and less than or '
'equal to 5TB (5120). For Large File Shares, the maximum size is 102400.')
c.argument('metadata', nargs='+',
help='Metadata in space-separated key=value pairs that is associated with the share. '
'This overwrites any existing metadata',
validator=validate_metadata)
c.argument('enabled_protocols', arg_type=get_enum_type(t_enabled_protocols),
min_api='2019-06-01', help='Immutable property for the file share protocol. NFS protocol is '
'only available for premium file shares (file shares in the FileStorage account type).')
c.argument('root_squash', arg_type=get_enum_type(t_root_squash),
min_api='2019-06-01', help='Reduction of the access rights for the remote superuser.')
c.argument('access_tier', arg_type=get_enum_type(t_access_tier), min_api='2019-06-01',
help='Access tier for specific share. GpV2 account can choose between TransactionOptimized '
'(default), Hot, and Cool. FileStorage account can choose Premium.')
with self.argument_context('storage share-rm list', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', storage_account_type, id_part=None)
c.argument('include_deleted', action='store_true',
help='Include soft deleted file shares when specified.')
c.argument('include_snapshot', action='store_true',
help='Include file share snapshots when specified.')
with self.argument_context('storage share-rm restore', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('deleted_version',
help='Identify the version of the deleted share that will be restored.')
c.argument('share_name',
help='The file share name. Identify the name of the deleted share that will be restored.')
c.argument('restored_name',
help='A new file share name to be restored. If not specified, deleted share name will be used.')
with self.argument_context('storage share url') as c:
c.argument('unc', action='store_true', help='Output UNC network path.')
c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
with self.argument_context('storage share list') as c:
c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage share exists') as c:
c.ignore('directory_name', 'file_name')
with self.argument_context('storage share policy') as c:
from .completers import get_storage_acl_name_completion_list
t_file_svc = self.get_sdk('file#FileService')
t_share_permissions = self.get_sdk('file.models#SharePermissions')
c.argument('container_name', share_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_file_svc, 'container_name', 'get_share_acl'))
help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_share_permissions))
c.argument('permission', options_list='--permissions', help=help_str,
validator=get_permission_validator(t_share_permissions))
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
with self.argument_context('storage share delete') as c:
from .sdkutil import get_delete_file_snapshot_type_names
c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_file_snapshot_type_names()),
help='Specify the deletion strategy when the share has snapshots.')
with self.argument_context('storage share generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_share_permissions = self.get_sdk('file.models#SharePermissions')
c.register_sas_arguments()
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the share\'s ACL.',
completer=get_storage_acl_name_completion_list(t_share_permissions, 'share_name', 'get_share_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_share_permissions)),
validator=get_permission_validator(t_share_permissions))
c.ignore('sas_token')
with self.argument_context('storage directory') as c:
c.argument('directory_name', directory_type, options_list=('--name', '-n'))
with self.argument_context('storage directory exists') as c:
c.ignore('file_name')
c.argument('directory_name', required=True)
with self.argument_context('storage file') as c:
c.argument('file_name', file_name_type, options_list=('--name', '-n'))
c.argument('directory_name', directory_type, required=False)
with self.argument_context('storage file copy') as c:
c.argument('share_name', share_name_type, options_list=('--destination-share', '-s'),
help='Name of the destination share. The share must exist.')
with self.argument_context('storage file copy cancel') as c:
c.register_path_argument(options_list=('--destination-path', '-p'))
with self.argument_context('storage file delete') as c:
c.register_path_argument()
with self.argument_context('storage file download') as c:
c.register_path_argument()
c.argument('file_path', options_list=('--dest',), type=file_type, required=False,
help='Path of the file to write to. The source filename will be used if not specified.',
validator=process_file_download_namespace, completer=FilesCompleter())
c.argument('path', validator=None) # validator called manually from process_file_download_namespace
c.extra('no_progress', progress_type)
c.argument('max_connections', type=int)
c.argument('start_range', type=int)
c.argument('end_range', type=int)
with self.argument_context('storage file exists') as c:
c.register_path_argument()
with self.argument_context('storage file generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
c.register_path_argument()
c.register_sas_arguments()
t_file_svc = self.get_sdk('file.fileservice#FileService')
t_file_permissions = self.get_sdk('file.models#FilePermissions')
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the share\'s ACL.',
completer=get_storage_acl_name_completion_list(t_file_svc, 'container_name', 'get_container_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_file_permissions)),
validator=get_permission_validator(t_file_permissions))
c.ignore('sas_token')
with self.argument_context('storage file list') as c:
from .completers import dir_path_completer
c.argument('directory_name', options_list=('--path', '-p'), help='The directory path within the file share.',
completer=dir_path_completer)
c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage file metadata show') as c:
c.register_path_argument()
with self.argument_context('storage file metadata update') as c:
c.register_path_argument()
with self.argument_context('storage file resize') as c:
c.register_path_argument()
c.argument('content_length', options_list='--size')
with self.argument_context('storage file show') as c:
c.register_path_argument()
with self.argument_context('storage file update') as c:
t_file_content_settings = self.get_sdk('file.models#ContentSettings')
c.register_path_argument()
c.register_content_settings_argument(t_file_content_settings, update=True)
with self.argument_context('storage file upload') as c:
t_file_content_settings = self.get_sdk('file.models#ContentSettings')
c.register_path_argument(default_file_param='local_file_path')
c.register_content_settings_argument(t_file_content_settings, update=False, guess_from_file='local_file_path')
c.argument('local_file_path', options_list='--source', type=file_type, completer=FilesCompleter())
c.extra('no_progress', progress_type)
c.argument('max_connections', type=int)
with self.argument_context('storage file url') as c:
c.register_path_argument()
c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
with self.argument_context('storage file upload-batch') as c:
from ._validators import process_file_upload_batch_parameters
c.argument('source', options_list=('--source', '-s'), validator=process_file_upload_batch_parameters)
c.argument('destination', options_list=('--destination', '-d'))
c.argument('max_connections', arg_group='Download Control', type=int)
c.argument('validate_content', action='store_true', min_api='2016-05-31')
c.register_content_settings_argument(t_file_content_settings, update=False, arg_group='Content Settings')
c.extra('no_progress', progress_type)
with self.argument_context('storage file download-batch') as c:
from ._validators import process_file_download_batch_parameters
c.argument('source', options_list=('--source', '-s'), validator=process_file_download_batch_parameters)
c.argument('destination', options_list=('--destination', '-d'))
c.argument('max_connections', arg_group='Download Control', type=int)
c.argument('validate_content', action='store_true', min_api='2016-05-31')
c.extra('no_progress', progress_type)
with self.argument_context('storage file delete-batch') as c:
from ._validators import process_file_batch_source_parameters
c.argument('source', options_list=('--source', '-s'), validator=process_file_batch_source_parameters)
with self.argument_context('storage file copy start') as c:
from azure.cli.command_modules.storage._validators import validate_source_uri
c.register_path_argument(options_list=('--destination-path', '-p'))
c.register_source_uri_arguments(validator=validate_source_uri)
c.extra('file_snapshot', default=None, arg_group='Copy Source',
help='The file snapshot for the source storage account.')
with self.argument_context('storage file copy start-batch', arg_group='Copy Source') as c:
from ._validators import get_source_file_or_blob_service_client
c.argument('source_client', ignore_type, validator=get_source_file_or_blob_service_client)
c.extra('source_account_name')
c.extra('source_account_key')
c.extra('source_uri')
c.argument('source_sas')
c.argument('source_container')
c.argument('source_share')
with self.argument_context('storage cors list') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), default='bqft',
options_list='--services', required=False)
with self.argument_context('storage cors add') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), required=True,
options_list='--services')
c.argument('max_age')
c.argument('origins', nargs='+')
c.argument('methods', nargs='+',
arg_type=get_enum_type(['DELETE', 'GET', 'HEAD', 'MERGE', 'POST', 'OPTIONS', 'PUT']))
c.argument('allowed_headers', nargs='+')
c.argument('exposed_headers', nargs='+')
with self.argument_context('storage cors clear') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), required=True,
options_list='--services')
with self.argument_context('storage queue generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_queue_permissions = self.get_sdk('queue.models#QueuePermissions')
c.register_sas_arguments()
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the queue\'s ACL.',
completer=get_storage_acl_name_completion_list(t_queue_permissions, 'queue_name', 'get_queue_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_queue_permissions)),
validator=get_permission_validator(t_queue_permissions))
c.ignore('sas_token')
c.ignore('auth_mode')
with self.argument_context('storage queue') as c:
c.argument('queue_name', queue_name_type, options_list=('--name', '-n'))
with self.argument_context('storage queue list') as c:
c.argument('include_metadata', help='Specify that queue metadata be returned in the response.')
c.argument('marker', arg_type=marker_type)
c.argument('num_results', arg_type=num_results_type)
c.argument('prefix', help='Filter the results to return only queues whose names '
'begin with the specified prefix.')
c.argument('show_next_marker', action='store_true',
help='Show nextMarker in result when specified.')
c.extra('timeout', help='Request timeout in seconds. Apply to each call to the service.', type=int)
with self.argument_context('storage queue create') as c:
c.argument('queue_name', queue_name_type, options_list=('--name', '-n'), completer=None)
with self.argument_context('storage queue policy') as c:
from .completers import get_storage_acl_name_completion_list
t_queue_permissions = self.get_sdk('queue.models#QueuePermissions')
c.argument('container_name', queue_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_queue_service, 'container_name', 'get_queue_acl'))
help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_queue_permissions))
c.argument('permission', options_list='--permissions', help=help_str,
validator=get_permission_validator(t_queue_permissions))
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
c.ignore('auth_mode')
with self.argument_context('storage message') as c:
c.argument('queue_name', queue_name_type)
c.argument('message_id', options_list='--id')
c.argument('content', type=unicode_string, help='Message content, up to 64KB in size.')
with self.argument_context('storage remove') as c:
from .completers import file_path_completer
c.extra('container_name', container_name_type, validator=validate_azcopy_remove_arguments)
c.extra('blob_name', options_list=('--name', '-n'), arg_type=blob_name_type)
c.extra('share_name', share_name_type, help='The file share name.')
c.extra('path', options_list=('--path', '-p'),
help='The path to the file within the file share.',
completer=file_path_completer)
c.argument('exclude_pattern', exclude_pattern_type)
c.argument('include_pattern', include_pattern_type)
c.argument('exclude_path', exclude_path_type)
c.argument('include_path', include_path_type)
c.argument('recursive', recursive_type)
c.ignore('destination')
c.ignore('service')
c.ignore('target')
with self.argument_context('storage table') as c:
c.argument('table_name', table_name_type, options_list=('--name', '-n'))
with self.argument_context('storage table create') as c:
c.argument('table_name', table_name_type, options_list=('--name', '-n'), completer=None)
c.argument('fail_on_exist', help='Throw an exception if the table already exists.')
with self.argument_context('storage table policy') as c:
from ._validators import table_permission_validator
from .completers import get_storage_acl_name_completion_list
c.argument('container_name', table_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_table_service, 'table_name', 'get_table_acl'))
help_str = 'Allowed values: (r)ead/query (a)dd (u)pdate (d)elete. Can be combined.'
c.argument('permission', options_list='--permissions', help=help_str, validator=table_permission_validator)
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
with self.argument_context('storage table generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
c.register_sas_arguments()
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the table\'s ACL.',
completer=get_storage_acl_name_completion_list(t_table_service, 'table_name', 'get_table_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format('(r)ead/query (a)dd (u)pdate (d)elete'),
validator=table_permission_validator)
c.ignore('sas_token')
with self.argument_context('storage entity') as c:
c.ignore('property_resolver')
c.argument('entity', options_list=('--entity', '-e'), validator=validate_entity, nargs='+')
c.argument('select', nargs='+', validator=validate_select,
help='Space-separated list of properties to return for each entity.')
with self.argument_context('storage entity insert') as c:
c.argument('if_exists', arg_type=get_enum_type(['fail', 'merge', 'replace']))
with self.argument_context('storage entity query') as c:
c.argument('accept', default='minimal', validator=validate_table_payload_format,
arg_type=get_enum_type(['none', 'minimal', 'full']),
help='Specifies how much metadata to include in the response payload.')
c.argument('marker', validator=validate_marker, nargs='+')
for item in ['create', 'show', 'delete', 'exists', 'metadata update', 'metadata show']:
with self.argument_context('storage fs {}'.format(item)) as c:
c.extra('file_system_name', options_list=['--name', '-n'],
help="File system name.", required=True)
c.extra('timeout', timeout_type)
with self.argument_context('storage fs create') as c:
from .sdkutil import get_fs_access_type_names
c.argument('public_access', arg_type=get_enum_type(get_fs_access_type_names()),
validator=validate_fs_public_access,
help="Specify whether data in the file system may be accessed publicly and the level of access.")
with self.argument_context('storage fs list') as c:
c.argument('include_metadata', arg_type=get_three_state_flag(),
help='Specify that file system metadata be returned in the response. The default value is "False".')
c.argument('name_starts_with', options_list=['--prefix'],
help='Filter the results to return only file systems whose names begin with the specified prefix.')
for item in ['create', 'show', 'delete', 'exists', 'move', 'metadata update', 'metadata show']:
with self.argument_context('storage fs directory {}'.format(item)) as c:
c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name.", required=True)
c.extra('directory_path', options_list=['--name', '-n'],
help="The name of directory.", required=True)
c.extra('timeout', timeout_type)
with self.argument_context('storage fs directory create') as c:
c.extra('permissions', permissions_type)
c.extra('umask', umask_type)
with self.argument_context('storage fs directory list') as c:
c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name.", required=True)
c.argument('recursive', arg_type=get_three_state_flag(), default=True,
help='Look into sub-directories recursively when set to true.')
c.argument('path', help="Filter the results to return only paths under the specified path.")
c.argument('num_results', type=int, help='Specify the maximum number of results to return.')
with self.argument_context('storage fs directory move') as c:
c.argument('new_name', options_list=['--new-directory', '-d'],
help='The new directory name to move to. The value must have the following format: '
'"{filesystem}/{directory}/{subdirectory}".')
with self.argument_context('storage fs directory upload') as c:
from ._validators import validate_fs_directory_upload_destination_url
c.extra('destination_fs', options_list=['--file-system', '-f'], required=True,
help='The upload destination file system.')
c.extra('destination_path', options_list=['--destination-path', '-d'],
validator=validate_fs_directory_upload_destination_url,
help='The upload destination directory path. It should be an absolute path to file system. '
'If the specified destination path does not exist, a new directory path will be created.')
c.argument('source', options_list=['--source', '-s'],
help='The source file path to upload from.')
c.argument('recursive', recursive_type, help='Recursively upload files. If enabled, all the files '
'including the files in subdirectories will be uploaded.')
c.ignore('destination')
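# Illustrative invocation of `storage fs directory upload` using the arguments above
# (account, file system and path names are placeholders):
#   az storage fs directory upload -f myfilesystem --account-name mystorageaccount -s ./localdir -d targetdir --recursive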
with self.argument_context('storage fs directory download') as c:
from ._validators import validate_fs_directory_download_source_url
c.extra('source_fs', options_list=['--file-system', '-f'], required=True,
help='The download source file system.')
c.extra('source_path', options_list=['--source-path', '-s'],
validator=validate_fs_directory_download_source_url,
help='The download source directory path. It should be an absolute path to file system.')
c.argument('destination', options_list=['--destination-path', '-d'],
help='The destination local directory path to download.')
c.argument('recursive', recursive_type, help='Recursively download files. If enabled, all the files '
'including the files in subdirectories will be downloaded.')
c.ignore('source')
with self.argument_context('storage fs file list') as c:
c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name.", required=True)
c.argument('recursive', arg_type=get_three_state_flag(), default=True,
help='Look into sub-directories recursively when set to true.')
c.argument('exclude_dir', action='store_true',
help='List only files in the given file system.')
c.argument('path', help='Filter the results to return only paths under the specified path.')
c.argument('num_results', type=int, default=5000,
help='Specify the maximum number of results to return. If the request does not specify num_results '
'or specifies a value greater than 5,000, the server will return up to 5,000 items.')
c.argument('marker',
help='An opaque continuation token. This value can be retrieved from the next_marker field of a '
'previous generator object. If specified, this generator will begin returning results from this '
'point.')
c.argument('show_next_marker', action='store_true', is_preview=True,
help='Show nextMarker in result when specified.')
for item in ['create', 'show', 'delete', 'exists', 'upload', 'append', 'download', 'metadata update',
'metadata show']:
with self.argument_context('storage fs file {}'.format(item)) as c:
c.extra('file_system_name', options_list=['-f', '--file-system'],
help='File system name.', required=True)
c.extra('path', options_list=['-p', '--path'], help="The file path in a file system.",
required=True)
c.extra('timeout', timeout_type)
c.argument('content', help='Content to be appended to file.')
with self.argument_context('storage fs file create') as c:
t_file_content_settings = self.get_sdk('_models#ContentSettings',
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
c.register_content_settings_argument(t_file_content_settings, update=False)
c.extra('permissions', permissions_type)
c.extra('umask', umask_type)
c.extra('timeout', timeout_type)
with self.argument_context('storage fs file download') as c:
c.argument('destination_path', options_list=['--destination', '-d'], type=file_type,
help='The local file where the file or folder will be downloaded to. The source filename will be '
'used if not specified.')
c.argument('overwrite', arg_type=get_three_state_flag(),
help="Overwrite an existing file when specified. Default value is false.")
with self.argument_context('storage fs file move') as c:
t_file_content_settings = self.get_sdk('_models#ContentSettings',
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
c.register_content_settings_argument(t_file_content_settings, update=False)
c.extra('file_system_name', options_list=['-f', '--file-system'],
help='File system name.', required=True)
c.extra('path', options_list=['-p', '--path'], required=True,
help="The original file path users want to move in a file system.")
c.argument('new_name', options_list=['--new-path'],
help='The new path to move the file to. The value must have the following format: '
'"{filesystem}/{directory}/{subdirectory}/{file}".')
with self.argument_context('storage fs file upload') as c:
t_file_content_settings = self.get_sdk('_models#ContentSettings',
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
c.register_content_settings_argument(t_file_content_settings, update=False)
c.argument('local_path', options_list=['--source', '-s'],
help='Path of the local file to upload as the file content.')
c.argument('overwrite', arg_type=get_three_state_flag(), help="Overwrite an existing file when specified.")
c.argument('if_match', arg_group='Precondition',
help="An ETag value, or the wildcard character (*). Specify this header to perform the operation "
"only if the resource's ETag matches the value specified.")
c.argument('if_none_match', arg_group='Precondition',
help="An ETag value, or the wildcard character (*). Specify this header to perform the operation "
"only if the resource's ETag does not match the value specified.")
c.argument('if_modified_since', arg_group='Precondition',
help="Commence only if the resource has been modified since the supplied UTC datetime "
"(Y-m-d'T'H:M'Z').")
c.argument('if_unmodified_since', arg_group='Precondition',
help="Commence only if the resource has not been modified since the supplied UTC datetime "
"(Y-m-d'T'H:M'Z').")
c.argument('permissions', permissions_type)
c.argument('umask', umask_type)
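# Illustrative invocation of `storage fs file upload` using the arguments above
# (account, file system and path names are placeholders):
#   az storage fs file upload --account-name mystorageaccount -f myfilesystem -p data/log.txt -s ./log.txt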
for item in ['set', 'show']:
with self.argument_context('storage fs access {}'.format(item)) as c:
from ._validators import validate_access_control
c.extra('file_system_name', options_list=['-f', '--file-system'],
help='File system name.', required=True)
c.extra('directory_path', options_list=['-p', '--path'],
help='The path to a file or directory in the specified file system.', required=True)
c.argument('permissions', validator=validate_access_control)
c.ignore('upn')
for item in ['set-recursive', 'update-recursive', 'remove-recursive']:
with self.argument_context('storage fs access {}'.format(item)) as c:
c.register_fs_directory_arguments()
c.argument('acl', help='The value is a comma-separated list of access control entries. Each access control '
'entry (ACE) consists of a scope, a type, a user or group identifier, and permissions in the '
'format "[scope:][type]:[id]:[permissions]". For more information, please refer to '
'https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control.')
c.extra('continuation',
help='Optional continuation token that can be used to resume previously stopped operation.')
c.extra('batch_size', type=int, help='Optional. If data set size exceeds batch size then operation will '
'be split into multiple requests so that progress can be tracked. Batch size should be between 1 '
'and 2000. The default when unspecified is 2000.')
c.extra('max_batches', type=int, help='Optional. Define maximum number of batches that single change '
'Access Control operation can execute. If maximum is reached before all sub-paths are processed, '
'then continuation token can be used to resume operation. Empty value indicates that maximum '
                'number of batches is unbounded and the operation continues until the end.')
c.extra('continue_on_failure', arg_type=get_three_state_flag(),
help='If set to False, the operation will terminate quickly on encountering user errors (4XX). '
'If True, the operation will ignore user errors and proceed with the operation on other '
'sub-entities of the directory. Continuation token will only be returned when '
'--continue-on-failure is True in case of user errors. If not set the default value is False '
'for this.')
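
# Illustration only (not part of the azure-cli source): the --acl value described in the
# help text above is a comma-separated list of "[scope:][type]:[id]:[permissions]" entries.
# The object id below is a made-up placeholder, not a real AAD principal.
EXAMPLE_ACL = (
    "user::rwx,group::r-x,other::---,"
    "user:de9327d3-aaaa-bbbb-cccc-000000000000:r-x,"
    "default:user:de9327d3-aaaa-bbbb-cccc-000000000000:r-x"
)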
| 69.73894
| 166
| 0.655781
|
c5befa9da5a8cf19ba3858634560af95900c1a42
| 744
|
py
|
Python
|
sandbox/lib/jumpscale/JumpscaleLibs/clients/mongodbclient/MongoEngineClient.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | null | null | null |
sandbox/lib/jumpscale/JumpscaleLibs/clients/mongodbclient/MongoEngineClient.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | 117
|
2019-09-01T11:59:19.000Z
|
2020-07-14T11:10:08.000Z
|
sandbox/lib/jumpscale/JumpscaleLibs/clients/mongodbclient/MongoEngineClient.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | 2
|
2020-04-06T15:21:23.000Z
|
2020-05-07T04:29:53.000Z
|
from Jumpscale import j
try:
from mongoengine import connect
except:
j.builders.runtimes.python3.pip_package_install("mongoengine")
from mongoengine import connect
JSConfigClient = j.baseclasses.object_config
class MongoEngineClient(JSConfigClient):
_SCHEMATEXT = """
@url = jumpscale.MongoEngine.client
name** = "default" (S)
host = "localhost" (S)
port = 27017 (ipport)
username = "" (S)
password_ = "" (S)
alias = "" (S)
db = "" (S)
authentication_source = "" (S)
authentication_mechanism = "" (S)
ssl = False (B)
replicaset = "" (S)
"""
    def _init(self, **kwargs):
        # Use the configured connection settings rather than discarding them.
        connect(db=self.db or None, alias=self.alias or "default", host=self.host,
                port=self.port, username=self.username or None,
                password=self.password_ or None, ssl=self.ssl)
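
# Illustrative sketch only (not part of the original client): once _init has called
# connect(), ordinary mongoengine documents can be defined and saved on that connection,
# e.g.:
#
#   from mongoengine import Document, StringField
#
#   class Greeting(Document):
#       text = StringField(required=True)
#
#   Greeting(text="hello").save()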
| 23.25
| 66
| 0.573925
|
9d4627ce2de9066dd559f5fda1cb9ddd23aaa316
| 7,193
|
py
|
Python
|
huaweicloud-sdk-ocr/huaweicloudsdkocr/v1/model/handwriting_request_body.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-11-03T07:54:50.000Z
|
2021-11-03T07:54:50.000Z
|
huaweicloud-sdk-ocr/huaweicloudsdkocr/v1/model/handwriting_request_body.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-ocr/huaweicloudsdkocr/v1/model/handwriting_request_body.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pprint
import re
import six
class HandwritingRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'image': 'str',
'url': 'str',
'quick_mode': 'bool',
'char_set': 'str',
'detect_direction': 'bool'
}
attribute_map = {
'image': 'image',
'url': 'url',
'quick_mode': 'quick_mode',
'char_set': 'char_set',
'detect_direction': 'detect_direction'
}
def __init__(self, image=None, url=None, quick_mode=None, char_set=None, detect_direction=None):
"""HandwritingRequestBody - a model defined in huaweicloud sdk"""
self._image = None
self._url = None
self._quick_mode = None
self._char_set = None
self._detect_direction = None
self.discriminator = None
if image is not None:
self.image = image
if url is not None:
self.url = url
if quick_mode is not None:
self.quick_mode = quick_mode
if char_set is not None:
self.char_set = char_set
if detect_direction is not None:
self.detect_direction = detect_direction
@property
def image(self):
"""Gets the image of this HandwritingRequestBody.
        Either this parameter or url must be provided. Image data as a base64-encoded string; the encoded size must not exceed 10 MB. The shortest image side must be at least 8 px and the longest at most 8192 px. Supported formats: JPEG, JPG, PNG, BMP, TIFF. See [here](https://support.huaweicloud.com/ocr_faq/ocr_01_0032.html) for details on obtaining the base64-encoded string of an image file.
:return: The image of this HandwritingRequestBody.
:rtype: str
"""
return self._image
@image.setter
def image(self, image):
"""Sets the image of this HandwritingRequestBody.
        Either this parameter or url must be provided. Image data as a base64-encoded string; the encoded size must not exceed 10 MB. The shortest image side must be at least 8 px and the longest at most 8192 px. Supported formats: JPEG, JPG, PNG, BMP, TIFF. See [here](https://support.huaweicloud.com/ocr_faq/ocr_01_0032.html) for details on obtaining the base64-encoded string of an image file.
:param image: The image of this HandwritingRequestBody.
:type: str
"""
self._image = image
@property
def url(self):
"""Gets the url of this HandwritingRequestBody.
        Either this parameter or image must be provided. URL of the image. Currently supported: public http/https URLs, and OBS URLs (using OBS data requires authorization, i.e. service authorization, temporary authorization or anonymous public authorization; see [Configuring OBS Access Permissions](https://support.huaweicloud.com/api-ocr/ocr_03_0132.html)). Note: the API response time depends on how long the image takes to download; if the download takes too long, the call fails. Make sure the storage service hosting the image is stable and reliable; storing the image data in OBS is recommended.
:return: The url of this HandwritingRequestBody.
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this HandwritingRequestBody.
        Either this parameter or image must be provided. URL of the image. Currently supported: public http/https URLs, and OBS URLs (using OBS data requires authorization, i.e. service authorization, temporary authorization or anonymous public authorization; see [Configuring OBS Access Permissions](https://support.huaweicloud.com/api-ocr/ocr_03_0132.html)). Note: the API response time depends on how long the image takes to download; if the download takes too long, the call fails. Make sure the storage service hosting the image is stable and reliable; storing the image data in OBS is recommended.
:param url: The url of this HandwritingRequestBody.
:type: str
"""
self._url = url
@property
def quick_mode(self):
"""Gets the quick_mode of this HandwritingRequestBody.
        Quick-mode switch for single-line text images (the image must contain only one line of text, covering more than 50% of the image); when enabled, recognition results are returned faster. Options: true to enable quick mode; false to disable it. Note: defaults to false when the parameter is not provided.
:return: The quick_mode of this HandwritingRequestBody.
:rtype: bool
"""
return self._quick_mode
@quick_mode.setter
def quick_mode(self, quick_mode):
"""Sets the quick_mode of this HandwritingRequestBody.
        Quick-mode switch for single-line text images (the image must contain only one line of text, covering more than 50% of the image); when enabled, recognition results are returned faster. Options: true to enable quick mode; false to disable it. Note: defaults to false when the parameter is not provided.
:param quick_mode: The quick_mode of this HandwritingRequestBody.
:type: bool
"""
self._quick_mode = quick_mode
@property
def char_set(self):
"""Gets the char_set of this HandwritingRequestBody.
        Character-set setting; restricts the output character set as needed. Options: \"digit\": digits only; \"letter\": upper- and lower-case letters; \"digit_letter\": digits and letters; \"general\": digits, letters and Chinese characters. Note: defaults to \"general\" when the parameter is not provided.
:return: The char_set of this HandwritingRequestBody.
:rtype: str
"""
return self._char_set
@char_set.setter
def char_set(self, char_set):
"""Sets the char_set of this HandwritingRequestBody.
        Character-set setting; restricts the output character set as needed. Options: \"digit\": digits only; \"letter\": upper- and lower-case letters; \"digit_letter\": digits and letters; \"general\": digits, letters and Chinese characters. Note: defaults to \"general\" when the parameter is not provided.
:param char_set: The char_set of this HandwritingRequestBody.
:type: str
"""
self._char_set = char_set
@property
def detect_direction(self):
"""Gets the detect_direction of this HandwritingRequestBody.
        Switch for correcting the image tilt angle. Options: true to correct the tilt angle; false to leave it unchanged. Note: correction at any angle is supported; defaults to false when the parameter is not provided.
:return: The detect_direction of this HandwritingRequestBody.
:rtype: bool
"""
return self._detect_direction
@detect_direction.setter
def detect_direction(self, detect_direction):
"""Sets the detect_direction of this HandwritingRequestBody.
        Switch for correcting the image tilt angle. Options: true to correct the tilt angle; false to leave it unchanged. Note: correction at any angle is supported; defaults to false when the parameter is not provided.
:param detect_direction: The detect_direction of this HandwritingRequestBody.
:type: bool
"""
self._detect_direction = detect_direction
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HandwritingRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
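
# Illustrative usage sketch (not produced by the SDK code generator): build a request
# body and inspect the payload dict. The base64 string below is a placeholder, not a
# real image.
if __name__ == "__main__":
    example = HandwritingRequestBody(image="aGVsbG8=", quick_mode=False, char_set="general")
    print(example.to_dict())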
| 32.844749
| 265
| 0.608786
|
c225d3b438c7673828bafcc07c81943dedb7c2bd
| 6,951
|
py
|
Python
|
dump_percepnet.py
|
Ryuk17/PercepNet
|
94e91f1db242447593098afc1a844b822e154e09
|
[
"BSD-3-Clause"
] | 170
|
2020-11-03T08:00:43.000Z
|
2022-03-31T12:35:25.000Z
|
dump_percepnet.py
|
Ryuk17/PercepNet
|
94e91f1db242447593098afc1a844b822e154e09
|
[
"BSD-3-Clause"
] | 31
|
2020-11-23T05:13:03.000Z
|
2022-03-25T13:18:58.000Z
|
dump_percepnet.py
|
Ryuk17/PercepNet
|
94e91f1db242447593098afc1a844b822e154e09
|
[
"BSD-3-Clause"
] | 60
|
2020-11-03T08:00:49.000Z
|
2022-03-25T03:16:19.000Z
|
#!/usr/bin/python3
'''Copyright (c) 2017-2018 Mozilla
2020-2021 Seonghun Noh
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import torch
import sys
import rnn_train
from torch.nn import Sequential, GRU, Conv1d, Linear
import numpy as np
def printVector(f, vector, name, dtype='float'):
#torch.transpose(vector, 0, 1)
v = np.reshape(vector.detach().numpy(), (-1))
#print('static const float ', name, '[', len(v), '] = \n', file=f)
f.write('static const {} {}[{}] = {{\n '.format(dtype, name, len(v)))
for i in range(0, len(v)):
f.write('{}'.format(v[i]))
if (i!=len(v)-1):
f.write(',')
else:
break
if (i%8==7):
f.write("\n ")
else:
f.write(" ")
#print(v, file=f)
f.write('\n};\n\n')
return
def dump_sequential_module(self, f, name):
activation = self[1].__class__.__name__.upper()
self[0].dump_data(f,name,activation)
Sequential.dump_data = dump_sequential_module
def dump_linear_module(self, f, name, activation):
print("printing layer " + name)
weight = self.weight
bias = self.bias
#print("weight:", weight)
#activation = self[1].__class__.__name__.upper()
printVector(f, torch.transpose(weight, 0, 1), name + '_weights')
printVector(f, bias, name + '_bias')
f.write('const DenseLayer {} = {{\n {}_bias,\n {}_weights,\n {}, {}, ACTIVATION_{}\n}};\n\n'
.format(name, name, name, weight.shape[1], weight.shape[0], activation))
Linear.dump_data = dump_linear_module
def convert_gru_input_kernel(kernel):
kernel_r, kernel_z, kernel_h = np.vsplit(kernel, 3)
kernels = [kernel_z, kernel_r, kernel_h]
return torch.tensor(np.hstack([k.T for k in kernels]))
def convert_gru_recurrent_kernel(kernel):
kernel_r, kernel_z, kernel_h = np.vsplit(kernel, 3)
kernels = [kernel_z, kernel_r, kernel_h]
return torch.tensor(np.hstack([k.T for k in kernels]))
def convert_bias(bias):
bias = bias.reshape(2, 3, -1)
return torch.tensor(bias[:, [1, 0, 2], :].reshape(-1))
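
# Note added for clarity (not in the original script): torch.nn.GRU stacks its weights in
# gate order (reset, update, new), while the GRULayer consumed by the C inference code is
# assumed to expect (update, reset, new). The three convert_* helpers above therefore split
# the stacked matrices into thirds, swap the first two blocks and transpose them into the
# flat layout that printVector writes out.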
def dump_gru_module(self, f, name):
print("printing layer " + name )
weights = convert_gru_input_kernel(self.weight_ih_l0.detach().numpy())
recurrent_weights = convert_gru_recurrent_kernel(self.weight_hh_l0.detach().numpy())
bias = torch.cat((self.bias_ih_l0, self.bias_hh_l0))
bias = convert_bias(bias.detach().numpy())
printVector(f, weights, name + '_weights')
printVector(f, recurrent_weights, name + '_recurrent_weights')
printVector(f, bias, name + '_bias')
if hasattr(self, 'activation'):
activation = self.activation.__name__.upper()
else:
activation = 'TANH'
if hasattr(self, 'reset_after') and not self.reset_after:
reset_after = 0
else:
reset_after = 1
neurons = weights.shape[0]//3
#max_rnn_neurons = max(max_rnn_neurons, neurons)
print('const GRULayer {} = {{\n {}_bias,\n {}_weights,\n {}_recurrent_weights,\n {}, {}, ACTIVATION_{}, {}\n}};\n\n'
.format(name, name, name, name, weights.shape[0], weights.shape[1]//3, activation, reset_after))
f.write('const GRULayer {} = {{\n {}_bias,\n {}_weights,\n {}_recurrent_weights,\n {}, {}, ACTIVATION_{}, {}\n}};\n\n'
.format(name, name, name, name, weights.shape[0], weights.shape[1]//3, activation, reset_after))
#hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights[0].shape[1]//3))
#hf.write('#define {}_STATE_SIZE {}\n'.format(name.upper(), weights[0].shape[1]//3))
#hf.write('extern const GRULayer {};\n\n'.format(name))
GRU.dump_data = dump_gru_module
def dump_conv1d_module(self, f, name, activation):
print("printing layer " + name )
weights = self.weight
printVector(f, weights.permute(2,1,0), name + '_weights')
printVector(f, self.bias, name + '_bias')
#activation = self.activation.__name__.upper()
#max_conv_inputs = max(max_conv_inputs, weights[0].shape[1]*weights[0].shape[0])
    #warn! activation hard coded
print('const Conv1DLayer {} = {{\n {}_bias,\n {}_weights,\n {}, {}, {}, ACTIVATION_{}\n}};\n\n'
.format(name, name, name, weights.shape[1], weights.shape[2], weights.shape[0], activation))
f.write('const Conv1DLayer {} = {{\n {}_bias,\n {}_weights,\n {}, {}, {}, ACTIVATION_{}\n}};\n\n'
.format(name, name, name, weights.shape[1], weights.shape[2], weights.shape[0], activation))
#hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights[0].shape[2]))
#hf.write('#define {}_STATE_SIZE ({}*{})\n'.format(name.upper(), weights[0].shape[1], (weights[0].shape[0]-1)))
#hf.write('#define {}_DELAY {}\n'.format(name.upper(), (weights[0].shape[0]-1)//2))
#hf.write('extern const Conv1DLayer {};\n\n'.format(name));
Conv1d.dump_data = dump_conv1d_module
if __name__ == '__main__':
model = rnn_train.PercepNet()
#model = (
model.load_state_dict(torch.load(sys.argv[1]))
if len(sys.argv) > 2:
cfile = sys.argv[2]
#hfile = sys.argv[3];
else:
cfile = 'src/nnet_data.cpp'
#hfile = 'nnet_data.h'
f = open(cfile, 'w')
#hf = open(hfile, 'w')
f.write('/*This file is automatically generated from a Pytorch model*/\n\n')
f.write('#ifdef HAVE_CONFIG_H\n#include "config.h"\n#endif\n\n#include "nnet.h"\n#include "nnet_data.h"\n\n')
for name, module in model.named_children():
module.dump_data(f, name)
f.write('extern const RNNModel percepnet_model_orig = {\n')
for name, module in model.named_children():
f.write(' &{},\n'.format(name))
f.write('};\n')
f.close()
print("done")
| 44.845161
| 130
| 0.651273
|
d20211d98a98fb41830c0890c55821e6b14a847a
| 12,742
|
py
|
Python
|
convert-scripts/office.py
|
RedcraneStudio/redcrane-engine
|
4da8a7caedd6da5ffb7eda7da8c9a42396273bf6
|
[
"BSD-3-Clause"
] | 5
|
2017-05-16T22:25:59.000Z
|
2020-02-10T20:19:35.000Z
|
convert-scripts/office.py
|
RedCraneStudio/redcrane-engine
|
4da8a7caedd6da5ffb7eda7da8c9a42396273bf6
|
[
"BSD-3-Clause"
] | 1
|
2017-10-13T00:35:56.000Z
|
2017-10-13T00:35:56.000Z
|
convert-scripts/office.py
|
RedCraneStudio/redcrane-engine
|
4da8a7caedd6da5ffb7eda7da8c9a42396273bf6
|
[
"BSD-3-Clause"
] | 2
|
2017-10-07T04:40:52.000Z
|
2021-09-27T05:59:05.000Z
|
# Copyright (C) 2016 Luke San Antonio Bialecki
# All rights reserved.
import sys
import json
import base64
import struct
import pdb
import numpy as np
RGB_FORMAT = 6407
RGB_ALPHA_FORMAT = 6408
SRGB_FORMAT = 0x8C40
SRGB_ALPHA_FORMAT = 0x8C42
TEXTURE_2D_TARGET = 3553
UNSIGNED_BYTE_TYPE = 5121
CLAMP_TO_EDGE = 33071
LINEAR_MIPMAP_LINEAR = 9987
LINEAR = 9729
OBJECTS_NAME = 'nodes'
MESHES_NAME = 'meshes'
ACCESSORS_NAME = 'accessors'
IMAGES_NAME = 'images'
TEXTURES_NAME = 'textures'
SAMPLERS_NAME = 'samplers'
MATERIALS_NAME = 'materials'
BUFFER_VIEWS_NAME = 'bufferViews'
BUFFERS_NAME = 'buffers'
LAMPS_NAME = 'lights'
BASE64_DATA_HEADER = 'data:text/plain;base64,'
def get_buffer_view(gltf, buffer_view_name):
# Get the buffer
buffer_view = gltf[BUFFER_VIEWS_NAME][buffer_view_name]
buf_offset = buffer_view['byteOffset']
buf_length = buffer_view['byteLength']
buffer = gltf[BUFFERS_NAME][buffer_view['buffer']]
# Handle embedded data
data = None
if BASE64_DATA_HEADER == buffer['uri'][:len(BASE64_DATA_HEADER)]:
data = base64.b64decode(buffer['uri'][23:])
return data[buf_offset:buf_offset + buf_length]
def set_fake_mesh(gltf, obj_name, vertices, indices):
# Build the transformation matrix of this mesh
node_mat = np.identity(4)
# For each node in our hiearchy, basically.
this_node = obj_name
while this_node != "":
# Build the total transformation matrix
obj = gltf[OBJECTS_NAME][this_node]
# Although the matrix is technically stored in column major order, numpy
        # expects row-major so just roll with it, inverting the order of all
# transformations.
local_mat = np.array(obj['matrix']).reshape((4,4))
node_mat = np.dot(node_mat, local_mat)
# Find this node's parent
parent_node = ""
for node_name, node_obj in gltf[OBJECTS_NAME].items():
if this_node in node_obj['children']:
# We have our parent
parent_node = node_name
this_node = parent_node
obj = gltf[OBJECTS_NAME][obj_name]
# Find the mesh that this node (sorta) contains.
mesh_name = obj['meshes'][0]
# Remove the mesh
old_mesh = gltf[MESHES_NAME].pop(mesh_name)
if len(old_mesh['primitives']) > 1:
raise RuntimeError(("Fake mesh {} must only have one primitive, does"
" the material have more than one"
" material?").format(mesh))
prim = old_mesh['primitives'][0]
old_vertices_name = prim['attributes']['POSITION']
old_indices_name = prim['indices']
# Remove normals
old_normals_name = prim['attributes'].get('NORMALS', '')
if old_normals_name != '':
gltf[ACCESSORS_NAME].pop(old_normals_name)
# Change accessor names
vertices_obj = gltf[ACCESSORS_NAME].pop(old_vertices_name)
indices_obj = gltf[ACCESSORS_NAME].pop(old_indices_name)
# Remove normals
gltf[ACCESSORS_NAME].update({vertices: vertices_obj})
gltf[ACCESSORS_NAME].update({indices: indices_obj})
offset = vertices_obj['byteOffset']
stride = vertices_obj['byteStride']
count = vertices_obj['count']
vertices_data_view = get_buffer_view(gltf, vertices_obj['bufferView'])
if vertices_data_view == None:
        raise RuntimeError(('Failed to find vertices data for'
                            ' mesh {}').format(mesh_name))
#pdb.set_trace()
vertices_data = vertices_data_view[offset:]
# TODO: We assume 3 four byte floats per position value
out_vertices_data = bytearray(count * 3 * 4)
for i in range(count):
# Get the vertex data
x, y, z = struct.unpack_from('<fff', vertices_data, i * stride)
# Transform
new_pt = np.dot(np.array([x, y, z, 1.0]), node_mat)
# Repack
struct.pack_into('<fff', out_vertices_data, i * 3 * 4, new_pt[0],
new_pt[1], new_pt[2])
# Make a new buffer for CPU data, make a new buffer view and finally have
# the new vertices accessor reference the buffer view.
buffer_name = vertices + '_buffer'
gltf[BUFFERS_NAME][buffer_name] = {
'byteLength': len(out_vertices_data),
'uri': BASE64_DATA_HEADER + base64.b64encode(out_vertices_data).decode('ascii')
}
buffer_view_name = vertices + '_buffer_view'
gltf[BUFFER_VIEWS_NAME][buffer_view_name] = {
'buffer': buffer_name,
'byteLength': len(out_vertices_data),
'byteOffset': 0
}
gltf[ACCESSORS_NAME][vertices]['bufferView'] = buffer_view_name
gltf[ACCESSORS_NAME][vertices]['byteOffset'] = 0
gltf[ACCESSORS_NAME][vertices]['byteStride'] = 3 * 4
# Remove the object
gltf[OBJECTS_NAME].pop(obj_name)
# And all references to it!
for node_name, node in gltf[OBJECTS_NAME].items():
if obj_name in node['children']:
node['children'].remove(obj_name)
def remove_unused_accessors(gltf, save_these = []):
used_accessors = []
for mesh_name, mesh in gltf[MESHES_NAME].items():
for prim in mesh['primitives']:
these_accessors = []
these_accessors.append(prim['indices'])
these_accessors.extend(prim['attributes'].values())
for access in these_accessors:
if access not in used_accessors:
used_accessors.append(access)
rm_accessors = []
for name, access in gltf[ACCESSORS_NAME].items():
        if name not in used_accessors and name not in save_these:
rm_accessors.append(name)
for buf in rm_accessors:
del gltf[ACCESSORS_NAME][buf]
def remove_unused_buffer_views(gltf):
used_buffers = []
for name, accessor in gltf[ACCESSORS_NAME].items():
used_buffers.append(accessor['bufferView'])
rm_buffers = []
for buf_name, buf in gltf[BUFFER_VIEWS_NAME].items():
if buf_name not in used_buffers:
rm_buffers.append(buf_name)
for buf in rm_buffers:
del gltf[BUFFER_VIEWS_NAME][buf]
def remove_unused_buffers(gltf):
used_buffers = []
for name, buffer_view in gltf[BUFFER_VIEWS_NAME].items():
used_buffers.append(buffer_view['buffer'])
rm_buffers = []
for buf_name, buf in gltf[BUFFERS_NAME].items():
if buf_name not in used_buffers:
rm_buffers.append(buf_name)
for buf in rm_buffers:
del gltf[BUFFERS_NAME][buf]
def remove_unused_data(gltf, save_accessors = []):
remove_unused_accessors(gltf, save_accessors)
remove_unused_buffer_views(gltf)
remove_unused_buffers(gltf)
def add_textures(gltf, textures, sampler):
if TEXTURES_NAME not in gltf:
gltf[TEXTURES_NAME] = {}
for tex, image in textures.items():
tex_gltf = {}
tex_gltf['format'] = RGB_ALPHA_FORMAT
tex_gltf['internalFormat'] = SRGB_ALPHA_FORMAT
tex_gltf['sampler'] = sampler
tex_gltf['source'] = image
tex_gltf['target'] = TEXTURE_2D_TARGET
tex_gltf['type'] = UNSIGNED_BYTE_TYPE
gltf[TEXTURES_NAME].update({tex: tex_gltf})
def add_images(gltf, images, image_dir):
if IMAGES_NAME not in gltf:
gltf[IMAGES_NAME] = {}
for image, url in images.items():
image_gltf = {}
image_gltf['uri'] = image_dir + '/' + url
gltf[IMAGES_NAME].update({image: image_gltf})
def add_lightmap_sampler(gltf, name):
sampler = {}
sampler['magFilter'] = LINEAR
sampler['minFilter'] = LINEAR_MIPMAP_LINEAR
sampler['wrapS'] = CLAMP_TO_EDGE
sampler['wrapT'] = CLAMP_TO_EDGE
gltf[SAMPLERS_NAME][name] = sampler
def make_unique_materials(gltf, mesh):
for prim in mesh['primitives']:
# Copy the material
mat_name = prim['material']
material = gltf[MATERIALS_NAME][mat_name]
# Add a new material with a new name
new_name = ''
for i in range(1,999):
new_name = mat_name + '.' + str(i)
if new_name not in gltf[MATERIALS_NAME]:
break
# Replace this primitive with that material
gltf[MATERIALS_NAME][new_name] = material.copy()
prim['material'] = new_name
def get_mesh(gltf, obj, i = 0):
meshes = obj.get('meshes', [])
# Too few meshes
if i >= len(meshes):
raise RuntimeError("Object doesn't have a mesh at index {}".format(i))
mesh = meshes[i]
return gltf[MESHES_NAME][mesh]
def set_diffusemap(gltf, obj, lightmap):
mesh_name = obj['meshes'][0]
mesh = gltf[MESHES_NAME][mesh_name]
for primitive in mesh['primitives']:
mat_name = primitive['material']
mat = gltf[MATERIALS_NAME][mat_name]
# This has the effect of removing most values for the given material.
mat['values'] = {'lightmap': lightmap}
set_technique(gltf, obj, 'forward_diffusemap')
def get_material(gltf, prim):
mat_name = prim.get('material', '')
if mat_name == '': return None
return gltf[MATERIALS_NAME][mat_name]
def remove_material_values(gltf, mesh, rm_names):
for prim in mesh['primitives']:
mat = get_material(gltf, prim)
if mat == None: continue
values = mat['values']
for name in rm_names:
if name in values:
del values[name]
def adjust_shininess(gltf, mesh, name):
for prim in mesh['primitives']:
mat = get_material(gltf, prim)
if mat == None: continue
values = mat['values']
if name in values:
shiny = values[name] / 50.0 * 16.0
if shiny > 1.0:
values[name] = shiny
def set_technique(gltf, obj, technique):
mesh = get_mesh(gltf, obj)
for prim in mesh['primitives']:
mat = get_material(gltf, prim)
if mat == None:
continue
mat['technique'] = technique
def remove_texcoords(gltf, mesh):
for prim in mesh['primitives']:
rm_names = []
for attrib_semantic, attrib_value in prim['attributes'].items():
# String matching!
if "TEXCOORD" in attrib_semantic:
rm_names.append(attrib_semantic)
for name in rm_names:
del prim['attributes'][name]
def update_fucked_texcoords(gltf, mesh):
for prim in mesh['primitives']:
if 'TEXCOORD_UVMap' in prim['attributes']:
old_mapping = prim['attributes'].pop('TEXCOORD_UVMap')
prim['attributes']['TEXCOORD_0'] = old_mapping
def remove_unused_materials(gltf):
mats_used = []
for mesh_name, mesh in gltf[MESHES_NAME].items():
for prim in mesh['primitives']:
mat_name = prim['material']
if mat_name not in mats_used:
mats_used.append(mat_name)
rm_mats = []
for mat in gltf[MATERIALS_NAME]:
if mat not in mats_used:
rm_mats.append(mat)
for mat in rm_mats:
del gltf[MATERIALS_NAME][mat]
def remove_unmaterialed_meshes(gltf):
for mesh_name, mesh in gltf[MESHES_NAME].items():
rm_prims = []
for i, prim in enumerate(mesh['primitives']):
if prim.get('material', '') == '':
rm_prims.append(i)
for prim_i in rm_prims:
del mesh['primitives'][prim_i]
def set_lamps_state(gltf, lamps, state):
for key, lamp in gltf['extras'][LAMPS_NAME].items():
if key in lamps:
lamp['active'] = state
def turn_lamps_off(gltf, lamps):
set_lamps_state(gltf, lamps, False)
def turn_lamps_on(gltf, lamps):
set_lamps_state(gltf, lamps, True)
if __name__ == '__main__':
if len(sys.argv) < 3:
sys.stderr.write('usage: ' + sys.argv[0] + ' <map.gltf> <out.gltf>\n')
sys.exit()
# Load glTF
with open(sys.argv[1]) as f:
gltf = json.load(f)
set_fake_mesh(gltf, 'Room', 'Collision_Vertices', 'Collision_Indices')
for obj_name, obj in gltf[OBJECTS_NAME].items():
try:
mesh = get_mesh(gltf, obj)
except RuntimeError:
continue
except KeyError:
raise
remove_material_values(gltf, mesh, ['specular', 'emission',
'ambient', 'uv_layers', 'textures'])
adjust_shininess(gltf, mesh, 'shininess')
remove_texcoords(gltf, mesh)
# Set dynamic lighting
set_technique(gltf, obj, 'deferred_dynamic_lights')
remove_unused_materials(gltf)
remove_unmaterialed_meshes(gltf)
turn_lamps_off(gltf, ['Left_Lamp_Spot', 'Night_Light', 'Right_Lamp_Spot'])
turn_lamps_on(gltf, ['Desk_Lamp'])
#remove_unused_data(gltf, ['Collision_Vertices', 'Collision_Indices'])
with open(sys.argv[2], 'w') as f:
json.dump(gltf, f, indent=4)
f.write('\n')
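
# Example invocation (illustrative; the file names are placeholders):
#   python office.py office.gltf office.converted.gltf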
| 31.230392
| 87
| 0.63789
|
e37d9341d1effdb86e3c4eae11aa1e761621d1d4
| 2,684
|
py
|
Python
|
models/phase3_eval/assemble_pysb.py
|
jmuhlich/indra
|
feab2c08541ea73f328579faa6a21b08082cb026
|
[
"BSD-2-Clause"
] | null | null | null |
models/phase3_eval/assemble_pysb.py
|
jmuhlich/indra
|
feab2c08541ea73f328579faa6a21b08082cb026
|
[
"BSD-2-Clause"
] | null | null | null |
models/phase3_eval/assemble_pysb.py
|
jmuhlich/indra
|
feab2c08541ea73f328579faa6a21b08082cb026
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from os.path import join as pjoin
import os.path
from pysb import Observable
from pysb.export.kappa import KappaExporter
from indra.assemblers import PysbAssembler, IndexCardAssembler
from indra.statements import Modification
import indra.tools.assemble_corpus as ac
def assemble_pysb(stmts, data_genes, out_file):
"""Return an assembled PySB model."""
stmts = ac.filter_direct(stmts)
stmts = ac.filter_belief(stmts, 0.95)
stmts = ac.filter_top_level(stmts)
stmts = ac.filter_gene_list(stmts, data_genes, 'all')
stmts = ac.reduce_activities(stmts)
pa = PysbAssembler()
pa.add_statements(stmts)
model = pa.make_model()
# Add observables
o = Observable('MAPK1p', model.monomers['MAPK1'](T185='p', Y187='p'))
model.add_component(o)
o = Observable('MAPK3p', model.monomers['MAPK3'](T202='p', Y204='p'))
model.add_component(o)
o = Observable('GSK3Ap', model.monomers['GSK3A'](S21='p'))
model.add_component(o)
o = Observable('GSK3Bp', model.monomers['GSK3B'](S9='p'))
model.add_component(o)
o = Observable('RPS6p', model.monomers['RPS6'](S235='p'))
model.add_component(o)
o = Observable('EIF4EBP1p', model.monomers['EIF4EBP1'](S65='p'))
model.add_component(o)
o = Observable('JUNp', model.monomers['JUN'](S73='p'))
model.add_component(o)
o = Observable('FOXO3p', model.monomers['FOXO3'](S315='p'))
model.add_component(o)
o = Observable('AKT1p', model.monomers['AKT1'](S473='p'))
model.add_component(o)
o = Observable('AKT2p', model.monomers['AKT2'](S474='p'))
model.add_component(o)
o = Observable('AKT3p', model.monomers['AKT3'](S='p'))
model.add_component(o)
o = Observable('ELK1', model.monomers['ELK1'](S383='p'))
model.add_component(o)
# Set context
pa.set_context('SKMEL28_SKIN')
pa.save_model(out_file)
    ke = KappaExporter(model)
    base_file, _ = os.path.splitext(out_file)
    with open('%s.ka' % base_file, 'wb') as fh:
        fh.write(ke.export().encode('utf-8'))
return model
def assemble_index_cards(stmts, out_folder):
counter = 1
for st in stmts:
if isinstance(st, Modification) and st.enz is None:
continue
pmids = [ev.pmid for ev in st.evidence if ev.pmid is not None]
if pmids:
pmids = ','.join(['PMID%s' % pm for pm in list(set(pmids))])
else:
pmids = 'N/A'
ica = IndexCardAssembler([st], pmc_override=pmids)
ica.make_model()
if ica.cards:
ica.save_model(pjoin(out_folder, 'index_card_%d.json' % counter))
counter += 1
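
# Illustrative sketch (not part of the original script): with `stmts` a list of INDRA
# Statements (e.g. loaded via ac.load_statements) and `data_genes` a list of gene symbols
# from the data, the model and index cards could be produced with:
#   model = assemble_pysb(stmts, data_genes, 'phase3_model.py')
#   assemble_index_cards(stmts, 'index_cards')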
| 37.277778
| 77
| 0.65611
|
baae2d45300358856c8b66b3cb2c053b91c69c81
| 3,406
|
py
|
Python
|
userlog/realtime.py
|
aaugustin/django-userlog
|
6cd34d0a319f6a954fec74420d0d391c32c46060
|
[
"BSD-3-Clause"
] | 53
|
2015-01-04T17:53:40.000Z
|
2021-07-27T06:53:19.000Z
|
userlog/realtime.py
|
aaugustin/django-userlog
|
6cd34d0a319f6a954fec74420d0d391c32c46060
|
[
"BSD-3-Clause"
] | 1
|
2015-07-04T11:42:45.000Z
|
2015-07-04T11:42:45.000Z
|
userlog/realtime.py
|
aaugustin/django-userlog
|
6cd34d0a319f6a954fec74420d0d391c32c46060
|
[
"BSD-3-Clause"
] | 8
|
2015-09-05T08:03:32.000Z
|
2020-02-28T08:48:13.000Z
|
import asyncio
import json
import logging
import asyncio_redis
import django
import websockets
from django.conf import settings
from .util import get_userlog_settings
if settings.DEBUG: # pragma: no cover
logger = logging.getLogger('websockets.server')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
@asyncio.coroutine
def redis_connection():
userlog = settings.CACHES['userlog']
options = userlog.get('OPTIONS', {})
if ':' in userlog['LOCATION']:
host, port = userlog['LOCATION'].rsplit(':', 1)
port = int(port)
else:
host = userlog['LOCATION']
port = 0
db = options.get('DB', 1)
password = options.get('PASSWORD', None)
redis = yield from asyncio_redis.Connection.create(
host=host, port=port, password=password, db=db)
return redis
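
# For reference (inferred from the parsing above, not an official sample): the 'userlog'
# cache entry is expected to look roughly like
#   CACHES = {'userlog': {'BACKEND': '...redis backend...',
#                         'LOCATION': 'localhost:6379',
#                         'OPTIONS': {'DB': 1, 'PASSWORD': None}}}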
@asyncio.coroutine
def userlog(websocket, uri):
token = yield from websocket.recv()
redis = yield from redis_connection()
token_key = 'token:{}'.format(token)
# Access control
username = yield from redis.get(token_key)
if username is None:
return
log_key = 'log:{}'.format(username)
channel = 'userlog:{}'.format(log_key)
try:
if channel.endswith('*'): # logs for several users
# Stream new lines
subscriber = yield from redis.start_subscribe()
yield from subscriber.psubscribe([channel])
while True:
reply = yield from subscriber.next_published()
data = json.loads(reply.value)
data['username'] = reply.channel.rpartition(':')[2]
line = json.dumps(data)
try:
yield from websocket.send(line)
except websockets.ConnectionClosed:
return
else: # logs for a single user
            # Send backlog
log = yield from redis.lrange(log_key, 0, -1)
for item in reversed(list(log)):
line = yield from item
try:
yield from websocket.send(line)
except websockets.ConnectionClosed:
return
# Stream new lines
subscriber = yield from redis.start_subscribe()
yield from subscriber.subscribe([channel])
while True:
reply = yield from subscriber.next_published()
line = reply.value
try:
yield from websocket.send(line)
except websockets.ConnectionClosed:
return
finally:
redis.close()
# Loop one more time to complete the cancellation of redis._reader_f,
# which runs redis._reader_coroutine(), after redis.connection_lost().
yield from asyncio.sleep(0)
if __name__ == '__main__': # pragma: no cover
django.setup()
uri = websockets.parse_uri(get_userlog_settings().websocket_address)
if uri.secure:
raise ValueError("SSL support requires explicit configuration")
start_server = websockets.serve(userlog, uri.host, uri.port)
asyncio.get_event_loop().run_until_complete(start_server)
try:
asyncio.get_event_loop().run_forever()
except KeyboardInterrupt:
pass
| 31.831776
| 78
| 0.589548
|
ffa0f7e550c744edd1fc7145e85db9f1a42cb479
| 17,786
|
py
|
Python
|
pacu/models/awsapi/dataexchange.py
|
RyanJarv/Pacu2
|
27df4bcf296fc8f467d3dc671a47bf9519ce7a24
|
[
"MIT"
] | 1
|
2022-03-09T14:51:54.000Z
|
2022-03-09T14:51:54.000Z
|
pacu/models/awsapi/dataexchange.py
|
RyanJarv/Pacu2
|
27df4bcf296fc8f467d3dc671a47bf9519ce7a24
|
[
"MIT"
] | null | null | null |
pacu/models/awsapi/dataexchange.py
|
RyanJarv/Pacu2
|
27df4bcf296fc8f467d3dc671a47bf9519ce7a24
|
[
"MIT"
] | null | null | null |
# generated by datamodel-codegen:
# filename: openapi.yaml
# timestamp: 2021-12-31T02:47:15+00:00
from __future__ import annotations
from datetime import datetime
from enum import Enum
from typing import Annotated, Any, List, Optional
from pydantic import BaseModel, Extra, Field
class ResourceNotFoundException(BaseModel):
__root__: Any
class ThrottlingException(ResourceNotFoundException):
pass
class ValidationException(ResourceNotFoundException):
pass
class InternalServerException(ResourceNotFoundException):
pass
class ConflictException(ResourceNotFoundException):
pass
class _String(BaseModel):
__root__: str
class ServiceLimitExceededException(ResourceNotFoundException):
pass
class AccessDeniedException(ResourceNotFoundException):
pass
class StartJobResponse(BaseModel):
pass
class Arn(BaseModel):
__root__: Annotated[
str,
Field(
description='An Amazon Resource Name (ARN) that uniquely identifies an AWS resource.'
),
]
class Id(BaseModel):
__root__: Annotated[str, Field(description='A unique identifier.')]
class AssetDestinationEntry(BaseModel):
"""
The destination for the asset.
"""
AssetId: Id
Bucket: _String
Key: Optional[_String] = None
class AssetType(Enum):
"""
The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.
"""
S3_SNAPSHOT = 'S3_SNAPSHOT'
class Timestamp(BaseModel):
__root__: Annotated[
datetime,
Field(
description='Dates and times in AWS Data Exchange are recorded in ISO 8601 format.'
),
]
class AssetName(BaseModel):
__root__: Annotated[
str,
Field(
description='The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.'
),
]
class AssetSourceEntry(BaseModel):
"""
The source of the assets.
"""
Bucket: _String
Key: _String
class CancelJobRequest(BaseModel):
pass
class Code(Enum):
ACCESS_DENIED_EXCEPTION = 'ACCESS_DENIED_EXCEPTION'
INTERNAL_SERVER_EXCEPTION = 'INTERNAL_SERVER_EXCEPTION'
MALWARE_DETECTED = 'MALWARE_DETECTED'
RESOURCE_NOT_FOUND_EXCEPTION = 'RESOURCE_NOT_FOUND_EXCEPTION'
SERVICE_QUOTA_EXCEEDED_EXCEPTION = 'SERVICE_QUOTA_EXCEEDED_EXCEPTION'
VALIDATION_EXCEPTION = 'VALIDATION_EXCEPTION'
MALWARE_SCAN_ENCRYPTED_FILE = 'MALWARE_SCAN_ENCRYPTED_FILE'
class Description(BaseModel):
__root__: Annotated[str, Field(description='A description of a resource.')]
class Name(BaseModel):
__root__: Annotated[str, Field(description='The name of the model.')]
class MapOfString(BaseModel):
pass
class Config:
extra = Extra.allow
class CreateDataSetRequest(BaseModel):
"""
The request body for CreateDataSet.
"""
AssetType: AssetType
Description: Description
Name: Name
Tags: Optional[MapOfString] = None
class Origin(Enum):
"""
A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers). When an owned data set is published in a product, AWS Data Exchange creates a copy of the data set. Subscribers can access that copy of the data set as an entitled data set.
"""
OWNED = 'OWNED'
ENTITLED = 'ENTITLED'
class OriginDetails(BaseModel):
ProductId: _String
class Type(Enum):
IMPORT_ASSETS_FROM_S3 = 'IMPORT_ASSETS_FROM_S3'
IMPORT_ASSET_FROM_SIGNED_URL = 'IMPORT_ASSET_FROM_SIGNED_URL'
EXPORT_ASSETS_TO_S3 = 'EXPORT_ASSETS_TO_S3'
EXPORT_ASSET_TO_SIGNED_URL = 'EXPORT_ASSET_TO_SIGNED_URL'
EXPORT_REVISIONS_TO_S3 = 'EXPORT_REVISIONS_TO_S3'
class State(Enum):
WAITING = 'WAITING'
IN_PROGRESS = 'IN_PROGRESS'
ERROR = 'ERROR'
COMPLETED = 'COMPLETED'
CANCELLED = 'CANCELLED'
TIMED_OUT = 'TIMED_OUT'
class _StringMin0Max16384(BaseModel):
__root__: Annotated[str, Field(max_length=16384, min_length=0)]
class CreateRevisionRequest(BaseModel):
"""
The request body for CreateRevision.
"""
Comment: Optional[_StringMin0Max16384] = None
Tags: Optional[MapOfString] = None
class _Boolean(BaseModel):
__root__: bool
class DataSetEntry(BaseModel):
"""
A data set is an AWS resource with one or more revisions.
"""
Arn: Arn
AssetType: AssetType
CreatedAt: Timestamp
Description: Description
Id: Id
Name: Name
Origin: Origin
OriginDetails: Optional[OriginDetails] = None
SourceId: Optional[Id] = None
UpdatedAt: Timestamp
class DeleteAssetRequest(BaseModel):
pass
class DeleteDataSetRequest(BaseModel):
pass
class DeleteRevisionRequest(BaseModel):
pass
class ImportAssetFromSignedUrlJobErrorDetails(BaseModel):
AssetName: AssetName
class ListOfAssetSourceEntry(BaseModel):
"""
The list of sources for the assets.
"""
__root__: Annotated[
List[AssetSourceEntry], Field(description='The list of sources for the assets.')
]
class Details3(BaseModel):
ImportAssetFromSignedUrlJobErrorDetails: Optional[
ImportAssetFromSignedUrlJobErrorDetails
] = None
ImportAssetsFromS3JobErrorDetails: Optional[ListOfAssetSourceEntry] = None
class ExportAssetToSignedUrlResponseDetails(BaseModel):
"""
The details of the export to signed URL response.
"""
AssetId: Id
DataSetId: Id
RevisionId: Id
SignedUrl: Optional[_String] = None
SignedUrlExpiresAt: Optional[Timestamp] = None
class ListOfAssetDestinationEntry(BaseModel):
"""
The destination where the assets will be exported.
"""
__root__: Annotated[
List[AssetDestinationEntry],
Field(description='The destination where the assets will be exported.'),
]
class ServerSideEncryptionTypes(Enum):
"""
The types of encryption supported in export jobs to Amazon S3.
"""
aws_kms = 'aws:kms'
AES256 = 'AES256'
class GetAssetRequest(BaseModel):
pass
class GetDataSetRequest(BaseModel):
pass
class GetJobRequest(BaseModel):
pass
class GetRevisionRequest(BaseModel):
pass
class _StringMin24Max24PatternAZaZ094AZaZ092AZaZ093(BaseModel):
__root__: Annotated[
str,
Field(
max_length=24,
min_length=24,
regex='^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$',
),
]
class ImportAssetFromSignedUrlResponseDetails(BaseModel):
"""
The details in the response for an import request, including the signed URL and other information.
"""
AssetName: AssetName
DataSetId: Id
Md5Hash: Optional[_StringMin24Max24PatternAZaZ094AZaZ092AZaZ093] = None
RevisionId: Id
SignedUrl: Optional[_String] = None
SignedUrlExpiresAt: Optional[Timestamp] = None
class ImportAssetsFromS3ResponseDetails(BaseModel):
"""
Details from an import from Amazon S3 response.
"""
AssetSources: ListOfAssetSourceEntry
DataSetId: Id
RevisionId: Id
class JobErrorLimitName(Enum):
"""
The name of the limit that was reached.
"""
Assets_per_revision = 'Assets per revision'
Asset_size_in_GB = 'Asset size in GB'
class _Double(BaseModel):
__root__: float
class JobErrorResourceTypes(Enum):
"""
The types of resource which the job error can apply to.
"""
REVISION = 'REVISION'
ASSET = 'ASSET'
class JobError(BaseModel):
"""
An error that occurred with the job request.
"""
Code: Code
Details: Optional[Details3] = None
LimitName: Optional[JobErrorLimitName] = None
LimitValue: Optional[_Double] = None
Message: _String
ResourceId: Optional[_String] = None
ResourceType: Optional[JobErrorResourceTypes] = None
class MaxResults(BaseModel):
__root__: Annotated[int, Field(ge=1.0, le=25.0)]
class ListDataSetRevisionsRequest(BaseModel):
pass
class NextToken(BaseModel):
__root__: Annotated[
str,
Field(
description='The token value retrieved from a previous call to access the next page of results.'
),
]
class ListDataSetsRequest(BaseModel):
pass
class ListOfDataSetEntry(BaseModel):
__root__: List[DataSetEntry]
class ListJobsRequest(BaseModel):
pass
class RevisionDestinationEntry(BaseModel):
"""
The destination where the assets in the revision will be exported.
"""
Bucket: _String
KeyPattern: Optional[_String] = None
RevisionId: Id
class ListRevisionAssetsRequest(BaseModel):
pass
class ListTagsForResourceRequest(BaseModel):
pass
class RevisionEntry(BaseModel):
"""
A revision is a container for one or more assets.
"""
Arn: Arn
Comment: Optional[_StringMin0Max16384] = None
CreatedAt: Timestamp
DataSetId: Id
Finalized: Optional[_Boolean] = None
Id: Id
SourceId: Optional[Id] = None
UpdatedAt: Timestamp
class _DoubleMin0(_Double):
pass
class StartJobRequest(BaseModel):
pass
class TagResourceRequest(BaseModel):
"""
The request body for TagResource.
"""
Tags: MapOfString
class ListOfString(BaseModel):
__root__: List[_String]
class UntagResourceRequest(BaseModel):
pass
class UpdateAssetRequest(BaseModel):
"""
The request body for UpdateAsset.
"""
Name: AssetName
class UpdateDataSetRequest(BaseModel):
"""
The request body for UpdateDataSet.
"""
Description: Optional[Description] = None
Name: Optional[Name] = None
class UpdateRevisionRequest(BaseModel):
"""
The request body for UpdateRevision.
"""
Comment: Optional[_StringMin0Max16384] = None
Finalized: Optional[_Boolean] = None
class CreateDataSetResponse(BaseModel):
Arn: Optional[Arn] = None
AssetType: Optional[AssetType] = None
CreatedAt: Optional[Timestamp] = None
Description: Optional[Description] = None
Id: Optional[Id] = None
Name: Optional[Name] = None
Origin: Optional[Origin] = None
OriginDetails: Optional[OriginDetails] = None
SourceId: Optional[Id] = None
Tags: Optional[MapOfString] = None
UpdatedAt: Optional[Timestamp] = None
class ExportAssetToSignedUrlRequestDetails(BaseModel):
"""
Details of the operation to be performed by the job.
"""
AssetId: Id
DataSetId: Id
RevisionId: Id
class ImportAssetFromSignedUrlRequestDetails(BaseModel):
"""
Details of the operation to be performed by the job.
"""
AssetName: AssetName
DataSetId: Id
Md5Hash: _StringMin24Max24PatternAZaZ094AZaZ092AZaZ093
RevisionId: Id
class ImportAssetsFromS3RequestDetails(ImportAssetsFromS3ResponseDetails):
"""
Details of the operation to be performed by the job.
"""
pass
class CreateRevisionResponse(BaseModel):
Arn: Optional[Arn] = None
Comment: Optional[_StringMin0Max16384] = None
CreatedAt: Optional[Timestamp] = None
DataSetId: Optional[Id] = None
Finalized: Optional[_Boolean] = None
Id: Optional[Id] = None
SourceId: Optional[Id] = None
Tags: Optional[MapOfString] = None
UpdatedAt: Optional[Timestamp] = None
class GetDataSetResponse(CreateDataSetResponse):
pass
class GetRevisionResponse(CreateRevisionResponse):
pass
class ListDataSetsResponse(BaseModel):
DataSets: Optional[ListOfDataSetEntry] = None
NextToken: Optional[NextToken] = None
class ListTagsForResourceResponse(BaseModel):
Tags: Optional[MapOfString] = None
class UpdateDataSetResponse(BaseModel):
Arn: Optional[Arn] = None
AssetType: Optional[AssetType] = None
CreatedAt: Optional[Timestamp] = None
Description: Optional[Description] = None
Id: Optional[Id] = None
Name: Optional[Name] = None
Origin: Optional[Origin] = None
OriginDetails: Optional[OriginDetails] = None
SourceId: Optional[Id] = None
UpdatedAt: Optional[Timestamp] = None
class UpdateRevisionResponse(BaseModel):
Arn: Optional[Arn] = None
Comment: Optional[_StringMin0Max16384] = None
CreatedAt: Optional[Timestamp] = None
DataSetId: Optional[Id] = None
Finalized: Optional[_Boolean] = None
Id: Optional[Id] = None
SourceId: Optional[Id] = None
UpdatedAt: Optional[Timestamp] = None
class S3SnapshotAsset(BaseModel):
"""
The S3 object that is the asset.
"""
Size: _DoubleMin0
class AssetDetails(BaseModel):
S3SnapshotAsset: Optional[S3SnapshotAsset] = None
class AssetEntry(BaseModel):
"""
An asset in AWS Data Exchange is a piece of data that can be stored as an S3 object. The asset can be a structured data file, an image file, or some other data file. When you create an import job for your files, you create an asset in AWS Data Exchange for each of those files.
"""
Arn: Arn
AssetDetails: AssetDetails
AssetType: AssetType
CreatedAt: Timestamp
DataSetId: Id
Id: Id
Name: AssetName
RevisionId: Id
SourceId: Optional[Id] = None
UpdatedAt: Timestamp
class ListOfJobError(BaseModel):
__root__: List[JobError]
class ExportServerSideEncryption(BaseModel):
"""
Encryption configuration of the export job. Includes the encryption type as well as the AWS KMS key. The KMS key is only necessary if you chose the KMS encryption type.
"""
KmsKeyArn: Optional[_String] = None
Type: ServerSideEncryptionTypes
class ExportAssetsToS3ResponseDetails(BaseModel):
"""
Details about the export to Amazon S3 response.
"""
AssetDestinations: ListOfAssetDestinationEntry
DataSetId: Id
Encryption: Optional[ExportServerSideEncryption] = None
RevisionId: Id
class ListOfRevisionDestinationEntry(BaseModel):
"""
The destination where the assets in the revision will be exported.
"""
__root__: Annotated[
List[RevisionDestinationEntry],
Field(
description='The destination where the assets in the revision will be exported.'
),
]
class ExportRevisionsToS3ResponseDetails(BaseModel):
"""
Details about the export revisions to Amazon S3 response.
"""
DataSetId: Id
Encryption: Optional[ExportServerSideEncryption] = None
RevisionDestinations: ListOfRevisionDestinationEntry
class ListOfRevisionEntry(BaseModel):
__root__: List[RevisionEntry]
class ListOfAssetEntry(BaseModel):
__root__: List[AssetEntry]
class ExportAssetsToS3RequestDetails(ExportAssetsToS3ResponseDetails):
"""
Details of the operation to be performed by the job.
"""
pass
class ExportRevisionsToS3RequestDetails(ExportRevisionsToS3ResponseDetails):
"""
Details of the operation to be performed by the job.
"""
pass
class GetAssetResponse(BaseModel):
Arn: Optional[Arn] = None
AssetDetails: Optional[AssetDetails] = None
AssetType: Optional[AssetType] = None
CreatedAt: Optional[Timestamp] = None
DataSetId: Optional[Id] = None
Id: Optional[Id] = None
Name: Optional[AssetName] = None
RevisionId: Optional[Id] = None
SourceId: Optional[Id] = None
UpdatedAt: Optional[Timestamp] = None
class ListDataSetRevisionsResponse(BaseModel):
NextToken: Optional[NextToken] = None
Revisions: Optional[ListOfRevisionEntry] = None
class ListRevisionAssetsResponse(BaseModel):
Assets: Optional[ListOfAssetEntry] = None
NextToken: Optional[NextToken] = None
class UpdateAssetResponse(GetAssetResponse):
pass
class RequestDetails(BaseModel):
"""
The details for the request.
"""
ExportAssetToSignedUrl: Optional[ExportAssetToSignedUrlRequestDetails] = None
ExportAssetsToS3: Optional[ExportAssetsToS3RequestDetails] = None
ExportRevisionsToS3: Optional[ExportRevisionsToS3RequestDetails] = None
ImportAssetFromSignedUrl: Optional[ImportAssetFromSignedUrlRequestDetails] = None
ImportAssetsFromS3: Optional[ImportAssetsFromS3RequestDetails] = None
class CreateJobRequest(BaseModel):
"""
The request body for CreateJob.
"""
Details: RequestDetails
Type: Type
class ResponseDetails(BaseModel):
"""
Details for the response.
"""
ExportAssetToSignedUrl: Optional[ExportAssetToSignedUrlResponseDetails] = None
ExportAssetsToS3: Optional[ExportAssetsToS3ResponseDetails] = None
ExportRevisionsToS3: Optional[ExportRevisionsToS3ResponseDetails] = None
ImportAssetFromSignedUrl: Optional[ImportAssetFromSignedUrlResponseDetails] = None
ImportAssetsFromS3: Optional[ImportAssetsFromS3ResponseDetails] = None
class JobEntry(BaseModel):
"""
AWS Data Exchange Jobs are asynchronous import or export operations used to create or copy assets. A data set owner can both import and export as they see fit. Someone with an entitlement to a data set can only export. Jobs are deleted 90 days after they are created.
"""
Arn: Arn
CreatedAt: Timestamp
Details: ResponseDetails
Errors: Optional[ListOfJobError] = None
Id: Id
State: State
Type: Type
UpdatedAt: Timestamp
class ListOfJobEntry(BaseModel):
__root__: List[JobEntry]
class CreateJobResponse(BaseModel):
Arn: Optional[Arn] = None
CreatedAt: Optional[Timestamp] = None
Details: Optional[ResponseDetails] = None
Errors: Optional[ListOfJobError] = None
Id: Optional[Id] = None
State: Optional[State] = None
Type: Optional[Type] = None
UpdatedAt: Optional[Timestamp] = None
class GetJobResponse(CreateJobResponse):
pass
class ListJobsResponse(BaseModel):
Jobs: Optional[ListOfJobEntry] = None
NextToken: Optional[NextToken] = None
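
# Illustrative sketch (not produced by the code generator): constructing a request model;
# the custom-root wrapper types are filled in explicitly. Name and description are
# placeholder values.
if __name__ == "__main__":
    example_request = CreateDataSetRequest(
        AssetType=AssetType.S3_SNAPSHOT,
        Description=Description(__root__="example data set"),
        Name=Name(__root__="example-name"),
    )
    print(example_request.json())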
| 23.158854
| 300
| 0.712358
|
dd5cb9ee41901adca8637b938027e520b0407503
| 88,044
|
py
|
Python
|
sklearn/linear_model/_ridge.py
|
merlynjocol/scikit-learn
|
cd5385e7112c453afff205fa0ce6a67356cbcf32
|
[
"BSD-3-Clause"
] | 1
|
2022-03-12T12:18:00.000Z
|
2022-03-12T12:18:00.000Z
|
sklearn/linear_model/_ridge.py
|
merlynjocol/scikit-learn
|
cd5385e7112c453afff205fa0ce6a67356cbcf32
|
[
"BSD-3-Clause"
] | null | null | null |
sklearn/linear_model/_ridge.py
|
merlynjocol/scikit-learn
|
cd5385e7112c453afff205fa0ce6a67356cbcf32
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from functools import partial
import warnings
import numpy as np
import numbers
from scipy import linalg
from scipy import sparse
from scipy import optimize
from scipy.sparse import linalg as sp_linalg
from ._base import LinearClassifierMixin, LinearModel
from ._base import _deprecate_normalize, _preprocess_data, _rescale_data
from ._sag import sag_solver
from ..base import MultiOutputMixin, RegressorMixin, is_classifier
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import check_scalar
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..utils.validation import check_is_fitted
from ..utils.validation import _check_sample_weight
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..metrics import check_scoring
from ..exceptions import ConvergenceWarning
from ..utils.sparsefuncs import mean_variance_axis
def _solve_sparse_cg(
X, y, alpha, max_iter=None, tol=1e-3, verbose=0, X_offset=None, X_scale=None
):
def _get_rescaled_operator(X):
X_offset_scale = X_offset / X_scale
def matvec(b):
return X.dot(b) - b.dot(X_offset_scale)
def rmatvec(b):
return X.T.dot(b) - X_offset_scale * np.sum(b)
X1 = sparse.linalg.LinearOperator(shape=X.shape, matvec=matvec, rmatvec=rmatvec)
return X1
n_samples, n_features = X.shape
if X_offset is None or X_scale is None:
X1 = sp_linalg.aslinearoperator(X)
else:
X1 = _get_rescaled_operator(X)
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype
)
# FIXME atol
try:
coef, info = sp_linalg.cg(C, y_column, tol=tol, atol="legacy")
except TypeError:
# old scipy
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype
)
# FIXME atol
try:
coefs[i], info = sp_linalg.cg(
C, y_column, maxiter=max_iter, tol=tol, atol="legacy"
)
except TypeError:
# old scipy
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter, tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn(
"sparse_cg did not converge after %d iterations." % info,
ConvergenceWarning,
)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(
X, y_column, damp=sqrt_alpha[i], atol=tol, btol=tol, iter_lim=max_iter
)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_features = X.shape[1]
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[:: n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True, overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features], dtype=X.dtype)
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[:: n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True, overwrite_a=False).ravel()
A.flat[:: n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[:: n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True, overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn(
"Singular matrix in solving dual problem. Using "
"least-squares solution instead."
)
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[:: n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples], K.dtype)
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[:: n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(
K, target, sym_pos=True, overwrite_a=False
).ravel()
K.flat[:: n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size), dtype=X.dtype)
d[idx] = s_nnz / (s_nnz**2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
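
# Added note: the vectorised expression above is the classical SVD form of the ridge
# solution, w = V @ diag(s / (s**2 + alpha)) @ U.T @ y, with singular values below 1e-15
# treated as zero, which is what makes this solver well behaved for singular X.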
def _solve_lbfgs(
X, y, alpha, positive=True, max_iter=None, tol=1e-3, X_offset=None, X_scale=None
):
"""Solve ridge regression with LBFGS.
The main purpose is fitting with forcing coefficients to be positive.
For unconstrained ridge regression, there are faster dedicated solver methods.
Note that with positive bounds on the coefficients, LBFGS seems faster
than scipy.optimize.lsq_linear.
"""
n_samples, n_features = X.shape
options = {}
if max_iter is not None:
options["maxiter"] = max_iter
config = {
"method": "L-BFGS-B",
"tol": tol,
"jac": True,
"options": options,
}
if positive:
config["bounds"] = [(0, np.inf)] * n_features
if X_offset is not None and X_scale is not None:
X_offset_scale = X_offset / X_scale
else:
X_offset_scale = None
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
for i in range(y.shape[1]):
x0 = np.zeros((n_features,))
y_column = y[:, i]
def func(w):
residual = X.dot(w) - y_column
if X_offset_scale is not None:
residual -= w.dot(X_offset_scale)
f = 0.5 * residual.dot(residual) + 0.5 * alpha[i] * w.dot(w)
grad = X.T @ residual + alpha[i] * w
if X_offset_scale is not None:
grad -= X_offset_scale * np.sum(residual)
return f, grad
result = optimize.minimize(func, x0, **config)
if not result["success"]:
warnings.warn(
"The lbfgs solver did not converge. Try increasing max_iter "
f"or tol. Currently: max_iter={max_iter} and tol={tol}",
ConvergenceWarning,
)
coefs[i] = result["x"]
return coefs
def _get_valid_accept_sparse(is_X_sparse, solver):
if is_X_sparse and solver in ["auto", "sag", "saga"]:
return "csr"
else:
return ["csr", "csc", "coo"]
def ridge_regression(
X,
y,
alpha,
*,
sample_weight=None,
solver="auto",
max_iter=None,
tol=1e-3,
verbose=0,
positive=False,
random_state=None,
return_n_iter=False,
return_intercept=False,
check_input=True,
):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {ndarray, sparse matrix, LinearOperator} of shape \
(n_samples, n_features)
Training data
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values
alpha : float or array-like of shape (n_targets,)
Constant that multiplies the L2 term, controlling regularization
strength. `alpha` must be a non-negative float i.e. in `[0, inf)`.
When `alpha = 0`, the objective is equivalent to ordinary least
squares, solved by the :class:`LinearRegression` object. For numerical
reasons, using `alpha = 0` with the `Ridge` object is not advised.
Instead, you should use the :class:`LinearRegression` object.
If an array is passed, penalties are assumed to be specific to the
targets. Hence they must correspond in number.
sample_weight : float or array-like of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \
'sag', 'saga', 'lbfgs'}, default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than 'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
- 'lbfgs' uses L-BFGS-B algorithm implemented in
`scipy.optimize.minimize`. It can be used only when `positive`
is True.
        The last six solvers all support both dense and sparse data. However, only
'sag', 'sparse_cg', and 'lbfgs' support sparse input when `fit_intercept`
is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
For the 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' and saga solver, the default value is
1000. For 'lbfgs' solver, the default value is 15000.
tol : float, default=1e-3
Precision of the solution.
verbose : int, default=0
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
positive : bool, default=False
When set to ``True``, forces the coefficients to be positive.
Only 'lbfgs' solver is supported in this case.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
return_n_iter : bool, default=False
If True, the method also returns `n_iter`, the actual number of
        iterations performed by the solver.
.. versionadded:: 0.17
return_intercept : bool, default=False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model._preprocess_data before your regression.
.. versionadded:: 0.17
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
.. versionadded:: 0.21
Returns
-------
coef : ndarray of shape (n_features,) or (n_targets, n_features)
Weight vector(s).
n_iter : int, optional
        The actual number of iterations performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or ndarray of shape (n_targets,)
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
Regularization improves the conditioning of the problem and
reduces the variance of the estimates. Larger values specify stronger
regularization. Alpha corresponds to ``1 / (2C)`` in other linear
models such as :class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
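    Examples
    --------
    A minimal usage sketch; the fitted coefficients depend on the randomly
    generated data, so only their shape is shown:
    >>> import numpy as np
    >>> from sklearn.linear_model import ridge_regression
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(100, 4)
    >>> y = X @ np.array([1.0, 2.0, -1.0, 0.5]) + 0.1 * rng.randn(100)
    >>> coef = ridge_regression(X, y, alpha=1.0)
    >>> coef.shape
    (4,)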
"""
return _ridge_regression(
X,
y,
alpha,
sample_weight=sample_weight,
solver=solver,
max_iter=max_iter,
tol=tol,
verbose=verbose,
positive=positive,
random_state=random_state,
return_n_iter=return_n_iter,
return_intercept=return_intercept,
X_scale=None,
X_offset=None,
check_input=check_input,
)
def _ridge_regression(
X,
y,
alpha,
sample_weight=None,
solver="auto",
max_iter=None,
tol=1e-3,
verbose=0,
positive=False,
random_state=None,
return_n_iter=False,
return_intercept=False,
X_scale=None,
X_offset=None,
check_input=True,
):
has_sw = sample_weight is not None
if solver == "auto":
if positive:
solver = "lbfgs"
elif return_intercept:
# sag supports fitting intercept directly
solver = "sag"
elif not sparse.issparse(X):
solver = "cholesky"
else:
solver = "sparse_cg"
if solver not in ("sparse_cg", "cholesky", "svd", "lsqr", "sag", "saga", "lbfgs"):
raise ValueError(
"Known solvers are 'sparse_cg', 'cholesky', 'svd'"
" 'lsqr', 'sag', 'saga' or 'lbfgs'. Got %s." % solver
)
if positive and solver != "lbfgs":
raise ValueError(
"When positive=True, only 'lbfgs' solver can be used. "
f"Please change solver {solver} to 'lbfgs' "
"or set positive=False."
)
if solver == "lbfgs" and not positive:
raise ValueError(
"'lbfgs' solver can be used only when positive=True. "
"Please use another solver."
)
if return_intercept and solver != "sag":
raise ValueError(
"In Ridge, only 'sag' solver can directly fit the "
"intercept. Please change solver to 'sag' or set "
"return_intercept=False."
)
if check_input:
_dtype = [np.float64, np.float32]
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver)
X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype, order="C")
y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError(
"Number of samples in X and y does not correspond: %d != %d"
% (n_samples, n_samples_)
)
if has_sw:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if solver not in ["sag", "saga"]:
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# Some callers of this method might pass alpha as single
# element array which already has been validated.
if alpha is not None and not isinstance(alpha, (np.ndarray, tuple)):
alpha = check_scalar(
alpha,
"alpha",
target_type=numbers.Real,
min_val=0.0,
include_boundaries="left",
)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha, dtype=X.dtype).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError(
"Number of targets and number of penalties do not correspond: %d != %d"
% (alpha.size, n_targets)
)
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
n_iter = None
if solver == "sparse_cg":
coef = _solve_sparse_cg(
X,
y,
alpha,
max_iter=max_iter,
tol=tol,
verbose=verbose,
X_offset=X_offset,
X_scale=X_scale,
)
elif solver == "lsqr":
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == "cholesky":
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = "svd"
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = "svd"
elif solver in ["sag", "saga"]:
# precompute max_squared_sum for all targets
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1],), dtype=X.dtype)
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
init = {
"coef": np.zeros((n_features + int(return_intercept), 1), dtype=X.dtype)
}
coef_, n_iter_, _ = sag_solver(
X,
target.ravel(),
sample_weight,
"squared",
alpha_i,
0,
max_iter,
tol,
verbose,
random_state,
False,
max_squared_sum,
init,
is_saga=solver == "saga",
)
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
elif solver == "lbfgs":
coef = _solve_lbfgs(
X,
y,
alpha,
positive=positive,
tol=tol,
max_iter=max_iter,
X_offset=X_offset,
X_scale=X_scale,
)
if solver == "svd":
if sparse.issparse(X):
raise TypeError("SVD solver does not support sparse inputs currently")
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter and return_intercept:
return coef, n_iter, intercept
elif return_intercept:
return coef, intercept
elif return_n_iter:
return coef, n_iter
else:
return coef
class _BaseRidge(LinearModel, metaclass=ABCMeta):
@abstractmethod
def __init__(
self,
alpha=1.0,
*,
fit_intercept=True,
normalize="deprecated",
copy_X=True,
max_iter=None,
tol=1e-3,
solver="auto",
positive=False,
random_state=None,
):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.positive = positive
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
self._normalize = _deprecate_normalize(
self.normalize, default=False, estimator_name=self.__class__.__name__
)
if self.solver == "lbfgs" and not self.positive:
raise ValueError(
"'lbfgs' solver can be used only when positive=True. "
"Please use another solver."
)
if self.positive:
if self.solver not in ["auto", "lbfgs"]:
raise ValueError(
f"solver='{self.solver}' does not support positive fitting. Please"
" set the solver to 'auto' or 'lbfgs', or set `positive=False`"
)
else:
solver = self.solver
elif sparse.issparse(X) and self.fit_intercept:
if self.solver not in ["auto", "sparse_cg", "sag", "lbfgs"]:
raise ValueError(
"solver='{}' does not support fitting the intercept "
"on sparse data. Please set the solver to 'auto' or "
"'sparse_cg', 'sag', 'lbfgs' "
"or set `fit_intercept=False`".format(self.solver)
)
if self.solver == "lbfgs":
solver = "lbfgs"
elif self.solver == "sag" and self.max_iter is None and self.tol > 1e-4:
warnings.warn(
'"sag" solver requires many iterations to fit '
"an intercept with sparse inputs. Either set the "
'solver to "auto" or "sparse_cg", or set a low '
'"tol" and a high "max_iter" (especially if inputs are '
"not standardized)."
)
solver = "sag"
else:
solver = "sparse_cg"
else:
solver = self.solver
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if self.max_iter is not None:
self.max_iter = check_scalar(
self.max_iter, "max_iter", target_type=numbers.Integral, min_val=1
)
self.tol = check_scalar(self.tol, "tol", target_type=numbers.Real, min_val=0.0)
# when X is sparse we only remove offset from y
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X,
y,
self.fit_intercept,
self._normalize,
self.copy_X,
sample_weight=sample_weight,
return_mean=True,
)
if solver == "sag" and sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_ = _ridge_regression(
X,
y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver="sag",
positive=self.positive,
random_state=self.random_state,
return_n_iter=True,
return_intercept=True,
check_input=False,
)
# add the offset which was subtracted by _preprocess_data
self.intercept_ += y_offset
else:
if sparse.issparse(X) and self.fit_intercept:
# required to fit intercept with sparse_cg solver
params = {"X_offset": X_offset, "X_scale": X_scale}
else:
# for dense matrices or when intercept is set to 0
params = {}
self.coef_, self.n_iter_ = _ridge_regression(
X,
y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=solver,
positive=self.positive,
random_state=self.random_state,
return_n_iter=True,
return_intercept=False,
check_input=False,
**params,
)
self._set_intercept(X_offset, y_offset, X_scale)
return self
class Ridge(MultiOutputMixin, RegressorMixin, _BaseRidge):
"""Linear least squares with l2 regularization.
Minimizes the objective function::
||y - Xw||^2_2 + alpha * ||w||^2_2
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape (n_samples, n_targets)).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, ndarray of shape (n_targets,)}, default=1.0
Constant that multiplies the L2 term, controlling regularization
strength. `alpha` must be a non-negative float i.e. in `[0, inf)`.
When `alpha = 0`, the objective is equivalent to ordinary least
squares, solved by the :class:`LinearRegression` object. For numerical
reasons, using `alpha = 0` with the `Ridge` object is not advised.
Instead, you should use the :class:`LinearRegression` object.
If an array is passed, penalties are assumed to be specific to the
targets. Hence they must correspond in number.
fit_intercept : bool, default=True
Whether to fit the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. ``X`` and ``y`` are expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
.. deprecated:: 1.0
``normalize`` was deprecated in version 1.0 and
will be removed in 1.2.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
For 'lbfgs' solver, the default value is 15000.
tol : float, default=1e-3
Precision of the solution.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \
'sag', 'saga', 'lbfgs'}, default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than 'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
- 'lbfgs' uses L-BFGS-B algorithm implemented in
`scipy.optimize.minimize`. It can be used only when `positive`
is True.
        The last six solvers all support both dense and sparse data. However, only
'sag', 'sparse_cg', and 'lbfgs' support sparse input when `fit_intercept`
is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
positive : bool, default=False
When set to ``True``, forces the coefficients to be positive.
Only 'lbfgs' solver is supported in this case.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
.. versionadded:: 0.17
`random_state` to support Stochastic Average Gradient.
Attributes
----------
coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : None or ndarray of shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
.. versionadded:: 0.17
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
RidgeClassifier : Ridge classifier.
RidgeCV : Ridge regression with built-in cross validation.
:class:`~sklearn.kernel_ridge.KernelRidge` : Kernel ridge regression
combines ridge regression with the kernel trick.
Notes
-----
Regularization improves the conditioning of the problem and
reduces the variance of the estimates. Larger values specify stronger
regularization. Alpha corresponds to ``1 / (2C)`` in other linear
models such as :class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y)
Ridge()
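    A minimal sketch of the ``positive`` option (the 'lbfgs' solver is then
    selected automatically and the fitted coefficients are non-negative):
    >>> clf = Ridge(alpha=1.0, positive=True)
    >>> clf.fit(X, y)
    Ridge(positive=True)
    >>> bool((clf.coef_ >= 0).all())
    True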
"""
def __init__(
self,
alpha=1.0,
*,
fit_intercept=True,
normalize="deprecated",
copy_X=True,
max_iter=None,
tol=1e-3,
solver="auto",
positive=False,
random_state=None,
):
super().__init__(
alpha=alpha,
fit_intercept=fit_intercept,
normalize=normalize,
copy_X=copy_X,
max_iter=max_iter,
tol=tol,
solver=solver,
positive=positive,
random_state=random_state,
)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Fitted estimator.
"""
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), self.solver)
X, y = self._validate_data(
X,
y,
accept_sparse=_accept_sparse,
dtype=[np.float64, np.float32],
multi_output=True,
y_numeric=True,
)
return super().fit(X, y, sample_weight=sample_weight)
class _RidgeClassifierMixin(LinearClassifierMixin):
def _prepare_data(self, X, y, sample_weight, solver):
"""Validate `X` and `y` and binarize `y`.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
solver : str
The solver used in `Ridge` to know which sparse format to support.
Returns
-------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Validated training data.
y : ndarray of shape (n_samples,)
Validated target values.
sample_weight : ndarray of shape (n_samples,)
Validated sample weights.
Y : ndarray of shape (n_samples, n_classes)
The binarized version of `y`.
"""
accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver)
X, y = self._validate_data(
X,
y,
accept_sparse=accept_sparse,
multi_output=True,
y_numeric=False,
)
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith("multilabel"):
y = column_or_1d(y, warn=True)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if self.class_weight:
sample_weight = sample_weight * compute_sample_weight(self.class_weight, y)
return X, y, sample_weight, Y
def predict(self, X):
"""Predict class labels for samples in `X`.
Parameters
----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data matrix for which we want to predict the targets.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
Vector or matrix containing the predictions. In binary and
multiclass problems, this is a vector containing `n_samples`. In
a multilabel problem, it returns a matrix of shape
`(n_samples, n_outputs)`.
"""
check_is_fitted(self, attributes=["_label_binarizer"])
if self._label_binarizer.y_type_.startswith("multilabel"):
# Threshold such that the negative label is -1 and positive label
# is 1 to use the inverse transform of the label binarizer fitted
# during fit.
scores = 2 * (self.decision_function(X) > 0) - 1
return self._label_binarizer.inverse_transform(scores)
return super().predict(X)
@property
def classes_(self):
"""Classes labels."""
return self._label_binarizer.classes_
def _more_tags(self):
return {"multilabel": True}
class RidgeClassifier(_RidgeClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
This classifier first converts the target values into ``{-1, 1}`` and
then treats the problem as a regression task (multi-output regression in
the multiclass case).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float, default=1.0
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
.. deprecated:: 1.0
``normalize`` was deprecated in version 1.0 and
will be removed in 1.2.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
tol : float, default=1e-3
Precision of the solution.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \
'sag', 'saga', 'lbfgs'}, default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than 'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its unbiased and more flexible version named SAGA. Both methods
use an iterative procedure, and are often faster than other solvers
when both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
- 'lbfgs' uses L-BFGS-B algorithm implemented in
`scipy.optimize.minimize`. It can be used only when `positive`
is True.
positive : bool, default=False
When set to ``True``, forces the coefficients to be positive.
Only 'lbfgs' solver is supported in this case.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
Attributes
----------
coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
``coef_`` is of shape (1, n_features) when the given problem is binary.
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : None or ndarray of shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
Ridge : Ridge regression.
RidgeClassifierCV : Ridge classifier with built-in cross validation.
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import RidgeClassifier
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = RidgeClassifier().fit(X, y)
>>> clf.score(X, y)
0.9595...
"""
def __init__(
self,
alpha=1.0,
*,
fit_intercept=True,
normalize="deprecated",
copy_X=True,
max_iter=None,
tol=1e-3,
class_weight=None,
solver="auto",
positive=False,
random_state=None,
):
super().__init__(
alpha=alpha,
fit_intercept=fit_intercept,
normalize=normalize,
copy_X=copy_X,
max_iter=max_iter,
tol=tol,
solver=solver,
positive=positive,
random_state=random_state,
)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge classifier model.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
.. versionadded:: 0.17
*sample_weight* support to RidgeClassifier.
Returns
-------
self : object
Instance of the estimator.
"""
X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, self.solver)
super().fit(X, Y, sample_weight=sample_weight)
return self
def _check_gcv_mode(X, gcv_mode):
possible_gcv_modes = [None, "auto", "svd", "eigen"]
if gcv_mode not in possible_gcv_modes:
raise ValueError(
"Unknown value for 'gcv_mode'. Got {} instead of one of {}".format(
gcv_mode, possible_gcv_modes
)
)
if gcv_mode in ["eigen", "svd"]:
return gcv_mode
# if X has more rows than columns, use decomposition of X^T.X,
# otherwise X.X^T
if X.shape[0] > X.shape[1]:
return "svd"
return "eigen"
def _find_smallest_angle(query, vectors):
"""Find the column of vectors that is most aligned with the query.
Both query and the columns of vectors must have their l2 norm equal to 1.
Parameters
----------
query : ndarray of shape (n_samples,)
Normalized query vector.
vectors : ndarray of shape (n_samples, n_features)
Vectors to which we compare query, as columns. Must be normalized.
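    Examples
    --------
    A minimal illustration; the second column is exactly aligned with the
    query, so its index is returned:
    >>> import numpy as np
    >>> query = np.array([1.0, 0.0])
    >>> vectors = np.array([[0.0, 1.0], [1.0, 0.0]])
    >>> int(_find_smallest_angle(query, vectors))
    1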
"""
abs_cosine = np.abs(query.dot(vectors))
index = np.argmax(abs_cosine)
return index
class _X_CenterStackOp(sparse.linalg.LinearOperator):
"""Behaves as centered and scaled X with an added intercept column.
This operator behaves as
np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]])
"""
def __init__(self, X, X_mean, sqrt_sw):
n_samples, n_features = X.shape
super().__init__(X.dtype, (n_samples, n_features + 1))
self.X = X
self.X_mean = X_mean
self.sqrt_sw = sqrt_sw
def _matvec(self, v):
v = v.ravel()
return (
safe_sparse_dot(self.X, v[:-1], dense_output=True)
- self.sqrt_sw * self.X_mean.dot(v[:-1])
+ v[-1] * self.sqrt_sw
)
def _matmat(self, v):
return (
safe_sparse_dot(self.X, v[:-1], dense_output=True)
- self.sqrt_sw[:, None] * self.X_mean.dot(v[:-1])
+ v[-1] * self.sqrt_sw[:, None]
)
def _transpose(self):
return _XT_CenterStackOp(self.X, self.X_mean, self.sqrt_sw)
class _XT_CenterStackOp(sparse.linalg.LinearOperator):
"""Behaves as transposed centered and scaled X with an intercept column.
This operator behaves as
np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]).T
"""
def __init__(self, X, X_mean, sqrt_sw):
n_samples, n_features = X.shape
super().__init__(X.dtype, (n_features + 1, n_samples))
self.X = X
self.X_mean = X_mean
self.sqrt_sw = sqrt_sw
def _matvec(self, v):
v = v.ravel()
n_features = self.shape[0]
res = np.empty(n_features, dtype=self.X.dtype)
res[:-1] = safe_sparse_dot(self.X.T, v, dense_output=True) - (
self.X_mean * self.sqrt_sw.dot(v)
)
res[-1] = np.dot(v, self.sqrt_sw)
return res
def _matmat(self, v):
n_features = self.shape[0]
res = np.empty((n_features, v.shape[1]), dtype=self.X.dtype)
res[:-1] = safe_sparse_dot(self.X.T, v, dense_output=True) - self.X_mean[
:, None
] * self.sqrt_sw.dot(v)
res[-1] = np.dot(self.sqrt_sw, v)
return res
class _IdentityRegressor:
"""Fake regressor which will directly output the prediction."""
def decision_function(self, y_predict):
return y_predict
def predict(self, y_predict):
return y_predict
class _IdentityClassifier(LinearClassifierMixin):
"""Fake classifier which will directly output the prediction.
We inherit from LinearClassifierMixin to get the proper shape for the
output `y`.
"""
def __init__(self, classes):
self.classes_ = classes
def decision_function(self, y_predict):
return y_predict
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Leave-one-out Cross-Validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id).
Dual solution: c = G^-1y
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G^-1 = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KG^-1Y - diag(KG^-1)Y) / diag(I-KG^-1)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G^-1)
The best score (negative mean squared error or user-provided scoring) is
stored in the `best_score_` attribute, and the selected hyperparameter in
`alpha_`.
References
----------
http://cbcl.mit.edu/publications/ps/MIT-CSAIL-TR-2007-025.pdf
https://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(
self,
alphas=(0.1, 1.0, 10.0),
*,
fit_intercept=True,
normalize="deprecated",
scoring=None,
copy_X=True,
gcv_mode=None,
store_cv_values=False,
is_clf=False,
alpha_per_target=False,
):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
self.is_clf = is_clf
self.alpha_per_target = alpha_per_target
@staticmethod
def _decomp_diag(v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q**2).sum(axis=-1)
@staticmethod
def _diag_dot(D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None),) + (np.newaxis,) * (len(B.shape) - 1)]
return D * B
def _compute_gram(self, X, sqrt_sw):
"""Computes the Gram matrix XX^T with possible centering.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The preprocessed design matrix.
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
gram : ndarray of shape (n_samples, n_samples)
The Gram matrix.
        X_mean : ndarray of shape (n_features,)
The weighted mean of ``X`` for each feature.
Notes
-----
When X is dense the centering has been done in preprocessing
so the mean is 0 and we just compute XX^T.
When X is sparse it has not been centered in preprocessing, but it has
been scaled by sqrt(sample weights).
When self.fit_intercept is False no centering is done.
The centered X is never actually computed because centering would break
the sparsity of X.
"""
center = self.fit_intercept and sparse.issparse(X)
if not center:
# in this case centering has been done in preprocessing
# or we are not fitting an intercept.
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
return safe_sparse_dot(X, X.T, dense_output=True), X_mean
# X is sparse
n_samples = X.shape[0]
sample_weight_matrix = sparse.dia_matrix(
(sqrt_sw, 0), shape=(n_samples, n_samples)
)
X_weighted = sample_weight_matrix.dot(X)
X_mean, _ = mean_variance_axis(X_weighted, axis=0)
X_mean *= n_samples / sqrt_sw.dot(sqrt_sw)
X_mX = sqrt_sw[:, None] * safe_sparse_dot(X_mean, X.T, dense_output=True)
X_mX_m = np.outer(sqrt_sw, sqrt_sw) * np.dot(X_mean, X_mean)
return (
safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m - X_mX - X_mX.T,
X_mean,
)
def _compute_covariance(self, X, sqrt_sw):
"""Computes covariance matrix X^TX with possible centering.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
The preprocessed design matrix.
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
covariance : ndarray of shape (n_features, n_features)
The covariance matrix.
        X_mean : ndarray of shape (n_features,)
The weighted mean of ``X`` for each feature.
Notes
-----
Since X is sparse it has not been centered in preprocessing, but it has
been scaled by sqrt(sample weights).
When self.fit_intercept is False no centering is done.
The centered X is never actually computed because centering would break
the sparsity of X.
"""
if not self.fit_intercept:
            # centering is not needed when we are not fitting an intercept
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
return safe_sparse_dot(X.T, X, dense_output=True), X_mean
# this function only gets called for sparse X
n_samples = X.shape[0]
sample_weight_matrix = sparse.dia_matrix(
(sqrt_sw, 0), shape=(n_samples, n_samples)
)
X_weighted = sample_weight_matrix.dot(X)
X_mean, _ = mean_variance_axis(X_weighted, axis=0)
X_mean = X_mean * n_samples / sqrt_sw.dot(sqrt_sw)
weight_sum = sqrt_sw.dot(sqrt_sw)
return (
safe_sparse_dot(X.T, X, dense_output=True)
- weight_sum * np.outer(X_mean, X_mean),
X_mean,
)
def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw):
"""Compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T)
without explicitly centering X nor computing X.dot(A)
when X is sparse.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
A : ndarray of shape (n_features, n_features)
X_mean : ndarray of shape (n_features,)
        sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
diag : np.ndarray, shape (n_samples,)
The computed diagonal.
"""
intercept_col = scale = sqrt_sw
batch_size = X.shape[1]
diag = np.empty(X.shape[0], dtype=X.dtype)
for start in range(0, X.shape[0], batch_size):
batch = slice(start, min(X.shape[0], start + batch_size), 1)
X_batch = np.empty(
(X[batch].shape[0], X.shape[1] + self.fit_intercept), dtype=X.dtype
)
if self.fit_intercept:
X_batch[:, :-1] = X[batch].A - X_mean * scale[batch][:, None]
X_batch[:, -1] = intercept_col[batch]
else:
X_batch = X[batch].A
diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1)
return diag
def _eigen_decompose_gram(self, X, y, sqrt_sw):
"""Eigendecomposition of X.X^T, used when n_samples <= n_features."""
# if X is dense it has already been centered in preprocessing
K, X_mean = self._compute_gram(X, sqrt_sw)
if self.fit_intercept:
# to emulate centering X with sample weights,
# ie removing the weighted average, we add a column
# containing the square roots of the sample weights.
# by centering, it is orthogonal to the other columns
K += np.outer(sqrt_sw, sqrt_sw)
eigvals, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return X_mean, eigvals, Q, QT_y
def _solve_eigen_gram(self, alpha, y, sqrt_sw, X_mean, eigvals, Q, QT_y):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X.X^T (n_samples <= n_features).
"""
w = 1.0 / (eigvals + alpha)
if self.fit_intercept:
# the vector containing the square roots of the sample weights (1
# when no sample weights) is the eigenvector of XX^T which
# corresponds to the intercept; we cancel the regularization on
# this dimension. the corresponding eigenvalue is
# sum(sample_weight).
normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)
intercept_dim = _find_smallest_angle(normalized_sw, Q)
w[intercept_dim] = 0 # cancel regularization for the intercept
c = np.dot(Q, self._diag_dot(w, QT_y))
G_inverse_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_inverse_diag = G_inverse_diag[:, np.newaxis]
return G_inverse_diag, c
def _eigen_decompose_covariance(self, X, y, sqrt_sw):
"""Eigendecomposition of X^T.X, used when n_samples > n_features
and X is sparse.
"""
n_samples, n_features = X.shape
cov = np.empty((n_features + 1, n_features + 1), dtype=X.dtype)
cov[:-1, :-1], X_mean = self._compute_covariance(X, sqrt_sw)
if not self.fit_intercept:
cov = cov[:-1, :-1]
# to emulate centering X with sample weights,
# ie removing the weighted average, we add a column
# containing the square roots of the sample weights.
# by centering, it is orthogonal to the other columns
# when all samples have the same weight we add a column of 1
else:
cov[-1] = 0
cov[:, -1] = 0
cov[-1, -1] = sqrt_sw.dot(sqrt_sw)
nullspace_dim = max(0, n_features - n_samples)
eigvals, V = linalg.eigh(cov)
# remove eigenvalues and vectors in the null space of X^T.X
eigvals = eigvals[nullspace_dim:]
V = V[:, nullspace_dim:]
return X_mean, eigvals, V, X
def _solve_eigen_covariance_no_intercept(
self, alpha, y, sqrt_sw, X_mean, eigvals, V, X
):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse), and not fitting an intercept.
"""
w = 1 / (eigvals + alpha)
A = (V * w).dot(V.T)
AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True))
y_hat = safe_sparse_dot(X, AXy, dense_output=True)
hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)
if len(y.shape) != 1:
# handle case where y is 2-d
hat_diag = hat_diag[:, np.newaxis]
return (1 - hat_diag) / alpha, (y - y_hat) / alpha
def _solve_eigen_covariance_intercept(
self, alpha, y, sqrt_sw, X_mean, eigvals, V, X
):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse),
and we are fitting an intercept.
"""
# the vector [0, 0, ..., 0, 1]
# is the eigenvector of X^TX which
# corresponds to the intercept; we cancel the regularization on
# this dimension. the corresponding eigenvalue is
# sum(sample_weight), e.g. n when uniform sample weights.
intercept_sv = np.zeros(V.shape[0])
intercept_sv[-1] = 1
intercept_dim = _find_smallest_angle(intercept_sv, V)
w = 1 / (eigvals + alpha)
w[intercept_dim] = 1 / eigvals[intercept_dim]
A = (V * w).dot(V.T)
# add a column to X containing the square roots of sample weights
X_op = _X_CenterStackOp(X, X_mean, sqrt_sw)
AXy = A.dot(X_op.T.dot(y))
y_hat = X_op.dot(AXy)
hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)
# return (1 - hat_diag), (y - y_hat)
if len(y.shape) != 1:
# handle case where y is 2-d
hat_diag = hat_diag[:, np.newaxis]
return (1 - hat_diag) / alpha, (y - y_hat) / alpha
def _solve_eigen_covariance(self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse).
"""
if self.fit_intercept:
return self._solve_eigen_covariance_intercept(
alpha, y, sqrt_sw, X_mean, eigvals, V, X
)
return self._solve_eigen_covariance_no_intercept(
alpha, y, sqrt_sw, X_mean, eigvals, V, X
)
def _svd_decompose_design_matrix(self, X, y, sqrt_sw):
# X already centered
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
if self.fit_intercept:
# to emulate fit_intercept=True situation, add a column
# containing the square roots of the sample weights
# by centering, the other columns are orthogonal to that one
intercept_column = sqrt_sw[:, None]
X = np.hstack((X, intercept_column))
U, singvals, _ = linalg.svd(X, full_matrices=0)
singvals_sq = singvals**2
UT_y = np.dot(U.T, y)
return X_mean, singvals_sq, U, UT_y
def _solve_svd_design_matrix(self, alpha, y, sqrt_sw, X_mean, singvals_sq, U, UT_y):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have an SVD decomposition of X
(n_samples > n_features and X is dense).
"""
w = ((singvals_sq + alpha) ** -1) - (alpha**-1)
if self.fit_intercept:
# detect intercept column
normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)
intercept_dim = _find_smallest_angle(normalized_sw, U)
# cancel the regularization for the intercept
w[intercept_dim] = -(alpha**-1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha**-1) * y
G_inverse_diag = self._decomp_diag(w, U) + (alpha**-1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_inverse_diag = G_inverse_diag[:, np.newaxis]
return G_inverse_diag, c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model with gcv.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data. Will be cast to float64 if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to float64 if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
"""
_normalize = _deprecate_normalize(
self.normalize, default=False, estimator_name=self.__class__.__name__
)
X, y = self._validate_data(
X,
y,
accept_sparse=["csr", "csc", "coo"],
dtype=[np.float64],
multi_output=True,
y_numeric=True,
)
# alpha_per_target cannot be used in classifier mode. All subclasses
# of _RidgeGCV that are classifiers keep alpha_per_target at its
# default value: False, so the condition below should never happen.
assert not (self.is_clf and self.alpha_per_target)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self.alphas = np.asarray(self.alphas)
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X,
y,
self.fit_intercept,
_normalize,
self.copy_X,
sample_weight=sample_weight,
)
gcv_mode = _check_gcv_mode(X, self.gcv_mode)
if gcv_mode == "eigen":
decompose = self._eigen_decompose_gram
solve = self._solve_eigen_gram
elif gcv_mode == "svd":
if sparse.issparse(X):
decompose = self._eigen_decompose_covariance
solve = self._solve_eigen_covariance
else:
decompose = self._svd_decompose_design_matrix
solve = self._solve_svd_design_matrix
n_samples = X.shape[0]
if sample_weight is not None:
X, y = _rescale_data(X, y, sample_weight)
sqrt_sw = np.sqrt(sample_weight)
else:
sqrt_sw = np.ones(n_samples, dtype=X.dtype)
X_mean, *decomposition = decompose(X, y, sqrt_sw)
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
n_y = 1 if len(y.shape) == 1 else y.shape[1]
n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas)
if self.store_cv_values:
self.cv_values_ = np.empty((n_samples * n_y, n_alphas), dtype=X.dtype)
best_coef, best_score, best_alpha = None, None, None
for i, alpha in enumerate(np.atleast_1d(self.alphas)):
G_inverse_diag, c = solve(float(alpha), y, sqrt_sw, X_mean, *decomposition)
if error:
squared_errors = (c / G_inverse_diag) ** 2
if self.alpha_per_target:
alpha_score = -squared_errors.mean(axis=0)
else:
alpha_score = -squared_errors.mean()
if self.store_cv_values:
self.cv_values_[:, i] = squared_errors.ravel()
else:
predictions = y - (c / G_inverse_diag)
if self.store_cv_values:
self.cv_values_[:, i] = predictions.ravel()
if self.is_clf:
identity_estimator = _IdentityClassifier(classes=np.arange(n_y))
alpha_score = scorer(
identity_estimator, predictions, y.argmax(axis=1)
)
else:
identity_estimator = _IdentityRegressor()
if self.alpha_per_target:
alpha_score = np.array(
[
scorer(identity_estimator, predictions[:, j], y[:, j])
for j in range(n_y)
]
)
else:
alpha_score = scorer(
identity_estimator, predictions.ravel(), y.ravel()
)
# Keep track of the best model
if best_score is None:
# initialize
if self.alpha_per_target and n_y > 1:
best_coef = c
best_score = np.atleast_1d(alpha_score)
best_alpha = np.full(n_y, alpha)
else:
best_coef = c
best_score = alpha_score
best_alpha = alpha
else:
# update
if self.alpha_per_target and n_y > 1:
to_update = alpha_score > best_score
best_coef[:, to_update] = c[:, to_update]
best_score[to_update] = alpha_score[to_update]
best_alpha[to_update] = alpha
elif alpha_score > best_score:
best_coef, best_score, best_alpha = c, alpha_score, alpha
self.alpha_ = best_alpha
self.best_score_ = best_score
self.dual_coef_ = best_coef
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
X_offset += X_mean * X_scale
self._set_intercept(X_offset, y_offset, X_scale)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, n_alphas
else:
cv_values_shape = n_samples, n_y, n_alphas
self.cv_values_ = self.cv_values_.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(
self,
alphas=(0.1, 1.0, 10.0),
*,
fit_intercept=True,
normalize="deprecated",
scoring=None,
cv=None,
gcv_mode=None,
store_cv_values=False,
alpha_per_target=False,
):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
self.alpha_per_target = alpha_per_target
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data. If using GCV, will be cast to float64
if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Fitted estimator.
Notes
-----
When sample_weight is provided, the selected hyperparameter may depend
on whether we use leave-one-out cross-validation (cv=None or cv='auto')
or another form of cross-validation, because only leave-one-out
cross-validation takes the sample weights into account when computing
the validation score.
"""
cv = self.cv
check_scalar_alpha = partial(
check_scalar,
target_type=numbers.Real,
min_val=0.0,
include_boundaries="neither",
)
if isinstance(self.alphas, (np.ndarray, list, tuple)):
n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas)
if n_alphas != 1:
for index, alpha in enumerate(self.alphas):
alpha = check_scalar_alpha(alpha, f"alphas[{index}]")
else:
self.alphas[0] = check_scalar_alpha(self.alphas[0], "alphas")
else:
# check for single non-iterable item
self.alphas = check_scalar_alpha(self.alphas, "alphas")
alphas = np.asarray(self.alphas)
if cv is None:
estimator = _RidgeGCV(
alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values,
is_clf=is_classifier(self),
alpha_per_target=self.alpha_per_target,
)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
self.best_score_ = estimator.best_score_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True are incompatible")
if self.alpha_per_target:
raise ValueError("cv!=None and alpha_per_target=True are incompatible")
parameters = {"alpha": alphas}
solver = "sparse_cg" if sparse.issparse(X) else "auto"
model = RidgeClassifier if is_classifier(self) else Ridge
gs = GridSearchCV(
model(
fit_intercept=self.fit_intercept,
normalize=self.normalize,
solver=solver,
),
parameters,
cv=cv,
scoring=self.scoring,
)
gs.fit(X, y, sample_weight=sample_weight)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.best_score_ = gs.best_score_
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
self.n_features_in_ = estimator.n_features_in_
if hasattr(estimator, "feature_names_in_"):
self.feature_names_in_ = estimator.feature_names_in_
return self
class RidgeCV(MultiOutputMixin, RegressorMixin, _BaseRidgeCV):
"""Ridge regression with built-in cross-validation.
See glossary entry for :term:`cross-validation estimator`.
By default, it performs efficient Leave-One-Out Cross-Validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : ndarray of shape (n_alphas,), default=(0.1, 1.0, 10.0)
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
If using Leave-One-Out cross-validation, alphas must be positive.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
.. deprecated:: 1.0
``normalize`` was deprecated in version 1.0 and will be removed in
1.2.
scoring : str, callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If None, the negative mean squared error if cv is 'auto' or None
(i.e. when using leave-one-out cross-validation), and r2 score
otherwise.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used, else,
:class:`~sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
gcv_mode : {'auto', 'svd', 'eigen'}, default='auto'
Flag indicating which strategy to use when performing
Leave-One-Out Cross-Validation. Options are::
'auto' : use 'svd' if n_samples > n_features, otherwise use 'eigen'
'svd' : force use of singular value decomposition of X when X is
dense, eigenvalue decomposition of X^T.X when X is sparse.
'eigen' : force computation via eigendecomposition of X.X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending on the shape of the training data.
store_cv_values : bool, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the ``cv_values_`` attribute (see
below). This flag is only compatible with ``cv=None`` (i.e. using
Leave-One-Out Cross-Validation).
alpha_per_target : bool, default=False
Flag indicating whether to optimize the alpha value (picked from the
`alphas` parameter list) for each target separately (for multi-output
settings: multiple prediction targets). When set to `True`, after
fitting, the `alpha_` attribute will contain a value for each target.
When set to `False`, a single alpha is used for all targets.
.. versionadded:: 0.24
Attributes
----------
cv_values_ : ndarray of shape (n_samples, n_alphas) or \
shape (n_samples, n_targets, n_alphas), optional
Cross-validation values for each alpha (only available if
``store_cv_values=True`` and ``cv=None``). After ``fit()`` has been
called, this attribute will contain the mean squared errors if
`scoring is None` otherwise it will contain standardized per point
prediction values.
coef_ : ndarray of shape (n_features) or (n_targets, n_features)
Weight vector(s).
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float or ndarray of shape (n_targets,)
Estimated regularization parameter, or, if ``alpha_per_target=True``,
the estimated regularization parameter for each target.
best_score_ : float or ndarray of shape (n_targets,)
Score of base estimator with best alpha, or, if
``alpha_per_target=True``, a score for each target.
.. versionadded:: 0.23
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
Ridge : Ridge regression.
RidgeClassifier : Classifier based on ridge regression on {-1, 1} labels.
RidgeClassifierCV : Ridge classifier with built-in cross validation.
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> from sklearn.linear_model import RidgeCV
>>> X, y = load_diabetes(return_X_y=True)
>>> clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
>>> clf.score(X, y)
0.5166...
"""
class RidgeClassifierCV(_RidgeClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
See glossary entry for :term:`cross-validation estimator`.
By default, it performs Leave-One-Out Cross-Validation. Currently,
only the n_features > n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : ndarray of shape (n_alphas,), default=(0.1, 1.0, 10.0)
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
.. deprecated:: 1.0
``normalize`` was deprecated in version 1.0 and
will be removed in 1.2.
scoring : str, callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
store_cv_values : bool, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the ``cv_values_`` attribute (see
below). This flag is only compatible with ``cv=None`` (i.e. using
Leave-One-Out Cross-Validation).
Attributes
----------
cv_values_ : ndarray of shape (n_samples, n_targets, n_alphas), optional
Cross-validation values for each alpha (only if ``store_cv_values=True`` and
``cv=None``). After ``fit()`` has been called, this attribute will
contain the mean squared errors if `scoring is None` otherwise it
will contain standardized per point prediction values.
coef_ : ndarray of shape (1, n_features) or (n_targets, n_features)
Coefficient of the features in the decision function.
``coef_`` is of shape (1, n_features) when the given problem is binary.
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
best_score_ : float
Score of base estimator with best alpha.
.. versionadded:: 0.23
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
Ridge : Ridge regression.
RidgeClassifier : Ridge classifier.
RidgeCV : Ridge regression with built-in cross validation.
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import RidgeClassifierCV
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
>>> clf.score(X, y)
0.9630...
"""
def __init__(
self,
alphas=(0.1, 1.0, 10.0),
*,
fit_intercept=True,
normalize="deprecated",
scoring=None,
cv=None,
class_weight=None,
store_cv_values=False,
):
super().__init__(
alphas=alphas,
fit_intercept=fit_intercept,
normalize=normalize,
scoring=scoring,
cv=cv,
store_cv_values=store_cv_values,
)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge classifier with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples
and `n_features` is the number of features. When using GCV,
will be cast to float64 if necessary.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Fitted estimator.
"""
        # `RidgeClassifier` does not accept the "sag" or "saga" solvers and thus
        # supports csr, csc, and coo sparse matrices. By using solver="eigen" we
        # force acceptance of all sparse formats.
X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, solver="eigen")
# If cv is None, gcv mode will be used and we used the binarized Y
# since y will not be binarized in _RidgeGCV estimator.
# If cv is not None, a GridSearchCV with some RidgeClassifier
# estimators are used where y will be binarized. Thus, we pass y
# instead of the binarized Y.
target = Y if self.cv is None else y
super().fit(X, target, sample_weight=sample_weight)
return self
def _more_tags(self):
return {
"multilabel": True,
"_xfail_checks": {
"check_sample_weights_invariance": (
"zero sample_weight is not equivalent to removing samples"
),
},
}
| 35.703163
| 88
| 0.603437
|
0224b3836e2bb4078860ca00d5beee783a70e15a
| 988
|
py
|
Python
|
src/sentry/utils/sms.py
|
AlexWayfer/sentry
|
ef935cda2b2e960bd602fda590540882d1b0712d
|
[
"BSD-3-Clause"
] | 4
|
2019-05-27T13:55:07.000Z
|
2021-03-30T07:05:09.000Z
|
src/sentry/utils/sms.py
|
AlexWayfer/sentry
|
ef935cda2b2e960bd602fda590540882d1b0712d
|
[
"BSD-3-Clause"
] | 196
|
2019-06-10T08:34:10.000Z
|
2022-02-22T01:26:13.000Z
|
src/sentry/utils/sms.py
|
AlexWayfer/sentry
|
ef935cda2b2e960bd602fda590540882d1b0712d
|
[
"BSD-3-Clause"
] | 2
|
2021-01-26T09:53:39.000Z
|
2022-03-22T09:01:47.000Z
|
from __future__ import absolute_import
import logging
import requests
from six.moves.urllib.parse import quote
from sentry import options
logger = logging.getLogger(__name__)
def sms_available():
return bool(options.get('sms.twilio-account'))
def send_sms(body, to, from_=None):
account = options.get('sms.twilio-account')
if not account:
raise RuntimeError('SMS backend is not configured.')
if account[:2] != 'AC':
account = 'AC' + account
url = 'https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json' % \
quote(account)
rv = requests.post(
url,
auth=(account, options.get('sms.twilio-token')),
data={
'To': to,
'From': options.get('sms.twilio-number'),
'Body': body,
}
)
if not rv.ok:
        logger.error(
'Failed to send text message to %s: (%s) %s', to, rv.status_code, rv.content
)
return False
return True
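# Illustrative usage sketch (the recipient number and message body below are
# placeholders, not part of this module):
#
#     if sms_available():
#         send_sms("Your verification code is 123456", to="+15555550100")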
| 24.7
| 88
| 0.605263
|
a66089eca815b0d0d866e0f80b7dcdf26bdb03b6
| 2,739
|
py
|
Python
|
jarviscli/plugins/caesar_cipher.py
|
WWFelina/Jarvis
|
69c4dba3e4b86478221b3d401a1f9423434309eb
|
[
"MIT"
] | 2,605
|
2017-03-10T22:44:36.000Z
|
2022-03-31T15:33:17.000Z
|
jarviscli/plugins/caesar_cipher.py
|
ritikranjan12/Jarvis
|
6a1c5ef2c08724b6ff5929bc434209a24226c09f
|
[
"MIT"
] | 729
|
2017-03-11T00:06:46.000Z
|
2022-03-31T22:04:44.000Z
|
jarviscli/plugins/caesar_cipher.py
|
ritikranjan12/Jarvis
|
6a1c5ef2c08724b6ff5929bc434209a24226c09f
|
[
"MIT"
] | 1,181
|
2017-03-10T23:24:55.000Z
|
2022-03-31T03:59:46.000Z
|
from colorama import Fore
from plugin import plugin
@plugin("caesar cipher")
def caesar_cipher_converter(jarvis, s):
option = get_option(jarvis)
if option == 1:
plain_to_cipher(jarvis)
elif option == 2:
cipher_to_plain(jarvis)
else:
return
def get_option(jarvis):
jarvis.say("~> What can I do for you?", Fore.RED)
print("1: Convert plain text to cipher")
print("2: Convert cipher to plain text")
print("3: Exit")
print()
while True:
try:
option = int(jarvis.input("Enter your choice: ", Fore.GREEN))
if option == 3:
return
elif option == 1 or option == 2:
return option
else:
jarvis.say(
"Invalid input! Enter a number from the choices provided.", Fore.YELLOW)
except ValueError:
jarvis.say(
"Invalid input! Enter a number from the choices provided.", Fore.YELLOW)
print()
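# The two converters below apply a fixed Caesar shift of -3 to ASCII letters
# (e.g. 'A' -> 'X', 'd' -> 'a'); cipher_to_plain reverses it with a +3 shift.
# Latin-1 characters in the 0xC0-0xFF range are shifted within that range,
# and any other character passes through unchanged.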
def plain_to_cipher(jarvis):
user_input = get_user_input(jarvis)
converted = ""
for i in user_input:
if is_ascii(i):
if i.isalpha():
if i.isupper():
converted += chr((ord(i) - 68) % 26 + 65)
else:
converted += chr((ord(i) - 100) % 26 + 97)
else:
converted += i
else:
x = ord(i)
if 192 <= x <= 255:
converted += chr((ord(i) - 195) % 63 + 192)
else:
converted += i
jarvis.say(converted, Fore.YELLOW)
def is_ascii(s):
return all(ord(c) < 128 for c in s)
def cipher_to_plain(jarvis):
user_input = get_user_input(jarvis)
converted = ""
for i in user_input:
if is_ascii(i):
if i.isalpha():
if i.isupper():
converted += chr((ord(i) - 62) % 26 + 65)
else:
converted += chr((ord(i) - 94) % 26 + 97)
else:
converted += i
else:
x = ord(i)
if 192 <= x <= 255:
converted += chr((ord(i) - 189) % 63 + 192)
else:
converted += i
jarvis.say(converted, Fore.YELLOW)
def get_user_input(jarvis):
while True:
try:
user_input = jarvis.input("Enter string to convert: ")
if len(user_input) > 0:
return user_input
else:
jarvis.say(
"String length should be minimum 1.", Fore.YELLOW)
except ValueError:
jarvis.say("Sorry, I didn't understand that.", Fore.RED)
continue
return
| 26.336538
| 92
| 0.493246
|
ad782f5763b64564cea8ece6bdd4f426a501c917
| 2,686
|
py
|
Python
|
WEB(BE)/community/serializers.py
|
osamhack2021/WEB_SONAGI-ON_updraft
|
c63d62b8348ba991811814aeafa337a6b3785ca2
|
[
"MIT"
] | 1
|
2022-03-09T17:04:26.000Z
|
2022-03-09T17:04:26.000Z
|
WEB(BE)/community/serializers.py
|
osamhack2021/WEB_SONAGI-ON_updraft
|
c63d62b8348ba991811814aeafa337a6b3785ca2
|
[
"MIT"
] | null | null | null |
WEB(BE)/community/serializers.py
|
osamhack2021/WEB_SONAGI-ON_updraft
|
c63d62b8348ba991811814aeafa337a6b3785ca2
|
[
"MIT"
] | null | null | null |
from .models import Board, Post, Comment
from rest_framework import serializers
from usersetting.models import UserSetting
class BoardListSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Board
fields = '__all__'
class PostShowSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
image = serializers.ImageField(use_url=True)
nickname = serializers.SerializerMethodField()
prev_id = serializers.SerializerMethodField()
next_id = serializers.SerializerMethodField()
def get_nickname(self, obj):
return obj.email.setting.get(email=obj.email.email).nickname
def get_prev_id(self, obj):
tmp = Post.objects.filter(board_id=obj.board_id, id__lt=obj.id)
if len(tmp) > 0:
return tmp[len(tmp)-1].id
else:
return
def get_next_id(self, obj):
tmp = Post.objects.filter(board_id=obj.board_id, id__gt=obj.id)
if len(tmp) > 0:
return tmp[0].id
else:
return
class Meta:
model = Post
fields = ['id', 'prev_id', 'next_id', 'nickname', 'title', 'content', 'image', 'is_notice', 'write_date']
class PostListSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
image = serializers.ImageField(use_url=True)
nickname = serializers.SerializerMethodField()
def get_nickname(self, obj):
return obj.email.setting.get(email=obj.email.email).nickname
class Meta:
model = Post
fields = ['id', 'nickname', 'title', 'image', 'is_notice', 'write_date']
class PostWriteSerializer(serializers.ModelSerializer):
image = serializers.ImageField(use_url=True, required=False, allow_null=True)
class Meta:
model = Post
fields = ['board_id', 'title', 'content', 'image']
class PostRewriteSerializer(serializers.ModelSerializer):
image = serializers.ImageField(use_url=True, required=False, allow_null=True)
class Meta:
model = Post
fields = ['title', 'content', 'image']
class CommentListSerializer(serializers.ModelSerializer):
nickname = serializers.SerializerMethodField()
def get_nickname(self, obj):
return obj.email.setting.get(email=obj.email.email).nickname
class Meta:
model = Comment
fields = ['id', 'nickname', 'content', 'write_date']
class CommentWriteSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = ['post_id', 'content']
class CommentRewriteSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = ['content']
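# Illustrative usage sketch (the view-side query below is an assumption about
# the calling code, not part of this module):
#
#     posts = Post.objects.filter(board_id=board_id)
#     data = PostListSerializer(posts, many=True).data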
| 33.575
| 113
| 0.676471
|
d62c8e98f895e10acae9db708d575ecf35f329c8
| 2,800
|
py
|
Python
|
train.py
|
Emad2018/aipnd-project
|
2e04b8370b3e118f7e2e283df03eca2447fec40a
|
[
"MIT"
] | null | null | null |
train.py
|
Emad2018/aipnd-project
|
2e04b8370b3e118f7e2e283df03eca2447fec40a
|
[
"MIT"
] | null | null | null |
train.py
|
Emad2018/aipnd-project
|
2e04b8370b3e118f7e2e283df03eca2447fec40a
|
[
"MIT"
] | null | null | null |
from torchvision import transforms, datasets, models
import train_helper as thelp
import torch
import torch.nn as nn
import torch.optim as optim
in_args = thelp.get_input_args()
data_dir = in_args.dir
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
data_transforms = {"train": transforms.Compose([transforms.RandomResizedCrop(224),
transforms.RandomRotation(30),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])]),
"test": transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])]),
"valid": transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])}
image_datasets = {"train": datasets.ImageFolder(train_dir, transform=data_transforms["train"]),
"test": datasets.ImageFolder(test_dir, transform=data_transforms["test"]),
"valid": datasets.ImageFolder(valid_dir, transform=data_transforms["valid"]),
}
dataloaders = {"train": torch.utils.data.DataLoader(image_datasets["train"], batch_size=64, shuffle=True),
"test": torch.utils.data.DataLoader(image_datasets["test"], batch_size=64),
"valid": torch.utils.data.DataLoader(image_datasets["valid"], batch_size=64)}
if in_args.gpu:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if (device == "cpu"):
print("not found gpu,.... running on cpu")
else:
device = "cpu"
if(in_args.arch == "densenet"):
model = thelp.densenet121_FlowerModel(in_args.nhu)
else:
model = thelp.vgg11_FlowerModel(in_args.nhu)
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=in_args.lr)
print(in_args)
thelp.train_model(model, dataloaders, criterion,
optimizer, image_datasets, device=device, epochs=in_args.epochs, nhu=in_args.nhu)
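# Overall flow of this script: parse the CLI arguments, build train/valid/test
# dataloaders with the usual ImageNet mean/std normalisation, construct either
# a densenet121- or vgg11-based flower classifier (nhu presumably being the
# hidden-unit count of the new classifier head), and train it with Adam and
# NLLLoss on the selected device via train_helper.train_model.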
| 51.851852
| 106
| 0.525714
|
f4481ff55e269b8e76f54ffba095fddbe94db89f
| 1,396
|
py
|
Python
|
generate_img_from_seq.py
|
TeamSundar/CRISP-RCNN
|
271de32d676dcf3524ff2d752bb67e6e801ff179
|
[
"MIT"
] | null | null | null |
generate_img_from_seq.py
|
TeamSundar/CRISP-RCNN
|
271de32d676dcf3524ff2d752bb67e6e801ff179
|
[
"MIT"
] | null | null | null |
generate_img_from_seq.py
|
TeamSundar/CRISP-RCNN
|
271de32d676dcf3524ff2d752bb67e6e801ff179
|
[
"MIT"
] | 1
|
2021-06-25T09:18:11.000Z
|
2021-06-25T09:18:11.000Z
|
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
from tqdm import tqdm
def custom_onehot(arr):
img = np.zeros((len(arr),4))
for index, value in enumerate(arr):
if value=='-':
pass
elif value=='A':
img[index][0] = 1
elif value=='C':
img[index][1] = 1
elif value=='G':
img[index][2] = 1
elif value=='T':
img[index][3] = 1
else:
img[index][0:4] = 0.25
return img.T
#return img
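# For example, custom_onehot(np.array(list("ACGT-N"))) yields a 4 x 6 matrix:
# each column is the one-hot encoding of one base (A, C, G, T), '-' maps to an
# all-zero column, and any other symbol (e.g. 'N') to a uniform 0.25 column.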
def save_img(img,loc,i):
fig, ax = plt.subplots(figsize=(7,1))
ax.imshow(img, interpolation='none', cmap='Blues', aspect='auto')
plt.axis('off')
#plt.savefig('data/'+str(loc)+'_'+str(i+1)+'.png',bbox_inches = 'tight',pad_inches=0)
plt.savefig('moredata/'+str(loc)+'/'+str(loc)+'_'+str(i)+'.png',bbox_inches = 'tight',pad_inches=0)
plt.close(fig)
def make_images(df):
#start =
n,m = df.shape
off_list = df['N-padded off-target'].values
tar_list = df['N-padded target'].values
for i in tqdm(range(n)):
off = custom_onehot(np.array(list(off_list[i])))
save_img(off,'off',i)
tar = custom_onehot(np.array(list(tar_list[i])))
save_img(tar,'target',i)
#print (off,tar)
return None
data = pd.read_excel('dataset-SY.xlsx')
#small = data.head()
make_images(data)
| 25.381818
| 103
| 0.567335
|
8df574c20d3bee515ee158031f396cee84b10311
| 9,495
|
py
|
Python
|
imblearn/over_sampling/_random_over_sampler.py
|
cdchushig/imbalanced-learn
|
f02e7c7c2c021c85823cace405ca2c58ad4ff147
|
[
"MIT"
] | 1
|
2022-02-17T16:31:16.000Z
|
2022-02-17T16:31:16.000Z
|
imblearn/over_sampling/_random_over_sampler.py
|
cdchushig/imbalanced-learn
|
f02e7c7c2c021c85823cace405ca2c58ad4ff147
|
[
"MIT"
] | null | null | null |
imblearn/over_sampling/_random_over_sampler.py
|
cdchushig/imbalanced-learn
|
f02e7c7c2c021c85823cace405ca2c58ad4ff147
|
[
"MIT"
] | null | null | null |
"""Class to perform random over-sampling."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from collections.abc import Mapping
from numbers import Real
import numpy as np
from scipy import sparse
from sklearn.utils import check_array, check_random_state
from sklearn.utils import _safe_indexing
from sklearn.utils.sparsefuncs import mean_variance_axis
from .base import BaseOverSampler
from ..utils import check_target_type
from ..utils import Substitution
from ..utils._docstring import _random_state_docstring
from ..utils._validation import _deprecate_positional_args
@Substitution(
sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
random_state=_random_state_docstring,
)
class RandomOverSampler(BaseOverSampler):
"""Class to perform random over-sampling.
Object to over-sample the minority class(es) by picking samples at random
with replacement. The bootstrap can be generated in a smoothed manner.
Read more in the :ref:`User Guide <random_over_sampler>`.
Parameters
----------
{sampling_strategy}
{random_state}
shrinkage : float or dict, default=None
        Parameter controlling the shrinkage applied to the covariance matrix
        when a smoothed bootstrap is generated. The options are:
- if `None`, a normal bootstrap will be generated without perturbation.
It is equivalent to `shrinkage=0` as well;
- if a `float` is given, the shrinkage factor will be used for all
classes to generate the smoothed bootstrap;
- if a `dict` is given, the shrinkage factor will specific for each
class. The key correspond to the targeted class and the value is
the shrinkage factor.
        The value of the shrinkage parameter needs to be greater than or equal
        to 0.
.. versionadded:: 0.8
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
corresponds to the class labels from which to sample and the values
are the number of samples to sample.
sample_indices_ : ndarray of shape (n_new_samples,)
Indices of the samples selected.
.. versionadded:: 0.4
shrinkage_ : dict or None
The per-class shrinkage factor used to generate the smoothed bootstrap
sample. When `shrinkage=None` a normal bootstrap will be generated.
.. versionadded:: 0.8
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
See Also
--------
BorderlineSMOTE : Over-sample using the borderline-SMOTE variant.
SMOTE : Over-sample using SMOTE.
SMOTENC : Over-sample using SMOTE for continuous and categorical features.
SMOTEN : Over-sample using the SMOTE variant specifically for categorical
features only.
SVMSMOTE : Over-sample using SVM-SMOTE variant.
ADASYN : Over-sample using ADASYN.
KMeansSMOTE : Over-sample applying a clustering before to oversample using
SMOTE.
Notes
-----
Supports multi-class resampling by sampling each class independently.
Supports heterogeneous data as object array containing string and numeric
data.
When generating a smoothed bootstrap, this method is also known as Random
Over-Sampling Examples (ROSE) [1]_.
.. warning::
Since smoothed bootstrap are generated by adding a small perturbation
to the drawn samples, this method is not adequate when working with
sparse matrices.
References
----------
.. [1] G Menardi, N. Torelli, "Training and assessing classification
rules with imbalanced data," Data Mining and Knowledge
Discovery, 28(1), pp.92-122, 2014.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.over_sampling import \
RandomOverSampler # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> ros = RandomOverSampler(random_state=42)
>>> X_res, y_res = ros.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{0: 900, 1: 900}})
"""
@_deprecate_positional_args
def __init__(
self,
*,
sampling_strategy="auto",
random_state=None,
shrinkage=None,
):
super().__init__(sampling_strategy=sampling_strategy)
self.random_state = random_state
self.shrinkage = shrinkage
def _check_X_y(self, X, y):
y, binarize_y = check_target_type(y, indicate_one_vs_all=True)
X, y = self._validate_data(
X,
y,
reset=True,
accept_sparse=["csr", "csc"],
dtype=None,
force_all_finite=False,
)
return X, y, binarize_y
def _fit_resample(self, X, y):
random_state = check_random_state(self.random_state)
if isinstance(self.shrinkage, Real):
self.shrinkage_ = {
klass: self.shrinkage for klass in self.sampling_strategy_
}
elif self.shrinkage is None or isinstance(self.shrinkage, Mapping):
self.shrinkage_ = self.shrinkage
else:
raise ValueError(
f"`shrinkage` should either be a positive floating number or "
f"a dictionary mapping a class to a positive floating number. "
f"Got {repr(self.shrinkage)} instead."
)
if self.shrinkage_ is not None:
missing_shrinkage_keys = (
self.sampling_strategy_.keys() - self.shrinkage_.keys()
)
if missing_shrinkage_keys:
raise ValueError(
f"`shrinkage` should contain a shrinkage factor for "
f"each class that will be resampled. The missing "
f"classes are: {repr(missing_shrinkage_keys)}"
)
for klass, shrink_factor in self.shrinkage_.items():
if shrink_factor < 0:
raise ValueError(
f"The shrinkage factor needs to be >= 0. "
f"Got {shrink_factor} for class {klass}."
)
# smoothed bootstrap imposes to make numerical operation; we need
# to be sure to have only numerical data in X
try:
X = check_array(X, accept_sparse=["csr", "csc"], dtype="numeric")
except ValueError as exc:
raise ValueError(
"When shrinkage is not None, X needs to contain only "
"numerical data to later generate a smoothed bootstrap "
"sample."
) from exc
X_resampled = [X.copy()]
y_resampled = [y.copy()]
sample_indices = range(X.shape[0])
for class_sample, num_samples in self.sampling_strategy_.items():
target_class_indices = np.flatnonzero(y == class_sample)
bootstrap_indices = random_state.choice(
target_class_indices,
size=num_samples,
replace=True,
)
sample_indices = np.append(sample_indices, bootstrap_indices)
if self.shrinkage_ is not None:
# generate a smoothed bootstrap with a perturbation
n_samples, n_features = X.shape
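                # Silverman's rule-of-thumb constant; combined with the
                # per-feature standard deviation and the user-supplied
                # shrinkage factor it sets the scale of the Gaussian noise
                # added to each bootstrap sample below.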
smoothing_constant = (4 / ((n_features + 2) * n_samples)) ** (
1 / (n_features + 4)
)
if sparse.issparse(X):
_, X_class_variance = mean_variance_axis(
X[target_class_indices, :],
axis=0,
)
X_class_scale = np.sqrt(X_class_variance, out=X_class_variance)
else:
X_class_scale = np.std(X[target_class_indices, :], axis=0)
smoothing_matrix = np.diagflat(
self.shrinkage_[class_sample] * smoothing_constant * X_class_scale
)
X_new = random_state.randn(num_samples, n_features)
X_new = X_new.dot(smoothing_matrix) + X[bootstrap_indices, :]
if sparse.issparse(X):
X_new = sparse.csr_matrix(X_new, dtype=X.dtype)
X_resampled.append(X_new)
else:
# generate a bootstrap
X_resampled.append(_safe_indexing(X, bootstrap_indices))
y_resampled.append(_safe_indexing(y, bootstrap_indices))
self.sample_indices_ = np.array(sample_indices)
if sparse.issparse(X):
X_resampled = sparse.vstack(X_resampled, format=X.format)
else:
X_resampled = np.vstack(X_resampled)
y_resampled = np.hstack(y_resampled)
return X_resampled, y_resampled
def _more_tags(self):
return {
"X_types": ["2darray", "string", "sparse", "dataframe"],
"sample_indices": True,
"allow_nan": True,
}
| 36.37931
| 86
| 0.619484
|
bed6a4288f115ad24a427f13b486bf6820940a8d
| 8,959
|
py
|
Python
|
nrm/messaging.py
|
anlsys/nrm-python
|
c2655ba7e2b939d634094d10231891a92dcdc51d
|
[
"BSD-3-Clause"
] | 1
|
2021-09-25T20:11:25.000Z
|
2021-09-25T20:11:25.000Z
|
nrm/messaging.py
|
anlsys/nrm-python
|
c2655ba7e2b939d634094d10231891a92dcdc51d
|
[
"BSD-3-Clause"
] | 5
|
2021-10-01T19:32:16.000Z
|
2022-02-25T20:21:39.000Z
|
nrm/messaging.py
|
anlsys/nrm-python
|
c2655ba7e2b939d634094d10231891a92dcdc51d
|
[
"BSD-3-Clause"
] | null | null | null |
###############################################################################
# Copyright 2019 UChicago Argonne, LLC.
# (c.f. AUTHORS, LICENSE)
#
# This file is part of the NRM project.
# For more info, see https://github.com/anlsys/nrm-python
#
# SPDX-License-Identifier: BSD-3-Clause
###############################################################################
import json
import os
import logging
import yaml
import uuid
from jsonschema import Draft4Validator
import zmq
import warlock
import zmq.utils
import zmq.utils.monitor
from zmq.eventloop import zmqstream
_logger = logging.getLogger("nrm")
_jsonexts = ["json"]
_yamlexts = ["yml", "yaml"]
def loadschema(ext, api):
sourcedir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(sourcedir, "schemas", api + "." + ext)) as f:
if ext in _jsonexts:
s = json.load(f)
elif ext in _yamlexts:
            s = yaml.safe_load(f)
else:
raise ("Schema extension not in %s" % str(_jsonexts + _yamlexts))
Draft4Validator.check_schema(s)
return warlock.model_factory(s)
def send(apiname=None, cons=dict):
if apiname:
model = loadschema("json", apiname)
else:
model = None
def wrap(cls):
if model:
def send(self, *args, **kwargs):
self.socket.send(json.dumps(model(cons(*args, **kwargs))).encode())
else:
def send(self, msg):
try:
m = msg.encode()
except Exception:
m = msg
self.socket.send(m)
setattr(cls, "send", send)
def send_multi(self, msgs):
for msg in msgs:
send(self, msg)
setattr(cls, "send_multi", send_multi)
return cls
return wrap
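# send() is a class decorator factory: when an API name is given, the wrapped
# class gets a send() method that builds the message through the warlock model
# (validating it against the JSON schema) before serialising it onto the zmq
# socket; without an API name, send() forwards raw string/bytes payloads as-is.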
def recv_callback(apiname=None):
if apiname:
return recv_callback_api(apiname)
else:
return recv_callback_noapi()
def recv_callback_noapi():
def wrap(cls):
def recv(self):
wire = self.socket.recv()
_logger.info("received message: %r", wire)
return wire
def do_recv_callback(self, frames):
_logger.info("receiving message: %r", frames)
assert len(frames) == 2
msg = frames[1]
try:
identity = frames[0].decode()
except UnicodeDecodeError:
identity = "unassigned"
assert self.callback
self.callback(msg, identity)
def setup_recv_callback(self, callback):
self.stream = zmqstream.ZMQStream(self.socket)
self.callback = callback
self.stream.on_recv(self.do_recv_callback)
setattr(cls, "recv", recv)
setattr(cls, "do_recv_callback", do_recv_callback)
setattr(cls, "setup_recv_callback", setup_recv_callback)
return cls
return wrap
def recv_callback_api(apiname):
def wrap(cls):
model = loadschema("json", apiname)
def recv(self):
wire = self.socket.recv()
_logger.debug("received message: %r", wire)
return model(json.loads(wire))
def do_recv_callback(self, frames):
_logger.info("receiving message: %r", frames)
assert len(frames) == 2
msg = model(json.loads(frames[1]))
assert self.callback
self.callback(msg, str(frames[0]))
def setup_recv_callback(self, callback):
self.stream = zmqstream.ZMQStream(self.socket)
self.callback = callback
self.stream.on_recv(self.do_recv_callback)
setattr(cls, "recv", recv)
setattr(cls, "do_recv_callback", do_recv_callback)
setattr(cls, "setup_recv_callback", setup_recv_callback)
return cls
return wrap
class RPCClient(object):
"""Implements the message layer client to the upstream RPC API."""
def __init__(self, address):
self.address = address
self.uuid = str(uuid.uuid4())
self.zmq_context = zmq.Context.instance()
self.socket = self.zmq_context.socket(zmq.DEALER)
self.socket.setsockopt(zmq.IDENTITY, self.uuid.encode())
self.socket.setsockopt(zmq.SNDHWM, 0)
self.socket.setsockopt(zmq.RCVHWM, 0)
def connect(self, wait=True):
"""Connect, and wait for the socket to be connected."""
monitor = self.socket.get_monitor_socket()
self.socket.connect(self.address)
while wait:
msg = zmq.utils.monitor.recv_monitor_message(monitor)
_logger.debug("monitor message: %r", msg)
if int(msg["event"]) == zmq.EVENT_CONNECTED:
_logger.debug("socket connected")
break
self.socket.disable_monitor()
class RPCServer(object):
"""Implements the message layer server to the upstream RPC API."""
def __init__(self, address):
self.address = address
self.zmq_context = zmq.Context.instance()
self.socket = self.zmq_context.socket(zmq.ROUTER)
self.socket.setsockopt(zmq.SNDHWM, 0)
self.socket.setsockopt(zmq.RCVHWM, 0)
self.socket.bind(address)
@recv_callback()
class UpstreamRPCServer(RPCServer):
"""Implements the message layer server to the upstream RPC API."""
def send(self, client_uuid, msg):
"""Sends a message to the identified client."""
_logger.info("sending message: %r to client: %r", msg, client_uuid)
self.socket.send_multipart([client_uuid.encode(), msg.encode()])
@send()
class UpstreamPubServer(object):
"""Implements the message layer server for the upstream PUB/SUB API."""
def __init__(self, address):
self.address = address
self.zmq_context = zmq.Context.instance()
self.socket = self.zmq_context.socket(zmq.PUB)
self.socket.setsockopt(zmq.LINGER, 0)
self.socket.setsockopt(zmq.SNDHWM, 0)
self.socket.bind(address)
class UpstreamPubClient(object):
"""Implements the message layer client to the upstream Pub API."""
def __init__(self, address):
self.address = address
self.zmq_context = zmq.Context.instance()
self.socket = self.zmq_context.socket(zmq.SUB)
self.socket.setsockopt(zmq.RCVHWM, 0)
self.socket.setsockopt_string(zmq.SUBSCRIBE, "")
def connect(self, wait=True):
"""Creates a monitor socket and wait for the connect event."""
monitor = self.socket.get_monitor_socket()
self.socket.connect(self.address)
while wait:
msg = zmq.utils.monitor.recv_monitor_message(monitor)
_logger.debug("monitor message: %r", msg)
if int(msg["event"]) == zmq.EVENT_CONNECTED:
_logger.debug("socket connected")
break
self.socket.disable_monitor()
def recv(self):
"""Receives a message and returns it."""
frames = self.socket.recv_multipart()
_logger.info("received message: %r", frames)
assert len(frames) == 1
return frames[0]
def do_recv_callback(self, frames):
"""Receives a message from zmqstream.on_recv, passing it to a user
callback."""
_logger.info("receiving message: %r", frames)
assert len(frames) == 1
assert self.callback
self.callback(frames[0])
def setup_recv_callback(self, callback):
"""Setup a ioloop-backed callback for receiving messages."""
self.stream = zmqstream.ZMQStream(self.socket)
self.callback = callback
self.stream.on_recv(self.do_recv_callback)
class DownstreamEventServer(RPCServer):
"""Implements the message layer server for the downstream event API."""
def recv(self):
wire = self.socket.recv()
_logger.info("received message: %r", wire)
return wire
def do_recv_callback(self, frames):
_logger.info("receiving message: %r", frames)
assert len(frames) == 2
msg = frames[1]
try:
identity = frames[0].decode()
except UnicodeDecodeError:
identity = "unassigned"
assert self.callback
msg = json.loads(msg)
if "info" in msg and "threadProgress" in msg["info"]:
del msg["info"]["threadProgress"]["scopes"]
msg = json.dumps(msg)
self.callback(msg, identity)
def setup_recv_callback(self, callback):
self.stream = zmqstream.ZMQStream(self.socket)
self.callback = callback
self.stream.on_recv(self.do_recv_callback)
def downHeader(*args, **kwargs):
assert len(kwargs) == 2
assert len(args) == 0
ret = {"timestamp": kwargs.pop("timestamp")}
ret["info"] = kwargs
return ret
@send("downstreamEvent", downHeader)
class DownstreamEventClient(RPCClient):
    """Implements the message layer client for the downstream event API."""
| 30.164983
| 83
| 0.605983
|
a450943b704f941d37f7b29cad22608d1af12340
| 153,480
|
py
|
Python
|
moto/ec2/models.py
|
nijave/moto
|
c2a1f4eb144cd6149d14627ea559e3fa4e7b5ca5
|
[
"Apache-2.0"
] | null | null | null |
moto/ec2/models.py
|
nijave/moto
|
c2a1f4eb144cd6149d14627ea559e3fa4e7b5ca5
|
[
"Apache-2.0"
] | null | null | null |
moto/ec2/models.py
|
nijave/moto
|
c2a1f4eb144cd6149d14627ea559e3fa4e7b5ca5
|
[
"Apache-2.0"
] | 1
|
2019-02-01T02:07:56.000Z
|
2019-02-01T02:07:56.000Z
|
from __future__ import unicode_literals
import copy
import itertools
import ipaddress
import json
import os
import re
import six
import warnings
from pkg_resources import resource_filename
import boto.ec2
from collections import defaultdict
import weakref
from datetime import datetime
from boto.ec2.instance import Instance as BotoInstance, Reservation
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
from boto.ec2.spotinstancerequest import SpotInstanceRequest as BotoSpotRequest
from boto.ec2.launchspecification import LaunchSpecification
from moto.compat import OrderedDict
from moto.core import BaseBackend
from moto.core.models import Model, BaseModel
from moto.core.utils import iso_8601_datetime_with_milliseconds, camelcase_to_underscores
from .exceptions import (
CidrLimitExceeded,
DependencyViolationError,
EC2ClientError,
FilterNotImplementedError,
GatewayNotAttachedError,
InvalidAddressError,
InvalidAllocationIdError,
InvalidAMIIdError,
InvalidAMIAttributeItemValueError,
InvalidAssociationIdError,
InvalidCIDRSubnetError,
InvalidCustomerGatewayIdError,
InvalidDHCPOptionsIdError,
InvalidDomainError,
InvalidID,
InvalidInstanceIdError,
InvalidInternetGatewayIdError,
InvalidKeyPairDuplicateError,
InvalidKeyPairNameError,
InvalidNetworkAclIdError,
InvalidNetworkAttachmentIdError,
InvalidNetworkInterfaceIdError,
InvalidParameterValueError,
InvalidParameterValueErrorTagNull,
InvalidPermissionNotFoundError,
InvalidPermissionDuplicateError,
InvalidRouteTableIdError,
InvalidRouteError,
InvalidSecurityGroupDuplicateError,
InvalidSecurityGroupNotFoundError,
InvalidSnapshotIdError,
InvalidSubnetIdError,
InvalidVolumeIdError,
InvalidVolumeAttachmentError,
InvalidVpcCidrBlockAssociationIdError,
InvalidVPCPeeringConnectionIdError,
InvalidVPCPeeringConnectionStateTransitionError,
InvalidVPCIdError,
InvalidVpnGatewayIdError,
InvalidVpnConnectionIdError,
MalformedAMIIdError,
MalformedDHCPOptionsIdError,
MissingParameterError,
MotoNotImplementedError,
OperationNotPermitted,
ResourceAlreadyAssociatedError,
RulesPerSecurityGroupLimitExceededError,
TagLimitExceeded)
from .utils import (
EC2_RESOURCE_TO_PREFIX,
EC2_PREFIX_TO_RESOURCE,
random_ami_id,
random_dhcp_option_id,
random_eip_allocation_id,
random_eip_association_id,
random_eni_attach_id,
random_eni_id,
random_instance_id,
random_internet_gateway_id,
random_ip,
random_ipv6_cidr,
random_nat_gateway_id,
random_key_pair,
random_private_ip,
random_public_ip,
random_reservation_id,
random_route_table_id,
generate_route_id,
split_route_id,
random_security_group_id,
random_snapshot_id,
random_spot_fleet_request_id,
random_spot_request_id,
random_subnet_id,
random_subnet_association_id,
random_volume_id,
random_vpc_id,
random_vpc_cidr_association_id,
random_vpc_peering_connection_id,
generic_filter,
is_valid_resource_id,
get_prefix,
simple_aws_filter_to_re,
is_valid_cidr,
filter_internet_gateways,
filter_reservations,
random_network_acl_id,
random_network_acl_subnet_association_id,
random_vpn_gateway_id,
random_vpn_connection_id,
random_customer_gateway_id,
is_tag_filter,
tag_filter_matches,
)
INSTANCE_TYPES = json.load(
open(resource_filename(__name__, 'resources/instance_types.json'), 'r')
)
AMIS = json.load(
open(os.environ.get('MOTO_AMIS_PATH') or resource_filename(
__name__, 'resources/amis.json'), 'r')
)
def utc_date_and_time():
return datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z')
def validate_resource_ids(resource_ids):
for resource_id in resource_ids:
if not is_valid_resource_id(resource_id):
raise InvalidID(resource_id=resource_id)
return True
class InstanceState(object):
def __init__(self, name='pending', code=0):
self.name = name
self.code = code
class StateReason(object):
def __init__(self, message="", code=""):
self.message = message
self.code = code
class TaggedEC2Resource(BaseModel):
def get_tags(self, *args, **kwargs):
tags = self.ec2_backend.describe_tags(
filters={'resource-id': [self.id]})
return tags
def add_tag(self, key, value):
self.ec2_backend.create_tags([self.id], {key: value})
def add_tags(self, tag_map):
for key, value in tag_map.items():
self.ec2_backend.create_tags([self.id], {key: value})
def get_filter_value(self, filter_name, method_name=None):
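        # For example, a filter name of "tag:Name" returns the value of this
        # resource's "Name" tag (or '' if absent), while "tag-key"/"tag-value"
        # return the list of all tag keys/values; anything else raises
        # FilterNotImplementedError.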
tags = self.get_tags()
if filter_name.startswith('tag:'):
tagname = filter_name.replace('tag:', '', 1)
for tag in tags:
if tag['key'] == tagname:
return tag['value']
return ''
elif filter_name == 'tag-key':
return [tag['key'] for tag in tags]
elif filter_name == 'tag-value':
return [tag['value'] for tag in tags]
else:
raise FilterNotImplementedError(filter_name, method_name)
class NetworkInterface(TaggedEC2Resource):
def __init__(self, ec2_backend, subnet, private_ip_address, device_index=0,
public_ip_auto_assign=True, group_ids=None):
self.ec2_backend = ec2_backend
self.id = random_eni_id()
self.device_index = device_index
self.private_ip_address = private_ip_address
self.subnet = subnet
self.instance = None
self.attachment_id = None
self.public_ip = None
self.public_ip_auto_assign = public_ip_auto_assign
self.start()
self.attachments = []
# Local set to the ENI. When attached to an instance, @property group_set
# returns groups for both self and the attached instance.
self._group_set = []
group = None
if group_ids:
for group_id in group_ids:
group = self.ec2_backend.get_security_group_from_id(group_id)
if not group:
# Create with specific group ID.
group = SecurityGroup(
self.ec2_backend, group_id, group_id, group_id, vpc_id=subnet.vpc_id)
self.ec2_backend.groups[subnet.vpc_id][group_id] = group
if group:
self._group_set.append(group)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
security_group_ids = properties.get('SecurityGroups', [])
ec2_backend = ec2_backends[region_name]
subnet_id = properties.get('SubnetId')
if subnet_id:
subnet = ec2_backend.get_subnet(subnet_id)
else:
subnet = None
private_ip_address = properties.get('PrivateIpAddress', None)
network_interface = ec2_backend.create_network_interface(
subnet,
private_ip_address,
group_ids=security_group_ids
)
return network_interface
def stop(self):
if self.public_ip_auto_assign:
self.public_ip = None
def start(self):
self.check_auto_public_ip()
def check_auto_public_ip(self):
if self.public_ip_auto_assign:
self.public_ip = random_public_ip()
@property
def group_set(self):
if self.instance and self.instance.security_groups:
return set(self._group_set) | set(self.instance.security_groups)
else:
return self._group_set
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'PrimaryPrivateIpAddress':
return self.private_ip_address
elif attribute_name == 'SecondaryPrivateIpAddresses':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "SecondaryPrivateIpAddresses" ]"')
raise UnformattedGetAttTemplateException()
@property
def physical_resource_id(self):
return self.id
def get_filter_value(self, filter_name):
if filter_name == 'network-interface-id':
return self.id
elif filter_name in ('addresses.private-ip-address', 'private-ip-address'):
return self.private_ip_address
elif filter_name == 'subnet-id':
return self.subnet.id
elif filter_name == 'vpc-id':
return self.subnet.vpc_id
elif filter_name == 'group-id':
return [group.id for group in self._group_set]
elif filter_name == 'availability-zone':
return self.subnet.availability_zone
else:
return super(NetworkInterface, self).get_filter_value(
filter_name, 'DescribeNetworkInterfaces')
class NetworkInterfaceBackend(object):
def __init__(self):
self.enis = {}
super(NetworkInterfaceBackend, self).__init__()
def create_network_interface(self, subnet, private_ip_address, group_ids=None, **kwargs):
eni = NetworkInterface(
self, subnet, private_ip_address, group_ids=group_ids, **kwargs)
self.enis[eni.id] = eni
return eni
def get_network_interface(self, eni_id):
for eni in self.enis.values():
if eni_id == eni.id:
return eni
raise InvalidNetworkInterfaceIdError(eni_id)
def delete_network_interface(self, eni_id):
deleted = self.enis.pop(eni_id, None)
if not deleted:
raise InvalidNetworkInterfaceIdError(eni_id)
return deleted
def describe_network_interfaces(self, filters=None):
enis = self.enis.values()
if filters:
for (_filter, _filter_value) in filters.items():
if _filter == 'network-interface-id':
_filter = 'id'
enis = [eni for eni in enis if getattr(
eni, _filter) in _filter_value]
elif _filter == 'group-id':
original_enis = enis
enis = []
for eni in original_enis:
for group in eni.group_set:
if group.id in _filter_value:
enis.append(eni)
break
else:
self.raise_not_implemented_error(
"The filter '{0}' for DescribeNetworkInterfaces".format(_filter))
return enis
def attach_network_interface(self, eni_id, instance_id, device_index):
eni = self.get_network_interface(eni_id)
instance = self.get_instance(instance_id)
return instance.attach_eni(eni, device_index)
def detach_network_interface(self, attachment_id):
found_eni = None
for eni in self.enis.values():
if eni.attachment_id == attachment_id:
found_eni = eni
break
else:
raise InvalidNetworkAttachmentIdError(attachment_id)
found_eni.instance.detach_eni(found_eni)
def modify_network_interface_attribute(self, eni_id, group_id):
eni = self.get_network_interface(eni_id)
group = self.get_security_group_from_id(group_id)
eni._group_set = [group]
def get_all_network_interfaces(self, eni_ids=None, filters=None):
enis = self.enis.values()
if eni_ids:
enis = [eni for eni in enis if eni.id in eni_ids]
if len(enis) != len(eni_ids):
invalid_id = list(set(eni_ids).difference(
set([eni.id for eni in enis])))[0]
raise InvalidNetworkInterfaceIdError(invalid_id)
return generic_filter(filters, enis)
class Instance(TaggedEC2Resource, BotoInstance):
def __init__(self, ec2_backend, image_id, user_data, security_groups, **kwargs):
super(Instance, self).__init__()
self.ec2_backend = ec2_backend
self.id = random_instance_id()
self.image_id = image_id
self._state = InstanceState("running", 16)
self._reason = ""
self._state_reason = StateReason()
self.user_data = user_data
self.security_groups = security_groups
self.instance_type = kwargs.get("instance_type", "m1.small")
self.region_name = kwargs.get("region_name", "us-east-1")
placement = kwargs.get("placement", None)
self.vpc_id = None
self.subnet_id = kwargs.get("subnet_id")
in_ec2_classic = not bool(self.subnet_id)
self.key_name = kwargs.get("key_name")
self.ebs_optimized = kwargs.get("ebs_optimized", False)
self.source_dest_check = "true"
self.launch_time = utc_date_and_time()
self.disable_api_termination = kwargs.get("disable_api_termination", False)
self._spot_fleet_id = kwargs.get("spot_fleet_id", None)
associate_public_ip = kwargs.get("associate_public_ip", False)
if in_ec2_classic:
# If we are in EC2-Classic, autoassign a public IP
associate_public_ip = True
amis = self.ec2_backend.describe_images(filters={'image-id': image_id})
ami = amis[0] if amis else None
if ami is None:
warnings.warn('Could not find AMI with image-id:{0}, '
'in the near future this will '
'cause an error.\n'
                          'Use ec2_backend.describe_images() to '
'find suitable image for your test'.format(image_id),
PendingDeprecationWarning)
self.platform = ami.platform if ami else None
self.virtualization_type = ami.virtualization_type if ami else 'paravirtual'
self.architecture = ami.architecture if ami else 'x86_64'
# handle weird bug around user_data -- something grabs the repr(), so
# it must be clean
if isinstance(self.user_data, list) and len(self.user_data) > 0:
if six.PY3 and isinstance(self.user_data[0], six.binary_type):
# string will have a "b" prefix -- need to get rid of it
self.user_data[0] = self.user_data[0].decode('utf-8')
elif six.PY2 and isinstance(self.user_data[0], six.text_type):
# string will have a "u" prefix -- need to get rid of it
self.user_data[0] = self.user_data[0].encode('utf-8')
if self.subnet_id:
subnet = ec2_backend.get_subnet(self.subnet_id)
self.vpc_id = subnet.vpc_id
self._placement.zone = subnet.availability_zone
if associate_public_ip is None:
                # Mapping a public IP hasn't been explicitly enabled or disabled
associate_public_ip = subnet.map_public_ip_on_launch == 'true'
elif placement:
self._placement.zone = placement
else:
self._placement.zone = ec2_backend.region_name + 'a'
self.block_device_mapping = BlockDeviceMapping()
self._private_ips = set()
self.prep_nics(
kwargs.get("nics", {}),
private_ip=kwargs.get("private_ip"),
associate_public_ip=associate_public_ip
)
def __del__(self):
try:
subnet = self.ec2_backend.get_subnet(self.subnet_id)
for ip in self._private_ips:
subnet.del_subnet_ip(ip)
except Exception:
            # It's not "super" critical that we clean this up, as reset will do it;
            # worst case we'll get IP address exhaustion... rarely
pass
def setup_defaults(self):
# Default have an instance with root volume should you not wish to
# override with attach volume cmd.
volume = self.ec2_backend.create_volume(8, 'us-east-1a')
self.ec2_backend.attach_volume(volume.id, self.id, '/dev/sda1')
def teardown_defaults(self):
volume_id = self.block_device_mapping['/dev/sda1'].volume_id
self.ec2_backend.detach_volume(volume_id, self.id, '/dev/sda1')
self.ec2_backend.delete_volume(volume_id)
@property
def get_block_device_mapping(self):
return self.block_device_mapping.items()
@property
def private_ip(self):
return self.nics[0].private_ip_address
@property
def private_dns(self):
formatted_ip = self.private_ip.replace('.', '-')
if self.region_name == "us-east-1":
return "ip-{0}.ec2.internal".format(formatted_ip)
else:
return "ip-{0}.{1}.compute.internal".format(formatted_ip, self.region_name)
@property
def public_ip(self):
return self.nics[0].public_ip
@property
def public_dns(self):
if self.public_ip:
formatted_ip = self.public_ip.replace('.', '-')
if self.region_name == "us-east-1":
return "ec2-{0}.compute-1.amazonaws.com".format(formatted_ip)
else:
return "ec2-{0}.{1}.compute.amazonaws.com".format(formatted_ip, self.region_name)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
ec2_backend = ec2_backends[region_name]
security_group_ids = properties.get('SecurityGroups', [])
group_names = [ec2_backend.get_security_group_from_id(
group_id).name for group_id in security_group_ids]
reservation = ec2_backend.add_instances(
image_id=properties['ImageId'],
user_data=properties.get('UserData'),
count=1,
security_group_names=group_names,
instance_type=properties.get("InstanceType", "m1.small"),
subnet_id=properties.get("SubnetId"),
key_name=properties.get("KeyName"),
private_ip=properties.get('PrivateIpAddress'),
)
instance = reservation.instances[0]
for tag in properties.get("Tags", []):
instance.add_tag(tag["Key"], tag["Value"])
return instance
@classmethod
def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
ec2_backend = ec2_backends[region_name]
all_instances = ec2_backend.all_instances()
# the resource_name for instances is the stack name, logical id, and random suffix separated
# by hyphens. So to lookup the instances using the 'aws:cloudformation:logical-id' tag, we need to
# extract the logical-id from the resource_name
logical_id = resource_name.split('-')[1]
for instance in all_instances:
instance_tags = instance.get_tags()
for tag in instance_tags:
if tag['key'] == 'aws:cloudformation:logical-id' and tag['value'] == logical_id:
instance.delete(region_name)
@property
def physical_resource_id(self):
return self.id
def start(self, *args, **kwargs):
for nic in self.nics.values():
nic.start()
self._state.name = "running"
self._state.code = 16
self._reason = ""
self._state_reason = StateReason()
def stop(self, *args, **kwargs):
for nic in self.nics.values():
nic.stop()
self._state.name = "stopped"
self._state.code = 80
self._reason = "User initiated ({0})".format(
datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC'))
self._state_reason = StateReason("Client.UserInitiatedShutdown: User initiated shutdown",
"Client.UserInitiatedShutdown")
def delete(self, region):
self.terminate()
def terminate(self, *args, **kwargs):
for nic in self.nics.values():
nic.stop()
self.teardown_defaults()
if self._spot_fleet_id:
spot_fleet = self.ec2_backend.get_spot_fleet_request(self._spot_fleet_id)
for spec in spot_fleet.launch_specs:
if spec.instance_type == self.instance_type and spec.subnet_id == self.subnet_id:
break
spot_fleet.fulfilled_capacity -= spec.weighted_capacity
spot_fleet.spot_requests = [req for req in spot_fleet.spot_requests if req.instance != self]
self._state.name = "terminated"
self._state.code = 48
self._reason = "User initiated ({0})".format(
datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC'))
self._state_reason = StateReason("Client.UserInitiatedShutdown: User initiated shutdown",
"Client.UserInitiatedShutdown")
def reboot(self, *args, **kwargs):
self._state.name = "running"
self._state.code = 16
self._reason = ""
self._state_reason = StateReason()
@property
def dynamic_group_list(self):
if self.nics:
groups = []
for nic in self.nics.values():
for group in nic.group_set:
groups.append(group)
return groups
else:
return self.security_groups
def prep_nics(self, nic_spec, private_ip=None, associate_public_ip=None):
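        # Builds the instance's network interfaces: pick or request a private
        # IP from the subnet (or a random one in EC2-Classic), fold the
        # primary-NIC values into the spec when no explicit NIC spec was
        # given, then create or reuse each ENI and attach it at its device
        # index.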
self.nics = {}
if self.subnet_id:
subnet = self.ec2_backend.get_subnet(self.subnet_id)
if not private_ip:
private_ip = subnet.get_available_subnet_ip(instance=self)
else:
subnet.request_ip(private_ip, instance=self)
self._private_ips.add(private_ip)
elif private_ip is None:
# Preserve old behaviour if in EC2-Classic mode
private_ip = random_private_ip()
# Primary NIC defaults
primary_nic = {'SubnetId': self.subnet_id,
'PrivateIpAddress': private_ip,
'AssociatePublicIpAddress': associate_public_ip}
primary_nic = dict((k, v) for k, v in primary_nic.items() if v)
# If empty NIC spec but primary NIC values provided, create NIC from
# them.
if primary_nic and not nic_spec:
nic_spec[0] = primary_nic
nic_spec[0]['DeviceIndex'] = 0
# Flesh out data structures and associations
for nic in nic_spec.values():
device_index = int(nic.get('DeviceIndex'))
nic_id = nic.get('NetworkInterfaceId')
if nic_id:
# If existing NIC found, use it.
use_nic = self.ec2_backend.get_network_interface(nic_id)
use_nic.device_index = device_index
use_nic.public_ip_auto_assign = False
else:
# If primary NIC values provided, use them for the primary NIC.
if device_index == 0 and primary_nic:
nic.update(primary_nic)
if 'SubnetId' in nic:
subnet = self.ec2_backend.get_subnet(nic['SubnetId'])
else:
subnet = None
group_id = nic.get('SecurityGroupId')
group_ids = [group_id] if group_id else []
use_nic = self.ec2_backend.create_network_interface(subnet,
nic.get(
'PrivateIpAddress'),
device_index=device_index,
public_ip_auto_assign=nic.get(
'AssociatePublicIpAddress', False),
group_ids=group_ids)
self.attach_eni(use_nic, device_index)
def attach_eni(self, eni, device_index):
device_index = int(device_index)
self.nics[device_index] = eni
# This is used upon associate/disassociate public IP.
eni.instance = self
eni.attachment_id = random_eni_attach_id()
eni.device_index = device_index
return eni.attachment_id
def detach_eni(self, eni):
self.nics.pop(eni.device_index, None)
eni.instance = None
eni.attachment_id = None
eni.device_index = None
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'AvailabilityZone':
return self.placement
elif attribute_name == 'PrivateDnsName':
return self.private_dns
elif attribute_name == 'PublicDnsName':
return self.public_dns
elif attribute_name == 'PrivateIp':
return self.private_ip
elif attribute_name == 'PublicIp':
return self.public_ip
raise UnformattedGetAttTemplateException()
class InstanceBackend(object):
def __init__(self):
self.reservations = OrderedDict()
super(InstanceBackend, self).__init__()
def get_instance(self, instance_id):
for instance in self.all_instances():
if instance.id == instance_id:
return instance
raise InvalidInstanceIdError(instance_id)
def add_instances(self, image_id, count, user_data, security_group_names,
**kwargs):
new_reservation = Reservation()
new_reservation.id = random_reservation_id()
security_groups = [self.get_security_group_from_name(name)
for name in security_group_names]
security_groups.extend(self.get_security_group_from_id(sg_id)
for sg_id in kwargs.pop("security_group_ids", []))
self.reservations[new_reservation.id] = new_reservation
tags = kwargs.pop("tags", {})
instance_tags = tags.get('instance', {})
for index in range(count):
new_instance = Instance(
self,
image_id,
user_data,
security_groups,
**kwargs
)
new_reservation.instances.append(new_instance)
new_instance.add_tags(instance_tags)
new_instance.setup_defaults()
return new_reservation
def start_instances(self, instance_ids):
started_instances = []
for instance in self.get_multi_instances_by_id(instance_ids):
instance.start()
started_instances.append(instance)
return started_instances
def stop_instances(self, instance_ids):
stopped_instances = []
for instance in self.get_multi_instances_by_id(instance_ids):
instance.stop()
stopped_instances.append(instance)
return stopped_instances
def terminate_instances(self, instance_ids):
terminated_instances = []
if not instance_ids:
raise EC2ClientError(
"InvalidParameterCombination", "No instances specified")
for instance in self.get_multi_instances_by_id(instance_ids):
instance.terminate()
terminated_instances.append(instance)
return terminated_instances
def reboot_instances(self, instance_ids):
rebooted_instances = []
for instance in self.get_multi_instances_by_id(instance_ids):
instance.reboot()
rebooted_instances.append(instance)
return rebooted_instances
def modify_instance_attribute(self, instance_id, key, value):
instance = self.get_instance(instance_id)
setattr(instance, key, value)
return instance
def modify_instance_security_groups(self, instance_id, new_group_list):
instance = self.get_instance(instance_id)
setattr(instance, 'security_groups', new_group_list)
return instance
def describe_instance_attribute(self, instance_id, key):
if key == 'group_set':
key = 'security_groups'
instance = self.get_instance(instance_id)
value = getattr(instance, key)
return instance, value
def all_instances(self):
instances = []
for reservation in self.all_reservations():
for instance in reservation.instances:
instances.append(instance)
return instances
def all_running_instances(self):
instances = []
for reservation in self.all_reservations():
for instance in reservation.instances:
if instance.state_code == 16:
instances.append(instance)
return instances
def get_multi_instances_by_id(self, instance_ids):
"""
:param instance_ids: A string list with instance ids
:return: A list with instance objects
"""
result = []
for reservation in self.all_reservations():
for instance in reservation.instances:
if instance.id in instance_ids:
result.append(instance)
# TODO: Trim error message down to specific invalid id.
if instance_ids and len(instance_ids) > len(result):
raise InvalidInstanceIdError(instance_ids)
return result
def get_instance_by_id(self, instance_id):
for reservation in self.all_reservations():
for instance in reservation.instances:
if instance.id == instance_id:
return instance
def get_reservations_by_instance_ids(self, instance_ids, filters=None):
""" Go through all of the reservations and filter to only return those
associated with the given instance_ids.
"""
reservations = []
for reservation in self.all_reservations():
reservation_instance_ids = [
instance.id for instance in reservation.instances]
matching_reservation = any(
instance_id in reservation_instance_ids for instance_id in instance_ids)
if matching_reservation:
reservation.instances = [
instance for instance in reservation.instances if instance.id in instance_ids]
reservations.append(reservation)
found_instance_ids = [
instance.id for reservation in reservations for instance in reservation.instances]
if len(found_instance_ids) != len(instance_ids):
invalid_id = list(set(instance_ids).difference(
set(found_instance_ids)))[0]
raise InvalidInstanceIdError(invalid_id)
if filters is not None:
reservations = filter_reservations(reservations, filters)
return reservations
def all_reservations(self, filters=None):
reservations = [copy.copy(reservation) for reservation in self.reservations.values()]
if filters is not None:
reservations = filter_reservations(reservations, filters)
return reservations
class KeyPair(object):
def __init__(self, name, fingerprint, material):
self.name = name
self.fingerprint = fingerprint
self.material = material
def get_filter_value(self, filter_name):
if filter_name == 'key-name':
return self.name
elif filter_name == 'fingerprint':
return self.fingerprint
else:
raise FilterNotImplementedError(filter_name, 'DescribeKeyPairs')
class KeyPairBackend(object):
def __init__(self):
self.keypairs = {}
super(KeyPairBackend, self).__init__()
def create_key_pair(self, name):
if name in self.keypairs:
raise InvalidKeyPairDuplicateError(name)
keypair = KeyPair(name, **random_key_pair())
self.keypairs[name] = keypair
return keypair
def delete_key_pair(self, name):
if name in self.keypairs:
self.keypairs.pop(name)
return True
def describe_key_pairs(self, key_names=None, filters=None):
results = []
if key_names:
results = [keypair for keypair in self.keypairs.values()
if keypair.name in key_names]
if len(key_names) > len(results):
                unknown_keys = set(key_names) - set(keypair.name for keypair in results)
raise InvalidKeyPairNameError(unknown_keys)
else:
results = self.keypairs.values()
if filters:
return generic_filter(filters, results)
else:
return results
def import_key_pair(self, key_name, public_key_material):
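        # The supplied public key material is not parsed here; a random
        # fingerprint/material pair is generated, just as in create_key_pair.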
if key_name in self.keypairs:
raise InvalidKeyPairDuplicateError(key_name)
keypair = KeyPair(key_name, **random_key_pair())
self.keypairs[key_name] = keypair
return keypair
class TagBackend(object):
VALID_TAG_FILTERS = ['key',
'resource-id',
'resource-type',
'value']
VALID_TAG_RESOURCE_FILTER_TYPES = ['customer-gateway',
'dhcp-options',
'image',
'instance',
'internet-gateway',
'network-acl',
'network-interface',
'reserved-instances',
'route-table',
'security-group',
'snapshot',
'spot-instances-request',
'subnet',
'volume',
'vpc',
                                       'vpc-peering-connection',
'vpn-connection',
'vpn-gateway']
def __init__(self):
self.tags = defaultdict(dict)
super(TagBackend, self).__init__()
def create_tags(self, resource_ids, tags):
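        # A null tag value invalidates the whole request before anything is tagged.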
if None in set([tags[tag] for tag in tags]):
raise InvalidParameterValueErrorTagNull()
for resource_id in resource_ids:
if resource_id in self.tags:
if len(self.tags[resource_id]) + len([tag for tag in tags if not tag.startswith("aws:")]) > 50:
raise TagLimitExceeded()
elif len([tag for tag in tags if not tag.startswith("aws:")]) > 50:
raise TagLimitExceeded()
for resource_id in resource_ids:
for tag in tags:
self.tags[resource_id][tag] = tags[tag]
return True
def delete_tags(self, resource_ids, tags):
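        # A null value deletes the tag regardless of its stored value;
        # otherwise the tag is only removed on an exact value match.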
for resource_id in resource_ids:
for tag in tags:
if tag in self.tags[resource_id]:
if tags[tag] is None:
self.tags[resource_id].pop(tag)
elif tags[tag] == self.tags[resource_id][tag]:
self.tags[resource_id].pop(tag)
return True
def describe_tags(self, filters=None):
import re
results = []
key_filters = []
resource_id_filters = []
resource_type_filters = []
value_filters = []
if filters is not None:
for tag_filter in filters:
if tag_filter in self.VALID_TAG_FILTERS:
if tag_filter == 'key':
for value in filters[tag_filter]:
key_filters.append(re.compile(
simple_aws_filter_to_re(value)))
if tag_filter == 'resource-id':
for value in filters[tag_filter]:
resource_id_filters.append(
re.compile(simple_aws_filter_to_re(value)))
if tag_filter == 'resource-type':
for value in filters[tag_filter]:
resource_type_filters.append(value)
if tag_filter == 'value':
for value in filters[tag_filter]:
value_filters.append(re.compile(
simple_aws_filter_to_re(value)))
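        # Matching semantics: within each filter class (key, resource-id,
        # resource-type, value) any one match passes; across classes all must pass.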
for resource_id, tags in self.tags.items():
for key, value in tags.items():
add_result = False
if filters is None:
add_result = True
else:
key_pass = False
id_pass = False
type_pass = False
value_pass = False
if key_filters:
for pattern in key_filters:
if pattern.match(key) is not None:
key_pass = True
else:
key_pass = True
if resource_id_filters:
for pattern in resource_id_filters:
if pattern.match(resource_id) is not None:
id_pass = True
else:
id_pass = True
if resource_type_filters:
for resource_type in resource_type_filters:
if EC2_PREFIX_TO_RESOURCE[get_prefix(resource_id)] == resource_type:
type_pass = True
else:
type_pass = True
if value_filters:
for pattern in value_filters:
if pattern.match(value) is not None:
value_pass = True
else:
value_pass = True
if key_pass and id_pass and type_pass and value_pass:
add_result = True
                # If we're not filtering, or we are filtering and this tag
                # passed every filter class, include it in the results.
if add_result:
result = {
'resource_id': resource_id,
'key': key,
'value': value,
'resource_type': EC2_PREFIX_TO_RESOURCE[get_prefix(resource_id)],
}
results.append(result)
return results
class Ami(TaggedEC2Resource):
def __init__(self, ec2_backend, ami_id, instance=None, source_ami=None,
                 name=None, description=None, owner_id='111122223333',
public=False, virtualization_type=None, architecture=None,
state='available', creation_date=None, platform=None,
image_type='machine', image_location=None, hypervisor=None,
root_device_type='standard', root_device_name='/dev/sda1', sriov='simple',
region_name='us-east-1a'
):
self.ec2_backend = ec2_backend
self.id = ami_id
self.state = state
self.name = name
self.image_type = image_type
self.image_location = image_location
self.owner_id = owner_id
self.description = description
self.virtualization_type = virtualization_type
self.architecture = architecture
self.kernel_id = None
self.platform = platform
self.hypervisor = hypervisor
self.root_device_name = root_device_name
self.root_device_type = root_device_type
self.sriov = sriov
self.creation_date = utc_date_and_time() if creation_date is None else creation_date
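        # An AMI is registered either from a running instance (CreateImage),
        # copied from a source AMI (CopyImage), or loaded from the canned AMIS data.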
if instance:
self.instance = instance
self.instance_id = instance.id
self.virtualization_type = instance.virtualization_type
self.architecture = instance.architecture
self.kernel_id = instance.kernel
self.platform = instance.platform
elif source_ami:
"""
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html
"We don't copy launch permissions, user-defined tags, or Amazon S3 bucket permissions from the source AMI to the new AMI."
~ 2014.09.29
"""
self.virtualization_type = source_ami.virtualization_type
self.architecture = source_ami.architecture
self.kernel_id = source_ami.kernel_id
self.platform = source_ami.platform
if not name:
self.name = source_ami.name
if not description:
self.description = source_ami.description
self.launch_permission_groups = set()
self.launch_permission_users = set()
if public:
self.launch_permission_groups.add('all')
# AWS auto-creates these, we should reflect the same.
volume = self.ec2_backend.create_volume(15, region_name)
self.ebs_snapshot = self.ec2_backend.create_snapshot(
volume.id, "Auto-created snapshot for AMI %s" % self.id, owner_id)
self.ec2_backend.delete_volume(volume.id)
@property
def is_public(self):
return 'all' in self.launch_permission_groups
@property
def is_public_string(self):
return str(self.is_public).lower()
def get_filter_value(self, filter_name):
if filter_name == 'virtualization-type':
return self.virtualization_type
elif filter_name == 'kernel-id':
return self.kernel_id
elif filter_name in ['architecture', 'platform']:
return getattr(self, filter_name)
elif filter_name == 'image-id':
return self.id
elif filter_name == 'is-public':
return self.is_public_string
elif filter_name == 'state':
return self.state
elif filter_name == 'name':
return self.name
elif filter_name == 'owner-id':
return self.owner_id
else:
return super(Ami, self).get_filter_value(
filter_name, 'DescribeImages')
class AmiBackend(object):
AMI_REGEX = re.compile("ami-[a-z0-9]+")
def __init__(self):
self.amis = {}
self._load_amis()
super(AmiBackend, self).__init__()
def _load_amis(self):
for ami in AMIS:
ami_id = ami['ami_id']
self.amis[ami_id] = Ami(self, **ami)
def create_image(self, instance_id, name=None, description=None, context=None):
# TODO: check that instance exists and pull info from it.
ami_id = random_ami_id()
instance = self.get_instance(instance_id)
ami = Ami(self, ami_id, instance=instance, source_ami=None,
name=name, description=description,
owner_id=context.get_current_user() if context else '111122223333')
self.amis[ami_id] = ami
return ami
def copy_image(self, source_image_id, source_region, name=None, description=None):
source_ami = ec2_backends[source_region].describe_images(
ami_ids=[source_image_id])[0]
ami_id = random_ami_id()
ami = Ami(self, ami_id, instance=None, source_ami=source_ami,
name=name, description=description)
self.amis[ami_id] = ami
return ami
def describe_images(self, ami_ids=(), filters=None, exec_users=None, owners=None,
context=None):
images = self.amis.values()
if len(ami_ids):
# boto3 seems to default to just searching based on ami ids if that parameter is passed
            # and if no images are found, it raises an error
malformed_ami_ids = [ami_id for ami_id in ami_ids if not ami_id.startswith('ami-')]
if malformed_ami_ids:
raise MalformedAMIIdError(malformed_ami_ids)
images = [ami for ami in images if ami.id in ami_ids]
if len(images) == 0:
raise InvalidAMIIdError(ami_ids)
else:
# Limit images by launch permissions
if exec_users:
tmp_images = []
for ami in images:
for user_id in exec_users:
if user_id in ami.launch_permission_users:
tmp_images.append(ami)
images = tmp_images
# Limit by owner ids
if owners:
# support filtering by Owners=['self']
owners = list(map(
lambda o: context.get_current_user()
if context and o == 'self' else o,
owners))
images = [ami for ami in images if ami.owner_id in owners]
# Generic filters
if filters:
return generic_filter(filters, images)
return images
def deregister_image(self, ami_id):
if ami_id in self.amis:
self.amis.pop(ami_id)
return True
raise InvalidAMIIdError(ami_id)
def get_launch_permission_groups(self, ami_id):
ami = self.describe_images(ami_ids=[ami_id])[0]
return ami.launch_permission_groups
def get_launch_permission_users(self, ami_id):
ami = self.describe_images(ami_ids=[ami_id])[0]
return ami.launch_permission_users
def validate_permission_targets(self, user_ids=None, group=None):
# If anything is invalid, nothing is added. (No partial success.)
if user_ids:
"""
AWS docs:
"The AWS account ID is a 12-digit number, such as 123456789012, that you use to construct Amazon Resource Names (ARNs)."
http://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html
"""
for user_id in user_ids:
if len(user_id) != 12 or not user_id.isdigit():
raise InvalidAMIAttributeItemValueError("userId", user_id)
if group and group != 'all':
raise InvalidAMIAttributeItemValueError("UserGroup", group)
def add_launch_permission(self, ami_id, user_ids=None, group=None):
ami = self.describe_images(ami_ids=[ami_id])[0]
self.validate_permission_targets(user_ids=user_ids, group=group)
if user_ids:
for user_id in user_ids:
ami.launch_permission_users.add(user_id)
if group:
ami.launch_permission_groups.add(group)
return True
def remove_launch_permission(self, ami_id, user_ids=None, group=None):
ami = self.describe_images(ami_ids=[ami_id])[0]
self.validate_permission_targets(user_ids=user_ids, group=group)
if user_ids:
for user_id in user_ids:
ami.launch_permission_users.discard(user_id)
if group:
ami.launch_permission_groups.discard(group)
return True
class Region(object):
def __init__(self, name, endpoint):
self.name = name
self.endpoint = endpoint
class Zone(object):
def __init__(self, name, region_name):
self.name = name
self.region_name = region_name
class RegionsAndZonesBackend(object):
regions = [Region(ri.name, ri.endpoint) for ri in boto.ec2.regions()]
zones = dict(
(region, [Zone(region + c, region) for c in 'abc'])
for region in [r.name for r in regions])
    def describe_regions(self, region_names=None):
        if not region_names:
            return self.regions
ret = []
for name in region_names:
for region in self.regions:
if region.name == name:
ret.append(region)
return ret
def describe_availability_zones(self):
return self.zones[self.region_name]
def get_zone_by_name(self, name):
for zone in self.zones[self.region_name]:
if zone.name == name:
return zone
class SecurityRule(object):
def __init__(self, ip_protocol, from_port, to_port, ip_ranges, source_groups):
self.ip_protocol = ip_protocol
self.from_port = from_port
self.to_port = to_port
self.ip_ranges = ip_ranges or []
self.source_groups = source_groups
@property
def unique_representation(self):
return "{0}-{1}-{2}-{3}-{4}".format(
self.ip_protocol,
self.from_port,
self.to_port,
self.ip_ranges,
self.source_groups
)
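    # Equality is based on this string form (see __eq__ below), which is how
    # duplicate rules are detected on authorize and matching rules found on revoke.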
def __eq__(self, other):
return self.unique_representation == other.unique_representation
class SecurityGroup(TaggedEC2Resource):
def __init__(self, ec2_backend, group_id, name, description, vpc_id=None):
self.ec2_backend = ec2_backend
self.id = group_id
self.name = name
self.description = description
self.ingress_rules = []
self.egress_rules = [SecurityRule(-1, None, None, ['0.0.0.0/0'], [])]
self.enis = {}
self.vpc_id = vpc_id
self.owner_id = "123456789012"
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
ec2_backend = ec2_backends[region_name]
vpc_id = properties.get('VpcId')
security_group = ec2_backend.create_security_group(
name=resource_name,
description=properties.get('GroupDescription'),
vpc_id=vpc_id,
)
for tag in properties.get("Tags", []):
tag_key = tag["Key"]
tag_value = tag["Value"]
security_group.add_tag(tag_key, tag_value)
for ingress_rule in properties.get('SecurityGroupIngress', []):
source_group_id = ingress_rule.get('SourceSecurityGroupId')
ec2_backend.authorize_security_group_ingress(
group_name_or_id=security_group.id,
ip_protocol=ingress_rule['IpProtocol'],
from_port=ingress_rule['FromPort'],
to_port=ingress_rule['ToPort'],
ip_ranges=ingress_rule.get('CidrIp'),
source_group_ids=[source_group_id],
vpc_id=vpc_id,
)
return security_group
@classmethod
def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
cls._delete_security_group_given_vpc_id(
original_resource.name, original_resource.vpc_id, region_name)
return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)
@classmethod
def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
vpc_id = properties.get('VpcId')
cls._delete_security_group_given_vpc_id(
resource_name, vpc_id, region_name)
@classmethod
def _delete_security_group_given_vpc_id(cls, resource_name, vpc_id, region_name):
ec2_backend = ec2_backends[region_name]
security_group = ec2_backend.get_security_group_from_name(
resource_name, vpc_id)
if security_group:
security_group.delete(region_name)
def delete(self, region_name):
        ''' Not exposed as part of the EC2 API - used for CloudFormation. '''
self.ec2_backend.delete_security_group(group_id=self.id)
@property
def physical_resource_id(self):
return self.id
def matches_filter(self, key, filter_value):
def to_attr(filter_name):
attr = None
if filter_name == 'group-name':
attr = 'name'
elif filter_name == 'group-id':
attr = 'id'
elif filter_name == 'vpc-id':
attr = 'vpc_id'
else:
attr = filter_name.replace('-', '_')
return attr
if key.startswith('ip-permission'):
            match = re.search(r"ip-permission\.(.*)", key)
ingress_attr = to_attr(match.groups()[0])
for ingress in self.ingress_rules:
if getattr(ingress, ingress_attr) in filter_value:
return True
elif is_tag_filter(key):
tag_value = self.get_filter_value(key)
if isinstance(filter_value, list):
return tag_filter_matches(self, key, filter_value)
return tag_value in filter_value
else:
attr_name = to_attr(key)
return getattr(self, attr_name) in filter_value
return False
def matches_filters(self, filters):
for key, value in filters.items():
if not self.matches_filter(key, value):
return False
return True
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'GroupId':
return self.id
raise UnformattedGetAttTemplateException()
def add_ingress_rule(self, rule):
if rule in self.ingress_rules:
raise InvalidPermissionDuplicateError()
else:
self.ingress_rules.append(rule)
def add_egress_rule(self, rule):
self.egress_rules.append(rule)
def get_number_of_ingress_rules(self):
return sum(
len(rule.ip_ranges) + len(rule.source_groups)
for rule in self.ingress_rules)
def get_number_of_egress_rules(self):
return sum(
len(rule.ip_ranges) + len(rule.source_groups)
for rule in self.egress_rules)
class SecurityGroupBackend(object):
def __init__(self):
        # The key of the groups dict is the vpc_id, or None for non-VPC groups
self.groups = defaultdict(dict)
# Create the default security group
self.create_security_group("default", "default group")
super(SecurityGroupBackend, self).__init__()
def create_security_group(self, name, description, vpc_id=None, force=False):
if not description:
raise MissingParameterError('GroupDescription')
group_id = random_security_group_id()
if not force:
existing_group = self.get_security_group_from_name(name, vpc_id)
if existing_group:
raise InvalidSecurityGroupDuplicateError(name)
group = SecurityGroup(self, group_id, name, description, vpc_id=vpc_id)
self.groups[vpc_id][group_id] = group
return group
def describe_security_groups(self, group_ids=None, groupnames=None, filters=None):
matches = itertools.chain(*[x.values()
for x in self.groups.values()])
if group_ids:
matches = [grp for grp in matches
if grp.id in group_ids]
if len(group_ids) > len(matches):
                unknown_ids = set(group_ids) - set(grp.id for grp in matches)
raise InvalidSecurityGroupNotFoundError(unknown_ids)
if groupnames:
matches = [grp for grp in matches
if grp.name in groupnames]
if len(groupnames) > len(matches):
                unknown_names = set(groupnames) - set(grp.name for grp in matches)
raise InvalidSecurityGroupNotFoundError(unknown_names)
if filters:
matches = [grp for grp in matches
if grp.matches_filters(filters)]
return matches
def _delete_security_group(self, vpc_id, group_id):
if self.groups[vpc_id][group_id].enis:
raise DependencyViolationError(
"{0} is being utilized by {1}".format(group_id, 'ENIs'))
return self.groups[vpc_id].pop(group_id)
def delete_security_group(self, name=None, group_id=None):
if group_id:
# loop over all the SGs, find the right one
for vpc_id, groups in self.groups.items():
if group_id in groups:
return self._delete_security_group(vpc_id, group_id)
raise InvalidSecurityGroupNotFoundError(group_id)
elif name:
            # A group name can only identify a security group in standard (non-VPC)
            # EC2; VPC security groups must be identified by group_id.
group = self.get_security_group_from_name(name)
if group:
return self._delete_security_group(None, group.id)
raise InvalidSecurityGroupNotFoundError(name)
def get_security_group_from_id(self, group_id):
# 2 levels of chaining necessary since it's a complex structure
all_groups = itertools.chain.from_iterable(
[x.values() for x in self.groups.values()])
for group in all_groups:
if group.id == group_id:
return group
def get_security_group_from_name(self, name, vpc_id=None):
for group_id, group in self.groups[vpc_id].items():
if group.name == name:
return group
def get_security_group_by_name_or_id(self, group_name_or_id, vpc_id):
        # Try searching by id first, falling back to a search by name
group = self.get_security_group_from_id(group_name_or_id)
if group is None:
group = self.get_security_group_from_name(group_name_or_id, vpc_id)
return group
def authorize_security_group_ingress(self,
group_name_or_id,
ip_protocol,
from_port,
to_port,
ip_ranges,
source_group_names=None,
source_group_ids=None,
vpc_id=None):
group = self.get_security_group_by_name_or_id(group_name_or_id, vpc_id)
if ip_ranges and not isinstance(ip_ranges, list):
ip_ranges = [ip_ranges]
if ip_ranges:
for cidr in ip_ranges:
if not is_valid_cidr(cidr):
raise InvalidCIDRSubnetError(cidr=cidr)
self._verify_group_will_respect_rule_count_limit(
group, group.get_number_of_ingress_rules(),
ip_ranges, source_group_names, source_group_ids)
source_group_names = source_group_names if source_group_names else []
source_group_ids = source_group_ids if source_group_ids else []
source_groups = []
for source_group_name in source_group_names:
source_group = self.get_security_group_from_name(
source_group_name, vpc_id)
if source_group:
source_groups.append(source_group)
# for VPCs
for source_group_id in source_group_ids:
source_group = self.get_security_group_from_id(source_group_id)
if source_group:
source_groups.append(source_group)
security_rule = SecurityRule(
ip_protocol, from_port, to_port, ip_ranges, source_groups)
group.add_ingress_rule(security_rule)
def revoke_security_group_ingress(self,
group_name_or_id,
ip_protocol,
from_port,
to_port,
ip_ranges,
source_group_names=None,
source_group_ids=None,
vpc_id=None):
group = self.get_security_group_by_name_or_id(group_name_or_id, vpc_id)
source_groups = []
for source_group_name in source_group_names:
source_group = self.get_security_group_from_name(
source_group_name, vpc_id)
if source_group:
source_groups.append(source_group)
for source_group_id in source_group_ids:
source_group = self.get_security_group_from_id(source_group_id)
if source_group:
source_groups.append(source_group)
security_rule = SecurityRule(
ip_protocol, from_port, to_port, ip_ranges, source_groups)
if security_rule in group.ingress_rules:
group.ingress_rules.remove(security_rule)
return security_rule
raise InvalidPermissionNotFoundError()
def authorize_security_group_egress(self,
group_name_or_id,
ip_protocol,
from_port,
to_port,
ip_ranges,
source_group_names=None,
source_group_ids=None,
vpc_id=None):
group = self.get_security_group_by_name_or_id(group_name_or_id, vpc_id)
if ip_ranges and not isinstance(ip_ranges, list):
ip_ranges = [ip_ranges]
if ip_ranges:
for cidr in ip_ranges:
if not is_valid_cidr(cidr):
raise InvalidCIDRSubnetError(cidr=cidr)
self._verify_group_will_respect_rule_count_limit(
group, group.get_number_of_egress_rules(),
ip_ranges, source_group_names, source_group_ids)
source_group_names = source_group_names if source_group_names else []
source_group_ids = source_group_ids if source_group_ids else []
source_groups = []
for source_group_name in source_group_names:
source_group = self.get_security_group_from_name(
source_group_name, vpc_id)
if source_group:
source_groups.append(source_group)
# for VPCs
for source_group_id in source_group_ids:
source_group = self.get_security_group_from_id(source_group_id)
if source_group:
source_groups.append(source_group)
security_rule = SecurityRule(
ip_protocol, from_port, to_port, ip_ranges, source_groups)
group.add_egress_rule(security_rule)
def revoke_security_group_egress(self,
group_name_or_id,
ip_protocol,
from_port,
to_port,
ip_ranges,
source_group_names=None,
source_group_ids=None,
vpc_id=None):
group = self.get_security_group_by_name_or_id(group_name_or_id, vpc_id)
source_groups = []
for source_group_name in source_group_names:
source_group = self.get_security_group_from_name(
source_group_name, vpc_id)
if source_group:
source_groups.append(source_group)
for source_group_id in source_group_ids:
source_group = self.get_security_group_from_id(source_group_id)
if source_group:
source_groups.append(source_group)
security_rule = SecurityRule(
ip_protocol, from_port, to_port, ip_ranges, source_groups)
if security_rule in group.egress_rules:
group.egress_rules.remove(security_rule)
return security_rule
raise InvalidPermissionNotFoundError()
def _verify_group_will_respect_rule_count_limit(
self, group, current_rule_nb,
ip_ranges, source_group_names=None, source_group_ids=None):
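        # Mirrors the AWS limits modelled here: 50 rules for VPC security groups,
        # 100 for EC2-Classic groups.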
max_nb_rules = 50 if group.vpc_id else 100
future_group_nb_rules = current_rule_nb
if ip_ranges:
future_group_nb_rules += len(ip_ranges)
if source_group_ids:
future_group_nb_rules += len(source_group_ids)
if source_group_names:
future_group_nb_rules += len(source_group_names)
if future_group_nb_rules > max_nb_rules:
raise RulesPerSecurityGroupLimitExceededError
class SecurityGroupIngress(object):
def __init__(self, security_group, properties):
self.security_group = security_group
self.properties = properties
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
ec2_backend = ec2_backends[region_name]
group_name = properties.get('GroupName')
group_id = properties.get('GroupId')
ip_protocol = properties.get("IpProtocol")
cidr_ip = properties.get("CidrIp")
cidr_ipv6 = properties.get("CidrIpv6")
from_port = properties.get("FromPort")
source_security_group_id = properties.get("SourceSecurityGroupId")
source_security_group_name = properties.get("SourceSecurityGroupName")
# source_security_owner_id =
# properties.get("SourceSecurityGroupOwnerId") # IGNORED AT THE MOMENT
to_port = properties.get("ToPort")
assert group_id or group_name
assert source_security_group_name or cidr_ip or cidr_ipv6 or source_security_group_id
assert ip_protocol
if source_security_group_id:
source_security_group_ids = [source_security_group_id]
else:
source_security_group_ids = None
if source_security_group_name:
source_security_group_names = [source_security_group_name]
else:
source_security_group_names = None
if cidr_ip:
ip_ranges = [cidr_ip]
else:
ip_ranges = []
if group_id:
security_group = ec2_backend.describe_security_groups(group_ids=[group_id])[
0]
else:
security_group = ec2_backend.describe_security_groups(
groupnames=[group_name])[0]
ec2_backend.authorize_security_group_ingress(
group_name_or_id=security_group.id,
ip_protocol=ip_protocol,
from_port=from_port,
to_port=to_port,
ip_ranges=ip_ranges,
source_group_ids=source_security_group_ids,
source_group_names=source_security_group_names,
)
return cls(security_group, properties)
class VolumeAttachment(object):
def __init__(self, volume, instance, device, status):
self.volume = volume
self.attach_time = utc_date_and_time()
self.instance = instance
self.device = device
self.status = status
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
instance_id = properties['InstanceId']
volume_id = properties['VolumeId']
ec2_backend = ec2_backends[region_name]
attachment = ec2_backend.attach_volume(
volume_id=volume_id,
instance_id=instance_id,
device_path=properties['Device'],
)
return attachment
class Volume(TaggedEC2Resource):
def __init__(self, ec2_backend, volume_id, size, zone, snapshot_id=None, encrypted=False):
self.id = volume_id
self.size = size
self.zone = zone
self.create_time = utc_date_and_time()
self.attachment = None
self.snapshot_id = snapshot_id
self.ec2_backend = ec2_backend
self.encrypted = encrypted
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
ec2_backend = ec2_backends[region_name]
volume = ec2_backend.create_volume(
size=properties.get('Size'),
zone_name=properties.get('AvailabilityZone'),
)
return volume
@property
def physical_resource_id(self):
return self.id
@property
def status(self):
if self.attachment:
return 'in-use'
else:
return 'available'
def get_filter_value(self, filter_name):
if filter_name.startswith('attachment') and not self.attachment:
return None
elif filter_name == 'attachment.attach-time':
return self.attachment.attach_time
elif filter_name == 'attachment.device':
return self.attachment.device
elif filter_name == 'attachment.instance-id':
return self.attachment.instance.id
elif filter_name == 'attachment.status':
return self.attachment.status
elif filter_name == 'create-time':
return self.create_time
elif filter_name == 'size':
return self.size
elif filter_name == 'snapshot-id':
return self.snapshot_id
elif filter_name == 'status':
return self.status
elif filter_name == 'volume-id':
return self.id
elif filter_name == 'encrypted':
return str(self.encrypted).lower()
elif filter_name == 'availability-zone':
return self.zone.name
else:
return super(Volume, self).get_filter_value(
filter_name, 'DescribeVolumes')
class Snapshot(TaggedEC2Resource):
def __init__(self, ec2_backend, snapshot_id, volume, description, encrypted=False, owner_id='123456789012'):
self.id = snapshot_id
self.volume = volume
self.description = description
self.start_time = utc_date_and_time()
self.create_volume_permission_groups = set()
self.ec2_backend = ec2_backend
self.status = 'completed'
self.encrypted = encrypted
self.owner_id = owner_id
def get_filter_value(self, filter_name):
if filter_name == 'description':
return self.description
elif filter_name == 'snapshot-id':
return self.id
elif filter_name == 'start-time':
return self.start_time
elif filter_name == 'volume-id':
return self.volume.id
elif filter_name == 'volume-size':
return self.volume.size
elif filter_name == 'encrypted':
return str(self.encrypted).lower()
elif filter_name == 'status':
return self.status
else:
return super(Snapshot, self).get_filter_value(
filter_name, 'DescribeSnapshots')
class EBSBackend(object):
def __init__(self):
self.volumes = {}
self.attachments = {}
self.snapshots = {}
super(EBSBackend, self).__init__()
def create_volume(self, size, zone_name, snapshot_id=None, encrypted=False):
volume_id = random_volume_id()
zone = self.get_zone_by_name(zone_name)
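        # A volume restored from a snapshot inherits the snapshot's size when no
        # size is given and is encrypted whenever the snapshot is encrypted.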
if snapshot_id:
snapshot = self.get_snapshot(snapshot_id)
if size is None:
size = snapshot.volume.size
if snapshot.encrypted:
encrypted = snapshot.encrypted
volume = Volume(self, volume_id, size, zone, snapshot_id, encrypted)
self.volumes[volume_id] = volume
return volume
def describe_volumes(self, volume_ids=None, filters=None):
matches = self.volumes.values()
if volume_ids:
matches = [vol for vol in matches
if vol.id in volume_ids]
if len(volume_ids) > len(matches):
                unknown_ids = set(volume_ids) - set(vol.id for vol in matches)
raise InvalidVolumeIdError(unknown_ids)
if filters:
matches = generic_filter(filters, matches)
return matches
def get_volume(self, volume_id):
volume = self.volumes.get(volume_id, None)
if not volume:
raise InvalidVolumeIdError(volume_id)
return volume
def delete_volume(self, volume_id):
if volume_id in self.volumes:
return self.volumes.pop(volume_id)
raise InvalidVolumeIdError(volume_id)
def attach_volume(self, volume_id, instance_id, device_path):
volume = self.get_volume(volume_id)
instance = self.get_instance(instance_id)
if not volume or not instance:
return False
volume.attachment = VolumeAttachment(
volume, instance, device_path, 'attached')
# Modify instance to capture mount of block device.
bdt = BlockDeviceType(volume_id=volume_id, status=volume.status, size=volume.size,
attach_time=utc_date_and_time())
instance.block_device_mapping[device_path] = bdt
return volume.attachment
def detach_volume(self, volume_id, instance_id, device_path):
volume = self.get_volume(volume_id)
self.get_instance(instance_id)
old_attachment = volume.attachment
if not old_attachment:
raise InvalidVolumeAttachmentError(volume_id, instance_id)
old_attachment.status = 'detached'
volume.attachment = None
return old_attachment
def create_snapshot(self, volume_id, description, owner_id=None):
snapshot_id = random_snapshot_id()
volume = self.get_volume(volume_id)
params = [self, snapshot_id, volume, description, volume.encrypted]
if owner_id:
params.append(owner_id)
snapshot = Snapshot(*params)
self.snapshots[snapshot_id] = snapshot
return snapshot
def describe_snapshots(self, snapshot_ids=None, filters=None):
matches = self.snapshots.values()
if snapshot_ids:
matches = [snap for snap in matches
if snap.id in snapshot_ids]
if len(snapshot_ids) > len(matches):
                unknown_ids = set(snapshot_ids) - set(snap.id for snap in matches)
raise InvalidSnapshotIdError(unknown_ids)
if filters:
matches = generic_filter(filters, matches)
return matches
def copy_snapshot(self, source_snapshot_id, source_region, description=None):
source_snapshot = ec2_backends[source_region].describe_snapshots(
snapshot_ids=[source_snapshot_id])[0]
snapshot_id = random_snapshot_id()
snapshot = Snapshot(self, snapshot_id, volume=source_snapshot.volume,
description=description, encrypted=source_snapshot.encrypted)
self.snapshots[snapshot_id] = snapshot
return snapshot
def get_snapshot(self, snapshot_id):
snapshot = self.snapshots.get(snapshot_id, None)
if not snapshot:
raise InvalidSnapshotIdError(snapshot_id)
return snapshot
def delete_snapshot(self, snapshot_id):
if snapshot_id in self.snapshots:
return self.snapshots.pop(snapshot_id)
raise InvalidSnapshotIdError(snapshot_id)
def get_create_volume_permission_groups(self, snapshot_id):
snapshot = self.get_snapshot(snapshot_id)
return snapshot.create_volume_permission_groups
def add_create_volume_permission(self, snapshot_id, user_id=None, group=None):
if user_id:
self.raise_not_implemented_error(
"The UserId parameter for ModifySnapshotAttribute")
if group != 'all':
raise InvalidAMIAttributeItemValueError("UserGroup", group)
snapshot = self.get_snapshot(snapshot_id)
snapshot.create_volume_permission_groups.add(group)
return True
def remove_create_volume_permission(self, snapshot_id, user_id=None, group=None):
if user_id:
self.raise_not_implemented_error(
"The UserId parameter for ModifySnapshotAttribute")
if group != 'all':
raise InvalidAMIAttributeItemValueError("UserGroup", group)
snapshot = self.get_snapshot(snapshot_id)
snapshot.create_volume_permission_groups.discard(group)
return True
class VPC(TaggedEC2Resource):
def __init__(self, ec2_backend, vpc_id, cidr_block, is_default, instance_tenancy='default',
amazon_provided_ipv6_cidr_block=False):
self.ec2_backend = ec2_backend
self.id = vpc_id
self.cidr_block = cidr_block
self.cidr_block_association_set = {}
self.dhcp_options = None
self.state = 'available'
self.instance_tenancy = instance_tenancy
self.is_default = 'true' if is_default else 'false'
self.enable_dns_support = 'true'
# This attribute is set to 'true' only for default VPCs
# or VPCs created using the wizard of the VPC console
self.enable_dns_hostnames = 'true' if is_default else 'false'
self.associate_vpc_cidr_block(cidr_block)
if amazon_provided_ipv6_cidr_block:
self.associate_vpc_cidr_block(cidr_block, amazon_provided_ipv6_cidr_block=amazon_provided_ipv6_cidr_block)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
ec2_backend = ec2_backends[region_name]
vpc = ec2_backend.create_vpc(
cidr_block=properties['CidrBlock'],
instance_tenancy=properties.get('InstanceTenancy', 'default')
)
for tag in properties.get("Tags", []):
tag_key = tag["Key"]
tag_value = tag["Value"]
vpc.add_tag(tag_key, tag_value)
return vpc
@property
def physical_resource_id(self):
return self.id
def get_filter_value(self, filter_name):
if filter_name in ('vpc-id', 'vpcId'):
return self.id
elif filter_name in ('cidr', 'cidr-block', 'cidrBlock'):
return self.cidr_block
elif filter_name in ('cidr-block-association.cidr-block', 'ipv6-cidr-block-association.ipv6-cidr-block'):
return [c['cidr_block'] for c in self.get_cidr_block_association_set(ipv6='ipv6' in filter_name)]
elif filter_name in ('cidr-block-association.association-id', 'ipv6-cidr-block-association.association-id'):
return self.cidr_block_association_set.keys()
elif filter_name in ('cidr-block-association.state', 'ipv6-cidr-block-association.state'):
return [c['cidr_block_state']['state'] for c in self.get_cidr_block_association_set(ipv6='ipv6' in filter_name)]
elif filter_name in ('instance_tenancy', 'InstanceTenancy'):
return self.instance_tenancy
elif filter_name in ('is-default', 'isDefault'):
return self.is_default
elif filter_name == 'state':
return self.state
elif filter_name in ('dhcp-options-id', 'dhcpOptionsId'):
if not self.dhcp_options:
return None
return self.dhcp_options.id
else:
return super(VPC, self).get_filter_value(filter_name, 'DescribeVpcs')
def associate_vpc_cidr_block(self, cidr_block, amazon_provided_ipv6_cidr_block=False):
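        # As modelled here, a VPC supports up to 5 IPv4 CIDR associations but
        # only a single Amazon-provided IPv6 CIDR block.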
max_associations = 5 if not amazon_provided_ipv6_cidr_block else 1
if len(self.get_cidr_block_association_set(amazon_provided_ipv6_cidr_block)) >= max_associations:
raise CidrLimitExceeded(self.id, max_associations)
association_id = random_vpc_cidr_association_id()
association_set = {
'association_id': association_id,
'cidr_block_state': {'state': 'associated', 'StatusMessage': ''}
}
association_set['cidr_block'] = random_ipv6_cidr() if amazon_provided_ipv6_cidr_block else cidr_block
self.cidr_block_association_set[association_id] = association_set
return association_set
def disassociate_vpc_cidr_block(self, association_id):
if self.cidr_block == self.cidr_block_association_set.get(association_id, {}).get('cidr_block'):
raise OperationNotPermitted(association_id)
response = self.cidr_block_association_set.pop(association_id, {})
if response:
response['vpc_id'] = self.id
response['cidr_block_state']['state'] = 'disassociating'
return response
def get_cidr_block_association_set(self, ipv6=False):
return [c for c in self.cidr_block_association_set.values() if ('::/' if ipv6 else '.') in c.get('cidr_block')]
class VPCBackend(object):
__refs__ = defaultdict(list)
def __init__(self):
self.vpcs = {}
self.__refs__[self.__class__].append(weakref.ref(self))
super(VPCBackend, self).__init__()
@classmethod
def get_instances(cls):
for inst_ref in cls.__refs__[cls]:
inst = inst_ref()
if inst is not None:
yield inst
def create_vpc(self, cidr_block, instance_tenancy='default', amazon_provided_ipv6_cidr_block=False):
vpc_id = random_vpc_id()
vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0, instance_tenancy, amazon_provided_ipv6_cidr_block)
self.vpcs[vpc_id] = vpc
# AWS creates a default main route table and security group.
self.create_route_table(vpc_id, main=True)
# AWS creates a default Network ACL
self.create_network_acl(vpc_id, default=True)
default = self.get_security_group_from_name('default', vpc_id=vpc_id)
if not default:
self.create_security_group(
'default', 'default VPC security group', vpc_id=vpc_id)
return vpc
def get_vpc(self, vpc_id):
if vpc_id not in self.vpcs:
raise InvalidVPCIdError(vpc_id)
return self.vpcs.get(vpc_id)
# get vpc by vpc id and aws region
def get_cross_vpc(self, vpc_id, peer_region):
for vpcs in self.get_instances():
if vpcs.region_name == peer_region:
match_vpc = vpcs.get_vpc(vpc_id)
return match_vpc
def get_all_vpcs(self, vpc_ids=None, filters=None):
matches = self.vpcs.values()
if vpc_ids:
matches = [vpc for vpc in matches
if vpc.id in vpc_ids]
if len(vpc_ids) > len(matches):
                unknown_ids = set(vpc_ids) - set(vpc.id for vpc in matches)
raise InvalidVPCIdError(unknown_ids)
if filters:
matches = generic_filter(filters, matches)
return matches
def delete_vpc(self, vpc_id):
# Delete route table if only main route table remains.
route_tables = self.get_all_route_tables(filters={'vpc-id': vpc_id})
if len(route_tables) > 1:
raise DependencyViolationError(
"The vpc {0} has dependencies and cannot be deleted.".format(vpc_id)
)
for route_table in route_tables:
self.delete_route_table(route_table.id)
# Delete default security group if exists.
default = self.get_security_group_from_name('default', vpc_id=vpc_id)
if default:
self.delete_security_group(group_id=default.id)
# Now delete VPC.
vpc = self.vpcs.pop(vpc_id, None)
if not vpc:
raise InvalidVPCIdError(vpc_id)
if vpc.dhcp_options:
vpc.dhcp_options.vpc = None
self.delete_dhcp_options_set(vpc.dhcp_options.id)
vpc.dhcp_options = None
return vpc
def describe_vpc_attribute(self, vpc_id, attr_name):
vpc = self.get_vpc(vpc_id)
if attr_name in ('enable_dns_support', 'enable_dns_hostnames'):
return getattr(vpc, attr_name)
else:
raise InvalidParameterValueError(attr_name)
def modify_vpc_attribute(self, vpc_id, attr_name, attr_value):
vpc = self.get_vpc(vpc_id)
if attr_name in ('enable_dns_support', 'enable_dns_hostnames'):
setattr(vpc, attr_name, attr_value)
else:
raise InvalidParameterValueError(attr_name)
def disassociate_vpc_cidr_block(self, association_id):
for vpc in self.vpcs.values():
response = vpc.disassociate_vpc_cidr_block(association_id)
if response:
return response
else:
raise InvalidVpcCidrBlockAssociationIdError(association_id)
def associate_vpc_cidr_block(self, vpc_id, cidr_block, amazon_provided_ipv6_cidr_block):
vpc = self.get_vpc(vpc_id)
return vpc.associate_vpc_cidr_block(cidr_block, amazon_provided_ipv6_cidr_block)
class VPCPeeringConnectionStatus(object):
def __init__(self, code='initiating-request', message=''):
self.code = code
self.message = message
def deleted(self):
self.code = 'deleted'
self.message = 'Deleted by {deleter ID}'
def initiating(self):
self.code = 'initiating-request'
self.message = 'Initiating Request to {accepter ID}'
def pending(self):
self.code = 'pending-acceptance'
self.message = 'Pending Acceptance by {accepter ID}'
def accept(self):
self.code = 'active'
self.message = 'Active'
def reject(self):
self.code = 'rejected'
self.message = 'Inactive'
class VPCPeeringConnection(TaggedEC2Resource):
def __init__(self, vpc_pcx_id, vpc, peer_vpc):
self.id = vpc_pcx_id
self.vpc = vpc
self.peer_vpc = peer_vpc
self._status = VPCPeeringConnectionStatus()
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
ec2_backend = ec2_backends[region_name]
vpc = ec2_backend.get_vpc(properties['VpcId'])
peer_vpc = ec2_backend.get_vpc(properties['PeerVpcId'])
vpc_pcx = ec2_backend.create_vpc_peering_connection(vpc, peer_vpc)
return vpc_pcx
@property
def physical_resource_id(self):
return self.id
class VPCPeeringConnectionBackend(object):
def __init__(self):
self.vpc_pcxs = {}
super(VPCPeeringConnectionBackend, self).__init__()
def create_vpc_peering_connection(self, vpc, peer_vpc):
vpc_pcx_id = random_vpc_peering_connection_id()
vpc_pcx = VPCPeeringConnection(vpc_pcx_id, vpc, peer_vpc)
vpc_pcx._status.pending()
self.vpc_pcxs[vpc_pcx_id] = vpc_pcx
return vpc_pcx
def get_all_vpc_peering_connections(self):
return self.vpc_pcxs.values()
def get_vpc_peering_connection(self, vpc_pcx_id):
if vpc_pcx_id not in self.vpc_pcxs:
raise InvalidVPCPeeringConnectionIdError(vpc_pcx_id)
return self.vpc_pcxs.get(vpc_pcx_id)
def delete_vpc_peering_connection(self, vpc_pcx_id):
deleted = self.get_vpc_peering_connection(vpc_pcx_id)
deleted._status.deleted()
return deleted
def accept_vpc_peering_connection(self, vpc_pcx_id):
vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id)
if vpc_pcx._status.code != 'pending-acceptance':
raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id)
vpc_pcx._status.accept()
return vpc_pcx
def reject_vpc_peering_connection(self, vpc_pcx_id):
vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id)
if vpc_pcx._status.code != 'pending-acceptance':
raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id)
vpc_pcx._status.reject()
return vpc_pcx
class Subnet(TaggedEC2Resource):
def __init__(self, ec2_backend, subnet_id, vpc_id, cidr_block, availability_zone, default_for_az,
map_public_ip_on_launch):
self.ec2_backend = ec2_backend
self.id = subnet_id
self.vpc_id = vpc_id
self.cidr_block = cidr_block
self.cidr = ipaddress.ip_network(six.text_type(self.cidr_block))
self._availability_zone = availability_zone
self.default_for_az = default_for_az
self.map_public_ip_on_launch = map_public_ip_on_launch
# Theory is we assign ip's as we go (as 16,777,214 usable IPs in a /8)
self._subnet_ip_generator = self.cidr.hosts()
self.reserved_ips = [six.next(self._subnet_ip_generator) for _ in range(0, 3)] # Reserved by AWS
self._unused_ips = set() # if instance is destroyed hold IP here for reuse
self._subnet_ips = {} # has IP: instance
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
vpc_id = properties['VpcId']
cidr_block = properties['CidrBlock']
availability_zone = properties.get('AvailabilityZone')
ec2_backend = ec2_backends[region_name]
subnet = ec2_backend.create_subnet(
vpc_id=vpc_id,
cidr_block=cidr_block,
availability_zone=availability_zone,
)
for tag in properties.get("Tags", []):
tag_key = tag["Key"]
tag_value = tag["Value"]
subnet.add_tag(tag_key, tag_value)
return subnet
@property
def availability_zone(self):
return self._availability_zone
@property
def physical_resource_id(self):
return self.id
def get_filter_value(self, filter_name):
"""
API Version 2014-10-01 defines the following filters for DescribeSubnets:
* availabilityZone
* available-ip-address-count
* cidrBlock
* defaultForAz
* state
* subnet-id
* tag:key=value
* tag-key
* tag-value
* vpc-id
Taken from: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html
"""
if filter_name in ('cidr', 'cidrBlock', 'cidr-block'):
return self.cidr_block
elif filter_name in ('vpc-id', 'vpcId'):
return self.vpc_id
elif filter_name == 'subnet-id':
return self.id
elif filter_name in ('availabilityZone', 'availability-zone'):
return self.availability_zone
elif filter_name in ('defaultForAz', 'default-for-az'):
return self.default_for_az
else:
return super(Subnet, self).get_filter_value(
filter_name, 'DescribeSubnets')
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'AvailabilityZone':
raise NotImplementedError(
                '"Fn::GetAtt" : [ "{0}" , "AvailabilityZone" ]')
raise UnformattedGetAttTemplateException()
def get_available_subnet_ip(self, instance):
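        # Prefer reusing an address that was released back to the pool; otherwise
        # take the next free host from the subnet's address iterator.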
try:
new_ip = self._unused_ips.pop()
except KeyError:
new_ip = six.next(self._subnet_ip_generator)
# Skips any IP's if they've been manually specified
while str(new_ip) in self._subnet_ips:
new_ip = six.next(self._subnet_ip_generator)
if new_ip == self.cidr.broadcast_address:
                raise StopIteration()  # The broadcast address can't be used
# TODO StopIteration will be raised if no ip's available, not sure how aws handles this.
new_ip = str(new_ip)
self._subnet_ips[new_ip] = instance
return new_ip
def request_ip(self, ip, instance):
if ipaddress.ip_address(ip) not in self.cidr:
raise Exception('IP does not fall in the subnet CIDR of {0}'.format(self.cidr))
if ip in self._subnet_ips:
raise Exception('IP already in use')
try:
self._unused_ips.remove(ip)
except KeyError:
pass
self._subnet_ips[ip] = instance
return ip
def del_subnet_ip(self, ip):
try:
del self._subnet_ips[ip]
self._unused_ips.add(ip)
except KeyError:
pass # Unknown IP
class SubnetBackend(object):
def __init__(self):
# maps availability zone to dict of (subnet_id, subnet)
self.subnets = defaultdict(dict)
super(SubnetBackend, self).__init__()
def get_subnet(self, subnet_id):
for subnets in self.subnets.values():
if subnet_id in subnets:
return subnets[subnet_id]
raise InvalidSubnetIdError(subnet_id)
def create_subnet(self, vpc_id, cidr_block, availability_zone):
subnet_id = random_subnet_id()
self.get_vpc(vpc_id) # Validate VPC exists
# if this is the first subnet for an availability zone,
# consider it the default
default_for_az = str(availability_zone not in self.subnets).lower()
map_public_ip_on_launch = default_for_az
subnet = Subnet(self, subnet_id, vpc_id, cidr_block, availability_zone,
default_for_az, map_public_ip_on_launch)
# AWS associates a new subnet with the default Network ACL
self.associate_default_network_acl_with_subnet(subnet_id)
self.subnets[availability_zone][subnet_id] = subnet
return subnet
def get_all_subnets(self, subnet_ids=None, filters=None):
# Extract a list of all subnets
matches = itertools.chain(*[x.values()
for x in self.subnets.values()])
if subnet_ids:
matches = [sn for sn in matches
if sn.id in subnet_ids]
if len(subnet_ids) > len(matches):
                unknown_ids = set(subnet_ids) - set(sn.id for sn in matches)
raise InvalidSubnetIdError(unknown_ids)
if filters:
matches = generic_filter(filters, matches)
return matches
def delete_subnet(self, subnet_id):
for subnets in self.subnets.values():
if subnet_id in subnets:
return subnets.pop(subnet_id, None)
raise InvalidSubnetIdError(subnet_id)
def modify_subnet_attribute(self, subnet_id, map_public_ip):
subnet = self.get_subnet(subnet_id)
if map_public_ip not in ('true', 'false'):
raise InvalidParameterValueError(map_public_ip)
subnet.map_public_ip_on_launch = map_public_ip
class SubnetRouteTableAssociation(object):
def __init__(self, route_table_id, subnet_id):
self.route_table_id = route_table_id
self.subnet_id = subnet_id
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
route_table_id = properties['RouteTableId']
subnet_id = properties['SubnetId']
ec2_backend = ec2_backends[region_name]
subnet_association = ec2_backend.create_subnet_association(
route_table_id=route_table_id,
subnet_id=subnet_id,
)
return subnet_association
class SubnetRouteTableAssociationBackend(object):
def __init__(self):
self.subnet_associations = {}
super(SubnetRouteTableAssociationBackend, self).__init__()
def create_subnet_association(self, route_table_id, subnet_id):
subnet_association = SubnetRouteTableAssociation(
route_table_id, subnet_id)
self.subnet_associations["{0}:{1}".format(
route_table_id, subnet_id)] = subnet_association
return subnet_association
class RouteTable(TaggedEC2Resource):
def __init__(self, ec2_backend, route_table_id, vpc_id, main=False):
self.ec2_backend = ec2_backend
self.id = route_table_id
self.vpc_id = vpc_id
self.main = main
self.associations = {}
self.routes = {}
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
vpc_id = properties['VpcId']
ec2_backend = ec2_backends[region_name]
route_table = ec2_backend.create_route_table(
vpc_id=vpc_id,
)
return route_table
@property
def physical_resource_id(self):
return self.id
def get_filter_value(self, filter_name):
if filter_name == "association.main":
# Note: Boto only supports 'true'.
# https://github.com/boto/boto/issues/1742
if self.main:
return 'true'
else:
return 'false'
elif filter_name == "route-table-id":
return self.id
elif filter_name == "vpc-id":
return self.vpc_id
elif filter_name == "association.route-table-id":
return self.id
elif filter_name == "association.route-table-association-id":
return self.associations.keys()
elif filter_name == "association.subnet-id":
return self.associations.values()
else:
return super(RouteTable, self).get_filter_value(
filter_name, 'DescribeRouteTables')
class RouteTableBackend(object):
def __init__(self):
self.route_tables = {}
super(RouteTableBackend, self).__init__()
def create_route_table(self, vpc_id, main=False):
route_table_id = random_route_table_id()
vpc = self.get_vpc(vpc_id) # Validate VPC exists
route_table = RouteTable(self, route_table_id, vpc_id, main=main)
self.route_tables[route_table_id] = route_table
# AWS creates a default local route.
self.create_route(route_table_id, vpc.cidr_block, local=True)
return route_table
def get_route_table(self, route_table_id):
route_table = self.route_tables.get(route_table_id, None)
if not route_table:
raise InvalidRouteTableIdError(route_table_id)
return route_table
def get_all_route_tables(self, route_table_ids=None, filters=None):
route_tables = self.route_tables.values()
if route_table_ids:
route_tables = [
route_table for route_table in route_tables if route_table.id in route_table_ids]
if len(route_tables) != len(route_table_ids):
invalid_id = list(set(route_table_ids).difference(
set([route_table.id for route_table in route_tables])))[0]
raise InvalidRouteTableIdError(invalid_id)
return generic_filter(filters, route_tables)
def delete_route_table(self, route_table_id):
route_table = self.get_route_table(route_table_id)
if route_table.associations:
raise DependencyViolationError(
"The routeTable '{0}' has dependencies and cannot be deleted.".format(route_table_id)
)
self.route_tables.pop(route_table_id)
return True
def associate_route_table(self, route_table_id, subnet_id):
# Idempotent if association already exists.
route_tables_by_subnet = self.get_all_route_tables(
filters={'association.subnet-id': [subnet_id]})
if route_tables_by_subnet:
for association_id, check_subnet_id in route_tables_by_subnet[0].associations.items():
if subnet_id == check_subnet_id:
return association_id
# Association does not yet exist, so create it.
route_table = self.get_route_table(route_table_id)
self.get_subnet(subnet_id) # Validate subnet exists
association_id = random_subnet_association_id()
route_table.associations[association_id] = subnet_id
return association_id
def disassociate_route_table(self, association_id):
for route_table in self.route_tables.values():
if association_id in route_table.associations:
return route_table.associations.pop(association_id, None)
raise InvalidAssociationIdError(association_id)
def replace_route_table_association(self, association_id, route_table_id):
# Idempotent if association already exists.
new_route_table = self.get_route_table(route_table_id)
if association_id in new_route_table.associations:
return association_id
# Find route table which currently has the association, error if none.
route_tables_by_association_id = self.get_all_route_tables(
filters={'association.route-table-association-id': [association_id]})
if not route_tables_by_association_id:
raise InvalidAssociationIdError(association_id)
# Remove existing association, create new one.
previous_route_table = route_tables_by_association_id[0]
subnet_id = previous_route_table.associations.pop(association_id, None)
return self.associate_route_table(route_table_id, subnet_id)
class Route(object):
def __init__(self, route_table, destination_cidr_block, local=False,
gateway=None, instance=None, interface=None, vpc_pcx=None):
self.id = generate_route_id(route_table.id, destination_cidr_block)
self.route_table = route_table
self.destination_cidr_block = destination_cidr_block
self.local = local
self.gateway = gateway
self.instance = instance
self.interface = interface
self.vpc_pcx = vpc_pcx
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
gateway_id = properties.get('GatewayId')
instance_id = properties.get('InstanceId')
interface_id = properties.get('NetworkInterfaceId')
pcx_id = properties.get('VpcPeeringConnectionId')
route_table_id = properties['RouteTableId']
ec2_backend = ec2_backends[region_name]
route_table = ec2_backend.create_route(
route_table_id=route_table_id,
destination_cidr_block=properties.get('DestinationCidrBlock'),
gateway_id=gateway_id,
instance_id=instance_id,
interface_id=interface_id,
vpc_peering_connection_id=pcx_id,
)
return route_table
class RouteBackend(object):
def __init__(self):
super(RouteBackend, self).__init__()
def create_route(self, route_table_id, destination_cidr_block, local=False,
gateway_id=None, instance_id=None, interface_id=None,
vpc_peering_connection_id=None):
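        # A route may target a VPN/internet gateway, an instance, or a VPC peering
        # connection; routing to a network interface is not implemented.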
route_table = self.get_route_table(route_table_id)
if interface_id:
self.raise_not_implemented_error(
"CreateRoute to NetworkInterfaceId")
gateway = None
if gateway_id:
if EC2_RESOURCE_TO_PREFIX['vpn-gateway'] in gateway_id:
gateway = self.get_vpn_gateway(gateway_id)
elif EC2_RESOURCE_TO_PREFIX['internet-gateway'] in gateway_id:
gateway = self.get_internet_gateway(gateway_id)
route = Route(route_table, destination_cidr_block, local=local,
gateway=gateway,
instance=self.get_instance(
instance_id) if instance_id else None,
interface=None,
vpc_pcx=self.get_vpc_peering_connection(
vpc_peering_connection_id) if vpc_peering_connection_id else None)
route_table.routes[route.id] = route
return route
def replace_route(self, route_table_id, destination_cidr_block,
gateway_id=None, instance_id=None, interface_id=None,
vpc_peering_connection_id=None):
route_table = self.get_route_table(route_table_id)
route_id = generate_route_id(route_table.id, destination_cidr_block)
route = route_table.routes[route_id]
if interface_id:
self.raise_not_implemented_error(
"ReplaceRoute to NetworkInterfaceId")
route.gateway = None
if gateway_id:
if EC2_RESOURCE_TO_PREFIX['vpn-gateway'] in gateway_id:
route.gateway = self.get_vpn_gateway(gateway_id)
elif EC2_RESOURCE_TO_PREFIX['internet-gateway'] in gateway_id:
route.gateway = self.get_internet_gateway(gateway_id)
route.instance = self.get_instance(
instance_id) if instance_id else None
route.interface = None
route.vpc_pcx = self.get_vpc_peering_connection(
vpc_peering_connection_id) if vpc_peering_connection_id else None
route_table.routes[route.id] = route
return route
def get_route(self, route_id):
route_table_id, destination_cidr_block = split_route_id(route_id)
route_table = self.get_route_table(route_table_id)
return route_table.routes.get(route_id)
def delete_route(self, route_table_id, destination_cidr_block):
route_table = self.get_route_table(route_table_id)
route_id = generate_route_id(route_table_id, destination_cidr_block)
deleted = route_table.routes.pop(route_id, None)
if not deleted:
raise InvalidRouteError(route_table_id, destination_cidr_block)
return deleted
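# Illustrative sketch (not part of the upstream module, never called): how the
# route backend above can be exercised end to end. It assumes the regional
# registry `ec2_backends` defined at the bottom of this file and that the
# RouteTableBackend mixed into EC2Backend exposes create_route_table(vpc_id).
def _example_route_lifecycle():
    backend = ec2_backends['us-east-1']
    vpc = backend.create_vpc('10.10.0.0/16')
    route_table = backend.create_route_table(vpc.id)
    igw = backend.create_internet_gateway()
    # add a default route through the internet gateway, then remove it again
    backend.create_route(route_table.id, '0.0.0.0/0', gateway_id=igw.id)
    backend.delete_route(route_table.id, '0.0.0.0/0')
    return route_table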
class InternetGateway(TaggedEC2Resource):
def __init__(self, ec2_backend):
self.ec2_backend = ec2_backend
self.id = random_internet_gateway_id()
self.vpc = None
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
ec2_backend = ec2_backends[region_name]
return ec2_backend.create_internet_gateway()
@property
def physical_resource_id(self):
return self.id
@property
def attachment_state(self):
if self.vpc:
return "available"
else:
return "detached"
class InternetGatewayBackend(object):
def __init__(self):
self.internet_gateways = {}
super(InternetGatewayBackend, self).__init__()
def create_internet_gateway(self):
igw = InternetGateway(self)
self.internet_gateways[igw.id] = igw
return igw
def describe_internet_gateways(self, internet_gateway_ids=None, filters=None):
igws = []
if internet_gateway_ids is None:
igws = self.internet_gateways.values()
else:
for igw_id in internet_gateway_ids:
if igw_id in self.internet_gateways:
igws.append(self.internet_gateways[igw_id])
else:
raise InvalidInternetGatewayIdError(igw_id)
if filters is not None:
igws = filter_internet_gateways(igws, filters)
return igws
def delete_internet_gateway(self, internet_gateway_id):
igw = self.get_internet_gateway(internet_gateway_id)
if igw.vpc:
raise DependencyViolationError(
"{0} is being utilized by {1}".format(internet_gateway_id, igw.vpc.id)
)
self.internet_gateways.pop(internet_gateway_id)
return True
def detach_internet_gateway(self, internet_gateway_id, vpc_id):
igw = self.get_internet_gateway(internet_gateway_id)
if not igw.vpc or igw.vpc.id != vpc_id:
raise GatewayNotAttachedError(internet_gateway_id, vpc_id)
igw.vpc = None
return True
def attach_internet_gateway(self, internet_gateway_id, vpc_id):
igw = self.get_internet_gateway(internet_gateway_id)
if igw.vpc:
raise ResourceAlreadyAssociatedError(internet_gateway_id)
vpc = self.get_vpc(vpc_id)
igw.vpc = vpc
return True
def get_internet_gateway(self, internet_gateway_id):
igw_ids = [internet_gateway_id]
return self.describe_internet_gateways(internet_gateway_ids=igw_ids)[0]
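# Illustrative sketch (added for clarity, never called): a full attach/detach
# round trip against the internet gateway backend above, using the 'us-east-1'
# entry of the regional registry defined at the bottom of this module.
def _example_internet_gateway_roundtrip():
    backend = ec2_backends['us-east-1']
    vpc = backend.create_vpc('10.11.0.0/16')
    igw = backend.create_internet_gateway()
    backend.attach_internet_gateway(igw.id, vpc.id)
    assert igw.attachment_state == 'available'
    backend.detach_internet_gateway(igw.id, vpc.id)
    backend.delete_internet_gateway(igw.id)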
class VPCGatewayAttachment(BaseModel):
def __init__(self, gateway_id, vpc_id):
self.gateway_id = gateway_id
self.vpc_id = vpc_id
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
ec2_backend = ec2_backends[region_name]
attachment = ec2_backend.create_vpc_gateway_attachment(
gateway_id=properties['InternetGatewayId'],
vpc_id=properties['VpcId'],
)
ec2_backend.attach_internet_gateway(
properties['InternetGatewayId'], properties['VpcId'])
return attachment
@property
def physical_resource_id(self):
return self.vpc_id
class VPCGatewayAttachmentBackend(object):
def __init__(self):
self.gateway_attachments = {}
super(VPCGatewayAttachmentBackend, self).__init__()
def create_vpc_gateway_attachment(self, vpc_id, gateway_id):
attachment = VPCGatewayAttachment(vpc_id, gateway_id)
self.gateway_attachments[gateway_id] = attachment
return attachment
class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
def __init__(self, ec2_backend, spot_request_id, price, image_id, type,
valid_from, valid_until, launch_group, availability_zone_group,
key_name, security_groups, user_data, instance_type, placement,
kernel_id, ramdisk_id, monitoring_enabled, subnet_id, spot_fleet_id,
**kwargs):
super(SpotInstanceRequest, self).__init__(**kwargs)
ls = LaunchSpecification()
self.ec2_backend = ec2_backend
self.launch_specification = ls
self.id = spot_request_id
self.state = "open"
self.price = price
self.type = type
self.valid_from = valid_from
self.valid_until = valid_until
self.launch_group = launch_group
self.availability_zone_group = availability_zone_group
self.user_data = user_data  # kept on the request, not part of the launch specification
ls.kernel = kernel_id
ls.ramdisk = ramdisk_id
ls.image_id = image_id
ls.key_name = key_name
ls.instance_type = instance_type
ls.placement = placement
ls.monitored = monitoring_enabled
ls.subnet_id = subnet_id
self.spot_fleet_id = spot_fleet_id
if security_groups:
for group_name in security_groups:
group = self.ec2_backend.get_security_group_from_name(
group_name)
if group:
ls.groups.append(group)
else:
# If no security groups were given, fall back to the default group
default_group = self.ec2_backend.get_security_group_from_name(
"default")
ls.groups.append(default_group)
self.instance = self.launch_instance()
def get_filter_value(self, filter_name):
if filter_name == 'state':
return self.state
elif filter_name == 'spot-instance-request-id':
return self.id
else:
return super(SpotInstanceRequest, self).get_filter_value(
filter_name, 'DescribeSpotInstanceRequests')
def launch_instance(self):
reservation = self.ec2_backend.add_instances(
image_id=self.launch_specification.image_id, count=1, user_data=self.user_data,
instance_type=self.launch_specification.instance_type,
subnet_id=self.launch_specification.subnet_id,
key_name=self.launch_specification.key_name,
security_group_names=[],
security_group_ids=self.launch_specification.groups,
spot_fleet_id=self.spot_fleet_id,
)
instance = reservation.instances[0]
return instance
@six.add_metaclass(Model)
class SpotRequestBackend(object):
def __init__(self):
self.spot_instance_requests = {}
super(SpotRequestBackend, self).__init__()
def request_spot_instances(self, price, image_id, count, type, valid_from,
valid_until, launch_group, availability_zone_group,
key_name, security_groups, user_data,
instance_type, placement, kernel_id, ramdisk_id,
monitoring_enabled, subnet_id, spot_fleet_id=None):
requests = []
for _ in range(count):
spot_request_id = random_spot_request_id()
request = SpotInstanceRequest(self,
spot_request_id, price, image_id, type, valid_from, valid_until,
launch_group, availability_zone_group, key_name, security_groups,
user_data, instance_type, placement, kernel_id, ramdisk_id,
monitoring_enabled, subnet_id, spot_fleet_id)
self.spot_instance_requests[spot_request_id] = request
requests.append(request)
return requests
@Model.prop('SpotInstanceRequest')
def describe_spot_instance_requests(self, filters=None):
requests = self.spot_instance_requests.values()
return generic_filter(filters, requests)
def cancel_spot_instance_requests(self, request_ids):
requests = []
for request_id in request_ids:
requests.append(self.spot_instance_requests.pop(request_id))
return requests
class SpotFleetLaunchSpec(object):
def __init__(self, ebs_optimized, group_set, iam_instance_profile, image_id,
instance_type, key_name, monitoring, spot_price, subnet_id, user_data,
weighted_capacity):
self.ebs_optimized = ebs_optimized
self.group_set = group_set
self.iam_instance_profile = iam_instance_profile
self.image_id = image_id
self.instance_type = instance_type
self.key_name = key_name
self.monitoring = monitoring
self.spot_price = spot_price
self.subnet_id = subnet_id
self.user_data = user_data
self.weighted_capacity = float(weighted_capacity)
class SpotFleetRequest(TaggedEC2Resource):
def __init__(self, ec2_backend, spot_fleet_request_id, spot_price,
target_capacity, iam_fleet_role, allocation_strategy, launch_specs):
self.ec2_backend = ec2_backend
self.id = spot_fleet_request_id
self.spot_price = spot_price
self.target_capacity = int(target_capacity)
self.iam_fleet_role = iam_fleet_role
self.allocation_strategy = allocation_strategy
self.state = "active"
self.fulfilled_capacity = 0.0
self.launch_specs = []
for spec in launch_specs:
self.launch_specs.append(SpotFleetLaunchSpec(
ebs_optimized=spec['ebs_optimized'],
group_set=[val for key, val in spec.items(
) if key.startswith("group_set")],
iam_instance_profile=spec.get('iam_instance_profile._arn'),
image_id=spec['image_id'],
instance_type=spec['instance_type'],
key_name=spec.get('key_name'),
monitoring=spec.get('monitoring._enabled'),
spot_price=spec.get('spot_price', self.spot_price),
subnet_id=spec['subnet_id'],
user_data=spec.get('user_data'),
weighted_capacity=spec['weighted_capacity'],
)
)
self.spot_requests = []
self.create_spot_requests(self.target_capacity)
@property
def physical_resource_id(self):
return self.id
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json[
'Properties']['SpotFleetRequestConfigData']
ec2_backend = ec2_backends[region_name]
spot_price = properties.get('SpotPrice')
target_capacity = properties['TargetCapacity']
iam_fleet_role = properties['IamFleetRole']
allocation_strategy = properties['AllocationStrategy']
launch_specs = properties["LaunchSpecifications"]
launch_specs = [
dict([(camelcase_to_underscores(key), val)
for key, val in launch_spec.items()])
for launch_spec
in launch_specs
]
spot_fleet_request = ec2_backend.request_spot_fleet(spot_price,
target_capacity, iam_fleet_role, allocation_strategy,
launch_specs)
return spot_fleet_request
def get_launch_spec_counts(self, weight_to_add):
weight_map = defaultdict(int)
weight_so_far = 0
if self.allocation_strategy == 'diversified':
launch_spec_index = 0
while True:
launch_spec = self.launch_specs[
launch_spec_index % len(self.launch_specs)]
weight_map[launch_spec] += 1
weight_so_far += launch_spec.weighted_capacity
if weight_so_far >= weight_to_add:
break
launch_spec_index += 1
else: # lowestPrice
cheapest_spec = sorted(
# FIXME: change `+inf` to the on demand price scaled to weighted capacity when it's not present
self.launch_specs, key=lambda spec: float(spec.spot_price or '+inf'))[0]
weight_so_far = weight_to_add + (weight_to_add % cheapest_spec.weighted_capacity)
weight_map[cheapest_spec] = int(
weight_so_far // cheapest_spec.weighted_capacity)
return weight_map, weight_so_far
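# Worked example of the lowestPrice branch above (illustrative only): a single
# launch spec with weighted_capacity=2.0 and a requested weight_to_add of 5
# gives weight_so_far = 5 + (5 % 2.0) = 6.0, so int(6.0 // 2.0) = 3 spot
# requests are created and 6.0 units of capacity are recorded as fulfilled.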
def create_spot_requests(self, weight_to_add):
weight_map, added_weight = self.get_launch_spec_counts(weight_to_add)
for launch_spec, count in weight_map.items():
requests = self.ec2_backend.request_spot_instances(
price=launch_spec.spot_price,
image_id=launch_spec.image_id,
count=count,
type="persistent",
valid_from=None,
valid_until=None,
launch_group=None,
availability_zone_group=None,
key_name=launch_spec.key_name,
security_groups=launch_spec.group_set,
user_data=launch_spec.user_data,
instance_type=launch_spec.instance_type,
placement=None,
kernel_id=None,
ramdisk_id=None,
monitoring_enabled=launch_spec.monitoring,
subnet_id=launch_spec.subnet_id,
spot_fleet_id=self.id,
)
self.spot_requests.extend(requests)
self.fulfilled_capacity += added_weight
return self.spot_requests
def terminate_instances(self):
instance_ids = []
new_fulfilled_capacity = self.fulfilled_capacity
for req in self.spot_requests:
instance = req.instance
for spec in self.launch_specs:
if spec.instance_type == instance.instance_type and spec.subnet_id == instance.subnet_id:
break
if new_fulfilled_capacity - spec.weighted_capacity < self.target_capacity:
continue
new_fulfilled_capacity -= spec.weighted_capacity
instance_ids.append(instance.id)
self.spot_requests = [req for req in self.spot_requests if req.instance.id not in instance_ids]
self.ec2_backend.terminate_instances(instance_ids)
class SpotFleetBackend(object):
def __init__(self):
self.spot_fleet_requests = {}
super(SpotFleetBackend, self).__init__()
def request_spot_fleet(self, spot_price, target_capacity, iam_fleet_role,
allocation_strategy, launch_specs):
spot_fleet_request_id = random_spot_fleet_request_id()
request = SpotFleetRequest(self, spot_fleet_request_id, spot_price,
target_capacity, iam_fleet_role, allocation_strategy, launch_specs)
self.spot_fleet_requests[spot_fleet_request_id] = request
return request
def get_spot_fleet_request(self, spot_fleet_request_id):
return self.spot_fleet_requests[spot_fleet_request_id]
def describe_spot_fleet_instances(self, spot_fleet_request_id):
spot_fleet = self.get_spot_fleet_request(spot_fleet_request_id)
return spot_fleet.spot_requests
def describe_spot_fleet_requests(self, spot_fleet_request_ids):
requests = self.spot_fleet_requests.values()
if spot_fleet_request_ids:
requests = [
request for request in requests if request.id in spot_fleet_request_ids]
return requests
def cancel_spot_fleet_requests(self, spot_fleet_request_ids, terminate_instances):
spot_requests = []
for spot_fleet_request_id in spot_fleet_request_ids:
spot_fleet = self.spot_fleet_requests[spot_fleet_request_id]
if terminate_instances:
spot_fleet.target_capacity = 0
spot_fleet.terminate_instances()
spot_requests.append(spot_fleet)
del self.spot_fleet_requests[spot_fleet_request_id]
return spot_requests
def modify_spot_fleet_request(self, spot_fleet_request_id, target_capacity, terminate_instances):
if target_capacity < 0:
raise ValueError('Cannot reduce spot fleet capacity below 0')
spot_fleet_request = self.spot_fleet_requests[spot_fleet_request_id]
delta = target_capacity - spot_fleet_request.fulfilled_capacity
spot_fleet_request.target_capacity = target_capacity
if delta > 0:
spot_fleet_request.create_spot_requests(delta)
elif delta < 0 and terminate_instances == 'Default':
spot_fleet_request.terminate_instances()
return True
class ElasticAddress(object):
def __init__(self, domain, address=None):
if address:
self.public_ip = address
else:
self.public_ip = random_ip()
self.allocation_id = random_eip_allocation_id() if domain == "vpc" else None
self.domain = domain
self.instance = None
self.eni = None
self.association_id = None
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
ec2_backend = ec2_backends[region_name]
properties = cloudformation_json.get('Properties')
instance_id = None
if properties:
domain = properties.get('Domain')
eip = ec2_backend.allocate_address(
domain=domain if domain else 'standard')
instance_id = properties.get('InstanceId')
else:
eip = ec2_backend.allocate_address(domain='standard')
if instance_id:
instance = ec2_backend.get_instance_by_id(instance_id)
ec2_backend.associate_address(instance, address=eip.public_ip)
return eip
@property
def physical_resource_id(self):
return self.public_ip
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'AllocationId':
return self.allocation_id
raise UnformattedGetAttTemplateException()
def get_filter_value(self, filter_name):
if filter_name == 'allocation-id':
return self.allocation_id
elif filter_name == 'association-id':
return self.association_id
elif filter_name == 'domain':
return self.domain
elif filter_name == 'instance-id' and self.instance:
return self.instance.id
elif filter_name == 'network-interface-id' and self.eni:
return self.eni.id
elif filter_name == 'private-ip-address' and self.eni:
return self.eni.private_ip_address
elif filter_name == 'public-ip':
return self.public_ip
else:
# TODO: implement network-interface-owner-id
raise FilterNotImplementedError(filter_name, 'DescribeAddresses')
class ElasticAddressBackend(object):
def __init__(self):
self.addresses = []
super(ElasticAddressBackend, self).__init__()
def allocate_address(self, domain, address=None):
if domain not in ['standard', 'vpc']:
raise InvalidDomainError(domain)
if address:
address = ElasticAddress(domain, address)
else:
address = ElasticAddress(domain)
self.addresses.append(address)
return address
def address_by_ip(self, ips):
eips = [address for address in self.addresses
if address.public_ip in ips]
# TODO: Trim error message down to specific invalid address.
if not eips or len(ips) > len(eips):
raise InvalidAddressError(ips)
return eips
def address_by_allocation(self, allocation_ids):
eips = [address for address in self.addresses
if address.allocation_id in allocation_ids]
# TODO: Trim error message down to specific invalid id.
if not eips or len(allocation_ids) > len(eips):
raise InvalidAllocationIdError(allocation_ids)
return eips
def address_by_association(self, association_ids):
eips = [address for address in self.addresses
if address.association_id in association_ids]
# TODO: Trim error message down to specific invalid id.
if not eips or len(association_ids) > len(eips):
raise InvalidAssociationIdError(association_ids)
return eips
def associate_address(self, instance=None, eni=None, address=None, allocation_id=None, reassociate=False):
eips = []
if address:
eips = self.address_by_ip([address])
elif allocation_id:
eips = self.address_by_allocation([allocation_id])
eip = eips[0]
new_instance_association = bool(instance and (
not eip.instance or eip.instance.id == instance.id))
new_eni_association = bool(
eni and (not eip.eni or eni.id == eip.eni.id))
if new_instance_association or new_eni_association or reassociate:
eip.instance = instance
eip.eni = eni
if not eip.eni and instance:
# default to primary network interface
eip.eni = instance.nics[0]
if eip.eni:
eip.eni.public_ip = eip.public_ip
if eip.domain == "vpc":
eip.association_id = random_eip_association_id()
return eip
raise ResourceAlreadyAssociatedError(eip.public_ip)
def describe_addresses(self, allocation_ids=None, public_ips=None, filters=None):
matches = self.addresses
if allocation_ids:
matches = [addr for addr in matches
if addr.allocation_id in allocation_ids]
if len(allocation_ids) > len(matches):
unknown_ids = set(allocation_ids) - set(addr.allocation_id for addr in matches)
raise InvalidAllocationIdError(unknown_ids)
if public_ips:
matches = [addr for addr in matches
if addr.public_ip in public_ips]
if len(public_ips) > len(matches):
unknown_ips = set(public_ips) - set(addr.public_ip for addr in matches)
raise InvalidAddressError(unknown_ips)
if filters:
matches = generic_filter(filters, matches)
return matches
def disassociate_address(self, address=None, association_id=None):
eips = []
if address:
eips = self.address_by_ip([address])
elif association_id:
eips = self.address_by_association([association_id])
eip = eips[0]
if eip.eni:
eip.eni.public_ip = None
if eip.eni.instance and eip.eni.instance._state.name == "running":
eip.eni.check_auto_public_ip()
eip.eni = None
eip.instance = None
eip.association_id = None
return True
def release_address(self, address=None, allocation_id=None):
eips = []
if address:
eips = self.address_by_ip([address])
elif allocation_id:
eips = self.address_by_allocation([allocation_id])
eip = eips[0]
self.disassociate_address(address=eip.public_ip)
eip.allocation_id = None
self.addresses.remove(eip)
return True
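# Illustrative sketch (added for clarity, never called): allocating a VPC
# elastic IP, looking it up by allocation id and releasing it again via the
# elastic address backend above.
def _example_elastic_address_lifecycle():
    backend = ec2_backends['us-east-1']
    eip = backend.allocate_address(domain='vpc')
    assert backend.describe_addresses(allocation_ids=[eip.allocation_id]) == [eip]
    backend.release_address(allocation_id=eip.allocation_id)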
class DHCPOptionsSet(TaggedEC2Resource):
def __init__(self, ec2_backend, domain_name_servers=None, domain_name=None,
ntp_servers=None, netbios_name_servers=None,
netbios_node_type=None):
self.ec2_backend = ec2_backend
self._options = {
"domain-name-servers": domain_name_servers,
"domain-name": domain_name,
"ntp-servers": ntp_servers,
"netbios-name-servers": netbios_name_servers,
"netbios-node-type": netbios_node_type,
}
self.id = random_dhcp_option_id()
self.vpc = None
def get_filter_value(self, filter_name):
"""
API Version 2015-10-01 defines the following filters for DescribeDhcpOptions:
* dhcp-options-id
* key
* value
* tag:key=value
* tag-key
* tag-value
Taken from: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html
"""
if filter_name == 'dhcp-options-id':
return self.id
elif filter_name == 'key':
return list(self._options.keys())
elif filter_name == 'value':
values = [item for item in list(self._options.values()) if item]
return itertools.chain(*values)
else:
return super(DHCPOptionsSet, self).get_filter_value(
filter_name, 'DescribeDhcpOptions')
@property
def options(self):
return self._options
class DHCPOptionsSetBackend(object):
def __init__(self):
self.dhcp_options_sets = {}
super(DHCPOptionsSetBackend, self).__init__()
def associate_dhcp_options(self, dhcp_options, vpc):
dhcp_options.vpc = vpc
vpc.dhcp_options = dhcp_options
def create_dhcp_options(
self, domain_name_servers=None, domain_name=None,
ntp_servers=None, netbios_name_servers=None,
netbios_node_type=None):
NETBIOS_NODE_TYPES = [1, 2, 4, 8]
for field_value in domain_name_servers, ntp_servers, netbios_name_servers:
if field_value and len(field_value) > 4:
raise InvalidParameterValueError(",".join(field_value))
if netbios_node_type and int(netbios_node_type[0]) not in NETBIOS_NODE_TYPES:
raise InvalidParameterValueError(netbios_node_type)
options = DHCPOptionsSet(
self, domain_name_servers, domain_name, ntp_servers,
netbios_name_servers, netbios_node_type
)
self.dhcp_options_sets[options.id] = options
return options
def describe_dhcp_options(self, options_ids=None):
options_sets = []
for option_id in options_ids or []:
if option_id in self.dhcp_options_sets:
options_sets.append(self.dhcp_options_sets[option_id])
else:
raise InvalidDHCPOptionsIdError(option_id)
return options_sets or self.dhcp_options_sets.values()
def delete_dhcp_options_set(self, options_id):
if not (options_id and options_id.startswith('dopt-')):
raise MalformedDHCPOptionsIdError(options_id)
if options_id in self.dhcp_options_sets:
if self.dhcp_options_sets[options_id].vpc:
raise DependencyViolationError(
"Cannot delete assigned DHCP options.")
self.dhcp_options_sets.pop(options_id)
else:
raise InvalidDHCPOptionsIdError(options_id)
return True
def get_all_dhcp_options(self, dhcp_options_ids=None, filters=None):
dhcp_options_sets = self.dhcp_options_sets.values()
if dhcp_options_ids:
dhcp_options_sets = [
dhcp_options_set for dhcp_options_set in dhcp_options_sets if dhcp_options_set.id in dhcp_options_ids]
if len(dhcp_options_sets) != len(dhcp_options_ids):
invalid_id = list(set(dhcp_options_ids).difference(
set([dhcp_options_set.id for dhcp_options_set in dhcp_options_sets])))[0]
raise InvalidDHCPOptionsIdError(invalid_id)
return generic_filter(filters, dhcp_options_sets)
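# Illustrative sketch (added for clarity, never called): creating a DHCP
# options set and associating it with a VPC through the backend above. The
# values are arbitrary; create_dhcp_options rejects more than four servers
# per field and invalid NetBIOS node types.
def _example_dhcp_options():
    backend = ec2_backends['us-east-1']
    options = backend.create_dhcp_options(
        domain_name_servers=['10.0.0.6', '10.0.0.7'],
        domain_name='example.com')
    vpc = backend.create_vpc('10.12.0.0/16')
    backend.associate_dhcp_options(options, vpc)
    assert vpc.dhcp_options is options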
class VPNConnection(TaggedEC2Resource):
def __init__(self, ec2_backend, id, type,
customer_gateway_id, vpn_gateway_id):
self.ec2_backend = ec2_backend
self.id = id
self.state = 'available'
self.customer_gateway_configuration = {}
self.type = type
self.customer_gateway_id = customer_gateway_id
self.vpn_gateway_id = vpn_gateway_id
self.tunnels = None
self.options = None
self.static_routes = None
def get_filter_value(self, filter_name):
return super(VPNConnection, self).get_filter_value(
filter_name, 'DescribeVpnConnections')
class VPNConnectionBackend(object):
def __init__(self):
self.vpn_connections = {}
super(VPNConnectionBackend, self).__init__()
def create_vpn_connection(self, type, customer_gateway_id,
vpn_gateway_id,
static_routes_only=None):
vpn_connection_id = random_vpn_connection_id()
if static_routes_only:
pass
vpn_connection = VPNConnection(
self, id=vpn_connection_id, type=type,
customer_gateway_id=customer_gateway_id,
vpn_gateway_id=vpn_gateway_id
)
self.vpn_connections[vpn_connection.id] = vpn_connection
return vpn_connection
def delete_vpn_connection(self, vpn_connection_id):
if vpn_connection_id in self.vpn_connections:
self.vpn_connections.pop(vpn_connection_id)
else:
raise InvalidVpnConnectionIdError(vpn_connection_id)
return True
def describe_vpn_connections(self, vpn_connection_ids=None):
vpn_connections = []
for vpn_connection_id in vpn_connection_ids or []:
if vpn_connection_id in self.vpn_connections:
vpn_connections.append(self.vpn_connections[vpn_connection_id])
else:
raise InvalidVpnConnectionIdError(vpn_connection_id)
return vpn_connections or self.vpn_connections.values()
def get_all_vpn_connections(self, vpn_connection_ids=None, filters=None):
vpn_connections = self.vpn_connections.values()
if vpn_connection_ids:
vpn_connections = [vpn_connection for vpn_connection in vpn_connections
if vpn_connection.id in vpn_connection_ids]
if len(vpn_connections) != len(vpn_connection_ids):
invalid_id = list(set(vpn_connection_ids).difference(
set([vpn_connection.id for vpn_connection in vpn_connections])))[0]
raise InvalidVpnConnectionIdError(invalid_id)
return generic_filter(filters, vpn_connections)
class NetworkAclBackend(object):
def __init__(self):
self.network_acls = {}
super(NetworkAclBackend, self).__init__()
def get_network_acl(self, network_acl_id):
network_acl = self.network_acls.get(network_acl_id, None)
if not network_acl:
raise InvalidNetworkAclIdError(network_acl_id)
return network_acl
def create_network_acl(self, vpc_id, default=False):
network_acl_id = random_network_acl_id()
self.get_vpc(vpc_id)
network_acl = NetworkAcl(self, network_acl_id, vpc_id, default)
self.network_acls[network_acl_id] = network_acl
return network_acl
def get_all_network_acls(self, network_acl_ids=None, filters=None):
network_acls = self.network_acls.values()
if network_acl_ids:
network_acls = [network_acl for network_acl in network_acls
if network_acl.id in network_acl_ids]
if len(network_acls) != len(network_acl_ids):
invalid_id = list(set(network_acl_ids).difference(
set([network_acl.id for network_acl in network_acls])))[0]
raise InvalidRouteTableIdError(invalid_id)
return generic_filter(filters, network_acls)
def delete_network_acl(self, network_acl_id):
deleted = self.network_acls.pop(network_acl_id, None)
if not deleted:
raise InvalidNetworkAclIdError(network_acl_id)
return deleted
def create_network_acl_entry(self, network_acl_id, rule_number,
protocol, rule_action, egress, cidr_block,
icmp_code, icmp_type, port_range_from,
port_range_to):
network_acl_entry = NetworkAclEntry(self, network_acl_id, rule_number,
protocol, rule_action, egress,
cidr_block, icmp_code, icmp_type,
port_range_from, port_range_to)
network_acl = self.get_network_acl(network_acl_id)
network_acl.network_acl_entries.append(network_acl_entry)
return network_acl_entry
def delete_network_acl_entry(self, network_acl_id, rule_number, egress):
network_acl = self.get_network_acl(network_acl_id)
entry = next(entry for entry in network_acl.network_acl_entries
if entry.egress == egress and entry.rule_number == rule_number)
if entry is not None:
network_acl.network_acl_entries.remove(entry)
return entry
def replace_network_acl_entry(self, network_acl_id, rule_number, protocol, rule_action, egress,
cidr_block, icmp_code, icmp_type, port_range_from, port_range_to):
self.delete_network_acl_entry(network_acl_id, rule_number, egress)
network_acl_entry = self.create_network_acl_entry(network_acl_id, rule_number,
protocol, rule_action, egress,
cidr_block, icmp_code, icmp_type,
port_range_from, port_range_to)
return network_acl_entry
def replace_network_acl_association(self, association_id,
network_acl_id):
# lookup existing association for subnet and delete it
default_acl = next(value for key, value in self.network_acls.items()
if association_id in value.associations.keys())
subnet_id = None
for key, value in default_acl.associations.items():
if key == association_id:
subnet_id = default_acl.associations[key].subnet_id
del default_acl.associations[key]
break
new_assoc_id = random_network_acl_subnet_association_id()
association = NetworkAclAssociation(self,
new_assoc_id,
subnet_id,
network_acl_id)
new_acl = self.get_network_acl(network_acl_id)
new_acl.associations[new_assoc_id] = association
return association
def associate_default_network_acl_with_subnet(self, subnet_id):
association_id = random_network_acl_subnet_association_id()
acl = next(acl for acl in self.network_acls.values() if acl.default)
acl.associations[association_id] = NetworkAclAssociation(self, association_id,
subnet_id, acl.id)
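# Illustrative sketch (added for clarity, never called): creating a network
# ACL, adding an ingress entry, replacing it and deleting it via the backend
# above. The rule values are arbitrary examples.
def _example_network_acl_entries():
    backend = ec2_backends['us-east-1']
    vpc = backend.create_vpc('10.13.0.0/16')
    acl = backend.create_network_acl(vpc.id)
    backend.create_network_acl_entry(
        acl.id, rule_number=110, protocol='6', rule_action='allow',
        egress='false', cidr_block='0.0.0.0/0', icmp_code=None,
        icmp_type=None, port_range_from='443', port_range_to='443')
    backend.replace_network_acl_entry(
        acl.id, rule_number=110, protocol='-1', rule_action='deny',
        egress='false', cidr_block='0.0.0.0/0', icmp_code=None,
        icmp_type=None, port_range_from=None, port_range_to=None)
    backend.delete_network_acl_entry(acl.id, rule_number=110, egress='false')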
class NetworkAclAssociation(object):
def __init__(self, ec2_backend, new_association_id,
subnet_id, network_acl_id):
self.ec2_backend = ec2_backend
self.id = new_association_id
self.new_association_id = new_association_id
self.subnet_id = subnet_id
self.network_acl_id = network_acl_id
super(NetworkAclAssociation, self).__init__()
class NetworkAcl(TaggedEC2Resource):
def __init__(self, ec2_backend, network_acl_id, vpc_id, default=False):
self.ec2_backend = ec2_backend
self.id = network_acl_id
self.vpc_id = vpc_id
self.network_acl_entries = []
self.associations = {}
self.default = 'true' if default is True else 'false'
def get_filter_value(self, filter_name):
if filter_name == "default":
return self.default
elif filter_name == "vpc-id":
return self.vpc_id
elif filter_name == "association.network-acl-id":
return self.id
elif filter_name == "association.subnet-id":
return [assoc.subnet_id for assoc in self.associations.values()]
else:
return super(NetworkAcl, self).get_filter_value(
filter_name, 'DescribeNetworkAcls')
class NetworkAclEntry(TaggedEC2Resource):
def __init__(self, ec2_backend, network_acl_id, rule_number,
protocol, rule_action, egress, cidr_block,
icmp_code, icmp_type, port_range_from,
port_range_to):
self.ec2_backend = ec2_backend
self.network_acl_id = network_acl_id
self.rule_number = rule_number
self.protocol = protocol
self.rule_action = rule_action
self.egress = egress
self.cidr_block = cidr_block
self.icmp_code = icmp_code
self.icmp_type = icmp_type
self.port_range_from = port_range_from
self.port_range_to = port_range_to
class VpnGateway(TaggedEC2Resource):
def __init__(self, ec2_backend, id, type):
self.ec2_backend = ec2_backend
self.id = id
self.type = type
self.attachments = {}
super(VpnGateway, self).__init__()
def get_filter_value(self, filter_name):
return super(VpnGateway, self).get_filter_value(
filter_name, 'DescribeVpnGateways')
class VpnGatewayAttachment(object):
def __init__(self, vpc_id, state):
self.vpc_id = vpc_id
self.state = state
super(VpnGatewayAttachment, self).__init__()
class VpnGatewayBackend(object):
def __init__(self):
self.vpn_gateways = {}
super(VpnGatewayBackend, self).__init__()
def create_vpn_gateway(self, type='ipsec.1'):
vpn_gateway_id = random_vpn_gateway_id()
vpn_gateway = VpnGateway(self, vpn_gateway_id, type)
self.vpn_gateways[vpn_gateway_id] = vpn_gateway
return vpn_gateway
def get_all_vpn_gateways(self, filters=None):
vpn_gateways = self.vpn_gateways.values()
return generic_filter(filters, vpn_gateways)
def get_vpn_gateway(self, vpn_gateway_id):
vpn_gateway = self.vpn_gateways.get(vpn_gateway_id, None)
if not vpn_gateway:
raise InvalidVpnGatewayIdError(vpn_gateway_id)
return vpn_gateway
def attach_vpn_gateway(self, vpn_gateway_id, vpc_id):
vpn_gateway = self.get_vpn_gateway(vpn_gateway_id)
self.get_vpc(vpc_id)
attachment = VpnGatewayAttachment(vpc_id, state='attached')
vpn_gateway.attachments[vpc_id] = attachment
return attachment
def delete_vpn_gateway(self, vpn_gateway_id):
deleted = self.vpn_gateways.pop(vpn_gateway_id, None)
if not deleted:
raise InvalidVpnGatewayIdError(vpn_gateway_id)
return deleted
def detach_vpn_gateway(self, vpn_gateway_id, vpc_id):
vpn_gateway = self.get_vpn_gateway(vpn_gateway_id)
self.get_vpc(vpc_id)
detached = vpn_gateway.attachments.pop(vpc_id, None)
if not detached:
raise InvalidVPCIdError(vpc_id)
return detached
class CustomerGateway(TaggedEC2Resource):
def __init__(self, ec2_backend, id, type, ip_address, bgp_asn):
self.ec2_backend = ec2_backend
self.id = id
self.type = type
self.ip_address = ip_address
self.bgp_asn = bgp_asn
self.attachments = {}
super(CustomerGateway, self).__init__()
def get_filter_value(self, filter_name):
return super(CustomerGateway, self).get_filter_value(
filter_name, 'DescribeCustomerGateways')
class CustomerGatewayBackend(object):
def __init__(self):
self.customer_gateways = {}
super(CustomerGatewayBackend, self).__init__()
def create_customer_gateway(self, type='ipsec.1', ip_address=None, bgp_asn=None):
customer_gateway_id = random_customer_gateway_id()
customer_gateway = CustomerGateway(
self, customer_gateway_id, type, ip_address, bgp_asn)
self.customer_gateways[customer_gateway_id] = customer_gateway
return customer_gateway
def get_all_customer_gateways(self, filters=None):
customer_gateways = self.customer_gateways.values()
return generic_filter(filters, customer_gateways)
def get_customer_gateway(self, customer_gateway_id):
customer_gateway = self.customer_gateways.get(
customer_gateway_id, None)
if not customer_gateway:
raise InvalidCustomerGatewayIdError(customer_gateway_id)
return customer_gateway
def delete_customer_gateway(self, customer_gateway_id):
deleted = self.customer_gateways.pop(customer_gateway_id, None)
if not deleted:
raise InvalidCustomerGatewayIdError(customer_gateway_id)
return deleted
class NatGateway(object):
def __init__(self, backend, subnet_id, allocation_id):
# public properties
self.id = random_nat_gateway_id()
self.subnet_id = subnet_id
self.allocation_id = allocation_id
self.state = 'available'
self.private_ip = random_private_ip()
# protected properties
self._created_at = datetime.utcnow()
self._backend = backend
# NOTE: this is the core of NAT Gateways creation
self._eni = self._backend.create_network_interface(
backend.get_subnet(self.subnet_id), self.private_ip)
# associate allocation with ENI
self._backend.associate_address(
eni=self._eni, allocation_id=self.allocation_id)
@property
def vpc_id(self):
subnet = self._backend.get_subnet(self.subnet_id)
return subnet.vpc_id
@property
def create_time(self):
return iso_8601_datetime_with_milliseconds(self._created_at)
@property
def network_interface_id(self):
return self._eni.id
@property
def public_ip(self):
eips = self._backend.address_by_allocation([self.allocation_id])
return eips[0].public_ip
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
ec2_backend = ec2_backends[region_name]
nat_gateway = ec2_backend.create_nat_gateway(
cloudformation_json['Properties']['SubnetId'],
cloudformation_json['Properties']['AllocationId'],
)
return nat_gateway
class NatGatewayBackend(object):
def __init__(self):
self.nat_gateways = {}
super(NatGatewayBackend, self).__init__()
def get_all_nat_gateways(self, filters):
return self.nat_gateways.values()
def create_nat_gateway(self, subnet_id, allocation_id):
nat_gateway = NatGateway(self, subnet_id, allocation_id)
self.nat_gateways[nat_gateway.id] = nat_gateway
return nat_gateway
def delete_nat_gateway(self, nat_gateway_id):
return self.nat_gateways.pop(nat_gateway_id)
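# Illustrative sketch (added for clarity, never called): creating a NAT
# gateway, which wires up a network interface in the subnet and associates
# the elastic IP allocation with it, as implemented in NatGateway.__init__.
def _example_nat_gateway():
    backend = ec2_backends['us-east-1']
    vpc = backend.create_vpc('10.14.0.0/16')
    subnet = backend.create_subnet(vpc.id, '10.14.0.0/24',
                                   availability_zone='us-east-1a')
    eip = backend.allocate_address(domain='vpc')
    nat = backend.create_nat_gateway(subnet.id, eip.allocation_id)
    assert nat.vpc_id == vpc.id
    assert nat.public_ip == eip.public_ip
    backend.delete_nat_gateway(nat.id)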
class EC2Backend(BaseBackend, InstanceBackend, TagBackend, EBSBackend,
RegionsAndZonesBackend, SecurityGroupBackend, AmiBackend,
VPCBackend, SubnetBackend, SubnetRouteTableAssociationBackend,
NetworkInterfaceBackend, VPNConnectionBackend,
VPCPeeringConnectionBackend,
RouteTableBackend, RouteBackend, InternetGatewayBackend,
VPCGatewayAttachmentBackend, SpotFleetBackend,
SpotRequestBackend, ElasticAddressBackend, KeyPairBackend,
DHCPOptionsSetBackend, NetworkAclBackend, VpnGatewayBackend,
CustomerGatewayBackend, NatGatewayBackend):
def __init__(self, region_name):
self.region_name = region_name
super(EC2Backend, self).__init__()
# Default VPC exists by default, which is the current behavior
# of EC2-VPC. See for detail:
#
# docs.aws.amazon.com/AmazonVPC/latest/UserGuide/default-vpc.html
#
if not self.vpcs:
vpc = self.create_vpc('172.31.0.0/16')
else:
# For now this is included for potential
# backward-compatibility issues
vpc = list(self.vpcs.values())[0]
# Create default subnet for each availability zone
ip, _ = vpc.cidr_block.split('/')
ip = ip.split('.')
ip[2] = 0
for zone in self.describe_availability_zones():
az_name = zone.name
cidr_block = '.'.join(str(i) for i in ip) + '/20'
self.create_subnet(vpc.id, cidr_block, availability_zone=az_name)
ip[2] += 16
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
# Use this to generate a proper error template response when in a response
# handler.
def raise_error(self, code, message):
raise EC2ClientError(code, message)
def raise_not_implemented_error(self, blurb):
raise MotoNotImplementedError(blurb)
def do_resources_exist(self, resource_ids):
for resource_id in resource_ids:
resource_prefix = get_prefix(resource_id)
if resource_prefix == EC2_RESOURCE_TO_PREFIX['customer-gateway']:
self.get_customer_gateway(customer_gateway_id=resource_id)
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['dhcp-options']:
self.describe_dhcp_options(options_ids=[resource_id])
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['image']:
self.describe_images(ami_ids=[resource_id])
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['instance']:
self.get_instance_by_id(instance_id=resource_id)
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['internet-gateway']:
self.describe_internet_gateways(
internet_gateway_ids=[resource_id])
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['network-acl']:
self.get_all_network_acls()
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['network-interface']:
self.describe_network_interfaces(
filters={'network-interface-id': resource_id})
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['reserved-instance']:
self.raise_not_implemented_error('DescribeReservedInstances')
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['route-table']:
self.get_route_table(route_table_id=resource_id)
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['security-group']:
self.describe_security_groups(group_ids=[resource_id])
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['snapshot']:
self.get_snapshot(snapshot_id=resource_id)
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['spot-instance-request']:
self.describe_spot_instance_requests(
filters={'spot-instance-request-id': resource_id})
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['subnet']:
self.get_subnet(subnet_id=resource_id)
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['volume']:
self.get_volume(volume_id=resource_id)
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['vpc']:
self.get_vpc(vpc_id=resource_id)
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['vpc-peering-connection']:
self.get_vpc_peering_connection(vpc_pcx_id=resource_id)
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['vpn-connection']:
self.describe_vpn_connections(vpn_connection_ids=[resource_id])
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['vpn-gateway']:
self.get_vpn_gateway(vpn_gateway_id=resource_id)
return True
ec2_backends = {region.name: EC2Backend(region.name)
for region in RegionsAndZonesBackend.regions}
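# Illustrative sketch (added for clarity, never called): every regional
# backend in `ec2_backends` starts out with the default 172.31.0.0/16 VPC and
# one /20 subnet per availability zone, as set up in EC2Backend.__init__ above.
def _example_default_vpc():
    backend = ec2_backends['us-east-1']
    assert any(vpc.cidr_block == '172.31.0.0/16'
               for vpc in backend.vpcs.values())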
| 38.679435
| 136
| 0.631112
|
1850227088baad4245c0f4827c78e79277e0cbbf
| 48,104
|
py
|
Python
|
nltk/sem/boxer.py
|
abimurali1993/dev
|
295182a75b9b6031656b5c6c10866d517a1992cf
|
[
"Apache-2.0"
] | 2
|
2015-03-10T14:33:16.000Z
|
2015-03-10T14:33:18.000Z
|
nltk/sem/boxer.py
|
abimurali1993/dev
|
295182a75b9b6031656b5c6c10866d517a1992cf
|
[
"Apache-2.0"
] | null | null | null |
nltk/sem/boxer.py
|
abimurali1993/dev
|
295182a75b9b6031656b5c6c10866d517a1992cf
|
[
"Apache-2.0"
] | null | null | null |
# Natural Language Toolkit: Interface to Boxer
# <http://svn.ask.it.usyd.edu.au/trac/candc/wiki/boxer>
#
# Author: Dan Garrette <dhgarrette@gmail.com>
#
# Copyright (C) 2001-2013 NLTK Project
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
An interface to Boxer.
This interface relies on the latest development (Subversion) version of
C&C and Boxer.
Usage:
Set the environment variable CANDCHOME to the bin directory of your CandC installation.
The models directory should be in the CandC root directory.
For example:
/path/to/candc/
bin/
candc
boxer
models/
boxer/
"""
from __future__ import print_function, unicode_literals
import os
import re
import operator
import subprocess
from optparse import OptionParser
import tempfile
from functools import reduce
from nltk.internals import Counter, find_binary
from nltk.sem.logic import (ExpectedMoreTokensException, ParseException,
UnexpectedTokenException, Variable)
from nltk.sem.drt import (DRS, DrtApplicationExpression, DrtEqualityExpression,
DrtNegatedExpression, DrtOrExpression, DrtParser,
DrtProposition, DrtTokens, DrtVariableExpression)
from nltk.compat import python_2_unicode_compatible
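# Illustrative usage sketch (not part of the module; requires a working C&C
# and Boxer installation with CANDCHOME pointing at its bin/ directory, as
# described in the module docstring above):
#
#     boxer = Boxer()
#     drs = boxer.interpret('John sees a dog.')
#     print(drs)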
class Boxer(object):
"""
This class is an interface to Johan Bos's program Boxer, a wide-coverage
semantic parser that produces Discourse Representation Structures (DRSs).
"""
def __init__(self, boxer_drs_interpreter=None, elimeq=False, bin_dir=None, verbose=False):
"""
:param boxer_drs_interpreter: A class that converts from the
``AbstractBoxerDrs`` object hierarchy to a different object. The
default is ``NltkDrtBoxerDrsInterpreter``, which converts to the NLTK
DRT hierarchy.
:param elimeq: When set to true, Boxer removes all equalities from the
DRSs and discourse referents standing in the equality relation are
unified, but only if this can be done in a meaning-preserving manner.
"""
if boxer_drs_interpreter is None:
boxer_drs_interpreter = NltkDrtBoxerDrsInterpreter()
self._boxer_drs_interpreter = boxer_drs_interpreter
self._elimeq = elimeq
self.set_bin_dir(bin_dir, verbose)
def set_bin_dir(self, bin_dir, verbose=False):
self._candc_bin = self._find_binary('candc', bin_dir, verbose)
self._candc_models_path = os.path.normpath(os.path.join(self._candc_bin[:-5], '../models'))
self._boxer_bin = self._find_binary('boxer', bin_dir, verbose)
def interpret(self, input, discourse_id=None, question=False, verbose=False):
"""
Use Boxer to give a first order representation.
:param input: str Input sentence to parse
:param question: bool Is the input a question, so that the question-specific C&C model is used?
:param discourse_id: str An identifier to be inserted to each occurrence-indexed predicate.
:return: ``drt.AbstractDrs``
"""
discourse_ids = ([discourse_id] if discourse_id is not None else None)
d, = self.batch_interpret_multisentence([[input]], discourse_ids, question, verbose)
if not d:
raise Exception('Unable to interpret: "%s"' % input)
return d
def interpret_multisentence(self, input, discourse_id=None, question=False, verbose=False):
"""
Use Boxer to give a first order representation.
:param input: list of str Input sentences to parse as a single discourse
:param question: bool Are the inputs questions, so that the question-specific C&C model is used?
:param discourse_id: str An identifier to be inserted to each occurrence-indexed predicate.
:return: ``drt.AbstractDrs``
"""
discourse_ids = ([discourse_id] if discourse_id is not None else None)
d, = self.batch_interpret_multisentence([input], discourse_ids, question, verbose)
if not d:
raise Exception('Unable to interpret: "%s"' % input)
return d
def batch_interpret(self, inputs, discourse_ids=None, question=False, verbose=False):
"""
Use Boxer to give a first order representation.
:param inputs: list of str Input sentences to parse as individual discourses
:param question: bool Are the inputs questions, so that the question-specific C&C model is used?
:param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
:return: list of ``drt.AbstractDrs``
"""
return self.batch_interpret_multisentence([[input] for input in inputs], discourse_ids, question, verbose)
def batch_interpret_multisentence(self, inputs, discourse_ids=None, question=False, verbose=False):
"""
Use Boxer to give a first order representation.
:param inputs: list of list of str Input discourses to parse
:param question: bool Are the inputs questions, so that the question-specific C&C model is used?
:param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
:return: list of ``drt.AbstractDrs``
"""
if discourse_ids is not None:
assert len(inputs) == len(discourse_ids)
assert reduce(operator.and_, (id is not None for id in discourse_ids))
use_disc_id = True
else:
discourse_ids = list(map(str, range(len(inputs))))
use_disc_id = False
candc_out = self._call_candc(inputs, discourse_ids, question, verbose=verbose)
boxer_out = self._call_boxer(candc_out, verbose=verbose)
# if 'ERROR: input file contains no ccg/2 terms.' in boxer_out:
# raise UnparseableInputException('Could not parse with candc: "%s"' % input_str)
drs_dict = self._parse_to_drs_dict(boxer_out, use_disc_id)
return [drs_dict.get(id, None) for id in discourse_ids]
def _call_candc(self, inputs, discourse_ids, question, verbose=False):
"""
Call the ``candc`` binary with the given input.
:param inputs: list of list of str Input discourses to parse
:param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
:param question: bool Use the question-specific C&C model
:return: stdout
"""
args = ['--models', os.path.join(self._candc_models_path, ['boxer','questions'][question]),
'--candc-printer', 'boxer']
return self._call('\n'.join(sum((["<META>'%s'" % id] + d for d,id in zip(inputs,discourse_ids)), [])), self._candc_bin, args, verbose)
def _call_boxer(self, candc_out, verbose=False):
"""
Call the ``boxer`` binary with the given input.
:param candc_out: str output from C&C parser
:return: stdout
"""
f = None
try:
fd, temp_filename = tempfile.mkstemp(prefix='boxer-', suffix='.in', text=True)
f = os.fdopen(fd, 'w')
f.write(candc_out)
finally:
if f: f.close()
args = ['--box', 'false',
'--semantics', 'drs',
'--flat', 'false',
'--resolve', 'true',
'--elimeq', ['false','true'][self._elimeq],
'--format', 'prolog',
'--instantiate', 'true',
'--input', temp_filename]
stdout = self._call(None, self._boxer_bin, args, verbose)
os.remove(temp_filename)
return stdout
def _find_binary(self, name, bin_dir, verbose=False):
return find_binary(name,
path_to_bin=bin_dir,
env_vars=['CANDCHOME'],
url='http://svn.ask.it.usyd.edu.au/trac/candc/',
binary_names=[name, name + '.exe'],
verbose=verbose)
def _call(self, input_str, binary, args=[], verbose=False):
"""
Call the binary with the given input.
:param input_str: A string whose contents are used as stdin.
:param binary: The location of the binary to call
:param args: A list of command-line arguments.
:return: stdout
"""
if verbose:
print('Calling:', binary)
print('Args:', args)
print('Input:', input_str)
print('Command:', binary + ' ' + ' '.join(args))
# Call via a subprocess
if input_str is None:
cmd = [binary] + args
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
cmd = 'echo "%s" | %s %s' % (input_str, binary, ' '.join(args))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = p.communicate()
if verbose:
print('Return code:', p.returncode)
if stdout: print('stdout:\n', stdout, '\n')
if stderr: print('stderr:\n', stderr, '\n')
if p.returncode != 0:
raise Exception('ERROR CALLING: %s %s\nReturncode: %d\n%s' % (binary, ' '.join(args), p.returncode, stderr))
return stdout
def _parse_to_drs_dict(self, boxer_out, use_disc_id):
lines = boxer_out.split('\n')
drs_dict = {}
i = 0
while i < len(lines):
line = lines[i]
if line.startswith('id('):
comma_idx = line.index(',')
discourse_id = line[3:comma_idx]
if discourse_id[0] == "'" and discourse_id[-1] == "'":
discourse_id = discourse_id[1:-1]
drs_id = line[comma_idx+1:line.index(')')]
i += 1
line = lines[i]
assert line.startswith('sem(%s,' % drs_id)
assert line.endswith(').')
search_start = len('sem(%s,[' % drs_id)
brace_count = 1
drs_start = -1
for j,c in enumerate(line[search_start:]):
if(c == '['):
brace_count += 1
if(c == ']'):
brace_count -= 1
if(brace_count == 0):
drs_start = search_start + j + 2
break
assert drs_start > -1
drs_input = line[drs_start:-2].strip()
parsed = self._parse_drs(drs_input, discourse_id, use_disc_id)
drs_dict[discourse_id] = self._boxer_drs_interpreter.interpret(parsed)
i += 1
return drs_dict
def _parse_drs(self, drs_string, discourse_id, use_disc_id):
return BoxerOutputDrsParser([None,discourse_id][use_disc_id]).parse(drs_string)
class BoxerOutputDrsParser(DrtParser):
def __init__(self, discourse_id=None):
"""
This class is used to parse the Prolog DRS output from Boxer into a
hierarchy of python objects.
"""
DrtParser.__init__(self)
self.discourse_id = discourse_id
self.sentence_id_offset = None
self.quote_chars = [("'", "'", "\\", False)]
self._label_counter = None
def parse(self, data, signature=None):
self._label_counter = Counter(-1)
return DrtParser.parse(self, data, signature)
def get_all_symbols(self):
return ['(', ')', ',', '[', ']',':']
def handle(self, tok, context):
return self.handle_drs(tok)
def attempt_adjuncts(self, expression, context):
return expression
def parse_condition(self, indices):
"""
Parse a DRS condition
:return: list of ``AbstractDrs``
"""
tok = self.token()
accum = self.handle_condition(tok, indices)
if accum is None:
raise UnexpectedTokenException(tok)
return accum
def handle_drs(self, tok):
if tok == 'drs':
return self.parse_drs()
elif tok in ['merge', 'smerge']:
return self._handle_binary_expression(self._make_merge_expression)(None, [])
elif tok in ['alfa']:
return self._handle_alfa(self._make_merge_expression)(None, [])
def handle_condition(self, tok, indices):
"""
Handle a DRS condition
:param indices: list of int
:return: list of ``AbstractDrs``
"""
if tok == 'not':
return [self._handle_not()]
if tok == 'or':
conds = [self._handle_binary_expression(self._make_or_expression)]
elif tok == 'imp':
conds = [self._handle_binary_expression(self._make_imp_expression)]
elif tok == 'eq':
conds = [self._handle_eq()]
elif tok == 'prop':
conds = [self._handle_prop()]
elif tok == 'pred':
conds = [self._handle_pred()]
elif tok == 'named':
conds = [self._handle_named()]
elif tok == 'rel':
conds = [self._handle_rel()]
elif tok == 'timex':
conds = self._handle_timex()
elif tok == 'card':
conds = [self._handle_card()]
elif tok == 'whq':
conds = [self._handle_whq()]
else:
conds = []
return sum([[cond(sent_index, word_indices) for cond in conds] for sent_index, word_indices in self._sent_and_word_indices(indices)], [])
def _handle_not(self):
self.assertToken(self.token(), '(')
drs = self.parse_Expression(None)
self.assertToken(self.token(), ')')
return BoxerNot(drs)
def _handle_pred(self):
#pred(_G3943, dog, n, 0)
self.assertToken(self.token(), '(')
variable = self.parse_variable()
self.assertToken(self.token(), ',')
name = self.token()
self.assertToken(self.token(), ',')
pos = self.token()
self.assertToken(self.token(), ',')
sense = int(self.token())
self.assertToken(self.token(), ')')
def _handle_pred_f(sent_index, word_indices):
return BoxerPred(self.discourse_id, sent_index, word_indices, variable, name, pos, sense)
return _handle_pred_f
def _handle_named(self):
#named(x0, john, per, 0)
self.assertToken(self.token(), '(')
variable = self.parse_variable()
self.assertToken(self.token(), ',')
name = self.token()
self.assertToken(self.token(), ',')
type = self.token()
self.assertToken(self.token(), ',')
sense = int(self.token())
self.assertToken(self.token(), ')')
return lambda sent_index, word_indices: BoxerNamed(self.discourse_id, sent_index, word_indices, variable, name, type, sense)
def _handle_rel(self):
#rel(_G3993, _G3943, agent, 0)
self.assertToken(self.token(), '(')
var1 = self.parse_variable()
self.assertToken(self.token(), ',')
var2 = self.parse_variable()
self.assertToken(self.token(), ',')
rel = self.token()
self.assertToken(self.token(), ',')
sense = int(self.token())
self.assertToken(self.token(), ')')
return lambda sent_index, word_indices: BoxerRel(self.discourse_id, sent_index, word_indices, var1, var2, rel, sense)
def _handle_timex(self):
#timex(_G18322, date([]: (+), []:'XXXX', [1004]:'04', []:'XX'))
self.assertToken(self.token(), '(')
arg = self.parse_variable()
self.assertToken(self.token(), ',')
new_conds = self._handle_time_expression(arg)
self.assertToken(self.token(), ')')
return new_conds
def _handle_time_expression(self, arg):
#date([]: (+), []:'XXXX', [1004]:'04', []:'XX')
tok = self.token()
self.assertToken(self.token(), '(')
if tok == 'date':
conds = self._handle_date(arg)
elif tok == 'time':
conds = self._handle_time(arg)
else:
return None
self.assertToken(self.token(), ')')
return [lambda sent_index, word_indices: BoxerPred(self.discourse_id, sent_index, word_indices, arg, tok, 'n', 0)] + \
[lambda sent_index, word_indices, cond=cond: cond for cond in conds]  # bind cond per iteration
def _handle_date(self, arg):
#[]: (+), []:'XXXX', [1004]:'04', []:'XX'
conds = []
(sent_index, word_indices), = self._sent_and_word_indices(self._parse_index_list())
self.assertToken(self.token(), '(')
pol = self.token()
self.assertToken(self.token(), ')')
conds.append(BoxerPred(self.discourse_id, sent_index, word_indices, arg, 'date_pol_%s' % (pol), 'a', 0))
self.assertToken(self.token(), ',')
(sent_index, word_indices), = self._sent_and_word_indices(self._parse_index_list())
year = self.token()
if year != 'XXXX':
year = year.replace(':', '_')
conds.append(BoxerPred(self.discourse_id, sent_index, word_indices, arg, 'date_year_%s' % (year), 'a', 0))
self.assertToken(self.token(), ',')
(sent_index, word_indices), = self._sent_and_word_indices(self._parse_index_list())
month = self.token()
if month != 'XX':
conds.append(BoxerPred(self.discourse_id, sent_index, word_indices, arg, 'date_month_%s' % (month), 'a', 0))
self.assertToken(self.token(), ',')
(sent_index, word_indices), = self._sent_and_word_indices(self._parse_index_list())
day = self.token()
if day != 'XX':
conds.append(BoxerPred(self.discourse_id, sent_index, word_indices, arg, 'date_day_%s' % (day), 'a', 0))
return conds
def _handle_time(self, arg):
#time([1018]:'18', []:'XX', []:'XX')
conds = []
self._parse_index_list()
hour = self.token()
if hour != 'XX':
conds.append(self._make_atom('r_hour_2',arg,hour))
self.assertToken(self.token(), ',')
self._parse_index_list()
min = self.token()
if min != 'XX':
conds.append(self._make_atom('r_min_2',arg,min))
self.assertToken(self.token(), ',')
self._parse_index_list()
sec = self.token()
if sec != 'XX':
conds.append(self._make_atom('r_sec_2',arg,sec))
return conds
def _handle_card(self):
#card(_G18535, 28, ge)
self.assertToken(self.token(), '(')
variable = self.parse_variable()
self.assertToken(self.token(), ',')
value = self.token()
self.assertToken(self.token(), ',')
type = self.token()
self.assertToken(self.token(), ')')
return lambda sent_index, word_indices: BoxerCard(self.discourse_id, sent_index, word_indices, variable, value, type)
def _handle_prop(self):
#prop(_G15949, drs(...))
self.assertToken(self.token(), '(')
variable = self.parse_variable()
self.assertToken(self.token(), ',')
drs = self.parse_Expression(None)
self.assertToken(self.token(), ')')
return lambda sent_index, word_indices: BoxerProp(self.discourse_id, sent_index, word_indices, variable, drs)
def _parse_index_list(self):
#[1001,1002]:
indices = []
self.assertToken(self.token(), '[')
while self.token(0) != ']':
indices.append(self.parse_index())
if self.token(0) == ',':
self.token() #swallow ','
self.token() #swallow ']'
self.assertToken(self.token(), ':')
return indices
def parse_drs(self):
#drs([[1001]:_G3943],
# [[1002]:pred(_G3943, dog, n, 0)]
# )
label = self._label_counter.get()
self.assertToken(self.token(), '(')
self.assertToken(self.token(), '[')
refs = set()
while self.token(0) != ']':
indices = self._parse_index_list()
refs.add(self.parse_variable())
if self.token(0) == ',':
self.token() #swallow ','
self.token() #swallow ']'
self.assertToken(self.token(), ',')
self.assertToken(self.token(), '[')
conds = []
while self.token(0) != ']':
indices = self._parse_index_list()
conds.extend(self.parse_condition(indices))
if self.token(0) == ',':
self.token() #swallow ','
self.token() #swallow ']'
self.assertToken(self.token(), ')')
return BoxerDrs(label, list(refs), conds)
def _handle_binary_expression(self, make_callback):
self.assertToken(self.token(), '(')
drs1 = self.parse_Expression(None)
self.assertToken(self.token(), ',')
drs2 = self.parse_Expression(None)
self.assertToken(self.token(), ')')
return lambda sent_index, word_indices: make_callback(sent_index, word_indices, drs1, drs2)
def _handle_alfa(self, make_callback):
self.assertToken(self.token(), '(')
type = self.token()
self.assertToken(self.token(), ',')
drs1 = self.parse_Expression(None)
self.assertToken(self.token(), ',')
drs2 = self.parse_Expression(None)
self.assertToken(self.token(), ')')
return lambda sent_index, word_indices: make_callback(sent_index, word_indices, drs1, drs2)
def _handle_eq(self):
self.assertToken(self.token(), '(')
var1 = self.parse_variable()
self.assertToken(self.token(), ',')
var2 = self.parse_variable()
self.assertToken(self.token(), ')')
return lambda sent_index, word_indices: BoxerEq(self.discourse_id, sent_index, word_indices, var1, var2)
def _handle_whq(self):
self.assertToken(self.token(), '(')
self.assertToken(self.token(), '[')
ans_types = []
while self.token(0) != ']':
cat = self.token()
self.assertToken(self.token(), ':')
if cat == 'des':
ans_types.append(self.token())
elif cat == 'num':
ans_types.append('number')
typ = self.token()
if typ == 'cou':
ans_types.append('count')
else:
ans_types.append(typ)
else:
ans_types.append(self.token())
self.token() #swallow the ']'
self.assertToken(self.token(), ',')
d1 = self.parse_Expression(None)
self.assertToken(self.token(), ',')
ref = self.parse_variable()
self.assertToken(self.token(), ',')
d2 = self.parse_Expression(None)
self.assertToken(self.token(), ')')
return lambda sent_index, word_indices: BoxerWhq(self.discourse_id, sent_index, word_indices, ans_types, d1, ref, d2)
def _make_merge_expression(self, sent_index, word_indices, drs1, drs2):
return BoxerDrs(drs1.label, drs1.refs + drs2.refs, drs1.conds + drs2.conds)
def _make_or_expression(self, sent_index, word_indices, drs1, drs2):
return BoxerOr(self.discourse_id, sent_index, word_indices, drs1, drs2)
def _make_imp_expression(self, sent_index, word_indices, drs1, drs2):
return BoxerDrs(drs1.label, drs1.refs, drs1.conds, drs2)
def parse_variable(self):
var = self.token()
        assert re.match(r'^[ex]\d+$', var), var
return int(var[1:])
def parse_index(self):
return int(self.token())
def _sent_and_word_indices(self, indices):
"""
:return: list of (sent_index, word_indices) tuples
"""
        sent_indices = set((i // 1000)-1 for i in indices if i>=0)
if sent_indices:
pairs = []
for sent_index in sent_indices:
                word_indices = [(i % 1000)-1 for i in indices if sent_index == (i // 1000)-1]
pairs.append((sent_index, word_indices))
return pairs
else:
word_indices = [(i % 1000)-1 for i in indices]
return [(None, word_indices)]
class BoxerDrsParser(DrtParser):
"""
Reparse the str form of subclasses of ``AbstractBoxerDrs``
"""
def __init__(self, discourse_id=None):
DrtParser.__init__(self)
self.discourse_id = discourse_id
def get_all_symbols(self):
return [DrtTokens.OPEN, DrtTokens.CLOSE, DrtTokens.COMMA, DrtTokens.OPEN_BRACKET, DrtTokens.CLOSE_BRACKET]
def attempt_adjuncts(self, expression, context):
return expression
def handle(self, tok, context):
try:
if tok == 'drs':
self.assertNextToken(DrtTokens.OPEN)
label = int(self.token())
self.assertNextToken(DrtTokens.COMMA)
refs = list(map(int, self.handle_refs()))
self.assertNextToken(DrtTokens.COMMA)
conds = self.handle_conds(None)
self.assertNextToken(DrtTokens.CLOSE)
return BoxerDrs(label, refs, conds)
elif tok == 'pred':
self.assertNextToken(DrtTokens.OPEN)
disc_id = (self.token(), self.discourse_id)[self.discourse_id is not None]
self.assertNextToken(DrtTokens.COMMA)
sent_id = self.nullableIntToken()
self.assertNextToken(DrtTokens.COMMA)
word_ids = list(map(int, self.handle_refs()))
self.assertNextToken(DrtTokens.COMMA)
variable = int(self.token())
self.assertNextToken(DrtTokens.COMMA)
name = self.token()
self.assertNextToken(DrtTokens.COMMA)
pos = self.token()
self.assertNextToken(DrtTokens.COMMA)
sense = int(self.token())
self.assertNextToken(DrtTokens.CLOSE)
return BoxerPred(disc_id, sent_id, word_ids, variable, name, pos, sense)
elif tok == 'named':
self.assertNextToken(DrtTokens.OPEN)
disc_id = (self.token(), self.discourse_id)[self.discourse_id is not None]
self.assertNextToken(DrtTokens.COMMA)
sent_id = int(self.token())
self.assertNextToken(DrtTokens.COMMA)
                word_ids = list(map(int, self.handle_refs()))
self.assertNextToken(DrtTokens.COMMA)
variable = int(self.token())
self.assertNextToken(DrtTokens.COMMA)
name = self.token()
self.assertNextToken(DrtTokens.COMMA)
type = self.token()
self.assertNextToken(DrtTokens.COMMA)
sense = int(self.token())
self.assertNextToken(DrtTokens.CLOSE)
return BoxerNamed(disc_id, sent_id, word_ids, variable, name, type, sense)
elif tok == 'rel':
self.assertNextToken(DrtTokens.OPEN)
disc_id = (self.token(), self.discourse_id)[self.discourse_id is not None]
self.assertNextToken(DrtTokens.COMMA)
sent_id = self.nullableIntToken()
self.assertNextToken(DrtTokens.COMMA)
word_ids = list(map(int, self.handle_refs()))
self.assertNextToken(DrtTokens.COMMA)
var1 = int(self.token())
self.assertNextToken(DrtTokens.COMMA)
var2 = int(self.token())
self.assertNextToken(DrtTokens.COMMA)
rel = self.token()
self.assertNextToken(DrtTokens.COMMA)
sense = int(self.token())
self.assertNextToken(DrtTokens.CLOSE)
return BoxerRel(disc_id, sent_id, word_ids, var1, var2, rel, sense)
elif tok == 'prop':
self.assertNextToken(DrtTokens.OPEN)
disc_id = (self.token(), self.discourse_id)[self.discourse_id is not None]
self.assertNextToken(DrtTokens.COMMA)
sent_id = int(self.token())
self.assertNextToken(DrtTokens.COMMA)
word_ids = list(map(int, self.handle_refs()))
self.assertNextToken(DrtTokens.COMMA)
variable = int(self.token())
self.assertNextToken(DrtTokens.COMMA)
drs = self.parse_Expression(None)
self.assertNextToken(DrtTokens.CLOSE)
return BoxerProp(disc_id, sent_id, word_ids, variable, drs)
elif tok == 'not':
self.assertNextToken(DrtTokens.OPEN)
drs = self.parse_Expression(None)
self.assertNextToken(DrtTokens.CLOSE)
return BoxerNot(drs)
elif tok == 'imp':
self.assertNextToken(DrtTokens.OPEN)
drs1 = self.parse_Expression(None)
self.assertNextToken(DrtTokens.COMMA)
drs2 = self.parse_Expression(None)
self.assertNextToken(DrtTokens.CLOSE)
return BoxerDrs(drs1.label, drs1.refs, drs1.conds, drs2)
elif tok == 'or':
self.assertNextToken(DrtTokens.OPEN)
disc_id = (self.token(), self.discourse_id)[self.discourse_id is not None]
self.assertNextToken(DrtTokens.COMMA)
sent_id = self.nullableIntToken()
self.assertNextToken(DrtTokens.COMMA)
                word_ids = list(map(int, self.handle_refs()))
self.assertNextToken(DrtTokens.COMMA)
drs1 = self.parse_Expression(None)
self.assertNextToken(DrtTokens.COMMA)
drs2 = self.parse_Expression(None)
self.assertNextToken(DrtTokens.CLOSE)
return BoxerOr(disc_id, sent_id, word_ids, drs1, drs2)
elif tok == 'eq':
self.assertNextToken(DrtTokens.OPEN)
disc_id = (self.token(), self.discourse_id)[self.discourse_id is not None]
self.assertNextToken(DrtTokens.COMMA)
sent_id = self.nullableIntToken()
self.assertNextToken(DrtTokens.COMMA)
word_ids = list(map(int, self.handle_refs()))
self.assertNextToken(DrtTokens.COMMA)
var1 = int(self.token())
self.assertNextToken(DrtTokens.COMMA)
var2 = int(self.token())
self.assertNextToken(DrtTokens.CLOSE)
return BoxerEq(disc_id, sent_id, word_ids, var1, var2)
elif tok == 'card':
self.assertNextToken(DrtTokens.OPEN)
disc_id = (self.token(), self.discourse_id)[self.discourse_id is not None]
self.assertNextToken(DrtTokens.COMMA)
sent_id = self.nullableIntToken()
self.assertNextToken(DrtTokens.COMMA)
                word_ids = list(map(int, self.handle_refs()))
self.assertNextToken(DrtTokens.COMMA)
var = int(self.token())
self.assertNextToken(DrtTokens.COMMA)
value = self.token()
self.assertNextToken(DrtTokens.COMMA)
type = self.token()
self.assertNextToken(DrtTokens.CLOSE)
return BoxerCard(disc_id, sent_id, word_ids, var, value, type)
elif tok == 'whq':
self.assertNextToken(DrtTokens.OPEN)
disc_id = (self.token(), self.discourse_id)[self.discourse_id is not None]
self.assertNextToken(DrtTokens.COMMA)
sent_id = self.nullableIntToken()
self.assertNextToken(DrtTokens.COMMA)
word_ids = list(map(int, self.handle_refs()))
self.assertNextToken(DrtTokens.COMMA)
ans_types = self.handle_refs()
self.assertNextToken(DrtTokens.COMMA)
drs1 = self.parse_Expression(None)
self.assertNextToken(DrtTokens.COMMA)
var = int(self.token())
self.assertNextToken(DrtTokens.COMMA)
drs2 = self.parse_Expression(None)
self.assertNextToken(DrtTokens.CLOSE)
return BoxerWhq(disc_id, sent_id, word_ids, ans_types, drs1, var, drs2)
except Exception as e:
raise ParseException(self._currentIndex, str(e))
assert False, repr(tok)
def nullableIntToken(self):
t = self.token()
        return int(t) if t != 'None' else None
def get_next_token_variable(self, description):
try:
return self.token()
except ExpectedMoreTokensException as e:
raise ExpectedMoreTokensException(e.index, 'Variable expected.')
class AbstractBoxerDrs(object):
def variables(self):
"""
:return: (set<variables>, set<events>, set<propositions>)
"""
variables, events, propositions = self._variables()
return (variables - (events | propositions), events, propositions - events)
def variable_types(self):
vartypes = {}
for t,vars in zip(('z','e','p'), self.variables()):
for v in vars:
vartypes[v] = t
return vartypes
def _variables(self):
"""
:return: (set<variables>, set<events>, set<propositions>)
"""
return (set(), set(), set())
def atoms(self):
return set()
def clean(self):
return self
def _clean_name(self, name):
return name.replace('-','_').replace("'", "_")
def renumber_sentences(self, f):
return self
def __hash__(self):
return hash("%s" % self)
@python_2_unicode_compatible
class BoxerDrs(AbstractBoxerDrs):
def __init__(self, label, refs, conds, consequent=None):
AbstractBoxerDrs.__init__(self)
self.label = label
self.refs = refs
self.conds = conds
self.consequent = consequent
def _variables(self):
variables = (set(), set(), set())
for cond in self.conds:
for s,v in zip(variables, cond._variables()):
s.update(v)
if self.consequent is not None:
for s,v in zip(variables, self.consequent._variables()):
s.update(v)
return variables
def atoms(self):
atoms = reduce(operator.or_, (cond.atoms() for cond in self.conds), set())
if self.consequent is not None:
atoms.update(self.consequent.atoms())
return atoms
def clean(self):
consequent = (self.consequent.clean() if self.consequent else None)
return BoxerDrs(self.label, self.refs, [c.clean() for c in self.conds], consequent)
def renumber_sentences(self, f):
consequent = (self.consequent.renumber_sentences(f) if self.consequent else None)
return BoxerDrs(self.label, self.refs, [c.renumber_sentences(f) for c in self.conds], consequent)
def __repr__(self):
s = 'drs(%s, [%s], [%s])' % (self.label,
', '.join("%s" % r for r in self.refs),
', '.join("%s" % c for c in self.conds))
if self.consequent is not None:
s = 'imp(%s, %s)' % (s, self.consequent)
return s
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.label == other.label and \
self.refs == other.refs and \
len(self.conds) == len(other.conds) and \
reduce(operator.and_, (c1==c2 for c1,c2 in zip(self.conds, other.conds))) and \
self.consequent == other.consequent
def __ne__(self, other):
return not self == other
__hash__ = AbstractBoxerDrs.__hash__
@python_2_unicode_compatible
class BoxerNot(AbstractBoxerDrs):
def __init__(self, drs):
AbstractBoxerDrs.__init__(self)
self.drs = drs
def _variables(self):
return self.drs._variables()
def atoms(self):
return self.drs.atoms()
def clean(self):
return BoxerNot(self.drs.clean())
def renumber_sentences(self, f):
return BoxerNot(self.drs.renumber_sentences(f))
def __repr__(self):
return 'not(%s)' % (self.drs)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.drs == other.drs
def __ne__(self, other):
return not self == other
__hash__ = AbstractBoxerDrs.__hash__
@python_2_unicode_compatible
class BoxerIndexed(AbstractBoxerDrs):
def __init__(self, discourse_id, sent_index, word_indices):
AbstractBoxerDrs.__init__(self)
self.discourse_id = discourse_id
self.sent_index = sent_index
self.word_indices = word_indices
def atoms(self):
return set([self])
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.discourse_id == other.discourse_id and \
self.sent_index == other.sent_index and \
self.word_indices == other.word_indices and \
reduce(operator.and_, (s==o for s,o in zip(self, other)))
def __ne__(self, other):
return not self == other
__hash__ = AbstractBoxerDrs.__hash__
def __repr__(self):
s = '%s(%s, %s, [%s]' % (self._pred(), self.discourse_id,
self.sent_index, ', '.join("%s" % wi for wi in self.word_indices))
for v in self:
s += ', %s' % v
return s + ')'
class BoxerPred(BoxerIndexed):
def __init__(self, discourse_id, sent_index, word_indices, var, name, pos, sense):
BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
self.var = var
self.name = name
self.pos = pos
self.sense = sense
def _variables(self):
return (set([self.var]), set(), set())
def change_var(self, var):
return BoxerPred(self.discourse_id, self.sent_index, self.word_indices, var, self.name, self.pos, self.sense)
def clean(self):
return BoxerPred(self.discourse_id, self.sent_index, self.word_indices, self.var, self._clean_name(self.name), self.pos, self.sense)
def renumber_sentences(self, f):
new_sent_index = f(self.sent_index)
return BoxerPred(self.discourse_id, new_sent_index, self.word_indices, self.var, self.name, self.pos, self.sense)
def __iter__(self):
return iter((self.var, self.name, self.pos, self.sense))
def _pred(self):
return 'pred'
class BoxerNamed(BoxerIndexed):
def __init__(self, discourse_id, sent_index, word_indices, var, name, type, sense):
BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
self.var = var
self.name = name
self.type = type
self.sense = sense
def _variables(self):
return (set([self.var]), set(), set())
def change_var(self, var):
return BoxerNamed(self.discourse_id, self.sent_index, self.word_indices, var, self.name, self.type, self.sense)
def clean(self):
return BoxerNamed(self.discourse_id, self.sent_index, self.word_indices, self.var, self._clean_name(self.name), self.type, self.sense)
def renumber_sentences(self, f):
return BoxerNamed(self.discourse_id, f(self.sent_index), self.word_indices, self.var, self.name, self.type, self.sense)
def __iter__(self):
return iter((self.var, self.name, self.type, self.sense))
def _pred(self):
return 'named'
class BoxerRel(BoxerIndexed):
def __init__(self, discourse_id, sent_index, word_indices, var1, var2, rel, sense):
BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
self.var1 = var1
self.var2 = var2
self.rel = rel
self.sense = sense
def _variables(self):
return (set([self.var1, self.var2]), set(), set())
def clean(self):
return BoxerRel(self.discourse_id, self.sent_index, self.word_indices, self.var1, self.var2, self._clean_name(self.rel), self.sense)
def renumber_sentences(self, f):
return BoxerRel(self.discourse_id, f(self.sent_index), self.word_indices, self.var1, self.var2, self.rel, self.sense)
def __iter__(self):
return iter((self.var1, self.var2, self.rel, self.sense))
def _pred(self):
return 'rel'
class BoxerProp(BoxerIndexed):
def __init__(self, discourse_id, sent_index, word_indices, var, drs):
BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
self.var = var
self.drs = drs
def _variables(self):
return tuple(map(operator.or_, (set(), set(), set([self.var])), self.drs._variables()))
def referenced_labels(self):
return set([self.drs])
def atoms(self):
return self.drs.atoms()
def clean(self):
return BoxerProp(self.discourse_id, self.sent_index, self.word_indices, self.var, self.drs.clean())
def renumber_sentences(self, f):
return BoxerProp(self.discourse_id, f(self.sent_index), self.word_indices, self.var, self.drs.renumber_sentences(f))
def __iter__(self):
return iter((self.var, self.drs))
def _pred(self):
return 'prop'
class BoxerEq(BoxerIndexed):
def __init__(self, discourse_id, sent_index, word_indices, var1, var2):
BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
self.var1 = var1
self.var2 = var2
def _variables(self):
return (set([self.var1, self.var2]), set(), set())
def atoms(self):
return set()
def renumber_sentences(self, f):
return BoxerEq(self.discourse_id, f(self.sent_index), self.word_indices, self.var1, self.var2)
def __iter__(self):
return iter((self.var1, self.var2))
def _pred(self):
return 'eq'
class BoxerCard(BoxerIndexed):
def __init__(self, discourse_id, sent_index, word_indices, var, value, type):
BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
self.var = var
self.value = value
self.type = type
def _variables(self):
return (set([self.var]), set(), set())
def renumber_sentences(self, f):
return BoxerCard(self.discourse_id, f(self.sent_index), self.word_indices, self.var, self.value, self.type)
def __iter__(self):
return iter((self.var, self.value, self.type))
def _pred(self):
return 'card'
class BoxerOr(BoxerIndexed):
def __init__(self, discourse_id, sent_index, word_indices, drs1, drs2):
BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
self.drs1 = drs1
self.drs2 = drs2
def _variables(self):
return tuple(map(operator.or_, self.drs1._variables(), self.drs2._variables()))
def atoms(self):
return self.drs1.atoms() | self.drs2.atoms()
def clean(self):
return BoxerOr(self.discourse_id, self.sent_index, self.word_indices, self.drs1.clean(), self.drs2.clean())
def renumber_sentences(self, f):
return BoxerOr(self.discourse_id, f(self.sent_index), self.word_indices, self.drs1, self.drs2)
def __iter__(self):
return iter((self.drs1, self.drs2))
def _pred(self):
return 'or'
class BoxerWhq(BoxerIndexed):
def __init__(self, discourse_id, sent_index, word_indices, ans_types, drs1, variable, drs2):
BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
self.ans_types = ans_types
self.drs1 = drs1
self.variable = variable
self.drs2 = drs2
def _variables(self):
return tuple(map(operator.or_, (set([self.variable]), set(), set()), self.drs1._variables(), self.drs2._variables()))
def atoms(self):
return self.drs1.atoms() | self.drs2.atoms()
def clean(self):
return BoxerWhq(self.discourse_id, self.sent_index, self.word_indices, self.ans_types, self.drs1.clean(), self.variable, self.drs2.clean())
def renumber_sentences(self, f):
return BoxerWhq(self.discourse_id, f(self.sent_index), self.word_indices, self.ans_types, self.drs1, self.variable, self.drs2)
def __iter__(self):
return iter(('['+','.join(self.ans_types)+']', self.drs1, self.variable, self.drs2))
def _pred(self):
return 'whq'
class PassthroughBoxerDrsInterpreter(object):
def interpret(self, ex):
return ex
class NltkDrtBoxerDrsInterpreter(object):
def __init__(self, occur_index=False):
self._occur_index = occur_index
def interpret(self, ex):
"""
:param ex: ``AbstractBoxerDrs``
:return: ``AbstractDrs``
"""
if isinstance(ex, BoxerDrs):
drs = DRS([Variable('x%d' % r) for r in ex.refs], list(map(self.interpret, ex.conds)))
if ex.label is not None:
drs.label = Variable('x%d' % ex.label)
if ex.consequent is not None:
drs.consequent = self.interpret(ex.consequent)
return drs
elif isinstance(ex, BoxerNot):
return DrtNegatedExpression(self.interpret(ex.drs))
elif isinstance(ex, BoxerPred):
pred = self._add_occur_indexing('%s_%s' % (ex.pos, ex.name), ex)
return self._make_atom(pred, 'x%d' % ex.var)
elif isinstance(ex, BoxerNamed):
pred = self._add_occur_indexing('ne_%s_%s' % (ex.type, ex.name), ex)
return self._make_atom(pred, 'x%d' % ex.var)
elif isinstance(ex, BoxerRel):
pred = self._add_occur_indexing('%s' % (ex.rel), ex)
return self._make_atom(pred, 'x%d' % ex.var1, 'x%d' % ex.var2)
elif isinstance(ex, BoxerProp):
return DrtProposition(Variable('x%d' % ex.var), self.interpret(ex.drs))
elif isinstance(ex, BoxerEq):
return DrtEqualityExpression(DrtVariableExpression(Variable('x%d' % ex.var1)),
DrtVariableExpression(Variable('x%d' % ex.var2)))
elif isinstance(ex, BoxerCard):
pred = self._add_occur_indexing('card_%s_%s' % (ex.type, ex.value), ex)
return self._make_atom(pred, 'x%d' % ex.var)
elif isinstance(ex, BoxerOr):
return DrtOrExpression(self.interpret(ex.drs1), self.interpret(ex.drs2))
elif isinstance(ex, BoxerWhq):
drs1 = self.interpret(ex.drs1)
drs2 = self.interpret(ex.drs2)
return DRS(drs1.refs + drs2.refs, drs1.conds + drs2.conds)
assert False, '%s: %s' % (ex.__class__.__name__, ex)
def _make_atom(self, pred, *args):
accum = DrtVariableExpression(Variable(pred))
for arg in args:
accum = DrtApplicationExpression(accum, DrtVariableExpression(Variable(arg)))
return accum
def _add_occur_indexing(self, base, ex):
if self._occur_index and ex.sent_index is not None:
if ex.discourse_id:
base += '_%s' % ex.discourse_id
base += '_s%s' % ex.sent_index
base += '_w%s' % sorted(ex.word_indices)[0]
return base
class UnparseableInputException(Exception):
pass
if __name__ == '__main__':
opts = OptionParser("usage: %prog TEXT [options]")
opts.add_option("--verbose", "-v", help="display verbose logs", action="store_true", default=False, dest="verbose")
opts.add_option("--fol", "-f", help="output FOL", action="store_true", default=False, dest="fol")
opts.add_option("--question", "-q", help="input is a question", action="store_true", default=False, dest="question")
opts.add_option("--occur", "-o", help="occurrence index", action="store_true", default=False, dest="occur_index")
(options, args) = opts.parse_args()
if len(args) != 1:
opts.error("incorrect number of arguments")
interpreter = NltkDrtBoxerDrsInterpreter(occur_index=options.occur_index)
drs = Boxer(interpreter).interpret_multisentence(args[0].split(r'\n'), question=options.question, verbose=options.verbose)
if drs is None:
print(None)
else:
drs = drs.simplify().eliminate_equality()
if options.fol:
print(drs.fol().normalize())
else:
drs.normalize().pprint()
| 39.204564
| 147
| 0.598204
|
3acddec8a75960544601f90f8794282fc15c8d76
| 2,885
|
py
|
Python
|
examples/contrastModel.py
|
reflectometry/osrefl
|
ddf55d542f2eab2a29fd6ffc862379820a06d5c7
|
[
"BSD-3-Clause"
] | 2
|
2015-05-21T15:16:46.000Z
|
2015-10-23T17:47:36.000Z
|
examples/contrastModel.py
|
reflectometry/osrefl
|
ddf55d542f2eab2a29fd6ffc862379820a06d5c7
|
[
"BSD-3-Clause"
] | null | null | null |
examples/contrastModel.py
|
reflectometry/osrefl
|
ddf55d542f2eab2a29fd6ffc862379820a06d5c7
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2008 University of Maryland
# All rights reserved.
# See LICENSE.txt for details.
# Author: Christopher Metting
#Starting Date:9/21/2009
import numpy
from osrefl.model.sample_prep import *
from osrefl.model.samples import *
from numpy import log, abs, min, max
from pylab import figure, show, subplot, imshow
from osrefl.loaders.andr_load import *
from osrefl.theory.scatter import *
from osrefl.theory import approximations, scatter
from osrefl.viewers import view
from numpy import log
from pylab import figure
Au_measurments = Data()
#Au = Ellipse(SLD = 4.506842e-6,dim=[3.75e4,3.75e4,600.0])
Au = [None]*1
test_data = [None]*1
#shape test
stdDim=[3.9e4,3.75e4,550.0]
fillMediaSLD = [4.506842e-6,5.506842e-6,6.506842e-6]
feHight = 630.0
liquid = [None]*3
models = [None]*3
print(Au_measurments.type)
liquid[0] = Layer(SLD = fillMediaSLD[0],thickness_value = feHight)
liquid[1] = Layer(SLD = fillMediaSLD[1],thickness_value = feHight)
liquid[2] = Layer(SLD = fillMediaSLD[2],thickness_value = feHight)
for i in range(size(liquid)):
Au = RoundedParPip(SLD = 4.506842e-6,dim=[3.75e4,3.75e4,feHight], curve = .56)
Cr = Layer(SLD = 3.01e-6,thickness_value = 48.0)
liquid[i].on_top_of(Cr)
Au.on_top_of(Cr)
scene = Scene([liquid[i],Cr])
GeoUnit = GeomUnit(Dxyz = [10.0e4,10.0e4,700.0], n = [100,100,300],scene = scene, inc_sub = [liquid[i].SLD,2.7e-6])
unit = GeoUnit.buildUnit()
unit.add_media()
#unit.viewSlice()
lattice = Rectilinear([20.0,20.0,1.0],unit)
beam = Beam(5.0,.02,None,0.02,None)
scale = 1.7e4
q_space = Q_space([-.0002,-0.002,0.00002],[.0002,.002,0.03],[200,50,200])
#q_space = Au_measurments.space
test_data = Calculator(lattice,beam,q_space,unit)
test_data.BA()
#test_data.results[test_data.results < 1.0e-15] = 1.0e-15
test_data.resolution_correction()
test_data.corrected_results *=scale
#test_data.results[Au_measurments.data==0.0]=0.0
#Masking
artre = min(test_data.corrected_results[nonzero(test_data.corrected_results)])
test_data.corrected_results[test_data.corrected_results == 0.0] = artre
    test_data.corrected_results[numpy.isnan(test_data.corrected_results)] = artre  # '== NaN' never matches; use isnan
#test_data.corrected_results[Au_measurments.data==0.0]=0.0
models[i] = test_data
'''
from numpy import save
save('mod.npy',test_data.corrected_results)
save('data.npy',Au_measurments.data)
'''
extraData = [models[2].corrected_results]
#extraData = [test_data[1].corrected_results,test_data[2].corrected_results,test_data[3].corrected_results,test_data[4].corrected_results]
#test_data[0].fitCompare(Au_measurments,extraData,titles = ['data','curve = 0','curve = 25','curve = 56','curve = 75','curve = 100'])
models[0].fitCompare(models[1],extraData,titles = ['data',str(fillMediaSLD[0]),str(fillMediaSLD[1]),str(fillMediaSLD[2])])
#test_data.scale(Au_measurments)
| 34.345238
| 138
| 0.719584
|
0995ae0058a006c22bd17e2c8199afd7caebebd2
| 4,053
|
py
|
Python
|
build/win/reorder-imports.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
build/win/reorder-imports.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 250
|
2018-02-02T23:16:57.000Z
|
2022-03-21T06:09:53.000Z
|
build/win/reorder-imports.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import optparse
import os
import shutil
import subprocess
import sys
sys.path.insert(
0,
os.path.join(os.path.dirname(__file__), '..', '..', 'third_party',
'pefile_py3'))
import pefile
def reorder_imports(input_dir, output_dir, architecture):
"""Swap chrome_elf.dll to be the first import of chrome.exe.
Also copy over any related files that might be needed
(pdbs, manifests etc.).
"""
# TODO(thakis): See if there is a reliable way to write the
# correct executable in the first place, so that this script
# only needs to verify that and not write a whole new exe.
input_image = os.path.join(input_dir, 'chrome.exe')
output_image = os.path.join(output_dir, 'chrome.exe')
# pefile mmap()s the whole executable, and then parses parts of
# it into python data structures for ease of processing.
# To write the file again, only the mmap'd data is written back,
# so modifying the parsed python objects generally has no effect.
# However, parsed raw data ends up in pe.Structure instances,
# and these all get serialized back when the file gets written.
# So things that are in a Structure must have their data set
  # through the Structure, while other data must be set through
# the set_bytes_*() methods.
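  # Illustrative sketch (added; not part of the original script): reassigning
  # a parsed convenience attribute such as pe.DIRECTORY_ENTRY_IMPORT[0].dll
  # would not change the bytes pe.write() emits, whereas assigning through the
  # underlying Structure, e.g.
  #   pe.DIRECTORY_ENTRY_IMPORT[0].struct.Name = new_name_rva  # hypothetical value
  # is serialized back, which is why the swap below goes field-by-field
  # through .struct.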
pe = pefile.PE(input_image, fast_load=True)
if architecture == 'x64' or architecture == 'arm64':
assert pe.PE_TYPE == pefile.OPTIONAL_HEADER_MAGIC_PE_PLUS
else:
assert pe.PE_TYPE == pefile.OPTIONAL_HEADER_MAGIC_PE
pe.parse_data_directories(directories=[
pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT']])
found_elf = False
for i, peimport in enumerate(pe.DIRECTORY_ENTRY_IMPORT):
if peimport.dll.lower() == b'chrome_elf.dll':
assert not found_elf, 'only one chrome_elf.dll import expected'
found_elf = True
if i > 0:
swap = pe.DIRECTORY_ENTRY_IMPORT[0]
# Morally we want to swap peimport.struct and swap.struct here,
# but the pe module doesn't expose a public method on Structure
# to get all data of a Structure without explicitly listing all
# field names.
        # NB: OriginalFirstThunk and Characteristics are a union, both at
        # offset 0, so handling just one of them is enough.
peimport.struct.OriginalFirstThunk, swap.struct.OriginalFirstThunk = \
swap.struct.OriginalFirstThunk, peimport.struct.OriginalFirstThunk
peimport.struct.TimeDateStamp, swap.struct.TimeDateStamp = \
swap.struct.TimeDateStamp, peimport.struct.TimeDateStamp
peimport.struct.ForwarderChain, swap.struct.ForwarderChain = \
swap.struct.ForwarderChain, peimport.struct.ForwarderChain
peimport.struct.Name, swap.struct.Name = \
swap.struct.Name, peimport.struct.Name
peimport.struct.FirstThunk, swap.struct.FirstThunk = \
swap.struct.FirstThunk, peimport.struct.FirstThunk
assert found_elf, 'chrome_elf.dll import not found'
pe.write(filename=output_image)
for fname in glob.iglob(os.path.join(input_dir, 'chrome.exe.*')):
shutil.copy(fname, os.path.join(output_dir, os.path.basename(fname)))
return 0
def main(argv):
usage = 'reorder_imports.py -i <input_dir> -o <output_dir> -a <target_arch>'
parser = optparse.OptionParser(usage=usage)
parser.add_option('-i', '--input', help='reorder chrome.exe in DIR',
metavar='DIR')
parser.add_option('-o', '--output', help='write new chrome.exe to DIR',
metavar='DIR')
parser.add_option('-a', '--arch', help='architecture of build (optional)',
default='ia32')
opts, args = parser.parse_args()
if not opts.input or not opts.output:
    parser.error('Please provide an input and an output directory')
return reorder_imports(opts.input, opts.output, opts.arch)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 40.939394
| 78
| 0.711818
|
a3031202bdc86e979b0c41578f7b3f1f83ae2eda
| 2,778
|
py
|
Python
|
eoxserver/services/ows/common/v20/encoders.py
|
ESA-VirES/eoxserver
|
d7b65adf9317538b267d5cbb1281acb72bc0de2c
|
[
"OML"
] | 1
|
2017-11-21T22:23:30.000Z
|
2017-11-21T22:23:30.000Z
|
eoxserver/services/ows/common/v20/encoders.py
|
ESA-VirES/eoxserver
|
d7b65adf9317538b267d5cbb1281acb72bc0de2c
|
[
"OML"
] | null | null | null |
eoxserver/services/ows/common/v20/encoders.py
|
ESA-VirES/eoxserver
|
d7b65adf9317538b267d5cbb1281acb72bc0de2c
|
[
"OML"
] | null | null | null |
#-------------------------------------------------------------------------------
# $Id$
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2011 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from lxml.builder import ElementMaker
from eoxserver.core.util.xmltools import XMLEncoder, NameSpace, NameSpaceMap
ns_xlink = NameSpace("http://www.w3.org/1999/xlink", "xlink")
ns_ows = NameSpace("http://www.opengis.net/ows/2.0", "ows", "http://schemas.opengis.net/ows/2.0/owsAll.xsd")
ns_xml = NameSpace("http://www.w3.org/XML/1998/namespace", "xml")
nsmap = NameSpaceMap(ns_ows)
OWS = ElementMaker(namespace=ns_ows.uri, nsmap=nsmap)
class OWS20Encoder(XMLEncoder):
def encode_reference(self, node_name, href, reftype="simple"):
attributes = {ns_xlink("href"): href}
if reftype:
attributes[ns_xlink("type")] = reftype
return OWS(node_name, **attributes)
class OWS20ExceptionXMLEncoder(XMLEncoder):
def encode_exception(self, message, version, code, locator=None):
exception_attributes = {
"exceptionCode": str(code)
}
if locator:
exception_attributes["locator"] = str(locator)
exception_text = (OWS("ExceptionText", message),) if message else ()
return OWS("ExceptionReport",
OWS("Exception", *exception_text, **exception_attributes
), version=version, **{ns_xml("lang"): "en"}
)
def get_schema_locations(self):
return nsmap.schema_locations
| 39.126761
| 108
| 0.654068
|
82a2269e3ace7238070f90e9ebd7ee2fdc926ded
| 176
|
py
|
Python
|
two_qubit_simulator/quantum_gates/hadamard.py
|
pweb6304/two-qubit-simulator
|
101adaf23a0b3632346293b7478c79d71941cf91
|
[
"MIT"
] | null | null | null |
two_qubit_simulator/quantum_gates/hadamard.py
|
pweb6304/two-qubit-simulator
|
101adaf23a0b3632346293b7478c79d71941cf91
|
[
"MIT"
] | null | null | null |
two_qubit_simulator/quantum_gates/hadamard.py
|
pweb6304/two-qubit-simulator
|
101adaf23a0b3632346293b7478c79d71941cf91
|
[
"MIT"
] | null | null | null |
"""
Contains the Hadamard gate
"""
from .quantum_gate import QuantumGate
class Hadamard(QuantumGate):
""" Implements the Hadamard gate """
    def __init__(self, elements):
        # Minimal completion of this truncated constructor (best-effort fix:
        # the QuantumGate base-class interface is not shown here), storing the
        # supplied gate matrix.
        self.elements = elements
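    # Illustrative sketch (added; not from this repository: the constructor
    # signature and any numpy dependency are assumptions): the single-qubit
    # Hadamard matrix such a gate would typically wrap is
    #
    #     import numpy as np
    #     H = (1 / np.sqrt(2)) * np.array([[1, 1],
    #                                      [1, -1]])
    #     gate = Hadamard(H)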
| 17.6
| 40
| 0.727273
|
46f42ffd8173b1195c31e40a67c2c61234756387
| 16,798
|
py
|
Python
|
tests/ignite/contrib/handlers/test_neptune_logger.py
|
neptune-ai/ignite
|
48b449edc888f7f4cd54a9ea397c57e7de0eba64
|
[
"BSD-3-Clause"
] | null | null | null |
tests/ignite/contrib/handlers/test_neptune_logger.py
|
neptune-ai/ignite
|
48b449edc888f7f4cd54a9ea397c57e7de0eba64
|
[
"BSD-3-Clause"
] | null | null | null |
tests/ignite/contrib/handlers/test_neptune_logger.py
|
neptune-ai/ignite
|
48b449edc888f7f4cd54a9ea397c57e7de0eba64
|
[
"BSD-3-Clause"
] | null | null | null |
import math
import warnings
from unittest.mock import call, ANY, MagicMock
import pytest
import torch
from ignite.engine import Engine, Events, State
from ignite.contrib.handlers.neptune_logger import *
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OptimizerParamsHandler works only with NeptuneLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_metric = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metric.assert_called_once_with("lr/group_0", y=0.01, x=123)
wrapper = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_metric = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metric.assert_called_once_with("generator/lr/group_0", y=0.01, x=123)
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OutputHandler works only with NeptuneLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_metric = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metric.assert_called_once_with("tag/output", y=12345, x=123)
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_metric = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metric.assert_called_once_with("another_tag/loss", y=12345, x=123)
def test_output_handler_metric_names():
wrapper = OutputHandler("tag", metric_names=["a", "b"])
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_metric = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metric.call_count == 2
mock_logger.log_metric.assert_has_calls([call("tag/a", y=12.23, x=5), call("tag/b", y=23.45, x=5),], any_order=True)
wrapper = OutputHandler("tag", metric_names=["a",])
mock_engine = MagicMock()
mock_logger.log_metric = MagicMock()
mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_metric = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metric.call_count == 4
mock_logger.log_metric.assert_has_calls(
[
call("tag/a/0", y=0.0, x=5),
call("tag/a/1", y=1.0, x=5),
call("tag/a/2", y=2.0, x=5),
call("tag/a/3", y=3.0, x=5),
],
any_order=True,
)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
mock_logger.log_metric = MagicMock()
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_metric = MagicMock()
with pytest.warns(UserWarning):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metric.call_count == 1
mock_logger.log_metric.assert_has_calls([call("tag/a", y=55.56, x=7),], any_order=True)
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_metric = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metric.call_count == 2
mock_logger.log_metric.assert_has_calls([call("tag/a", y=12.23, x=5), call("tag/b", y=23.45, x=5),], any_order=True)
def test_output_handler_both():
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_metric = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metric.call_count == 3
mock_logger.log_metric.assert_has_calls(
[call("tag/a", y=12.23, x=5), call("tag/b", y=23.45, x=5), call("tag/loss", y=12345, x=5)], any_order=True
)
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=NeptuneLogger)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_metric = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metric.call_count == 1
mock_logger.log_metric.assert_has_calls(
[call("tag/loss", y=mock_engine.state.output, x=mock_another_engine.state.epoch)]
)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metric.call_count == 2
mock_logger.log_metric.assert_has_calls(
[call("tag/loss", y=mock_engine.state.output, x=mock_another_engine.state.epoch)]
)
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_metric = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metric.call_count == 1
mock_logger.log_metric.assert_has_calls([call("tag/loss", y=12345, x=10)])
def test_weights_scalar_handler_wrong_setup():
with pytest.raises(TypeError, match="Argument model should be of type torch.nn.Module"):
WeightsScalarHandler(None)
model = MagicMock(spec=torch.nn.Module)
with pytest.raises(TypeError, match="Argument reduction should be callable"):
WeightsScalarHandler(model, reduction=123)
with pytest.raises(ValueError, match="Output of the reduction function should be a scalar"):
WeightsScalarHandler(model, reduction=lambda x: x)
wrapper = WeightsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler WeightsScalarHandler works only with NeptuneLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_weights_scalar_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = WeightsScalarHandler(model, tag=tag)
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_metric = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = "{}/".format(tag) if tag else ""
assert mock_logger.log_metric.call_count == 4
mock_logger.log_metric.assert_has_calls(
[
call(tag_prefix + "weights_norm/fc1/weight", y=0.0, x=5),
call(tag_prefix + "weights_norm/fc1/bias", y=0.0, x=5),
call(tag_prefix + "weights_norm/fc2/weight", y=12.0, x=5),
call(tag_prefix + "weights_norm/fc2/bias", y=math.sqrt(12.0), x=5),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_weights_scalar_handler_frozen_layers(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=True)
wrapper = WeightsScalarHandler(model)
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_metric = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.log_metric.assert_has_calls(
[call("weights_norm/fc2/weight", y=12.0, x=5), call("weights_norm/fc2/bias", y=math.sqrt(12.0), x=5),],
any_order=True,
)
with pytest.raises(AssertionError):
mock_logger.log_metric.assert_has_calls(
[call("weights_norm/fc1/weight", y=12.0, x=5), call("weights_norm/fc1/bias", y=math.sqrt(12.0), x=5),],
any_order=True,
)
assert mock_logger.log_metric.call_count == 2
def test_grads_scalar_handler_wrong_setup():
with pytest.raises(TypeError, match="Argument model should be of type torch.nn.Module"):
GradsScalarHandler(None)
model = MagicMock(spec=torch.nn.Module)
with pytest.raises(TypeError, match="Argument reduction should be callable"):
GradsScalarHandler(model, reduction=123)
wrapper = GradsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler GradsScalarHandler works only with NeptuneLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_grads_scalar_handler(dummy_model_factory, norm_mock):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = GradsScalarHandler(model, reduction=norm_mock, tag=tag)
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_metric = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
norm_mock.reset_mock()
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = "{}/".format(tag) if tag else ""
mock_logger.log_metric.assert_has_calls(
[
call(tag_prefix + "grads_norm/fc1/weight", y=ANY, x=5),
call(tag_prefix + "grads_norm/fc1/bias", y=ANY, x=5),
call(tag_prefix + "grads_norm/fc2/weight", y=ANY, x=5),
call(tag_prefix + "grads_norm/fc2/bias", y=ANY, x=5),
],
any_order=True,
)
assert mock_logger.log_metric.call_count == 4
assert norm_mock.call_count == 4
_test()
_test(tag="tag")
def test_grads_scalar_handler_frozen_layers(dummy_model_factory, norm_mock):
model = dummy_model_factory(with_grads=True, with_frozen_layer=True)
wrapper = GradsScalarHandler(model, reduction=norm_mock)
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_metric = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
norm_mock.reset_mock()
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.log_metric.assert_has_calls(
[call("grads_norm/fc2/weight", y=ANY, x=5), call("grads_norm/fc2/bias", y=ANY, x=5),], any_order=True
)
with pytest.raises(AssertionError):
mock_logger.log_metric.assert_has_calls(
[call("grads_norm/fc1/weight", y=ANY, x=5), call("grads_norm/fc1/bias", y=ANY, x=5),], any_order=True
)
assert mock_logger.log_metric.call_count == 2
assert norm_mock.call_count == 2
def test_integration():
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
npt_logger = NeptuneLogger(offline_mode=True)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger.log_metric("test_value", global_step, global_step)
npt_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
npt_logger.close()
def test_integration_as_context_manager():
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
with NeptuneLogger(offline_mode=True) as npt_logger:
trainer = Engine(update_fn)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger.log_metric("test_value", global_step, global_step)
npt_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
def test_neptune_saver_serializable(dummy_model_factory, dirname):
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_artifact = MagicMock()
model = torch.nn.Module()
to_save_serializable = {"model": model}
saver = NeptuneSaver(mock_logger)
fname = "test.pt"
saver(to_save_serializable, fname)
assert mock_logger.log_artifact.call_count == 1
def test_neptune_saver_non_serializable(dirname):
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_artifact = MagicMock()
to_save_non_serializable = {"model": lambda x: x}
saver = NeptuneSaver(mock_logger)
fname = "test.pt"
try:
with warnings.catch_warnings():
# Ignore torch/serialization.py:292: UserWarning: Couldn't retrieve source code for container of type
# DummyModel. It won't be checked for correctness upon loading.
warnings.simplefilter("ignore", category=UserWarning)
saver(to_save_non_serializable, fname)
except Exception:
pass
assert mock_logger.log_artifact.call_count == 0
@pytest.fixture
def no_site_packages():
import sys
neptune_client_modules = {}
for k in sys.modules:
if "neptune" in k:
neptune_client_modules[k] = sys.modules[k]
for k in neptune_client_modules:
del sys.modules[k]
prev_path = list(sys.path)
sys.path = [p for p in sys.path if "site-packages" not in p]
yield "no_site_packages"
sys.path = prev_path
for k in neptune_client_modules:
sys.modules[k] = neptune_client_modules[k]
def test_no_neptune_client(no_site_packages):
with pytest.raises(RuntimeError, match=r"This contrib module requires neptune-client to be installed."):
NeptuneLogger()
| 35.364211
| 120
| 0.704727
|
58e1427514b80f428baba38feeaeedc4c09fc739
| 6,685
|
py
|
Python
|
test/run_portability_test.py
|
lilab-bcb/skylab
|
d230f2d31ba877db58948a9ed73486cd7c71dd59
|
[
"BSD-3-Clause"
] | 45
|
2017-10-12T19:37:29.000Z
|
2022-01-22T02:56:57.000Z
|
test/run_portability_test.py
|
lilab-bcb/skylab
|
d230f2d31ba877db58948a9ed73486cd7c71dd59
|
[
"BSD-3-Clause"
] | 203
|
2017-08-15T13:50:21.000Z
|
2021-02-18T01:20:25.000Z
|
test/run_portability_test.py
|
truwl/skylab
|
e31492cd0219ff6f236cd0500401004f16f0fe41
|
[
"BSD-3-Clause"
] | 42
|
2017-09-13T14:44:36.000Z
|
2022-03-15T09:27:52.000Z
|
"""Runs a single portability test from a test directory."""
import argparse
import datetime
import glob
import json
import os
import sys
import time
import requests
# This is what we need to find in a test directory. I guess we'll glob the PR
# test WDL itself since it's named after the pipeline.
TEST_DIR_LAYOUT = {
"inputs": "test_inputs.json",
"dependencies": "dependencies.json",
"test": "*PR.wdl"
}
class TestFailure(Exception):
"""Error for a test that fails its portability test."""
pass
class TestDefinitionError(Exception):
"""Error for malformed test directory."""
pass
class TestEnvironmentError(Exception):
"""Error for malformed test environment."""
pass
def gather_test_inputs(test_dir):
"""Walk through the test directory, finding files and dependencies needed
to submit a portability test.
Raise a TestDefinitionError if anything goes wrong.
"""
errors = []
# Test WDL
wdl_glob = os.path.join(test_dir, TEST_DIR_LAYOUT["test"])
wdl_paths = glob.glob(wdl_glob)
if not wdl_paths:
errors.append("Test definition WDL not found.")
if len(wdl_paths) > 1:
errors.append("Multiple candidate test WDLs found: {}".format(wdl_paths))
workflow_attachment = []
try:
test_wdl_name = os.path.basename(wdl_paths[0])
test_wdl_string = open(wdl_paths[0]).read()
workflow_attachment.append((test_wdl_name, test_wdl_string))
    except (IOError, IndexError):  # IndexError: no candidate WDL was found above
test_wdl_name, test_wdl_string = None, None
errors.append("Test WDL {} could not be read".format(wdl_paths[0]))
# Test inputs
try:
inputs_json_path = os.path.join(test_dir, TEST_DIR_LAYOUT["inputs"])
inputs_json_string = open(inputs_json_path).read()
except IOError:
inputs_json_string = None
errors.append("Inputs JSON {} could not be read".format(inputs_json_path))
# Dependencies
# First try to load the dependency JSON itself
try:
dependencies_json_path = os.path.join(test_dir, TEST_DIR_LAYOUT["dependencies"])
dependencies_json_string = open(dependencies_json_path).read()
except IOError:
dependencies_json_string = None
errors.append("Dependencies JSON {} could not be read".format(dependencies_json_path))
# Now iterate over the dependencies and load the files.
dependencies_dict = json.loads(dependencies_json_string)
for key, value in dependencies_dict.items():
try:
workflow_attachment.append((
key,
open(value).read()
))
except IOError:
errors.append("Could not read dependency {}".format(value))
if errors:
for error in errors:
sys.stderr.write(error + "\n")
raise TestDefinitionError(errors)
return {
"entry_point_wdl": test_wdl_name,
"workflow_params": inputs_json_string,
"workflow_attachment": workflow_attachment
}
# These are the environment variables we're expecting to be able to submit a
# portability test.
ENV_VARIABLES = {
"portability_service_url": os.environ.get("PORTABILITY_SERVICE_URL"),
"portability_service_headers": os.environ.get("PORTABILITY_SERVICE_HEADERS")
}
def verify_environment_variables():
"""Check that required environment variables are defined."""
errors = []
for key, value in ENV_VARIABLES.items():
if not value:
errors.append("Environment variable {} is undefined".format(key.upper()))
if errors:
for error in errors:
sys.stderr.write(error + "\n")
raise TestEnvironmentError(errors)
def submit_portability_test(test_inputs):
"""Submit the portability to the service and return the test's id."""
service_headers = json.loads(ENV_VARIABLES["portability_service_headers"])
test_endpoint = ENV_VARIABLES["portability_service_url"] + "/portability_tests"
response = requests.post(
test_endpoint,
headers=service_headers,
json=test_inputs)
print("Portability service response:\n{}".format(
json.dumps(json.loads(response.text), indent=4)), flush=True)
test_id = json.loads(response.text)["test_id"]
return test_id
def print_test_states(test_states):
"""Print the states of tests."""
print("{:%Y-%m-%dT%H:%M:%SZ}".format(datetime.datetime.utcnow()), flush=True)
for test_name, test_state in test_states.items():
print("TEST:{} STATE:{}".format(test_name, test_state), flush=True)
def monitor_tests(test_ids):
"""Check the status of tests until they all either fail or succeed. If any fail, raise
TestFailure, and if they all succeed, return successfully.
"""
service_headers = json.loads(ENV_VARIABLES["portability_service_headers"])
terminal_tests = {}
while True:
time.sleep(120)
test_states = {}
for test_name, test_id in test_ids.items():
if test_name in terminal_tests:
test_states[test_name] = terminal_tests[test_name]
continue
status_endpoint = ENV_VARIABLES["portability_service_url"] + "/portability_tests/" + \
test_id + "/status"
response = requests.get(status_endpoint, headers=service_headers)
test_state = json.loads(response.text)["state"]
test_states[test_name] = test_state
print_test_states(test_states)
for test_name, test_state in test_states.items():
if test_state in ("Failed", "Succeeded"):
terminal_tests[test_name] = test_state
if len(terminal_tests) == len(test_ids):
break
for test_name, test_id in test_ids.items():
info_endpoint = ENV_VARIABLES["portability_service_url"] + "/portability_tests/" + \
test_id
response = requests.get(info_endpoint, headers=service_headers)
print(json.dumps(response.json(), indent=4))
if not all(k == "Succeeded" for k in terminal_tests.values()):
raise TestFailure()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--test-directories", required=True, nargs='+')
args = parser.parse_args()
verify_environment_variables()
tests_inputs = {}
for test_directory in args.test_directories:
test_inputs = gather_test_inputs(test_directory)
tests_inputs[test_directory] = test_inputs
test_ids = {}
for test_dir, test_inputs in tests_inputs.items():
test_id = submit_portability_test(test_inputs)
test_ids[test_dir] = test_id
monitor_tests(test_ids)
if __name__ == "__main__":
main()
| 31.833333
| 99
| 0.672401
|
8a4002a7ef6af031f94c3dadf18477cf5ea60c6e
| 5,755
|
py
|
Python
|
gubernator/github/models.py
|
smarterclayton/test-infra
|
13bff73612f370ad8096e8a8d731faa5e3697adb
|
[
"Apache-2.0"
] | null | null | null |
gubernator/github/models.py
|
smarterclayton/test-infra
|
13bff73612f370ad8096e8a8d731faa5e3697adb
|
[
"Apache-2.0"
] | 1
|
2021-03-20T05:41:39.000Z
|
2021-03-20T05:41:39.000Z
|
gubernator/github/models.py
|
smarterclayton/test-infra
|
13bff73612f370ad8096e8a8d731faa5e3697adb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import google.appengine.ext.ndb as ndb
class GithubResource(ndb.Model):
# A key holder used to define an entitygroup for
# each Issue/PR, for easy ancestor queries.
@staticmethod
def make_key(repo, number):
return ndb.Key(GithubResource, '%s %s' % (repo, number))
def shrink(body):
'''
Recursively remove Github API urls from an object, to make it
more human-readable.
'''
toremove = []
for key, value in body.iteritems():
if isinstance(value, basestring):
if key.endswith('url'):
if (value.startswith('https://api.github.com/') or
value.startswith('https://avatars.githubusercontent.com')):
toremove.append(key)
elif isinstance(value, dict):
shrink(value)
elif isinstance(value, list):
for el in value:
if isinstance(el, dict):
shrink(el)
for key in toremove:
body.pop(key)
return body
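# Example of the trimming shrink() performs (illustrative values, not taken
# from the original source):
#   shrink({'url': 'https://api.github.com/repos/o/r/issues/1',
#           'title': 'Fix bug',
#           'user': {'login': 'alice',
#                    'avatar_url': 'https://avatars.githubusercontent.com/u/1'}})
#   => {'title': 'Fix bug', 'user': {'login': 'alice'}}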
class GithubWebhookRaw(ndb.Model):
repo = ndb.StringProperty()
number = ndb.IntegerProperty(indexed=False)
event = ndb.StringProperty()
timestamp = ndb.DateTimeProperty(auto_now_add=True)
body = ndb.TextProperty(compressed=True)
def to_tuple(self):
return (self.event, shrink(json.loads(self.body)), int(self.timestamp.strftime('%s')))
def from_iso8601(t):
return t and datetime.datetime.strptime(t, '%Y-%m-%dT%H:%M:%SZ')
def make_kwargs(body, fields):
kwargs = {}
for field in fields:
if field.endswith('_at'):
kwargs[field] = from_iso8601(body[field])
else:
kwargs[field] = body[field]
return kwargs
class GHStatus(ndb.Model):
# Key: {repo}\t{sha}\t{context}
state = ndb.StringProperty(indexed=False)
target_url = ndb.StringProperty(indexed=False)
description = ndb.TextProperty()
created_at = ndb.DateTimeProperty(indexed=False)
updated_at = ndb.DateTimeProperty(indexed=False)
@staticmethod
def make_key(repo, sha, context):
return ndb.Key(GHStatus, '%s\t%s\t%s' % (repo, sha, context))
@staticmethod
def make(repo, sha, context, **kwargs):
return GHStatus(key=GHStatus.make_key(repo, sha, context), **kwargs)
@staticmethod
def query_for_sha(repo, sha):
before = GHStatus.make_key(repo, sha, '')
after = GHStatus.make_key(repo, sha, '\x7f')
return GHStatus.query(GHStatus.key > before, GHStatus.key < after)
@staticmethod
def from_json(body):
kwargs = make_kwargs(body,
'sha context state target_url description '
'created_at updated_at'.split())
kwargs['repo'] = body['name']
return GHStatus.make(**kwargs)
@property
def repo(self):
return self.key.id().split('\t', 1)[0]
@property
def sha(self):
return self.key.id().split('\t', 2)[1]
@property
def context(self):
return self.key.id().split('\t', 2)[2]
class GHIssueDigest(ndb.Model):
# Key: {repo} {number}
is_pr = ndb.BooleanProperty()
is_open = ndb.BooleanProperty()
involved = ndb.StringProperty(repeated=True)
xref = ndb.StringProperty(repeated=True)
payload = ndb.JsonProperty()
updated_at = ndb.DateTimeProperty()
head = ndb.StringProperty()
@staticmethod
def make_key(repo, number):
return ndb.Key(GHIssueDigest, '%s %s' % (repo, number))
@staticmethod
def make(repo, number, is_pr, is_open, involved, payload, updated_at):
return GHIssueDigest(key=GHIssueDigest.make_key(repo, number),
is_pr=is_pr, is_open=is_open, involved=involved, payload=payload,
updated_at=updated_at, head=payload.get('head'),
xref=payload.get('xrefs', []))
@staticmethod
def get(repo, number):
return GHIssueDigest.make_key(repo, number).get()
@property
def repo(self):
return self.key.id().split()[0]
@property
def number(self):
return int(self.key.id().split()[1])
@staticmethod
def find_head(repo, head):
return GHIssueDigest.query(GHIssueDigest.key > GHIssueDigest.make_key(repo, ''),
GHIssueDigest.key < GHIssueDigest.make_key(repo, '~'),
GHIssueDigest.head == head)
@staticmethod
def find_xrefs(xref):
return GHIssueDigest.query(GHIssueDigest.xref == xref)
class GHUserState(ndb.Model):
# Key: {github username}
acks = ndb.JsonProperty() # dict of issue keys => ack time (seconds since epoch)
@staticmethod
def make_key(user):
return ndb.Key(GHUserState, user)
@staticmethod
def make(user, acks=None):
return GHUserState(key=GHUserState.make_key(user), acks=acks or {})
@ndb.transactional
def save_if_newer(obj):
assert obj.updated_at is not None
old = obj.key.get()
if old is None:
obj.put()
return True
else:
if old.updated_at is None or obj.updated_at >= old.updated_at:
obj.put()
return True
return False
| 30.13089
| 94
| 0.637011
|
15579af6a77db548baa9b018898ead9572a3a817
| 672
|
py
|
Python
|
alipay/aop/api/response/KoubeiItemExtitemInfoCreateResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/KoubeiItemExtitemInfoCreateResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/KoubeiItemExtitemInfoCreateResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class KoubeiItemExtitemInfoCreateResponse(AlipayResponse):
def __init__(self):
super(KoubeiItemExtitemInfoCreateResponse, self).__init__()
self._id = None
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
def parse_response_content(self, response_content):
response = super(KoubeiItemExtitemInfoCreateResponse, self).parse_response_content(response_content)
if 'id' in response:
self.id = response['id']
| 25.846154
| 108
| 0.693452
|
869b865631a731a92a24030d602e4c8316e187b9
| 1,943
|
py
|
Python
|
azure-keyvault/azure/keyvault/v7_0/models/certificate_issuer_set_parameters.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-keyvault/azure/keyvault/v7_0/models/certificate_issuer_set_parameters.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-keyvault/azure/keyvault/v7_0/models/certificate_issuer_set_parameters.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CertificateIssuerSetParameters(Model):
"""The certificate issuer set parameters.
All required parameters must be populated in order to send to Azure.
:param provider: Required. The issuer provider.
:type provider: str
:param credentials: The credentials to be used for the issuer.
:type credentials: ~azure.keyvault.v7_0.models.IssuerCredentials
:param organization_details: Details of the organization as provided to
the issuer.
:type organization_details:
~azure.keyvault.v7_0.models.OrganizationDetails
:param attributes: Attributes of the issuer object.
:type attributes: ~azure.keyvault.v7_0.models.IssuerAttributes
"""
_validation = {
'provider': {'required': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'IssuerCredentials'},
'organization_details': {'key': 'org_details', 'type': 'OrganizationDetails'},
'attributes': {'key': 'attributes', 'type': 'IssuerAttributes'},
}
def __init__(self, **kwargs):
super(CertificateIssuerSetParameters, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.credentials = kwargs.get('credentials', None)
self.organization_details = kwargs.get('organization_details', None)
self.attributes = kwargs.get('attributes', None)
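# Illustrative usage sketch (not part of the generated model); only the
# required provider is supplied, the optional keyword arguments default to None:
# >>> params = CertificateIssuerSetParameters(provider='Test')
# >>> params.provider
# 'Test'
# >>> params.credentials is None
# True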
| 39.653061
| 86
| 0.645908
|
40a97bf35221b692b566eca0418946f641541e5f
| 264
|
py
|
Python
|
wagtail/wagtailembeds/urls.py
|
balkantechnologies/BalkanCMS_core
|
68625199028fc96abb175e410a4a7a92c02cb261
|
[
"BSD-3-Clause"
] | 1
|
2015-11-05T18:02:04.000Z
|
2015-11-05T18:02:04.000Z
|
wagtail/wagtailembeds/urls.py
|
balkantechnologies/BalkanCMS_core
|
68625199028fc96abb175e410a4a7a92c02cb261
|
[
"BSD-3-Clause"
] | 1
|
2021-02-24T08:25:30.000Z
|
2021-02-24T08:25:30.000Z
|
wagtail/wagtailembeds/urls.py
|
balkantechnologies/BalkanCMS_core
|
68625199028fc96abb175e410a4a7a92c02cb261
|
[
"BSD-3-Clause"
] | 1
|
2020-11-24T10:21:24.000Z
|
2020-11-24T10:21:24.000Z
|
from django.conf.urls import url
from wagtail.wagtailembeds.views import chooser
urlpatterns = [
url(r'^chooser/$', chooser.chooser, name='wagtailembeds_chooser'),
url(r'^chooser/upload/$', chooser.chooser_upload, name='wagtailembeds_chooser_upload'),
]
| 29.333333
| 91
| 0.757576
|
b419444ccca776c53a338a97574a95fde843affc
| 859
|
py
|
Python
|
harvey/test/test_sorted_key_value.py
|
vrde/harvey
|
9a381fd71c543f7e15d07dca72c4324fcc8ce20f
|
[
"MIT"
] | 1
|
2016-02-13T04:51:23.000Z
|
2016-02-13T04:51:23.000Z
|
harvey/test/test_sorted_key_value.py
|
vrde/harvey
|
9a381fd71c543f7e15d07dca72c4324fcc8ce20f
|
[
"MIT"
] | null | null | null |
harvey/test/test_sorted_key_value.py
|
vrde/harvey
|
9a381fd71c543f7e15d07dca72c4324fcc8ce20f
|
[
"MIT"
] | null | null | null |
from operator import itemgetter
from harvey.sorted_collection import SortedKeyValue
def test_insert():
s = SortedKeyValue(itemgetter(1), itemgetter(0))
s.insert((0, 'en.wikipedia.org'))
assert s.find_le(10)[1] == 'en.wikipedia.org'
s.insert((10, 'en.wikipedia.org'))
s.insert((20, 'en.wikipedia.org'))
assert s.find_le(20)[1] == 'en.wikipedia.org'
assert len(s) == 1
assert len(s.keys) == 1
assert len(s.values) == 1
def test_remove():
s = SortedKeyValue(itemgetter(1), itemgetter(0))
s.insert((0, 'en.wikipedia.org'))
s.remove((0, 'en.wikipedia.org'))
assert len(s) == 0
assert len(s.keys) == 0
assert len(s.values) == 0
s.insert((20, 'en.wikipedia.org'))
s.remove(('whatever', 'en.wikipedia.org'))
assert len(s) == 0
assert len(s.keys) == 0
assert len(s.values) == 0
| 24.542857
| 52
| 0.619325
|
e018a42a3c7b4ba0d2f51c1dea75a472166250e5
| 920
|
py
|
Python
|
ibis/tests/expr/test_struct.py
|
jreback/ibis
|
fdcca59b085416b1311eb268be3886abad1db230
|
[
"Apache-2.0"
] | 1
|
2020-08-19T03:36:26.000Z
|
2020-08-19T03:36:26.000Z
|
ibis/tests/expr/test_struct.py
|
jreback/ibis
|
fdcca59b085416b1311eb268be3886abad1db230
|
[
"Apache-2.0"
] | 1
|
2021-03-25T14:07:29.000Z
|
2021-03-25T14:07:29.000Z
|
ibis/tests/expr/test_struct.py
|
jreback/ibis
|
fdcca59b085416b1311eb268be3886abad1db230
|
[
"Apache-2.0"
] | 2
|
2020-11-27T22:21:50.000Z
|
2021-04-03T09:36:25.000Z
|
import pickle
from collections import OrderedDict
import ibis
import ibis.expr.operations as ops
import ibis.expr.types as ir
def test_struct_operations():
value = OrderedDict(
[
('a', 1),
('b', list('abc')),
('c', OrderedDict([('foo', [1.0, 2.0])])),
]
)
expr = ibis.literal(value)
assert isinstance(expr, ir.StructValue)
assert isinstance(expr['b'], ir.ArrayValue)
assert isinstance(expr['a'].op(), ops.StructField)
def test_struct_field_dir():
t = ibis.table([('struct_col', 'struct<my_field: string>')])
assert 'struct_col' in dir(t)
assert 'my_field' in dir(t.struct_col)
def test_struct_pickle():
struct_scalar_expr = ibis.literal(
OrderedDict([("fruit", "pear"), ("weight", 0)])
)
raw = pickle.dumps(struct_scalar_expr)
loaded = pickle.loads(raw)
assert loaded.equals(struct_scalar_expr)
| 24.210526
| 64
| 0.630435
|
c6933489456db44e52195b61dd7f967262104c7a
| 8,188
|
py
|
Python
|
Protheus_WebApp/Modules/SIGAPLS/PPLCHAPRE02TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 17
|
2018-09-24T17:27:08.000Z
|
2021-09-16T19:09:46.000Z
|
Protheus_WebApp/Modules/SIGAPLS/PPLCHAPRE02TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 4
|
2018-09-24T17:30:32.000Z
|
2022-01-03T11:39:30.000Z
|
Protheus_WebApp/Modules/SIGAPLS/PPLCHAPRE02TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 18
|
2019-06-07T17:41:34.000Z
|
2022-01-31T18:17:31.000Z
|
import unittest
from tir.technologies.apw_internal import ApwInternal
class PPLCHAPRE02TESTCASE(unittest.TestCase):
# test_PPLCHAPRE02_CT001 - SADT guide service authorized, with printing
# test_PPLCHAPRE02_CT002 - SADT guide service partially authorized, with printing
# test_PPLCHAPRE02_CT003 - SADT guide service denied
# test_PPLCHAPRE02_CT004 - Guide service under audit, with document attachment
@classmethod
def setUpClass(inst):
'''
SETUP
Initialization configuration for the test cases
'''
# Web app address and browser name
inst.oHelper = ApwInternal("config.json")
inst.oHelper.Setup()
# SADT guide service authorized, with printing
def test_PPLCHAPRE02_CT001(self):
self.oHelper.ClickMenu("Principal > Atendimento")
self.oHelper.ClickLink("Trocar para matrícula")
self.oHelper.SetValue("Selecione o prestador, local de Atendimento e regime de atendimento que irá atender o beneficiário", "HOSPITAL BOM CLIMA - HOSPITAIS - NORMAL")
self.oHelper.SetValue("Matrícula", "00010013000001003")
self.oHelper.SetButton("Confirmar")
self.oHelper.SwitchModal("Fechar")
self.oHelper.SetValue("Tipo de Atendimento:", "SADT")
self.oHelper.SelectBrowse("CARLOS ROBERTO|HOSPITAL BOM CLIMA")
self.oHelper.SetButton("Atendimento")
self.oHelper.SwitchModal("Sim")
self.oHelper.SetButton("000 - Protocolo", "add")
self.oHelper.SetValue("012 - Rn?", "Nao")
self.oHelper.SetButton("015 - Nome Prof. Sol.", "search")
self.oHelper.SearchValue("Numero C.R.", "654987")
self.oHelper.SetValue("019 - Cod. Cbos", "MEDICO EM GERAL (CLINICO GERAL)")
self.oHelper.SetButton("021 - Carater Atend.", "search")
self.oHelper.SearchValue("Código", "1")
self.oHelper.SetValue("023 - Indicacao Clinica", "Teste")
self.oHelper.SetButton("025 - Cod. Procedimento", "search")
self.oHelper.SearchValue("Código", "40303136", True)
self.oHelper.SetValue("027 - Qtd. Sol.", "5", True)
self.oHelper.SetGrid()
self.oHelper.SetButton("025 - Cod. Procedimento", "search")
self.oHelper.SearchValue("Código", "40201120", True)
self.oHelper.SetValue("027 - Qtd. Sol.", "10", True)
self.oHelper.SetGrid()
self.oHelper.SetButton("Confirmar")
self.oHelper.WaitModal("Autorizada")
self.oHelper.SwitchModal("Fechar")
self.oHelper.SetButton("imprimir")
self.oHelper.SwitchWindow()
self.oHelper.CheckLink("Clique aqui.")
self.oHelper.CloseWindow()
self.oHelper.SwitchWindow()
self.oHelper.SetButton("Anexar documento")
self.oHelper.SetValue("Selecione o Arquivo:", "C:\\Totvs\\Automacao\\UPLOAD_GEN.txt")
self.oHelper.SetButton("Anexar")
self.oHelper.CheckBrowse("UPLOAD_GEN")
self.oHelper.EndCase()
# SADT guide service partially authorized, with printing
def test_PPLCHAPRE02_CT002(self):
self.oHelper.ClickMenu("Principal > Atendimento")
self.oHelper.SetValue("Tipo de Atendimento:", "SADT")
self.oHelper.SelectBrowse("CARLOS ROBERTO|HOSPITAL BOM CLIMA")
self.oHelper.SetButton("Atendimento")
self.oHelper.SwitchModal("Sim")
self.oHelper.SetButton("000 - Protocolo", "add")
self.oHelper.SetValue("012 - Rn?", "Nao")
self.oHelper.SetButton("015 - Nome Prof. Sol.", "search")
self.oHelper.SearchValue("Numero C.R.", "654987")
self.oHelper.SetValue("019 - Cod. Cbos", "MEDICO EM GERAL (CLINICO GERAL)")
self.oHelper.SetButton("021 - Carater Atend.", "search")
self.oHelper.SearchValue("Código", "1")
self.oHelper.SetValue("023 - Indicacao Clinica", "Teste")
self.oHelper.SetButton("025 - Cod. Procedimento", "search")
self.oHelper.SearchValue("Código", "40303136", True)
self.oHelper.SetValue("027 - Qtd. Sol.", "5", True)
self.oHelper.SetGrid()
self.oHelper.SetButton("025 - Cod. Procedimento", "search")
self.oHelper.SearchValue("Código", "40311236", True)
self.oHelper.SetValue("027 - Qtd. Sol.", "1", True)
self.oHelper.SetGrid()
self.oHelper.SwitchModal("Fechar")
self.oHelper.SetButton("Confirmar")
self.oHelper.WaitModal("Autorizada Parcialmente")
self.oHelper.SwitchModal("Fechar")
self.oHelper.SetButton("imprimir")
self.oHelper.SwitchWindow()
self.oHelper.CheckLink("Clique aqui.")
self.oHelper.CloseWindow()
self.oHelper.SwitchWindow()
self.oHelper.SetButton("Anexar documento")
self.oHelper.SetValue("Selecione o Arquivo:", "C:\\Totvs\\Automacao\\UPLOAD_GEN.txt")
self.oHelper.SetButton("Anexar")
self.oHelper.CheckBrowse("UPLOAD_GEN")
self.oHelper.EndCase()
# SADT guide service denied
def test_PPLCHAPRE02_CT003(self):
self.oHelper.ClickMenu("Principal > Atendimento")
self.oHelper.SetValue("Tipo de Atendimento:", "SADT")
self.oHelper.SelectBrowse("CARLOS ROBERTO|HOSPITAL BOM CLIMA")
self.oHelper.SetButton("Atendimento")
self.oHelper.SwitchModal("Sim")
self.oHelper.SetButton("000 - Protocolo", "add")
self.oHelper.SetValue("012 - Rn?", "Nao")
self.oHelper.SetButton("015 - Nome Prof. Sol.", "search")
self.oHelper.SearchValue("Numero C.R.", "654987")
self.oHelper.SetValue("019 - Cod. Cbos", "MEDICO EM GERAL (CLINICO GERAL)")
self.oHelper.SetButton("021 - Carater Atend.", "search")
self.oHelper.SearchValue("Código", "1")
self.oHelper.SetValue("023 - Indicacao Clinica", "Teste")
self.oHelper.SetButton("025 - Cod. Procedimento", "search")
self.oHelper.SearchValue("Código", "40311236", True)
self.oHelper.SetValue("027 - Qtd. Sol.", "1", True)
self.oHelper.SetGrid()
self.oHelper.SwitchModal("Fechar")
self.oHelper.SetButton("Confirmar")
self.oHelper.WaitModal("Nao Autorizada")
self.oHelper.SwitchModal("Fechar")
self.oHelper.SetButton("Anexar documento")
self.oHelper.SetValue("Selecione o Arquivo:", "C:\\Totvs\\Automacao\\UPLOAD_GEN.txt")
self.oHelper.SetButton("Anexar")
self.oHelper.CheckBrowse("UPLOAD_GEN")
self.oHelper.EndCase()
# Guide service under audit, with document attachment
def test_PPLCHAPRE02_CT004(self):
self.oHelper.ClickMenu("Principal > Atendimento")
self.oHelper.SetValue("Tipo de Atendimento:", "SADT")
self.oHelper.SelectBrowse("CARLOS ROBERTO|HOSPITAL BOM CLIMA")
self.oHelper.SetButton("Atendimento")
self.oHelper.SwitchModal("Sim")
self.oHelper.SetButton("000 - Protocolo", "add")
self.oHelper.SetValue("012 - Rn?", "Nao")
self.oHelper.SetButton("015 - Nome Prof. Sol.", "search")
self.oHelper.SearchValue("Numero C.R.", "654987")
self.oHelper.SetValue("019 - Cod. Cbos", "MEDICO EM GERAL (CLINICO GERAL)")
self.oHelper.SetButton("021 - Carater Atend.", "search")
self.oHelper.SearchValue("Código", "1")
self.oHelper.SetValue("023 - Indicacao Clinica", "Teste")
self.oHelper.SetButton("025 - Cod. Procedimento", "search")
self.oHelper.SearchValue("Código", "30101018", True)
self.oHelper.SetValue("027 - Qtd. Sol.", "1", True)
self.oHelper.SetGrid()
self.oHelper.SwitchModal("Fechar")
self.oHelper.SetButton("Confirmar")
self.oHelper.WaitModal("Em Análise")
self.oHelper.SwitchModal("Fechar")
self.oHelper.SetButton("Anexar documento")
self.oHelper.SetValue("Selecione o Arquivo:", "C:\\Totvs\\Automacao\\UPLOAD_GEN.txt")
self.oHelper.SetButton("Anexar")
self.oHelper.CheckBrowse("UPLOAD_GEN")
self.oHelper.EndCase()
@classmethod
def tearDownClass(inst):
'''
Method that finalizes the TestCase
'''
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
| 45.743017
| 174
| 0.654006
|
5236e1e1d04488927330339167dbb5ca72dd2f1c
| 5,683
|
py
|
Python
|
backend/forum/utils/djtools/query_cache.py
|
karolyi/forum-django
|
a498be3123deb836e0108258c493b88c645b2163
|
[
"MIT"
] | 7
|
2016-09-20T11:49:49.000Z
|
2017-06-24T23:51:56.000Z
|
backend/forum/utils/djtools/query_cache.py
|
karolyi/forum-django
|
a498be3123deb836e0108258c493b88c645b2163
|
[
"MIT"
] | 17
|
2019-12-22T10:41:48.000Z
|
2021-11-17T10:58:50.000Z
|
backend/forum/utils/djtools/query_cache.py
|
karolyi/forum-django
|
a498be3123deb836e0108258c493b88c645b2163
|
[
"MIT"
] | 1
|
2016-09-20T11:50:57.000Z
|
2016-09-20T11:50:57.000Z
|
from typing import Iterable, Optional
from django import VERSION
from django.db.models.base import Model
from django.db.models.fields.related import ManyToManyField
from django.db.models.fields.reverse_related import ManyToOneRel
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
def invalidate_onetomany(objs: Iterable[Model], prefetch_keys: Iterable[str]):
"""
Invalidate one-to-many caches. These are remote `ForeignKey` and
`ManyToManyField` fields fetched with `prefetch_related()`.
"""
if VERSION[0] == 1 or VERSION[0] == 2:
for obj in objs:
if not hasattr(obj, '_prefetched_objects_cache'):
continue
for key in prefetch_keys:
if key not in obj._prefetched_objects_cache:
continue
del obj._prefetched_objects_cache[key]
def invalidate_manytoone(objs: Iterable[Model], field_names: Iterable[str]):
"""
Invalidate many-to-one caches. These are `ForeignKey` and
`OneToOneField` fields fetched with `select_related()` or
`prefetch_related()`.
"""
if VERSION[0] == 1:
for obj in objs:
for field_name in field_names:
if not is_fk_cached(obj=obj, field_name=field_name):
continue
del obj.__dict__[f'_{field_name}_cache']
elif VERSION[0] == 2:
for obj in objs:
for field_name in field_names:
if not is_fk_cached(obj=obj, field_name=field_name):
continue
del obj._state.fields_cache[field_name]
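# Illustrative sketch (not part of the original module), assuming a hypothetical
# Comment model whose `topic` ForeignKey was loaded with select_related():
# >>> comment = Comment.objects.select_related('topic').get(pk=1)
# >>> comment.topic_id = other_topic.pk
# >>> invalidate_manytoone(objs=[comment], field_names=['topic'])
# >>> comment.topic  # cache was dropped, so this hits the database again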
def get_prefetch_cache_key(relation: Manager) -> str:
'Return a key used in the prefetched cache for a relation.'
try:
# Works on ManyToMany
return relation.prefetch_cache_name
except AttributeError:
# Is a ForeignKey (OneToMany)
rel_field = relation.field.remote_field # type: ManyToOneRel
if rel_field.related_name:
return rel_field.related_name
if VERSION[0] == 1:
return rel_field.name
elif VERSION[0] == 2:
return f'{rel_field.name}_set'
def init_prefetch_cache(obj: Model):
'Init a prefetch cache on the model.'
if VERSION[0] == 1 or VERSION[0] == 2:
if hasattr(obj, '_prefetched_objects_cache'):
return
obj._prefetched_objects_cache = {}
def is_query_prefetched(relation: Manager) -> bool:
'Return `True` if the relation is prefetched.'
if VERSION[0] == 1 or VERSION[0] == 2:
obj = relation.instance
if not hasattr(obj, '_prefetched_objects_cache'):
return False
prefetch_cache_key = get_prefetch_cache_key(relation=relation)
return prefetch_cache_key in obj._prefetched_objects_cache
return False
def set_prefetch_cache(
relation: Manager, queryset: QuerySet, override: bool = True):
'Set prefetch cache on a `Model` for a relation.'
if is_query_prefetched(relation=relation) and not override:
return
obj = relation.instance
init_prefetch_cache(obj=obj)
if VERSION[0] == 1 or VERSION[0] == 2:
key = get_prefetch_cache_key(relation=relation)
obj._prefetched_objects_cache[key] = queryset
def is_queryresult_loaded(qs: QuerySet) -> bool:
'Return `True` if the query is loaded, `False` otherwise.'
if VERSION[0] == 1 or VERSION[0] == 2:
return qs._result_cache is not None
return False
def set_queryresult(qs: QuerySet, result: list, override: bool = True):
'Set result on a previously setup query.'
if VERSION[0] == 1 or VERSION[0] == 2:
if override or not is_queryresult_loaded(qs=qs):
qs._result_cache = result
def get_queryresult(qs: QuerySet) -> Optional[list]:
'Return the cached query result of the passed `QuerySet`.'
if VERSION[0] == 1 or VERSION[0] == 2:
return qs._result_cache
def is_fk_cached(obj: Model, field_name: str) -> bool:
'Return `True` if the `ForeignKey` field on the object is cached.'
if VERSION[0] == 1:
return hasattr(obj, f'_{field_name}_cache')
elif VERSION[0] == 2:
if getattr(obj, '_state', None) is None or \
getattr(obj._state, 'fields_cache', None) is None:
return False
return field_name in obj._state.fields_cache
return False
def set_fk_cache(
obj: Model, field_name: str, value: Model, override: bool = True):
"""
Set a cache on the `obj` for a `ForeignKey` field, override when
requested.
"""
if not override and is_fk_cached(obj=obj, field_name=field_name):
return
if VERSION[0] == 1:
setattr(obj, f'_{field_name}_cache', value)
elif VERSION[0] == 2:
if getattr(obj, '_state', None) is None:
obj._state = dict()
if getattr(obj._state, 'fields_cache', None) is None:
obj._state.fields_cache = dict()
obj._state.fields_cache[field_name] = value
def del_fk_cache(obj: Model, field_name: str):
'Delete a cached `ForeignKey` on the `Model`.'
if not is_fk_cached(obj=obj, field_name=field_name):
return
if VERSION[0] == 1:
delattr(obj, f'_{field_name}_cache')
elif VERSION[0] == 2:
del obj._state.fields_cache
_old_m2m_savedata = ManyToManyField.save_form_data
def _save_m2m_form_data(
self: ManyToManyField, instance: Model, data: QuerySet):
_old_m2m_savedata(self=self, instance=instance, data=data)
set_prefetch_cache(
relation=getattr(instance, self.name), queryset=data, override=True)
ManyToManyField.save_form_data = _save_m2m_form_data
| 34.652439
| 78
| 0.655464
|
adcf83f772e99730293ea4e09207a83c40c468a6
| 13,422
|
py
|
Python
|
tests/dash/app_dataframe_backend_paging.py
|
r-chris/dash-table
|
0d6b7c41ad06a1c67655f7645a4bbcca2160b0ac
|
[
"MIT"
] | null | null | null |
tests/dash/app_dataframe_backend_paging.py
|
r-chris/dash-table
|
0d6b7c41ad06a1c67655f7645a4bbcca2160b0ac
|
[
"MIT"
] | null | null | null |
tests/dash/app_dataframe_backend_paging.py
|
r-chris/dash-table
|
0d6b7c41ad06a1c67655f7645a4bbcca2160b0ac
|
[
"MIT"
] | null | null | null |
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
from textwrap import dedent
import dash_table
from index import app
from .utils import section_title
ID_PREFIX = "app_data_updating_graph_be"
IDS = {
"table": ID_PREFIX,
"container": "{}-container".format(ID_PREFIX),
"table-sorting": "{}-sorting".format(ID_PREFIX),
"table-multi-sorting": "{}-multi-sorting".format(ID_PREFIX),
"table-filtering": "{}-filtering".format(ID_PREFIX),
"table-sorting-filtering": "{}-sorting-filtering".format(ID_PREFIX),
"table-paging-selection": "{}-paging-selection".format(ID_PREFIX),
"table-paging-with-graph": "{}-table-paging-with-graph".format(ID_PREFIX),
"table-paging-with-graph-container": "{}-table-paging-with-graph-container".format(ID_PREFIX),
}
df = pd.read_csv("./datasets/gapminder.csv")
df = df[df["year"] == 2007]
df[' index'] = range(1, len(df) + 1)
PAGE_SIZE = 5
def layout():
return html.Div(
[
section_title('Backend Paging'),
dash_table.DataTable(
id=IDS["table"],
columns=[
{"name": i, "id": i, "deletable": True} for i in sorted(df.columns)
],
page_current=0,
page_size=PAGE_SIZE,
page_action='custom'
),
html.Hr(),
dcc.Markdown(dedent('''
With backend paging, we can have front-end sorting and filtering
but it will only filter and sort the data that exists on the page.
This should be avoided. Your users will expect
that sorting and filtering is happening on the entire dataset and,
with large pages, might not be aware that this is only occurring
on the current page.
Instead, we recommend implementing sorting and filtering on the
backend as well. That is, on the entire underlying dataset.
''')),
section_title('Backend Paging with Sorting'),
dash_table.DataTable(
id=IDS["table-sorting"],
columns=[
{"name": i, "id": i, "deletable": True} for i in sorted(df.columns)
],
page_current=0,
page_size=PAGE_SIZE,
page_action='custom',
sort_action='custom',
sort_mode='single',
sort_by=[]
),
section_title('Backend Paging with Multi Column Sorting'),
dcc.Markdown(dedent('''
Multi-column sort allows you to sort by multiple columns.
This is useful when you have categorical columns with repeated
values and you're interested in seeing the sorted values for
each category.
In this example, try sorting by continent and then any other column.
''')),
dash_table.DataTable(
id=IDS["table-multi-sorting"],
columns=[
{"name": i, "id": i, "deletable": True} for i in sorted(df.columns)
],
page_current=0,
page_size=PAGE_SIZE,
page_action='custom',
sort_action='custom',
sort_mode='multi',
sort_by=[]
),
section_title('Backend Paging with Filtering'),
dcc.Markdown(dedent('''
Dash Table's front-end filtering has its own filtering expression
language.
Currently, backend filtering must parse the same filtering language.
If you write an expression that is not "valid" under the filtering
language, then it will not be passed to the backend.
This limitation will be removed in the future to allow you to
write your own expression query language.
In this example, we've written a Pandas backend for the filtering
language. It supports `eq`, `<`, and `>`. For example, try:
- Enter `eq Asia` in the "continent" column
- Enter `> 5000` in the "gdpPercap" column
- Enter `< 80` in the `lifeExp` column
''')),
dash_table.DataTable(
id=IDS["table-filtering"],
columns=[
{"name": i, "id": i, "deletable": True} for i in sorted(df.columns)
],
page_current=0,
page_size=PAGE_SIZE,
page_action='custom',
filter_action='custom',
filter_query=''
),
section_title('Backend Paging with Filtering and Multi-Column Sorting'),
dash_table.DataTable(
id=IDS["table-sorting-filtering"],
columns=[
{"name": i, "id": i, "deletable": True} for i in sorted(df.columns)
],
page_current=0,
page_size=PAGE_SIZE,
page_action='custom',
filter_action='custom',
filter_query='',
sort_action='custom',
sort_mode='multi',
sort_by=[]
),
section_title('Connecting Backend Paging with a Graph'),
dcc.Markdown(dedent('''
This final example ties it all together: the graph component
displays the current page of the `data`.
''')),
html.Div(
className="row",
children=[
html.Div(
dash_table.DataTable(
id=IDS["table-paging-with-graph"],
columns=[
{"name": i, "id": i, "deletable": True} for i in sorted(df.columns)
],
page_current=0,
page_size=20,
page_action='custom',
filter_action='custom',
filter_query='',
sort_action='custom',
sort_mode='multi',
sort_by=[]
),
style={'height': 750, 'overflowY': 'scroll'},
className='six columns'
),
html.Div(
id=IDS["table-paging-with-graph-container"],
className="six columns"
)
]
)
]
)
@app.callback(
Output(IDS["table"], "data"),
[Input(IDS["table"], "page_current"), Input(IDS["table"], "page_size")])
def update_graph(page_current, page_size):
return df.iloc[
page_current * page_size:
(page_current + 1) * page_size
].to_dict('rows')
@app.callback(
Output(IDS["table-sorting"], "data"),
[Input(IDS["table-sorting"], "page_current"),
Input(IDS["table-sorting"], "page_size"),
Input(IDS["table-sorting"], "sort_by")])
def update_graph(page_current, page_size, sort_by):
print(sort_by)
if len(sort_by):
dff = df.sort_values(
sort_by[0]['columnId'],
ascending=sort_by[0]['direction'] == 'asc',
inplace=False
)
else:
# No sort is applied
dff = df
return dff.iloc[
page_current * page_size:
(page_current + 1) * page_size
].to_dict('rows')
@app.callback(
Output(IDS["table-multi-sorting"], "data"),
[Input(IDS["table-multi-sorting"], "page_current"),
Input(IDS["table-multi-sorting"], "page_size"),
Input(IDS["table-multi-sorting"], "sort_by")])
def update_graph(page_current, page_size, sort_by):
print(sort_by)
if len(sort_by):
dff = df.sort_values(
[col['columnId'] for col in sort_by],
ascending=[
col['direction'] == 'asc'
for col in sort_by
],
inplace=False
)
else:
# No sort is applied
dff = df
return dff.iloc[
page_current * page_size:
(page_current + 1) * page_size
].to_dict('rows')
@app.callback(
Output(IDS["table-filtering"], "data"),
[Input(IDS["table-filtering"], "page_current"),
Input(IDS["table-filtering"], "page_size"),
Input(IDS["table-filtering"], "filter_query")])
def update_graph(page_current, page_size, filter_query):
print(filter_query)
filtering_expressions = filter_query.split(' && ')
dff = df
for filter_query in filtering_expressions:
if ' eq ' in filter_query:
col_name = filter_query.split(' eq ')[0]
filter_value = filter_query.split(' eq ')[1]
dff = dff.loc[dff[col_name] == filter_value]
if ' > ' in filter_query:
col_name = filter_query.split(' > ')[0]
filter_value = float(filter_query.split(' > ')[1])
dff = dff.loc[dff[col_name] > filter_value]
if ' < ' in filter_query:
col_name = filter_query.split(' < ')[0]
filter_value = float(filter_query.split(' < ')[1])
dff = dff.loc[dff[col_name] < filter_value]
return dff.iloc[
page_current * page_size:
(page_current + 1) * page_size
].to_dict('rows')
@app.callback(
Output(IDS["table-sorting-filtering"], "data"),
[Input(IDS["table-sorting-filtering"], "page_current"),
Input(IDS["table-sorting-filtering"], "page_size"),
Input(IDS["table-sorting-filtering"], "sort_by"),
Input(IDS["table-sorting-filtering"], "filter_query")])
def update_graph(page_current, page_size, sort_by, filter_query):
filtering_expressions = filter_query.split(' && ')
dff = df
for filter_query in filtering_expressions:
if ' eq ' in filter_query:
col_name = filter_query.split(' eq ')[0]
filter_value = filter_query.split(' eq ')[1]
dff = dff.loc[dff[col_name] == filter_value]
if ' > ' in filter_query:
col_name = filter_query.split(' > ')[0]
filter_value = float(filter_query.split(' > ')[1])
dff = dff.loc[dff[col_name] > filter_value]
if ' < ' in filter_query:
col_name = filter_query.split(' < ')[0]
filter_value = float(filter_query.split(' < ')[1])
dff = dff.loc[dff[col_name] < filter_value]
if len(sort_by):
dff = dff.sort_values(
[col['columnId'] for col in sort_by],
ascending=[
col['direction'] == 'asc'
for col in sort_by
],
inplace=False
)
return dff.iloc[
page_current * page_size:
(page_current + 1) * page_size
].to_dict('rows')
@app.callback(
Output(IDS["table-paging-with-graph"], "data"),
[Input(IDS["table-paging-with-graph"], "page_current"),
Input(IDS["table-paging-with-graph"], "page_size"),
Input(IDS["table-paging-with-graph"], "sort_by"),
Input(IDS["table-paging-with-graph"], "filter_query")])
def update_table(page_current, page_size, sort_by, filter_query):
filtering_expressions = filter_query.split(' && ')
dff = df
for filter_query in filtering_expressions:
if ' eq ' in filter_query:
col_name = filter_query.split(' eq ')[0]
filter_value = filter_query.split(' eq ')[1]
dff = dff.loc[dff[col_name] == filter_value]
if ' > ' in filter_query:
col_name = filter_query.split(' > ')[0]
filter_value = float(filter_query.split(' > ')[1])
dff = dff.loc[dff[col_name] > filter_value]
if ' < ' in filter_query:
col_name = filter_query.split(' < ')[0]
filter_value = float(filter_query.split(' < ')[1])
dff = dff.loc[dff[col_name] < filter_value]
if len(sort_by):
dff = dff.sort_values(
[col['columnId'] for col in sort_by],
ascending=[
col['direction'] == 'asc'
for col in sort_by
],
inplace=False
)
return dff.iloc[
page_current * page_size:
(page_current + 1) * page_size
].to_dict('rows')
@app.callback(
Output(IDS["table-paging-with-graph-container"], "children"),
[Input(IDS["table-paging-with-graph"], "data")])
def update_graph(rows):
dff = pd.DataFrame(rows)
return html.Div(
[
dcc.Graph(
id=column,
figure={
"data": [
{
"x": dff["country"],
"y": dff[column] if column in dff else [],
"type": "bar",
"marker": {"color": "#0074D9"},
}
],
"layout": {
"xaxis": {"automargin": True},
"yaxis": {"automargin": True},
"height": 250,
"margin": {"t": 10, "l": 10, "r": 10},
},
},
)
for column in ["pop", "lifeExp", "gdpPercap"]
]
)
| 33.979747
| 99
| 0.521234
|
bae072b575f27548117dcc2d3ec6d6689982171b
| 4,190
|
py
|
Python
|
indico/modules/rb/operations/admin.py
|
uxmaster/indico
|
ecd19f17ef6fdc9f5584f59c87ec647319ce5d31
|
[
"MIT"
] | 1
|
2019-11-03T11:34:16.000Z
|
2019-11-03T11:34:16.000Z
|
indico/modules/rb/operations/admin.py
|
NP-compete/indico
|
80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549
|
[
"MIT"
] | null | null | null |
indico/modules/rb/operations/admin.py
|
NP-compete/indico
|
80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from datetime import datetime, time
from indico.core.db import db
from indico.core.db.sqlalchemy.util.session import no_autoflush
from indico.core.permissions import get_unified_permissions, update_principals_permissions
from indico.modules.rb.models.equipment import EquipmentType
from indico.modules.rb.models.map_areas import MapArea
from indico.modules.rb.models.room_bookable_hours import BookableHours
from indico.modules.rb.models.room_nonbookable_periods import NonBookablePeriod
@no_autoflush
def _populate_room(room, properties):
for prop, value in properties.items():
if prop not in ['available_equipment', 'bookable_hours', 'bookable_periods']:
setattr(room, prop, value)
def update_room_equipment(room, available_equipment_ids):
available_equipment = EquipmentType.query.filter(EquipmentType.id.in_(available_equipment_ids)).all()
room.available_equipment = available_equipment
db.session.flush()
def update_room_attributes(room, attributes):
current_attributes = {x.attribute.name for x in room.attributes}
new_attributes = {attribute['name'] for attribute in attributes}
deleted_attributes = current_attributes - new_attributes
for attribute in attributes:
room.set_attribute_value(attribute['name'], attribute['value'])
for deleted_attribute in deleted_attributes:
room.set_attribute_value(deleted_attribute, None)
db.session.flush()
def update_room_availability(room, availability):
if 'bookable_hours' in availability:
room.bookable_hours.order_by(False).delete()
unique_bh = set((hours['start_time'], hours['end_time']) for hours in availability['bookable_hours'])
db.session.add_all(
[BookableHours(room=room, start_time=hours[0], end_time=hours[1]) for hours in unique_bh])
if 'nonbookable_periods' in availability:
room.nonbookable_periods.order_by(False).delete()
unique_nbp = set((period['start_dt'], period['end_dt']) for period in availability['nonbookable_periods'])
db.session.add_all(
[NonBookablePeriod(room=room, start_dt=datetime.combine(period[0], time(0, 0)),
end_dt=datetime.combine(period[1], time(23, 59))) for period in unique_nbp])
def update_room(room, args):
acl_entries = args.pop('acl_entries', None)
if acl_entries:
current = {e.principal: get_unified_permissions(e) for e in room.acl_entries}
update_principals_permissions(room, current, acl_entries)
_populate_room(room, args)
db.session.flush()
def create_area(bounds, name, default=False):
top, bottom = bounds['north_east'], bounds['south_west']
if default:
MapArea.query.update({MapArea.is_default: False}, synchronize_session='fetch')
new_area = MapArea()
new_area.name = name
new_area.is_default = default
new_area.top_left_latitude = top['lat']
new_area.top_left_longitude = top['lng']
new_area.bottom_right_latitude = bottom['lat']
new_area.bottom_right_longitude = bottom['lng']
db.session.add(new_area)
db.session.flush()
return new_area
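# Illustrative usage sketch (not part of the original module); the bounds dict
# mirrors the structure read above, the coordinates are made up:
# >>> create_area(
# ...     bounds={'north_east': {'lat': 46.24, 'lng': 6.05},
# ...             'south_west': {'lat': 46.22, 'lng': 6.03}},
# ...     name='Main campus', default=True)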
def update_area(area_id, area_data):
top = area_data['bounds']['north_east']
bottom = area_data['bounds']['south_west']
map_area = MapArea.get_one(area_id)
if 'name' in area_data:
map_area.name = area_data['name']
if 'default' in area_data:
if area_data['default']:
MapArea.query.update({MapArea.is_default: False}, synchronize_session='fetch')
map_area.is_default = area_data['default']
map_area.top_left_latitude = top['lat']
map_area.top_left_longitude = top['lng']
map_area.bottom_right_latitude = bottom['lat']
map_area.bottom_right_longitude = bottom['lng']
db.session.flush()
def delete_areas(area_ids):
MapArea.query.filter(MapArea.id.in_(area_ids)).delete(synchronize_session='fetch')
db.session.flush()
| 39.528302
| 114
| 0.730072
|
b071d2e3312b1c95895f5025bcd78b6a04796f58
| 244
|
py
|
Python
|
webempresa/pages/admin.py
|
JoseM1101/web-empresa
|
190c70e6f2944beeffc224525e0739a14f78a56b
|
[
"MIT"
] | null | null | null |
webempresa/pages/admin.py
|
JoseM1101/web-empresa
|
190c70e6f2944beeffc224525e0739a14f78a56b
|
[
"MIT"
] | null | null | null |
webempresa/pages/admin.py
|
JoseM1101/web-empresa
|
190c70e6f2944beeffc224525e0739a14f78a56b
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Page
# Register your models here.
class PageAdmin(admin.ModelAdmin):
readonly_fields = ('created', 'updated')
list_display = ('title', 'order')
admin.site.register(Page, PageAdmin)
| 24.4
| 44
| 0.737705
|
8ba66c0ca41fa185ba67645136da09f0b8dc92e1
| 5,303
|
py
|
Python
|
models/ssd_inception_v2_feature_extractor.py
|
YuelongLi/Be-My-Ass
|
cf3921ca70bf5cc92593d200c29b2e1ef06109e0
|
[
"MIT"
] | 2
|
2018-07-12T15:32:00.000Z
|
2018-07-17T14:40:27.000Z
|
models/ssd_inception_v2_feature_extractor.py
|
YuelongLi/Project-Dawn
|
cf3921ca70bf5cc92593d200c29b2e1ef06109e0
|
[
"MIT"
] | null | null | null |
models/ssd_inception_v2_feature_extractor.py
|
YuelongLi/Project-Dawn
|
cf3921ca70bf5cc92593d200c29b2e1ef06109e0
|
[
"MIT"
] | 1
|
2018-07-23T18:04:42.000Z
|
2018-07-23T18:04:42.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for InceptionV2 features."""
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import inception_v2
slim = tf.contrib.slim
class SSDInceptionV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using InceptionV2 features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
"""InceptionV2 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
Raises:
ValueError: If `override_base_feature_extractor_hyperparams` is False.
"""
super(SSDInceptionV2FeatureExtractor, self).__init__(
is_training, depth_multiplier, min_depth, pad_to_multiple,
conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise,
override_base_feature_extractor_hyperparams)
if not self._override_base_feature_extractor_hyperparams:
raise ValueError('SSD Inception V2 feature extractor always uses '
'scope returned by `conv_hyperparams_fn` for both the '
'base feature extractor and the additional layers '
'added since there is no arg_scope defined for the base '
'feature extractor.')
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
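# Worked example of the scaling above (not part of the original class):
# pixel value 0 maps to (2.0/255.0)*0 - 1.0 = -1.0, 127.5 maps to 0.0,
# and 255 maps to (2.0/255.0)*255 - 1.0 = +1.0, so outputs lie in [-1, 1].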
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
feature_map_layout = {
'from_layer': ['Mixed_4c', 'Mixed_5c', '', '', '', ''],
'layer_depth': [-1, -1, 512, 256, 256, 128],
'use_explicit_padding': self._use_explicit_padding,
'use_depthwise': self._use_depthwise,
}
with slim.arg_scope(self._conv_hyperparams_fn()):
with tf.variable_scope('InceptionV2',
reuse=self._reuse_weights) as scope:
_, image_features = inception_v2.inception_v2_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Mixed_5c',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return feature_maps.values()
| 41.755906
| 81
| 0.67245
|
8df9ee5d9de68dd6bec7e0d9a99b53e43b3406c6
| 1,319
|
py
|
Python
|
lecture2/tests/question-2_9.py
|
ggorman/Introduction-Python-programming-2018
|
739b864c1499ccdbf9010d8fe774087a07bb09ee
|
[
"CC-BY-3.0"
] | 1
|
2019-01-12T12:43:24.000Z
|
2019-01-12T12:43:24.000Z
|
lecture2/tests/question-2_9.py
|
ggorman/Introduction-Python-programming-2018
|
739b864c1499ccdbf9010d8fe774087a07bb09ee
|
[
"CC-BY-3.0"
] | null | null | null |
lecture2/tests/question-2_9.py
|
ggorman/Introduction-Python-programming-2018
|
739b864c1499ccdbf9010d8fe774087a07bb09ee
|
[
"CC-BY-3.0"
] | 3
|
2019-05-16T21:08:48.000Z
|
2022-02-21T06:54:57.000Z
|
test = {
'name': 'question 2.9',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> np.allclose(compute_heights(), [1.0, 0.9, 0.81, 0.7290000000000001, 0.6561000000000001, 0.5904900000000002, 0.5314410000000002, 0.47829690000000014, 0.43046721000000016, 0.38742048900000015, 0.34867844010000015])
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> np.allclose(compute_heights(h_0=0.5), [0.5, 0.45, 0.405, 0.36450000000000005, 0.32805000000000006, 0.2952450000000001])
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> np.allclose(compute_heights(h_1=0.5), [1.0, 0.9, 0.81, 0.7290000000000001, 0.6561000000000001, 0.5904900000000002, 0.5314410000000002, 0.47829690000000014])
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> np.allclose(compute_heights(n=2), [1.0, 0.9, 0.81])
True
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': 'import numpy as np',
'teardown': '',
'type': 'doctest'
}
]
}
| 28.06383
| 226
| 0.477635
|
afabf94381d95b24e1553d515db7d11ce3111929
| 554
|
py
|
Python
|
agents_playground/renderers/scene.py
|
sholloway/agents-playground
|
f4da7acf9b410b4c01c5afc1aa45233960bea861
|
[
"MIT"
] | null | null | null |
agents_playground/renderers/scene.py
|
sholloway/agents-playground
|
f4da7acf9b410b4c01c5afc1aa45233960bea861
|
[
"MIT"
] | 37
|
2021-11-25T14:40:49.000Z
|
2022-02-23T23:09:28.000Z
|
agents_playground/renderers/scene.py
|
sholloway/agents-playground
|
f4da7acf9b410b4c01c5afc1aa45233960bea861
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from typing import Dict
from agents_playground.simulation.tag import Tag
from agents_playground.agents.agent import Agent
from agents_playground.agents.path import InterpolatedPath
@dataclass
class Scene:
agents: Dict[Tag, Agent]
paths: Dict[Tag, InterpolatedPath]
def __init__(self) -> None:
self.agents = dict()
self.paths = dict()
def add_agent(self, agent: Agent) -> None:
self.agents[agent.id] = agent
def add_path(self, path: InterpolatedPath) -> None:
self.paths[path.id] = path
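# Illustrative usage sketch (not part of the original module); my_agent and
# my_path stand in for objects exposing an `.id` Tag as required above:
# >>> scene = Scene()
# >>> scene.add_agent(my_agent)
# >>> scene.add_path(my_path)
# >>> scene.agents[my_agent.id] is my_agent
# True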
| 26.380952
| 58
| 0.740072
|
b96cc5c7af5c0e5e7fdc2b4c318bfd527cd993f5
| 5,360
|
py
|
Python
|
docs/conf.py
|
ebubae/adversarial-robustness-toolbox
|
55efab2c1a60ae14c37b72fe84778355314396ea
|
[
"MIT"
] | 1
|
2022-01-27T09:07:49.000Z
|
2022-01-27T09:07:49.000Z
|
docs/conf.py
|
ebubae/adversarial-robustness-toolbox
|
55efab2c1a60ae14c37b72fe84778355314396ea
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
ebubae/adversarial-robustness-toolbox
|
55efab2c1a60ae14c37b72fe84778355314396ea
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import art
# -- Project information -----------------------------------------------------
project = 'Adversarial Robustness Toolbox'
copyright = '2018, IBM Corporation'
author = 'Maria-Irina Nicolae'
# The short X.Y version
version = '0.10'
# The full version, including alpha/beta/rc tags
release = '0.10.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.viewcode',
'sphinx.ext.autodoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
if os.environ.get('READTHEDOCS') != 'True':
try:
import sphinx_rtd_theme
except ImportError:
pass # assume we have sphinx >= 1.3
else:
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'adversarial-robustness-toolboxdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'adversarial-robustness-toolbox.tex', 'adversarial-robustness-toolbox Documentation',
'Maria-Irina Nicolae', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'adversarial-robustness-toolbox', 'adversarial-robustness-toolbox Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'adversarial-robustness-toolbox', 'adversarial-robustness-toolbox Documentation',
author, 'adversarial-robustness-toolbox', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
| 31.715976
| 102
| 0.659328
|
4d58db24346e2d0a97ab74920453154f28f10b2c
| 265
|
py
|
Python
|
tests/artificial/transf_Difference/trend_PolyTrend/cycle_7/ar_/test_artificial_32_Difference_PolyTrend_7__100.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/artificial/transf_Difference/trend_PolyTrend/cycle_7/ar_/test_artificial_32_Difference_PolyTrend_7__100.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/artificial/transf_Difference/trend_PolyTrend/cycle_7/ar_/test_artificial_32_Difference_PolyTrend_7__100.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "Difference", sigma = 0.0, exog_count = 100, ar_order = 0);
| 37.857143
| 165
| 0.732075
|
ad498b54c1632e786fd6bcc29bb46b4119064d70
| 1,099
|
py
|
Python
|
setup.py
|
ajupatatero/neurasim
|
c1d3f8163a7389b06a13e453daa98ad5157d9b2e
|
[
"MIT"
] | null | null | null |
setup.py
|
ajupatatero/neurasim
|
c1d3f8163a7389b06a13e453daa98ad5157d9b2e
|
[
"MIT"
] | null | null | null |
setup.py
|
ajupatatero/neurasim
|
c1d3f8163a7389b06a13e453daa98ad5157d9b2e
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name='NeuraSim',
version='3.2.1',
description='CNN to accelerate Poisson step in CFD solvers.',
author='Anonymus',
license='LICENSE.txt',
packages=find_packages(include=['NeuraSim']),
install_requires=[
"numpy", "scipy", "matplotlib", "imageio",
],
#Include shell scripts related to pando ....
#scripts=['interface/commands/simulate.py',
# 'interface/commands/train.py',
# 'interface/commands/analyze.py',
# 'interface/commands/mgit.py',
# 'interface/commands/update.py'
#], #only on linux, windows not
entry_points={
'console_scripts': [
'simulate=interface.commands.simulate:main',
'train=interface.commands.train:main',
'analyze=interface.commands.analyze:main',
'update=interface.commands.update:main',
'mgit=interface.commands.mgit:main',
'launch=interface.commands.launch:main',
'iterate=interface.commands.iterate:main'
],
}
)
| 31.4
| 65
| 0.609645
|
a539e0acf56edcc9502338b7ebb4314cd52d0e9f
| 8,736
|
py
|
Python
|
deepbiome/loss_and_metric.py
|
Hua-Zhou/deepbiome
|
e168e5677356ef98ac504b587cba2b3d76c9dc5d
|
[
"BSD-3-Clause"
] | 1
|
2022-01-11T18:42:27.000Z
|
2022-01-11T18:42:27.000Z
|
deepbiome/loss_and_metric.py
|
Hua-Zhou/deepbiome
|
e168e5677356ef98ac504b587cba2b3d76c9dc5d
|
[
"BSD-3-Clause"
] | null | null | null |
deepbiome/loss_and_metric.py
|
Hua-Zhou/deepbiome
|
e168e5677356ef98ac504b587cba2b3d76c9dc5d
|
[
"BSD-3-Clause"
] | null | null | null |
######################################################################
## DeepBiome
## - Loss and metrics (mse, cross-entropy)
##
## July 10. 2019
## Youngwon (youngwon08@gmail.com)
##
## Reference
## - Keras (https://github.com/keras-team/keras)
######################################################################
import numpy as np
import sklearn.metrics as skmetrics
from keras.callbacks import Callback
import tensorflow as tf
import keras.backend as K
from keras.losses import mean_squared_error, mean_absolute_error, binary_crossentropy, categorical_crossentropy, sparse_categorical_crossentropy
from keras.metrics import binary_accuracy, categorical_accuracy, sparse_categorical_accuracy
from sklearn.metrics import roc_auc_score, f1_score
###############################################################################################################################
# tf loss functions
def precision(y_true, y_pred):
return K.sum(y_true*y_pred)/(K.sum(y_true*y_pred) + K.sum((1-y_true)*y_pred) + 1e-10)
def recall(y_true, y_pred):
return K.sum(y_true*y_pred)/(K.sum(y_true*y_pred) + K.sum(y_true*(1-y_pred)) + 1e-10)
def sensitivity(y_true, y_pred):
# y_pred = K.round(y_pred)
# neg_y_pred = 1 - y_pred
# true_positive = K.sum(y_true * y_pred)
# false_negative = K.sum(y_true * neg_y_pred)
# return (true_positive) / (true_positive + false_negative + K.epsilon())
y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), 0.5), K.floatx())
neg_y_pred = 1 - y_pred
true_positive = K.round(K.sum(K.clip(y_true * y_pred, 0, 1)))
false_negative = K.round(K.sum(K.clip(y_true * neg_y_pred, 0, 1)))
return (true_positive) / (true_positive + false_negative + K.epsilon())
def specificity(y_true, y_pred):
# y_pred = K.round(y_pred)
# neg_y_true = 1 - y_true
# neg_y_pred = 1 - y_pred
# false_positive = K.sum(neg_y_true * y_pred)
# true_negative = K.sum(neg_y_true * neg_y_pred)
# return (true_negative) / (false_positive + true_negative + K.epsilon())
y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), 0.5), K.floatx())
neg_y_true = 1 - y_true
neg_y_pred = 1 - y_pred
false_positive = K.round(K.sum(K.clip(neg_y_true * y_pred, 0, 1)))
true_negative = K.round(K.sum(K.clip(neg_y_true * neg_y_pred, 0, 1)))
return (true_negative) / (false_positive + true_negative + K.epsilon())
def gmeasure(y_true, y_pred):
return (sensitivity(y_true, y_pred) * specificity(y_true, y_pred)) ** 0.5
def auc(y_true, y_pred):
# https://stackoverflow.com/questions/43263111/defining-an-auc-metric-for-keras-to-support-evaluation-of-validation-dataset
score = tf.py_function(lambda y_true, y_pred : roc_auc_score(y_true, y_pred, average='macro', sample_weight=None).astype('float32'),
[y_true, y_pred],
Tout=tf.float32,
name='sklearnAUC')
return score
def f1_score_with_nan(y_true, y_pred, average='macro', sample_weight=None):
try:
score = f1_score(y_true, y_pred, average=average, sample_weight=sample_weight)
except:
score = np.nan
return score
def f1(y_true, y_pred):
# https://stackoverflow.com/questions/43263111/defining-an-auc-metric-for-keras-to-support-evaluation-of-validation-dataset
y_pred = K.round(y_pred)
score = tf.py_function(lambda y_true, y_pred : f1_score_with_nan(y_true, y_pred, average='macro', sample_weight=None).astype('float32'),
[y_true, y_pred],
Tout=tf.float32,
name='sklearnF1')
return score
def ss(a, axis=0):
# a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
def pearsonr(x,y):
n = len(x)
mx = np.mean(x)
my = np.mean(y)
xm, ym = x-mx, y-my
r_num = np.add.reduce(xm * ym)
r_den = np.sqrt(ss(xm) * ss(ym))
r = r_num / r_den
# Presumably, if abs(r) > 1, then it is only some small artifact of floating
# point arithmetic.
r = max(min(r, 1.0), -1.0)
return r
def correlation_coefficient(y_true, y_pred):
score = tf.py_function(lambda y_true, y_pred : pearsonr(y_true, y_pred),
[y_true, y_pred],
Tout=tf.float32,
name='correlation_coefficient')
return score
# TODO
# https://stackoverflow.com/questions/41032551/how-to-compute-receiving-operating-characteristic-roc-and-auc-in-keras
# def auc(y_true, y_pred):
# return NotImplementedError()
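# Usage sketch (illustrative only; `model` below is a hypothetical compiled Keras
# model, not something defined in this module). The backend metrics above follow
# the Keras (y_true, y_pred) signature, so they can be passed straight to compile():
# from keras.models import Sequential
# from keras.layers import Dense
# model = Sequential([Dense(1, activation='sigmoid', input_dim=16)])
# model.compile(optimizer='adam', loss=binary_crossentropy,
#               metrics=[binary_accuracy, sensitivity, specificity, gmeasure, auc, f1])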
###############################################################################################################################
# helper
def np_binary_accuracy(y_true, y_pred):
y_pred = (y_pred>=0.5).astype(np.int32)
return skmetrics.accuracy_score(y_true, y_pred, normalize=True, sample_weight=None)
def np_precision(y_true, y_pred):
return skmetrics.precision_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None)
# return (np.sum(y_true*y_pred) + 1e-7)/(np.sum(y_true*y_pred) + np.sum((1-y_true)*y_pred) + 1e-7)
def np_recall(y_true, y_pred):
return skmetrics.recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None)
# return (np.sum(y_true*y_pred) + 1e-7)/(np.sum(y_true*y_pred) + np.sum(y_true*(1-y_pred)) + 1e-7)
def np_f1_score(y_true, y_pred):
return skmetrics.f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None)
def np_roc_auc(y_true, y_pred):
return skmetrics.roc_auc_score(y_true, y_pred, average='macro', sample_weight=None)
def np_confusion_matrix(y_true, y_pred):
return skmetrics.confusion_matrix(y_true, y_pred).ravel()
def np_sensitivity(y_true, y_pred):
y_true = y_true.astype(np.int32)
y_pred = (y_pred >= 0.5).astype(np.int32)
neg_y_pred = 1 - y_pred
tp = np.sum(y_true * y_pred)
fn = np.sum(y_true * neg_y_pred)
return tp / (tp+fn)
def np_specificity(y_true, y_pred):
y_true = y_true.astype(np.int32)
y_pred = (y_pred >= 0.5).astype(np.int32)
neg_y_true = 1 - y_true
neg_y_pred = 1 - y_pred
fp = np.sum(neg_y_true * y_pred)
tn = np.sum(neg_y_true * neg_y_pred)
return tn / (tn+fp)
def np_PPV(y_true, y_pred):
y_true = y_true.astype(np.int32)
y_pred = (y_pred >= 0.5).astype(np.int32)
neg_y_true = 1 - y_true
tp = np.sum(y_true * y_pred)
fp = np.sum(neg_y_true * y_pred)
return tp/(tp+fp)
def np_gmeasure(y_true, y_pred):
sensitivity = np_sensitivity(y_true, y_pred)
specificity = np_specificity(y_true, y_pred)
return (sensitivity*specificity)**0.5
def metric_test(y_true, y_pred):
return (np_sensitivity(y_true, y_pred), np_specificity(y_true, y_pred),
np_gmeasure(y_true, y_pred), np_binary_accuracy(y_true, y_pred),
np_roc_auc(y_true, y_pred))
def metric_texa_test(y_true, y_pred):
y_true = y_true.astype(np.int32)
y_pred = (y_pred>=0.5).astype(np.int32)
return (np_sensitivity(y_true, y_pred), np_specificity(y_true, y_pred),
np_gmeasure(y_true, y_pred), np_binary_accuracy(y_true, y_pred))
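# Quick illustrative check of the numpy helpers above (toy labels/scores made up
# for the example, not taken from any real run):
# y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
# y_score = np.array([0.1, 0.4, 0.8, 0.6, 0.3, 0.2, 0.9, 0.7])
# print(metric_test(y_true, y_score))  # -> (sensitivity, specificity, gmeasure, accuracy, auc)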
###############################################################################################################################
# if __name__ == "__main__":
# test_metrics = {'Accuracy':binary_accuracy, 'Precision':precision, 'Recall':recall}
# print('Test loss functions %s' % test_metrics.keys())
# y_true_set = np.array([[[0,0,0,0,0],
# [0,0,0,0,0],
# [0,1,1,0,0],
# [1,1,1,0,0],
# [0,1,0,0,0]]])
# y_pred_set = np.array([[[0,0,0,0,1],
# [0,0,0,0,0],
# [0,1,0.6,0,0],
# [0,1,1,0,0],
# [0,0.3,0,0,0]]])
# def test(acc, y_true_set, y_pred_set):
# sess = tf.Session()
# K.set_session(sess)
# with sess.as_default():
# return acc.eval(feed_dict={y_true: y_true_set, y_pred: y_pred_set})
# # tf
# y_true = tf.placeholder("float32", shape=(None,y_true_set.shape[1],y_true_set.shape[2]))
# y_pred = tf.placeholder("float32", shape=(None,y_pred_set.shape[1],y_pred_set.shape[2]))
# metric_list = [binary_accuracy(y_true, y_pred),
# precision(y_true, y_pred),
# recall(y_true, y_pred)]
# # numpy
# print('%15s %15s %15s' % tuple(test_metrics.keys()))
# print('tf : {}'.format([test(acc, y_true_set, y_pred_set) for acc in metric_list]))
# print('np : {}'.format(np.round(metric_test(y_true_set[0],y_pred_set[0]),8)))
| 41.402844
| 144
| 0.602793
|
6d6abf5b12a4baca8dfce50a8863de36be97d610
| 122
|
py
|
Python
|
prpc/response.py
|
einsfr/cmc-storage
|
b8f806ee343accf827c78c556a6bb4023bed4556
|
[
"MIT"
] | null | null | null |
prpc/response.py
|
einsfr/cmc-storage
|
b8f806ee343accf827c78c556a6bb4023bed4556
|
[
"MIT"
] | null | null | null |
prpc/response.py
|
einsfr/cmc-storage
|
b8f806ee343accf827c78c556a6bb4023bed4556
|
[
"MIT"
] | null | null | null |
class BaseResponse:
pass
class DataResponse(BaseResponse):
pass
class ErrorResponse(BaseResponse):
pass
| 9.384615
| 34
| 0.721311
|
3030be7ff8d4d3f71c8353cf7efd21180e4ff75f
| 13,945
|
py
|
Python
|
simulation/observations.py
|
kuntzer/binfind
|
28f9cf9474e6b39a55a1a22d19ca8131a0408c84
|
[
"MIT"
] | null | null | null |
simulation/observations.py
|
kuntzer/binfind
|
28f9cf9474e6b39a55a1a22d19ca8131a0408c84
|
[
"MIT"
] | null | null | null |
simulation/observations.py
|
kuntzer/binfind
|
28f9cf9474e6b39a55a1a22d19ca8131a0408c84
|
[
"MIT"
] | null | null | null |
from __future__ import division
import numpy as np
import scipy.interpolate as interp
from scipy.spatial import cKDTree
import sklearn.metrics as metrics
from .. import utils
from .. import diagnostics
import logging
logger = logging.getLogger(__name__)
class Observations():
def __init__(self, ei_max_error, r2_max_error, fname_interpolation, fname_fiducial, radius=6):
self.ei_max_error = ei_max_error
self.r2_max_error = r2_max_error
psf_positions = np.loadtxt(fname_fiducial)
self.x_psf = psf_positions[:,0]
self.y_psf = psf_positions[:,1]
self.min_x_psf = np.amin(self.x_psf)
self.min_y_psf = np.amin(self.y_psf)
self.max_x_psf = np.amax(self.x_psf)
self.max_y_psf = np.amax(self.y_psf)
self.configurations, self.fields_e1, self.fields_e2, self.fields_sigma = utils.readpickle(fname_interpolation)
# Preparing for the matching of the indexes
self.contrasts = np.unique(self.configurations[:,0])
dxdy = utils.rdisk(radius=radius)
self.dxdytree = cKDTree(dxdy)
# Preparing the selection of the interpolation for no binaries
id_null = np.where(np.all(self.configurations == 0, axis=1))[0]
# Just make sure that the size of the array is one
assert np.size(id_null) == 1
self.id_null = id_null[0]
#self.meane = []
self.meanr2 = 0
for x, y in zip(self.x_psf, self.y_psf):
self.meanr2 += self.fields_sigma[self.id_null](x, y)
#e1_ = self.fields_e1[self.id_null](x, y)
#e2_ = self.fields_e2[self.id_null](x, y)
#self.meane.append(np.hypot(e1_, e2_))
self.meanr2 /= len(self.x_psf)
#print np.amin(self.meane), np.amax(self.meane)
self.meane = 0.1
def observe(self, catalog, n_exposures):
self.n_exposures = n_exposures
observed_stars = []
count_doubles = 0
# Now, for each star, get the position of the binary
for this_star in catalog:
con = this_star[2]
dx = this_star[3]
dy = this_star[4]
# Assign a position in the field of view
x_star = np.random.uniform(low=self.min_x_psf, high=self.max_x_psf)
y_star = np.random.uniform(low=self.min_y_psf, high=self.max_y_psf)
# Making n_exposures observations of the same stars
obs_ss = []
for _ in range(n_exposures):
if this_star[0] == 1:
count_doubles += 1./n_exposures
# Preparing the selection of the interpolation for no binaries
if con > self.contrasts[-1]:
idcons = [utils.find_nearest(self.contrasts, con)]
wcon = [1.]
else:
ds = np.abs(self.contrasts - con)
idcons = np.argsort(ds)[:2]
wcon = 1. / ds[idcons]
e1_star = 0.
e2_star = 0.
sigma_star = 0.
for ii, idcon in enumerate(idcons):
idcon = np.where(self.configurations[:,0] == self.contrasts[idcon])[0]
dist, ids = self.dxdytree.query([dx, dy], k=3)
we = 1./dist
e1_star += np.average([fe1(x_star, y_star) for fe1 in self.fields_e1[idcon][ids]], weights=we) * wcon[ii]
e2_star += np.average([fe2(x_star, y_star) for fe2 in self.fields_e2[idcon][ids]], weights=we) * wcon[ii]
sigma_star += np.average([sig(x_star, y_star) for sig in self.fields_sigma[idcon][ids]], weights=we) * wcon[ii]
#print e1_star; exit()
e1_star /= np.sum(wcon)
e2_star /= np.sum(wcon)
sigma_star /= np.sum(wcon)
else:
# Interpolate the ellipticity and size
e1_star = self.fields_e1[self.id_null](x_star, y_star)
e2_star = self.fields_e2[self.id_null](x_star, y_star)
sigma_star = self.fields_sigma[self.id_null](x_star, y_star)
# Adding some noise in the measure of e1, e2
#if this_star[0] == 1 :print self.fields_e2[self.id_null](x_star, y_star), e2_star,
"""if this_star[0] == 1 :
#print dx, dy, e1_star, e2_star, np.hypot(e1_star, e2_star), sigma_star * 12. * 4.
t = self.fields_e2[self.id_null](x_star, y_star)
te = np.hypot(self.fields_e2[self.id_null](x_star, y_star), self.fields_e1[self.id_null](x_star, y_star))
o = e2_star
oe = np.hypot(e2_star, e1_star)
obs_errors.append(oe-te)
print te, oe, (oe-te)/te
#print "%1.2f \t %1.4f %+1.1e\t%1.4f %1.4f %+1.1e" % (this_star[1] / .12,t,(o-t)/t, te,oe,(oe-te)/te),
"""
e1_star += np.random.normal(scale=self.ei_max_error * self.meane)
e2_star += np.random.normal(scale=self.ei_max_error * self.meane)
"""if this_star[0] == 1 :
oe = np.hypot(e2_star, e1_star)
#print "\t%1.4f %+1.1e" % (oe,(oe-te)/te)
#if this_star[0] == 1:print e2_star"""
sigma_star += np.random.normal(scale=self.r2_max_error * self.meanr2)
# Adding to the catalogue
obs_ss.append([x_star, y_star, e1_star, e2_star, sigma_star])
#x_star += (float(delta_inbetween_frame[0]) * 0.1 / 360.)
#y_star += (float(delta_inbetween_frame[1]) * 0.1 / 360.)
observed_stars.append(obs_ss)
logger.info("Observed {} stars, {:1.1f}% doubles".format(len(observed_stars), count_doubles/len(observed_stars)*100))
self.observed_stars = np.asarray(observed_stars)
def substract_fields(self, eps=0., error_e=2e-4, error_r2=1e-3, bias_e=0, bias_r2=0, relerr=True):
obs_x = self.observed_stars[:,:,0].flatten()
obs_y = self.observed_stars[:,:,1].flatten()
n_stars_obs = self.observed_stars.shape[0]
#obs_xy = (np.array([obs_x, obs_y]).T).reshape([n_stars_obs, self.n_exposures * 2])
fiducial_e1 = self.fields_e1[self.id_null](obs_x, obs_y).reshape([n_stars_obs, self.n_exposures]) + bias_e
fiducial_e2 = self.fields_e2[self.id_null](obs_x, obs_y).reshape([n_stars_obs, self.n_exposures]) + bias_e
fiducial_sigma = self.fields_sigma[self.id_null](obs_x, obs_y).reshape([n_stars_obs, self.n_exposures]) + bias_r2
fiducial_e1 += np.random.normal(scale=error_e * self.meane, size=[n_stars_obs, self.n_exposures])
fiducial_e2 += np.random.normal(scale=error_e * self.meane, size=[n_stars_obs, self.n_exposures])
fiducial_sigma += np.random.normal(scale=error_r2 * self.meane, size=[n_stars_obs, self.n_exposures])
dev_e1 = (self.observed_stars[:,:,2] - fiducial_e1)
dev_e2 = (self.observed_stars[:,:,3] - fiducial_e2)
dev_r2 = (self.observed_stars[:,:,4] - fiducial_sigma)
if relerr:
dev_e1 /= (fiducial_e1 + eps)
dev_e2 /= (fiducial_e2 + eps)
dev_r2 /= (fiducial_sigma + eps)
obs_xy = np.array([fiducial_e1[:,0], fiducial_e2[:,0], fiducial_sigma[:,0]]).T
features = np.array([dev_e1.T, dev_e2.T, dev_r2.T]).reshape([3*self.n_exposures, n_stars_obs]).T
return obs_xy, features
def reconstruct_fields(self, classifier, n_iter_reconstr, n_neighbours, eps, truth=None, return_proba=False, relerr=True, **kwargs):
n_stars = self.observed_stars.shape[0]
ids_all = range(n_stars)
outliers_ids = None
observed_stars = self.observed_stars
obs_x = self.observed_stars[:,:,0].flatten()
obs_y = self.observed_stars[:,:,1].flatten()
#obs_xy = (np.array([obs_x, obs_y]).T).reshape([self.observed_stars.shape[0], self.n_exposures * 2])[:,:2]
for kk in range(n_iter_reconstr):
logger.info("PSF reconstruction with {:s}, iteration {:d}/{:d}".format(classifier, kk+1, n_iter_reconstr))
if np.size(outliers_ids) >= n_stars - n_neighbours:
continue
de1 = []
de2 = []
dsigma = []
daf = []
for ii in range(n_stars):
if outliers_ids is None:
ids_singles = ids_all
ids_single = np.delete(ids_singles, [ii])
else:
# Remove outliers from the list
ids_single = np.delete(ids_all, np.concatenate([outliers_ids, [ii]]))
obs_x = (observed_stars[ids_single,0,0].flatten())
obs_y = (observed_stars[ids_single,0,1].flatten())
xy = np.array([obs_x, obs_y]).T
ie1 = []
ie2 = []
isigma = []
for iobs in range(self.n_exposures):
ie1.append(interp.NearestNDInterpolator(xy, observed_stars[ids_single,iobs,2]) )
ie2.append(interp.NearestNDInterpolator(xy, observed_stars[ids_single,iobs,3]) )
isigma.append(interp.NearestNDInterpolator(xy, observed_stars[ids_single,iobs,4]) )
				tree = cKDTree(list(zip(obs_x, obs_y)))
				d, inds = tree.query(list(zip([observed_stars[ii,0,0]], [observed_stars[ii,0,1]])), k = n_neighbours)
inds = inds[d > 0]
d = d[d > 0]
weights = 1. / (d*2)
obs_e1 = np.median(observed_stars[inds,:,2], axis=1)
obs_e2 = np.median(observed_stars[inds,:,3], axis=1)
obs_r2 = np.median(observed_stars[inds,:,4], axis=1)
try:
dinterp_e1 = np.average(obs_e1, weights=weights)
				except:
					print(xy.shape)
					print(weights)
					print(d)
					print(inds)
raise
dinterp_e2 = np.average(obs_e2, weights=weights)
dinterp_r2 = np.average(obs_r2, weights=weights)
ae1 = []
ae2 = []
asigma = []
for iobs in range(self.n_exposures):
#print observed_stars[ii,iobs,2] - ie1[iobs](observed_stars[ii,0,0], observed_stars[ii,0,1]),
#print ie1[iobs](observed_stars[ii,0,0], observed_stars[ii,0,1])
ae1.append(ie1[iobs](observed_stars[ii,0,0], observed_stars[ii,0,1]))
ae2.append(ie2[iobs](observed_stars[ii,0,0], observed_stars[ii,0,1]))
asigma.append(isigma[iobs](observed_stars[ii,0,0], observed_stars[ii,0,1]))
dinterp_e1 = np.median(np.asarray(ae1))
dinterp_e2 = np.median(np.asarray(ae2))
dinterp_r2 = np.median(np.asarray(asigma))
dde1 = observed_stars[ii,:,2] - dinterp_e1
dde2 = observed_stars[ii,:,3] - dinterp_e2
ddr2 = observed_stars[ii,:,4] - dinterp_r2
daf.append([dinterp_e1, dinterp_e2, dinterp_r2])
if relerr:
dde1 /= (dinterp_e1 + eps)
dde2 /= (dinterp_e2 + eps)
ddr2 /= (dinterp_r2 + eps)
de1.append(dde1)
de2.append(dde2)
dsigma.append(ddr2)
de1 = np.array(de1)
de2 = np.array(de2)
dsigma = np.array(dsigma)
daf = np.array(daf)
if relerr:
features = np.concatenate([de1, de2, dsigma], axis=1)
else:
features = np.concatenate([daf, de1, de2, dsigma], axis=1)
#features = np.concatenate([daf[:,0].reshape((n_stars,1)), de1], axis=1)
preds = classifier.predict(features)
outliers_ids = np.where(preds == 1)[0]
if truth is not None :
f1_ = metrics.f1_score(truth, preds, average='binary')
tpr, fpr = diagnostics.get_tpr_fpr(truth, preds)
msg = "F1={:1.3f}, FPR={:2.1f}%, TPR={:2.1f}%".format(f1_, fpr*100., tpr*100.)
logger.info(msg)
proba = classifier.predict_proba(features, **kwargs)
if return_proba:
return preds, proba
else:
return preds
def get_reconstruct_fields(self, n_iter_reconstr, n_neighbours, eps, truth=None, return_proba=False, relerr=True, **kwargs):
n_stars = self.observed_stars.shape[0]
ids_all = range(n_stars)
outliers_ids = None
observed_stars = self.observed_stars
obs_x = self.observed_stars[:,:,0].flatten()
obs_y = self.observed_stars[:,:,1].flatten()
#obs_xy = (np.array([obs_x, obs_y]).T).reshape([self.observed_stars.shape[0], self.n_exposures * 2])[:,:2]
n_iter_reconstr = 1
for kk in range(n_iter_reconstr):
logger.info("Iteration {:d}/{:d}".format(kk+1, n_iter_reconstr))
if np.size(outliers_ids) >= n_stars - n_neighbours:
continue
de1 = []
de2 = []
dsigma = []
daf = []
for ii in range(n_stars):
if outliers_ids is None:
ids_singles = ids_all
ids_single = np.delete(ids_singles, [ii])
else:
# Remove outliers from the list
ids_single = np.delete(ids_all, np.concatenate([outliers_ids, [ii]]))
obs_x = (observed_stars[ids_single,0,0].flatten())
obs_y = (observed_stars[ids_single,0,1].flatten())
xy = np.array([obs_x, obs_y]).T
ie1 = []
ie2 = []
isigma = []
for iobs in range(self.n_exposures):
ie1.append(interp.NearestNDInterpolator(xy, observed_stars[ids_single,iobs,2]) )
ie2.append(interp.NearestNDInterpolator(xy, observed_stars[ids_single,iobs,3]) )
isigma.append(interp.NearestNDInterpolator(xy, observed_stars[ids_single,iobs,4]) )
				tree = cKDTree(list(zip(obs_x, obs_y)))
				d, inds = tree.query(list(zip([observed_stars[ii,0,0]], [observed_stars[ii,0,1]])), k = n_neighbours)
inds = inds[d > 0]
d = d[d > 0]
weights = 1. / (d*2)
obs_e1 = np.median(observed_stars[inds,:,2], axis=1)
obs_e2 = np.median(observed_stars[inds,:,3], axis=1)
obs_r2 = np.median(observed_stars[inds,:,4], axis=1)
try:
dinterp_e1 = np.average(obs_e1, weights=weights)
				except:
					print(xy.shape)
					print(weights)
					print(d)
					print(inds)
raise
dinterp_e2 = np.average(obs_e2, weights=weights)
dinterp_r2 = np.average(obs_r2, weights=weights)
ae1 = []
ae2 = []
asigma = []
for iobs in range(self.n_exposures):
#print observed_stars[ii,iobs,2] - ie1[iobs](observed_stars[ii,0,0], observed_stars[ii,0,1]),
#print ie1[iobs](observed_stars[ii,0,0], observed_stars[ii,0,1])
ae1.append(ie1[iobs](observed_stars[ii,0,0], observed_stars[ii,0,1]))
ae2.append(ie2[iobs](observed_stars[ii,0,0], observed_stars[ii,0,1]))
asigma.append(isigma[iobs](observed_stars[ii,0,0], observed_stars[ii,0,1]))
dinterp_e1 = np.median(np.asarray(ae1))
dinterp_e2 = np.median(np.asarray(ae2))
dinterp_r2 = np.median(np.asarray(asigma))
dde1 = observed_stars[ii,:,2] - dinterp_e1
dde2 = observed_stars[ii,:,3] - dinterp_e2
ddr2 = observed_stars[ii,:,4] - dinterp_r2
daf.append([dinterp_e1, dinterp_e2, dinterp_r2])
if relerr:
dde1 /= (dinterp_e1 + eps)
dde2 /= (dinterp_e2 + eps)
ddr2 /= (dinterp_r2 + eps)
de1.append(dde1)
de2.append(dde2)
dsigma.append(ddr2)
de1 = np.array(de1)
de2 = np.array(de2)
dsigma = np.array(dsigma)
daf = np.array(daf)
if relerr:
features = np.concatenate([de1, de2, dsigma], axis=1)
else:
features = np.concatenate([daf, de1, de2, dsigma], axis=1)
#features = np.concatenate([daf[:,0].reshape((n_stars,1)), de1], axis=1)
return features
| 35.214646
| 133
| 0.6583
|
eb72c76ce77f9618c3afc3b07effc2b002cbfed6
| 61
|
py
|
Python
|
actions/__init__.py
|
Adrian-Tamas/python-behave-automation-quickstart
|
243d58dc52e3bcf7a93cb9dcf4a0175e52ab0131
|
[
"MIT"
] | null | null | null |
actions/__init__.py
|
Adrian-Tamas/python-behave-automation-quickstart
|
243d58dc52e3bcf7a93cb9dcf4a0175e52ab0131
|
[
"MIT"
] | null | null | null |
actions/__init__.py
|
Adrian-Tamas/python-behave-automation-quickstart
|
243d58dc52e3bcf7a93cb9dcf4a0175e52ab0131
|
[
"MIT"
] | null | null | null |
import logging
logger = logging.getLogger('gdp-automation')
| 15.25
| 44
| 0.786885
|
b6001057e4e79390e86ee7e8a1e8ba26b01bc28b
| 7,808
|
py
|
Python
|
RasPi_Dev/ros_ws/devel/lib/python2.7/dist-packages/world_canvas_msgs/srv/_ResetDatabase.py
|
QianheYu/xtark_driver_dev
|
1708888161cf20c0d1f45c99d0da4467d69c26c8
|
[
"BSD-3-Clause"
] | 1
|
2022-03-11T03:31:15.000Z
|
2022-03-11T03:31:15.000Z
|
RasPi_Dev/ros_ws/devel/lib/python2.7/dist-packages/world_canvas_msgs/srv/_ResetDatabase.py
|
bravetree/xtark_driver_dev
|
1708888161cf20c0d1f45c99d0da4467d69c26c8
|
[
"BSD-3-Clause"
] | null | null | null |
RasPi_Dev/ros_ws/devel/lib/python2.7/dist-packages/world_canvas_msgs/srv/_ResetDatabase.py
|
bravetree/xtark_driver_dev
|
1708888161cf20c0d1f45c99d0da4467d69c26c8
|
[
"BSD-3-Clause"
] | null | null | null |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from world_canvas_msgs/ResetDatabaseRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class ResetDatabaseRequest(genpy.Message):
_md5sum = "d41d8cd98f00b204e9800998ecf8427e"
_type = "world_canvas_msgs/ResetDatabaseRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """"""
__slots__ = []
_slot_types = []
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ResetDatabaseRequest, self).__init__(*args, **kwds)
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from world_canvas_msgs/ResetDatabaseResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class ResetDatabaseResponse(genpy.Message):
_md5sum = "b543fbd3518c791be28589b850702201"
_type = "world_canvas_msgs/ResetDatabaseResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """
bool result
string message
"""
__slots__ = ['result','message']
_slot_types = ['bool','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
result,message
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ResetDatabaseResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.result is None:
self.result = False
if self.message is None:
self.message = ''
else:
self.result = False
self.message = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_B().pack(self.result))
_x = self.message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.result,) = _get_struct_B().unpack(str[start:end])
self.result = bool(self.result)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.message = str[start:end].decode('utf-8')
else:
self.message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_B().pack(self.result))
_x = self.message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.result,) = _get_struct_B().unpack(str[start:end])
self.result = bool(self.result)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.message = str[start:end].decode('utf-8')
else:
self.message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
class ResetDatabase(object):
_type = 'world_canvas_msgs/ResetDatabase'
_md5sum = 'b543fbd3518c791be28589b850702201'
_request_class = ResetDatabaseRequest
_response_class = ResetDatabaseResponse
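# Illustrative client-side usage (assumes a running ROS master and a node that
# advertises this service; the service name 'reset_database' below is a placeholder):
# import rospy
# from world_canvas_msgs.srv import ResetDatabase
# rospy.wait_for_service('reset_database')
# reset = rospy.ServiceProxy('reset_database', ResetDatabase)
# resp = reset()  # ResetDatabaseRequest has no fields
# print(resp.result, resp.message)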
| 32.806723
| 145
| 0.658171
|
b124039778d46849d84024bc23719078fdd44a8e
| 24,394
|
py
|
Python
|
gnpy/core/network.py
|
caffery-chen/oopt-gnpy
|
211227288016a75328710ee8d1c78b820dac89d9
|
[
"BSD-3-Clause"
] | null | null | null |
gnpy/core/network.py
|
caffery-chen/oopt-gnpy
|
211227288016a75328710ee8d1c78b820dac89d9
|
[
"BSD-3-Clause"
] | null | null | null |
gnpy/core/network.py
|
caffery-chen/oopt-gnpy
|
211227288016a75328710ee8d1c78b820dac89d9
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
gnpy.core.network
=================
Working with networks which consist of network elements
'''
from operator import attrgetter
from gnpy.core import ansi_escapes, elements
from gnpy.core.exceptions import ConfigurationError, NetworkTopologyError
from gnpy.core.utils import round2float, convert_length
from collections import namedtuple
def edfa_nf(gain_target, variety_type, equipment):
amp_params = equipment['Edfa'][variety_type]
amp = elements.Edfa(
uid='calc_NF',
params=amp_params.__dict__,
operational={
'gain_target': gain_target,
'tilt_target': 0
}
)
amp.pin_db = 0
amp.nch = 88
amp.slot_width = 50e9
return amp._calc_nf(True)
def select_edfa(raman_allowed, gain_target, power_target, equipment, uid, restrictions=None):
"""amplifer selection algorithm
@Orange Jean-Luc Augé
"""
Edfa_list = namedtuple('Edfa_list', 'variety power gain_min nf')
TARGET_EXTENDED_GAIN = equipment['Span']['default'].target_extended_gain
    # for ROADM restriction only: create a dict that also includes amps not allowed for design,
    # because the main use case is to have a specific ROADM amp which is not allowed for ILA
    # with the auto design
edfa_dict = {name: amp for (name, amp) in equipment['Edfa'].items()
if restrictions is None or name in restrictions[0]}
pin = power_target - gain_target
    # create 2 lists of available amplifiers with relevant attributes for their selection
# edfa list with:
# extended gain min allowance of 3dB: could be parametrized, but a bit complex
# extended gain max allowance TARGET_EXTENDED_GAIN is coming from eqpt_config.json
    # the power attribute includes both power AND gain limitations
edfa_list = [Edfa_list(
variety=edfa_variety,
power=min(
pin
+ edfa.gain_flatmax
+ TARGET_EXTENDED_GAIN,
edfa.p_max
)
- power_target,
gain_min=gain_target + 3
- edfa.gain_min,
nf=edfa_nf(gain_target, edfa_variety, equipment))
for edfa_variety, edfa in edfa_dict.items()
if ((edfa.allowed_for_design or restrictions is not None) and not edfa.raman)]
# consider a Raman list because of different gain_min requirement:
# do not allow extended gain min for Raman
raman_list = [Edfa_list(
variety=edfa_variety,
power=min(
pin
+ edfa.gain_flatmax
+ TARGET_EXTENDED_GAIN,
edfa.p_max
)
- power_target,
gain_min=gain_target
- edfa.gain_min,
nf=edfa_nf(gain_target, edfa_variety, equipment))
for edfa_variety, edfa in edfa_dict.items()
if (edfa.allowed_for_design and edfa.raman)] \
if raman_allowed else []
# merge raman and edfa lists
amp_list = edfa_list + raman_list
# filter on min gain limitation:
acceptable_gain_min_list = [x for x in amp_list if x.gain_min > 0]
if len(acceptable_gain_min_list) < 1:
# do not take this empty list into account for the rest of the code
# but issue a warning to the user and do not consider Raman
        # Raman below min gain should not be allowed because it is meant to be a design requirement
# and raman padding at the amplifier input is impossible!
if len(edfa_list) < 1:
raise ConfigurationError(f'auto_design could not find any amplifier \
to satisfy min gain requirement in node {uid} \
please increase span fiber padding')
else:
# TODO: convert to logging
print(
f'{ansi_escapes.red}WARNING:{ansi_escapes.reset} target gain in node {uid} is below all available amplifiers min gain: \
                amplifier input padding will be assumed, consider increasing span fiber padding instead'
)
acceptable_gain_min_list = edfa_list
# filter on gain+power limitation:
# this list checks both the gain and the power requirement
# because of the way .power is calculated in the list
acceptable_power_list = [x for x in acceptable_gain_min_list if x.power > 0]
if len(acceptable_power_list) < 1:
# no amplifier satisfies the required power, so pick the highest power(s):
power_max = max(acceptable_gain_min_list, key=attrgetter('power')).power
# check and pick if other amplifiers may have a similar gain/power
# allow a 0.3dB power range
        # this allows choosing an amplifier with a better NF subsequently
acceptable_power_list = [x for x in acceptable_gain_min_list
if x.power - power_max > -0.3]
# gain and power requirements are resolved,
    # => choose the amp with the best NF among the acceptable ones:
selected_edfa = min(acceptable_power_list, key=attrgetter('nf')) # filter on NF
# check what are the gain and power limitations of this amp
power_reduction = round(min(selected_edfa.power, 0), 2)
if power_reduction < -0.5:
print(
f'{ansi_escapes.red}WARNING:{ansi_escapes.reset} target gain and power in node {uid}\n \
is beyond all available amplifiers capabilities and/or extended_gain_range:\n\
a power reduction of {power_reduction} is applied\n'
)
return selected_edfa.variety, power_reduction
def target_power(network, node, equipment): # get_fiber_dp
if isinstance(node, elements.Roadm):
return 0
SPAN_LOSS_REF = 20
POWER_SLOPE = 0.3
dp_range = list(equipment['Span']['default'].delta_power_range_db)
node_loss = span_loss(network, node)
try:
dp = round2float((node_loss - SPAN_LOSS_REF) * POWER_SLOPE, dp_range[2])
dp = max(dp_range[0], dp)
dp = min(dp_range[1], dp)
except IndexError:
raise ConfigurationError(f'invalid delta_power_range_db definition in eqpt_config[Span]'
f'delta_power_range_db: [lower_bound, upper_bound, step]')
return dp
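# Worked example of the rule above (illustrative numbers only): with the 20 dB
# reference span loss and 0.3 dB/dB slope, a 25 dB span yields (25 - 20) * 0.3 = 1.5 dB
# of extra per-channel power, which is then rounded to the configured step and
# clipped to the [lower_bound, upper_bound] of delta_power_range_db.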
_fiber_fused_types = (elements.Fused, elements.Fiber)
def prev_node_generator(network, node):
"""fused spans interest:
iterate over all predecessors while they are either Fused or Fibers succeeded by Fused"""
try:
prev_node = next(network.predecessors(node))
except StopIteration:
if isinstance(node, elements.Transceiver):
return
raise NetworkTopologyError(f'Node {node.uid} is not properly connected, please check network topology')
if ((isinstance(prev_node, elements.Fused) and isinstance(node, _fiber_fused_types)) or
(isinstance(prev_node, _fiber_fused_types) and isinstance(node, elements.Fused))):
yield prev_node
yield from prev_node_generator(network, prev_node)
def next_node_generator(network, node):
"""fused spans interest:
    iterate over all successors while they are either Fused or Fibers preceded by Fused"""
try:
next_node = next(network.successors(node))
except StopIteration:
if isinstance(node, elements.Transceiver):
return
raise NetworkTopologyError(f'Node {node.uid} is not properly connected, please check network topology')
if ((isinstance(next_node, elements.Fused) and isinstance(node, _fiber_fused_types)) or
(isinstance(next_node, _fiber_fused_types) and isinstance(node, elements.Fused))):
yield next_node
yield from next_node_generator(network, next_node)
def span_loss(network, node):
"""Total loss of a span (Fiber and Fused nodes) which contains the given node"""
loss = node.loss if node.passive else 0
loss += sum(n.loss for n in prev_node_generator(network, node))
loss += sum(n.loss for n in next_node_generator(network, node))
return loss
def find_first_node(network, node):
"""Fused node interest:
returns the 1st node at the origin of a succession of fused nodes
(aka no amp in between)"""
this_node = node
for this_node in prev_node_generator(network, node):
pass
return this_node
def find_last_node(network, node):
"""Fused node interest:
returns the last node in a succession of fused nodes
(aka no amp in between)"""
this_node = node
for this_node in next_node_generator(network, node):
pass
return this_node
def set_amplifier_voa(amp, power_target, power_mode):
VOA_MARGIN = 1 # do not maximize the VOA optimization
if amp.out_voa is None:
if power_mode and amp.params.out_voa_auto:
voa = min(amp.params.p_max - power_target,
amp.params.gain_flatmax - amp.effective_gain)
voa = max(round2float(voa, 0.5) - VOA_MARGIN, 0)
amp.delta_p = amp.delta_p + voa
amp.effective_gain = amp.effective_gain + voa
else:
voa = 0 # no output voa optimization in gain mode
amp.out_voa = voa
def set_egress_amplifier(network, this_node, equipment, pref_ch_db, pref_total_db):
""" this node can be a transceiver or a ROADM (same function called in both cases)
"""
power_mode = equipment['Span']['default'].power_mode
next_oms = (n for n in network.successors(this_node) if not isinstance(n, elements.Transceiver))
this_node_degree = {k: v for k, v in this_node.per_degree_pch_out_db.items()} if hasattr(this_node, 'per_degree_pch_out_db') else {}
for oms in next_oms:
# go through all the OMS departing from the ROADM
prev_node = this_node
node = oms
# if isinstance(next_node, elements.Fused): #support ROADM wo egress amp for metro applications
# node = find_last_node(next_node)
# next_node = next(n for n in network.successors(node))
# next_node = find_last_node(next_node)
if node.uid not in this_node_degree:
# if no target power is defined on this degree or no per degree target power is given use the global one
# if target_pch_out_db is not an attribute, then the element must be a transceiver
this_node_degree[node.uid] = getattr(this_node.params, 'target_pch_out_db', 0)
# use the target power on this degree
prev_dp = this_node_degree[node.uid] - pref_ch_db
dp = prev_dp
prev_voa = 0
voa = 0
visited_nodes = []
while not (isinstance(node, elements.Roadm) or isinstance(node, elements.Transceiver)):
# go through all nodes in the OMS (loop until next Roadm instance)
try:
next_node = next(network.successors(node))
except StopIteration:
raise NetworkTopologyError(f'{type(node).__name__} {node.uid} is not properly connected, please check network topology')
visited_nodes.append(node)
if next_node in visited_nodes:
raise NetworkTopologyError(f'Loop detected for {type(node).__name__} {node.uid}, please check network topology')
if isinstance(node, elements.Edfa):
node_loss = span_loss(network, prev_node)
voa = node.out_voa if node.out_voa else 0
if node.delta_p is None:
dp = target_power(network, next_node, equipment) + voa
else:
dp = node.delta_p
if node.effective_gain is None or power_mode:
gain_target = node_loss + dp - prev_dp + prev_voa
else: # gain mode with effective_gain
gain_target = node.effective_gain
dp = prev_dp - node_loss - prev_voa + gain_target
power_target = pref_total_db + dp
if isinstance(prev_node, elements.Fiber):
max_fiber_lineic_loss_for_raman = \
equipment['Span']['default'].max_fiber_lineic_loss_for_raman * 1e-3 # dB/m
raman_allowed = prev_node.params.loss_coef < max_fiber_lineic_loss_for_raman
else:
raman_allowed = False
if node.params.type_variety == '':
if node.variety_list and isinstance(node.variety_list, list):
restrictions = node.variety_list
elif isinstance(prev_node, elements.Roadm) and prev_node.restrictions['booster_variety_list']:
# implementation of restrictions on roadm boosters
restrictions = prev_node.restrictions['booster_variety_list']
elif isinstance(next_node, elements.Roadm) and next_node.restrictions['preamp_variety_list']:
# implementation of restrictions on roadm preamp
restrictions = next_node.restrictions['preamp_variety_list']
else:
restrictions = None
edfa_variety, power_reduction = select_edfa(raman_allowed, gain_target, power_target, equipment, node.uid, restrictions)
extra_params = equipment['Edfa'][edfa_variety]
node.params.update_params(extra_params.__dict__)
dp += power_reduction
gain_target += power_reduction
else:
if node.params.raman and not raman_allowed:
if isinstance(prev_node, elements.Fiber):
print(f'{ansi_escapes.red}WARNING{ansi_escapes.reset}: raman is used in node {node.uid}\n '
'but fiber lineic loss is above threshold\n')
else:
print(f'{ansi_escapes.red}WARNING{ansi_escapes.reset}: raman is used in node {node.uid}\n '
'but previous node is not a fiber\n')
# if variety is imposed by user, and if the gain_target (computed or imposed) is also above
# variety max gain + extended range, then warn that gain > max_gain + extended range
if gain_target - equipment['Edfa'][node.params.type_variety].gain_flatmax - \
equipment['Span']['default'].target_extended_gain > 1e-2:
# 1e-2 to allow a small margin according to round2float min step
print(f'{ansi_escapes.red}WARNING{ansi_escapes.reset}: '
f'WARNING: effective gain in Node {node.uid} is above user '
f'specified amplifier {node.params.type_variety}\n'
f'max flat gain: {equipment["Edfa"][node.params.type_variety].gain_flatmax}dB ; '
f'required gain: {gain_target}dB. Please check amplifier type.')
node.delta_p = dp if power_mode else None
node.effective_gain = gain_target
set_amplifier_voa(node, power_target, power_mode)
prev_dp = dp
prev_voa = voa
prev_node = node
node = next_node
# print(f'{node.uid}')
if isinstance(this_node, elements.Roadm):
this_node.per_degree_pch_out_db = {k: v for k, v in this_node_degree.items()}
def add_roadm_booster(network, roadm):
next_nodes = [n for n in network.successors(roadm)
if not (isinstance(n, elements.Transceiver) or isinstance(n, elements.Fused) or isinstance(n, elements.Edfa))]
# no amplification for fused spans or TRX
for next_node in next_nodes:
network.remove_edge(roadm, next_node)
amp = elements.Edfa(
uid=f'Edfa_booster_{roadm.uid}_to_{next_node.uid}',
params={},
metadata={
'location': {
'latitude': roadm.lat,
'longitude': roadm.lng,
'city': roadm.loc.city,
'region': roadm.loc.region,
}
},
operational={
'gain_target': None,
'tilt_target': 0,
})
network.add_node(amp)
network.add_edge(roadm, amp, weight=0.01)
network.add_edge(amp, next_node, weight=0.01)
def add_roadm_preamp(network, roadm):
prev_nodes = [n for n in network.predecessors(roadm)
if not (isinstance(n, elements.Transceiver) or isinstance(n, elements.Fused) or isinstance(n, elements.Edfa))]
# no amplification for fused spans or TRX
for prev_node in prev_nodes:
network.remove_edge(prev_node, roadm)
amp = elements.Edfa(
uid=f'Edfa_preamp_{roadm.uid}_from_{prev_node.uid}',
params={},
metadata={
'location': {
'latitude': roadm.lat,
'longitude': roadm.lng,
'city': roadm.loc.city,
'region': roadm.loc.region,
}
},
operational={
'gain_target': None,
'tilt_target': 0,
})
network.add_node(amp)
if isinstance(prev_node, elements.Fiber):
edgeweight = prev_node.params.length
else:
edgeweight = 0.01
network.add_edge(prev_node, amp, weight=edgeweight)
network.add_edge(amp, roadm, weight=0.01)
def add_inline_amplifier(network, fiber):
next_node = next(network.successors(fiber))
if isinstance(next_node, elements.Fiber) or isinstance(next_node, elements.RamanFiber):
# no amplification for fused spans or TRX
network.remove_edge(fiber, next_node)
amp = elements.Edfa(
uid=f'Edfa_{fiber.uid}',
params={},
metadata={
'location': {
'latitude': (fiber.lat + next_node.lat) / 2,
'longitude': (fiber.lng + next_node.lng) / 2,
'city': fiber.loc.city,
'region': fiber.loc.region,
}
},
operational={
'gain_target': None,
'tilt_target': 0,
})
network.add_node(amp)
network.add_edge(fiber, amp, weight=fiber.params.length)
network.add_edge(amp, next_node, weight=0.01)
def calculate_new_length(fiber_length, bounds, target_length):
if fiber_length < bounds.stop:
return fiber_length, 1
n_spans2 = int(fiber_length // target_length)
n_spans1 = n_spans2 + 1
length1 = fiber_length / n_spans1
length2 = fiber_length / n_spans2
if (bounds.start <= length1 <= bounds.stop) and not(bounds.start <= length2 <= bounds.stop):
return (length1, n_spans1)
elif (bounds.start <= length2 <= bounds.stop) and not(bounds.start <= length1 <= bounds.stop):
return (length2, n_spans2)
elif target_length - length1 < length2 - target_length:
return (length1, n_spans1)
else:
return (length2, n_spans2)
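# Worked example (illustrative numbers, not from a real topology): for
# fiber_length=200_000 m, bounds=range(50_000, 150_000) and target_length=90_000 m,
# the candidates are 2 spans of 100 km or 3 spans of ~66.7 km; both lie inside the
# bounds and 100 km is closer to the 90 km target, so the function returns (100_000.0, 2):
# assert calculate_new_length(200_000, range(50_000, 150_000), 90_000) == (100_000.0, 2)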
def split_fiber(network, fiber, bounds, target_length, equipment):
new_length, n_spans = calculate_new_length(fiber.params.length, bounds, target_length)
if n_spans == 1:
return
try:
next_node = next(network.successors(fiber))
prev_node = next(network.predecessors(fiber))
except StopIteration:
raise NetworkTopologyError(f'Fiber {fiber.uid} is not properly connected, please check network topology')
network.remove_node(fiber)
fiber.params.length = new_length
xpos = [prev_node.lng + (next_node.lng - prev_node.lng) * (n + 0.5) / n_spans for n in range(n_spans)]
ypos = [prev_node.lat + (next_node.lat - prev_node.lat) * (n + 0.5) / n_spans for n in range(n_spans)]
for span, lng, lat in zip(range(n_spans), xpos, ypos):
new_span = elements.Fiber(uid=f'{fiber.uid}_({span+1}/{n_spans})',
type_variety=fiber.type_variety,
metadata={
'location': {
'latitude': lat,
'longitude': lng,
'city': fiber.loc.city,
'region': fiber.loc.region,
}
},
params=fiber.params.asdict())
if isinstance(prev_node, elements.Fiber):
edgeweight = prev_node.params.length
else:
edgeweight = 0.01
network.add_edge(prev_node, new_span, weight=edgeweight)
prev_node = new_span
if isinstance(prev_node, elements.Fiber):
edgeweight = prev_node.params.length
else:
edgeweight = 0.01
network.add_edge(prev_node, next_node, weight=edgeweight)
def add_connector_loss(network, fibers, default_con_in, default_con_out, EOL):
for fiber in fibers:
try:
next_node = next(network.successors(fiber))
except StopIteration:
raise NetworkTopologyError(f'Fiber {fiber.uid} is not properly connected, please check network topology')
if fiber.params.con_in is None:
fiber.params.con_in = default_con_in
if fiber.params.con_out is None:
fiber.params.con_out = default_con_out
if not isinstance(next_node, elements.Fused):
fiber.params.con_out += EOL
def add_fiber_padding(network, fibers, padding):
"""last_fibers = (fiber for n in network.nodes()
if not (isinstance(n, elements.Fiber) or isinstance(n, elements.Fused))
for fiber in network.predecessors(n)
if isinstance(fiber, elements.Fiber))"""
for fiber in fibers:
try:
next_node = next(network.successors(fiber))
except StopIteration:
raise NetworkTopologyError(f'Fiber {fiber.uid} is not properly connected, please check network topology')
if isinstance(next_node, elements.Fused):
continue
this_span_loss = span_loss(network, fiber)
if this_span_loss < padding:
# add a padding att_in at the input of the 1st fiber:
# address the case when several fibers are spliced together
first_fiber = find_first_node(network, fiber)
# in order to support no booster , fused might be placed
# just after a roadm: need to check that first_fiber is really a fiber
if isinstance(first_fiber, elements.Fiber):
first_fiber.params.att_in = first_fiber.params.att_in + padding - this_span_loss
def build_network(network, equipment, pref_ch_db, pref_total_db, no_insert_edfas=False):
default_span_data = equipment['Span']['default']
max_length = int(convert_length(default_span_data.max_length, default_span_data.length_units))
min_length = max(int(default_span_data.padding / 0.2 * 1e3), 50_000)
bounds = range(min_length, max_length)
target_length = max(min_length, min(max_length, 90_000))
# set roadm loss for gain_mode before to build network
fibers = [f for f in network.nodes() if isinstance(f, elements.Fiber)]
add_connector_loss(network, fibers, default_span_data.con_in, default_span_data.con_out, default_span_data.EOL)
# don't group split fiber and add amp in the same loop
# =>for code clarity (at the expense of speed):
roadms = [r for r in network.nodes() if isinstance(r, elements.Roadm)]
if not no_insert_edfas:
for fiber in fibers:
split_fiber(network, fiber, bounds, target_length, equipment)
for roadm in roadms:
add_roadm_preamp(network, roadm)
add_roadm_booster(network, roadm)
fibers = [f for f in network.nodes() if isinstance(f, elements.Fiber)]
for fiber in fibers:
add_inline_amplifier(network, fiber)
add_fiber_padding(network, fibers, default_span_data.padding)
for roadm in roadms:
set_egress_amplifier(network, roadm, equipment, pref_ch_db, pref_total_db)
trx = [t for t in network.nodes() if isinstance(t, elements.Transceiver)]
for t in trx:
next_node = next(network.successors(t), None)
if next_node and not isinstance(next_node, elements.Roadm):
set_egress_amplifier(network, t, equipment, 0, pref_total_db)
| 43.795332
| 140
| 0.626384
|
12b41a788b03bd1ea7542820c3d5db1b25379b4b
| 461
|
py
|
Python
|
tests/system/test_base.py
|
chuck-horowitz/pvbeat
|
eae941db478e585dd42ba43cd84ea27b44c9e90a
|
[
"Apache-2.0"
] | null | null | null |
tests/system/test_base.py
|
chuck-horowitz/pvbeat
|
eae941db478e585dd42ba43cd84ea27b44c9e90a
|
[
"Apache-2.0"
] | null | null | null |
tests/system/test_base.py
|
chuck-horowitz/pvbeat
|
eae941db478e585dd42ba43cd84ea27b44c9e90a
|
[
"Apache-2.0"
] | null | null | null |
from pvbeat import BaseTest
import os
class Test(BaseTest):
def test_base(self):
"""
Basic test with exiting Pvbeat normally
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
pvbeat_proc = self.start_beat()
self.wait_until(lambda: self.log_contains("pvbeat is running"))
exit_code = pvbeat_proc.kill_and_wait()
assert exit_code == 0
| 23.05
| 71
| 0.618221
|
44d8dbbb7d1a182d0dd6cb70e30056d4062a01d7
| 8,636
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_load_balancer_backend_address_pools_operations.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2019-05-17T21:24:53.000Z
|
2020-02-12T11:13:42.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_load_balancer_backend_address_pools_operations.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 15
|
2019-07-12T18:18:04.000Z
|
2019-07-25T20:55:51.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_load_balancer_backend_address_pools_operations.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerBackendAddressPoolsOperations(object):
"""LoadBalancerBackendAddressPoolsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.LoadBalancerBackendAddressPoolListResult"]
"""Gets all the load balancer backed address pools.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerBackendAddressPoolListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.LoadBalancerBackendAddressPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LoadBalancerBackendAddressPoolListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerBackendAddressPoolListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools'} # type: ignore
def get(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
backend_address_pool_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.BackendAddressPool"
"""Gets load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackendAddressPool, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.BackendAddressPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.BackendAddressPool"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
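# Illustrative call pattern (assumes azure-identity credentials and a valid
# subscription; the resource names below are placeholders, not real values):
# from azure.identity import DefaultAzureCredential
# from azure.mgmt.network import NetworkManagementClient
# client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
# for pool in client.load_balancer_backend_address_pools.list("<resource-group>", "<load-balancer>"):
#     print(pool.name)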
| 47.977778
| 218
| 0.672186
|
edecd83416736779badde7e006edc43adac3569e
| 54,086
|
py
|
Python
|
astropy/time/formats.py
|
gpdf/astropy
|
c487542611276a3361a38d6c4b3954dcd637f847
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/time/formats.py
|
gpdf/astropy
|
c487542611276a3361a38d6c4b3954dcd637f847
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/time/formats.py
|
gpdf/astropy
|
c487542611276a3361a38d6c4b3954dcd637f847
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import collections
import fnmatch
import time
import re
import datetime
import warnings
from collections import OrderedDict, defaultdict
import numpy as np
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy import units as u
from astropy import _erfa as erfa
from .utils import day_frac, quantity_day_frac, two_sum, two_product
__all__ = ['TimeFormat', 'TimeJD', 'TimeMJD', 'TimeFromEpoch', 'TimeUnix',
'TimeCxcSec', 'TimeGPS', 'TimeDecimalYear',
'TimePlotDate', 'TimeUnique', 'TimeDatetime', 'TimeString',
'TimeISO', 'TimeISOT', 'TimeFITS', 'TimeYearDayTime',
'TimeEpochDate', 'TimeBesselianEpoch', 'TimeJulianEpoch',
'TimeDeltaFormat', 'TimeDeltaSec', 'TimeDeltaJD',
'TimeEpochDateString', 'TimeBesselianEpochString',
'TimeJulianEpochString', 'TIME_FORMATS', 'TIME_DELTA_FORMATS',
'TimezoneInfo', 'TimeDeltaDatetime', 'TimeDatetime64', 'TimeYMDHMS']
__doctest_skip__ = ['TimePlotDate']
# These both get filled in at end after TimeFormat subclasses defined.
# Use an OrderedDict to fix the order in which formats are tried.
# This ensures, e.g., that 'isot' gets tried before 'fits'.
TIME_FORMATS = OrderedDict()
TIME_DELTA_FORMATS = OrderedDict()
# Translations between deprecated FITS timescales defined by
# Rots et al. 2015, A&A 574:A36, and timescales used here.
FITS_DEPRECATED_SCALES = {'TDT': 'tt', 'ET': 'tt',
'GMT': 'utc', 'UT': 'utc', 'IAT': 'tai'}
def _regexify_subfmts(subfmts):
"""
Iterate through each of the sub-formats and try substituting simple
regular expressions for the strptime codes for year, month, day-of-month,
hour, minute, second. If no % characters remain then turn the final string
into a compiled regex. This assumes time formats do not have a % in them.
This is done both to speed up parsing of strings and to allow mixed formats
where strptime does not quite work well enough.
"""
new_subfmts = []
for subfmt_tuple in subfmts:
subfmt_in = subfmt_tuple[1]
for strptime_code, regex in (('%Y', r'(?P<year>\d\d\d\d)'),
('%m', r'(?P<mon>\d{1,2})'),
('%d', r'(?P<mday>\d{1,2})'),
('%H', r'(?P<hour>\d{1,2})'),
('%M', r'(?P<min>\d{1,2})'),
('%S', r'(?P<sec>\d{1,2})')):
subfmt_in = subfmt_in.replace(strptime_code, regex)
if '%' not in subfmt_in:
subfmt_tuple = (subfmt_tuple[0],
re.compile(subfmt_in + '$'),
subfmt_tuple[2])
new_subfmts.append(subfmt_tuple)
return tuple(new_subfmts)
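# Minimal sketch (not part of the original module) showing what
# _regexify_subfmts does to a single strptime-style subformat: the %Y/%m/%d
# codes become named groups and, since no '%' remains, the result is compiled.
def _demo_regexify_subfmts():
    subfmts = (('date', '%Y-%m-%d', '{year:d}-{mon:02d}-{day:02d}'),)
    name, regex, out_fmt = _regexify_subfmts(subfmts)[0]
    # regex.pattern == r'(?P<year>\d\d\d\d)-(?P<mon>\d{1,2})-(?P<mday>\d{1,2})$'
    return name, regex.pattern, out_fmt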
class TimeFormatMeta(type):
"""
Metaclass that adds `TimeFormat` and `TimeDeltaFormat` to the
`TIME_FORMATS` and `TIME_DELTA_FORMATS` registries, respectively.
"""
_registry = TIME_FORMATS
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
# Register time formats that have a name, but leave out astropy_time since
# it is not a user-accessible format and is only used for initialization into
# a different format.
if 'name' in members and cls.name != 'astropy_time':
# FIXME: check here that we're not introducing a collision with
# an existing method or attribute; problem is it could be either
# astropy.time.Time or astropy.time.TimeDelta, and at the point
# where this is run neither of those classes have necessarily been
# constructed yet.
mcls._registry[cls.name] = cls
if 'subfmts' in members:
cls.subfmts = _regexify_subfmts(members['subfmts'])
return cls
class TimeFormat(metaclass=TimeFormatMeta):
"""
Base class for time representations.
Parameters
----------
val1 : numpy ndarray, list, number, str, or bytes
Values to initialize the time or times. Bytes are decoded as ascii.
val2 : numpy ndarray, list, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
scale : str
Time scale of input value(s)
precision : int
Precision for seconds as floating point
in_subfmt : str
Select subformat for inputting string times
out_subfmt : str
Select subformat for outputting string times
from_jd : bool
If true then val1, val2 are jd1, jd2
"""
_default_scale = 'utc' # As of astropy 0.4
def __init__(self, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
self.scale = scale # validation of scale done later with _check_scale
self.precision = precision
self.in_subfmt = in_subfmt
self.out_subfmt = out_subfmt
if from_jd:
self.jd1 = val1
self.jd2 = val2
else:
val1, val2 = self._check_val_type(val1, val2)
self.set_jds(val1, val2)
def __len__(self):
return len(self.jd1)
@property
def scale(self):
"""Time scale"""
self._scale = self._check_scale(self._scale)
return self._scale
@scale.setter
def scale(self, val):
self._scale = val
def mask_if_needed(self, value):
if self.masked:
value = np.ma.array(value, mask=self.mask, copy=False)
return value
@property
def mask(self):
if 'mask' not in self.cache:
self.cache['mask'] = np.isnan(self.jd2)
if self.cache['mask'].shape:
self.cache['mask'].flags.writeable = False
return self.cache['mask']
@property
def masked(self):
if 'masked' not in self.cache:
self.cache['masked'] = bool(np.any(self.mask))
return self.cache['masked']
@property
def jd2_filled(self):
return np.nan_to_num(self.jd2) if self.masked else self.jd2
@lazyproperty
def cache(self):
"""
Return the cache associated with this instance.
"""
return defaultdict(dict)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
# val1 cannot contain nan, but val2 can contain nan
ok1 = (val1.dtype.kind == 'f' and val1.dtype.itemsize == 8 and np.all(np.isfinite(val1)) or
val1.size == 0)
ok2 = val2 is None or (val2.dtype.kind == 'f' and val2.dtype.itemsize == 8 and
not np.any(np.isinf(val2))) or val2.size == 0
if not (ok1 and ok2):
raise TypeError('Input values for {} class must be finite doubles'
.format(self.name))
if getattr(val1, 'unit', None) is not None:
# Convert any quantity-likes to days first, attempting to be
# careful with the conversion, so that, e.g., large numbers of
# seconds get converted without losing precision because
# 1/86400 is not exactly representable as a float.
val1 = u.Quantity(val1, copy=False)
if val2 is not None:
val2 = u.Quantity(val2, copy=False)
try:
val1, val2 = quantity_day_frac(val1, val2)
except u.UnitsError:
raise u.UnitConversionError(
"only quantities with time units can be "
"used to instantiate Time instances.")
# We now have days, but the format may expect another unit.
# On purpose, multiply with 1./day_unit because typically it is
# 1./erfa.DAYSEC, and inverting it recovers the integer.
# (This conversion will get undone in format's set_jds, hence
# there may be room for optimizing this.)
factor = 1. / getattr(self, 'unit', 1.)
if factor != 1.:
val1, carry = two_product(val1, factor)
carry += val2 * factor
val1, val2 = two_sum(val1, carry)
elif getattr(val2, 'unit', None) is not None:
raise TypeError('Cannot mix float and Quantity inputs')
if val2 is None:
val2 = np.zeros_like(val1)
def asarray_or_scalar(val):
"""
Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray
or a Python or numpy scalar.
"""
return np.asarray(val) if isinstance(val, np.ndarray) else val
return asarray_or_scalar(val1), asarray_or_scalar(val2)
def _check_scale(self, scale):
"""
Return a validated scale value.
If there is a class attribute 'scale' then that defines the default /
required time scale for this format. In this case if a scale value was
provided that needs to match the class default, otherwise return
the class default.
Otherwise just make sure that scale is in the allowed list of
scales. Provide a different error message if `None` (no value) was
supplied.
"""
if scale is None:
scale = self._default_scale
if scale not in TIME_SCALES:
raise ScaleValueError("Scale value '{}' not in "
"allowed values {}"
.format(scale, TIME_SCALES))
return scale
def set_jds(self, val1, val2):
"""
Set internal jd1 and jd2 from val1 and val2. Must be provided
by derived classes.
"""
raise NotImplementedError
def to_value(self, parent=None):
"""
Return time representation from internal jd1 and jd2. This is
the base method that ignores ``parent`` and requires that
subclasses implement the ``value`` property. Subclasses that
require ``parent`` or have other optional args for ``to_value``
should compute and return the value directly.
"""
return self.mask_if_needed(self.value)
@property
def value(self):
raise NotImplementedError
class TimeJD(TimeFormat):
"""
Julian Date time format.
This represents the number of days since the beginning of
the Julian Period.
For example, 2451544.5 in JD is midnight on January 1, 2000.
"""
name = 'jd'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2)
@property
def value(self):
return self.jd1 + self.jd2
class TimeMJD(TimeFormat):
"""
Modified Julian Date time format.
This represents the number of days since midnight on November 17, 1858.
For example, 51544.0 in MJD is midnight on January 1, 2000.
"""
name = 'mjd'
def set_jds(self, val1, val2):
# TODO - this routine and vals should be Cythonized to follow the ERFA
# convention of preserving precision by adding to the larger of the two
# values in a vectorized operation. But in most practical cases the
# first one is probably biggest.
self._check_scale(self._scale) # Validate scale.
jd1, jd2 = day_frac(val1, val2)
jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h)
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
return (self.jd1 - erfa.DJM0) + self.jd2
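# Minimal usage sketch (not part of the original module), assuming the public
# astropy.time.Time API: MJD 51544.0 corresponds to the ISO time quoted in the
# TimeMJD docstring above.
def _demo_mjd():
    from astropy.time import Time
    t = Time(51544.0, format='mjd', scale='utc')
    return t.iso  # '2000-01-01 00:00:00.000'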
class TimeDecimalYear(TimeFormat):
"""
Time as a decimal year, with integer values corresponding to midnight
of the first day of each year. For example 2000.5 corresponds to the
ISO time '2000-07-02 00:00:00'.
"""
name = 'decimalyear'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
sum12, err12 = two_sum(val1, val2)
iy_start = np.trunc(sum12).astype(int)
extra, y_frac = two_sum(sum12, -iy_start)
y_frac += extra + err12
val = (val1 + val2).astype(np.double)
iy_start = np.trunc(val).astype(int)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(y_frac)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode('ascii')
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
ihr, imin, isec)
t_start = Time(jd1_start, jd2_start, scale=self.scale, format='jd')
t_end = Time(jd1_end, jd2_end, scale=self.scale, format='jd')
t_frac = t_start + (t_end - t_start) * y_frac
self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)
@property
def value(self):
scale = self.scale.upper().encode('ascii')
iy_start, ims, ids, ihmsfs = erfa.d2dtf(scale, 0, # precision=0
self.jd1, self.jd2_filled)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(self.jd1)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode('ascii')
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
ihr, imin, isec)
dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start)
dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start)
decimalyear = iy_start + dt / dt_end
return decimalyear
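# Minimal usage sketch (not part of the original module), assuming the public
# astropy.time.Time API: 2000.5 lands halfway through the 366-day leap year
# 2000, i.e. the '2000-07-02 00:00:00' value quoted in the docstring above.
def _demo_decimalyear():
    from astropy.time import Time
    t = Time(2000.5, format='decimalyear', scale='utc')
    return t.iso  # '2000-07-02 00:00:00.000'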
class TimeFromEpoch(TimeFormat):
"""
Base class for times that represent the interval from a particular
epoch as a floating point multiple of a unit time interval (e.g. seconds
or days).
"""
def __init__(self, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
self.scale = scale
# Initialize the reference epoch (a single time defined in subclasses)
epoch = Time(self.epoch_val, self.epoch_val2, scale=self.epoch_scale,
format=self.epoch_format)
self.epoch = epoch
# Now create the TimeFormat object as normal
super().__init__(val1, val2, scale, precision, in_subfmt, out_subfmt,
from_jd)
def set_jds(self, val1, val2):
"""
Initialize the internal jd1 and jd2 attributes given val1 and val2.
For a TimeFromEpoch subclass like TimeUnix these will be floats giving
the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00).
"""
# Form new JDs based on epoch time + time from epoch (converted to JD).
# One subtlety that might not be obvious is that 1.000 Julian days in
# UTC can be 86400 or 86401 seconds. For the TimeUnix format the
# assumption is that every day is exactly 86400 seconds, so this is, in
# principle, doing the math incorrectly, *except* that it matches the
# definition of Unix time which does not include leap seconds.
# note: use divisor=1./self.unit, since this is either 1 or 1/86400,
# and 1/86400 is not exactly representable as a float64, so multiplying
# by that will cause rounding errors. (But inverting it as a float64
# recovers the exact number)
day, frac = day_frac(val1, val2, divisor=1. / self.unit)
jd1 = self.epoch.jd1 + day
jd2 = self.epoch.jd2 + frac
# Create a temporary Time object corresponding to the new (jd1, jd2) in
# the epoch scale (e.g. UTC for TimeUnix) then convert that to the
# desired time scale for this object.
#
# A known limitation is that the transform from self.epoch_scale to
# self.scale cannot involve any metadata like lat or lon.
try:
tm = getattr(Time(jd1, jd2, scale=self.epoch_scale,
format='jd'), self.scale)
except Exception as err:
raise ScaleValueError("Cannot convert from '{}' epoch scale '{}' "
"to specified scale '{}', got error:\n{}"
.format(self.name, self.epoch_scale,
self.scale, err))
self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2)
def to_value(self, parent=None):
# Make sure that scale is the same as epoch scale so we can just
# subtract the epoch and convert
if self.scale != self.epoch_scale:
if parent is None:
raise ValueError('cannot compute value without parent Time object')
try:
tm = getattr(parent, self.epoch_scale)
except Exception as err:
raise ScaleValueError("Cannot convert from '{}' epoch scale '{}' "
"to specified scale '{}', got error:\n{}"
.format(self.name, self.epoch_scale,
self.scale, err))
jd1, jd2 = tm._time.jd1, tm._time.jd2
else:
jd1, jd2 = self.jd1, self.jd2
time_from_epoch = ((jd1 - self.epoch.jd1) +
(jd2 - self.epoch.jd2)) / self.unit
return self.mask_if_needed(time_from_epoch)
value = property(to_value)
@property
def _default_scale(self):
return self.epoch_scale
class TimeUnix(TimeFromEpoch):
"""
Unix time: seconds from 1970-01-01 00:00:00 UTC.
For example, 946684800.0 in Unix time is midnight on January 1, 2000.
NOTE: this quantity is not exactly unix time and differs from the strict
POSIX definition by up to 1 second on days with a leap second. POSIX
unix time actually jumps backward by 1 second at midnight on leap second
days while this class value is monotonically increasing at 86400 seconds
per UTC day.
"""
name = 'unix'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1970-01-01 00:00:00'
epoch_val2 = None
epoch_scale = 'utc'
epoch_format = 'iso'
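# Minimal usage sketch (not part of the original module), assuming the public
# astropy.time.Time API: the Unix value quoted in the TimeUnix docstring
# round-trips to the expected UTC ISO string (the format's default scale is 'utc').
def _demo_unix():
    from astropy.time import Time
    t = Time(946684800.0, format='unix')
    return t.iso  # '2000-01-01 00:00:00.000'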
class TimeCxcSec(TimeFromEpoch):
"""
Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT.
For example, 63072064.184 is midnight on January 1, 2000.
"""
name = 'cxcsec'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1998-01-01 00:00:00'
epoch_val2 = None
epoch_scale = 'tt'
epoch_format = 'iso'
class TimeGPS(TimeFromEpoch):
"""GPS time: seconds from 1980-01-06 00:00:00 UTC
For example, 630720013.0 is midnight on January 1, 2000.
Notes
=====
This implementation is strictly a representation of the number of seconds
(including leap seconds) since midnight UTC on 1980-01-06. GPS can also be
considered as a time scale which is offset from TAI by a fixed 19 seconds
(to within about 100 nanoseconds).
For details, see https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer
"""
name = 'gps'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1980-01-06 00:00:19'
# above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai
epoch_val2 = None
epoch_scale = 'tai'
epoch_format = 'iso'
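# Minimal usage sketch (not part of the original module), assuming the public
# astropy.time.Time API: the GPS value quoted in the TimeGPS docstring maps to
# midnight UTC on 2000-01-01 (the format's default scale is 'tai').
def _demo_gps():
    from astropy.time import Time
    t = Time(630720013.0, format='gps')
    return t.utc.iso  # '2000-01-01 00:00:00.000'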
class TimePlotDate(TimeFromEpoch):
"""
Matplotlib `~matplotlib.pyplot.plot_date` input:
1 + number of days from 0001-01-01 00:00:00 UTC
This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date`
function::
>>> import matplotlib.pyplot as plt
>>> jyear = np.linspace(2000, 2001, 20)
>>> t = Time(jyear, format='jyear', scale='utc')
>>> plt.plot_date(t.plot_date, jyear)
>>> plt.gcf().autofmt_xdate() # orient date labels at a slant
>>> plt.draw()
For example, 730120.0003703703 is midnight on January 1, 2000.
"""
# This corresponds to the zero reference time for matplotlib plot_date().
# Note that TAI and UTC are equivalent at the reference time.
name = 'plot_date'
unit = 1.0
epoch_val = 1721424.5 # Time('0001-01-01 00:00:00', scale='tai').jd - 1
epoch_val2 = None
epoch_scale = 'utc'
epoch_format = 'jd'
class TimeUnique(TimeFormat):
"""
Base class for time formats that can uniquely create a time object
without requiring an explicit format specifier. This class does
nothing but provide inheritance to identify a class as unique.
"""
class TimeAstropyTime(TimeUnique):
"""
Instantiate date from an Astropy Time object (or list thereof).
This is purely for instantiating from a Time object. The output
format is the same as the first time instance.
"""
name = 'astropy_time'
def __new__(cls, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
"""
Use __new__ instead of __init__ to output a class instance that
is the same as the class of the first Time object in the list.
"""
val1_0 = val1.flat[0]
if not (isinstance(val1_0, Time) and all(type(val) is type(val1_0)
for val in val1.flat)):
raise TypeError('Input values for {} class must all be same '
'astropy Time type.'.format(cls.name))
if scale is None:
scale = val1_0.scale
if val1.shape:
vals = [getattr(val, scale)._time for val in val1]
jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals])
jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals])
else:
val = getattr(val1_0, scale)._time
jd1, jd2 = val.jd1, val.jd2
OutTimeFormat = val1_0._time.__class__
self = OutTimeFormat(jd1, jd2, scale, precision, in_subfmt, out_subfmt,
from_jd=True)
return self
class TimeDatetime(TimeUnique):
"""
Represent date as Python standard library `~datetime.datetime` object
Example::
>>> from astropy.time import Time
>>> from datetime import datetime
>>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc')
>>> t.iso
'2000-01-02 12:00:00.000'
>>> t.tt.datetime
datetime.datetime(2000, 1, 2, 12, 1, 4, 184000)
"""
name = 'datetime'
def _check_val_type(self, val1, val2):
if not all(isinstance(val, datetime.datetime) for val in val1.flat):
raise TypeError('Input values for {} class must be '
'datetime objects'.format(self.name))
if val2 is not None:
raise ValueError(
f'{self.name} objects do not accept a val2 but you provided {val2}')
return val1, None
def set_jds(self, val1, val2):
"""Convert datetime object contained in val1 to jd1, jd2"""
# Iterate through the datetime objects, getting year, month, etc.
iterator = np.nditer([val1, None, None, None, None, None, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=[None] + 5*[np.intc] + [np.double])
for val, iy, im, id, ihr, imin, dsec in iterator:
dt = val.item()
if dt.tzinfo is not None:
dt = (dt - dt.utcoffset()).replace(tzinfo=None)
iy[...] = dt.year
im[...] = dt.month
id[...] = dt.day
ihr[...] = dt.hour
imin[...] = dt.minute
dsec[...] = dt.second + dt.microsecond / 1e6
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
*iterator.operands[1:])
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, timezone=None, parent=None):
"""
Convert to (potentially timezone-aware) `~datetime.datetime` object.
If ``timezone`` is not ``None``, return a timezone-aware datetime
object.
Parameters
----------
timezone : {`~datetime.tzinfo`, None} (optional)
If not `None`, return timezone-aware datetime.
Returns
-------
`~datetime.datetime`
If ``timezone`` is not ``None``, output will be timezone-aware.
"""
if timezone is not None:
if self._scale != 'utc':
raise ScaleValueError("scale is {}, must be 'utc' when timezone "
"is supplied.".format(self._scale))
# Rather than define a value property directly, we have a function,
# since we want to be able to pass in timezone information.
scale = self.scale.upper().encode('ascii')
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 6, # 6 for microsec
self.jd1, self.jd2_filled)
ihrs = ihmsfs['h']
imins = ihmsfs['m']
isecs = ihmsfs['s']
ifracs = ihmsfs['f']
iterator = np.nditer([iys, ims, ids, ihrs, imins, isecs, ifracs, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=7*[None] + [object])
for iy, im, id, ihr, imin, isec, ifracsec, out in iterator:
if isec >= 60:
raise ValueError('Time {} is within a leap second but datetime '
'does not support leap seconds'
.format((iy, im, id, ihr, imin, isec, ifracsec)))
if timezone is not None:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec,
tzinfo=TimezoneInfo()).astimezone(timezone)
else:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec)
return self.mask_if_needed(iterator.operands[-1])
value = property(to_value)
class TimeYMDHMS(TimeUnique):
"""
ymdhms: A Time format to represent Time as year, month, day, hour,
minute, second (thus the name ymdhms).
Acceptable inputs must have keys or column names in the "YMDHMS" set of
``year``, ``month``, ``day``, ``hour``, ``minute``, ``second``:
- Dict with keys in the YMDHMS set
- NumPy structured array, record array or astropy Table, or single row
of those types, with column names in the YMDHMS set
One can supply a subset of the YMDHMS values, for instance only 'year',
'month', and 'day'. Inputs have the following defaults::
'month': 1, 'day': 1, 'hour': 0, 'minute': 0, 'second': 0
When the input is supplied as a ``dict`` then each value can be either a
scalar value or an array. The values will be broadcast to a common shape.
Example::
>>> from astropy.time import Time
>>> t = Time({'year': 2015, 'month': 2, 'day': 3,
... 'hour': 12, 'minute': 13, 'second': 14.567},
... scale='utc')
>>> t.iso
'2015-02-03 12:13:14.567'
>>> t.ymdhms.year
2015
"""
name = 'ymdhms'
def _check_val_type(self, val1, val2):
"""
This checks inputs for the YMDHMS format.
It is a bit more complex than most format checkers because of the flexible
input that is allowed. Also, it actually coerces ``val1`` into an appropriate
dict of ndarrays that can be used easily by ``set_jds()``. This is useful
because it makes it easy to get default values in that routine.
Parameters
----------
val1 : ndarray or None
val2 : ndarray or None
Returns
-------
val1_as_dict, val2 : val1 as dict or None, val2 is always None
"""
if val2 is not None:
raise ValueError('val2 must be None for ymdhms format')
ymdhms = ['year', 'month', 'day', 'hour', 'minute', 'second']
if val1.dtype.names:
# Convert to a dict of ndarray
val1_as_dict = {name: val1[name] for name in val1.dtype.names}
elif val1.shape == (0,):
# Input was empty list [], so set to None and set_jds will handle this
return None, None
elif (val1.dtype.kind == 'O' and
val1.shape == () and
isinstance(val1.item(), dict)):
# Code gets here for input as a dict. The dict input
# can be either scalar values or N-d arrays.
# Extract the item (which is a dict) and broadcast values to the
# same shape here.
names = val1.item().keys()
values = val1.item().values()
val1_as_dict = {name: value for name, value
in zip(names, np.broadcast_arrays(*values))}
else:
raise ValueError('input must be dict or table-like')
# Check that the key names now are good.
names = val1_as_dict.keys()
required_names = ymdhms[:len(names)]
def comma_repr(vals):
return ', '.join(repr(val) for val in vals)
bad_names = set(names) - set(ymdhms)
if bad_names:
raise ValueError(f'{comma_repr(bad_names)} not allowed as YMDHMS key name(s)')
if set(names) != set(required_names):
raise ValueError(f'for {len(names)} input key names '
f'you must supply {comma_repr(required_names)}')
return val1_as_dict, val2
def set_jds(self, val1, val2):
if val1 is None:
# Input was empty list []
jd1 = np.array([], dtype=np.float64)
jd2 = np.array([], dtype=np.float64)
else:
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
val1['year'],
val1.get('month', 1),
val1.get('day', 1),
val1.get('hour', 0),
val1.get('minute', 0),
val1.get('second', 0))
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
scale = self.scale.upper().encode('ascii')
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 9,
self.jd1, self.jd2_filled)
out = np.empty(self.jd1.shape, dtype=[('year', 'i4'),
('month', 'i4'),
('day', 'i4'),
('hour', 'i4'),
('minute', 'i4'),
('second', 'f8')])
out['year'] = iys
out['month'] = ims
out['day'] = ids
out['hour'] = ihmsfs['h']
out['minute'] = ihmsfs['m']
out['second'] = ihmsfs['s'] + ihmsfs['f'] * 10**(-9)
out = out.view(np.recarray)
return self.mask_if_needed(out)
class TimezoneInfo(datetime.tzinfo):
"""
Subclass of the `~datetime.tzinfo` object, used in the
to_datetime method to specify timezones.
It may be safer in most cases to use a timezone database package like
pytz rather than defining your own timezones - this class is mainly
a workaround for users without pytz.
"""
@u.quantity_input(utc_offset=u.day, dst=u.day)
def __init__(self, utc_offset=0*u.day, dst=0*u.day, tzname=None):
"""
Parameters
----------
utc_offset : `~astropy.units.Quantity` (optional)
Offset from UTC in days. Defaults to zero.
dst : `~astropy.units.Quantity` (optional)
Daylight Savings Time offset in days. Defaults to zero
(no daylight savings).
tzname : string, `None` (optional)
Name of timezone
Examples
--------
>>> from datetime import datetime
>>> from astropy.time import TimezoneInfo # Specifies a timezone
>>> import astropy.units as u
>>> utc = TimezoneInfo() # Defaults to UTC
>>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1
>>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour)
>>> print(dt_aware)
2000-01-01 00:00:00+01:00
>>> print(dt_aware.astimezone(utc))
1999-12-31 23:00:00+00:00
"""
if utc_offset == 0 and dst == 0 and tzname is None:
tzname = 'UTC'
self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day))
self._tzname = tzname
self._dst = datetime.timedelta(dst.to_value(u.day))
def utcoffset(self, dt):
return self._utcoffset
def tzname(self, dt):
return str(self._tzname)
def dst(self, dt):
return self._dst
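# Minimal usage sketch (not part of the original module), assuming the public
# astropy.time.Time.to_datetime API: a TimezoneInfo instance can be passed as
# the ``timezone`` argument to get a timezone-aware datetime back.
def _demo_timezoneinfo():
    import astropy.units as u
    from astropy.time import Time, TimezoneInfo
    utc_plus_one = TimezoneInfo(utc_offset=1 * u.hour)
    t = Time('2000-01-01 00:00:00', scale='utc')
    return t.to_datetime(timezone=utc_plus_one)  # 2000-01-01 01:00:00+01:00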
class TimeString(TimeUnique):
"""
Base class for string-like time representations.
This class assumes that anything following the last decimal point to the
right is a fraction of a second.
This is a reference implementation that can be made much faster with effort.
"""
def _check_val_type(self, val1, val2):
if val1.dtype.kind not in ('S', 'U') and val1.size:
raise TypeError('Input values for {} class must be strings'
.format(self.name))
if val2 is not None:
raise ValueError(
f'{self.name} objects do not accept a val2 but you provided {val2}')
return val1, None
def parse_string(self, timestr, subfmts):
"""Read time from a single string, using a set of possible formats."""
# Datetime components required for conversion to JD by ERFA, along
# with the default values.
components = ('year', 'mon', 'mday', 'hour', 'min', 'sec')
defaults = (None, 1, 1, 0, 0, 0)
# Assume that anything following "." on the right side is a
# floating fraction of a second.
try:
idot = timestr.rindex('.')
except Exception:
fracsec = 0.0
else:
timestr, fracsec = timestr[:idot], timestr[idot:]
fracsec = float(fracsec)
for _, strptime_fmt_or_regex, _ in subfmts:
if isinstance(strptime_fmt_or_regex, str):
try:
tm = time.strptime(timestr, strptime_fmt_or_regex)
except ValueError:
continue
else:
vals = [getattr(tm, 'tm_' + component)
for component in components]
else:
tm = re.match(strptime_fmt_or_regex, timestr)
if tm is None:
continue
tm = tm.groupdict()
vals = [int(tm.get(component, default)) for component, default
in zip(components, defaults)]
# Add fractional seconds
vals[-1] = vals[-1] + fracsec
return vals
else:
raise ValueError('Time {} does not match {} format'
.format(timestr, self.name))
def set_jds(self, val1, val2):
"""Parse the time strings contained in val1 and set jd1, jd2"""
# Select subformats based on current self.in_subfmt
subfmts = self._select_subfmts(self.in_subfmt)
# Be liberal in what we accept: convert bytes to ascii.
# Here .item() is needed for arrays with entries of unequal length,
# to strip trailing 0 bytes.
to_string = (str if val1.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([val1, None, None, None, None, None, None],
flags=['zerosize_ok'],
op_dtypes=[None] + 5*[np.intc] + [np.double])
for val, iy, im, id, ihr, imin, dsec in iterator:
val = to_string(val)
iy[...], im[...], id[...], ihr[...], imin[...], dsec[...] = (
self.parse_string(val, subfmts))
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
*iterator.operands[1:])
self.jd1, self.jd2 = day_frac(jd1, jd2)
def str_kwargs(self):
"""
Generator that yields a dict of values corresponding to the
calendar date and time for the internal JD values.
"""
scale = self.scale.upper().encode('ascii')
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, self.precision,
self.jd1, self.jd2_filled)
# Get the str_fmt element of the first allowed output subformat
_, _, str_fmt = self._select_subfmts(self.out_subfmt)[0]
if '{yday:' in str_fmt:
has_yday = True
else:
has_yday = False
yday = None
ihrs = ihmsfs['h']
imins = ihmsfs['m']
isecs = ihmsfs['s']
ifracs = ihmsfs['f']
for iy, im, id, ihr, imin, isec, ifracsec in np.nditer(
[iys, ims, ids, ihrs, imins, isecs, ifracs],
flags=['zerosize_ok']):
if has_yday:
yday = datetime.datetime(iy, im, id).timetuple().tm_yday
yield {'year': int(iy), 'mon': int(im), 'day': int(id),
'hour': int(ihr), 'min': int(imin), 'sec': int(isec),
'fracsec': int(ifracsec), 'yday': yday}
def format_string(self, str_fmt, **kwargs):
"""Write time to a string using a given format.
By default, just interprets str_fmt as a format string,
but subclasses can add to this.
"""
return str_fmt.format(**kwargs)
@property
def value(self):
# Select the first available subformat based on current
# self.out_subfmt
subfmts = self._select_subfmts(self.out_subfmt)
_, _, str_fmt = subfmts[0]
# TODO: fix this ugly hack
if self.precision > 0 and str_fmt.endswith('{sec:02d}'):
str_fmt += '.{fracsec:0' + str(self.precision) + 'd}'
# Try to optimize this later. Can't pre-allocate because length of
# output could change, e.g. year rolls from 999 to 1000.
outs = []
for kwargs in self.str_kwargs():
outs.append(str(self.format_string(str_fmt, **kwargs)))
return np.array(outs).reshape(self.jd1.shape)
def _select_subfmts(self, pattern):
"""
Return a list of subformats where name matches ``pattern`` using
fnmatch.
"""
fnmatchcase = fnmatch.fnmatchcase
subfmts = [x for x in self.subfmts if fnmatchcase(x[0], pattern)]
if len(subfmts) == 0:
raise ValueError(f'No subformats match {pattern}')
return subfmts
class TimeISO(TimeString):
"""
ISO 8601 compliant date-time format "YYYY-MM-DD HH:MM:SS.sss...".
For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'iso'
subfmts = (('date_hms',
'%Y-%m-%d %H:%M:%S',
# XXX To Do - use strftime for output ??
'{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y-%m-%d %H:%M',
'{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}'),
('date',
'%Y-%m-%d',
'{year:d}-{mon:02d}-{day:02d}'))
def parse_string(self, timestr, subfmts):
# Handle trailing 'Z' for UTC time
if timestr.endswith('Z'):
if self.scale != 'utc':
raise ValueError("Time input terminating in 'Z' must have "
"scale='UTC'")
timestr = timestr[:-1]
return super().parse_string(timestr, subfmts)
class TimeISOT(TimeISO):
"""
ISO 8601 compliant date-time format "YYYY-MM-DDTHH:MM:SS.sss...".
This is the same as TimeISO except for a "T" instead of space between
the date and time.
For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'isot'
subfmts = (('date_hms',
'%Y-%m-%dT%H:%M:%S',
'{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y-%m-%dT%H:%M',
'{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}'),
('date',
'%Y-%m-%d',
'{year:d}-{mon:02d}-{day:02d}'))
class TimeYearDayTime(TimeISO):
"""
Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...".
The day-of-year (DOY) goes from 001 to 365 (366 in leap years).
For example, 2000:001:00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'yday'
subfmts = (('date_hms',
'%Y:%j:%H:%M:%S',
'{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y:%j:%H:%M',
'{year:d}:{yday:03d}:{hour:02d}:{min:02d}'),
('date',
'%Y:%j',
'{year:d}:{yday:03d}'))
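# Minimal usage sketch (not part of the original module), assuming the public
# astropy.time.Time API: the same instant expressed in the 'yday' subformat.
def _demo_yday():
    from astropy.time import Time
    t = Time('2000-01-01 00:00:00', scale='utc')
    return t.yday  # '2000:001:00:00:00.000'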
class TimeDatetime64(TimeISOT):
name = 'datetime64'
def _check_val_type(self, val1, val2):
if not val1.dtype.kind == 'M':
if val1.size > 0:
raise TypeError('Input values for {} class must be '
'datetime64 objects'.format(self.name))
else:
val1 = np.array([], 'datetime64[D]')
if val2 is not None:
raise ValueError(
f'{self.name} objects do not accept a val2 but you provided {val2}')
return val1, None
def set_jds(self, val1, val2):
# If there are any masked values in the ``val1`` datetime64 array
# ('NaT') then stub them with a valid date so downstream parse_string
# will work. The value under the mask is arbitrary but a "modern" date
# is good.
mask = np.isnat(val1)
masked = np.any(mask)
if masked:
val1 = val1.copy()
val1[mask] = '2000'
# Make sure M(onth) and Y(ear) dates will parse and convert to bytestring
if val1.dtype.name in ['datetime64[M]', 'datetime64[Y]']:
val1 = val1.astype('datetime64[D]')
val1 = val1.astype('S')
# Standard ISO string parsing now
super().set_jds(val1, val2)
# Finally apply mask if necessary
if masked:
self.jd2[mask] = np.nan
@property
def value(self):
precision = self.precision
self.precision = 9
ret = super().value
self.precision = precision
return ret.astype('datetime64')
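# Minimal usage sketch (not part of the original module), assuming the public
# astropy.time.Time API: a numpy datetime64 value is accepted directly.
def _demo_datetime64():
    import numpy as np
    from astropy.time import Time
    t = Time(np.datetime64('2000-01-02T03:04:05'), format='datetime64', scale='utc')
    return t.iso  # '2000-01-02 03:04:05.000'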
class TimeFITS(TimeString):
"""
FITS format: "[±Y]YYYY-MM-DD[THH:MM:SS[.sss]]".
ISOT but can give signed five-digit year (mostly for negative years);
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date': date
- 'longdate_hms': as 'date_hms', but with signed 5-digit year
- 'longdate': as 'date', but with signed 5-digit year
See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583).
"""
name = 'fits'
subfmts = (
('date_hms',
(r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
'{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('date',
r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)',
'{year:04d}-{mon:02d}-{day:02d}'),
('longdate_hms',
(r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
'{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('longdate',
r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)',
'{year:+06d}-{mon:02d}-{day:02d}'))
# Add the regex that parses the scale and possible realization.
# Support for this is deprecated. Read old style but no longer write
# in this style.
subfmts = tuple(
(subfmt[0],
subfmt[1] + r'(\((?P<scale>\w+)(\((?P<realization>\w+)\))?\))?',
subfmt[2]) for subfmt in subfmts)
def parse_string(self, timestr, subfmts):
"""Read time and deprecated scale if present"""
# Try parsing with any of the allowed sub-formats.
for _, regex, _ in subfmts:
tm = re.match(regex, timestr)
if tm:
break
else:
raise ValueError('Time {} does not match {} format'
.format(timestr, self.name))
tm = tm.groupdict()
# Scale and realization are deprecated and strings in this form
# are no longer created. We issue a warning but still use the value.
if tm['scale'] is not None:
warnings.warn("FITS time strings should no longer have embedded time scale.",
AstropyDeprecationWarning)
# If a scale was given, translate from a possible deprecated
# timescale identifier to the scale used by Time.
fits_scale = tm['scale'].upper()
scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower())
if scale not in TIME_SCALES:
raise ValueError("Scale {!r} is not in the allowed scales {}"
.format(scale, sorted(TIME_SCALES)))
# If no scale was given in the initialiser, set the scale to
# that given in the string. Realization is ignored
# and is only supported to allow old-style strings to be
# parsed.
if self._scale is None:
self._scale = scale
if scale != self.scale:
raise ValueError("Input strings for {} class must all "
"have consistent time scales."
.format(self.name))
return [int(tm['year']), int(tm['mon']), int(tm['mday']),
int(tm.get('hour', 0)), int(tm.get('min', 0)),
float(tm.get('sec', 0.))]
@property
def value(self):
"""Convert times to strings, using signed 5 digit if necessary."""
if 'long' not in self.out_subfmt:
# If we have times before year 0 or after year 9999, we can
# output only in a "long" format, using signed 5-digit years.
jd = self.jd1 + self.jd2
if jd.size and (jd.min() < 1721425.5 or jd.max() >= 5373484.5):
self.out_subfmt = 'long' + self.out_subfmt
return super().value
class TimeEpochDate(TimeFormat):
"""
Base class to support floating point Besselian and Julian epoch dates
"""
_default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(val1 + val2)
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
return jd_to_epoch(self.jd1, self.jd2)
class TimeBesselianEpoch(TimeEpochDate):
"""Besselian Epoch year as floating point value(s) like 1950.0"""
name = 'byear'
epoch_to_jd = 'epb2jd'
jd_to_epoch = 'epb'
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
if hasattr(val1, 'to') and hasattr(val1, 'unit'):
raise ValueError("Cannot use Quantities for 'byear' format, "
"as the interpretation would be ambiguous. "
"Use float with Besselian year instead. ")
# FIXME: is val2 really okay here?
return super()._check_val_type(val1, val2)
class TimeJulianEpoch(TimeEpochDate):
"""Julian Epoch year as floating point value(s) like 2000.0"""
name = 'jyear'
unit = erfa.DJY # 365.25, the Julian year, for conversion to quantities
epoch_to_jd = 'epj2jd'
jd_to_epoch = 'epj'
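# Minimal usage sketch (not part of the original module), assuming the public
# astropy.time.Time API: the Julian epoch J2000.0 is JD 2451545.0 (TT).
def _demo_jyear():
    from astropy.time import Time
    t = Time(2000.0, format='jyear', scale='tt')
    return t.jd  # 2451545.0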
class TimeEpochDateString(TimeString):
"""
Base class to support string Besselian and Julian epoch dates
such as 'B1950.0' or 'J2000.0' respectively.
"""
_default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
epoch_prefix = self.epoch_prefix
# Be liberal in what we accept: convert bytes to ascii.
to_string = (str if val1.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([val1, None], op_dtypes=[val1.dtype, np.double],
flags=['zerosize_ok'])
for val, years in iterator:
try:
time_str = to_string(val)
epoch_type, year_str = time_str[0], time_str[1:]
year = float(year_str)
if epoch_type.upper() != epoch_prefix:
raise ValueError
except (IndexError, ValueError, UnicodeEncodeError):
raise ValueError('Time {} does not match {} format'
.format(time_str, self.name))
else:
years[...] = year
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(iterator.operands[-1])
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
years = jd_to_epoch(self.jd1, self.jd2)
# Use old-style format since it is a factor of 2 faster
str_fmt = self.epoch_prefix + '%.' + str(self.precision) + 'f'
outs = [str_fmt % year for year in years.flat]
return np.array(outs).reshape(self.jd1.shape)
class TimeBesselianEpochString(TimeEpochDateString):
"""Besselian Epoch year as string value(s) like 'B1950.0'"""
name = 'byear_str'
epoch_to_jd = 'epb2jd'
jd_to_epoch = 'epb'
epoch_prefix = 'B'
class TimeJulianEpochString(TimeEpochDateString):
"""Julian Epoch year as string value(s) like 'J2000.0'"""
name = 'jyear_str'
epoch_to_jd = 'epj2jd'
jd_to_epoch = 'epj'
epoch_prefix = 'J'
class TimeDeltaFormatMeta(TimeFormatMeta):
_registry = TIME_DELTA_FORMATS
class TimeDeltaFormat(TimeFormat, metaclass=TimeDeltaFormatMeta):
"""Base class for time delta representations"""
def _check_scale(self, scale):
"""
Check that the scale is in the allowed list of scales, or is `None`
"""
if scale is not None and scale not in TIME_DELTA_SCALES:
raise ScaleValueError("Scale value '{}' not in "
"allowed values {}"
.format(scale, TIME_DELTA_SCALES))
return scale
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2, divisor=1./self.unit)
@property
def value(self):
return (self.jd1 + self.jd2) / self.unit
class TimeDeltaSec(TimeDeltaFormat):
"""Time delta in SI seconds"""
name = 'sec'
unit = 1. / erfa.DAYSEC # for quantity input
class TimeDeltaJD(TimeDeltaFormat):
"""Time delta in Julian days (86400 SI seconds)"""
name = 'jd'
unit = 1.
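# Minimal usage sketch (not part of the original module), assuming the public
# astropy.time.TimeDelta API: one Julian day is exactly 86400 SI seconds.
def _demo_timedelta():
    from astropy.time import TimeDelta
    dt = TimeDelta(1.0, format='jd')
    return dt.sec  # 86400.0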
class TimeDeltaDatetime(TimeDeltaFormat, TimeUnique):
"""Time delta in datetime.timedelta"""
name = 'datetime'
def _check_val_type(self, val1, val2):
if not all(isinstance(val, datetime.timedelta) for val in val1.flat):
raise TypeError('Input values for {} class must be '
'datetime.timedelta objects'.format(self.name))
if val2 is not None:
raise ValueError(
f'{self.name} objects do not accept a val2 but you provided {val2}')
return val1, None
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
iterator = np.nditer([val1, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=[None] + [np.double])
for val, sec in iterator:
sec[...] = val.item().total_seconds()
self.jd1, self.jd2 = day_frac(iterator.operands[-1], 0.0,
divisor=erfa.DAYSEC)
@property
def value(self):
iterator = np.nditer([self.jd1 + self.jd2, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=[None] + [object])
for jd, out in iterator:
out[...] = datetime.timedelta(days=jd.item())
return self.mask_if_needed(iterator.operands[-1])
from .core import Time, TIME_SCALES, TIME_DELTA_SCALES, ScaleValueError
| 37.533657
| 99
| 0.570443
|
91e5d82bb4661b4cfce83a379e752118919e4e1f
| 194
|
py
|
Python
|
tests/gis_tests/geoapp/sitemaps.py
|
ni-ning/django
|
2e7ba6057cfc82a15a22b6021cd60cf307152e2d
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 61,676
|
2015-01-01T00:05:13.000Z
|
2022-03-31T20:37:54.000Z
|
tests/gis_tests/geoapp/sitemaps.py
|
ni-ning/django
|
2e7ba6057cfc82a15a22b6021cd60cf307152e2d
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 8,884
|
2015-01-01T00:12:05.000Z
|
2022-03-31T19:53:11.000Z
|
tests/gis_tests/geoapp/sitemaps.py
|
mustafa0x/django
|
d7394cfa13a4d1a02356e3a83e10ec100fbb9948
|
[
"BSD-3-Clause",
"0BSD"
] | 33,143
|
2015-01-01T02:04:52.000Z
|
2022-03-31T19:42:46.000Z
|
from django.contrib.gis.sitemaps import KMLSitemap, KMZSitemap
from .models import City, Country
sitemaps = {
'kml': KMLSitemap([City, Country]),
'kmz': KMZSitemap([City, Country]),
}
| 21.555556
| 62
| 0.706186
|
24af79ac020a87240e8b1adfaa5838e7c9b29048
| 536
|
py
|
Python
|
library/lib_study/08_rlcompleter.py
|
gottaegbert/penter
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
[
"MIT"
] | 13
|
2020-01-04T07:37:38.000Z
|
2021-08-31T05:19:58.000Z
|
library/lib_study/08_rlcompleter.py
|
gottaegbert/penter
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
[
"MIT"
] | 3
|
2020-06-05T22:42:53.000Z
|
2020-08-24T07:18:54.000Z
|
library/lib_study/08_rlcompleter.py
|
gottaegbert/penter
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
[
"MIT"
] | 9
|
2020-10-19T04:53:06.000Z
|
2021-08-31T05:20:01.000Z
|
# rlcompleter defines a completion function for the readline module by completing valid Python identifiers and keywords.
# The rlcompleter module is designed for use with Python's interactive mode. Unless Python is run with the -S option, the module is automatically imported and configured (see Readline configuration).
import rlcompleter
"""
>>> import rlcompleter
>>> import readline
>>> readline.parse_and_bind("tab: complete")
>>> readline. <TAB PRESSED>
readline.__doc__ readline.get_line_buffer( readline.read_init_file(
readline.__file__ readline.insert_text( readline.set_completer(
readline.__name__ readline.parse_and_bind(
>>> readline.
"""
| 41.230769
| 109
| 0.751866
|
0f5f7d85b30116fa99436eec46adf19b90c19803
| 239
|
py
|
Python
|
pinakes/main/catalog/tests/unit/test_tenants.py
|
hsong-rh/pinakes
|
2f08cb757ca64c866af3244686b92a3074fc7571
|
[
"Apache-2.0"
] | 2
|
2022-03-17T18:53:58.000Z
|
2022-03-17T22:04:22.000Z
|
pinakes/main/catalog/tests/unit/test_tenants.py
|
hsong-rh/pinakes
|
2f08cb757ca64c866af3244686b92a3074fc7571
|
[
"Apache-2.0"
] | 9
|
2022-03-18T08:22:57.000Z
|
2022-03-30T17:14:49.000Z
|
pinakes/main/catalog/tests/unit/test_tenants.py
|
hsong-rh/pinakes
|
2f08cb757ca64c866af3244686b92a3074fc7571
|
[
"Apache-2.0"
] | 7
|
2022-03-17T22:03:08.000Z
|
2022-03-28T21:28:34.000Z
|
import pytest
from pinakes.main.tests.factories import TenantFactory
class TestTenants:
@pytest.mark.django_db
def test_tenant(self):
tenant = TenantFactory()
assert tenant.external_tenant.startswith("external")
| 21.727273
| 60
| 0.740586
|
74189c7518112af502d83673dc8d20f20f02de84
| 1,377
|
py
|
Python
|
accounts/models.py
|
mariuslihet/CRM
|
1323dc358a016d027717466f946ffd3af74897f2
|
[
"MIT"
] | 2
|
2018-07-25T13:11:19.000Z
|
2019-04-19T03:45:40.000Z
|
accounts/models.py
|
mariuslihet/CRM
|
1323dc358a016d027717466f946ffd3af74897f2
|
[
"MIT"
] | 2
|
2020-06-05T19:05:09.000Z
|
2021-06-10T21:08:49.000Z
|
accounts/models.py
|
mariuslihet/CRM
|
1323dc358a016d027717466f946ffd3af74897f2
|
[
"MIT"
] | 1
|
2019-05-07T04:30:36.000Z
|
2019-05-07T04:30:36.000Z
|
from django.db import models
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from common.models import User, Address, Team
from common.utils import INDCHOICES
class Account(models.Model):
name = models.CharField(pgettext_lazy("Name of Account", "Name"), max_length=64)
email = models.EmailField()
phone = models.CharField(max_length=20)
industry = models.CharField(_("Industry Type"), max_length=255, choices=INDCHOICES, blank=True, null=True)
billing_address = models.ForeignKey(Address, related_name='account_billing_address', on_delete=models.CASCADE, blank=True, null=True)
shipping_address = models.ForeignKey(Address, related_name='account_shipping_address', on_delete=models.CASCADE, blank=True, null=True)
website = models.URLField(_("Website"), blank=True, null=True)
description = models.TextField(blank=True, null=True)
assigned_to = models.ManyToManyField(User, related_name='account_assigned_to')
teams = models.ManyToManyField(Team)
created_by = models.ForeignKey(User, related_name='account_created_by', on_delete=models.CASCADE)
created_on = models.DateTimeField(_("Created on"), auto_now_add=True)
is_active = models.BooleanField(default=False)
def __str__(self):
return self.name
class Meta:
ordering = ['-created_on']
| 47.482759
| 139
| 0.761075
|
7a2abfe124f44a4e3ce366ea42f62ef090a72e00
| 509
|
py
|
Python
|
setup.py
|
ONS-OpenData/cmd-databaker-utils
|
a5a80e2da460d85558d814c051c0e40f035ef0a2
|
[
"MIT"
] | null | null | null |
setup.py
|
ONS-OpenData/cmd-databaker-utils
|
a5a80e2da460d85558d814c051c0e40f035ef0a2
|
[
"MIT"
] | null | null | null |
setup.py
|
ONS-OpenData/cmd-databaker-utils
|
a5a80e2da460d85558d814c051c0e40f035ef0a2
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
setup(
name = 'databakerUtils',
packages = ['databakerUtils'],
version = '0.1.49',
description = 'Some additional utilities for using databaker within ONS digital publishing',
author = 'Michael Adams',
author_email = 'michael.adams@ons.gov.uk',
url = 'https://github.com/ONS-OpenData/databakerUtils',
download_url = 'https://github.com/ONS-OpenData/cmd-databaker-utils/archive/0.1.tar.gz',
keywords = ['databaker', 'addon', 'utility'],
classifiers = [],
)
| 36.357143
| 94
| 0.709234
|
d469b508fd478b67fa7fe25acc068ce2dc1fab9f
| 18,735
|
py
|
Python
|
sympy/functions/elementary/tests/test_integers.py
|
Michal-Gagala/sympy
|
3cc756c2af73b5506102abaeefd1b654e286e2c8
|
[
"MIT"
] | null | null | null |
sympy/functions/elementary/tests/test_integers.py
|
Michal-Gagala/sympy
|
3cc756c2af73b5506102abaeefd1b654e286e2c8
|
[
"MIT"
] | null | null | null |
sympy/functions/elementary/tests/test_integers.py
|
Michal-Gagala/sympy
|
3cc756c2af73b5506102abaeefd1b654e286e2c8
|
[
"MIT"
] | null | null | null |
from sympy.calculus.accumulationbounds import AccumBounds
from sympy.core.numbers import (E, Float, I, Rational, nan, oo, pi, zoo)
from sympy.core.relational import (Eq, Ge, Gt, Le, Lt, Ne)
from sympy.core.singleton import S
from sympy.core.symbol import (Symbol, symbols)
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.exponential import (exp, log)
from sympy.functions.elementary.integers import (ceiling, floor, frac)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import sin
from sympy.core.expr import unchanged
from sympy.testing.pytest import XFAIL
x = Symbol('x')
i = Symbol('i', imaginary=True)
y = Symbol('y', real=True)
k, n = symbols('k,n', integer=True)
def test_floor():
assert floor(nan) is nan
assert floor(oo) is oo
assert floor(-oo) is -oo
assert floor(zoo) is zoo
assert floor(0) == 0
assert floor(1) == 1
assert floor(-1) == -1
assert floor(E) == 2
assert floor(-E) == -3
assert floor(2*E) == 5
assert floor(-2*E) == -6
assert floor(pi) == 3
assert floor(-pi) == -4
assert floor(S.Half) == 0
assert floor(Rational(-1, 2)) == -1
assert floor(Rational(7, 3)) == 2
assert floor(Rational(-7, 3)) == -3
assert floor(-Rational(7, 3)) == -3
assert floor(Float(17.0)) == 17
assert floor(-Float(17.0)) == -17
assert floor(Float(7.69)) == 7
assert floor(-Float(7.69)) == -8
assert floor(I) == I
assert floor(-I) == -I
e = floor(i)
assert e.func is floor and e.args[0] == i
assert floor(oo*I) == oo*I
assert floor(-oo*I) == -oo*I
assert floor(exp(I*pi/4)*oo) == exp(I*pi/4)*oo
assert floor(2*I) == 2*I
assert floor(-2*I) == -2*I
assert floor(I/2) == 0
assert floor(-I/2) == -I
assert floor(E + 17) == 19
assert floor(pi + 2) == 5
assert floor(E + pi) == 5
assert floor(I + pi) == 3 + I
assert floor(floor(pi)) == 3
assert floor(floor(y)) == floor(y)
assert floor(floor(x)) == floor(x)
assert unchanged(floor, x)
assert unchanged(floor, 2*x)
assert unchanged(floor, k*x)
assert floor(k) == k
assert floor(2*k) == 2*k
assert floor(k*n) == k*n
assert unchanged(floor, k/2)
assert unchanged(floor, x + y)
assert floor(x + 3) == floor(x) + 3
assert floor(x + k) == floor(x) + k
assert floor(y + 3) == floor(y) + 3
assert floor(y + k) == floor(y) + k
assert floor(3 + I*y + pi) == 6 + floor(y)*I
assert floor(k + n) == k + n
assert unchanged(floor, x*I)
assert floor(k*I) == k*I
assert floor(Rational(23, 10) - E*I) == 2 - 3*I
assert floor(sin(1)) == 0
assert floor(sin(-1)) == -1
assert floor(exp(2)) == 7
assert floor(log(8)/log(2)) != 2
assert int(floor(log(8)/log(2)).evalf(chop=True)) == 3
assert floor(factorial(50)/exp(1)) == \
11188719610782480504630258070757734324011354208865721592720336800
assert (floor(y) < y) == False
assert (floor(y) <= y) == True
assert (floor(y) > y) == False
assert (floor(y) >= y) == False
assert (floor(x) <= x).is_Relational # x could be non-real
assert (floor(x) > x).is_Relational
assert (floor(x) <= y).is_Relational # arg is not same as rhs
assert (floor(x) > y).is_Relational
assert (floor(y) <= oo) == True
assert (floor(y) < oo) == True
assert (floor(y) >= -oo) == True
assert (floor(y) > -oo) == True
assert floor(y).rewrite(frac) == y - frac(y)
assert floor(y).rewrite(ceiling) == -ceiling(-y)
assert floor(y).rewrite(frac).subs(y, -pi) == floor(-pi)
assert floor(y).rewrite(frac).subs(y, E) == floor(E)
assert floor(y).rewrite(ceiling).subs(y, E) == -ceiling(-E)
assert floor(y).rewrite(ceiling).subs(y, -pi) == -ceiling(pi)
assert Eq(floor(y), y - frac(y))
assert Eq(floor(y), -ceiling(-y))
neg = Symbol('neg', negative=True)
nn = Symbol('nn', nonnegative=True)
pos = Symbol('pos', positive=True)
np = Symbol('np', nonpositive=True)
assert (floor(neg) < 0) == True
assert (floor(neg) <= 0) == True
assert (floor(neg) > 0) == False
assert (floor(neg) >= 0) == False
assert (floor(neg) <= -1) == True
assert (floor(neg) >= -3) == (neg >= -3)
assert (floor(neg) < 5) == (neg < 5)
assert (floor(nn) < 0) == False
assert (floor(nn) >= 0) == True
assert (floor(pos) < 0) == False
assert (floor(pos) <= 0) == (pos < 1)
assert (floor(pos) > 0) == (pos >= 1)
assert (floor(pos) >= 0) == True
assert (floor(pos) >= 3) == (pos >= 3)
assert (floor(np) <= 0) == True
assert (floor(np) > 0) == False
assert floor(neg).is_negative == True
assert floor(neg).is_nonnegative == False
assert floor(nn).is_negative == False
assert floor(nn).is_nonnegative == True
assert floor(pos).is_negative == False
assert floor(pos).is_nonnegative == True
assert floor(np).is_negative is None
assert floor(np).is_nonnegative is None
assert (floor(7, evaluate=False) >= 7) == True
assert (floor(7, evaluate=False) > 7) == False
assert (floor(7, evaluate=False) <= 7) == True
assert (floor(7, evaluate=False) < 7) == False
assert (floor(7, evaluate=False) >= 6) == True
assert (floor(7, evaluate=False) > 6) == True
assert (floor(7, evaluate=False) <= 6) == False
assert (floor(7, evaluate=False) < 6) == False
assert (floor(7, evaluate=False) >= 8) == False
assert (floor(7, evaluate=False) > 8) == False
assert (floor(7, evaluate=False) <= 8) == True
assert (floor(7, evaluate=False) < 8) == True
assert (floor(x) <= 5.5) == Le(floor(x), 5.5, evaluate=False)
assert (floor(x) >= -3.2) == Ge(floor(x), -3.2, evaluate=False)
assert (floor(x) < 2.9) == Lt(floor(x), 2.9, evaluate=False)
assert (floor(x) > -1.7) == Gt(floor(x), -1.7, evaluate=False)
assert (floor(y) <= 5.5) == (y < 6)
assert (floor(y) >= -3.2) == (y >= -3)
assert (floor(y) < 2.9) == (y < 3)
assert (floor(y) > -1.7) == (y >= -1)
assert (floor(y) <= n) == (y < n + 1)
assert (floor(y) >= n) == (y >= n)
assert (floor(y) < n) == (y < n)
assert (floor(y) > n) == (y >= n + 1)
def test_ceiling():
assert ceiling(nan) is nan
assert ceiling(oo) is oo
assert ceiling(-oo) is -oo
assert ceiling(zoo) is zoo
assert ceiling(0) == 0
assert ceiling(1) == 1
assert ceiling(-1) == -1
assert ceiling(E) == 3
assert ceiling(-E) == -2
assert ceiling(2*E) == 6
assert ceiling(-2*E) == -5
assert ceiling(pi) == 4
assert ceiling(-pi) == -3
assert ceiling(S.Half) == 1
assert ceiling(Rational(-1, 2)) == 0
assert ceiling(Rational(7, 3)) == 3
assert ceiling(-Rational(7, 3)) == -2
assert ceiling(Float(17.0)) == 17
assert ceiling(-Float(17.0)) == -17
assert ceiling(Float(7.69)) == 8
assert ceiling(-Float(7.69)) == -7
assert ceiling(I) == I
assert ceiling(-I) == -I
e = ceiling(i)
assert e.func is ceiling and e.args[0] == i
assert ceiling(oo*I) == oo*I
assert ceiling(-oo*I) == -oo*I
assert ceiling(exp(I*pi/4)*oo) == exp(I*pi/4)*oo
assert ceiling(2*I) == 2*I
assert ceiling(-2*I) == -2*I
assert ceiling(I/2) == I
assert ceiling(-I/2) == 0
assert ceiling(E + 17) == 20
assert ceiling(pi + 2) == 6
assert ceiling(E + pi) == 6
assert ceiling(I + pi) == I + 4
assert ceiling(ceiling(pi)) == 4
assert ceiling(ceiling(y)) == ceiling(y)
assert ceiling(ceiling(x)) == ceiling(x)
assert unchanged(ceiling, x)
assert unchanged(ceiling, 2*x)
assert unchanged(ceiling, k*x)
assert ceiling(k) == k
assert ceiling(2*k) == 2*k
assert ceiling(k*n) == k*n
assert unchanged(ceiling, k/2)
assert unchanged(ceiling, x + y)
assert ceiling(x + 3) == ceiling(x) + 3
assert ceiling(x + k) == ceiling(x) + k
assert ceiling(y + 3) == ceiling(y) + 3
assert ceiling(y + k) == ceiling(y) + k
assert ceiling(3 + pi + y*I) == 7 + ceiling(y)*I
assert ceiling(k + n) == k + n
assert unchanged(ceiling, x*I)
assert ceiling(k*I) == k*I
assert ceiling(Rational(23, 10) - E*I) == 3 - 2*I
assert ceiling(sin(1)) == 1
assert ceiling(sin(-1)) == 0
assert ceiling(exp(2)) == 8
assert ceiling(-log(8)/log(2)) != -2
assert int(ceiling(-log(8)/log(2)).evalf(chop=True)) == -3
assert ceiling(factorial(50)/exp(1)) == \
11188719610782480504630258070757734324011354208865721592720336801
assert (ceiling(y) >= y) == True
assert (ceiling(y) > y) == False
assert (ceiling(y) < y) == False
assert (ceiling(y) <= y) == False
assert (ceiling(x) >= x).is_Relational # x could be non-real
assert (ceiling(x) < x).is_Relational
assert (ceiling(x) >= y).is_Relational # arg is not same as rhs
assert (ceiling(x) < y).is_Relational
assert (ceiling(y) >= -oo) == True
assert (ceiling(y) > -oo) == True
assert (ceiling(y) <= oo) == True
assert (ceiling(y) < oo) == True
assert ceiling(y).rewrite(floor) == -floor(-y)
assert ceiling(y).rewrite(frac) == y + frac(-y)
assert ceiling(y).rewrite(floor).subs(y, -pi) == -floor(pi)
assert ceiling(y).rewrite(floor).subs(y, E) == -floor(-E)
assert ceiling(y).rewrite(frac).subs(y, pi) == ceiling(pi)
assert ceiling(y).rewrite(frac).subs(y, -E) == ceiling(-E)
assert Eq(ceiling(y), y + frac(-y))
assert Eq(ceiling(y), -floor(-y))
neg = Symbol('neg', negative=True)
nn = Symbol('nn', nonnegative=True)
pos = Symbol('pos', positive=True)
np = Symbol('np', nonpositive=True)
assert (ceiling(neg) <= 0) == True
assert (ceiling(neg) < 0) == (neg <= -1)
assert (ceiling(neg) > 0) == False
assert (ceiling(neg) >= 0) == (neg > -1)
assert (ceiling(neg) > -3) == (neg > -3)
assert (ceiling(neg) <= 10) == (neg <= 10)
assert (ceiling(nn) < 0) == False
assert (ceiling(nn) >= 0) == True
assert (ceiling(pos) < 0) == False
assert (ceiling(pos) <= 0) == False
assert (ceiling(pos) > 0) == True
assert (ceiling(pos) >= 0) == True
assert (ceiling(pos) >= 1) == True
assert (ceiling(pos) > 5) == (pos > 5)
assert (ceiling(np) <= 0) == True
assert (ceiling(np) > 0) == False
assert ceiling(neg).is_positive == False
assert ceiling(neg).is_nonpositive == True
assert ceiling(nn).is_positive is None
assert ceiling(nn).is_nonpositive is None
assert ceiling(pos).is_positive == True
assert ceiling(pos).is_nonpositive == False
assert ceiling(np).is_positive == False
assert ceiling(np).is_nonpositive == True
assert (ceiling(7, evaluate=False) >= 7) == True
assert (ceiling(7, evaluate=False) > 7) == False
assert (ceiling(7, evaluate=False) <= 7) == True
assert (ceiling(7, evaluate=False) < 7) == False
assert (ceiling(7, evaluate=False) >= 6) == True
assert (ceiling(7, evaluate=False) > 6) == True
assert (ceiling(7, evaluate=False) <= 6) == False
assert (ceiling(7, evaluate=False) < 6) == False
assert (ceiling(7, evaluate=False) >= 8) == False
assert (ceiling(7, evaluate=False) > 8) == False
assert (ceiling(7, evaluate=False) <= 8) == True
assert (ceiling(7, evaluate=False) < 8) == True
assert (ceiling(x) <= 5.5) == Le(ceiling(x), 5.5, evaluate=False)
assert (ceiling(x) >= -3.2) == Ge(ceiling(x), -3.2, evaluate=False)
assert (ceiling(x) < 2.9) == Lt(ceiling(x), 2.9, evaluate=False)
assert (ceiling(x) > -1.7) == Gt(ceiling(x), -1.7, evaluate=False)
assert (ceiling(y) <= 5.5) == (y <= 5)
assert (ceiling(y) >= -3.2) == (y > -4)
assert (ceiling(y) < 2.9) == (y <= 2)
assert (ceiling(y) > -1.7) == (y > -2)
assert (ceiling(y) <= n) == (y <= n)
assert (ceiling(y) >= n) == (y > n - 1)
assert (ceiling(y) < n) == (y <= n - 1)
assert (ceiling(y) > n) == (y > n)
def test_frac():
assert isinstance(frac(x), frac)
assert frac(oo) == AccumBounds(0, 1)
assert frac(-oo) == AccumBounds(0, 1)
assert frac(zoo) is nan
assert frac(n) == 0
assert frac(nan) is nan
assert frac(Rational(4, 3)) == Rational(1, 3)
assert frac(-Rational(4, 3)) == Rational(2, 3)
assert frac(Rational(-4, 3)) == Rational(2, 3)
r = Symbol('r', real=True)
assert frac(I*r) == I*frac(r)
assert frac(1 + I*r) == I*frac(r)
assert frac(0.5 + I*r) == 0.5 + I*frac(r)
assert frac(n + I*r) == I*frac(r)
assert frac(n + I*k) == 0
assert unchanged(frac, x + I*x)
assert frac(x + I*n) == frac(x)
assert frac(x).rewrite(floor) == x - floor(x)
assert frac(x).rewrite(ceiling) == x + ceiling(-x)
assert frac(y).rewrite(floor).subs(y, pi) == frac(pi)
assert frac(y).rewrite(floor).subs(y, -E) == frac(-E)
assert frac(y).rewrite(ceiling).subs(y, -pi) == frac(-pi)
assert frac(y).rewrite(ceiling).subs(y, E) == frac(E)
assert Eq(frac(y), y - floor(y))
assert Eq(frac(y), y + ceiling(-y))
r = Symbol('r', real=True)
p_i = Symbol('p_i', integer=True, positive=True)
    n_i = Symbol('n_i', integer=True, negative=True)
np_i = Symbol('np_i', integer=True, nonpositive=True)
nn_i = Symbol('nn_i', integer=True, nonnegative=True)
p_r = Symbol('p_r', positive=True)
n_r = Symbol('n_r', negative=True)
np_r = Symbol('np_r', real=True, nonpositive=True)
nn_r = Symbol('nn_r', real=True, nonnegative=True)
# Real frac argument, integer rhs
assert frac(r) <= p_i
assert not frac(r) <= n_i
assert (frac(r) <= np_i).has(Le)
assert (frac(r) <= nn_i).has(Le)
assert frac(r) < p_i
assert not frac(r) < n_i
assert not frac(r) < np_i
assert (frac(r) < nn_i).has(Lt)
assert not frac(r) >= p_i
assert frac(r) >= n_i
assert frac(r) >= np_i
assert (frac(r) >= nn_i).has(Ge)
assert not frac(r) > p_i
assert frac(r) > n_i
assert (frac(r) > np_i).has(Gt)
assert (frac(r) > nn_i).has(Gt)
assert not Eq(frac(r), p_i)
assert not Eq(frac(r), n_i)
assert Eq(frac(r), np_i).has(Eq)
assert Eq(frac(r), nn_i).has(Eq)
assert Ne(frac(r), p_i)
assert Ne(frac(r), n_i)
assert Ne(frac(r), np_i).has(Ne)
assert Ne(frac(r), nn_i).has(Ne)
# Real frac argument, real rhs
assert (frac(r) <= p_r).has(Le)
assert not frac(r) <= n_r
assert (frac(r) <= np_r).has(Le)
assert (frac(r) <= nn_r).has(Le)
assert (frac(r) < p_r).has(Lt)
assert not frac(r) < n_r
assert not frac(r) < np_r
assert (frac(r) < nn_r).has(Lt)
assert (frac(r) >= p_r).has(Ge)
assert frac(r) >= n_r
assert frac(r) >= np_r
assert (frac(r) >= nn_r).has(Ge)
assert (frac(r) > p_r).has(Gt)
assert frac(r) > n_r
assert (frac(r) > np_r).has(Gt)
assert (frac(r) > nn_r).has(Gt)
assert not Eq(frac(r), n_r)
assert Eq(frac(r), p_r).has(Eq)
assert Eq(frac(r), np_r).has(Eq)
assert Eq(frac(r), nn_r).has(Eq)
assert Ne(frac(r), p_r).has(Ne)
assert Ne(frac(r), n_r)
assert Ne(frac(r), np_r).has(Ne)
assert Ne(frac(r), nn_r).has(Ne)
# Real frac argument, +/- oo rhs
assert frac(r) < oo
assert frac(r) <= oo
assert not frac(r) > oo
assert not frac(r) >= oo
assert not frac(r) < -oo
assert not frac(r) <= -oo
assert frac(r) > -oo
assert frac(r) >= -oo
assert frac(r) < 1
assert frac(r) <= 1
assert not frac(r) > 1
assert not frac(r) >= 1
assert not frac(r) < 0
assert (frac(r) <= 0).has(Le)
assert (frac(r) > 0).has(Gt)
assert frac(r) >= 0
# Some test for numbers
assert frac(r) <= sqrt(2)
assert (frac(r) <= sqrt(3) - sqrt(2)).has(Le)
assert not frac(r) <= sqrt(2) - sqrt(3)
assert not frac(r) >= sqrt(2)
assert (frac(r) >= sqrt(3) - sqrt(2)).has(Ge)
assert frac(r) >= sqrt(2) - sqrt(3)
assert not Eq(frac(r), sqrt(2))
assert Eq(frac(r), sqrt(3) - sqrt(2)).has(Eq)
assert not Eq(frac(r), sqrt(2) - sqrt(3))
assert Ne(frac(r), sqrt(2))
assert Ne(frac(r), sqrt(3) - sqrt(2)).has(Ne)
assert Ne(frac(r), sqrt(2) - sqrt(3))
assert frac(p_i, evaluate=False).is_zero
assert frac(p_i, evaluate=False).is_finite
assert frac(p_i, evaluate=False).is_integer
assert frac(p_i, evaluate=False).is_real
assert frac(r).is_finite
assert frac(r).is_real
assert frac(r).is_zero is None
assert frac(r).is_integer is None
assert frac(oo).is_finite
assert frac(oo).is_real
def test_series():
x, y = symbols('x,y')
assert floor(x).nseries(x, y, 100) == floor(y)
assert ceiling(x).nseries(x, y, 100) == ceiling(y)
assert floor(x).nseries(x, pi, 100) == 3
assert ceiling(x).nseries(x, pi, 100) == 4
assert floor(x).nseries(x, 0, 100) == 0
assert ceiling(x).nseries(x, 0, 100) == 1
assert floor(-x).nseries(x, 0, 100) == -1
assert ceiling(-x).nseries(x, 0, 100) == 0
@XFAIL
def test_issue_4149():
assert floor(3 + pi*I + y*I) == 3 + floor(pi + y)*I
assert floor(3*I + pi*I + y*I) == floor(3 + pi + y)*I
assert floor(3 + E + pi*I + y*I) == 5 + floor(pi + y)*I
def test_issue_21651():
k = Symbol('k', positive=True, integer=True)
exp = 2*2**(-k)
assert isinstance(floor(exp), floor)
def test_issue_11207():
assert floor(floor(x)) == floor(x)
assert floor(ceiling(x)) == ceiling(x)
assert ceiling(floor(x)) == floor(x)
assert ceiling(ceiling(x)) == ceiling(x)
def test_nested_floor_ceiling():
assert floor(-floor(ceiling(x**3)/y)) == -floor(ceiling(x**3)/y)
assert ceiling(-floor(ceiling(x**3)/y)) == -floor(ceiling(x**3)/y)
assert floor(ceiling(-floor(x**Rational(7, 2)/y))) == -floor(x**Rational(7, 2)/y)
assert -ceiling(-ceiling(floor(x)/y)) == ceiling(floor(x)/y)
def test_issue_18689():
assert floor(floor(floor(x)) + 3) == floor(x) + 3
assert ceiling(ceiling(ceiling(x)) + 1) == ceiling(x) + 1
assert ceiling(ceiling(floor(x)) + 3) == floor(x) + 3
def test_issue_18421():
assert floor(float(0)) is S.Zero
assert ceiling(float(0)) is S.Zero
| 32.025641
| 86
| 0.56632
|
7dd9d4a23a94b2e45ea9ff84eb289e91d5a86429
| 11,085
|
py
|
Python
|
fairseq/logging/progress_bar.py
|
gooran/FormalityStyleTransfer
|
6c8c92abf54a536a2741f997b857ecc5f0d937eb
|
[
"MIT"
] | 14
|
2020-08-15T09:26:41.000Z
|
2022-03-18T05:24:38.000Z
|
fairseq/logging/progress_bar.py
|
gooran/FormalityStyleTransfer
|
6c8c92abf54a536a2741f997b857ecc5f0d937eb
|
[
"MIT"
] | 4
|
2020-12-04T09:15:35.000Z
|
2022-01-04T09:10:46.000Z
|
fairseq/logging/progress_bar.py
|
gooran/FormalityStyleTransfer
|
6c8c92abf54a536a2741f997b857ecc5f0d937eb
|
[
"MIT"
] | 4
|
2020-05-27T17:20:29.000Z
|
2021-11-11T11:43:35.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wrapper around various loggers and progress bars (e.g., tqdm).
"""
import atexit
import json
import logging
import os
import sys
from collections import OrderedDict
from contextlib import contextmanager
from numbers import Number
from typing import Optional
import torch
from .meters import AverageMeter, StopwatchMeter, TimeMeter
logger = logging.getLogger(__name__)
def progress_bar(
iterator,
log_format: Optional[str] = None,
log_interval: int = 100,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
tensorboard_logdir: Optional[str] = None,
default_log_format: str = 'tqdm',
):
if log_format is None:
log_format = default_log_format
if log_format == 'tqdm' and not sys.stderr.isatty():
log_format = 'simple'
if log_format == 'json':
bar = JsonProgressBar(iterator, epoch, prefix, log_interval)
elif log_format == 'none':
bar = NoopProgressBar(iterator, epoch, prefix)
elif log_format == 'simple':
bar = SimpleProgressBar(iterator, epoch, prefix, log_interval)
elif log_format == 'tqdm':
bar = TqdmProgressBar(iterator, epoch, prefix)
else:
raise ValueError('Unknown log format: {}'.format(log_format))
if tensorboard_logdir:
try:
# [FB only] custom wrapper for TensorBoard
import palaas # noqa
from .fb_tbmf_wrapper import FbTbmfWrapper
bar = FbTbmfWrapper(bar, log_interval)
except ImportError:
bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir)
return bar
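# A minimal usage sketch of the factory above (illustrative only: the range()
# iterator and the {'loss': 0.5} stats dict are made-up placeholders, not part
# of fairseq):
#
#     bar = progress_bar(range(1000), log_format='simple', log_interval=100,
#                        epoch=1, prefix='train')
#     for i, sample in enumerate(bar):
#         bar.log({'loss': 0.5}, tag='train', step=i)
#     bar.print({'loss': 0.5}, tag='train')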
def build_progress_bar(
args,
iterator,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
default: str = 'tqdm',
no_progress_bar: str = 'none',
):
"""Legacy wrapper that takes an argparse.Namespace."""
if getattr(args, 'no_progress_bar', False):
default = no_progress_bar
if getattr(args, 'distributed_rank', 0) == 0:
tensorboard_logdir = getattr(args, 'tensorboard_logdir', None)
else:
tensorboard_logdir = None
return progress_bar(
iterator,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch,
prefix=prefix,
tensorboard_logdir=tensorboard_logdir,
default_log_format=default,
)
def format_stat(stat):
if isinstance(stat, Number):
stat = '{:g}'.format(stat)
elif isinstance(stat, AverageMeter):
stat = '{:.3f}'.format(stat.avg)
elif isinstance(stat, TimeMeter):
stat = '{:g}'.format(round(stat.avg))
elif isinstance(stat, StopwatchMeter):
stat = '{:g}'.format(round(stat.sum))
elif torch.is_tensor(stat):
stat = stat.tolist()
return stat
class BaseProgressBar(object):
"""Abstract class for progress bars."""
def __init__(self, iterable, epoch=None, prefix=None):
self.iterable = iterable
self.offset = getattr(iterable, 'offset', 0)
self.epoch = epoch
self.prefix = ''
if epoch is not None:
self.prefix += 'epoch {:03d}'.format(epoch)
if prefix is not None:
self.prefix += ' | {}'.format(prefix)
def __len__(self):
return len(self.iterable)
def __enter__(self):
return self
def __exit__(self, *exc):
return False
def __iter__(self):
raise NotImplementedError
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
raise NotImplementedError
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
raise NotImplementedError
def _str_commas(self, stats):
return ', '.join(key + '=' + stats[key].strip()
for key in stats.keys())
def _str_pipes(self, stats):
return ' | '.join(key + ' ' + stats[key].strip()
for key in stats.keys())
def _format_stats(self, stats):
postfix = OrderedDict(stats)
# Preprocess stats according to datatype
for key in postfix.keys():
postfix[key] = str(format_stat(postfix[key]))
return postfix
@contextmanager
def rename_logger(logger, new_name):
old_name = logger.name
if new_name is not None:
logger.name = new_name
yield logger
logger.name = old_name
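# Usage sketch: emit one record under a temporary logger name (the 'valid'
# tag below is an arbitrary example, not a fairseq convention):
#
#     with rename_logger(logger, 'valid'):
#         logger.info('end of validation epoch')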
class JsonProgressBar(BaseProgressBar):
"""Log output in JSON format."""
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.stats = None
self.tag = None
def __iter__(self):
size = float(len(self.iterable))
for i, obj in enumerate(self.iterable, start=self.offset):
yield obj
if (
self.stats is not None
and i > 0
and self.log_interval is not None
and (i + 1) % self.log_interval == 0
):
update = (
self.epoch - 1 + float(i / size)
if self.epoch is not None
else None
)
stats = self._format_stats(self.stats, epoch=self.epoch, update=update)
with rename_logger(logger, self.tag):
logger.info(json.dumps(stats))
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
self.stats = stats
self.tag = tag
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
self.stats = stats
if tag is not None:
self.stats = OrderedDict([(tag + '_' + k, v) for k, v in self.stats.items()])
stats = self._format_stats(self.stats, epoch=self.epoch)
with rename_logger(logger, tag):
logger.info(json.dumps(stats))
def _format_stats(self, stats, epoch=None, update=None):
postfix = OrderedDict()
if epoch is not None:
postfix['epoch'] = epoch
if update is not None:
postfix['update'] = round(update, 3)
# Preprocess stats according to datatype
for key in stats.keys():
postfix[key] = format_stat(stats[key])
return postfix
class NoopProgressBar(BaseProgressBar):
"""No logging."""
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
def __iter__(self):
for obj in self.iterable:
yield obj
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
pass
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
pass
class SimpleProgressBar(BaseProgressBar):
"""A minimal logger for non-TTY environments."""
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.stats = None
self.tag = None
def __iter__(self):
size = len(self.iterable)
for i, obj in enumerate(self.iterable, start=self.offset):
yield obj
if (
self.stats is not None
and i > 0
and self.log_interval is not None
and (i + 1) % self.log_interval == 0
):
postfix = self._str_commas(self.stats)
with rename_logger(logger, self.tag):
logger.info('{}: {:5d} / {:d} {}'.format(self.prefix, i, size, postfix))
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
self.stats = self._format_stats(stats)
self.tag = tag
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
postfix = self._str_pipes(self._format_stats(stats))
with rename_logger(logger, tag):
logger.info('{} | {}'.format(self.prefix, postfix))
class TqdmProgressBar(BaseProgressBar):
"""Log to tqdm."""
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
from tqdm import tqdm
self.tqdm = tqdm(iterable, self.prefix, leave=False)
def __iter__(self):
return iter(self.tqdm)
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
self.tqdm.set_postfix(self._format_stats(stats), refresh=False)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
postfix = self._str_pipes(self._format_stats(stats))
self.tqdm.write('{} | {}'.format(self.tqdm.desc, postfix))
try:
from tensorboardX import SummaryWriter
_tensorboard_writers = {}
except ImportError:
SummaryWriter = None
def _close_writers():
for w in _tensorboard_writers.values():
w.close()
atexit.register(_close_writers)
class TensorboardProgressBarWrapper(BaseProgressBar):
"""Log to tensorboard."""
def __init__(self, wrapped_bar, tensorboard_logdir):
self.wrapped_bar = wrapped_bar
self.tensorboard_logdir = tensorboard_logdir
if SummaryWriter is None:
logger.warning(
"tensorboard or required dependencies not found, please see README "
"for using tensorboard. (e.g. pip install tensorboardX)"
)
def _writer(self, key):
if SummaryWriter is None:
return None
_writers = _tensorboard_writers
if key not in _writers:
_writers[key] = SummaryWriter(os.path.join(self.tensorboard_logdir, key))
_writers[key].add_text('sys.argv', " ".join(sys.argv))
return _writers[key]
def __iter__(self):
return iter(self.wrapped_bar)
def log(self, stats, tag=None, step=None):
"""Log intermediate stats to tensorboard."""
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.log(stats, tag=tag, step=step)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.print(stats, tag=tag, step=step)
def _log_to_tensorboard(self, stats, tag=None, step=None):
writer = self._writer(tag or '')
if writer is None:
return
if step is None:
step = stats['num_updates']
for key in stats.keys() - {'num_updates'}:
if isinstance(stats[key], AverageMeter):
writer.add_scalar(key, stats[key].val, step)
elif isinstance(stats[key], Number):
writer.add_scalar(key, stats[key], step)
writer.flush()
| 31.402266
| 93
| 0.611367
|
90bbf40288b8ec922b143c8e0203e8d00881a48d
| 233
|
py
|
Python
|
pmdarima/preprocessing/endog/__init__.py
|
tuomijal/pmdarima
|
5bf84a2a5c42b81b949bd252ad3d4c6c311343f8
|
[
"MIT"
] | 736
|
2019-12-02T01:33:31.000Z
|
2022-03-31T21:45:29.000Z
|
pmdarima/preprocessing/endog/__init__.py
|
tuomijal/pmdarima
|
5bf84a2a5c42b81b949bd252ad3d4c6c311343f8
|
[
"MIT"
] | 186
|
2019-12-01T18:01:33.000Z
|
2022-03-31T18:27:56.000Z
|
pmdarima/preprocessing/endog/__init__.py
|
tuomijal/pmdarima
|
5bf84a2a5c42b81b949bd252ad3d4c6c311343f8
|
[
"MIT"
] | 126
|
2019-12-07T04:03:19.000Z
|
2022-03-31T17:40:14.000Z
|
# -*- coding: utf-8 -*-
from .boxcox import *
from .log import *
# don't want to accidentally hoist `base` to top-level, since preprocessing has
# its own base
__all__ = [s for s in dir() if not (s.startswith("_") or s == 'base')]
| 25.888889
| 79
| 0.652361
|
3b2acc98435ce9e0fbf0f4182de4b847aeec8ac8
| 600
|
py
|
Python
|
article/migrations/0006_auto_20180412_2109.py
|
AmlHanfy/iTrack-Project
|
6afe64ff3bd78c6c9dc93d68c0ed52708a8dcb1a
|
[
"Apache-2.0"
] | null | null | null |
article/migrations/0006_auto_20180412_2109.py
|
AmlHanfy/iTrack-Project
|
6afe64ff3bd78c6c9dc93d68c0ed52708a8dcb1a
|
[
"Apache-2.0"
] | null | null | null |
article/migrations/0006_auto_20180412_2109.py
|
AmlHanfy/iTrack-Project
|
6afe64ff3bd78c6c9dc93d68c0ed52708a8dcb1a
|
[
"Apache-2.0"
] | 3
|
2018-01-23T19:08:22.000Z
|
2018-09-25T06:47:24.000Z
|
# Generated by Django 2.0.2 on 2018-04-12 19:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('article', '0005_auto_20180412_1836'),
]
operations = [
migrations.DeleteModel(
name='Comments',
),
migrations.AlterField(
model_name='comment',
name='article',
field=models.ForeignKey(db_column='slug', default=None, on_delete=django.db.models.deletion.CASCADE, to='article.Article'),
),
]
| 26.086957
| 136
| 0.601667
|
6461464a64eca62369e4ea3822c262a2fce5bcb3
| 4,355
|
py
|
Python
|
src/fidibot.py
|
freestyl3r/fidibot
|
b40f7663a1f8ff6be8fb19c63b835a3b01fea220
|
[
"BSD-2-Clause"
] | null | null | null |
src/fidibot.py
|
freestyl3r/fidibot
|
b40f7663a1f8ff6be8fb19c63b835a3b01fea220
|
[
"BSD-2-Clause"
] | null | null | null |
src/fidibot.py
|
freestyl3r/fidibot
|
b40f7663a1f8ff6be8fb19c63b835a3b01fea220
|
[
"BSD-2-Clause"
] | null | null | null |
#! /usr/bin/env python
#
# Author: Nick Raptis <airscorp@gmail.com>
import argparse
import irc.bot
from irc.strings import lower
from logsetup import setup_logging
from modules import activate_modules
from alternatives import alternatives, read_files, _
import logging
log = logging.getLogger(__name__)
# set unicode decoding to replace errors
from irc.buffer import DecodingLineBuffer as DLB
DLB.errors = 'replace'
class FidiBot(irc.bot.SingleServerIRCBot):
def __init__(self, channel, nickname, server, port=6667, realname=None, password=''):
if channel[0] != "#":
# make sure channel starts with a #
channel = "#" + channel
self.channel = channel
self.realname = realname if realname else nickname
self.password = password
self.identified = False
self.alternatives = alternatives
# load modules
active_modules, active_alternatives = activate_modules()
self.modules = [m(self) for m in active_modules]
self.alternatives.merge_with(active_alternatives)
# add alternatives from directory
self.alternatives.merge_with(read_files())
self.alternatives.clean_duplicates()
super(FidiBot, self).__init__([(server, port)], nickname, realname)
def on_nicknameinuse(self, c, e):
c.nick(c.get_nickname() + "_")
def on_welcome(self, c, e):
c.join(self.channel)
def on_privnotice(self, c, e):
if e.source.nick == "NickServ":
if "NickServ identify" in e.arguments[0]:
log.debug("NickServ asks us to identify")
if self.password:
log.info("Sending password to NickServ")
c.privmsg("NickServ", "identify " + self.password)
else:
log.warning("We were asked to identify but we have no password")
elif "You are now identified" in e.arguments[0]:
log.debug("We are now identified with NickServ")
self.identified = True
elif "Invalid password" in e.arguments[0]:
log.error("Invalid password! Check your settings!")
def on_privmsg(self, c, e):
# first try to defer the message to the active modules
for m in self.modules:
if m.on_privmsg(c, e):
return
# default behaviour if no module processes the message.
command = lower(e.arguments[0].split(" ", 1)[0])
if "fidi" in command:
# maybe someone is calling us by name?
c.privmsg(e.source.nick, _("You don't have to call me by name in private"))
return
log.debug("Failed to understand private message '%s' from user %s",
e.arguments[0], e.source.nick)
c.privmsg(e.source.nick, _("I don't understand %s") % command)
def on_pubmsg(self, c, e):
# first try to defer the message to the active modules
for m in self.modules:
if m.on_pubmsg(c, e):
return
# default behaviour if no module processes the message.
if "fidi" in lower(e.arguments[0]):
log.debug("Failed to understand public message '%s' from user %s",
e.arguments[0], e.source.nick)
c.privmsg(e.target, _("Someone talking about me? Duh!"))
def on_join(self, c, e):
nick = e.source.nick
if not nick == c.get_nickname():
c.privmsg(e.target, _("Welcome %s") % nick)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('server', help="Server to connect to")
parser.add_argument('channel', help="Channel to join. Prepending with # is optional")
parser.add_argument('nickname', help="Nickname to use")
parser.add_argument('-r', '--realname', help="Real name to use. Defaults to nickname")
parser.add_argument('-x', '--password', help="Password to authenticate with NickServ")
parser.add_argument('-p', '--port', default=6667, type=int, help="Connect to port")
return parser.parse_args()
def main():
args = get_args()
setup_logging()
bot = FidiBot(args.channel, args.nickname, args.server, args.port,
                  realname=args.realname, password=args.password)
bot.start()
if __name__ == "__main__":
main()
| 37.869565
| 90
| 0.61837
|
6be28b5fb2b73a9ec6fdd79de7f1ea02556531fa
| 329
|
py
|
Python
|
raven/utils/testutils.py
|
blueyed/raven-python
|
1c57b6e129771ceb2faef16d239e33fea45c6293
|
[
"BSD-3-Clause"
] | 1
|
2020-04-13T02:43:06.000Z
|
2020-04-13T02:43:06.000Z
|
raven/utils/testutils.py
|
blueyed/raven-python
|
1c57b6e129771ceb2faef16d239e33fea45c6293
|
[
"BSD-3-Clause"
] | null | null | null |
raven/utils/testutils.py
|
blueyed/raven-python
|
1c57b6e129771ceb2faef16d239e33fea45c6293
|
[
"BSD-3-Clause"
] | null | null | null |
"""
raven.utils.testutils
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from exam import Exam
from .compat import TestCase as BaseTestCase
class TestCase(Exam, BaseTestCase):
pass
| 19.352941
| 75
| 0.711246
|
2966dc0143757c5e01d967978d1b307f3cc66b9a
| 666
|
py
|
Python
|
Electro.py
|
razbiralochka/SolarResearchCubeSat
|
841db825dc583e7d5884c3f09aa309cdfbdaf7a5
|
[
"MIT"
] | null | null | null |
Electro.py
|
razbiralochka/SolarResearchCubeSat
|
841db825dc583e7d5884c3f09aa309cdfbdaf7a5
|
[
"MIT"
] | null | null | null |
Electro.py
|
razbiralochka/SolarResearchCubeSat
|
841db825dc583e7d5884c3f09aa309cdfbdaf7a5
|
[
"MIT"
] | null | null | null |
import math as m
import numpy as np
import matplotlib.pyplot as plt
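# Sketch of what follows (interpretation inferred from the code, not stated in
# the source): E looks like a source voltage, C a charge counter and U(C) a
# terminal-voltage model. The first while loop raises C at rate (E - U(C))/10
# per step of size h until C reaches 3600, the second lowers it at rate
# (E - U(C))/10 - 0.25 until C drops back to 2500, and the pair of loops is
# then repeated once more; T and Cap record the trajectory plotted at the end.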
E=7.4
C=2500
def U(C):
if C < 3600:
return 7.4*(1-m.exp(-0.001*C))
else:
return 7.4
t=0
h=10
T=[]
Cap=[]
while C < 3600:
T.append(t)
Cap.append(C)
C = C + ((E-U(C))/10) * h
t = t + h
while C > 2500:
T.append(t)
Cap.append(C)
C = C + (((E-U(C))/10)-0.25) * h
t = t + h
while C < 3600:
T.append(t)
Cap.append(C)
C = C + ((E-U(C))/10) * h
t = t + h
while C > 2500:
T.append(t)
Cap.append(C)
C = C + (((E-U(C))/10)-0.25) * h
t = t + h
plt.plot(T, Cap)
plt.show()  # render the plot when run as a plain script (no effect in inline/interactive backends)
| 14.478261
| 39
| 0.430931
|
10a851a1e670e5ddd373a4037edd8dd643656749
| 5,946
|
py
|
Python
|
tests/test_auth.py
|
latuannetnam/aiosmtplib
|
89433f33341441cab3929b20575b0b828b7ba46e
|
[
"MIT"
] | null | null | null |
tests/test_auth.py
|
latuannetnam/aiosmtplib
|
89433f33341441cab3929b20575b0b828b7ba46e
|
[
"MIT"
] | null | null | null |
tests/test_auth.py
|
latuannetnam/aiosmtplib
|
89433f33341441cab3929b20575b0b828b7ba46e
|
[
"MIT"
] | 1
|
2020-12-02T16:08:13.000Z
|
2020-12-02T16:08:13.000Z
|
import base64
from collections import deque
import pytest
from aiosmtplib.auth import SMTPAuth, crammd5_verify
from aiosmtplib.errors import SMTPAuthenticationError, SMTPException
from aiosmtplib.response import SMTPResponse
from aiosmtplib.status import SMTPStatus
pytestmark = pytest.mark.asyncio()
SUCCESS_RESPONSE = SMTPResponse(SMTPStatus.auth_successful, "OK")
FAILURE_RESPONSE = SMTPResponse(SMTPStatus.auth_failed, "Nope")
class DummySMTPAuth(SMTPAuth):
transport = None
def __init__(self):
super().__init__()
self.received_commands = []
self.responses = deque()
self.esmtp_extensions = {"auth": ""}
self.server_auth_methods = ["cram-md5", "login", "plain"]
self.supports_esmtp = True
async def execute_command(self, *args, **kwargs):
self.received_commands.append(b" ".join(args))
response = self.responses.popleft()
return SMTPResponse(*response)
async def _ehlo_or_helo_if_needed(self):
pass
@pytest.fixture()
def mock_auth(request):
return DummySMTPAuth()
async def test_login_without_extension_raises_error(mock_auth):
mock_auth.esmtp_extensions = {}
with pytest.raises(SMTPException) as excinfo:
await mock_auth.login("username", "bogus")
assert "Try connecting via TLS" not in excinfo.value.args[0]
async def test_login_unknown_method_raises_error(mock_auth):
mock_auth.AUTH_METHODS = ("fakeauth",)
mock_auth.server_auth_methods = ["fakeauth"]
with pytest.raises(RuntimeError):
await mock_auth.login("username", "bogus")
async def test_login_without_method_raises_error(mock_auth):
mock_auth.server_auth_methods = []
with pytest.raises(SMTPException):
await mock_auth.login("username", "bogus")
async def test_login_tries_all_methods(mock_auth):
responses = [
FAILURE_RESPONSE, # CRAM-MD5
FAILURE_RESPONSE, # PLAIN
(SMTPStatus.auth_continue, "VXNlcm5hbWU6"), # LOGIN continue
SUCCESS_RESPONSE, # LOGIN success
]
mock_auth.responses.extend(responses)
await mock_auth.login("username", "thirdtimelucky")
async def test_login_all_methods_fail_raises_error(mock_auth):
responses = [
FAILURE_RESPONSE, # CRAM-MD5
FAILURE_RESPONSE, # PLAIN
FAILURE_RESPONSE, # LOGIN
]
mock_auth.responses.extend(responses)
with pytest.raises(SMTPAuthenticationError):
await mock_auth.login("username", "bogus")
@pytest.mark.parametrize(
"username,password",
[("test", "test"), ("admin124", "$3cr3t$")],
ids=["test user", "admin user"],
)
async def test_auth_plain_success(mock_auth, username, password):
"""
Check that auth_plain base64 encodes the username/password given.
"""
mock_auth.responses.append(SUCCESS_RESPONSE)
await mock_auth.auth_plain(username, password)
b64data = base64.b64encode(
b"\0" + username.encode("ascii") + b"\0" + password.encode("ascii")
)
assert mock_auth.received_commands == [b"AUTH PLAIN " + b64data]
async def test_auth_plain_error(mock_auth):
mock_auth.responses.append(FAILURE_RESPONSE)
with pytest.raises(SMTPAuthenticationError):
await mock_auth.auth_plain("username", "bogus")
@pytest.mark.parametrize(
"username,password",
[("test", "test"), ("admin124", "$3cr3t$")],
ids=["test user", "admin user"],
)
async def test_auth_login_success(mock_auth, username, password):
continue_response = (SMTPStatus.auth_continue, "VXNlcm5hbWU6")
mock_auth.responses.extend([continue_response, SUCCESS_RESPONSE])
await mock_auth.auth_login(username, password)
b64username = base64.b64encode(username.encode("ascii"))
b64password = base64.b64encode(password.encode("ascii"))
assert mock_auth.received_commands == [b"AUTH LOGIN " + b64username, b64password]
async def test_auth_login_error(mock_auth):
mock_auth.responses.append(FAILURE_RESPONSE)
with pytest.raises(SMTPAuthenticationError):
await mock_auth.auth_login("username", "bogus")
async def test_auth_plain_continue_error(mock_auth):
continue_response = (SMTPStatus.auth_continue, "VXNlcm5hbWU6")
mock_auth.responses.extend([continue_response, FAILURE_RESPONSE])
with pytest.raises(SMTPAuthenticationError):
await mock_auth.auth_login("username", "bogus")
@pytest.mark.parametrize(
"username,password",
[("test", "test"), ("admin124", "$3cr3t$")],
ids=["test user", "admin user"],
)
async def test_auth_crammd5_success(mock_auth, username, password):
continue_response = (
SMTPStatus.auth_continue,
base64.b64encode(b"secretteststring").decode("ascii"),
)
mock_auth.responses.extend([continue_response, SUCCESS_RESPONSE])
await mock_auth.auth_crammd5(username, password)
password_bytes = password.encode("ascii")
username_bytes = username.encode("ascii")
response_bytes = continue_response[1].encode("ascii")
expected_command = crammd5_verify(username_bytes, password_bytes, response_bytes)
assert mock_auth.received_commands == [b"AUTH CRAM-MD5", expected_command]
async def test_auth_crammd5_initial_error(mock_auth):
mock_auth.responses.append(FAILURE_RESPONSE)
with pytest.raises(SMTPAuthenticationError):
await mock_auth.auth_crammd5("username", "bogus")
async def test_auth_crammd5_continue_error(mock_auth):
continue_response = (SMTPStatus.auth_continue, "VXNlcm5hbWU6")
mock_auth.responses.extend([continue_response, FAILURE_RESPONSE])
with pytest.raises(SMTPAuthenticationError):
await mock_auth.auth_crammd5("username", "bogus")
async def test_login_without_starttls_exception(smtp_client, smtpd_server):
async with smtp_client:
with pytest.raises(SMTPException) as excinfo:
await smtp_client.login("test", "test")
assert "Try connecting via TLS" in excinfo.value.args[0]
| 30.96875
| 85
| 0.724016
|
0335b4025446fc709211f85d0080d4d01058a580
| 616
|
py
|
Python
|
leetcode/easy/same_tree/py/solution.py
|
lilsweetcaligula/Online-Judges
|
48454a8e6b5b86f80e89eca1b396480df8960cfd
|
[
"MIT"
] | null | null | null |
leetcode/easy/same_tree/py/solution.py
|
lilsweetcaligula/Online-Judges
|
48454a8e6b5b86f80e89eca1b396480df8960cfd
|
[
"MIT"
] | null | null | null |
leetcode/easy/same_tree/py/solution.py
|
lilsweetcaligula/Online-Judges
|
48454a8e6b5b86f80e89eca1b396480df8960cfd
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isSameTree(self, p, q):
"""
:type p: TreeNode
:type q: TreeNode
:rtype: bool
"""
        if p is None and q is None:
            return True
        if p is None or q is None:
            return False
if p.val != q.val:
return False
return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
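# Usage sketch (illustrative; assumes the TreeNode class from the comment
# above is actually defined by the judge environment):
#
#     a = TreeNode(1); a.left = TreeNode(2)
#     b = TreeNode(1); b.left = TreeNode(2)
#     Solution().isSameTree(a, b)  # -> True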
| 24.64
| 84
| 0.48539
|
0337794490e59afb0d50e7e70f8ff18f29c9d996
| 1,912
|
py
|
Python
|
Data Manipulation with pandas/Transforming-Data.py
|
shreejitverma/Data-Scientist
|
03c06936e957f93182bb18362b01383e5775ffb1
|
[
"MIT"
] | 2
|
2022-03-12T04:53:03.000Z
|
2022-03-27T12:39:21.000Z
|
Data Manipulation with pandas/Transforming-Data.py
|
shivaniverma1/Data-Scientist
|
f82939a411484311171465591455880c8e354750
|
[
"MIT"
] | null | null | null |
Data Manipulation with pandas/Transforming-Data.py
|
shivaniverma1/Data-Scientist
|
f82939a411484311171465591455880c8e354750
|
[
"MIT"
] | 2
|
2022-03-12T04:52:21.000Z
|
2022-03-27T12:45:32.000Z
|
# Import pandas using the alias pd
import pandas as pd
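# NOTE: the `homelessness` DataFrame used below is assumed to be pre-loaded
# (as in the DataCamp exercise environment); it is not read from a file here.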
# Print the head of the homelessness data
print(homelessness.head())
# Print the values of homelessness
print(homelessness.values)
# Print the column index of homelessness
print(homelessness.columns)
# Print the row index of homelessness
print(homelessness.index)
# Sort homelessness by individual
homelessness_ind = homelessness.sort_values('individuals')
# Print the top few rows
print(homelessness_ind.head())
# Select the individuals column
individuals = homelessness['individuals']
# Print the head of the result
print(individuals.head())
# Filter for rows where individuals is greater than 10000
ind_gt_10k = homelessness[homelessness['individuals'] > 10000]
# See the result
print(ind_gt_10k)
# Subset for rows in South Atlantic or Mid-Atlantic regions
south_mid_atlantic = homelessness[(homelessness['region'] == 'South Atlantic') | (
homelessness['region'] == 'Mid-Atlantic')]
# See the result
print(south_mid_atlantic)
# Add total col as sum of individuals and family_members
homelessness['total'] = homelessness['individuals']+homelessness['family_members']
# Add p_individuals col as proportion of individuals
homelessness['p_individuals'] = homelessness['individuals']/homelessness['total']
# See the result
print(homelessness)
# Create indiv_per_10k col as homeless individuals per 10k state pop
homelessness["indiv_per_10k"] = 10000 * \
((homelessness['individuals']) / (homelessness['state_pop']))
# Subset rows for indiv_per_10k greater than 20
high_homelessness = homelessness[homelessness['indiv_per_10k'] > 20]
# Sort high_homelessness by descending indiv_per_10k
high_homelessness_srt = high_homelessness.sort_values(
'indiv_per_10k', ascending=False)
# From high_homelessness_srt, select the state and indiv_per_10k cols
result = high_homelessness_srt[['state', 'indiv_per_10k']]
# See the result
print(result)
| 26.929577
| 82
| 0.780335
|
d9339bd134712d1c68c4eba371900ca3a83f84ca
| 84
|
py
|
Python
|
PCTC/2022 R0/Q8.py
|
object-oriented-human/competitive
|
9e761020e887d8980a39a64eeaeaa39af0ecd777
|
[
"MIT"
] | 1
|
2022-02-21T15:43:01.000Z
|
2022-02-21T15:43:01.000Z
|
PCTC/2022 R0/Q8.py
|
foooop/competitive
|
9e761020e887d8980a39a64eeaeaa39af0ecd777
|
[
"MIT"
] | null | null | null |
PCTC/2022 R0/Q8.py
|
foooop/competitive
|
9e761020e887d8980a39a64eeaeaa39af0ecd777
|
[
"MIT"
] | null | null | null |
s, n = int(input()), int(input())
while n:
s *= 2
s += 1
n -= 1
print(s)
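# Note: the loop applies s -> 2*s + 1 exactly n times, so the result is
# equivalent to the closed form (s0 + 1) * 2**n - 1, where s0 is the value
# of s read from input.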
| 14
| 33
| 0.428571
|
d342489cb3a075d75aa28dba608d8d9dacc6f9e3
| 3,094
|
gyp
|
Python
|
ion/ion.gyp
|
isabella232/ion-1
|
ef47f3b824050499ce5c6f774b366f6c4dbce0af
|
[
"Apache-2.0"
] | 1
|
2020-03-12T12:49:31.000Z
|
2020-03-12T12:49:31.000Z
|
ion/ion.gyp
|
isabella232/ion-1
|
ef47f3b824050499ce5c6f774b366f6c4dbce0af
|
[
"Apache-2.0"
] | null | null | null |
ion/ion.gyp
|
isabella232/ion-1
|
ef47f3b824050499ce5c6f774b366f6c4dbce0af
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Declares demos, all_public_libraries and all_tests aggregate targets.
#
# The targets in this file are here just to serve as groupings, so that "all of
# Ion" can be built by pointing gyp to this file. Do NOT depend on the targets
# in this file to build your Ion-dependent thing, point to the individual Ion
# library targets you need.
{
'includes': [
'common.gypi',
],
'targets': [
{
'target_name': 'demos',
'type': 'none',
'conditions': [
['OS != "qnx"', {
'dependencies' : [
'demos/demos.gyp:*',
],
}],
], # conditions
},
{
'target_name': 'examples',
'type': 'none',
'conditions': [
# Examples build only on systems with FreeGLUT
['OS in ["linux", "win"]', {
'dependencies' : [
'doc/usersguide/examples/examples.gyp:*',
],
}],
], # conditions
},
{
# NOTE: Do not depend on this target! See note above.
'target_name': 'all_public_libraries',
'type': 'none',
'dependencies' : [
'analytics/analytics.gyp:ionanalytics',
'base/base.gyp:ionbase',
'external/external.gyp:*',
'external/freetype2.gyp:ionfreetype2',
'external/imagecompression.gyp:ionimagecompression',
'gfx/gfx.gyp:iongfx',
'gfxprofile/gfxprofile.gyp:iongfxprofile',
'gfxutils/gfxutils.gyp:iongfxutils',
'image/image.gyp:ionimage',
'math/math.gyp:ionmath',
'portgfx/portgfx.gyp:ionportgfx',
'port/port.gyp:ionport',
'profile/profile.gyp:ionprofile',
'remote/remote.gyp:ionremote',
'text/text.gyp:iontext',
],
},
{
'target_name': 'all_tests',
'type': 'none',
'dependencies' : [
'analytics/tests/analytics_tests.gyp:ionanalytics_test',
'base/tests/base_tests.gyp:ionbase_test',
'gfx/tests/gfx_tests.gyp:iongfx_test',
'gfxprofile/tests/gfxprofile_tests.gyp:iongfxprofile_test',
'gfxutils/tests/gfxutils_tests.gyp:iongfxutils_test',
'image/tests/image_tests.gyp:ionimage_test',
'math/tests/math_tests.gyp:ionmath_test',
'port/tests/port_tests.gyp:ionport_test',
'portgfx/tests/portgfx_tests.gyp:ionportgfx_test',
'profile/tests/profile_tests.gyp:ionprofile_test',
'remote/tests/remote_tests.gyp:ionremote_test',
'text/tests/text_tests.gyp:iontext_test',
],
},
],
}
| 33.268817
| 79
| 0.628959
|
2619347060d6df394660e61a24ecafcf10e71980
| 3,037
|
py
|
Python
|
contrib/linearize/linearize-hashes.py
|
ToranTeam/NewToran
|
ba40d8884f6f3e0d3aa7a0eb54ada7c6a21e2642
|
[
"MIT"
] | null | null | null |
contrib/linearize/linearize-hashes.py
|
ToranTeam/NewToran
|
ba40d8884f6f3e0d3aa7a0eb54ada7c6a21e2642
|
[
"MIT"
] | null | null | null |
contrib/linearize/linearize-hashes.py
|
ToranTeam/NewToran
|
ba40d8884f6f3e0d3aa7a0eb54ada7c6a21e2642
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 21206
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
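# Example CONFIG-FILE contents (illustrative values only; the keys mirror the
# defaults parsed above):
#
#     host=127.0.0.1
#     port=21206
#     rpcuser=<rpc username>
#     rpcpassword=<rpc password>
#     min_height=0
#     max_height=313000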
| 26.640351
| 90
| 0.682581
|
fdea05dfed30724a56938efa5ddfe4dfb23a77a5
| 242,217
|
py
|
Python
|
mem.py
|
sushmitajaiswal/PythonPrograms
|
d4fb1b36953185e2f8dd866798ca6965a52563a9
|
[
"MIT"
] | null | null | null |
mem.py
|
sushmitajaiswal/PythonPrograms
|
d4fb1b36953185e2f8dd866798ca6965a52563a9
|
[
"MIT"
] | null | null | null |
mem.py
|
sushmitajaiswal/PythonPrograms
|
d4fb1b36953185e2f8dd866798ca6965a52563a9
|
[
"MIT"
] | null | null | null |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<link rel="dns-prefetch" href="https://github.githubassets.com">
<link rel="dns-prefetch" href="https://avatars0.githubusercontent.com">
<link rel="dns-prefetch" href="https://avatars1.githubusercontent.com">
<link rel="dns-prefetch" href="https://avatars2.githubusercontent.com">
<link rel="dns-prefetch" href="https://avatars3.githubusercontent.com">
<link rel="dns-prefetch" href="https://github-cloud.s3.amazonaws.com">
<link rel="dns-prefetch" href="https://user-images.githubusercontent.com/">
<link crossorigin="anonymous" media="all" integrity="sha512-ZUjVod2EvYMDbGqRSyW0rpfgBq3i+gnR/4PfrzLsy5f20oIcRfgFQFVKgi3Ztp917bP1K/kdP5q8+nAlJ3+cFA==" rel="stylesheet" href="https://github.githubassets.com/assets/frameworks-6548d5a1dd84bd83036c6a914b25b4ae.css" />
<link crossorigin="anonymous" media="all" integrity="sha512-HNV7VuXI98SSFimaE7r8eVsgrmzC37AMNXw1GrNbOl4wN3ATov+SdHusRTAAf4NPoRsUcOOQyYsOCA0AcN4m0w==" rel="stylesheet" href="https://github.githubassets.com/assets/site-1cd57b56e5c8f7c49216299a13bafc79.css" />
<link crossorigin="anonymous" media="all" integrity="sha512-kgNHMdRXa6pI3NCt6PRO0KdcrXha+kygVWwBGx0GGDhYKZ+Q9GZOKDdgpPQIvE2KXeBX5bAZfNTR17MnHvMKoA==" rel="stylesheet" href="https://github.githubassets.com/assets/github-92034731d4576baa48dcd0ade8f44ed0.css" />
<meta name="viewport" content="width=device-width">
<title>MyDotfiles/mem.py at master · Sup3r-Us3r/MyDotfiles · GitHub</title>
<meta name="description" content="Dotfiles. Contribute to Sup3r-Us3r/MyDotfiles development by creating an account on GitHub.">
<link rel="search" type="application/opensearchdescription+xml" href="/opensearch.xml" title="GitHub">
<link rel="fluid-icon" href="https://github.com/fluidicon.png" title="GitHub">
<meta property="fb:app_id" content="1401488693436528">
<meta name="twitter:image:src" content="https://avatars1.githubusercontent.com/u/22561893?s=400&v=4" /><meta name="twitter:site" content="@github" /><meta name="twitter:card" content="summary" /><meta name="twitter:title" content="Sup3r-Us3r/MyDotfiles" /><meta name="twitter:description" content="Dotfiles. Contribute to Sup3r-Us3r/MyDotfiles development by creating an account on GitHub." />
<meta property="og:image" content="https://avatars1.githubusercontent.com/u/22561893?s=400&v=4" /><meta property="og:site_name" content="GitHub" /><meta property="og:type" content="object" /><meta property="og:title" content="Sup3r-Us3r/MyDotfiles" /><meta property="og:url" content="https://github.com/Sup3r-Us3r/MyDotfiles" /><meta property="og:description" content="Dotfiles. Contribute to Sup3r-Us3r/MyDotfiles development by creating an account on GitHub." />
<link rel="assets" href="https://github.githubassets.com/">
<meta name="request-id" content="A7DC:78C1:2E6C5A:40D41C:5E4639E1" data-pjax-transient="true"/><meta name="html-safe-nonce" content="bbfafec56352805a03b1e2ba24a89016164b55dd" data-pjax-transient="true"/><meta name="visitor-payload" content="eyJyZWZlcnJlciI6IiIsInJlcXVlc3RfaWQiOiJBN0RDOjc4QzE6MkU2QzVBOjQwRDQxQzo1RTQ2MzlFMSIsInZpc2l0b3JfaWQiOiI2OTY2OTE3MzQzNDkwNzUwOTQ1IiwicmVnaW9uX2VkZ2UiOiJhcC1zb3V0aC0xIiwicmVnaW9uX3JlbmRlciI6ImFwLXNvdXRoLTEifQ==" data-pjax-transient="true"/><meta name="visitor-hmac" content="5cf8117ce1f8b4162617b99cbcc9c1573676d6dfbe0eb76a5ea4b2f8597cb52e" data-pjax-transient="true"/>
<meta name="github-keyboard-shortcuts" content="repository,source-code" data-pjax-transient="true" />
<meta name="selected-link" value="repo_source" data-pjax-transient>
<meta name="google-site-verification" content="KT5gs8h0wvaagLKAVWq8bbeNwnZZK1r1XQysX3xurLU">
<meta name="google-site-verification" content="ZzhVyEFwb7w3e0-uOTltm8Jsck2F5StVihD0exw2fsA">
<meta name="google-site-verification" content="GXs5KoUUkNCoaAZn7wPN-t01Pywp9M3sEjnt_3_ZWPc">
<meta name="octolytics-host" content="collector.githubapp.com" /><meta name="octolytics-app-id" content="github" /><meta name="octolytics-event-url" content="https://collector.githubapp.com/github-external/browser_event" /><meta name="octolytics-dimension-ga_id" content="" class="js-octo-ga-id" />
<meta name="analytics-location" content="/<user-name>/<repo-name>/blob/show" data-pjax-transient="true" />
<meta name="google-analytics" content="UA-3769691-2">
<meta class="js-ga-set" name="dimension1" content="Logged Out">
<meta name="hostname" content="github.com">
<meta name="user-login" content="">
<meta name="expected-hostname" content="github.com">
<meta name="enabled-features" content="MARKETPLACE_FEATURED_BLOG_POSTS,MARKETPLACE_INVOICED_BILLING,MARKETPLACE_SOCIAL_PROOF_CUSTOMERS,MARKETPLACE_TRENDING_SOCIAL_PROOF,MARKETPLACE_RECOMMENDATIONS,MARKETPLACE_PENDING_INSTALLATIONS,RELATED_ISSUES">
<meta http-equiv="x-pjax-version" content="e38d37e0e30273d41e1927f125d1e44c">
<link href="https://github.com/Sup3r-Us3r/MyDotfiles/commits/master.atom" rel="alternate" title="Recent Commits to MyDotfiles:master" type="application/atom+xml">
<meta name="go-import" content="github.com/Sup3r-Us3r/MyDotfiles git https://github.com/Sup3r-Us3r/MyDotfiles.git">
<meta name="octolytics-dimension-user_id" content="22561893" /><meta name="octolytics-dimension-user_login" content="Sup3r-Us3r" /><meta name="octolytics-dimension-repository_id" content="73324881" /><meta name="octolytics-dimension-repository_nwo" content="Sup3r-Us3r/MyDotfiles" /><meta name="octolytics-dimension-repository_public" content="true" /><meta name="octolytics-dimension-repository_is_fork" content="false" /><meta name="octolytics-dimension-repository_network_root_id" content="73324881" /><meta name="octolytics-dimension-repository_network_root_nwo" content="Sup3r-Us3r/MyDotfiles" /><meta name="octolytics-dimension-repository_explore_github_marketplace_ci_cta_shown" content="false" />
<link rel="canonical" href="https://github.com/Sup3r-Us3r/MyDotfiles/blob/master/scripts/mem.py" data-pjax-transient>
<meta name="browser-stats-url" content="https://api.github.com/_private/browser/stats">
<meta name="browser-errors-url" content="https://api.github.com/_private/browser/errors">
<link rel="mask-icon" href="https://github.githubassets.com/pinned-octocat.svg" color="#000000">
<link rel="icon" type="image/x-icon" class="js-site-favicon" href="https://github.githubassets.com/favicon.ico">
<meta name="theme-color" content="#1e2327">
<link rel="manifest" href="/manifest.json" crossOrigin="use-credentials">
</head>
<body class="logged-out env-production page-responsive page-blob">
<div class="position-relative js-header-wrapper ">
<a href="#start-of-content" tabindex="1" class="px-2 py-4 bg-blue text-white show-on-focus js-skip-to-content">Skip to content</a>
<span class="Progress progress-pjax-loader position-fixed width-full js-pjax-loader-bar">
<span class="progress-pjax-loader-bar top-0 left-0" style="width: 0%;"></span>
</span>
<header class="Header-old header-logged-out js-details-container Details position-relative f4 py-2" role="banner">
<div class="container-lg d-lg-flex flex-items-center p-responsive">
<div class="d-flex flex-justify-between flex-items-center">
<a class="mr-4" href="https://github.com/" aria-label="Homepage" data-ga-click="(Logged out) Header, go to homepage, icon:logo-wordmark">
<svg height="32" class="octicon octicon-mark-github text-white" viewBox="0 0 16 16" version="1.1" width="32" aria-hidden="true"><path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z"/></svg>
</a>
<div class="d-lg-none css-truncate css-truncate-target width-fit p-2">
<svg class="octicon octicon-repo" viewBox="0 0 12 16" version="1.1" width="12" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M4 9H3V8h1v1zm0-3H3v1h1V6zm0-2H3v1h1V4zm0-2H3v1h1V2zm8-1v12c0 .55-.45 1-1 1H6v2l-1.5-1.5L3 16v-2H1c-.55 0-1-.45-1-1V1c0-.55.45-1 1-1h10c.55 0 1 .45 1 1zm-1 10H1v2h2v-1h3v1h5v-2zm0-10H2v9h9V1z"/></svg>
<a class="Header-link" href="/Sup3r-Us3r">Sup3r-Us3r</a>
/
<a class="Header-link" href="/Sup3r-Us3r/MyDotfiles">MyDotfiles</a>
</div>
<div class="d-flex flex-items-center">
<a href="/join?source=header-repo"
class="d-inline-block d-lg-none f5 text-white no-underline border border-gray-dark rounded-2 px-2 py-1 mr-3 mr-sm-5"
data-hydro-click="{"event_type":"authentication.click","payload":{"location_in_page":"site header","repository_id":null,"auth_type":"SIGN_UP","originating_url":"https://github.com/Sup3r-Us3r/MyDotfiles/blob/master/scripts/mem.py","user_id":null}}" data-hydro-click-hmac="468e14862de86158aa7a9b2b422a8a1afb2a0a4b623aef5bcb260e34ae7cd5bf"
data-ga-click="(Logged out) Header, clicked Sign up, text:sign-up">
Sign up
</a>
<button class="btn-link d-lg-none mt-1 js-details-target" type="button" aria-label="Toggle navigation" aria-expanded="false">
<svg height="24" class="octicon octicon-three-bars text-white" viewBox="0 0 12 16" version="1.1" width="18" aria-hidden="true"><path fill-rule="evenodd" d="M11.41 9H.59C0 9 0 8.59 0 8c0-.59 0-1 .59-1H11.4c.59 0 .59.41.59 1 0 .59 0 1-.59 1h.01zm0-4H.59C0 5 0 4.59 0 4c0-.59 0-1 .59-1H11.4c.59 0 .59.41.59 1 0 .59 0 1-.59 1h.01zM.59 11H11.4c.59 0 .59.41.59 1 0 .59 0 1-.59 1H.59C0 13 0 12.59 0 12c0-.59 0-1 .59-1z"/></svg>
</button>
</div>
</div>
<div class="HeaderMenu HeaderMenu--logged-out position-fixed top-0 right-0 bottom-0 height-fit position-lg-relative d-lg-flex flex-justify-between flex-items-center flex-auto">
<div class="d-flex d-lg-none flex-justify-end border-bottom bg-gray-light p-3">
<button class="btn-link js-details-target" type="button" aria-label="Toggle navigation" aria-expanded="false">
<svg height="24" class="octicon octicon-x text-gray" viewBox="0 0 12 16" version="1.1" width="18" aria-hidden="true"><path fill-rule="evenodd" d="M7.48 8l3.75 3.75-1.48 1.48L6 9.48l-3.75 3.75-1.48-1.48L4.52 8 .77 4.25l1.48-1.48L6 6.52l3.75-3.75 1.48 1.48L7.48 8z"/></svg>
</button>
</div>
<nav class="mt-0 px-3 px-lg-0 mb-5 mb-lg-0" aria-label="Global">
<ul class="d-lg-flex list-style-none">
<li class="d-block d-lg-flex flex-lg-nowrap flex-lg-items-center border-bottom border-lg-bottom-0 mr-0 mr-lg-3 edge-item-fix position-relative flex-wrap flex-justify-between d-flex flex-items-center ">
<details class="HeaderMenu-details details-overlay details-reset width-full">
<summary class="HeaderMenu-summary HeaderMenu-link px-0 py-3 border-0 no-wrap d-block d-lg-inline-block">
Why GitHub?
<svg x="0px" y="0px" viewBox="0 0 14 8" xml:space="preserve" fill="none" class="icon-chevon-down-mktg position-absolute position-lg-relative">
<path d="M1,1l6.2,6L13,1"></path>
</svg>
</summary>
<div class="dropdown-menu flex-auto rounded-1 bg-white px-0 mt-0 pb-4 p-lg-4 position-relative position-lg-absolute left-0 left-lg-n4">
<a href="/features" class="py-2 lh-condensed-ultra d-block link-gray-dark no-underline h5 Bump-link--hover" data-ga-click="(Logged out) Header, go to Features">Features <span class="Bump-link-symbol float-right text-normal text-gray-light">→</span></a>
<ul class="list-style-none f5 pb-3">
<li class="edge-item-fix"><a href="/features/code-review/" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to Code review">Code review</a></li>
<li class="edge-item-fix"><a href="/features/project-management/" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to Project management">Project management</a></li>
<li class="edge-item-fix"><a href="/features/integrations" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to Integrations">Integrations</a></li>
<li class="edge-item-fix"><a href="/features/actions" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to Actions">Actions</a></li>
<li class="edge-item-fix"><a href="/features/packages" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to GitHub Packages">Packages</a></li>
<li class="edge-item-fix"><a href="/features/security" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to Security">Security</a></li>
<li class="edge-item-fix"><a href="/features#team-management" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to Team management">Team management</a></li>
<li class="edge-item-fix"><a href="/features#hosting" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to Code hosting">Hosting</a></li>
</ul>
<ul class="list-style-none mb-0 border-lg-top pt-lg-3">
<li class="edge-item-fix"><a href="/customer-stories" class="py-2 lh-condensed-ultra d-block no-underline link-gray-dark no-underline h5 Bump-link--hover" data-ga-click="(Logged out) Header, go to Customer stories">Customer stories <span class="Bump-link-symbol float-right text-normal text-gray-light">→</span></a></li>
<li class="edge-item-fix"><a href="/security" class="py-2 lh-condensed-ultra d-block no-underline link-gray-dark no-underline h5 Bump-link--hover" data-ga-click="(Logged out) Header, go to Security">Security <span class="Bump-link-symbol float-right text-normal text-gray-light">→</span></a></li>
</ul>
</div>
</details>
</li>
<li class="border-bottom border-lg-bottom-0 mr-0 mr-lg-3">
<a href="/enterprise" class="HeaderMenu-link no-underline py-3 d-block d-lg-inline-block" data-ga-click="(Logged out) Header, go to Enterprise">Enterprise</a>
</li>
<li class="d-block d-lg-flex flex-lg-nowrap flex-lg-items-center border-bottom border-lg-bottom-0 mr-0 mr-lg-3 edge-item-fix position-relative flex-wrap flex-justify-between d-flex flex-items-center ">
<details class="HeaderMenu-details details-overlay details-reset width-full">
<summary class="HeaderMenu-summary HeaderMenu-link px-0 py-3 border-0 no-wrap d-block d-lg-inline-block">
Explore
<svg x="0px" y="0px" viewBox="0 0 14 8" xml:space="preserve" fill="none" class="icon-chevon-down-mktg position-absolute position-lg-relative">
<path d="M1,1l6.2,6L13,1"></path>
</svg>
</summary>
<div class="dropdown-menu flex-auto rounded-1 bg-white px-0 pt-2 pb-0 mt-0 pb-4 p-lg-4 position-relative position-lg-absolute left-0 left-lg-n4">
<ul class="list-style-none mb-3">
<li class="edge-item-fix"><a href="/explore" class="py-2 lh-condensed-ultra d-block link-gray-dark no-underline h5 Bump-link--hover" data-ga-click="(Logged out) Header, go to Explore">Explore GitHub <span class="Bump-link-symbol float-right text-normal text-gray-light">→</span></a></li>
</ul>
<h4 class="text-gray-light text-normal text-mono f5 mb-2 border-lg-top pt-lg-3">Learn & contribute</h4>
<ul class="list-style-none mb-3">
<li class="edge-item-fix"><a href="/topics" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to Topics">Topics</a></li>
<li class="edge-item-fix"><a href="/collections" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to Collections">Collections</a></li>
<li class="edge-item-fix"><a href="/trending" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to Trending">Trending</a></li>
<li class="edge-item-fix"><a href="https://lab.github.com/" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to Learning lab">Learning Lab</a></li>
<li class="edge-item-fix"><a href="https://opensource.guide" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to Open source guides">Open source guides</a></li>
</ul>
<h4 class="text-gray-light text-normal text-mono f5 mb-2 border-lg-top pt-lg-3">Connect with others</h4>
<ul class="list-style-none mb-0">
<li class="edge-item-fix"><a href="https://github.com/events" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to Events">Events</a></li>
<li class="edge-item-fix"><a href="https://github.community" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to Community forum">Community forum</a></li>
<li class="edge-item-fix"><a href="https://education.github.com" class="py-2 pb-0 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to GitHub Education">GitHub Education</a></li>
</ul>
</div>
</details>
</li>
<li class="border-bottom border-lg-bottom-0 mr-0 mr-lg-3">
<a href="/marketplace" class="HeaderMenu-link no-underline py-3 d-block d-lg-inline-block" data-ga-click="(Logged out) Header, go to Marketplace">Marketplace</a>
</li>
<li class="d-block d-lg-flex flex-lg-nowrap flex-lg-items-center border-bottom border-lg-bottom-0 mr-0 mr-lg-3 edge-item-fix position-relative flex-wrap flex-justify-between d-flex flex-items-center ">
<details class="HeaderMenu-details details-overlay details-reset width-full">
<summary class="HeaderMenu-summary HeaderMenu-link px-0 py-3 border-0 no-wrap d-block d-lg-inline-block">
Pricing
<svg x="0px" y="0px" viewBox="0 0 14 8" xml:space="preserve" fill="none" class="icon-chevon-down-mktg position-absolute position-lg-relative">
<path d="M1,1l6.2,6L13,1"></path>
</svg>
</summary>
<div class="dropdown-menu flex-auto rounded-1 bg-white px-0 pt-2 pb-4 mt-0 p-lg-4 position-relative position-lg-absolute left-0 left-lg-n4">
<a href="/pricing" class="pb-2 lh-condensed-ultra d-block link-gray-dark no-underline h5 Bump-link--hover" data-ga-click="(Logged out) Header, go to Pricing">Plans <span class="Bump-link-symbol float-right text-normal text-gray-light">→</span></a>
<ul class="list-style-none mb-3">
<li class="edge-item-fix"><a href="/pricing#feature-comparison" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to Compare plans">Compare plans</a></li>
<li class="edge-item-fix"><a href="https://enterprise.github.com/contact" class="py-2 lh-condensed-ultra d-block link-gray no-underline f5" data-ga-click="(Logged out) Header, go to Contact Sales">Contact Sales</a></li>
</ul>
<ul class="list-style-none mb-0 border-lg-top pt-lg-3">
<li class="edge-item-fix"><a href="/nonprofit" class="py-2 lh-condensed-ultra d-block no-underline link-gray-dark no-underline h5 Bump-link--hover" data-ga-click="(Logged out) Header, go to Nonprofits">Nonprofit <span class="Bump-link-symbol float-right text-normal text-gray-light">→</span></a></li>
<li class="edge-item-fix"><a href="https://education.github.com" class="py-2 pb-0 lh-condensed-ultra d-block no-underline link-gray-dark no-underline h5 Bump-link--hover" data-ga-click="(Logged out) Header, go to Education">Education <span class="Bump-link-symbol float-right text-normal text-gray-light">→</span></a></li>
</ul>
</div>
</details>
</li>
</ul>
</nav>
<div class="d-lg-flex flex-items-center px-3 px-lg-0 text-center text-lg-left">
<div class="d-lg-flex mb-3 mb-lg-0">
<div class="header-search flex-self-stretch flex-lg-self-auto mr-0 mr-lg-3 mb-3 mb-lg-0 scoped-search site-scoped-search js-site-search position-relative js-jump-to"
role="combobox"
aria-owns="jump-to-results"
aria-label="Search or jump to"
aria-haspopup="listbox"
aria-expanded="false"
>
<div class="position-relative">
<!-- '"` --><!-- </textarea></xmp> --></option></form><form class="js-site-search-form" role="search" aria-label="Site" data-scope-type="Repository" data-scope-id="73324881" data-scoped-search-url="/Sup3r-Us3r/MyDotfiles/search" data-unscoped-search-url="/search" action="/Sup3r-Us3r/MyDotfiles/search" accept-charset="UTF-8" method="get"><input name="utf8" type="hidden" value="✓" />
<label class="form-control input-sm header-search-wrapper p-0 header-search-wrapper-jump-to position-relative d-flex flex-justify-between flex-items-center js-chromeless-input-container">
<input type="text"
class="form-control input-sm header-search-input jump-to-field js-jump-to-field js-site-search-focus js-site-search-field is-clearable"
data-hotkey="s,/"
name="q"
value=""
placeholder="Search"
data-unscoped-placeholder="Search GitHub"
data-scoped-placeholder="Search"
autocapitalize="off"
aria-autocomplete="list"
aria-controls="jump-to-results"
aria-label="Search"
data-jump-to-suggestions-path="/_graphql/GetSuggestedNavigationDestinations"
spellcheck="false"
autocomplete="off"
>
<input type="hidden" data-csrf="true" class="js-data-jump-to-suggestions-path-csrf" value="SnvTelZ9WGBCqJJ8rtU6s4T088A30rs0RhvLcjs7AQeTCN9hcal3iitFMm69bOZ21ilvbEqqlLqmKtJFkAH9TQ==" />
<input type="hidden" class="js-site-search-type-field" name="type" >
<img src="https://github.githubassets.com/images/search-key-slash.svg" alt="" class="mr-2 header-search-key-slash">
<div class="Box position-absolute overflow-hidden d-none jump-to-suggestions js-jump-to-suggestions-container">
<ul class="d-none js-jump-to-suggestions-template-container">
<li class="d-flex flex-justify-start flex-items-center p-0 f5 navigation-item js-navigation-item js-jump-to-suggestion" role="option">
<a tabindex="-1" class="no-underline d-flex flex-auto flex-items-center jump-to-suggestions-path js-jump-to-suggestion-path js-navigation-open p-2" href="">
<div class="jump-to-octicon js-jump-to-octicon flex-shrink-0 mr-2 text-center d-none">
<svg height="16" width="16" class="octicon octicon-repo flex-shrink-0 js-jump-to-octicon-repo d-none" title="Repository" aria-label="Repository" viewBox="0 0 12 16" version="1.1" role="img"><path fill-rule="evenodd" d="M4 9H3V8h1v1zm0-3H3v1h1V6zm0-2H3v1h1V4zm0-2H3v1h1V2zm8-1v12c0 .55-.45 1-1 1H6v2l-1.5-1.5L3 16v-2H1c-.55 0-1-.45-1-1V1c0-.55.45-1 1-1h10c.55 0 1 .45 1 1zm-1 10H1v2h2v-1h3v1h5v-2zm0-10H2v9h9V1z"/></svg>
<svg height="16" width="16" class="octicon octicon-project flex-shrink-0 js-jump-to-octicon-project d-none" title="Project" aria-label="Project" viewBox="0 0 15 16" version="1.1" role="img"><path fill-rule="evenodd" d="M10 12h3V2h-3v10zm-4-2h3V2H6v8zm-4 4h3V2H2v12zm-1 1h13V1H1v14zM14 0H1a1 1 0 00-1 1v14a1 1 0 001 1h13a1 1 0 001-1V1a1 1 0 00-1-1z"/></svg>
<svg height="16" width="16" class="octicon octicon-search flex-shrink-0 js-jump-to-octicon-search d-none" title="Search" aria-label="Search" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M15.7 13.3l-3.81-3.83A5.93 5.93 0 0013 6c0-3.31-2.69-6-6-6S1 2.69 1 6s2.69 6 6 6c1.3 0 2.48-.41 3.47-1.11l3.83 3.81c.19.2.45.3.7.3.25 0 .52-.09.7-.3a.996.996 0 000-1.41v.01zM7 10.7c-2.59 0-4.7-2.11-4.7-4.7 0-2.59 2.11-4.7 4.7-4.7 2.59 0 4.7 2.11 4.7 4.7 0 2.59-2.11 4.7-4.7 4.7z"/></svg>
</div>
<img class="avatar mr-2 flex-shrink-0 js-jump-to-suggestion-avatar d-none" alt="" aria-label="Team" src="" width="28" height="28">
<div class="jump-to-suggestion-name js-jump-to-suggestion-name flex-auto overflow-hidden text-left no-wrap css-truncate css-truncate-target">
</div>
<div class="border rounded-1 flex-shrink-0 bg-gray px-1 text-gray-light ml-1 f6 d-none js-jump-to-badge-search">
<span class="js-jump-to-badge-search-text-default d-none" aria-label="in this repository">
In this repository
</span>
<span class="js-jump-to-badge-search-text-global d-none" aria-label="in all of GitHub">
All GitHub
</span>
<span aria-hidden="true" class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
<div aria-hidden="true" class="border rounded-1 flex-shrink-0 bg-gray px-1 text-gray-light ml-1 f6 d-none d-on-nav-focus js-jump-to-badge-jump">
Jump to
<span class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
</a>
</li>
</ul>
<ul class="d-none js-jump-to-no-results-template-container">
<li class="d-flex flex-justify-center flex-items-center f5 d-none js-jump-to-suggestion p-2">
<span class="text-gray">No suggested jump to results</span>
</li>
</ul>
<ul id="jump-to-results" role="listbox" class="p-0 m-0 js-navigation-container jump-to-suggestions-results-container js-jump-to-suggestions-results-container">
<li class="d-flex flex-justify-start flex-items-center p-0 f5 navigation-item js-navigation-item js-jump-to-scoped-search d-none" role="option">
<a tabindex="-1" class="no-underline d-flex flex-auto flex-items-center jump-to-suggestions-path js-jump-to-suggestion-path js-navigation-open p-2" href="">
<div class="jump-to-octicon js-jump-to-octicon flex-shrink-0 mr-2 text-center d-none">
<svg height="16" width="16" class="octicon octicon-repo flex-shrink-0 js-jump-to-octicon-repo d-none" title="Repository" aria-label="Repository" viewBox="0 0 12 16" version="1.1" role="img"><path fill-rule="evenodd" d="M4 9H3V8h1v1zm0-3H3v1h1V6zm0-2H3v1h1V4zm0-2H3v1h1V2zm8-1v12c0 .55-.45 1-1 1H6v2l-1.5-1.5L3 16v-2H1c-.55 0-1-.45-1-1V1c0-.55.45-1 1-1h10c.55 0 1 .45 1 1zm-1 10H1v2h2v-1h3v1h5v-2zm0-10H2v9h9V1z"/></svg>
<svg height="16" width="16" class="octicon octicon-project flex-shrink-0 js-jump-to-octicon-project d-none" title="Project" aria-label="Project" viewBox="0 0 15 16" version="1.1" role="img"><path fill-rule="evenodd" d="M10 12h3V2h-3v10zm-4-2h3V2H6v8zm-4 4h3V2H2v12zm-1 1h13V1H1v14zM14 0H1a1 1 0 00-1 1v14a1 1 0 001 1h13a1 1 0 001-1V1a1 1 0 00-1-1z"/></svg>
<svg height="16" width="16" class="octicon octicon-search flex-shrink-0 js-jump-to-octicon-search d-none" title="Search" aria-label="Search" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M15.7 13.3l-3.81-3.83A5.93 5.93 0 0013 6c0-3.31-2.69-6-6-6S1 2.69 1 6s2.69 6 6 6c1.3 0 2.48-.41 3.47-1.11l3.83 3.81c.19.2.45.3.7.3.25 0 .52-.09.7-.3a.996.996 0 000-1.41v.01zM7 10.7c-2.59 0-4.7-2.11-4.7-4.7 0-2.59 2.11-4.7 4.7-4.7 2.59 0 4.7 2.11 4.7 4.7 0 2.59-2.11 4.7-4.7 4.7z"/></svg>
</div>
<img class="avatar mr-2 flex-shrink-0 js-jump-to-suggestion-avatar d-none" alt="" aria-label="Team" src="" width="28" height="28">
<div class="jump-to-suggestion-name js-jump-to-suggestion-name flex-auto overflow-hidden text-left no-wrap css-truncate css-truncate-target">
</div>
<div class="border rounded-1 flex-shrink-0 bg-gray px-1 text-gray-light ml-1 f6 d-none js-jump-to-badge-search">
<span class="js-jump-to-badge-search-text-default d-none" aria-label="in this repository">
In this repository
</span>
<span class="js-jump-to-badge-search-text-global d-none" aria-label="in all of GitHub">
All GitHub
</span>
<span aria-hidden="true" class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
<div aria-hidden="true" class="border rounded-1 flex-shrink-0 bg-gray px-1 text-gray-light ml-1 f6 d-none d-on-nav-focus js-jump-to-badge-jump">
Jump to
<span class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
</a>
</li>
<li class="d-flex flex-justify-start flex-items-center p-0 f5 navigation-item js-navigation-item js-jump-to-global-search d-none" role="option">
<a tabindex="-1" class="no-underline d-flex flex-auto flex-items-center jump-to-suggestions-path js-jump-to-suggestion-path js-navigation-open p-2" href="">
<div class="jump-to-octicon js-jump-to-octicon flex-shrink-0 mr-2 text-center d-none">
<svg height="16" width="16" class="octicon octicon-repo flex-shrink-0 js-jump-to-octicon-repo d-none" title="Repository" aria-label="Repository" viewBox="0 0 12 16" version="1.1" role="img"><path fill-rule="evenodd" d="M4 9H3V8h1v1zm0-3H3v1h1V6zm0-2H3v1h1V4zm0-2H3v1h1V2zm8-1v12c0 .55-.45 1-1 1H6v2l-1.5-1.5L3 16v-2H1c-.55 0-1-.45-1-1V1c0-.55.45-1 1-1h10c.55 0 1 .45 1 1zm-1 10H1v2h2v-1h3v1h5v-2zm0-10H2v9h9V1z"/></svg>
<svg height="16" width="16" class="octicon octicon-project flex-shrink-0 js-jump-to-octicon-project d-none" title="Project" aria-label="Project" viewBox="0 0 15 16" version="1.1" role="img"><path fill-rule="evenodd" d="M10 12h3V2h-3v10zm-4-2h3V2H6v8zm-4 4h3V2H2v12zm-1 1h13V1H1v14zM14 0H1a1 1 0 00-1 1v14a1 1 0 001 1h13a1 1 0 001-1V1a1 1 0 00-1-1z"/></svg>
<svg height="16" width="16" class="octicon octicon-search flex-shrink-0 js-jump-to-octicon-search d-none" title="Search" aria-label="Search" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M15.7 13.3l-3.81-3.83A5.93 5.93 0 0013 6c0-3.31-2.69-6-6-6S1 2.69 1 6s2.69 6 6 6c1.3 0 2.48-.41 3.47-1.11l3.83 3.81c.19.2.45.3.7.3.25 0 .52-.09.7-.3a.996.996 0 000-1.41v.01zM7 10.7c-2.59 0-4.7-2.11-4.7-4.7 0-2.59 2.11-4.7 4.7-4.7 2.59 0 4.7 2.11 4.7 4.7 0 2.59-2.11 4.7-4.7 4.7z"/></svg>
</div>
<img class="avatar mr-2 flex-shrink-0 js-jump-to-suggestion-avatar d-none" alt="" aria-label="Team" src="" width="28" height="28">
<div class="jump-to-suggestion-name js-jump-to-suggestion-name flex-auto overflow-hidden text-left no-wrap css-truncate css-truncate-target">
</div>
<div class="border rounded-1 flex-shrink-0 bg-gray px-1 text-gray-light ml-1 f6 d-none js-jump-to-badge-search">
<span class="js-jump-to-badge-search-text-default d-none" aria-label="in this repository">
In this repository
</span>
<span class="js-jump-to-badge-search-text-global d-none" aria-label="in all of GitHub">
All GitHub
</span>
<span aria-hidden="true" class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
<div aria-hidden="true" class="border rounded-1 flex-shrink-0 bg-gray px-1 text-gray-light ml-1 f6 d-none d-on-nav-focus js-jump-to-badge-jump">
Jump to
<span class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
</a>
</li>
</ul>
</div>
</label>
</form> </div>
</div>
</div>
<a href="/login?return_to=%2FSup3r-Us3r%2FMyDotfiles%2Fblob%2Fmaster%2Fscripts%2Fmem.py"
class="HeaderMenu-link no-underline mr-3"
data-hydro-click="{"event_type":"authentication.click","payload":{"location_in_page":"site header menu","repository_id":null,"auth_type":"SIGN_UP","originating_url":"https://github.com/Sup3r-Us3r/MyDotfiles/blob/master/scripts/mem.py","user_id":null}}" data-hydro-click-hmac="0afa0c43182b551e094949ce0652984e2923c1263c4f298d806c4bc206d4e9a2"
data-ga-click="(Logged out) Header, clicked Sign in, text:sign-in">
Sign in
</a>
<a href="/join?source=header-repo&source_repo=Sup3r-Us3r%2FMyDotfiles"
class="HeaderMenu-link d-inline-block no-underline border border-gray-dark rounded-1 px-2 py-1"
data-hydro-click="{"event_type":"authentication.click","payload":{"location_in_page":"site header menu","repository_id":null,"auth_type":"SIGN_UP","originating_url":"https://github.com/Sup3r-Us3r/MyDotfiles/blob/master/scripts/mem.py","user_id":null}}" data-hydro-click-hmac="0afa0c43182b551e094949ce0652984e2923c1263c4f298d806c4bc206d4e9a2"
data-ga-click="(Logged out) Header, clicked Sign up, text:sign-up">
Sign up
</a>
</div>
</div>
</div>
</header>
</div>
<div id="start-of-content" class="show-on-focus"></div>
<div id="js-flash-container">
</div>
<div class="js-notification-shelf-not-found-error" hidden></div>
<div class="application-main " data-commit-hovercards-enabled>
<div itemscope itemtype="http://schema.org/SoftwareSourceCode" class="">
<main >
<div class="pagehead repohead readability-menu bg-gray-light pb-0 pt-0 pt-lg-3 pb-0">
<div class="container-lg mb-4 p-responsive d-none d-lg-flex">
<div class="flex-auto min-width-0 width-fit mr-3">
<h1 class="public d-flex flex-wrap flex-items-center break-word float-none ">
<svg class="octicon octicon-repo" viewBox="0 0 12 16" version="1.1" width="12" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M4 9H3V8h1v1zm0-3H3v1h1V6zm0-2H3v1h1V4zm0-2H3v1h1V2zm8-1v12c0 .55-.45 1-1 1H6v2l-1.5-1.5L3 16v-2H1c-.55 0-1-.45-1-1V1c0-.55.45-1 1-1h10c.55 0 1 .45 1 1zm-1 10H1v2h2v-1h3v1h5v-2zm0-10H2v9h9V1z"/></svg>
<span class="author ml-1 flex-self-stretch" itemprop="author">
<a class="url fn" rel="author" data-hovercard-type="user" data-hovercard-url="/users/Sup3r-Us3r/hovercard" data-octo-click="hovercard-link-click" data-octo-dimensions="link_type:self" href="/Sup3r-Us3r">Sup3r-Us3r</a>
</span>
<span class="path-divider flex-self-stretch">/</span>
<strong itemprop="name" class="mr-2 flex-self-stretch">
<a data-pjax="#js-repo-pjax-container" href="/Sup3r-Us3r/MyDotfiles">MyDotfiles</a>
</strong>
</h1>
</div>
<ul class="pagehead-actions flex-shrink-0" >
<li>
<a class="tooltipped tooltipped-s btn btn-sm btn-with-count" aria-label="You must be signed in to watch a repository" rel="nofollow" data-hydro-click="{"event_type":"authentication.click","payload":{"location_in_page":"notification subscription menu watch","repository_id":null,"auth_type":"LOG_IN","originating_url":"https://github.com/Sup3r-Us3r/MyDotfiles/blob/master/scripts/mem.py","user_id":null}}" data-hydro-click-hmac="70d4bcbcb2b1618a8d6405ce3f44c316e129a6071ad0b63f862eda24a65b7b55" href="/login?return_to=%2FSup3r-Us3r%2FMyDotfiles">
<svg class="octicon octicon-eye v-align-text-bottom" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M8.06 2C3 2 0 8 0 8s3 6 8.06 6C13 14 16 8 16 8s-3-6-7.94-6zM8 12c-2.2 0-4-1.78-4-4 0-2.2 1.8-4 4-4 2.22 0 4 1.8 4 4 0 2.22-1.78 4-4 4zm2-4c0 1.11-.89 2-2 2-1.11 0-2-.89-2-2 0-1.11.89-2 2-2 1.11 0 2 .89 2 2z"/></svg>
Watch
</a> <a class="social-count" href="/Sup3r-Us3r/MyDotfiles/watchers"
aria-label="3 users are watching this repository">
3
</a>
</li>
<li>
<a class="btn btn-sm btn-with-count tooltipped tooltipped-s" aria-label="You must be signed in to star a repository" rel="nofollow" data-hydro-click="{"event_type":"authentication.click","payload":{"location_in_page":"star button","repository_id":73324881,"auth_type":"LOG_IN","originating_url":"https://github.com/Sup3r-Us3r/MyDotfiles/blob/master/scripts/mem.py","user_id":null}}" data-hydro-click-hmac="c4acf6bdb2bcf39e4f3d56f6901ee3f2df763cdfba1a69415286dac342ee87fc" href="/login?return_to=%2FSup3r-Us3r%2FMyDotfiles">
<svg aria-label="star" height="16" class="octicon octicon-star v-align-text-bottom" viewBox="0 0 14 16" version="1.1" width="14" role="img"><path fill-rule="evenodd" d="M14 6l-4.9-.64L7 1 4.9 5.36 0 6l3.6 3.26L2.67 14 7 11.67 11.33 14l-.93-4.74L14 6z"/></svg>
Star
</a>
<a class="social-count js-social-count" href="/Sup3r-Us3r/MyDotfiles/stargazers"
aria-label="26 users starred this repository">
26
</a>
</li>
<li>
<a class="btn btn-sm btn-with-count tooltipped tooltipped-s" aria-label="You must be signed in to fork a repository" rel="nofollow" data-hydro-click="{"event_type":"authentication.click","payload":{"location_in_page":"repo details fork button","repository_id":73324881,"auth_type":"LOG_IN","originating_url":"https://github.com/Sup3r-Us3r/MyDotfiles/blob/master/scripts/mem.py","user_id":null}}" data-hydro-click-hmac="5c0fb6ab2891a2cb880986c9c5c41ae89a88d5b350cd8804967c1d2589d14e8e" href="/login?return_to=%2FSup3r-Us3r%2FMyDotfiles">
<svg class="octicon octicon-repo-forked v-align-text-bottom" viewBox="0 0 10 16" version="1.1" width="10" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M8 1a1.993 1.993 0 00-1 3.72V6L5 8 3 6V4.72A1.993 1.993 0 002 1a1.993 1.993 0 00-1 3.72V6.5l3 3v1.78A1.993 1.993 0 005 15a1.993 1.993 0 001-3.72V9.5l3-3V4.72A1.993 1.993 0 008 1zM2 4.2C1.34 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3 10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3-10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"/></svg>
Fork
</a>
<a href="/Sup3r-Us3r/MyDotfiles/network/members" class="social-count"
aria-label="3 users forked this repository">
3
</a>
</li>
</ul>
</div>
<nav class="hx_reponav reponav js-repo-nav js-sidenav-container-pjax clearfix container-lg p-responsive d-none d-lg-block"
itemscope
itemtype="http://schema.org/BreadcrumbList"
aria-label="Repository"
data-pjax="#js-repo-pjax-container">
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a class="js-selected-navigation-item selected reponav-item" itemprop="url" data-hotkey="g c" aria-current="page" data-selected-links="repo_source repo_downloads repo_commits repo_releases repo_tags repo_branches repo_packages /Sup3r-Us3r/MyDotfiles" href="/Sup3r-Us3r/MyDotfiles">
<div class="d-inline"><svg class="octicon octicon-code" viewBox="0 0 14 16" version="1.1" width="14" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M9.5 3L8 4.5 11.5 8 8 11.5 9.5 13 14 8 9.5 3zm-5 0L0 8l4.5 5L6 11.5 2.5 8 6 4.5 4.5 3z"/></svg></div>
<span itemprop="name">Code</span>
<meta itemprop="position" content="1">
</a> </span>
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a itemprop="url" data-hotkey="g i" class="js-selected-navigation-item reponav-item" data-selected-links="repo_issues repo_labels repo_milestones /Sup3r-Us3r/MyDotfiles/issues" href="/Sup3r-Us3r/MyDotfiles/issues">
<div class="d-inline"><svg class="octicon octicon-issue-opened" viewBox="0 0 14 16" version="1.1" width="14" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M7 2.3c3.14 0 5.7 2.56 5.7 5.7s-2.56 5.7-5.7 5.7A5.71 5.71 0 011.3 8c0-3.14 2.56-5.7 5.7-5.7zM7 1C3.14 1 0 4.14 0 8s3.14 7 7 7 7-3.14 7-7-3.14-7-7-7zm1 3H6v5h2V4zm0 6H6v2h2v-2z"/></svg></div>
<span itemprop="name">Issues</span>
<span class="Counter">0</span>
<meta itemprop="position" content="2">
</a> </span>
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a data-hotkey="g p" data-skip-pjax="true" itemprop="url" class="js-selected-navigation-item reponav-item" data-selected-links="repo_pulls checks /Sup3r-Us3r/MyDotfiles/pulls" href="/Sup3r-Us3r/MyDotfiles/pulls">
<div class="d-inline"><svg class="octicon octicon-git-pull-request" viewBox="0 0 12 16" version="1.1" width="12" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M11 11.28V5c-.03-.78-.34-1.47-.94-2.06C9.46 2.35 8.78 2.03 8 2H7V0L4 3l3 3V4h1c.27.02.48.11.69.31.21.2.3.42.31.69v6.28A1.993 1.993 0 0010 15a1.993 1.993 0 001-3.72zm-1 2.92c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zM4 3c0-1.11-.89-2-2-2a1.993 1.993 0 00-1 3.72v6.56A1.993 1.993 0 002 15a1.993 1.993 0 001-3.72V4.72c.59-.34 1-.98 1-1.72zm-.8 10c0 .66-.55 1.2-1.2 1.2-.65 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2zM2 4.2C1.34 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"/></svg></div>
<span itemprop="name">Pull requests</span>
<span class="Counter">0</span>
<meta itemprop="position" content="4">
</a> </span>
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement" class="position-relative float-left">
<a data-hotkey="g w" data-skip-pjax="true" class="js-selected-navigation-item reponav-item" data-selected-links="repo_actions /Sup3r-Us3r/MyDotfiles/actions" href="/Sup3r-Us3r/MyDotfiles/actions">
<div class="d-inline"><svg class="octicon octicon-play" viewBox="0 0 14 16" version="1.1" width="14" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M14 8A7 7 0 110 8a7 7 0 0114 0zm-8.223 3.482l4.599-3.066a.5.5 0 000-.832L5.777 4.518A.5.5 0 005 4.934v6.132a.5.5 0 00.777.416z"/></svg></div>
Actions
</a>
</span>
<a data-hotkey="g b" class="js-selected-navigation-item reponav-item" data-selected-links="repo_projects new_repo_project repo_project /Sup3r-Us3r/MyDotfiles/projects" href="/Sup3r-Us3r/MyDotfiles/projects">
<div class="d-inline"><svg class="octicon octicon-project" viewBox="0 0 15 16" version="1.1" width="15" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M10 12h3V2h-3v10zm-4-2h3V2H6v8zm-4 4h3V2H2v12zm-1 1h13V1H1v14zM14 0H1a1 1 0 00-1 1v14a1 1 0 001 1h13a1 1 0 001-1V1a1 1 0 00-1-1z"/></svg></div>
Projects
<span class="Counter" >0</span>
</a>
<a data-skip-pjax="true" class="js-selected-navigation-item reponav-item" data-selected-links="security alerts policy token_scanning code_scanning /Sup3r-Us3r/MyDotfiles/security/advisories" href="/Sup3r-Us3r/MyDotfiles/security/advisories">
<div class="d-inline"><svg class="octicon octicon-shield" viewBox="0 0 14 16" version="1.1" width="14" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M0 2l7-2 7 2v6.02C14 12.69 8.69 16 7 16c-1.69 0-7-3.31-7-7.98V2zm1 .75L7 1l6 1.75v5.268C13 12.104 8.449 15 7 15c-1.449 0-6-2.896-6-6.982V2.75zm1 .75L7 2v12c-1.207 0-5-2.482-5-5.985V3.5z"/></svg></div>
Security
</a>
<a class="js-selected-navigation-item reponav-item" data-selected-links="repo_graphs repo_contributors dependency_graph pulse people /Sup3r-Us3r/MyDotfiles/pulse" href="/Sup3r-Us3r/MyDotfiles/pulse">
<div class="d-inline"><svg class="octicon octicon-graph" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M16 14v1H0V0h1v14h15zM5 13H3V8h2v5zm4 0H7V3h2v10zm4 0h-2V6h2v7z"/></svg></div>
Insights
</a>
</nav>
<div class="reponav-wrapper reponav-small d-lg-none">
<nav class="reponav js-reponav text-center no-wrap"
itemscope
itemtype="http://schema.org/BreadcrumbList">
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a class="js-selected-navigation-item selected reponav-item" itemprop="url" aria-current="page" data-selected-links="repo_source repo_downloads repo_commits repo_releases repo_tags repo_branches repo_packages /Sup3r-Us3r/MyDotfiles" href="/Sup3r-Us3r/MyDotfiles">
<span itemprop="name">Code</span>
<meta itemprop="position" content="1">
</a> </span>
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a itemprop="url" class="js-selected-navigation-item reponav-item" data-selected-links="repo_issues repo_labels repo_milestones /Sup3r-Us3r/MyDotfiles/issues" href="/Sup3r-Us3r/MyDotfiles/issues">
<span itemprop="name">Issues</span>
<span class="Counter">0</span>
<meta itemprop="position" content="2">
</a> </span>
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a itemprop="url" class="js-selected-navigation-item reponav-item" data-selected-links="repo_pulls checks /Sup3r-Us3r/MyDotfiles/pulls" href="/Sup3r-Us3r/MyDotfiles/pulls">
<span itemprop="name">Pull requests</span>
<span class="Counter">0</span>
<meta itemprop="position" content="4">
</a> </span>
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a itemprop="url" class="js-selected-navigation-item reponav-item" data-selected-links="repo_projects new_repo_project repo_project /Sup3r-Us3r/MyDotfiles/projects" href="/Sup3r-Us3r/MyDotfiles/projects">
<span itemprop="name">Projects</span>
<span class="Counter">0</span>
<meta itemprop="position" content="5">
</a> </span>
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a itemprop="url" class="js-selected-navigation-item reponav-item" data-selected-links="repo_actions /Sup3r-Us3r/MyDotfiles/actions" href="/Sup3r-Us3r/MyDotfiles/actions">
<span itemprop="name">Actions</span>
<meta itemprop="position" content="6">
</a> </span>
<a itemprop="url" class="js-selected-navigation-item reponav-item" data-selected-links="security alerts policy token_scanning code_scanning /Sup3r-Us3r/MyDotfiles/security/advisories" href="/Sup3r-Us3r/MyDotfiles/security/advisories">
<span itemprop="name">Security</span>
<meta itemprop="position" content="8">
</a>
<a class="js-selected-navigation-item reponav-item" data-selected-links="pulse /Sup3r-Us3r/MyDotfiles/pulse" href="/Sup3r-Us3r/MyDotfiles/pulse">
Pulse
</a>
</nav>
</div>
</div>
<div class="container-lg clearfix new-discussion-timeline p-responsive">
<div class="repository-content ">
<a class="d-none js-permalink-shortcut" data-hotkey="y" href="/Sup3r-Us3r/MyDotfiles/blob/d56e40d49cb9a6a3af04b3d2442408df622637f4/scripts/mem.py">Permalink</a>
<!-- blob contrib key: blob_contributors:v21:31fe52616550bfd4da51bfb2bc25ecd3 -->
<div class="signup-prompt-bg rounded-1 js-signup-prompt" data-prompt="signup" hidden>
<div class="signup-prompt p-4 text-center mb-4 rounded-1">
<div class="position-relative">
<button type="button" class="position-absolute top-0 right-0 btn-link link-gray js-signup-prompt-button" data-ga-click="(Logged out) Sign up prompt, clicked Dismiss, text:dismiss">
Dismiss
</button>
<h3 class="pt-2">Join GitHub today</h3>
<p class="col-6 mx-auto">GitHub is home to over 40 million developers working together to host and review code, manage projects, and build software together.</p>
<a class="btn btn-primary" data-hydro-click="{"event_type":"authentication.click","payload":{"location_in_page":"files signup prompt","repository_id":null,"auth_type":"SIGN_UP","originating_url":"https://github.com/Sup3r-Us3r/MyDotfiles/blob/master/scripts/mem.py","user_id":null}}" data-hydro-click-hmac="4d61cb04ad1a4249da3bb31f67f56ebb44cc7c5feab3e86c35ffc3cd045ab0b1" data-ga-click="(Logged out) Sign up prompt, clicked Sign up, text:sign-up" href="/join?source=prompt-blob-show&source_repo=Sup3r-Us3r%2FMyDotfiles">Sign up</a>
</div>
</div>
</div>
<div class="d-flex flex-items-start flex-shrink-0 flex-column flex-md-row pb-3">
<span class="d-flex flex-justify-between width-full width-md-auto">
<details class="details-reset details-overlay branch-select-menu " id="branch-select-menu">
<summary class="btn btn-sm css-truncate"
data-hotkey="w"
title="Switch branches or tags">
<i>Branch:</i>
<span class="css-truncate-target" data-menu-button>master</span>
<span class="dropdown-caret"></span>
</summary>
<details-menu class="SelectMenu SelectMenu--hasFilter" src="/Sup3r-Us3r/MyDotfiles/refs/master/scripts/mem.py?source_action=show&source_controller=blob" preload>
<div class="SelectMenu-modal">
<include-fragment class="SelectMenu-loading" aria-label="Menu is loading">
<svg class="octicon octicon-octoface anim-pulse" height="32" viewBox="0 0 16 16" version="1.1" width="32" aria-hidden="true"><path fill-rule="evenodd" d="M14.7 5.34c.13-.32.55-1.59-.13-3.31 0 0-1.05-.33-3.44 1.3-1-.28-2.07-.32-3.13-.32s-2.13.04-3.13.32c-2.39-1.64-3.44-1.3-3.44-1.3-.68 1.72-.26 2.99-.13 3.31C.49 6.21 0 7.33 0 8.69 0 13.84 3.33 15 7.98 15S16 13.84 16 8.69c0-1.36-.49-2.48-1.3-3.35zM8 14.02c-3.3 0-5.98-.15-5.98-3.35 0-.76.38-1.48 1.02-2.07 1.07-.98 2.9-.46 4.96-.46 2.07 0 3.88-.52 4.96.46.65.59 1.02 1.3 1.02 2.07 0 3.19-2.68 3.35-5.98 3.35zM5.49 9.01c-.66 0-1.2.8-1.2 1.78s.54 1.79 1.2 1.79c.66 0 1.2-.8 1.2-1.79s-.54-1.78-1.2-1.78zm5.02 0c-.66 0-1.2.79-1.2 1.78s.54 1.79 1.2 1.79c.66 0 1.2-.8 1.2-1.79s-.53-1.78-1.2-1.78z"/></svg>
</include-fragment>
</div>
</details-menu>
</details>
<div class="BtnGroup flex-shrink-0 d-md-none">
<a href="/Sup3r-Us3r/MyDotfiles/find/master"
class="js-pjax-capture-input btn btn-sm BtnGroup-item"
data-pjax
data-hotkey="t">
Find file
</a>
<clipboard-copy value="scripts/mem.py" class="btn btn-sm BtnGroup-item">
Copy path
</clipboard-copy>
</div>
</span>
<h2 id="blob-path" class="breadcrumb flex-auto min-width-0 text-normal flex-md-self-center ml-md-2 mr-md-3 my-2 my-md-0">
<span class="js-repo-root text-bold"><span class="js-path-segment"><a data-pjax="true" href="/Sup3r-Us3r/MyDotfiles"><span>MyDotfiles</span></a></span></span><span class="separator">/</span><span class="js-path-segment"><a data-pjax="true" href="/Sup3r-Us3r/MyDotfiles/tree/master/scripts"><span>scripts</span></a></span><span class="separator">/</span><strong class="final-path">mem.py</strong>
</h2>
<div class="BtnGroup flex-shrink-0 d-none d-md-inline-block">
<a href="/Sup3r-Us3r/MyDotfiles/find/master"
class="js-pjax-capture-input btn btn-sm BtnGroup-item"
data-pjax
data-hotkey="t">
Find file
</a>
<clipboard-copy value="scripts/mem.py" class="btn btn-sm BtnGroup-item">
Copy path
</clipboard-copy>
</div>
</div>
<include-fragment src="/Sup3r-Us3r/MyDotfiles/contributors/master/scripts/mem.py" class="Box Box--condensed commit-loader">
<div class="Box-body bg-blue-light f6">
Fetching contributors…
</div>
<div class="Box-body d-flex flex-items-center" >
<img alt="" class="loader-loading mr-2" src="https://github.githubassets.com/images/spinners/octocat-spinner-32-EAF2F5.gif" width="16" height="16" />
<span class="text-red h6 loader-error">Cannot retrieve contributors at this time</span>
</div>
</include-fragment>
<div class="Box mt-3 position-relative">
<div class="Box-header py-2 d-flex flex-column flex-shrink-0 flex-md-row flex-md-items-center">
<div class="text-mono f6 flex-auto pr-3 flex-order-2 flex-md-order-1 mt-2 mt-md-0">
<span class="file-mode" title="File mode">executable file</span>
<span class="file-info-divider"></span>
609 lines (540 sloc)
<span class="file-info-divider"></span>
21.2 KB
</div>
<div class="d-flex py-1 py-md-0 flex-auto flex-order-1 flex-md-order-2 flex-sm-grow-0 flex-justify-between">
<div class="BtnGroup">
<a id="raw-url" class="btn btn-sm BtnGroup-item" href="/Sup3r-Us3r/MyDotfiles/raw/master/scripts/mem.py">Raw</a>
<a class="btn btn-sm js-update-url-with-hash BtnGroup-item" data-hotkey="b" href="/Sup3r-Us3r/MyDotfiles/blame/master/scripts/mem.py">Blame</a>
<a rel="nofollow" class="btn btn-sm BtnGroup-item" href="/Sup3r-Us3r/MyDotfiles/commits/master/scripts/mem.py">History</a>
</div>
<div>
<a class="btn-octicon tooltipped tooltipped-nw js-remove-unless-platform"
data-platforms="windows,mac"
href="https://desktop.github.com"
aria-label="Open this file in GitHub Desktop"
data-ga-click="Repository, open with desktop">
<svg class="octicon octicon-device-desktop" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M15 2H1c-.55 0-1 .45-1 1v9c0 .55.45 1 1 1h5.34c-.25.61-.86 1.39-2.34 2h8c-1.48-.61-2.09-1.39-2.34-2H15c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1zm0 9H1V3h14v8z"/></svg>
</a>
<button type="button" class="btn-octicon disabled tooltipped tooltipped-nw"
aria-label="You must be signed in to make or propose changes">
<svg class="octicon octicon-pencil" viewBox="0 0 14 16" version="1.1" width="14" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M0 12v3h3l8-8-3-3-8 8zm3 2H1v-2h1v1h1v1zm10.3-9.3L12 6 9 3l1.3-1.3a.996.996 0 011.41 0l1.59 1.59c.39.39.39 1.02 0 1.41z"/></svg>
</button>
<button type="button" class="btn-octicon btn-octicon-danger disabled tooltipped tooltipped-nw"
aria-label="You must be signed in to make or propose changes">
<svg class="octicon octicon-trashcan" viewBox="0 0 12 16" version="1.1" width="12" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M11 2H9c0-.55-.45-1-1-1H5c-.55 0-1 .45-1 1H2c-.55 0-1 .45-1 1v1c0 .55.45 1 1 1v9c0 .55.45 1 1 1h7c.55 0 1-.45 1-1V5c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1zm-1 12H3V5h1v8h1V5h1v8h1V5h1v8h1V5h1v9zm1-10H2V3h9v1z"/></svg>
</button>
</div>
</div>
</div>
<div itemprop="text" class="Box-body p-0 blob-wrapper data type-python ">
<table class="highlight tab-size js-file-line-container" data-tab-size="8">
<tr>
<td id="L1" class="blob-num js-line-number" data-line-number="1"></td>
<td id="LC1" class="blob-code blob-code-inner js-file-line"><span class=pl-c>#!/usr/bin/env python</span></td>
</tr>
<tr>
<td id="L2" class="blob-num js-line-number" data-line-number="2"></td>
<td id="LC2" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L3" class="blob-num js-line-number" data-line-number="3"></td>
<td id="LC3" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Try to determine how much RAM is currently being used per program.</span></td>
</tr>
<tr>
<td id="L4" class="blob-num js-line-number" data-line-number="4"></td>
<td id="LC4" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Note per _program_, not per process. So for example this script</span></td>
</tr>
<tr>
<td id="L5" class="blob-num js-line-number" data-line-number="5"></td>
<td id="LC5" class="blob-code blob-code-inner js-file-line"><span class=pl-c># will report RAM used by all httpd process together. In detail it reports:</span></td>
</tr>
<tr>
<td id="L6" class="blob-num js-line-number" data-line-number="6"></td>
<td id="LC6" class="blob-code blob-code-inner js-file-line"><span class=pl-c># sum(private RAM for program processes) + sum(Shared RAM for program processes)</span></td>
</tr>
<tr>
<td id="L7" class="blob-num js-line-number" data-line-number="7"></td>
<td id="LC7" class="blob-code blob-code-inner js-file-line"><span class=pl-c># The shared RAM is problematic to calculate, and this script automatically</span></td>
</tr>
<tr>
<td id="L8" class="blob-num js-line-number" data-line-number="8"></td>
<td id="LC8" class="blob-code blob-code-inner js-file-line"><span class=pl-c># selects the most accurate method available for your kernel.</span></td>
</tr>
<tr>
<td id="L9" class="blob-num js-line-number" data-line-number="9"></td>
<td id="LC9" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L10" class="blob-num js-line-number" data-line-number="10"></td>
<td id="LC10" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Licence: LGPLv2</span></td>
</tr>
<tr>
<td id="L11" class="blob-num js-line-number" data-line-number="11"></td>
<td id="LC11" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Author: P@draigBrady.com</span></td>
</tr>
<tr>
<td id="L12" class="blob-num js-line-number" data-line-number="12"></td>
<td id="LC12" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Source: http://www.pixelbeat.org/scripts/ps_mem.py</span></td>
</tr>
<tr>
<td id="L13" class="blob-num js-line-number" data-line-number="13"></td>
<td id="LC13" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L14" class="blob-num js-line-number" data-line-number="14"></td>
<td id="LC14" class="blob-code blob-code-inner js-file-line"><span class=pl-c># V1.0 06 Jul 2005 Initial release</span></td>
</tr>
<tr>
<td id="L15" class="blob-num js-line-number" data-line-number="15"></td>
<td id="LC15" class="blob-code blob-code-inner js-file-line"><span class=pl-c># V1.1 11 Aug 2006 root permission required for accuracy</span></td>
</tr>
<tr>
<td id="L16" class="blob-num js-line-number" data-line-number="16"></td>
<td id="LC16" class="blob-code blob-code-inner js-file-line"><span class=pl-c># V1.2 08 Nov 2006 Add total to output</span></td>
</tr>
<tr>
<td id="L17" class="blob-num js-line-number" data-line-number="17"></td>
<td id="LC17" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Use KiB,MiB,... for units rather than K,M,...</span></td>
</tr>
<tr>
<td id="L18" class="blob-num js-line-number" data-line-number="18"></td>
<td id="LC18" class="blob-code blob-code-inner js-file-line"><span class=pl-c># V1.3 22 Nov 2006 Ignore shared col from /proc/$pid/statm for</span></td>
</tr>
<tr>
<td id="L19" class="blob-num js-line-number" data-line-number="19"></td>
<td id="LC19" class="blob-code blob-code-inner js-file-line"><span class=pl-c># 2.6 kernels up to and including 2.6.9.</span></td>
</tr>
<tr>
<td id="L20" class="blob-num js-line-number" data-line-number="20"></td>
<td id="LC20" class="blob-code blob-code-inner js-file-line"><span class=pl-c># There it represented the total file backed extent</span></td>
</tr>
<tr>
<td id="L21" class="blob-num js-line-number" data-line-number="21"></td>
<td id="LC21" class="blob-code blob-code-inner js-file-line"><span class=pl-c># V1.4 23 Nov 2006 Remove total from output as it's meaningless</span></td>
</tr>
<tr>
<td id="L22" class="blob-num js-line-number" data-line-number="22"></td>
<td id="LC22" class="blob-code blob-code-inner js-file-line"><span class=pl-c># (the shared values overlap with other programs).</span></td>
</tr>
<tr>
<td id="L23" class="blob-num js-line-number" data-line-number="23"></td>
<td id="LC23" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Display the shared column. This extra info is</span></td>
</tr>
<tr>
<td id="L24" class="blob-num js-line-number" data-line-number="24"></td>
<td id="LC24" class="blob-code blob-code-inner js-file-line"><span class=pl-c># useful, especially as it overlaps between programs.</span></td>
</tr>
<tr>
<td id="L25" class="blob-num js-line-number" data-line-number="25"></td>
<td id="LC25" class="blob-code blob-code-inner js-file-line"><span class=pl-c># V1.5 26 Mar 2007 Remove redundant recursion from human()</span></td>
</tr>
<tr>
<td id="L26" class="blob-num js-line-number" data-line-number="26"></td>
<td id="LC26" class="blob-code blob-code-inner js-file-line"><span class=pl-c># V1.6 05 Jun 2007 Also report number of processes with a given name.</span></td>
</tr>
<tr>
<td id="L27" class="blob-num js-line-number" data-line-number="27"></td>
<td id="LC27" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Patch from riccardo.murri@gmail.com</span></td>
</tr>
<tr>
<td id="L28" class="blob-num js-line-number" data-line-number="28"></td>
<td id="LC28" class="blob-code blob-code-inner js-file-line"><span class=pl-c># V1.7 20 Sep 2007 Use PSS from /proc/$pid/smaps if available, which</span></td>
</tr>
<tr>
<td id="L29" class="blob-num js-line-number" data-line-number="29"></td>
<td id="LC29" class="blob-code blob-code-inner js-file-line"><span class=pl-c># fixes some over-estimation and allows totalling.</span></td>
</tr>
<tr>
<td id="L30" class="blob-num js-line-number" data-line-number="30"></td>
<td id="LC30" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Enumerate the PIDs directly rather than using ps,</span></td>
</tr>
<tr>
<td id="L31" class="blob-num js-line-number" data-line-number="31"></td>
<td id="LC31" class="blob-code blob-code-inner js-file-line"><span class=pl-c># which fixes the possible race between reading</span></td>
</tr>
<tr>
<td id="L32" class="blob-num js-line-number" data-line-number="32"></td>
<td id="LC32" class="blob-code blob-code-inner js-file-line"><span class=pl-c># RSS with ps, and shared memory with this program.</span></td>
</tr>
<tr>
<td id="L33" class="blob-num js-line-number" data-line-number="33"></td>
<td id="LC33" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Also we can show non truncated command names.</span></td>
</tr>
<tr>
<td id="L34" class="blob-num js-line-number" data-line-number="34"></td>
<td id="LC34" class="blob-code blob-code-inner js-file-line"><span class=pl-c># V1.8 28 Sep 2007 More accurate matching for stats in /proc/$pid/smaps</span></td>
</tr>
<tr>
<td id="L35" class="blob-num js-line-number" data-line-number="35"></td>
<td id="LC35" class="blob-code blob-code-inner js-file-line"><span class=pl-c># as otherwise could match libraries causing a crash.</span></td>
</tr>
<tr>
<td id="L36" class="blob-num js-line-number" data-line-number="36"></td>
<td id="LC36" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Patch from patrice.bouchand.fedora@gmail.com</span></td>
</tr>
<tr>
<td id="L37" class="blob-num js-line-number" data-line-number="37"></td>
<td id="LC37" class="blob-code blob-code-inner js-file-line"><span class=pl-c># V1.9 20 Feb 2008 Fix invalid values reported when PSS is available.</span></td>
</tr>
<tr>
<td id="L38" class="blob-num js-line-number" data-line-number="38"></td>
<td id="LC38" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Reported by Andrey Borzenkov <arvidjaar@mail.ru></span></td>
</tr>
<tr>
<td id="L39" class="blob-num js-line-number" data-line-number="39"></td>
<td id="LC39" class="blob-code blob-code-inner js-file-line"><span class=pl-c># V3.8 17 Jun 2016</span></td>
</tr>
<tr>
<td id="L40" class="blob-num js-line-number" data-line-number="40"></td>
<td id="LC40" class="blob-code blob-code-inner js-file-line"><span class=pl-c># http://github.com/pixelb/scripts/commits/master/scripts/ps_mem.py</span></td>
</tr>
<tr>
<td id="L41" class="blob-num js-line-number" data-line-number="41"></td>
<td id="LC41" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L42" class="blob-num js-line-number" data-line-number="42"></td>
<td id="LC42" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Notes:</span></td>
</tr>
<tr>
<td id="L43" class="blob-num js-line-number" data-line-number="43"></td>
<td id="LC43" class="blob-code blob-code-inner js-file-line"><span class=pl-c>#</span></td>
</tr>
<tr>
<td id="L44" class="blob-num js-line-number" data-line-number="44"></td>
<td id="LC44" class="blob-code blob-code-inner js-file-line"><span class=pl-c># All interpreted programs where the interpreter is started</span></td>
</tr>
<tr>
<td id="L45" class="blob-num js-line-number" data-line-number="45"></td>
<td id="LC45" class="blob-code blob-code-inner js-file-line"><span class=pl-c># by the shell or with env, will be merged to the interpreter</span></td>
</tr>
<tr>
<td id="L46" class="blob-num js-line-number" data-line-number="46"></td>
<td id="LC46" class="blob-code blob-code-inner js-file-line"><span class=pl-c># (as that's what's given to exec). For e.g. all python programs</span></td>
</tr>
<tr>
<td id="L47" class="blob-num js-line-number" data-line-number="47"></td>
<td id="LC47" class="blob-code blob-code-inner js-file-line"><span class=pl-c># starting with "#!/usr/bin/env python" will be grouped under python.</span></td>
</tr>
<tr>
<td id="L48" class="blob-num js-line-number" data-line-number="48"></td>
<td id="LC48" class="blob-code blob-code-inner js-file-line"><span class=pl-c># You can change this by using the full command line but that will</span></td>
</tr>
<tr>
<td id="L49" class="blob-num js-line-number" data-line-number="49"></td>
<td id="LC49" class="blob-code blob-code-inner js-file-line"><span class=pl-c># have the undesirable affect of splitting up programs started with</span></td>
</tr>
<tr>
<td id="L50" class="blob-num js-line-number" data-line-number="50"></td>
<td id="LC50" class="blob-code blob-code-inner js-file-line"><span class=pl-c># differing parameters (for e.g. mingetty tty[1-6]).</span></td>
</tr>
<tr>
<td id="L51" class="blob-num js-line-number" data-line-number="51"></td>
<td id="LC51" class="blob-code blob-code-inner js-file-line"><span class=pl-c>#</span></td>
</tr>
<tr>
<td id="L52" class="blob-num js-line-number" data-line-number="52"></td>
<td id="LC52" class="blob-code blob-code-inner js-file-line"><span class=pl-c># For 2.6 kernels up to and including 2.6.13 and later 2.4 redhat kernels</span></td>
</tr>
<tr>
<td id="L53" class="blob-num js-line-number" data-line-number="53"></td>
<td id="LC53" class="blob-code blob-code-inner js-file-line"><span class=pl-c># (rmap vm without smaps) it can not be accurately determined how many pages</span></td>
</tr>
<tr>
<td id="L54" class="blob-num js-line-number" data-line-number="54"></td>
<td id="LC54" class="blob-code blob-code-inner js-file-line"><span class=pl-c># are shared between processes in general or within a program in our case:</span></td>
</tr>
<tr>
<td id="L55" class="blob-num js-line-number" data-line-number="55"></td>
<td id="LC55" class="blob-code blob-code-inner js-file-line"><span class=pl-c># http://lkml.org/lkml/2005/7/6/250</span></td>
</tr>
<tr>
<td id="L56" class="blob-num js-line-number" data-line-number="56"></td>
<td id="LC56" class="blob-code blob-code-inner js-file-line"><span class=pl-c># A warning is printed if overestimation is possible.</span></td>
</tr>
<tr>
<td id="L57" class="blob-num js-line-number" data-line-number="57"></td>
<td id="LC57" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In addition for 2.6 kernels up to 2.6.9 inclusive, the shared</span></td>
</tr>
<tr>
<td id="L58" class="blob-num js-line-number" data-line-number="58"></td>
<td id="LC58" class="blob-code blob-code-inner js-file-line"><span class=pl-c># value in /proc/$pid/statm is the total file-backed extent of a process.</span></td>
</tr>
<tr>
<td id="L59" class="blob-num js-line-number" data-line-number="59"></td>
<td id="LC59" class="blob-code blob-code-inner js-file-line"><span class=pl-c># We ignore that, introducing more overestimation, again printing a warning.</span></td>
</tr>
<tr>
<td id="L60" class="blob-num js-line-number" data-line-number="60"></td>
<td id="LC60" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Since kernel 2.6.23-rc8-mm1 PSS is available in smaps, which allows</span></td>
</tr>
<tr>
<td id="L61" class="blob-num js-line-number" data-line-number="61"></td>
<td id="LC61" class="blob-code blob-code-inner js-file-line"><span class=pl-c># us to calculate a more accurate value for the total RAM used by programs.</span></td>
</tr>
<tr>
<td id="L62" class="blob-num js-line-number" data-line-number="62"></td>
<td id="LC62" class="blob-code blob-code-inner js-file-line"><span class=pl-c>#</span></td>
</tr>
<tr>
<td id="L63" class="blob-num js-line-number" data-line-number="63"></td>
<td id="LC63" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Programs that use CLONE_VM without CLONE_THREAD are discounted by assuming</span></td>
</tr>
<tr>
<td id="L64" class="blob-num js-line-number" data-line-number="64"></td>
<td id="LC64" class="blob-code blob-code-inner js-file-line"><span class=pl-c># they're the only programs that have the same /proc/$PID/smaps file for</span></td>
</tr>
<tr>
<td id="L65" class="blob-num js-line-number" data-line-number="65"></td>
<td id="LC65" class="blob-code blob-code-inner js-file-line"><span class=pl-c># each instance. This will fail if there are multiple real instances of a</span></td>
</tr>
<tr>
<td id="L66" class="blob-num js-line-number" data-line-number="66"></td>
<td id="LC66" class="blob-code blob-code-inner js-file-line"><span class=pl-c># program that then use CLONE_VM without CLONE_THREAD, or if a clone changes</span></td>
</tr>
<tr>
<td id="L67" class="blob-num js-line-number" data-line-number="67"></td>
<td id="LC67" class="blob-code blob-code-inner js-file-line"><span class=pl-c># its memory map while we're checksumming each /proc/$PID/smaps.</span></td>
</tr>
<tr>
<td id="L68" class="blob-num js-line-number" data-line-number="68"></td>
<td id="LC68" class="blob-code blob-code-inner js-file-line"><span class=pl-c>#</span></td>
</tr>
<tr>
<td id="L69" class="blob-num js-line-number" data-line-number="69"></td>
<td id="LC69" class="blob-code blob-code-inner js-file-line"><span class=pl-c># I don't take account of memory allocated for a program</span></td>
</tr>
<tr>
<td id="L70" class="blob-num js-line-number" data-line-number="70"></td>
<td id="LC70" class="blob-code blob-code-inner js-file-line"><span class=pl-c># by other programs. For e.g. memory used in the X server for</span></td>
</tr>
<tr>
<td id="L71" class="blob-num js-line-number" data-line-number="71"></td>
<td id="LC71" class="blob-code blob-code-inner js-file-line"><span class=pl-c># a program could be determined, but is not.</span></td>
</tr>
<tr>
<td id="L72" class="blob-num js-line-number" data-line-number="72"></td>
<td id="LC72" class="blob-code blob-code-inner js-file-line"><span class=pl-c>#</span></td>
</tr>
<tr>
<td id="L73" class="blob-num js-line-number" data-line-number="73"></td>
<td id="LC73" class="blob-code blob-code-inner js-file-line"><span class=pl-c># FreeBSD is supported if linprocfs is mounted at /compat/linux/proc/</span></td>
</tr>
<tr>
<td id="L74" class="blob-num js-line-number" data-line-number="74"></td>
<td id="LC74" class="blob-code blob-code-inner js-file-line"><span class=pl-c># FreeBSD 8.0 supports up to a level of Linux 2.6.16</span></td>
</tr>
<tr>
<td id="L75" class="blob-num js-line-number" data-line-number="75"></td>
<td id="LC75" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
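# ---------------------------------------------------------------------------
# Illustrative sketch (added alongside this copy; not part of the upstream
# ps_mem.py logic, which follows below with fallbacks for older kernels).
# It shows the core idea from the notes above: summing the "Pss:" fields of
# /proc/$pid/smaps gives a process's proportional share of RAM in KiB.
def _pss_kib_example(pid):
    total_kib = 0
    try:
        smaps = open("/proc/%s/smaps" % pid)
    except (IOError, OSError):
        return None  # process exited, or no permission / no smaps support
    for line in smaps:
        if line.startswith("Pss:"):
            # each mapping contributes a line like "Pss:        1234 kB"
            total_kib += int(line.split()[1])
    smaps.close()
    return total_kib
# ---------------------------------------------------------------------------
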
import getopt
import time
import errno
import os
import sys

# The following exits cleanly on Ctrl-C or EPIPE
# while treating other exceptions as before.
def std_exceptions(etype, value, tb):
    sys.excepthook = sys.__excepthook__
    if issubclass(etype, KeyboardInterrupt):
        pass
    elif issubclass(etype, IOError) and value.errno == errno.EPIPE:
        pass
    else:
        sys.__excepthook__(etype, value, tb)
sys.excepthook = std_exceptions
<tr>
<td id="L93" class="blob-num js-line-number" data-line-number="93"></td>
<td id="LC93" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
#
# Define some global variables
#

PAGESIZE = os.sysconf("SC_PAGE_SIZE") / 1024 #KiB
our_pid = os.getpid()

have_pss = 0
have_swap_pss = 0

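# Proc abstracts the procfs mount point so the same code works on Linux
# (/proc) and under FreeBSD's Linux compatibility layer (/compat/linux/proc).
# Its open() converts ENOENT/EPERM into LookupError so callers can simply
# skip processes that have exited or are not readable.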
class Proc:
    def __init__(self):
        uname = os.uname()
        if uname[0] == "FreeBSD":
            self.proc = '/compat/linux/proc'
        else:
            self.proc = '/proc'

    def path(self, *args):
        return os.path.join(self.proc, *(str(a) for a in args))

    def open(self, *args):
        try:
            if sys.version_info < (3,):
                return open(self.path(*args))
            else:
                return open(self.path(*args), errors='ignore')
        except (IOError, OSError):
            val = sys.exc_info()[1]
            if (val.errno == errno.ENOENT or # kernel thread or process gone
                val.errno == errno.EPERM):
                raise LookupError
            raise

proc = Proc()


#
# Functions
#

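# Parse the command line and return the tuple
# (split_args, pids_to_show, watch, only_total, discriminate_by_pid, show_swap).
# Unknown options or extraneous arguments print the help text and exit with 3.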
def parse_options():
    try:
        long_options = [
            'split-args',
            'help',
            'total',
            'discriminate-by-pid',
            'swap'
        ]
        opts, args = getopt.getopt(sys.argv[1:], "shtdSp:w:", long_options)
    except getopt.GetoptError:
        sys.stderr.write(help())
        sys.exit(3)

    if len(args):
        sys.stderr.write("Extraneous arguments: %s\n" % args)
        sys.exit(3)

    # ps_mem.py options
    split_args = False
    pids_to_show = None
    discriminate_by_pid = False
    show_swap = False
    watch = None
    only_total = False

    for o, a in opts:
        if o in ('-s', '--split-args'):
            split_args = True
        if o in ('-t', '--total'):
            only_total = True
        if o in ('-d', '--discriminate-by-pid'):
            discriminate_by_pid = True
        if o in ('-S', '--swap'):
            show_swap = True
        if o in ('-h', '--help'):
            sys.stdout.write(help())
            sys.exit(0)
        if o in ('-p',):
            try:
                pids_to_show = [int(x) for x in a.split(',')]
            except:
                sys.stderr.write(help())
                sys.exit(3)
        if o in ('-w',):
            try:
                watch = int(a)
            except:
                sys.stderr.write(help())
                sys.exit(3)

    return (
        split_args,
        pids_to_show,
        watch,
        only_total,
        discriminate_by_pid,
        show_swap
    )


def help():
    help_msg = 'Usage: ps_mem [OPTION]...\n' \
               'Show program core memory usage\n' \
               '\n' \
               '  -h, --help                  Show this help\n' \
               '  -p <pid>[,pid2,...pidN]     Only show memory usage PIDs in the '\
               'specified list\n' \
               '  -s, --split-args            Show and separate by, all command line'\
               ' arguments\n' \
               '  -t, --total                 Show only the total value\n' \
               '  -d, --discriminate-by-pid   Show by process rather than by program\n' \
               '  -S, --swap                  Show swap information\n' \
               '  -w <N>                      Measure and show process memory every'\
               ' N seconds\n'

    return help_msg


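# Parse /proc/sys/kernel/osrelease into a comparable three-part version tuple,
# stripping any non-numeric suffix after "-" or "_".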
# (major,minor,release)
def kernel_ver():
    kv = proc.open('sys/kernel/osrelease').readline().split(".")[:3]
    last = len(kv)
    if last == 2:
        kv.append('0')
    last -= 1
    while last > 0:
        for char in "-_":
            kv[last] = kv[last].split(char)[0]
        try:
            int(kv[last])
        except:
            kv[last] = 0
        last -= 1
    return (int(kv[0]), int(kv[1]), int(kv[2]))


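# Sizes computed below are in KiB. mem_id is a checksum of the smaps contents
# (falling back to the bare pid) that identifies a distinct memory map.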
#return Private,Shared
#Note shared is always a subset of rss (trs is not always)
def getMemStats(pid):
    global have_pss
    global have_swap_pss
    mem_id = pid #unique
    Private_lines = []
    Shared_lines = []
    Pss_lines = []
    Rss = (int(proc.open(pid, 'statm').readline().split()[1])
           * PAGESIZE)
    Swap_lines = []
    Swap_pss_lines = []

    Swap = 0
    Swap_pss = 0

    if os.path.exists(proc.path(pid, 'smaps')): # stat
        lines = proc.open(pid, 'smaps').readlines() # open
        # Note we checksum smaps as maps is usually but
        # not always different for separate processes.
        mem_id = hash(''.join(lines))
        for line in lines:
            if line.startswith("Shared"):
                Shared_lines.append(line)
            elif line.startswith("Private"):
                Private_lines.append(line)
            elif line.startswith("Pss"):
                have_pss = 1
                Pss_lines.append(line)
            elif line.startswith("Swap:"):
                Swap_lines.append(line)
            elif line.startswith("SwapPss:"):
                have_swap_pss = 1
                Swap_pss_lines.append(line)
        Shared = sum([int(line.split()[1]) for line in Shared_lines])
        Private = sum([int(line.split()[1]) for line in Private_lines])
        #Note Shared + Private = Rss above
        #The Rss in smaps includes video card mem etc.
        if have_pss:
            pss_adjust = 0.5 # add 0.5KiB as this avg error due to truncation
            Pss = sum([float(line.split()[1])+pss_adjust for line in Pss_lines])
            Shared = Pss - Private
        # Note that Swap = Private swap + Shared swap.
        Swap = sum([int(line.split()[1]) for line in Swap_lines])
        if have_swap_pss:
            # The kernel supports SwapPss, that shows proportional swap share.
            # Note that Swap - SwapPss is not Private Swap.
            Swap_pss = sum([int(line.split()[1]) for line in Swap_pss_lines])
    elif (2,6,1) <= kernel_ver() <= (2,6,9):
        Shared = 0 #lots of overestimation, but what can we do?
        Private = Rss
    else:
        Shared = int(proc.open(pid, 'statm').readline().split()[2])
        Shared *= PAGESIZE
        Private = Rss - Shared
    return (Private, Shared, mem_id, Swap, Swap_pss)


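# Build a display name for a process: prefer the /proc/<pid>/exe symlink,
# fall back to cmdline, and tag executables whose on-disk file was replaced
# or removed as " [updated]" or " [deleted]".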
def getCmdName(pid, split_args, discriminate_by_pid):
    cmdline = proc.open(pid, 'cmdline').read().split("\0")
    if cmdline[-1] == '' and len(cmdline) > 1:
        cmdline = cmdline[:-1]

    path = proc.path(pid, 'exe')
    try:
        path = os.readlink(path)
        # Some symlink targets were seen to contain NULs on RHEL 5 at least
        # https://github.com/pixelb/scripts/pull/10, so take string up to NUL
        path = path.split('\0')[0]
    except OSError:
        val = sys.exc_info()[1]
        if (val.errno == errno.ENOENT or # either kernel thread or process gone
            val.errno == errno.EPERM):
            raise LookupError
        raise

    if split_args:
        return " ".join(cmdline)
    if path.endswith(" (deleted)"):
        path = path[:-10]
        if os.path.exists(path):
            path += " [updated]"
        else:
            #The path could have prelink stuff so try cmdline
            #which might have the full path present. This helped for:
            #/usr/libexec/notification-area-applet.#prelink#.fX7LCT (deleted)
            if os.path.exists(cmdline[0]):
                path = cmdline[0] + " [updated]"
            else:
                path += " [deleted]"
    exe = os.path.basename(path)
    cmd = proc.open(pid, 'status').readline()[6:-1]
    if exe.startswith(cmd):
        cmd = exe #show non truncated version
        #Note because we show the non truncated name
        #one can have separated programs as follows:
</tr>
<tr>
<td id="L329" class="blob-num js-line-number" data-line-number="329"></td>
<td id="LC329" class="blob-code blob-code-inner js-file-line"> <span class=pl-c>#584.0 KiB + 1.0 MiB = 1.6 MiB mozilla-thunder (exe -> bash)</span></td>
</tr>
<tr>
<td id="L330" class="blob-num js-line-number" data-line-number="330"></td>
<td id="LC330" class="blob-code blob-code-inner js-file-line"> <span class=pl-c># 56.0 MiB + 22.2 MiB = 78.2 MiB mozilla-thunderbird-bin</span></td>
</tr>
<tr>
<td id="L331" class="blob-num js-line-number" data-line-number="331"></td>
<td id="LC331" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>if</span> <span class=pl-s1>sys</span>.<span class=pl-s1>version_info</span> <span class=pl-c1>>=</span> (<span class=pl-c1>3</span>,):</td>
</tr>
<tr>
<td id="L332" class="blob-num js-line-number" data-line-number="332"></td>
<td id="LC332" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>cmd</span> <span class=pl-c1>=</span> <span class=pl-s1>cmd</span>.<span class=pl-en>encode</span>(<span class=pl-s1>errors</span><span class=pl-c1>=</span><span class=pl-s>'replace'</span>).<span class=pl-en>decode</span>()</td>
</tr>
<tr>
<td id="L333" class="blob-num js-line-number" data-line-number="333"></td>
<td id="LC333" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>if</span> <span class=pl-s1>discriminate_by_pid</span>:</td>
</tr>
<tr>
<td id="L334" class="blob-num js-line-number" data-line-number="334"></td>
<td id="LC334" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>cmd</span> <span class=pl-c1>=</span> <span class=pl-s>'%s [%d]'</span> <span class=pl-c1>%</span> (<span class=pl-s1>cmd</span>, <span class=pl-s1>pid</span>)</td>
</tr>
<tr>
<td id="L335" class="blob-num js-line-number" data-line-number="335"></td>
<td id="LC335" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>return</span> <span class=pl-s1>cmd</span></td>
</tr>
<tr>
<td id="L336" class="blob-num js-line-number" data-line-number="336"></td>
<td id="LC336" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L337" class="blob-num js-line-number" data-line-number="337"></td>
<td id="LC337" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L338" class="blob-num js-line-number" data-line-number="338"></td>
<td id="LC338" class="blob-code blob-code-inner js-file-line"><span class=pl-c>#The following matches "du -h" output</span></td>
</tr>
<tr>
<td id="L339" class="blob-num js-line-number" data-line-number="339"></td>
<td id="LC339" class="blob-code blob-code-inner js-file-line"><span class=pl-c>#see also human.py</span></td>
</tr>
<tr>
<td id="L340" class="blob-num js-line-number" data-line-number="340"></td>
<td id="LC340" class="blob-code blob-code-inner js-file-line"><span class=pl-k>def</span> <span class=pl-en>human</span>(<span class=pl-s1>num</span>, <span class=pl-s1>power</span><span class=pl-c1>=</span><span class=pl-s>"Ki"</span>, <span class=pl-s1>units</span><span class=pl-c1>=</span><span class=pl-c1>None</span>):</td>
</tr>
<tr>
<td id="L341" class="blob-num js-line-number" data-line-number="341"></td>
<td id="LC341" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>if</span> <span class=pl-s1>units</span> <span class=pl-c1>is</span> <span class=pl-c1>None</span>:</td>
</tr>
<tr>
<td id="L342" class="blob-num js-line-number" data-line-number="342"></td>
<td id="LC342" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>powers</span> <span class=pl-c1>=</span> [<span class=pl-s>"Ki"</span>, <span class=pl-s>"Mi"</span>, <span class=pl-s>"Gi"</span>, <span class=pl-s>"Ti"</span>]</td>
</tr>
<tr>
<td id="L343" class="blob-num js-line-number" data-line-number="343"></td>
<td id="LC343" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>while</span> <span class=pl-s1>num</span> <span class=pl-c1>>=</span> <span class=pl-c1>1000</span>: <span class=pl-c>#4 digits</span></td>
</tr>
<tr>
<td id="L344" class="blob-num js-line-number" data-line-number="344"></td>
<td id="LC344" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>num</span> <span class=pl-c1>/=</span> <span class=pl-c1>1024.0</span></td>
</tr>
<tr>
<td id="L345" class="blob-num js-line-number" data-line-number="345"></td>
<td id="LC345" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>power</span> <span class=pl-c1>=</span> <span class=pl-s1>powers</span>[<span class=pl-s1>powers</span>.<span class=pl-en>index</span>(<span class=pl-s1>power</span>)<span class=pl-c1>+</span><span class=pl-c1>1</span>]</td>
</tr>
<tr>
<td id="L346" class="blob-num js-line-number" data-line-number="346"></td>
<td id="LC346" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>return</span> <span class=pl-s>"%.1f %sB"</span> <span class=pl-c1>%</span> (<span class=pl-s1>num</span>, <span class=pl-s1>power</span>)</td>
</tr>
<tr>
<td id="L347" class="blob-num js-line-number" data-line-number="347"></td>
<td id="LC347" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>else</span>:</td>
</tr>
<tr>
<td id="L348" class="blob-num js-line-number" data-line-number="348"></td>
<td id="LC348" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>return</span> <span class=pl-s>"%.f"</span> <span class=pl-c1>%</span> ((<span class=pl-s1>num</span> <span class=pl-c1>*</span> <span class=pl-c1>1024</span>) <span class=pl-c1>/</span> <span class=pl-s1>units</span>)</td>
</tr>
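# For example: human(300) -> "300.0 KiB", human(2048) -> "2.0 MiB", and
# human(512, units=1) -> "524288" (the KiB input expressed in the given unit, here bytes).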


def cmd_with_count(cmd, count):
    if count > 1:
        return "%s (%u)" % (cmd, count)
    else:
        return cmd
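# For example: cmd_with_count("bash", 3) -> "bash (3)", while a count of 1
# returns the command name unchanged.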

#Warn of possible inaccuracies
#2 = accurate & can total
#1 = accurate only considering each process in isolation
#0 = some shared mem not reported
#-1= all shared mem not reported
def shared_val_accuracy():
    """http://wiki.apache.org/spamassassin/TopSharedMemoryBug"""
    kv = kernel_ver()
    pid = os.getpid()
    if kv[:2] == (2,4):
        if proc.open('meminfo').read().find("Inact_") == -1:
            return 1
        return 0
    elif kv[:2] == (2,6):
        if os.path.exists(proc.path(pid, 'smaps')):
            if proc.open(pid, 'smaps').read().find("Pss:")!=-1:
                return 2
            else:
                return 1
        if (2,6,1) <= kv <= (2,6,9):
            return -1
        return 0
    elif kv[0] > 2 and os.path.exists(proc.path(pid, 'smaps')):
        return 2
    else:
        return 1

def show_shared_val_accuracy( possible_inacc, only_total=False ):
    level = ("Warning","Error")[only_total]
    if possible_inacc == -1:
        sys.stderr.write(
            "%s: Shared memory is not reported by this system.\n" % level
        )
        sys.stderr.write(
            "Values reported will be too large, and totals are not reported\n"
        )
    elif possible_inacc == 0:
        sys.stderr.write(
            "%s: Shared memory is not reported accurately by this system.\n" % level
        )
        sys.stderr.write(
            "Values reported could be too large, and totals are not reported\n"
        )
    elif possible_inacc == 1:
        sys.stderr.write(
            "%s: Shared memory is slightly over-estimated by this system\n"
            "for each program, so totals are not reported.\n" % level
        )
    sys.stderr.close()
    if only_total and possible_inacc != 2:
        sys.exit(1)


def get_memory_usage(pids_to_show, split_args, discriminate_by_pid,
                     include_self=False, only_self=False):
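    # Scans /proc, grouping memory by program name. Returns, in order:
    #   sorted_cmds   - [(cmd, ram_used)] sorted by ascending RAM use
    #   shareds       - shared memory attributed to each cmd
    #   count         - number of processes seen for each cmd
    #   total         - grand total RAM (only accurate when PSS is available)
    #   swaps, shared_swaps, total_swap, total_shared_swap - swap accounting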
    cmds = {}
    shareds = {}
    mem_ids = {}
    count = {}
    swaps = {}
    shared_swaps = {}
    for pid in os.listdir(proc.path('')):
        if not pid.isdigit():
            continue
        pid = int(pid)

        # Some filters
        if only_self and pid != our_pid:
            continue
        if pid == our_pid and not include_self:
            continue
        if pids_to_show is not None and pid not in pids_to_show:
            continue

        try:
            cmd = getCmdName(pid, split_args, discriminate_by_pid)
        except LookupError:
            #operation not permitted
            #kernel threads don't have exe links or
            #process gone
            continue

        try:
            private, shared, mem_id, swap, swap_pss = getMemStats(pid)
        except RuntimeError:
            continue #process gone
        if shareds.get(cmd):
            if have_pss: #add shared portion of PSS together
                shareds[cmd] += shared
            elif shareds[cmd] < shared: #just take largest shared val
                shareds[cmd] = shared
        else:
            shareds[cmd] = shared
        cmds[cmd] = cmds.setdefault(cmd, 0) + private
        if cmd in count:
            count[cmd] += 1
        else:
            count[cmd] = 1
        mem_ids.setdefault(cmd, {}).update({mem_id: None})

        # Swap (overcounting for now...)
        swaps[cmd] = swaps.setdefault(cmd, 0) + swap
        if have_swap_pss:
            shared_swaps[cmd] = shared_swaps.setdefault(cmd, 0) + swap_pss
        else:
            shared_swaps[cmd] = 0

    # Total swapped mem for each program
    total_swap = 0

    # Total swapped shared mem for each program
    total_shared_swap = 0

    # Add shared mem for each program
    total = 0

    for cmd in cmds:
        cmd_count = count[cmd]
        if len(mem_ids[cmd]) == 1 and cmd_count > 1:
            # Assume this program is using CLONE_VM without CLONE_THREAD
            # so only account for one of the processes
            cmds[cmd] /= cmd_count
            if have_pss:
                shareds[cmd] /= cmd_count
        cmds[cmd] = cmds[cmd] + shareds[cmd]
        total += cmds[cmd] # valid if PSS available
        total_swap += swaps[cmd]
        if have_swap_pss:
            total_shared_swap += shared_swaps[cmd]

    sorted_cmds = sorted(cmds.items(), key=lambda x:x[1])
    sorted_cmds = [x for x in sorted_cmds if x[1]]

    return sorted_cmds, shareds, count, total, swaps, shared_swaps, \
           total_swap, total_shared_swap


def print_header(show_swap, discriminate_by_pid):
    output_string = " Private + Shared = RAM used"
    if show_swap:
        if have_swap_pss:
            output_string += " " * 5 + "Shared Swap"
        output_string += " Swap used"
    output_string += "\tProgram"
    if discriminate_by_pid:
        output_string += "[pid]"
    output_string += "\n\n"
    sys.stdout.write(output_string)


def print_memory_usage(sorted_cmds, shareds, count, total, swaps, total_swap,
                       shared_swaps, total_shared_swap, show_swap):
    for cmd in sorted_cmds:

        output_string = "%9s + %9s = %9s"
        output_data = (human(cmd[1]-shareds[cmd[0]]),
                       human(shareds[cmd[0]]), human(cmd[1]))
        if show_swap:
            if have_swap_pss:
                output_string += "\t%9s"
                output_data += (human(shared_swaps[cmd[0]]),)
            output_string += " %9s"
            output_data += (human(swaps[cmd[0]]),)
        output_string += "\t%s\n"
        output_data += (cmd_with_count(cmd[0], count[cmd[0]]),)

        sys.stdout.write(output_string % output_data)

    if have_pss:
        if show_swap:
            if have_swap_pss:
                sys.stdout.write("%s\n%s%9s%s%9s%s%9s\n%s\n" %
                                 ("-" * 61, " " * 24, human(total), " " * 7,
                                  human(total_shared_swap), " " * 3,
                                  human(total_swap), "=" * 61))
            else:
                sys.stdout.write("%s\n%s%9s%s%9s\n%s\n" %
                                 ("-" * 45, " " * 24, human(total), " " * 3,
                                  human(total_swap), "=" * 45))
        else:
            sys.stdout.write("%s\n%s%9s\n%s\n" %
                             ("-" * 33, " " * 24, human(total), "=" * 33))


def verify_environment():
    if os.geteuid() != 0:
        sys.stderr.write("Sorry, root permission required.\n")
<tr>
<td id="L544" class="blob-num js-line-number" data-line-number="544"></td>
<td id="LC544" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>sys</span>.<span class=pl-s1>stderr</span>.<span class=pl-en>close</span>()</td>
</tr>
<tr>
<td id="L545" class="blob-num js-line-number" data-line-number="545"></td>
<td id="LC545" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>sys</span>.<span class=pl-en>exit</span>(<span class=pl-c1>1</span>)</td>
</tr>
<tr>
<td id="L546" class="blob-num js-line-number" data-line-number="546"></td>
<td id="LC546" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L547" class="blob-num js-line-number" data-line-number="547"></td>
<td id="LC547" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>try</span>:</td>
</tr>
<tr>
<td id="L548" class="blob-num js-line-number" data-line-number="548"></td>
<td id="LC548" class="blob-code blob-code-inner js-file-line"> <span class=pl-en>kernel_ver</span>()</td>
</tr>
<tr>
<td id="L549" class="blob-num js-line-number" data-line-number="549"></td>
<td id="LC549" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>except</span> (<span class=pl-v>IOError</span>, <span class=pl-v>OSError</span>):</td>
</tr>
<tr>
<td id="L550" class="blob-num js-line-number" data-line-number="550"></td>
<td id="LC550" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>val</span> <span class=pl-c1>=</span> <span class=pl-s1>sys</span>.<span class=pl-en>exc_info</span>()[<span class=pl-c1>1</span>]</td>
</tr>
<tr>
<td id="L551" class="blob-num js-line-number" data-line-number="551"></td>
<td id="LC551" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>if</span> <span class=pl-s1>val</span>.<span class=pl-s1>errno</span> <span class=pl-c1>==</span> <span class=pl-s1>errno</span>.<span class=pl-v>ENOENT</span>:</td>
</tr>
<tr>
<td id="L552" class="blob-num js-line-number" data-line-number="552"></td>
<td id="LC552" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>sys</span>.<span class=pl-s1>stderr</span>.<span class=pl-en>write</span>(</td>
</tr>
<tr>
<td id="L553" class="blob-num js-line-number" data-line-number="553"></td>
<td id="LC553" class="blob-code blob-code-inner js-file-line"> <span class=pl-s>"Couldn't access "</span> <span class=pl-c1>+</span> <span class=pl-s1>proc</span>.<span class=pl-en>path</span>(<span class=pl-s>''</span>) <span class=pl-c1>+</span> <span class=pl-s>"<span class=pl-cce>\n</span>"</span></td>
</tr>
<tr>
<td id="L554" class="blob-num js-line-number" data-line-number="554"></td>
<td id="LC554" class="blob-code blob-code-inner js-file-line"> <span class=pl-s>"Only GNU/Linux and FreeBSD (with linprocfs) are supported<span class=pl-cce>\n</span>"</span>)</td>
</tr>
<tr>
<td id="L555" class="blob-num js-line-number" data-line-number="555"></td>
<td id="LC555" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>sys</span>.<span class=pl-en>exit</span>(<span class=pl-c1>2</span>)</td>
</tr>
<tr>
<td id="L556" class="blob-num js-line-number" data-line-number="556"></td>
<td id="LC556" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>else</span>:</td>
</tr>
<tr>
<td id="L557" class="blob-num js-line-number" data-line-number="557"></td>
<td id="LC557" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>raise</span></td>
</tr>
<tr>
<td id="L558" class="blob-num js-line-number" data-line-number="558"></td>
<td id="LC558" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L559" class="blob-num js-line-number" data-line-number="559"></td>
<td id="LC559" class="blob-code blob-code-inner js-file-line"><span class=pl-k>def</span> <span class=pl-en>main</span>():</td>
</tr>
<tr>
<td id="L560" class="blob-num js-line-number" data-line-number="560"></td>
<td id="LC560" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>split_args</span>, <span class=pl-s1>pids_to_show</span>, <span class=pl-s1>watch</span>, <span class=pl-s1>only_total</span>, <span class=pl-s1>discriminate_by_pid</span>, \</td>
</tr>
<tr>
<td id="L561" class="blob-num js-line-number" data-line-number="561"></td>
<td id="LC561" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>show_swap</span> <span class=pl-c1>=</span> <span class=pl-en>parse_options</span>()</td>
</tr>
<tr>
<td id="L562" class="blob-num js-line-number" data-line-number="562"></td>
<td id="LC562" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L563" class="blob-num js-line-number" data-line-number="563"></td>
<td id="LC563" class="blob-code blob-code-inner js-file-line"> <span class=pl-en>verify_environment</span>()</td>
</tr>
<tr>
<td id="L564" class="blob-num js-line-number" data-line-number="564"></td>
<td id="LC564" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L565" class="blob-num js-line-number" data-line-number="565"></td>
<td id="LC565" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>if</span> <span class=pl-c1>not</span> <span class=pl-s1>only_total</span>:</td>
</tr>
<tr>
<td id="L566" class="blob-num js-line-number" data-line-number="566"></td>
<td id="LC566" class="blob-code blob-code-inner js-file-line"> <span class=pl-en>print_header</span>(<span class=pl-s1>show_swap</span>, <span class=pl-s1>discriminate_by_pid</span>)</td>
</tr>
<tr>
<td id="L567" class="blob-num js-line-number" data-line-number="567"></td>
<td id="LC567" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L568" class="blob-num js-line-number" data-line-number="568"></td>
<td id="LC568" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>if</span> <span class=pl-s1>watch</span> <span class=pl-c1>is</span> <span class=pl-c1>not</span> <span class=pl-c1>None</span>:</td>
</tr>
<tr>
<td id="L569" class="blob-num js-line-number" data-line-number="569"></td>
<td id="LC569" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>try</span>:</td>
</tr>
<tr>
<td id="L570" class="blob-num js-line-number" data-line-number="570"></td>
<td id="LC570" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>sorted_cmds</span> <span class=pl-c1>=</span> <span class=pl-c1>True</span></td>
</tr>
<tr>
<td id="L571" class="blob-num js-line-number" data-line-number="571"></td>
<td id="LC571" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>while</span> <span class=pl-s1>sorted_cmds</span>:</td>
</tr>
<tr>
<td id="L572" class="blob-num js-line-number" data-line-number="572"></td>
<td id="LC572" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>sorted_cmds</span>, <span class=pl-s1>shareds</span>, <span class=pl-s1>count</span>, <span class=pl-s1>total</span>, <span class=pl-s1>swaps</span>, <span class=pl-s1>shared_swaps</span>, \</td>
</tr>
<tr>
<td id="L573" class="blob-num js-line-number" data-line-number="573"></td>
<td id="LC573" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>total_swap</span>, <span class=pl-s1>total_shared_swap</span> <span class=pl-c1>=</span> \</td>
</tr>
<tr>
<td id="L574" class="blob-num js-line-number" data-line-number="574"></td>
<td id="LC574" class="blob-code blob-code-inner js-file-line"> <span class=pl-en>get_memory_usage</span>(<span class=pl-s1>pids_to_show</span>, <span class=pl-s1>split_args</span>,</td>
</tr>
<tr>
<td id="L575" class="blob-num js-line-number" data-line-number="575"></td>
<td id="LC575" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>discriminate_by_pid</span>)</td>
</tr>
<tr>
<td id="L576" class="blob-num js-line-number" data-line-number="576"></td>
<td id="LC576" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>if</span> <span class=pl-s1>only_total</span> <span class=pl-c1>and</span> <span class=pl-s1>have_pss</span>:</td>
</tr>
<tr>
<td id="L577" class="blob-num js-line-number" data-line-number="577"></td>
<td id="LC577" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>sys</span>.<span class=pl-s1>stdout</span>.<span class=pl-en>write</span>(<span class=pl-en>human</span>(<span class=pl-s1>total</span>, <span class=pl-s1>units</span><span class=pl-c1>=</span><span class=pl-c1>1</span>)<span class=pl-c1>+</span><span class=pl-s>'<span class=pl-cce>\n</span>'</span>)</td>
</tr>
<tr>
<td id="L578" class="blob-num js-line-number" data-line-number="578"></td>
<td id="LC578" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>elif</span> <span class=pl-c1>not</span> <span class=pl-s1>only_total</span>:</td>
</tr>
<tr>
<td id="L579" class="blob-num js-line-number" data-line-number="579"></td>
<td id="LC579" class="blob-code blob-code-inner js-file-line"> <span class=pl-en>print_memory_usage</span>(<span class=pl-s1>sorted_cmds</span>, <span class=pl-s1>shareds</span>, <span class=pl-s1>count</span>, <span class=pl-s1>total</span>,</td>
</tr>
<tr>
<td id="L580" class="blob-num js-line-number" data-line-number="580"></td>
<td id="LC580" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>swaps</span>, <span class=pl-s1>total_swap</span>, <span class=pl-s1>shared_swaps</span>,</td>
</tr>
<tr>
<td id="L581" class="blob-num js-line-number" data-line-number="581"></td>
<td id="LC581" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>total_shared_swap</span>, <span class=pl-s1>show_swap</span>)</td>
</tr>
<tr>
<td id="L582" class="blob-num js-line-number" data-line-number="582"></td>
<td id="LC582" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L583" class="blob-num js-line-number" data-line-number="583"></td>
<td id="LC583" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>sys</span>.<span class=pl-s1>stdout</span>.<span class=pl-en>flush</span>()</td>
</tr>
<tr>
<td id="L584" class="blob-num js-line-number" data-line-number="584"></td>
<td id="LC584" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>time</span>.<span class=pl-en>sleep</span>(<span class=pl-s1>watch</span>)</td>
</tr>
<tr>
<td id="L585" class="blob-num js-line-number" data-line-number="585"></td>
<td id="LC585" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>else</span>:</td>
</tr>
<tr>
<td id="L586" class="blob-num js-line-number" data-line-number="586"></td>
<td id="LC586" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>sys</span>.<span class=pl-s1>stdout</span>.<span class=pl-en>write</span>(<span class=pl-s>'Process does not exist anymore.<span class=pl-cce>\n</span>'</span>)</td>
</tr>
<tr>
<td id="L587" class="blob-num js-line-number" data-line-number="587"></td>
<td id="LC587" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>except</span> <span class=pl-v>KeyboardInterrupt</span>:</td>
</tr>
<tr>
<td id="L588" class="blob-num js-line-number" data-line-number="588"></td>
<td id="LC588" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>pass</span></td>
</tr>
<tr>
<td id="L589" class="blob-num js-line-number" data-line-number="589"></td>
<td id="LC589" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>else</span>:</td>
</tr>
<tr>
<td id="L590" class="blob-num js-line-number" data-line-number="590"></td>
<td id="LC590" class="blob-code blob-code-inner js-file-line"> <span class=pl-c># This is the default behavior</span></td>
</tr>
<tr>
<td id="L591" class="blob-num js-line-number" data-line-number="591"></td>
<td id="LC591" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>sorted_cmds</span>, <span class=pl-s1>shareds</span>, <span class=pl-s1>count</span>, <span class=pl-s1>total</span>, <span class=pl-s1>swaps</span>, <span class=pl-s1>shared_swaps</span>, <span class=pl-s1>total_swap</span>, \</td>
</tr>
<tr>
<td id="L592" class="blob-num js-line-number" data-line-number="592"></td>
<td id="LC592" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>total_shared_swap</span> <span class=pl-c1>=</span> <span class=pl-en>get_memory_usage</span>(<span class=pl-s1>pids_to_show</span>, <span class=pl-s1>split_args</span>,</td>
</tr>
<tr>
<td id="L593" class="blob-num js-line-number" data-line-number="593"></td>
<td id="LC593" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>discriminate_by_pid</span>)</td>
</tr>
<tr>
<td id="L594" class="blob-num js-line-number" data-line-number="594"></td>
<td id="LC594" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>if</span> <span class=pl-s1>only_total</span> <span class=pl-c1>and</span> <span class=pl-s1>have_pss</span>:</td>
</tr>
<tr>
<td id="L595" class="blob-num js-line-number" data-line-number="595"></td>
<td id="LC595" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>sys</span>.<span class=pl-s1>stdout</span>.<span class=pl-en>write</span>(<span class=pl-en>human</span>(<span class=pl-s1>total</span>, <span class=pl-s1>units</span><span class=pl-c1>=</span><span class=pl-c1>1</span>)<span class=pl-c1>+</span><span class=pl-s>'<span class=pl-cce>\n</span>'</span>)</td>
</tr>
<tr>
<td id="L596" class="blob-num js-line-number" data-line-number="596"></td>
<td id="LC596" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>elif</span> <span class=pl-c1>not</span> <span class=pl-s1>only_total</span>:</td>
</tr>
<tr>
<td id="L597" class="blob-num js-line-number" data-line-number="597"></td>
<td id="LC597" class="blob-code blob-code-inner js-file-line"> <span class=pl-en>print_memory_usage</span>(<span class=pl-s1>sorted_cmds</span>, <span class=pl-s1>shareds</span>, <span class=pl-s1>count</span>, <span class=pl-s1>total</span>, <span class=pl-s1>swaps</span>,</td>
</tr>
<tr>
<td id="L598" class="blob-num js-line-number" data-line-number="598"></td>
<td id="LC598" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>total_swap</span>, <span class=pl-s1>shared_swaps</span>, <span class=pl-s1>total_shared_swap</span>,</td>
</tr>
<tr>
<td id="L599" class="blob-num js-line-number" data-line-number="599"></td>
<td id="LC599" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>show_swap</span>)</td>
</tr>
<tr>
<td id="L600" class="blob-num js-line-number" data-line-number="600"></td>
<td id="LC600" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L601" class="blob-num js-line-number" data-line-number="601"></td>
<td id="LC601" class="blob-code blob-code-inner js-file-line"> <span class=pl-c># We must close explicitly, so that any EPIPE exception</span></td>
</tr>
<tr>
<td id="L602" class="blob-num js-line-number" data-line-number="602"></td>
<td id="LC602" class="blob-code blob-code-inner js-file-line"> <span class=pl-c># is handled by our excepthook, rather than the default</span></td>
</tr>
<tr>
<td id="L603" class="blob-num js-line-number" data-line-number="603"></td>
<td id="LC603" class="blob-code blob-code-inner js-file-line"> <span class=pl-c># one which is reenabled after this script finishes.</span></td>
</tr>
<tr>
<td id="L604" class="blob-num js-line-number" data-line-number="604"></td>
<td id="LC604" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>sys</span>.<span class=pl-s1>stdout</span>.<span class=pl-en>close</span>()</td>
</tr>
<tr>
<td id="L605" class="blob-num js-line-number" data-line-number="605"></td>
<td id="LC605" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L606" class="blob-num js-line-number" data-line-number="606"></td>
<td id="LC606" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>vm_accuracy</span> <span class=pl-c1>=</span> <span class=pl-en>shared_val_accuracy</span>()</td>
</tr>
<tr>
<td id="L607" class="blob-num js-line-number" data-line-number="607"></td>
<td id="LC607" class="blob-code blob-code-inner js-file-line"> <span class=pl-en>show_shared_val_accuracy</span>( <span class=pl-s1>vm_accuracy</span>, <span class=pl-s1>only_total</span> )</td>
</tr>
<tr>
<td id="L608" class="blob-num js-line-number" data-line-number="608"></td>
<td id="LC608" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L609" class="blob-num js-line-number" data-line-number="609"></td>
<td id="LC609" class="blob-code blob-code-inner js-file-line"><span class=pl-k>if</span> <span class=pl-s1>__name__</span> <span class=pl-c1>==</span> <span class=pl-s>'__main__'</span>: <span class=pl-en>main</span>()</td>
</tr>
| 71.15658
| 758
| 0.615205
|
3177808f63c3277ffb663cd4bd9f076141be2712
| 49,516
|
py
|
Python
|
python/ccxt/probit.py
|
cloudinertia/ccxt
|
877316d1e8e9b909d3781c8d23c0ad01f442357f
|
[
"MIT"
] | 1
|
2021-09-06T00:09:11.000Z
|
2021-09-06T00:09:11.000Z
|
python/ccxt/probit.py
|
cloudinertia/ccxt
|
877316d1e8e9b909d3781c8d23c0ad01f442357f
|
[
"MIT"
] | null | null | null |
python/ccxt/probit.py
|
cloudinertia/ccxt
|
877316d1e8e9b909d3781c8d23c0ad01f442357f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import base64
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import BadResponse
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import TICK_SIZE
class probit(Exchange):
def describe(self):
return self.deep_extend(super(probit, self).describe(), {
'id': 'probit',
'name': 'ProBit',
'countries': ['SC', 'KR'], # Seychelles, South Korea
'rateLimit': 250, # ms
'has': {
'CORS': True,
'fetchTime': True,
'fetchMarkets': True,
'fetchCurrencies': True,
'fetchTickers': True,
'fetchTicker': True,
'fetchOHLCV': True,
'fetchOrderBook': True,
'fetchTrades': True,
'fetchBalance': True,
'createOrder': True,
'createMarketOrder': True,
'cancelOrder': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchMyTrades': True,
'fetchDepositAddress': True,
'withdraw': True,
'signIn': True,
},
'timeframes': {
'1m': '1m',
'3m': '3m',
'5m': '5m',
'10m': '10m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'4h': '4h',
'6h': '6h',
'12h': '12h',
'1d': '1D',
'1w': '1W',
'1M': '1M',
},
'version': 'v1',
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/79268032-c4379480-7ea2-11ea-80b3-dd96bb29fd0d.jpg',
'api': {
'accounts': 'https://accounts.probit.com',
'public': 'https://api.probit.com/api/exchange',
'private': 'https://api.probit.com/api/exchange',
},
'www': 'https://www.probit.com',
'doc': [
'https://docs-en.probit.com',
'https://docs-ko.probit.com',
],
'fees': 'https://support.probit.com/hc/en-us/articles/360020968611-Trading-Fees',
'referral': 'https://www.probit.com/r/34608773',
},
'api': {
'public': {
'get': [
'market',
'currency',
'currency_with_platform',
'time',
'ticker',
'order_book',
'trade',
'candle',
],
},
'private': {
'post': [
'new_order',
'cancel_order',
'withdrawal',
],
'get': [
'balance',
'order',
'open_order',
'order_history',
'trade_history',
'deposit_address',
],
},
'accounts': {
'post': [
'token',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.2 / 100,
'taker': 0.2 / 100,
},
},
'exceptions': {
'exact': {
'UNAUTHORIZED': AuthenticationError,
'INVALID_ARGUMENT': BadRequest, # Parameters are not a valid format, parameters are empty, or out of range, or a parameter was sent when not required.
'TRADING_UNAVAILABLE': ExchangeNotAvailable,
'NOT_ENOUGH_BALANCE': InsufficientFunds,
'NOT_ALLOWED_COMBINATION': BadRequest,
'INVALID_ORDER': InvalidOrder, # Requested order does not exist, or it is not your order
'RATE_LIMIT_EXCEEDED': RateLimitExceeded, # You are sending requests too frequently. Please try it later.
'MARKET_UNAVAILABLE': ExchangeNotAvailable, # Market is closed today
                    'INVALID_MARKET': BadSymbol,  # Requested market does not exist
                    'INVALID_CURRENCY': BadRequest,  # Requested currency does not exist on the ProBit system
'TOO_MANY_OPEN_ORDERS': DDoSProtection, # Too many open orders
'DUPLICATE_ADDRESS': InvalidAddress, # Address already exists in withdrawal address list
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'precisionMode': TICK_SIZE,
'options': {
'createMarketBuyOrderRequiresPrice': True,
'timeInForce': {
'limit': 'gtc',
'market': 'ioc',
},
},
'commonCurrencies': {
'BTCBEAR': 'BEAR',
'BTCBULL': 'BULL',
},
})
def fetch_markets(self, params={}):
response = self.publicGetMarket(params)
#
# {
# "data":[
# {
# "id":"MONA-USDT",
# "base_currency_id":"MONA",
# "quote_currency_id":"USDT",
# "min_price":"0.001",
# "max_price":"9999999999999999",
# "price_increment":"0.001",
# "min_quantity":"0.0001",
# "max_quantity":"9999999999999999",
# "quantity_precision":4,
# "min_cost":"1",
# "max_cost":"9999999999999999",
# "cost_precision":8,
# "taker_fee_rate":"0.2",
# "maker_fee_rate":"0.2",
# "show_in_ui":true,
# "closed":false
# },
# ]
# }
#
markets = self.safe_value(response, 'data', [])
result = []
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'base_currency_id')
quoteId = self.safe_string(market, 'quote_currency_id')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
closed = self.safe_value(market, 'closed', False)
active = not closed
amountPrecision = self.safe_integer(market, 'quantity_precision')
costPrecision = self.safe_integer(market, 'cost_precision')
precision = {
'amount': 1 / math.pow(10, amountPrecision),
'price': self.safe_float(market, 'price_increment'),
'cost': 1 / math.pow(10, costPrecision),
}
takerFeeRate = self.safe_float(market, 'taker_fee_rate')
makerFeeRate = self.safe_float(market, 'maker_fee_rate')
result.append({
'id': id,
'info': market,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'precision': precision,
'taker': takerFeeRate / 100,
'maker': makerFeeRate / 100,
'limits': {
'amount': {
'min': self.safe_float(market, 'min_quantity'),
'max': self.safe_float(market, 'max_quantity'),
},
'price': {
'min': self.safe_float(market, 'min_price'),
'max': self.safe_float(market, 'max_price'),
},
'cost': {
'min': self.safe_float(market, 'min_cost'),
'max': self.safe_float(market, 'max_cost'),
},
},
})
return result
def fetch_currencies(self, params={}):
response = self.publicGetCurrencyWithPlatform(params)
#
# {
# "data":[
# {
# "id":"USDT",
# "display_name":{"ko-kr":"테더","en-us":"Tether"},
# "show_in_ui":true,
# "platform":[
# {
# "id":"ETH",
# "priority":1,
# "deposit":true,
# "withdrawal":true,
# "currency_id":"USDT",
# "precision":6,
# "min_confirmation_count":15,
# "require_destination_tag":false,
# "display_name":{"name":{"ko-kr":"ERC-20","en-us":"ERC-20"}},
# "min_deposit_amount":"0",
# "min_withdrawal_amount":"1",
# "withdrawal_fee":[
# {"amount":"0.01","priority":2,"currency_id":"ETH"},
# {"amount":"1.5","priority":1,"currency_id":"USDT"},
# ],
# "deposit_fee":{},
# "suspended_reason":"",
# "deposit_suspended":false,
# "withdrawal_suspended":false
# },
# {
# "id":"OMNI",
# "priority":2,
# "deposit":true,
# "withdrawal":true,
# "currency_id":"USDT",
# "precision":6,
# "min_confirmation_count":3,
# "require_destination_tag":false,
# "display_name":{"name":{"ko-kr":"OMNI","en-us":"OMNI"}},
# "min_deposit_amount":"0",
# "min_withdrawal_amount":"5",
# "withdrawal_fee":[{"amount":"5","priority":1,"currency_id":"USDT"}],
# "deposit_fee":{},
# "suspended_reason":"wallet_maintenance",
# "deposit_suspended":false,
# "withdrawal_suspended":false
# }
# ],
# "stakeable":false,
# "unstakeable":false,
# "auto_stake":false,
# "auto_stake_amount":"0"
# }
# ]
# }
#
currencies = self.safe_value(response, 'data')
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_string(currency, 'id')
code = self.safe_currency_code(id)
displayName = self.safe_value(currency, 'display_name')
name = self.safe_string(displayName, 'en-us')
platforms = self.safe_value(currency, 'platform', [])
platformsByPriority = self.sort_by(platforms, 'priority')
platform = self.safe_value(platformsByPriority, 0, {})
precision = self.safe_integer(platform, 'precision')
depositSuspended = self.safe_value(platform, 'deposit_suspended')
withdrawalSuspended = self.safe_value(platform, 'withdrawal_suspended')
active = not (depositSuspended and withdrawalSuspended)
withdrawalFees = self.safe_value(platform, 'withdrawal_fee', {})
withdrawalFeesByPriority = self.sort_by(withdrawalFees, 'priority')
withdrawalFee = self.safe_value(withdrawalFeesByPriority, 0, {})
fee = self.safe_float(withdrawalFee, 'amount')
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': name,
'active': active,
'fee': fee,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': None,
'max': None,
},
'deposit': {
'min': self.safe_float(platform, 'min_deposit_amount'),
'max': None,
},
'withdraw': {
'min': self.safe_float(platform, 'min_withdrawal_amount'),
'max': None,
},
},
}
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetBalance(params)
#
# {
# data: [
# {
# "currency_id":"XRP",
# "total":"100",
# "available":"0",
# }
# ]
# }
#
data = self.safe_value(response, 'data')
result = {'info': data}
for i in range(0, len(data)):
balance = data[i]
currencyId = self.safe_string(balance, 'currency_id')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_float(balance, 'total')
account['free'] = self.safe_float(balance, 'available')
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market_id': market['id'],
}
response = self.publicGetOrderBook(self.extend(request, params))
#
# {
# data: [
# {side: 'buy', price: '0.000031', quantity: '10'},
# {side: 'buy', price: '0.00356007', quantity: '4.92156877'},
# {side: 'sell', price: '0.1857', quantity: '0.17'},
# ]
# }
#
data = self.safe_value(response, 'data', [])
dataBySide = self.group_by(data, 'side')
return self.parse_order_book(dataBySide, None, 'buy', 'sell', 'price', 'quantity')
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
request = {}
if symbols is not None:
marketIds = self.market_ids(symbols)
request['market_ids'] = ','.join(marketIds)
response = self.publicGetTicker(self.extend(request, params))
#
# {
# "data":[
# {
# "last":"0.022902",
# "low":"0.021693",
# "high":"0.024093",
# "change":"-0.000047",
# "base_volume":"15681.986",
# "quote_volume":"360.514403624",
# "market_id":"ETH-BTC",
# "time":"2020-04-12T18:43:38.000Z"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_tickers(data, symbols)
def parse_tickers(self, rawTickers, symbols=None):
tickers = []
for i in range(0, len(rawTickers)):
tickers.append(self.parse_ticker(rawTickers[i]))
return self.filter_by_array(tickers, 'symbol', symbols)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market_ids': market['id'],
}
response = self.publicGetTicker(self.extend(request, params))
#
# {
# "data":[
# {
# "last":"0.022902",
# "low":"0.021693",
# "high":"0.024093",
# "change":"-0.000047",
# "base_volume":"15681.986",
# "quote_volume":"360.514403624",
# "market_id":"ETH-BTC",
# "time":"2020-04-12T18:43:38.000Z"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
ticker = self.safe_value(data, 0)
if ticker is None:
raise BadResponse(self.id + ' fetchTicker() returned an empty response')
return self.parse_ticker(ticker, market)
def parse_ticker(self, ticker, market=None):
#
# {
# "last":"0.022902",
# "low":"0.021693",
# "high":"0.024093",
# "change":"-0.000047",
# "base_volume":"15681.986",
# "quote_volume":"360.514403624",
# "market_id":"ETH-BTC",
# "time":"2020-04-12T18:43:38.000Z"
# }
#
timestamp = self.parse8601(self.safe_string(ticker, 'time'))
symbol = None
marketId = self.safe_string(ticker, 'market_id')
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if (symbol is None) and (market is not None):
symbol = market['symbol']
close = self.safe_float(ticker, 'last')
change = self.safe_float(ticker, 'change')
percentage = None
open = None
if change is not None:
if close is not None:
open = close - change
percentage = (change / open) * 100
baseVolume = self.safe_float(ticker, 'base_volume')
quoteVolume = self.safe_float(ticker, 'quote_volume')
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': vwap,
'open': open,
'close': close,
'last': close,
'previousClose': None, # previous day close
'change': change,
'percentage': percentage,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {
'limit': 100,
'start_time': self.iso8601(0),
'end_time': self.iso8601(self.milliseconds()),
}
if symbol is not None:
market = self.market(symbol)
request['market_id'] = market['id']
if since is not None:
request['start_time'] = self.iso8601(since)
if limit is not None:
request['limit'] = limit
response = self.privateGetTradeHistory(self.extend(request, params))
#
# {
# data: [
# {
# "id":"BTC-USDT:183566",
# "order_id":"17209376",
# "side":"sell",
# "fee_amount":"0.657396569175",
# "fee_currency_id":"USDT",
# "status":"settled",
# "price":"6573.96569175",
# "quantity":"0.1",
# "cost":"657.396569175",
# "time":"2018-08-10T06:06:46.000Z",
# "market_id":"BTC-USDT"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_trades(data, market, since, limit)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market_id': market['id'],
'limit': 100,
'start_time': '1970-01-01T00:00:00.000Z',
'end_time': self.iso8601(self.milliseconds()),
}
if since is not None:
request['start_time'] = self.iso8601(since)
if limit is not None:
request['limit'] = limit
response = self.publicGetTrade(self.extend(request, params))
#
# {
# "data":[
# {
# "id":"ETH-BTC:3331886",
# "price":"0.022981",
# "quantity":"12.337",
# "time":"2020-04-12T20:55:42.371Z",
# "side":"sell",
# "tick_direction":"down"
# },
# {
# "id":"ETH-BTC:3331885",
# "price":"0.022982",
# "quantity":"6.472",
# "time":"2020-04-12T20:55:39.652Z",
# "side":"sell",
# "tick_direction":"down"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_trades(data, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"ETH-BTC:3331886",
# "price":"0.022981",
# "quantity":"12.337",
# "time":"2020-04-12T20:55:42.371Z",
# "side":"sell",
# "tick_direction":"down"
# }
#
# fetchMyTrades(private)
#
# {
# "id":"BTC-USDT:183566",
# "order_id":"17209376",
# "side":"sell",
# "fee_amount":"0.657396569175",
# "fee_currency_id":"USDT",
# "status":"settled",
# "price":"6573.96569175",
# "quantity":"0.1",
# "cost":"657.396569175",
# "time":"2018-08-10T06:06:46.000Z",
# "market_id":"BTC-USDT"
# }
#
timestamp = self.parse8601(self.safe_string(trade, 'time'))
symbol = None
id = self.safe_string(trade, 'id')
if id is not None:
parts = id.split(':')
marketId = self.safe_string(parts, 0)
if marketId is None:
marketId = self.safe_string(trade, 'market_id')
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if (symbol is None) and (market is not None):
symbol = market['symbol']
side = self.safe_string(trade, 'side')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'quantity')
cost = None
if price is not None:
if amount is not None:
cost = price * amount
orderId = self.safe_string(trade, 'order_id')
feeCost = self.safe_float(trade, 'fee_amount')
fee = None
if feeCost is not None:
feeCurrencyId = self.safe_string(trade, 'fee_currency_id')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_time(self, params={}):
response = self.publicGetTime(params)
#
# {"data":"2020-04-12T18:54:25.390Z"}
#
timestamp = self.parse8601(self.safe_string(response, 'data'))
return timestamp
def normalize_ohlcv_timestamp(self, timestamp, timeframe, after=False):
duration = self.parse_timeframe(timeframe)
if timeframe == '1M':
iso8601 = self.iso8601(timestamp)
parts = iso8601.split('-')
year = self.safe_string(parts, 0)
month = self.safe_integer(parts, 1)
if after:
month = self.sum(month, 1)
if month < 10:
month = '0' + str(month)
else:
month = str(month)
return year + '-' + month + '-01T00:00:00.000Z'
elif timeframe == '1w':
timestamp = int(timestamp / 1000)
firstSunday = 259200 # 1970-01-04T00:00:00.000Z
difference = timestamp - firstSunday
numWeeks = self.integer_divide(difference, duration)
previousSunday = self.sum(firstSunday, numWeeks * duration)
if after:
previousSunday = self.sum(previousSunday, duration)
return self.iso8601(previousSunday * 1000)
else:
timestamp = int(timestamp / 1000)
timestamp = duration * int(timestamp / duration)
if after:
timestamp = self.sum(timestamp, duration)
return self.iso8601(timestamp * 1000)
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
interval = self.timeframes[timeframe]
limit = 100 if (limit is None) else limit
requestLimit = self.sum(limit, 1)
requestLimit = min(1000, requestLimit) # max 1000
request = {
'market_ids': market['id'],
'interval': interval,
'sort': 'asc', # 'asc' will always include the start_time, 'desc' will always include end_time
'limit': requestLimit, # max 1000
}
now = self.milliseconds()
duration = self.parse_timeframe(timeframe)
startTime = since
endTime = now
if since is None:
if limit is None:
raise ArgumentsRequired(self.id + ' fetchOHLCV requires either a since argument or a limit argument')
else:
startTime = now - limit * duration * 1000
else:
if limit is None:
endTime = now
else:
endTime = self.sum(since, self.sum(limit, 1) * duration * 1000)
startTimeNormalized = self.normalize_ohlcv_timestamp(startTime, timeframe)
endTimeNormalized = self.normalize_ohlcv_timestamp(endTime, timeframe, True)
request['start_time'] = startTimeNormalized
request['end_time'] = endTimeNormalized
response = self.publicGetCandle(self.extend(request, params))
#
# {
# "data":[
# {
# "market_id":"ETH-BTC",
# "open":"0.02811",
# "close":"0.02811",
# "low":"0.02811",
# "high":"0.02811",
# "base_volume":"0.0005",
# "quote_volume":"0.000014055",
# "start_time":"2018-11-30T18:19:00.000Z",
# "end_time":"2018-11-30T18:20:00.000Z"
# },
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
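    # A minimal, commented-out usage sketch for fetch_ohlcv(an assumption, not part of the
    # generated file; the symbol and timestamps below are placeholders). Because the request
    # is built from an explicit start_time/end_time window, either `since` or `limit` must be
    # supplied, as enforced above:
    #
    #     import ccxt
    #     exchange = ccxt.probit()
    #     # last 24 hourly candles, anchored by limit only
    #     candles = exchange.fetch_ohlcv('BTC/USDT', '1h', limit=24)
    #     # or anchored by an explicit since timestamp in milliseconds
    #     since = exchange.parse8601('2020-04-01T00:00:00Z')
    #     candles = exchange.fetch_ohlcv('BTC/USDT', '1h', since=since, limit=100)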
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "market_id":"ETH-BTC",
# "open":"0.02811",
# "close":"0.02811",
# "low":"0.02811",
# "high":"0.02811",
# "base_volume":"0.0005",
# "quote_volume":"0.000014055",
# "start_time":"2018-11-30T18:19:00.000Z",
# "end_time":"2018-11-30T18:20:00.000Z"
# }
#
return [
self.parse8601(self.safe_string(ohlcv, 'start_time')),
self.safe_float(ohlcv, 'open'),
self.safe_float(ohlcv, 'high'),
self.safe_float(ohlcv, 'low'),
self.safe_float(ohlcv, 'close'),
self.safe_float(ohlcv, 'base_volume'),
]
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
since = self.parse8601(since)
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market_id'] = market['id']
response = self.privateGetOpenOrder(self.extend(request, params))
data = self.safe_value(response, 'data')
return self.parse_orders(data, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
'start_time': self.iso8601(0),
'end_time': self.iso8601(self.milliseconds()),
'limit': 100,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market_id'] = market['id']
if since:
request['start_time'] = self.iso8601(since)
if limit:
request['limit'] = limit
response = self.privateGetOrderHistory(self.extend(request, params))
data = self.safe_value(response, 'data')
return self.parse_orders(data, market, since, limit)
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'market_id': market['id'],
}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_order_id')
if clientOrderId is not None:
request['client_order_id'] = clientOrderId
else:
request['order_id'] = id
query = self.omit(params, ['clientOrderId', 'client_order_id'])
response = self.privateGetOrder(self.extend(request, query))
data = self.safe_value(response, 'data', [])
order = self.safe_value(data, 0)
return self.parse_order(order, market)
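    # Commented-out sketch(an assumption, not in the original file): fetch_order accepts
    # either the exchange-assigned order id or a client-assigned id passed through params,
    # as handled above; the id values here are placeholders.
    #
    #     order = exchange.fetch_order('17209376', 'BTC/USDT')
    #     order = exchange.fetch_order(None, 'BTC/USDT', {'clientOrderId': 'my-order-1'})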
def parse_order_status(self, status):
statuses = {
'open': 'open',
'cancelled': 'canceled',
'filled': 'closed',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# {
# id: string,
# user_id: string,
# market_id: string,
# type: 'orderType',
# side: 'side',
# quantity: string,
# limit_price: string,
# time_in_force: 'timeInForce',
# filled_cost: string,
# filled_quantity: string,
# open_quantity: string,
# cancelled_quantity: string,
# status: 'orderStatus',
# time: 'date',
# client_order_id: string,
# }
#
status = self.parse_order_status(self.safe_string(order, 'status'))
id = self.safe_string(order, 'id')
type = self.safe_string(order, 'type')
side = self.safe_string(order, 'side')
symbol = None
marketId = self.safe_string(order, 'market_id')
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if (symbol is None) and (market is not None):
symbol = market['symbol']
timestamp = self.parse8601(self.safe_string(order, 'time'))
price = self.safe_float(order, 'limit_price')
filled = self.safe_float(order, 'filled_quantity')
remaining = self.safe_float(order, 'open_quantity')
canceledAmount = self.safe_float(order, 'cancelled_quantity')
if canceledAmount is not None:
remaining = self.sum(remaining, canceledAmount)
amount = self.safe_float(order, 'quantity', self.sum(filled, remaining))
cost = self.safe_float_2(order, 'filled_cost', 'cost')
if type == 'market':
price = None
average = None
if filled is not None:
if cost is None:
if price is not None:
cost = price * filled
if cost is not None:
if filled > 0:
average = cost / filled
clientOrderId = self.safe_string(order, 'client_order_id')
if clientOrderId == '':
clientOrderId = None
return {
'id': id,
'info': order,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'status': status,
'price': price,
'amount': amount,
'filled': filled,
'remaining': remaining,
'average': average,
'cost': cost,
'fee': None,
'trades': None,
}
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['cost'], self.precisionMode)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
options = self.safe_value(self.options, 'timeInForce')
defaultTimeInForce = self.safe_value(options, type)
timeInForce = self.safe_string_2(params, 'timeInForce', 'time_in_force', defaultTimeInForce)
request = {
'market_id': market['id'],
'type': type,
'side': side,
'time_in_force': timeInForce,
}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_order_id')
if clientOrderId is not None:
request['client_order_id'] = clientOrderId
costToPrecision = None
if type == 'limit':
request['limit_price'] = self.price_to_precision(symbol, price)
request['quantity'] = self.amount_to_precision(symbol, amount)
elif type == 'market':
# for market buy it requires the amount of quote currency to spend
if side == 'buy':
cost = self.safe_float(params, 'cost')
createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)
if createMarketBuyOrderRequiresPrice:
if price is not None:
if cost is None:
cost = amount * price
elif cost is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument for market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False and supply the total cost value in the 'amount' argument or in the 'cost' extra parameter(the exchange-specific behaviour)")
else:
cost = amount if (cost is None) else cost
costToPrecision = self.cost_to_precision(symbol, cost)
request['cost'] = costToPrecision
else:
request['quantity'] = self.amount_to_precision(symbol, amount)
query = self.omit(params, ['timeInForce', 'time_in_force', 'clientOrderId', 'client_order_id'])
response = self.privatePostNewOrder(self.extend(request, query))
#
# {
# data: {
# id: string,
# user_id: string,
# market_id: string,
# type: 'orderType',
# side: 'side',
# quantity: string,
# limit_price: string,
# time_in_force: 'timeInForce',
# filled_cost: string,
# filled_quantity: string,
# open_quantity: string,
# cancelled_quantity: string,
# status: 'orderStatus',
# time: 'date',
# client_order_id: string,
# }
# }
#
data = self.safe_value(response, 'data')
order = self.parse_order(data, market)
# a workaround for incorrect huge amounts
# returned by the exchange on market buys
if (type == 'market') and (side == 'buy'):
order['amount'] = None
order['cost'] = float(costToPrecision)
order['remaining'] = None
return order
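    # Commented-out usage sketch(an assumption, not part of the generated file).
    # For market buys the exchange expects the quote-currency cost, so either pass a price
    # so that cost = amount * price can be computed, or disable the option and pass the
    # cost directly, as described in the logic above; the numbers are placeholders.
    #
    #     # spend roughly 0.01 * 6500 = 65 USDT
    #     order = exchange.create_order('BTC/USDT', 'market', 'buy', 0.01, 6500)
    #     # or supply the cost explicitly
    #     exchange.options['createMarketBuyOrderRequiresPrice'] = False
    #     order = exchange.create_order('BTC/USDT', 'market', 'buy', 65)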
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'market_id': market['id'],
'order_id': id,
}
response = self.privatePostCancelOrder(self.extend(request, params))
data = self.safe_value(response, 'data')
return self.parse_order(data)
def parse_deposit_address(self, depositAddress, currency=None):
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string(depositAddress, 'destination_tag')
currencyId = self.safe_string(depositAddress, 'currency_id')
code = self.safe_currency_code(currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency_id': currency['id'],
}
response = self.privateGetDepositAddress(self.extend(request, params))
#
# {
# "data":[
# {
# "currency_id":"ETH",
# "address":"0x12e2caf3c4051ba1146e612f532901a423a9898a",
# "destination_tag":null
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
firstAddress = self.safe_value(data, 0)
if firstAddress is None:
raise InvalidAddress(self.id + ' fetchDepositAddress returned an empty response')
return self.parse_deposit_address(firstAddress, currency)
def fetch_deposit_addresses(self, codes=None, params={}):
self.load_markets()
request = {}
if codes:
currencyIds = []
for i in range(0, len(codes)):
currency = self.currency(codes[i])
currencyIds.append(currency['id'])
            request['currency_id'] = ','.join(currencyIds)
response = self.privateGetDepositAddress(self.extend(request, params))
data = self.safe_value(response, 'data', [])
return self.parse_deposit_addresses(data)
def parse_deposit_addresses(self, addresses):
result = {}
for i in range(0, len(addresses)):
address = self.parse_deposit_address(addresses[i])
code = address['currency']
result[code] = address
return result
def withdraw(self, code, amount, address, tag=None, params={}):
        # In order to use this method
        # you need to allow API withdrawal from the API Settings Page,
        # and register the list of withdrawal addresses and destination tags on the API Settings page
# you can only withdraw to the registered addresses using the API
self.check_address(address)
self.load_markets()
currency = self.currency(code)
if tag is None:
tag = ''
request = {
'currency_id': currency['id'],
# 'platform_id': 'ETH', # if omitted it will use the default platform for the currency
'address': address,
'destination_tag': tag,
'amount': self.currency_to_precision(code, amount),
# which currency to pay the withdrawal fees
# only applicable for currencies that accepts multiple withdrawal fee options
# 'fee_currency_id': 'ETH', # if omitted it will use the default fee policy for each currency
# whether the amount field includes fees
# 'include_fee': False, # makes sense only when fee_currency_id is equal to currency_id
}
response = self.privatePostWithdrawal(self.extend(request, params))
data = self.safe_value(response, 'data')
return self.parse_transaction(data, currency)
def parse_transaction(self, transaction, currency=None):
id = self.safe_string(transaction, 'id')
amount = self.safe_float(transaction, 'amount')
address = self.safe_string(transaction, 'address')
tag = self.safe_string(transaction, 'destination_tag')
txid = self.safe_string(transaction, 'hash')
timestamp = self.parse8601(self.safe_string(transaction, 'time'))
type = self.safe_string(transaction, 'type')
currencyId = self.safe_string(transaction, 'currency_id')
code = self.safe_currency_code(currencyId)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
feeCost = self.safe_float(transaction, 'fee')
fee = None
if feeCost is not None and feeCost != 0:
fee = {
'currency': code,
'cost': feeCost,
}
return {
'id': id,
'currency': code,
'amount': amount,
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'status': status,
'type': type,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
'info': transaction,
}
def parse_transaction_status(self, status):
statuses = {
'requested': 'pending',
'pending': 'pending',
'confirming': 'pending',
'confirmed': 'pending',
'applying': 'pending',
'done': 'ok',
'cancelled': 'canceled',
'cancelling': 'canceled',
}
return self.safe_string(statuses, status, status)
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/'
query = self.omit(params, self.extract_params(path))
if api == 'accounts':
self.check_required_credentials()
url += self.implode_params(path, params)
auth = self.apiKey + ':' + self.secret
auth64 = base64.b64encode(self.encode(auth))
headers = {
'Authorization': 'Basic ' + self.decode(auth64),
'Content-Type': 'application/json',
}
if query:
body = self.json(query)
else:
url += self.version + '/'
if api == 'public':
url += self.implode_params(path, params)
if query:
url += '?' + self.urlencode(query)
elif api == 'private':
now = self.milliseconds()
self.check_required_credentials()
expires = self.safe_integer(self.options, 'expires')
if (expires is None) or (expires < now):
raise AuthenticationError(self.id + ' access token expired, call signIn() method')
accessToken = self.safe_string(self.options, 'accessToken')
headers = {
'Authorization': 'Bearer ' + accessToken,
}
url += self.implode_params(path, params)
if method == 'GET':
if query:
url += '?' + self.urlencode(query)
elif query:
body = self.json(query)
headers['Content-Type'] = 'application/json'
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def sign_in(self, params={}):
self.check_required_credentials()
request = {
'grant_type': 'client_credentials', # the only supported value
}
response = self.accountsPostToken(self.extend(request, params))
#
# {
# access_token: '0ttDv/2hTTn3bLi8GP1gKaneiEQ6+0hOBenPrxNQt2s=',
# token_type: 'bearer',
# expires_in: 900
# }
#
expiresIn = self.safe_integer(response, 'expires_in')
accessToken = self.safe_string(response, 'access_token')
self.options['accessToken'] = accessToken
self.options['expires'] = self.sum(self.milliseconds(), expiresIn * 1000)
return response
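    # Illustrative note (not part of the original source): a hedged sketch of
    # how sign_in() above is meant to be used. The client-credentials grant
    # stores a short-lived bearer token in options, and sign() refuses private
    # calls once options['expires'] has passed, so callers re-authenticate:
    #
    #     exchange.sign_in()              # stores accessToken and expires
    #     exchange.fetch_balance()        # sent with 'Authorization: Bearer ...'
    #     # after the token lifetime (~15 minutes here) sign() raises
    #     # AuthenticationError, so call exchange.sign_in() again first.
    #
    # fetch_balance is assumed to be implemented elsewhere on this class; only
    # the token lifecycle handled by sign()/sign_in() is illustrated here.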
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if 'errorCode' in response:
errorCode = self.safe_string(response, 'errorCode')
message = self.safe_string(response, 'message')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback)
| 40.821105
| 512
| 0.482571
|
4ca4aeb513286783dbb9bced30a8ce7e6ce8a898
| 679
|
py
|
Python
|
setup.py
|
ClarityCoders/xagents
|
73ad38b07e2b2bca7487f0a5ac5a49e4f3f0dc35
|
[
"MIT"
] | null | null | null |
setup.py
|
ClarityCoders/xagents
|
73ad38b07e2b2bca7487f0a5ac5a49e4f3f0dc35
|
[
"MIT"
] | null | null | null |
setup.py
|
ClarityCoders/xagents
|
73ad38b07e2b2bca7487f0a5ac5a49e4f3f0dc35
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
install_requires = [dep.strip() for dep in open('requirements.txt')]
setup(
name='xagents',
version='1.0',
packages=find_packages(),
url='https://github.com/schissmantics/xagents',
license='MIT',
author='schissmantics',
author_email='schissmantics@outlook.com',
description='Implementations of deep reinforcement learning algorithms in tensorflow 2.5',
include_package_data=True,
setup_requires=['numpy==1.19.5'],
install_requires=install_requires,
python_requires='>=3.6',
entry_points={
'console_scripts': [
'xagents=xagents.cli:execute',
],
},
)
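# Illustrative note (not part of the original setup script): with the
# entry_points declared above, an install is expected to expose the CLI
# roughly as follows (exact behaviour depends on xagents.cli:execute):
#
#     pip install -e .
#     xagents --help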
| 28.291667
| 94
| 0.677467
|
709b18228eca8c9a846c24aadf2a717dc4827fa3
| 1,092
|
py
|
Python
|
tests/command_parse/test_connection.py
|
qiaocci/iredis
|
81892e9083b3ebccef246926076888e986340f96
|
[
"BSD-3-Clause"
] | null | null | null |
tests/command_parse/test_connection.py
|
qiaocci/iredis
|
81892e9083b3ebccef246926076888e986340f96
|
[
"BSD-3-Clause"
] | null | null | null |
tests/command_parse/test_connection.py
|
qiaocci/iredis
|
81892e9083b3ebccef246926076888e986340f96
|
[
"BSD-3-Clause"
] | null | null | null |
def test_auth(judge_command):
judge_command("auth 123", {"command_password": "auth", "password": "123"})
def test_echo(judge_command):
judge_command("echo hello", {"command_message": "echo", "message": "hello"})
def test_ping(judge_command):
judge_command("ping hello", {"command_messagex": "ping", "message": "hello"})
judge_command("ping", {"command_messagex": "ping", "message": None})
judge_command("ping hello world", None)
def test_select(judge_command):
for index in range(16):
judge_command(
f"select {index}", {"command_index": "select", "index": str(index)}
)
for index in range(16, 100):
judge_command(f"select {index}", None)
judge_command("select acb", None)
def test_swapdb(judge_command):
for index1 in range(16):
for index2 in range(16):
judge_command(
f"swapdb {index1} {index2}",
{"command_index_index": "swapdb", "index": [str(index1), str(index2)]},
)
judge_command("swapdb abc 1", None)
judge_command("swapdb 1", None)
| 32.117647
| 87
| 0.624542
|
336e90a27b3302eb5cf1bf524dfe66cbb6dc67f1
| 7,462
|
py
|
Python
|
lib/django-1.4/django/contrib/auth/admin.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 26
|
2015-01-20T08:02:38.000Z
|
2020-06-10T04:57:41.000Z
|
lib/django-1.4/django/contrib/auth/admin.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 4
|
2016-02-28T05:53:54.000Z
|
2017-01-03T07:39:50.000Z
|
lib/django-1.4/django/contrib/auth/admin.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 13
|
2016-02-28T00:14:23.000Z
|
2021-05-03T15:47:36.000Z
|
from django.db import transaction
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.forms import (UserCreationForm, UserChangeForm,
AdminPasswordChangeForm)
from django.contrib.auth.models import User, Group
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.html import escape
from django.utils.decorators import method_decorator
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
csrf_protect_m = method_decorator(csrf_protect)
sensitive_post_parameters_m = method_decorator(sensitive_post_parameters())
class GroupAdmin(admin.ModelAdmin):
search_fields = ('name',)
ordering = ('name',)
filter_horizontal = ('permissions',)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
if db_field.name == 'permissions':
qs = kwargs.get('queryset', db_field.rel.to.objects)
# Avoid a major performance hit resolving permission names which
# triggers a content_type load:
kwargs['queryset'] = qs.select_related('content_type')
return super(GroupAdmin, self).formfield_for_manytomany(
db_field, request=request, **kwargs)
class UserAdmin(admin.ModelAdmin):
add_form_template = 'admin/auth/user/add_form.html'
change_user_password_template = None
fieldsets = (
(None, {'fields': ('username', 'password')}),
(_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'password1', 'password2')}
),
)
form = UserChangeForm
add_form = UserCreationForm
change_password_form = AdminPasswordChangeForm
list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')
list_filter = ('is_staff', 'is_superuser', 'is_active')
search_fields = ('username', 'first_name', 'last_name', 'email')
ordering = ('username',)
filter_horizontal = ('user_permissions',)
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super(UserAdmin, self).get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during user creation
"""
defaults = {}
if obj is None:
defaults.update({
'form': self.add_form,
'fields': admin.util.flatten_fieldsets(self.add_fieldsets),
})
defaults.update(kwargs)
return super(UserAdmin, self).get_form(request, obj, **defaults)
def get_urls(self):
from django.conf.urls import patterns
return patterns('',
(r'^(\d+)/password/$',
self.admin_site.admin_view(self.user_change_password))
) + super(UserAdmin, self).get_urls()
@sensitive_post_parameters_m
@csrf_protect_m
@transaction.commit_on_success
def add_view(self, request, form_url='', extra_context=None):
# It's an error for a user to have add permission but NOT change
# permission for users. If we allowed such users to add users, they
# could create superusers, which would mean they would essentially have
# the permission to change users. To avoid the problem entirely, we
# disallow users from adding users if they don't have change
# permission.
if not self.has_change_permission(request):
if self.has_add_permission(request) and settings.DEBUG:
# Raise Http404 in debug mode so that the user gets a helpful
# error message.
raise Http404(
'Your user does not have the "Change user" permission. In '
'order to add users, Django requires that your user '
'account have both the "Add user" and "Change user" '
'permissions set.')
raise PermissionDenied
if extra_context is None:
extra_context = {}
defaults = {
'auto_populated_fields': (),
'username_help_text': self.model._meta.get_field('username').help_text,
}
extra_context.update(defaults)
return super(UserAdmin, self).add_view(request, form_url,
extra_context)
@sensitive_post_parameters_m
def user_change_password(self, request, id, form_url=''):
if not self.has_change_permission(request):
raise PermissionDenied
user = get_object_or_404(self.queryset(request), pk=id)
if request.method == 'POST':
form = self.change_password_form(user, request.POST)
if form.is_valid():
form.save()
msg = ugettext('Password changed successfully.')
messages.success(request, msg)
return HttpResponseRedirect('..')
else:
form = self.change_password_form(user)
fieldsets = [(None, {'fields': form.base_fields.keys()})]
adminForm = admin.helpers.AdminForm(form, fieldsets, {})
context = {
'title': _('Change password: %s') % escape(user.username),
'adminForm': adminForm,
'form_url': mark_safe(form_url),
'form': form,
'is_popup': '_popup' in request.REQUEST,
'add': True,
'change': False,
'has_delete_permission': False,
'has_change_permission': True,
'has_absolute_url': False,
'opts': self.model._meta,
'original': user,
'save_as': False,
'show_save': True,
}
return TemplateResponse(request, [
self.change_user_password_template or
'admin/auth/user/change_password.html'
], context, current_app=self.admin_site.name)
def response_add(self, request, obj, post_url_continue='../%s/'):
"""
Determines the HttpResponse for the add_view stage. It mostly defers to
its superclass implementation but is customized because the User model
has a slightly different workflow.
"""
# We should allow further modification of the user just added i.e. the
# 'Save' button should behave like the 'Save and continue editing'
# button except in two scenarios:
# * The user has pressed the 'Save and add another' button
# * We are adding a user in a popup
if '_addanother' not in request.POST and '_popup' not in request.POST:
request.POST['_continue'] = 1
return super(UserAdmin, self).response_add(request, obj,
post_url_continue)
admin.site.register(Group, GroupAdmin)
admin.site.register(User, UserAdmin)
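# Illustrative note (not part of the original module): a hedged sketch of the
# usual customization pattern built on the classes above -- subclass UserAdmin
# and swap the registration in a project's own admin module:
#
#     from django.contrib import admin
#     from django.contrib.auth.admin import UserAdmin
#     from django.contrib.auth.models import User
#
#     class MyUserAdmin(UserAdmin):
#         list_display = UserAdmin.list_display + ('is_active',)
#
#     admin.site.unregister(User)
#     admin.site.register(User, MyUserAdmin)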
| 42.64
| 83
| 0.631332
|
08c79e7b5109bfa55cb76f35ebd3bb72f65d8ed4
| 14,371
|
py
|
Python
|
Lib/asyncio/futures.py
|
ErikBjare/cpython
|
b68431fadb3150134ac6ccbf501cdfeaf4c75678
|
[
"0BSD"
] | 2
|
2022-03-27T14:52:48.000Z
|
2022-03-27T17:35:22.000Z
|
Lib/asyncio/futures.py
|
dalakatt/cpython
|
2f49b97cc5426087b46515254b9a97a22ee8c807
|
[
"0BSD"
] | 8
|
2022-01-07T11:31:11.000Z
|
2022-03-04T00:07:16.000Z
|
Lib/asyncio/futures.py
|
dalakatt/cpython
|
2f49b97cc5426087b46515254b9a97a22ee8c807
|
[
"0BSD"
] | 1
|
2022-03-27T18:34:54.000Z
|
2022-03-27T18:34:54.000Z
|
"""A Future class similar to the one in PEP 3148."""
__all__ = (
'Future', 'wrap_future', 'isfuture',
)
import concurrent.futures
import contextvars
import logging
import sys
import warnings
from types import GenericAlias
from . import base_futures
from . import events
from . import exceptions
from . import format_helpers
isfuture = base_futures.isfuture
_PENDING = base_futures._PENDING
_CANCELLED = base_futures._CANCELLED
_FINISHED = base_futures._FINISHED
STACK_DEBUG = logging.DEBUG - 1 # heavy-duty debugging
class Future:
"""This class is *almost* compatible with concurrent.futures.Future.
Differences:
- This class is not thread-safe.
- result() and exception() do not take a timeout argument and
raise an exception when the future isn't done yet.
- Callbacks registered with add_done_callback() are always called
via the event loop's call_soon().
- This class is not compatible with the wait() and as_completed()
methods in the concurrent.futures package.
(In Python 3.4 or later we may be able to unify the implementations.)
"""
# Class variables serving as defaults for instance variables.
_state = _PENDING
_result = None
_exception = None
_loop = None
_source_traceback = None
_cancel_message = None
# A saved CancelledError for later chaining as an exception context.
_cancelled_exc = None
# This field is used for a dual purpose:
# - Its presence is a marker to declare that a class implements
# the Future protocol (i.e. is intended to be duck-type compatible).
# The value must also be not-None, to enable a subclass to declare
# that it is not compatible by setting this to None.
# - It is set by __iter__() below so that Task._step() can tell
# the difference between
# `await Future()` or`yield from Future()` (correct) vs.
# `yield Future()` (incorrect).
_asyncio_future_blocking = False
__log_traceback = False
def __init__(self, *, loop=None):
"""Initialize the future.
The optional event_loop argument allows explicitly setting the event
loop object used by the future. If it's not provided, the future uses
the default event loop.
"""
if loop is None:
self._loop = events._get_event_loop()
else:
self._loop = loop
self._callbacks = []
if self._loop.get_debug():
self._source_traceback = format_helpers.extract_stack(
sys._getframe(1))
def __repr__(self):
return base_futures._future_repr(self)
def __del__(self):
if not self.__log_traceback:
# set_exception() was not called, or result() or exception()
# has consumed the exception
return
exc = self._exception
context = {
'message':
f'{self.__class__.__name__} exception was never retrieved',
'exception': exc,
'future': self,
}
if self._source_traceback:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
__class_getitem__ = classmethod(GenericAlias)
@property
def _log_traceback(self):
return self.__log_traceback
@_log_traceback.setter
def _log_traceback(self, val):
if val:
raise ValueError('_log_traceback can only be set to False')
self.__log_traceback = False
def get_loop(self):
"""Return the event loop the Future is bound to."""
loop = self._loop
if loop is None:
raise RuntimeError("Future object is not initialized.")
return loop
def _make_cancelled_error(self):
"""Create the CancelledError to raise if the Future is cancelled.
This should only be called once when handling a cancellation since
it erases the saved context exception value.
"""
if self._cancelled_exc is not None:
exc = self._cancelled_exc
self._cancelled_exc = None
return exc
if self._cancel_message is None:
exc = exceptions.CancelledError()
else:
exc = exceptions.CancelledError(self._cancel_message)
exc.__context__ = self._cancelled_exc
# Remove the reference since we don't need this anymore.
self._cancelled_exc = None
return exc
def cancel(self, msg=None):
"""Cancel the future and schedule callbacks.
If the future is already done or cancelled, return False. Otherwise,
change the future's state to cancelled, schedule the callbacks and
return True.
"""
if msg is not None:
warnings.warn("Passing 'msg' argument to Future.cancel() "
"is deprecated since Python 3.11, and "
"scheduled for removal in Python 3.14.",
DeprecationWarning, stacklevel=2)
self.__log_traceback = False
if self._state != _PENDING:
return False
self._state = _CANCELLED
self._cancel_message = msg
self.__schedule_callbacks()
return True
def __schedule_callbacks(self):
"""Internal: Ask the event loop to call all callbacks.
The callbacks are scheduled to be called as soon as possible. Also
clears the callback list.
"""
callbacks = self._callbacks[:]
if not callbacks:
return
self._callbacks[:] = []
for callback, ctx in callbacks:
self._loop.call_soon(callback, self, context=ctx)
def cancelled(self):
"""Return True if the future was cancelled."""
return self._state == _CANCELLED
# Don't implement running(); see http://bugs.python.org/issue18699
def done(self):
"""Return True if the future is done.
Done means either that a result / exception are available, or that the
future was cancelled.
"""
return self._state != _PENDING
def result(self):
"""Return the result this future represents.
If the future has been cancelled, raises CancelledError. If the
future's result isn't yet available, raises InvalidStateError. If
the future is done and has an exception set, this exception is raised.
"""
if self._state == _CANCELLED:
exc = self._make_cancelled_error()
raise exc
if self._state != _FINISHED:
raise exceptions.InvalidStateError('Result is not ready.')
self.__log_traceback = False
if self._exception is not None:
raise self._exception
return self._result
def exception(self):
"""Return the exception that was set on this future.
The exception (or None if no exception was set) is returned only if
the future is done. If the future has been cancelled, raises
CancelledError. If the future isn't done yet, raises
InvalidStateError.
"""
if self._state == _CANCELLED:
exc = self._make_cancelled_error()
raise exc
if self._state != _FINISHED:
raise exceptions.InvalidStateError('Exception is not set.')
self.__log_traceback = False
return self._exception
def add_done_callback(self, fn, *, context=None):
"""Add a callback to be run when the future becomes done.
The callback is called with a single argument - the future object. If
the future is already done when this is called, the callback is
scheduled with call_soon.
"""
if self._state != _PENDING:
self._loop.call_soon(fn, self, context=context)
else:
if context is None:
context = contextvars.copy_context()
self._callbacks.append((fn, context))
# New method not in PEP 3148.
def remove_done_callback(self, fn):
"""Remove all instances of a callback from the "call when done" list.
Returns the number of callbacks removed.
"""
filtered_callbacks = [(f, ctx)
for (f, ctx) in self._callbacks
if f != fn]
removed_count = len(self._callbacks) - len(filtered_callbacks)
if removed_count:
self._callbacks[:] = filtered_callbacks
return removed_count
# So-called internal methods (note: no set_running_or_notify_cancel()).
def set_result(self, result):
"""Mark the future done and set its result.
If the future is already done when this method is called, raises
InvalidStateError.
"""
if self._state != _PENDING:
raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
self._result = result
self._state = _FINISHED
self.__schedule_callbacks()
def set_exception(self, exception):
"""Mark the future done and set an exception.
If the future is already done when this method is called, raises
InvalidStateError.
"""
if self._state != _PENDING:
raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
if isinstance(exception, type):
exception = exception()
if type(exception) is StopIteration:
raise TypeError("StopIteration interacts badly with generators "
"and cannot be raised into a Future")
self._exception = exception
self._state = _FINISHED
self.__schedule_callbacks()
self.__log_traceback = True
def __await__(self):
if not self.done():
self._asyncio_future_blocking = True
yield self # This tells Task to wait for completion.
if not self.done():
raise RuntimeError("await wasn't used with future")
return self.result() # May raise too.
__iter__ = __await__ # make compatible with 'yield from'.
# Needed for testing purposes.
_PyFuture = Future
def _get_loop(fut):
# Tries to call Future.get_loop() if it's available.
# Otherwise fallbacks to using the old '_loop' property.
try:
get_loop = fut.get_loop
except AttributeError:
pass
else:
return get_loop()
return fut._loop
def _set_result_unless_cancelled(fut, result):
"""Helper setting the result only if the future was not cancelled."""
if fut.cancelled():
return
fut.set_result(result)
def _convert_future_exc(exc):
exc_class = type(exc)
if exc_class is concurrent.futures.CancelledError:
return exceptions.CancelledError(*exc.args)
elif exc_class is concurrent.futures.TimeoutError:
return exceptions.TimeoutError(*exc.args)
elif exc_class is concurrent.futures.InvalidStateError:
return exceptions.InvalidStateError(*exc.args)
else:
return exc
def _set_concurrent_future_state(concurrent, source):
"""Copy state from a future to a concurrent.futures.Future."""
assert source.done()
if source.cancelled():
concurrent.cancel()
if not concurrent.set_running_or_notify_cancel():
return
exception = source.exception()
if exception is not None:
concurrent.set_exception(_convert_future_exc(exception))
else:
result = source.result()
concurrent.set_result(result)
def _copy_future_state(source, dest):
"""Internal helper to copy state from another Future.
The other Future may be a concurrent.futures.Future.
"""
assert source.done()
if dest.cancelled():
return
assert not dest.done()
if source.cancelled():
dest.cancel()
else:
exception = source.exception()
if exception is not None:
dest.set_exception(_convert_future_exc(exception))
else:
result = source.result()
dest.set_result(result)
def _chain_future(source, destination):
"""Chain two futures so that when one completes, so does the other.
The result (or exception) of source will be copied to destination.
If destination is cancelled, source gets cancelled too.
Compatible with both asyncio.Future and concurrent.futures.Future.
"""
if not isfuture(source) and not isinstance(source,
concurrent.futures.Future):
raise TypeError('A future is required for source argument')
if not isfuture(destination) and not isinstance(destination,
concurrent.futures.Future):
raise TypeError('A future is required for destination argument')
source_loop = _get_loop(source) if isfuture(source) else None
dest_loop = _get_loop(destination) if isfuture(destination) else None
def _set_state(future, other):
if isfuture(future):
_copy_future_state(other, future)
else:
_set_concurrent_future_state(future, other)
def _call_check_cancel(destination):
if destination.cancelled():
if source_loop is None or source_loop is dest_loop:
source.cancel()
else:
source_loop.call_soon_threadsafe(source.cancel)
def _call_set_state(source):
if (destination.cancelled() and
dest_loop is not None and dest_loop.is_closed()):
return
if dest_loop is None or dest_loop is source_loop:
_set_state(destination, source)
else:
dest_loop.call_soon_threadsafe(_set_state, destination, source)
destination.add_done_callback(_call_check_cancel)
source.add_done_callback(_call_set_state)
def wrap_future(future, *, loop=None):
"""Wrap concurrent.futures.Future object."""
if isfuture(future):
return future
assert isinstance(future, concurrent.futures.Future), \
f'concurrent.futures.Future is expected, got {future!r}'
if loop is None:
loop = events._get_event_loop()
new_future = loop.create_future()
_chain_future(future, new_future)
return new_future
try:
import _asyncio
except ImportError:
pass
else:
# _CFuture is needed for tests.
Future = _CFuture = _asyncio.Future
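# Illustrative note (not part of the original module): a hedged sketch of how
# wrap_future()/_chain_future() above let a concurrent.futures result be
# awaited on the event loop:
#
#     import asyncio, concurrent.futures
#
#     async def main():
#         with concurrent.futures.ThreadPoolExecutor() as pool:
#             cf = pool.submit(sum, range(10))
#             result = await asyncio.wrap_future(cf)
#             assert result == 45
#
#     asyncio.run(main())
#
# In application code loop.run_in_executor() is the usual entry point; the
# direct wrap_future() call only illustrates the chaining implemented here.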
| 33.266204
| 79
| 0.640248
|
d3f0e4b74479385780af521d4e53b4900737d845
| 1,537
|
py
|
Python
|
sebs/local/deployment.py
|
opal-mimuw/serverless-benchmarks
|
16ac3988b3891a6ad2ae91e7c7175315d924c70c
|
[
"BSD-3-Clause"
] | 35
|
2020-12-30T19:31:30.000Z
|
2022-03-28T11:10:00.000Z
|
sebs/local/deployment.py
|
opal-mimuw/serverless-benchmarks
|
16ac3988b3891a6ad2ae91e7c7175315d924c70c
|
[
"BSD-3-Clause"
] | 24
|
2021-01-04T15:37:05.000Z
|
2022-03-14T00:45:20.000Z
|
sebs/local/deployment.py
|
opal-mimuw/serverless-benchmarks
|
16ac3988b3891a6ad2ae91e7c7175315d924c70c
|
[
"BSD-3-Clause"
] | 10
|
2021-06-13T13:13:39.000Z
|
2021-12-20T22:05:50.000Z
|
import json
from typing import List, Optional
from sebs.cache import Cache
from sebs.local.function import LocalFunction
from sebs.local.storage import Minio
from sebs.utils import serialize
class Deployment:
def __init__(self):
self._functions: List[LocalFunction] = []
self._storage: Optional[Minio]
self._inputs: List[dict] = []
def add_function(self, func: LocalFunction):
self._functions.append(func)
def add_input(self, func_input: dict):
self._inputs.append(func_input)
def set_storage(self, storage: Minio):
self._storage = storage
def serialize(self, path: str):
with open(path, "w") as out:
out.write(
serialize(
{"functions": self._functions, "storage": self._storage, "inputs": self._inputs}
)
)
@staticmethod
def deserialize(path: str, cache_client: Cache) -> "Deployment":
with open(path, "r") as in_f:
input_data = json.load(in_f)
deployment = Deployment()
for input_cfg in input_data["inputs"]:
deployment._inputs.append(input_cfg)
for func in input_data["functions"]:
deployment._functions.append(LocalFunction.deserialize(func))
deployment._storage = Minio.deserialize(input_data["storage"], cache_client)
return deployment
def shutdown(self):
for func in self._functions:
func.stop()
self._storage.stop()
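# Illustrative note (not part of the original module): a hedged sketch of the
# persistence round trip implemented above. serialize() writes the functions,
# storage and inputs as JSON, and deserialize() rebuilds them so a local
# deployment can be resumed later:
#
#     deployment.serialize("deployment.json")
#     restored = Deployment.deserialize("deployment.json", cache_client)
#     restored.shutdown()
#
# `deployment` and `cache_client` are assumed to be an existing Deployment and
# sebs.cache.Cache instance; constructing them is outside this sketch.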
| 31.367347
| 100
| 0.62134
|
4575e998126a2fdf86986a9fcaba9b069e2a9dbd
| 1,072
|
py
|
Python
|
examples/obtain_refresh_token.py
|
NadimAsad/prawcore
|
168aea5ee4284d8eeeb1d2bcfbad84af828f8cdc
|
[
"BSD-2-Clause"
] | null | null | null |
examples/obtain_refresh_token.py
|
NadimAsad/prawcore
|
168aea5ee4284d8eeeb1d2bcfbad84af828f8cdc
|
[
"BSD-2-Clause"
] | null | null | null |
examples/obtain_refresh_token.py
|
NadimAsad/prawcore
|
168aea5ee4284d8eeeb1d2bcfbad84af828f8cdc
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
"""This example demonstrates the flow for retrieving a refresh token.
In order for this example to work your application's redirect URI must be set
to http://localhost:65010/auth_callback.
This tool can be used to conveniently create refresh tokens for later use with
your web application OAuth2 credentials.
"""
import os
import sys
import prawcore
def main():
"""Provide the program's entry point when directly executed."""
if len(sys.argv) < 2:
print(f"Usage: {sys.argv[0]} SCOPE...")
return 1
authorizer = prawcore.LocalWSGIServerAuthorizer(
prawcore.TrustedAuthenticator(
prawcore.Requestor("prawcore_refresh_token_example"),
os.environ["PRAWCORE_CLIENT_ID"],
os.environ["PRAWCORE_CLIENT_SECRET"],
"http://localhost:65010/auth_callback",
),
sys.argv[1:],
duration="permanent",
)
authorizer.authorize_local_server()
print(f"Refresh token: {authorizer.refresh_token}")
if __name__ == "__main__":
sys.exit(main())
| 26.8
| 78
| 0.681903
|
e9a9ad0bd880cdcde87e21e799545bea27d39060
| 6,483
|
py
|
Python
|
telethon/events/common.py
|
MxAboli/Telethon
|
5d13626d7119eb23463495412e0e707d23723680
|
[
"MIT"
] | 8
|
2021-06-09T17:26:18.000Z
|
2022-02-15T06:13:15.000Z
|
telethon/events/common.py
|
MxAboli/Telethon
|
5d13626d7119eb23463495412e0e707d23723680
|
[
"MIT"
] | 11
|
2021-06-09T17:19:38.000Z
|
2021-12-30T07:22:16.000Z
|
telethon/events/common.py
|
TAMILVIP007/Telethon
|
15c0369b8fb90c991ff241bff34c9d80a171d972
|
[
"MIT"
] | 19
|
2021-06-26T13:24:22.000Z
|
2022-03-31T07:28:22.000Z
|
import abc
import asyncio
import warnings
import logging
from .. import utils
from ..tl import TLObject, types
from ..tl.custom.chatgetter import ChatGetter
logger = logging.getLogger(__name__)
async def _into_id_set(client, chats):
"""Helper util to turn the input chat or chats into a set of IDs."""
if chats is None:
return None
logger.debug(f"got {chats} for id changing...")
if not utils.is_list_like(chats):
chats = (chats,)
result = set()
for chat in chats:
if isinstance(chat, int):
if chat < 0:
result.add(chat) # Explicitly marked IDs are negative
else:
result.update(
{ # Support all valid types of peers
utils.get_peer_id(types.PeerUser(chat)),
utils.get_peer_id(types.PeerChat(chat)),
utils.get_peer_id(types.PeerChannel(chat)),
}
)
elif isinstance(chat, TLObject) and chat.SUBCLASS_OF_ID == 0x2D45687:
# 0x2d45687 == crc32(b'Peer')
result.add(utils.get_peer_id(chat))
else:
chat = await client.get_input_entity(chat)
if isinstance(chat, types.InputPeerSelf):
chat = await client.get_me(input_peer=True)
result.add(utils.get_peer_id(chat))
return result
class EventBuilder(abc.ABC):
"""
The common event builder, with builtin support to filter per chat.
Args:
chats (`entity`, optional):
May be one or more entities (username/peer/etc.), preferably IDs.
By default, only matching chats will be handled.
blacklist_chats (`bool`, optional):
Whether to treat the chats as a blacklist instead of
as a whitelist (default). This means that every chat
will be handled *except* those specified in ``chats``
which will be ignored if ``blacklist_chats=True``.
func (`callable`, optional):
A callable (async or not) function that should accept the event as input
parameter, and return a value indicating whether the event
should be dispatched or not (any truthy value will do, it
does not need to be a `bool`). It works like a custom filter:
.. code-block:: python
@client.on(events.NewMessage(func=lambda e: e.is_private))
async def handler(event):
pass # code here
"""
def __init__(self, chats=None, *, blacklist_chats=False, func=None):
self.chats = chats
self.blacklist_chats = bool(blacklist_chats)
self.resolved = False
self.func = func
self._resolve_lock = None
@classmethod
@abc.abstractmethod
def build(cls, update, others=None, self_id=None):
"""
Builds an event for the given update if possible, or returns None.
`others` are the rest of updates that came in the same container
as the current `update`.
`self_id` should be the current user's ID, since it is required
for some events which lack this information but still need it.
"""
# TODO So many parameters specific to only some update types seems dirty
async def resolve(self, client):
"""Helper method to allow event builders to be resolved before usage"""
if self.resolved:
return
if not self._resolve_lock:
self._resolve_lock = asyncio.Lock()
async with self._resolve_lock:
if not self.resolved:
await self._resolve(client)
self.resolved = True
async def _resolve(self, client):
self.chats = await _into_id_set(client, self.chats)
def filter(self, event):
"""
Returns a truthy value if the event passed the filter and should be
used, or falsy otherwise. The return value may need to be awaited.
The events must have been resolved before this can be called.
"""
if not self.resolved:
return
if self.chats is not None:
# Note: the `event.chat_id` property checks if it's `None` for us
inside = event.chat_id in self.chats
if inside == self.blacklist_chats:
# If this chat matches but it's a blacklist ignore.
# If it doesn't match but it's a whitelist ignore.
return
if not self.func:
return True
# Return the result of func directly as it may need to be awaited
return self.func(event)
class EventCommon(ChatGetter, abc.ABC):
"""
Intermediate class with common things to all events.
Remember that this class implements `ChatGetter
<telethon.tl.custom.chatgetter.ChatGetter>` which
means you have access to all chat properties and methods.
In addition, you can access the `original_update`
field which contains the original :tl:`Update`.
"""
_event_name = "Event"
def __init__(self, chat_peer=None, msg_id=None, broadcast=None):
super().__init__(chat_peer, broadcast=broadcast)
self._entities = {}
self._client = None
self._message_id = msg_id
self.original_update = None
def _set_client(self, client):
"""
Setter so subclasses can act accordingly when the client is set.
"""
self._client = client
if self._chat_peer:
self._chat, self._input_chat = utils._get_entity_pair(
self.chat_id, self._entities, client._entity_cache
)
else:
self._chat = self._input_chat = None
@property
def client(self):
"""
The `telethon.TelegramClient` that created this event.
"""
return self._client
def __str__(self):
return TLObject.pretty_format(self.to_dict())
def stringify(self):
return TLObject.pretty_format(self.to_dict(), indent=0)
def to_dict(self):
d = {k: v for k, v in self.__dict__.items() if k[0] != "_"}
d["_"] = self._event_name
return d
def name_inner_event(cls):
"""Decorator to rename cls.Event 'Event' as 'cls.Event'"""
if hasattr(cls, "Event"):
cls.Event._event_name = "{}.Event".format(cls.__name__)
else:
warnings.warn("Class {} does not have a inner Event".format(cls))
return cls
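# Illustrative note (not part of the original module): a hedged sketch of how
# the chats/blacklist_chats/func filters defined by EventBuilder are used from
# client code, with NewMessage as the usual concrete builder:
#
#     from telethon import events
#
#     @client.on(events.NewMessage(chats=(123456789,), func=lambda e: e.is_private))
#     async def handler(event):
#         ...
#
# `client` is assumed to be a connected TelegramClient; _into_id_set() above is
# what turns the chats argument into the set of peer IDs checked in filter().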
| 33.590674
| 84
| 0.607898
|
b3a15d2fb985db29009ddd273877987046b7bfe3
| 1,316
|
py
|
Python
|
saleor/graphql/page/bulk_mutations.py
|
famavott/saleor
|
becf07bf2a4fa8e98b2ea9ad13f910532d48cab0
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/graphql/page/bulk_mutations.py
|
famavott/saleor
|
becf07bf2a4fa8e98b2ea9ad13f910532d48cab0
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/graphql/page/bulk_mutations.py
|
famavott/saleor
|
becf07bf2a4fa8e98b2ea9ad13f910532d48cab0
|
[
"BSD-3-Clause"
] | null | null | null |
import graphene
from ...page import models
from ..core.mutations import ModelBulkDeleteMutation, ModelBulkPublishMutation
class PageBulkDelete(ModelBulkDeleteMutation):
class Arguments:
ids = graphene.List(
graphene.ID,
required=True,
description='List of page IDs to delete.')
class Meta:
description = 'Deletes pages.'
model = models.Page
@classmethod
def user_is_allowed(cls, user, input):
return user.has_perm('page.manage_pages')
class PageBulkPublish(ModelBulkPublishMutation):
class Arguments:
ids = graphene.List(
graphene.ID,
required=True,
description='List of page IDs to publish.')
class Meta:
description = 'Publish pages.'
model = models.Page
@classmethod
def user_is_allowed(cls, user, input):
return user.has_perm('page.manage_pages')
class PageBulkUnpublish(PageBulkPublish):
class Arguments:
ids = graphene.List(
graphene.ID,
required=True,
description='List of page IDs to unpublish.')
class Meta:
description = 'Unpublish pages.'
model = models.Page
@classmethod
def bulk_action(cls, queryset):
queryset.update(is_published=False)
| 24.830189
| 78
| 0.638298
|
5abd80aef365f52a4bf28a2a76ac23676a6e77dc
| 18,354
|
py
|
Python
|
cmd/location-provider/example/gpsfake/simulator/geographiclib/geodesicline.py
|
edgefarm/edgefarm-service-modules
|
b72f9b1a9a72906db2bf8c18d6f70ca3518707fd
|
[
"Apache-2.0"
] | 5
|
2018-10-29T10:33:24.000Z
|
2021-02-16T13:41:31.000Z
|
cmd/location-provider/example/gpsfake/simulator/geographiclib/geodesicline.py
|
edgefarm/edgefarm-service-modules
|
b72f9b1a9a72906db2bf8c18d6f70ca3518707fd
|
[
"Apache-2.0"
] | 1
|
2021-06-13T16:19:32.000Z
|
2021-06-13T16:19:32.000Z
|
gpsfake-module/simulator/geographiclib/geodesicline.py
|
edgefarm/alm-service-modules
|
8f66cf4122ef0df43288a4ac879ecc0098ad0d51
|
[
"Apache-2.0"
] | 2
|
2020-09-04T01:14:43.000Z
|
2021-03-25T13:47:16.000Z
|
"""Define the :class:`~geographiclib.geodesicline.GeodesicLine` class
The constructor defines the starting point of the line. Points on the
line are given by
* :meth:`~geographiclib.geodesicline.GeodesicLine.Position` position
given in terms of distance
* :meth:`~geographiclib.geodesicline.GeodesicLine.ArcPosition` position
given in terms of spherical arc length
A reference point 3 can be defined with
* :meth:`~geographiclib.geodesicline.GeodesicLine.SetDistance` set
position of 3 in terms of the distance from the starting point
* :meth:`~geographiclib.geodesicline.GeodesicLine.SetArc` set
position of 3 in terms of the spherical arc length from the starting point
The object can also be constructed by
* :meth:`Geodesic.Line <geographiclib.geodesic.Geodesic.Line>`
* :meth:`Geodesic.DirectLine <geographiclib.geodesic.Geodesic.DirectLine>`
* :meth:`Geodesic.ArcDirectLine
<geographiclib.geodesic.Geodesic.ArcDirectLine>`
* :meth:`Geodesic.InverseLine <geographiclib.geodesic.Geodesic.InverseLine>`
The public attributes for this class are
* :attr:`~geographiclib.geodesicline.GeodesicLine.a`
:attr:`~geographiclib.geodesicline.GeodesicLine.f`
:attr:`~geographiclib.geodesicline.GeodesicLine.caps`
:attr:`~geographiclib.geodesicline.GeodesicLine.lat1`
:attr:`~geographiclib.geodesicline.GeodesicLine.lon1`
:attr:`~geographiclib.geodesicline.GeodesicLine.azi1`
:attr:`~geographiclib.geodesicline.GeodesicLine.salp1`
:attr:`~geographiclib.geodesicline.GeodesicLine.calp1`
:attr:`~geographiclib.geodesicline.GeodesicLine.s13`
:attr:`~geographiclib.geodesicline.GeodesicLine.a13`
"""
# geodesicline.py
#
# This is a rather literal translation of the GeographicLib::GeodesicLine class
# to python. See the documentation for the C++ class for more information at
#
# https://geographiclib.sourceforge.io/html/annotated.html
#
# The algorithms are derived in
#
# Charles F. F. Karney,
# Algorithms for geodesics, J. Geodesy 87, 43-55 (2013),
# https://doi.org/10.1007/s00190-012-0578-z
# Addenda: https://geographiclib.sourceforge.io/geod-addenda.html
#
# Copyright (c) Charles Karney (2011-2016) <charles@karney.com> and licensed
# under the MIT/X11 License. For more information, see
# https://geographiclib.sourceforge.io/
######################################################################
import math
from geographiclib.geodesiccapability import GeodesicCapability
from geographiclib.geomath import Math
class GeodesicLine(object):
"""Points on a geodesic path"""
def __init__(self, geod, lat1, lon1, azi1,
caps = GeodesicCapability.STANDARD |
GeodesicCapability.DISTANCE_IN,
salp1 = Math.nan, calp1 = Math.nan):
"""Construct a GeodesicLine object
:param geod: a :class:`~geographiclib.geodesic.Geodesic` object
:param lat1: latitude of the first point in degrees
:param lon1: longitude of the first point in degrees
:param azi1: azimuth at the first point in degrees
:param caps: the :ref:`capabilities <outmask>`
This creates an object allowing points along a geodesic starting at
(*lat1*, *lon1*), with azimuth *azi1* to be found. The default
value of *caps* is STANDARD | DISTANCE_IN. The optional parameters
*salp1* and *calp1* should not be supplied; they are part of the
private interface.
"""
from geographiclib.geodesic import Geodesic
self.a = geod.a
"""The equatorial radius in meters (readonly)"""
self.f = geod.f
"""The flattening (readonly)"""
self._b = geod._b
self._c2 = geod._c2
self._f1 = geod._f1
self.caps = (caps | Geodesic.LATITUDE | Geodesic.AZIMUTH |
Geodesic.LONG_UNROLL)
"""the capabilities (readonly)"""
# Guard against underflow in salp0
self.lat1 = Math.LatFix(lat1)
"""the latitude of the first point in degrees (readonly)"""
self.lon1 = lon1
"""the longitude of the first point in degrees (readonly)"""
if Math.isnan(salp1) or Math.isnan(calp1):
self.azi1 = Math.AngNormalize(azi1)
self.salp1, self.calp1 = Math.sincosd(Math.AngRound(azi1))
else:
self.azi1 = azi1
"""the azimuth at the first point in degrees (readonly)"""
self.salp1 = salp1
"""the sine of the azimuth at the first point (readonly)"""
self.calp1 = calp1
"""the cosine of the azimuth at the first point (readonly)"""
# real cbet1, sbet1
sbet1, cbet1 = Math.sincosd(Math.AngRound(lat1)); sbet1 *= self._f1
# Ensure cbet1 = +epsilon at poles
sbet1, cbet1 = Math.norm(sbet1, cbet1); cbet1 = max(Geodesic.tiny_, cbet1)
self._dn1 = math.sqrt(1 + geod._ep2 * Math.sq(sbet1))
# Evaluate alp0 from sin(alp1) * cos(bet1) = sin(alp0),
self._salp0 = self.salp1 * cbet1 # alp0 in [0, pi/2 - |bet1|]
# Alt: calp0 = hypot(sbet1, calp1 * cbet1). The following
# is slightly better (consider the case salp1 = 0).
self._calp0 = math.hypot(self.calp1, self.salp1 * sbet1)
# Evaluate sig with tan(bet1) = tan(sig1) * cos(alp1).
# sig = 0 is nearest northward crossing of equator.
# With bet1 = 0, alp1 = pi/2, we have sig1 = 0 (equatorial line).
# With bet1 = pi/2, alp1 = -pi, sig1 = pi/2
# With bet1 = -pi/2, alp1 = 0 , sig1 = -pi/2
# Evaluate omg1 with tan(omg1) = sin(alp0) * tan(sig1).
# With alp0 in (0, pi/2], quadrants for sig and omg coincide.
# No atan2(0,0) ambiguity at poles since cbet1 = +epsilon.
# With alp0 = 0, omg1 = 0 for alp1 = 0, omg1 = pi for alp1 = pi.
self._ssig1 = sbet1; self._somg1 = self._salp0 * sbet1
self._csig1 = self._comg1 = (cbet1 * self.calp1
if sbet1 != 0 or self.calp1 != 0 else 1)
# sig1 in (-pi, pi]
self._ssig1, self._csig1 = Math.norm(self._ssig1, self._csig1)
# No need to normalize
# self._somg1, self._comg1 = Math.norm(self._somg1, self._comg1)
self._k2 = Math.sq(self._calp0) * geod._ep2
eps = self._k2 / (2 * (1 + math.sqrt(1 + self._k2)) + self._k2)
if self.caps & Geodesic.CAP_C1:
self._A1m1 = Geodesic._A1m1f(eps)
self._C1a = list(range(Geodesic.nC1_ + 1))
Geodesic._C1f(eps, self._C1a)
self._B11 = Geodesic._SinCosSeries(
True, self._ssig1, self._csig1, self._C1a)
s = math.sin(self._B11); c = math.cos(self._B11)
# tau1 = sig1 + B11
self._stau1 = self._ssig1 * c + self._csig1 * s
self._ctau1 = self._csig1 * c - self._ssig1 * s
# Not necessary because C1pa reverts C1a
# _B11 = -_SinCosSeries(true, _stau1, _ctau1, _C1pa)
if self.caps & Geodesic.CAP_C1p:
self._C1pa = list(range(Geodesic.nC1p_ + 1))
Geodesic._C1pf(eps, self._C1pa)
if self.caps & Geodesic.CAP_C2:
self._A2m1 = Geodesic._A2m1f(eps)
self._C2a = list(range(Geodesic.nC2_ + 1))
Geodesic._C2f(eps, self._C2a)
self._B21 = Geodesic._SinCosSeries(
True, self._ssig1, self._csig1, self._C2a)
if self.caps & Geodesic.CAP_C3:
self._C3a = list(range(Geodesic.nC3_))
geod._C3f(eps, self._C3a)
self._A3c = -self.f * self._salp0 * geod._A3f(eps)
self._B31 = Geodesic._SinCosSeries(
True, self._ssig1, self._csig1, self._C3a)
if self.caps & Geodesic.CAP_C4:
self._C4a = list(range(Geodesic.nC4_))
geod._C4f(eps, self._C4a)
# Multiplier = a^2 * e^2 * cos(alpha0) * sin(alpha0)
self._A4 = Math.sq(self.a) * self._calp0 * self._salp0 * geod._e2
self._B41 = Geodesic._SinCosSeries(
False, self._ssig1, self._csig1, self._C4a)
self.s13 = Math.nan
"""the distance between point 1 and point 3 in meters (readonly)"""
self.a13 = Math.nan
"""the arc length between point 1 and point 3 in degrees (readonly)"""
# return a12, lat2, lon2, azi2, s12, m12, M12, M21, S12
def _GenPosition(self, arcmode, s12_a12, outmask):
"""Private: General solution of position along geodesic"""
from geographiclib.geodesic import Geodesic
a12 = lat2 = lon2 = azi2 = s12 = m12 = M12 = M21 = S12 = Math.nan
outmask &= self.caps & Geodesic.OUT_MASK
if not (arcmode or
(self.caps & (Geodesic.OUT_MASK & Geodesic.DISTANCE_IN))):
# Uninitialized or impossible distance calculation requested
return a12, lat2, lon2, azi2, s12, m12, M12, M21, S12
# Avoid warning about uninitialized B12.
B12 = 0.0; AB1 = 0.0
if arcmode:
# Interpret s12_a12 as spherical arc length
sig12 = math.radians(s12_a12)
ssig12, csig12 = Math.sincosd(s12_a12)
else:
# Interpret s12_a12 as distance
tau12 = s12_a12 / (self._b * (1 + self._A1m1))
s = math.sin(tau12); c = math.cos(tau12)
# tau2 = tau1 + tau12
B12 = - Geodesic._SinCosSeries(True,
self._stau1 * c + self._ctau1 * s,
self._ctau1 * c - self._stau1 * s,
self._C1pa)
sig12 = tau12 - (B12 - self._B11)
ssig12 = math.sin(sig12); csig12 = math.cos(sig12)
if abs(self.f) > 0.01:
# Reverted distance series is inaccurate for |f| > 1/100, so correct
# sig12 with 1 Newton iteration. The following table shows the
# approximate maximum error for a = WGS_a() and various f relative to
# GeodesicExact.
# erri = the error in the inverse solution (nm)
# errd = the error in the direct solution (series only) (nm)
# errda = the error in the direct solution (series + 1 Newton) (nm)
#
# f erri errd errda
# -1/5 12e6 1.2e9 69e6
# -1/10 123e3 12e6 765e3
# -1/20 1110 108e3 7155
# -1/50 18.63 200.9 27.12
# -1/100 18.63 23.78 23.37
# -1/150 18.63 21.05 20.26
# 1/150 22.35 24.73 25.83
# 1/100 22.35 25.03 25.31
# 1/50 29.80 231.9 30.44
# 1/20 5376 146e3 10e3
# 1/10 829e3 22e6 1.5e6
# 1/5 157e6 3.8e9 280e6
ssig2 = self._ssig1 * csig12 + self._csig1 * ssig12
csig2 = self._csig1 * csig12 - self._ssig1 * ssig12
B12 = Geodesic._SinCosSeries(True, ssig2, csig2, self._C1a)
serr = ((1 + self._A1m1) * (sig12 + (B12 - self._B11)) -
s12_a12 / self._b)
sig12 = sig12 - serr / math.sqrt(1 + self._k2 * Math.sq(ssig2))
ssig12 = math.sin(sig12); csig12 = math.cos(sig12)
# Update B12 below
# real omg12, lam12, lon12
# real ssig2, csig2, sbet2, cbet2, somg2, comg2, salp2, calp2
# sig2 = sig1 + sig12
ssig2 = self._ssig1 * csig12 + self._csig1 * ssig12
csig2 = self._csig1 * csig12 - self._ssig1 * ssig12
dn2 = math.sqrt(1 + self._k2 * Math.sq(ssig2))
if outmask & (
Geodesic.DISTANCE | Geodesic.REDUCEDLENGTH | Geodesic.GEODESICSCALE):
if arcmode or abs(self.f) > 0.01:
B12 = Geodesic._SinCosSeries(True, ssig2, csig2, self._C1a)
AB1 = (1 + self._A1m1) * (B12 - self._B11)
# sin(bet2) = cos(alp0) * sin(sig2)
sbet2 = self._calp0 * ssig2
# Alt: cbet2 = hypot(csig2, salp0 * ssig2)
cbet2 = math.hypot(self._salp0, self._calp0 * csig2)
if cbet2 == 0:
# I.e., salp0 = 0, csig2 = 0. Break the degeneracy in this case
cbet2 = csig2 = Geodesic.tiny_
# tan(alp0) = cos(sig2)*tan(alp2)
salp2 = self._salp0; calp2 = self._calp0 * csig2 # No need to normalize
if outmask & Geodesic.DISTANCE:
s12 = self._b * ((1 + self._A1m1) * sig12 + AB1) if arcmode else s12_a12
if outmask & Geodesic.LONGITUDE:
# tan(omg2) = sin(alp0) * tan(sig2)
somg2 = self._salp0 * ssig2; comg2 = csig2 # No need to normalize
E = Math.copysign(1, self._salp0) # East or west going?
# omg12 = omg2 - omg1
omg12 = (E * (sig12
- (math.atan2( ssig2, csig2) -
math.atan2( self._ssig1, self._csig1))
+ (math.atan2(E * somg2, comg2) -
math.atan2(E * self._somg1, self._comg1)))
if outmask & Geodesic.LONG_UNROLL
else math.atan2(somg2 * self._comg1 - comg2 * self._somg1,
comg2 * self._comg1 + somg2 * self._somg1))
lam12 = omg12 + self._A3c * (
sig12 + (Geodesic._SinCosSeries(True, ssig2, csig2, self._C3a)
- self._B31))
lon12 = math.degrees(lam12)
lon2 = (self.lon1 + lon12 if outmask & Geodesic.LONG_UNROLL else
Math.AngNormalize(Math.AngNormalize(self.lon1) +
Math.AngNormalize(lon12)))
if outmask & Geodesic.LATITUDE:
lat2 = Math.atan2d(sbet2, self._f1 * cbet2)
if outmask & Geodesic.AZIMUTH:
azi2 = Math.atan2d(salp2, calp2)
if outmask & (Geodesic.REDUCEDLENGTH | Geodesic.GEODESICSCALE):
B22 = Geodesic._SinCosSeries(True, ssig2, csig2, self._C2a)
AB2 = (1 + self._A2m1) * (B22 - self._B21)
J12 = (self._A1m1 - self._A2m1) * sig12 + (AB1 - AB2)
if outmask & Geodesic.REDUCEDLENGTH:
# Add parens around (_csig1 * ssig2) and (_ssig1 * csig2) to ensure
# accurate cancellation in the case of coincident points.
m12 = self._b * (( dn2 * (self._csig1 * ssig2) -
self._dn1 * (self._ssig1 * csig2))
- self._csig1 * csig2 * J12)
if outmask & Geodesic.GEODESICSCALE:
t = (self._k2 * (ssig2 - self._ssig1) *
(ssig2 + self._ssig1) / (self._dn1 + dn2))
M12 = csig12 + (t * ssig2 - csig2 * J12) * self._ssig1 / self._dn1
M21 = csig12 - (t * self._ssig1 - self._csig1 * J12) * ssig2 / dn2
if outmask & Geodesic.AREA:
B42 = Geodesic._SinCosSeries(False, ssig2, csig2, self._C4a)
# real salp12, calp12
if self._calp0 == 0 or self._salp0 == 0:
# alp12 = alp2 - alp1, used in atan2 so no need to normalize
salp12 = salp2 * self.calp1 - calp2 * self.salp1
calp12 = calp2 * self.calp1 + salp2 * self.salp1
else:
# tan(alp) = tan(alp0) * sec(sig)
# tan(alp2-alp1) = (tan(alp2) -tan(alp1)) / (tan(alp2)*tan(alp1)+1)
# = calp0 * salp0 * (csig1-csig2) / (salp0^2 + calp0^2 * csig1*csig2)
# If csig12 > 0, write
# csig1 - csig2 = ssig12 * (csig1 * ssig12 / (1 + csig12) + ssig1)
# else
# csig1 - csig2 = csig1 * (1 - csig12) + ssig12 * ssig1
# No need to normalize
salp12 = self._calp0 * self._salp0 * (
self._csig1 * (1 - csig12) + ssig12 * self._ssig1 if csig12 <= 0
else ssig12 * (self._csig1 * ssig12 / (1 + csig12) + self._ssig1))
calp12 = (Math.sq(self._salp0) +
Math.sq(self._calp0) * self._csig1 * csig2)
S12 = (self._c2 * math.atan2(salp12, calp12) +
self._A4 * (B42 - self._B41))
a12 = s12_a12 if arcmode else math.degrees(sig12)
return a12, lat2, lon2, azi2, s12, m12, M12, M21, S12
def Position(self, s12, outmask = GeodesicCapability.STANDARD):
"""Find the position on the line given *s12*
:param s12: the distance from the first point to the second in
meters
:param outmask: the :ref:`output mask <outmask>`
:return: a :ref:`dict`
The default value of *outmask* is STANDARD, i.e., the *lat1*,
*lon1*, *azi1*, *lat2*, *lon2*, *azi2*, *s12*, *a12* entries are
returned. The :class:`~geographiclib.geodesicline.GeodesicLine`
object must have been constructed with the DISTANCE_IN capability.
"""
from geographiclib.geodesic import Geodesic
result = {'lat1': self.lat1,
'lon1': self.lon1 if outmask & Geodesic.LONG_UNROLL else
Math.AngNormalize(self.lon1),
'azi1': self.azi1, 's12': s12}
a12, lat2, lon2, azi2, s12, m12, M12, M21, S12 = self._GenPosition(
False, s12, outmask)
outmask &= Geodesic.OUT_MASK
result['a12'] = a12
if outmask & Geodesic.LATITUDE: result['lat2'] = lat2
if outmask & Geodesic.LONGITUDE: result['lon2'] = lon2
if outmask & Geodesic.AZIMUTH: result['azi2'] = azi2
if outmask & Geodesic.REDUCEDLENGTH: result['m12'] = m12
if outmask & Geodesic.GEODESICSCALE:
result['M12'] = M12; result['M21'] = M21
if outmask & Geodesic.AREA: result['S12'] = S12
return result
def ArcPosition(self, a12, outmask = GeodesicCapability.STANDARD):
"""Find the position on the line given *a12*
:param a12: spherical arc length from the first point to the second
in degrees
:param outmask: the :ref:`output mask <outmask>`
:return: a :ref:`dict`
The default value of *outmask* is STANDARD, i.e., the *lat1*,
*lon1*, *azi1*, *lat2*, *lon2*, *azi2*, *s12*, *a12* entries are
returned.
"""
from geographiclib.geodesic import Geodesic
result = {'lat1': self.lat1,
'lon1': self.lon1 if outmask & Geodesic.LONG_UNROLL else
Math.AngNormalize(self.lon1),
'azi1': self.azi1, 'a12': a12}
a12, lat2, lon2, azi2, s12, m12, M12, M21, S12 = self._GenPosition(
True, a12, outmask)
outmask &= Geodesic.OUT_MASK
if outmask & Geodesic.DISTANCE: result['s12'] = s12
if outmask & Geodesic.LATITUDE: result['lat2'] = lat2
if outmask & Geodesic.LONGITUDE: result['lon2'] = lon2
if outmask & Geodesic.AZIMUTH: result['azi2'] = azi2
if outmask & Geodesic.REDUCEDLENGTH: result['m12'] = m12
if outmask & Geodesic.GEODESICSCALE:
result['M12'] = M12; result['M21'] = M21
if outmask & Geodesic.AREA: result['S12'] = S12
return result
def SetDistance(self, s13):
"""Specify the position of point 3 in terms of distance
:param s13: distance from point 1 to point 3 in meters
"""
self.s13 = s13
self.a13, _, _, _, _, _, _, _, _ = self._GenPosition(False, self.s13, 0)
def SetArc(self, a13):
"""Specify the position of point 3 in terms of arc length
:param a13: spherical arc length from point 1 to point 3 in degrees
"""
from geographiclib.geodesic import Geodesic
self.a13 = a13
_, _, _, _, self.s13, _, _, _, _ = self._GenPosition(True, self.a13,
Geodesic.DISTANCE)
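# Illustrative note (not part of the original module): a minimal, hedged usage
# sketch of the API described in the module docstring, using only the public
# Geodesic/GeodesicLine interface. It is guarded so it never runs on import.
if __name__ == "__main__":
    from geographiclib.geodesic import Geodesic

    # A geodesic line starting at 40N 75W with azimuth 30 degrees (WGS84).
    line = Geodesic.WGS84.Line(40.0, -75.0, 30.0)
    # The same line queried by distance (meters) and by arc length (degrees).
    by_distance = line.Position(1000e3)
    by_arc = line.ArcPosition(9.0)
    print(by_distance['lat2'], by_distance['lon2'], by_distance['azi2'])
    print(by_arc['lat2'], by_arc['lon2'], by_arc['s12'])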
| 42.883178
| 79
| 0.618939
|