Dataset schema (one row per source file; ".." gives the observed value range, "nullable" marks columns that may be null):

hexsha: string, length 40..40
size: int64, 7..1.04M
ext: string, 10 classes
lang: string, 1 class
max_stars_repo_path: string, length 4..247
max_stars_repo_name: string, length 4..125
max_stars_repo_head_hexsha: string, length 40..78
max_stars_repo_licenses: list, length 1..10
max_stars_count: int64, 1..368k, nullable
max_stars_repo_stars_event_min_datetime: string, length 24..24, nullable
max_stars_repo_stars_event_max_datetime: string, length 24..24, nullable
max_issues_repo_path: string, length 4..247
max_issues_repo_name: string, length 4..125
max_issues_repo_head_hexsha: string, length 40..78
max_issues_repo_licenses: list, length 1..10
max_issues_count: int64, 1..116k, nullable
max_issues_repo_issues_event_min_datetime: string, length 24..24, nullable
max_issues_repo_issues_event_max_datetime: string, length 24..24, nullable
max_forks_repo_path: string, length 4..247
max_forks_repo_name: string, length 4..125
max_forks_repo_head_hexsha: string, length 40..78
max_forks_repo_licenses: list, length 1..10
max_forks_count: int64, 1..105k, nullable
max_forks_repo_forks_event_min_datetime: string, length 24..24, nullable
max_forks_repo_forks_event_max_datetime: string, length 24..24, nullable
content: string, length 1..1.04M
avg_line_length: float64, 1.77..618k
max_line_length: int64, 1..1.02M
alphanum_fraction: float64, 0..1
original_content: string, length 7..1.04M
filtered:remove_function_no_docstring: int64, -102..942k
filtered:remove_class_no_docstring: int64, -354..977k
filtered:remove_delete_markers: int64, 0..60.1k
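The 33 columns above combine repository metadata with the filtered ("content") and unfiltered ("original_content") source text plus three per-filter bookkeeping columns. As a quick orientation before the raw rows that follow, here is a minimal sketch of reading one row with pandas, assuming the split has been exported locally; the file name stack_filtered_python.parquet is hypothetical and not part of this dump.

import pandas as pd

# Hypothetical local copy of the split; any Parquet-backed export of these rows works.
df = pd.read_parquet("stack_filtered_python.parquet")
row = df.iloc[0]

# Repository-level metadata for this file.
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["max_stars_repo_licenses"])

# "content" holds the filtered source text, "original_content" the unfiltered source.
print(len(row["content"]), len(row["original_content"]))

# Per-filter columns (their exact semantics are not stated in this dump).
print(row["filtered:remove_function_no_docstring"],
      row["filtered:remove_class_no_docstring"],
      row["filtered:remove_delete_markers"])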
f85fe3d0fc0972e2f4e39bd88f70e4c006132765
| 14,977
|
py
|
Python
|
various_types_of_self_citation/main.py
|
KasyanovPavel/wos_api_usecases
|
6f87e46d45eaffe1bb98c63ae63a8818160d3273
|
[
"MIT"
] | null | null | null |
various_types_of_self_citation/main.py
|
KasyanovPavel/wos_api_usecases
|
6f87e46d45eaffe1bb98c63ae63a8818160d3273
|
[
"MIT"
] | null | null | null |
various_types_of_self_citation/main.py
|
KasyanovPavel/wos_api_usecases
|
6f87e46d45eaffe1bb98c63ae63a8818160d3273
|
[
"MIT"
] | 1
|
2021-07-28T16:47:58.000Z
|
2021-07-28T16:47:58.000Z
|
import requests
from apikey import apikey # Your API key, it's better not to store it in the program
# Enter the WoS search query to evaluate its self-citation percentage:
search_query = '(TS=("self citation*" or selfcitation*)) AND (TP==("HIGHLY CITED PAPERS"))'
headers = {
'X-APIKey': apikey
}
endpoint = "https://api.clarivate.com/api/wos"
# This saves API requests (and the records they return) by storing the already checked citing papers locally
checked_citing_papers = [('ut', 'cited_paper')]
# This is the function that performs the self-citation calculation for every cited reference. If the self-citation event
# has been identified by the above calculation() function, then the citing document is analyzed for the number of
# references to that particular cited document. This is required because the number of citations and the number of
# citing documents are not the same thing. One citing document can have multiple cited references leading to the cited
# one, so the total amount of citations to a paper can sometimes be significantly higher than the number of citing
# records.
a = cited_papers()
b = citing_papers(a)
self_citations(a)
| 61.130612
| 237
| 0.686252
|
import requests
from apikey import apikey # Your API key, it's better not to store it in the program
# Enter the WoS search query to evaluate its self-citation percentage:
search_query = '(TS=("self citation*" or selfcitation*)) AND (TP==("HIGHLY CITED PAPERS"))'
headers = {
'X-APIKey': apikey
}
endpoint = "https://api.clarivate.com/api/wos"
# This saves API requests (and the records they return) by storing the already checked citing papers locally
checked_citing_papers = [('ut', 'cited_paper')]
class CitedPaper:
def __init__(self, ut, author_names, author_dais, author_rids, author_orcids, org_names, source_names, times_cited,
citing_papers_list):
self.ut = ut
self.author_names = author_names
self.author_dais = author_dais
self.author_rids = author_rids
self.author_orcids = author_orcids
self.org_names = org_names
self.source_names = source_names
self.times_cited = times_cited
self.citing_papers_list = citing_papers_list
class CitingPaper:
def __init__(self, ut, author_names, author_dais, author_rids, author_orcids, org_names, source_names,
self_citation_crs):
self.ut = ut
self.author_names = author_names
self.author_dais = author_dais
self.author_rids = author_rids
self.author_orcids = author_orcids
self.org_names = org_names
self.source_names = source_names
self.self_citation_crs = self_citation_crs
def cited_papers(): # This is how we create a list of cited papers based on a search query specified in the start
data = cited_request() # Getting the data from Web of Science API
cited_papers_list = []
for paper in data: # Breaking the received JSON data into separate instances of cited_paper class
ut = paper['UID']
author_names, author_dais, author_rids, author_orcids = get_author_list(paper)
org_names = get_org_list(paper)
source_names = get_source_list(paper)
times_cited = get_times_cited(paper)
citing_papers_list = []
cited_papers_list.append(CitedPaper(ut, author_names, author_dais, author_rids, author_orcids, org_names,
source_names, times_cited, citing_papers_list))
return cited_papers_list
def cited_request(): # This function actually gets the cited paper data via API
cited_data = []
initial_response = requests.get(f'{endpoint}?databaseId=WOS&usrQuery={search_query}&count=0&firstRecord=1',
headers=headers)
initial_data = initial_response.json()
for i in range(((initial_data['QueryResult']['RecordsFound'] - 1) // 100) + 1):
subsequent_response = requests.get(
f'{endpoint}?databaseId=WOS&usrQuery={search_query}&count=100&firstRecord={(100 * i + 1)}',
headers=headers)
print(f"Getting cited papers data: {i+1} of {((initial_data['QueryResult']['RecordsFound'] - 1) // 100) + 1}")
addtl_data = subsequent_response.json()
for j in range(len(addtl_data['Data']['Records']['records']['REC'])):
cited_data.append(addtl_data['Data']['Records']['records']['REC'][j])
return cited_data
def citing_papers(cited_papers_list): # Based on the list of cited papers, we get a list of the records which cite them
for paper in cited_papers_list:
data = citing_request(paper)
print(f"Now getting citing papers data for each of them: {cited_papers_list.index(paper) + 1} of {len(cited_papers_list)}")
paper.citing_papers_list = []
for record in data:
ut = record['UID']
author_names, author_dais, author_rids, author_orcids = get_author_list(record)
org_names = get_org_list(record)
source_names = get_source_list(record)
self_citation_crs = 0
paper.citing_papers_list.append(CitingPaper(ut, author_names, author_dais, author_rids, author_orcids,
org_names, source_names, self_citation_crs))
# Please pay attention to the line above: every object of CitingPaper class is an item of
# citing_papers_list, an attribute of specific cited paper. By this, we establish links between cited and
# citing records. This allows finding self-citations only in the records that reference each other, not
# just arbitrary records in the cited and citing dataset. This would be extremely helpful for coauthor
# self-citation evaluation
return cited_papers_list
def citing_request(paper): # This function gets the citing paper data via API
citing_data = []
initial_response = requests.get(f'{endpoint}/citing?databaseId=WOS&uniqueId={paper.ut}&count=0&firstRecord=1',
headers=headers)
initial_data = initial_response.json()
for i in range(((initial_data['QueryResult']['RecordsFound'] - 1) // 100) + 1):
subsequent_response = requests.get(f'{endpoint}/citing?databaseId=WOS&uniqueId={paper.ut}&count=100&firstRecord={(100*i+1)}', headers=headers)
addtl_data = subsequent_response.json()
for j in range(len(addtl_data['Data']['Records']['records']['REC'])):
citing_data.append(addtl_data['Data']['Records']['records']['REC'][j])
return citing_data
def get_author_list(paper): # This function gets lists of authors (and coauthors) for every paper
author_names = set() # This set uses the author name field, which can be spelled differently for the same person
author_dais = set() # This uses Web of Science record sets made by Clarivate author name disambiguation algorithm
author_rids = set() # This set relies on author ResearcherID
author_orcids = set() # This set relies on author ORCID
if paper['static_data']['summary']['names']['count'] == 1:
author_names.add(paper['static_data']['summary']['names']['name']['wos_standard'])
else:
for person_name in paper['static_data']['summary']['names']['name']:
try:
author_names.add(person_name['wos_standard'])
except KeyError:
pass # No author name data in this contributor record - i.e., it can be a group author
try:
author_dais.add(person_name['daisng_id'])
except KeyError:
pass # No DAIS data in this author record
try:
for rid in person_name['data-item-ids']['data-item-id']:
if rid['id-type'] == 'PreferredRID':
author_rids.add(rid['content'])
except KeyError:
pass # No RID data in this author record
except TypeError:
pass # A rare case when the RID is linked to the author, but the record isn't claimed
try:
author_orcids.add(person_name['orcid_id'])
except KeyError:
pass # No ORCID data in this author record
return author_names, author_dais, author_rids, author_orcids
def get_org_list(paper): # This function gets lists of affiliated organizations for every paper
org_names = set() # The set relies on Affiliation a.k.a. Organization-Enhanced field of every record
try:
if paper['static_data']['fullrecord_metadata']['addresses']['count'] == 1:
for org in paper['static_data']['fullrecord_metadata']['addresses']['address_name']['address_spec']['organizations']['organization']:
if org['pref'] == 'Y':
org_names.add(org['content'])
else:
for affiliation in paper['static_data']['fullrecord_metadata']['addresses']['address_name']:
for org in affiliation['address_spec']['organizations']['organization']:
if org['pref'] == 'Y':
org_names.add(org['content'])
except KeyError:
pass # When there is no address data on the paper record at all
return org_names
def get_source_list(paper): # This function gets lists of publication sources for every paper
source_list = set() # The set relies on Abbreviated Source Name field of every record
for title in paper['static_data']['summary']['titles']['title']:
if title['type'] == 'source_abbrev':
source_list.add(title['content'])
return source_list
def get_times_cited(paper): # This function gets the times cited count for every cited paper
times_cited = paper['dynamic_data']['citation_related']['tc_list']['silo_tc']['local_count']
return times_cited
# This is the function that performs the self-citation calculation for every cited reference. If the self-citation event
# has been identified by the above calculation() function, then the citing document is analyzed for the number of
# references to that particular cited document. This is required because the number of citations and the number of
# citing documents are not the same thing. One citing document can have multiple cited references leading to the cited
# one, so the total amount of citations to a paper can sometimes be significantly higher than the number of citing
# records.
def self_citation_crs_calc(cited_paper, citing_paper):
citing_paper.self_citation_crs = 0 # The self-citation cited references count for every citing paper
for checked_citing_paper in checked_citing_papers: # Checking if the paper has already been extracted via API
if checked_citing_paper[0] == citing_paper.ut:
cr_data = checked_citing_paper[1]
else: # If it hasn't - the code will send a request to Web of Science API for cited references of that paper
initial_response = requests.get(f'{endpoint}/references?databaseId=WOS&uniqueId={citing_paper.ut}&count=100&firstRecord=1', headers=headers)
cr_data = initial_response.json()
for i in range(((cr_data['QueryResult']['RecordsFound'] - 1) // 100)):
subsequent_response = requests.get(
f'{endpoint}/references?databaseId=WOS&uniqueId={citing_paper.ut}&count=100&firstRecord={(100 * (i + 1) + 1)}',
headers=headers)
addtl_cr_data = subsequent_response.json()
for paper in range(len(addtl_cr_data['Data'])):
cr_data['Data'].append(addtl_cr_data['Data'][paper])
checked_citing_papers.append((citing_paper.ut, cr_data)) # Storing all the checked citing papers locally
for cr in cr_data['Data']: # Checking if the ID of a paper in cited reference matches the ID of a cited paper
if cr['UID'] == cited_paper.ut:
citing_paper.self_citation_crs += 1 # If it does, this citing paper self-citation count is increased by 1
return citing_paper, checked_citing_papers
def self_citations(cited_papers_list): # Self-citation calculations occur here
total_citations = 0
author_name_self_citation = 0
author_dais_self_citation = 0
author_rids_self_citation = 0
author_orcids_self_citation = 0
org_self_citation = 0
source_self_citation = 0
for cited_paper in cited_papers_list: # For every cited paper we run a check
for citing_paper in cited_paper.citing_papers_list: # Every paper that was citing it is checked for matches
if len(cited_paper.author_names.intersection(citing_paper.author_names)) > 0: # For (co)author names
self_citation_crs_calc(cited_paper, citing_paper) # If at least 1 match is found, the number of references from the citing to the cited document is calculated
print(f'Oops, seems like a self-citation found: paper {cited_papers_list.index(cited_paper) + 1} of {len(cited_papers_list)}')
author_name_self_citation += citing_paper.self_citation_crs
if len(cited_paper.author_dais.intersection(citing_paper.author_dais)) > 0: # For (co)author paper sets
if citing_paper.self_citation_crs == 0:
self_citation_crs_calc(cited_paper, citing_paper)
author_dais_self_citation += citing_paper.self_citation_crs
if len(cited_paper.author_rids.intersection(citing_paper.author_rids)) > 0: # For their ResearcherIDs
if citing_paper.self_citation_crs == 0:
self_citation_crs_calc(cited_paper, citing_paper)
author_rids_self_citation += citing_paper.self_citation_crs
if len(cited_paper.author_orcids.intersection(citing_paper.author_orcids)) > 0: # For their ORCIDs
if citing_paper.self_citation_crs == 0:
self_citation_crs_calc(cited_paper, citing_paper)
author_orcids_self_citation += citing_paper.self_citation_crs
if len(cited_paper.org_names.intersection(citing_paper.org_names)) > 0: # For their org affiliations
if citing_paper.self_citation_crs == 0:
self_citation_crs_calc(cited_paper, citing_paper)
org_self_citation += citing_paper.self_citation_crs
if len(cited_paper.source_names.intersection(citing_paper.source_names)) > 0: # For the titles those papers were published in
if citing_paper.self_citation_crs == 0:
self_citation_crs_calc(cited_paper, citing_paper)
source_self_citation += citing_paper.self_citation_crs
total_citations += cited_paper.times_cited # The total citations is going to be the common denominator
print(f'Coauthor self-citation:\n Name-level: {(author_name_self_citation/total_citations * 100):.2f}% ({author_name_self_citation} self-citations, {total_citations - author_name_self_citation} external, {total_citations} total)')
print(f' DAIS-level: {(author_dais_self_citation/total_citations * 100):.2f}% ({author_dais_self_citation} self-citations, {total_citations - author_dais_self_citation} external, {total_citations} total)')
print(f' ResearcherID-level: {(author_rids_self_citation/total_citations * 100):.2f}% ({author_rids_self_citation} self-citations, {total_citations - author_rids_self_citation} external, {total_citations} total)')
print(f' ORCID-level: {(author_orcids_self_citation/total_citations * 100):.2f}% ({author_orcids_self_citation} self-citations, {total_citations - author_orcids_self_citation} external, {total_citations} total)')
print(f'Organization-level self citation: {(org_self_citation/total_citations * 100):.2f}% ({org_self_citation} self-citations, {total_citations - org_self_citation} external, {total_citations} total)')
print(f'Publication Source-level self citation: {(source_self_citation/total_citations * 100):.2f}% ({source_self_citation} self-citations, {total_citations - source_self_citation} external, {total_citations} total)')
a = cited_papers()
b = citing_papers(a)
self_citations(a)
| 13,480
| -7
| 327
|
e704606e17552b1dc7f3698e2abe2715af86b384
| 3,867
|
py
|
Python
|
src/fbsrankings/service/service.py
|
mikee385/fbsrankings
|
2b50e26a302b53c21cd8f5c965943d6fbf0680a1
|
[
"MIT"
] | null | null | null |
src/fbsrankings/service/service.py
|
mikee385/fbsrankings
|
2b50e26a302b53c21cd8f5c965943d6fbf0680a1
|
[
"MIT"
] | null | null | null |
src/fbsrankings/service/service.py
|
mikee385/fbsrankings
|
2b50e26a302b53c21cd8f5c965943d6fbf0680a1
|
[
"MIT"
] | null | null | null |
from abc import ABCMeta
from types import TracebackType
from typing import ContextManager
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing_extensions import Literal
from typing_extensions import Protocol
from fbsrankings.common import Command
from fbsrankings.common import CommandBus
from fbsrankings.common import EventBus
from fbsrankings.common import Query
from fbsrankings.common import QueryBus
from fbsrankings.domain import RaiseBehavior
from fbsrankings.domain import ValidationError
from fbsrankings.domain import ValidationService
from fbsrankings.infrastructure import QueryManagerFactory
from fbsrankings.infrastructure import TransactionFactory
from fbsrankings.infrastructure.memory import DataSource as MemoryDataSource
from fbsrankings.infrastructure.sportsreference import SportsReference
from fbsrankings.infrastructure.sqlite import DataSource as SqliteDataSource
from fbsrankings.service.command import CommandManager
from fbsrankings.service.config import Config
from fbsrankings.service.config import ConfigStorageType
R = TypeVar("R", covariant=True)
| 31.696721
| 87
| 0.709853
|
from abc import ABCMeta
from types import TracebackType
from typing import ContextManager
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing_extensions import Literal
from typing_extensions import Protocol
from fbsrankings.common import Command
from fbsrankings.common import CommandBus
from fbsrankings.common import EventBus
from fbsrankings.common import Query
from fbsrankings.common import QueryBus
from fbsrankings.domain import RaiseBehavior
from fbsrankings.domain import ValidationError
from fbsrankings.domain import ValidationService
from fbsrankings.infrastructure import QueryManagerFactory
from fbsrankings.infrastructure import TransactionFactory
from fbsrankings.infrastructure.memory import DataSource as MemoryDataSource
from fbsrankings.infrastructure.sportsreference import SportsReference
from fbsrankings.infrastructure.sqlite import DataSource as SqliteDataSource
from fbsrankings.service.command import CommandManager
from fbsrankings.service.config import Config
from fbsrankings.service.config import ConfigStorageType
R = TypeVar("R", covariant=True)
class DataSource(QueryManagerFactory, TransactionFactory, Protocol, metaclass=ABCMeta):
def drop(self) -> None:
pass
def close(self) -> None:
pass
def __enter__(self) -> "DataSource":
pass
def __exit__(
self,
type_: Optional[Type[BaseException]],
value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> Literal[False]:
pass
class Service(ContextManager["Service"]):
def __init__(self, config: Config, event_bus: EventBus) -> None:
self._event_bus = event_bus
self._data_source: DataSource
storage_type = config.storage_type
if storage_type == ConfigStorageType.MEMORY:
self._data_source = MemoryDataSource()
elif storage_type == ConfigStorageType.SQLITE:
database = config.database
self._data_source = SqliteDataSource(str(database))
else:
raise ValueError(f"Unknown storage type: {storage_type}")
alternate_names = config.alternate_names
if alternate_names is None:
alternate_names = {}
self.validation_service = ValidationService(RaiseBehavior.ON_DEMAND)
self._sports_reference = SportsReference(
alternate_names, self.validation_service,
)
self._command_bus = CommandBus()
self._command_manager = CommandManager(
self._sports_reference,
self._data_source,
self._command_bus,
self._event_bus,
)
self._query_bus = QueryBus()
self._query_manager = self._data_source.query_manager(self._query_bus)
@property
def errors(self) -> List[ValidationError]:
return self.validation_service.errors
def send(self, command: Command) -> None:
self._command_bus.send(command)
def query(self, query: Query[R]) -> R:
return self._query_bus.query(query)
def drop(self) -> None:
self._data_source.drop()
def close(self) -> None:
self._query_manager.close()
self._command_manager.close()
self._data_source.close()
def __enter__(self) -> "Service":
self._data_source.__enter__()
self._command_manager.__enter__()
self._query_manager.__enter__()
return self
def __exit__(
self,
type_: Optional[Type[BaseException]],
value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> Literal[False]:
self._query_manager.__exit__(type_, value, traceback)
self._command_manager.__exit__(type_, value, traceback)
self._data_source.__exit__(type_, value, traceback)
return False
| 2,252
| 315
| 153
|
9c23ac4cd76abc536a947f4db42f36183636e6ef
| 3,243
|
py
|
Python
|
xpresso/binders/_body/openapi/discriminated.py
|
yezz123/xpresso
|
89ba9f3d164b0d76cbe085e09e39d4bca2315b59
|
[
"MIT"
] | 4
|
2022-02-07T05:12:51.000Z
|
2022-02-28T12:34:57.000Z
|
xpresso/binders/_body/openapi/discriminated.py
|
yezz123/xpresso
|
89ba9f3d164b0d76cbe085e09e39d4bca2315b59
|
[
"MIT"
] | 2
|
2022-01-25T02:05:02.000Z
|
2022-01-25T02:38:59.000Z
|
xpresso/binders/_body/openapi/discriminated.py
|
yezz123/xpresso
|
89ba9f3d164b0d76cbe085e09e39d4bca2315b59
|
[
"MIT"
] | null | null | null |
import inspect
import sys
import typing
from dataclasses import dataclass
if sys.version_info < (3, 9):
from typing_extensions import Annotated, get_args, get_origin
else:
from typing import Annotated, get_origin, get_args
from di.typing import get_markers_from_parameter
from xpresso._utils.typing import model_field_from_param
from xpresso.binders.api import ModelNameMap, OpenAPIBody, OpenAPIBodyMarker, Schemas
from xpresso.binders.dependants import BodyBinderMarker
from xpresso.openapi import models as openapi_models
@dataclass(frozen=True)
@dataclass(frozen=True)
| 36.852273
| 85
| 0.670984
|
import inspect
import sys
import typing
from dataclasses import dataclass
if sys.version_info < (3, 9):
from typing_extensions import Annotated, get_args, get_origin
else:
from typing import Annotated, get_origin, get_args
from di.typing import get_markers_from_parameter
from xpresso._utils.typing import model_field_from_param
from xpresso.binders.api import ModelNameMap, OpenAPIBody, OpenAPIBodyMarker, Schemas
from xpresso.binders.dependants import BodyBinderMarker
from xpresso.openapi import models as openapi_models
@dataclass(frozen=True)
class OpenAPIContentTypeDiscriminated(OpenAPIBody):
sub_body_providers: typing.Mapping[str, OpenAPIBody]
description: typing.Optional[str]
required: typing.Optional[bool]
include_in_schema = True
def get_models(self) -> typing.List[type]:
return [
model
for provider in self.sub_body_providers.values()
for model in provider.get_models()
]
def get_openapi(
self, model_name_map: ModelNameMap, schemas: Schemas
) -> openapi_models.RequestBody:
return openapi_models.RequestBody(
description=self.description,
required=self.required,
content={
media_type: provider.get_openapi_media_type(model_name_map, schemas)
for media_type, provider in self.sub_body_providers.items()
},
)
@dataclass(frozen=True)
class OpenAPIContentTypeDiscriminatedMarker(OpenAPIBodyMarker):
description: typing.Optional[str]
def register_parameter(self, param: inspect.Parameter) -> OpenAPIBody:
field = model_field_from_param(param)
required = field.required is not False
sub_body_providers: typing.Dict[str, OpenAPIBody] = {}
annotation = param.annotation
origin = get_origin(annotation)
assert origin is Annotated
annotation = next(iter(get_args(annotation)))
origin = get_origin(annotation)
if origin is not typing.Union:
raise TypeError("Unioned bodies must be a Union of simple bodies")
args = get_args(annotation)
for arg in args:
sub_body_param = inspect.Parameter(
name=param.name,
kind=param.kind,
annotation=arg,
default=param.default,
)
marker: typing.Optional[BodyBinderMarker] = None
for param_marker in get_markers_from_parameter(sub_body_param):
if isinstance(param_marker, BodyBinderMarker):
marker = param_marker
break
if marker is None:
raise TypeError(f"Type annotation is missing body marker: {arg}")
sub_body_openapi = marker.openapi_marker
provider = sub_body_openapi.register_parameter(sub_body_param)
if provider.include_in_schema:
media_type = provider.get_media_type_string()
sub_body_providers[media_type] = provider
return OpenAPIContentTypeDiscriminated(
sub_body_providers=sub_body_providers,
description=self.description,
required=None if required else False,
)
| 2,261
| 351
| 44
|
acd2ea12491c5b1f25bd235a0238c6e418dc7018
| 3,227
|
py
|
Python
|
scripts/particle_sym.py
|
mihranmashhud/PolyMarine
|
f959016b4505356213f10bdf0ff1b2157f719c87
|
[
"MIT"
] | 1
|
2022-03-10T21:06:51.000Z
|
2022-03-10T21:06:51.000Z
|
scripts/particle_sym.py
|
Mrugank-Upadhyay/PolyMarine
|
f959016b4505356213f10bdf0ff1b2157f719c87
|
[
"MIT"
] | null | null | null |
scripts/particle_sym.py
|
Mrugank-Upadhyay/PolyMarine
|
f959016b4505356213f10bdf0ff1b2157f719c87
|
[
"MIT"
] | 1
|
2022-03-10T21:02:04.000Z
|
2022-03-10T21:02:04.000Z
|
from operator import itemgetter
import time
import math
import random
import numpy as np
import datetime
from osgeo import ogr, osr
latlongToAlbers = getCoordConverter(4326,5070)
albersToLatlong = getCoordConverter(5070,4326)
start_date = datetime.datetime(1992,1,1)
end_date = datetime.datetime(2017,12,31)
current_date = start_date
increment = datetime.timedelta(minutes=15)
sample_point = (-41.8822705,28.4248646) # (Long, Lat)
travel_path = [sample_point]
while current_date < end_date:
# while line != "":
# line = sea_file.readline()
# point_data = line.split(',')
# try:
# print(type(point_data))
# print(type(point_data[1]))
# print(datetime.datetime.strptime(point_data[1][1],"%Y-%m-%d"))
# # sorted(point_data, key=lambda e: datetime.datetime.strptime(e[1], "%Y-%m-%d"))
# except Exception:
# print("sorting didn't work")
# print(point_data)
# line = ""
bin_file = f"ecco_{str(current_date.year).zfill(4)}-{str(current_date.month).zfill(2)}_000.npy"
curr_vector_field = np.load(f"../images/{bin_file}")
[y,x] = latlongToIndex(sample_point)
# print(f"Index: {[y,x]}")
# print(f"Possible Index: {curr_vector_field[y,x]}")
# print(f"Possible Index: {curr_vector_field[x,y]}")
# print(f"Does this shit even exist???? {curr_vector_field[360-y-1,x]}")
curr_vector = curr_vector_field[y,x]
if np.isnan(curr_vector[0]):
neighbors = get_neighbors(curr_vector_field, x, y)
if len(neighbors) != 0:
curr_vector = random.choice(neighbors)
sample_point = move_point(sample_point, curr_vector)
travel_path.append(sample_point)
current_date += increment
| 31.637255
| 99
| 0.654168
|
from operator import itemgetter
import time
import math
import random
import numpy as np
import datetime
from osgeo import ogr, osr
def getCoordConverter(src='', targ=''):
srcproj = osr.SpatialReference()
srcproj.ImportFromEPSG(src)
targproj = osr.SpatialReference()
if isinstance(targ, str):
targproj.ImportFromProj4(targ)
else:
targproj.ImportFromEPSG(targ)
transform = osr.CoordinateTransformation(srcproj, targproj)
def convertCoords(xy):
pt = ogr.Geometry(ogr.wkbPoint)
pt.AddPoint(xy[0], xy[1])
pt.Transform(transform)
return [pt.GetX(), pt.GetY()]
return convertCoords
latlongToAlbers = getCoordConverter(4326,5070)
albersToLatlong = getCoordConverter(5070,4326)
start_date = datetime.datetime(1992,1,1)
end_date = datetime.datetime(2017,12,31)
current_date = start_date
increment = datetime.timedelta(minutes=15)
sample_point = (-41.8822705,28.4248646) # (Long, Lat)
travel_path = [sample_point]
def get_neighbors(vector_field, x, y):
neighbors = []
for i in range(-1,2):
for j in range(-1,2):
neighbors.append(vector_field[y + j, x + i])
return list(filter(lambda x: not np.isnan(x[0]), neighbors))
def map_range(input_start,input_end,output_start,output_end,val):
slope = (output_end - output_start) / (input_end - input_start)
return output_start + slope * (val - input_start)
def move_point(latlong, distance):
#print(f"Lat Long Before: {latlong}")
point = latlongToAlbers(latlong)
#print(f"Point: {point}")
#print(f"Distance: {distance}")
point[0] += distance[0] * 900
point[1] += distance[1] * 900
#print(f"Transformed Point: {point}")
return albersToLatlong(point)
def latlongToIndex(latlong):
print(f"LatLong: {latlong}")
return [
math.floor(map_range(90,-90,0,360,latlong[1])),
math.floor(map_range(-180,180,0,720, latlong[0])),
]
while current_date < end_date:
# while line != "":
# line = sea_file.readline()
# point_data = line.split(',')
# try:
# print(type(point_data))
# print(type(point_data[1]))
# print(datetime.datetime.strptime(point_data[1][1],"%Y-%m-%d"))
# # sorted(point_data, key=lambda e: datetime.datetime.strptime(e[1], "%Y-%m-%d"))
# except Exception:
# print("sorting didn't work")
# print(point_data)
# line = ""
bin_file = f"ecco_{str(current_date.year).zfill(4)}-{str(current_date.month).zfill(2)}_000.npy"
curr_vector_field = np.load(f"../images/{bin_file}")
[y,x] = latlongToIndex(sample_point)
# print(f"Index: {[y,x]}")
# print(f"Possible Index: {curr_vector_field[y,x]}")
# print(f"Possible Index: {curr_vector_field[x,y]}")
# print(f"Does this shit even exist???? {curr_vector_field[360-y-1,x]}")
curr_vector = curr_vector_field[y,x]
if np.isnan(curr_vector[0]):
neighbors = get_neighbors(curr_vector_field, x, y)
if len(neighbors) != 0:
curr_vector = random.choice(neighbors)
sample_point = move_point(sample_point, curr_vector)
travel_path.append(sample_point)
current_date += increment
| 1,364
| 0
| 116
|
b6e4e3028a5fc99b70a4292e1c382b359d9f11a6
| 30
|
py
|
Python
|
example_pkg/__init__.py
|
IoC-Sunderland/Example-Package-Structure
|
3664c780a52d73ac93cb6bab83c1506c0a9c08c9
|
[
"MIT"
] | null | null | null |
example_pkg/__init__.py
|
IoC-Sunderland/Example-Package-Structure
|
3664c780a52d73ac93cb6bab83c1506c0a9c08c9
|
[
"MIT"
] | null | null | null |
example_pkg/__init__.py
|
IoC-Sunderland/Example-Package-Structure
|
3664c780a52d73ac93cb6bab83c1506c0a9c08c9
|
[
"MIT"
] | null | null | null |
from .example import my_func
| 15
| 29
| 0.8
|
from .example import my_func
| 0
| 0
| 0
|
7da0f36bf0c0b92c2f4f845c1893cc78ea890b2b
| 591
|
py
|
Python
|
PythonClient/reinforcement_learning/airgym/envs/xbox/xboxtest.py
|
zewuzheng17/Carintercept
|
58a18ac84631fa03ec245dcdefdcc0ead6f84d67
|
[
"MIT"
] | null | null | null |
PythonClient/reinforcement_learning/airgym/envs/xbox/xboxtest.py
|
zewuzheng17/Carintercept
|
58a18ac84631fa03ec245dcdefdcc0ead6f84d67
|
[
"MIT"
] | null | null | null |
PythonClient/reinforcement_learning/airgym/envs/xbox/xboxtest.py
|
zewuzheng17/Carintercept
|
58a18ac84631fa03ec245dcdefdcc0ead6f84d67
|
[
"MIT"
] | 1
|
2022-03-22T06:16:25.000Z
|
2022-03-22T06:16:25.000Z
|
import time
from Xboxcmd import *
import pygame
pygame.init()
pygame.joystick.init()
# Check how many controllers are currently connected
joycount = pygame.joystick.get_count()
print("joycount:"+str(joycount))
# Connect to the first controller
joystick = pygame.joystick.Joystick(0)
while True:
# Receive events
pygame.event.get()
axis = get_axis(joystick=joystick)
button = get_button(joystick=joystick)
hats = get_hats(joystick=joystick)
print("_____________")
print(" axis_value:")
print(axis)
print(" button_value")
print(button[3])
print("hat_value")
print(hats)
print("_____________")
time.sleep(3)
| 18.46875
| 42
| 0.690355
|
import time
from Xboxcmd import *
import pygame
pygame.init()
pygame.joystick.init()
# Check how many controllers are currently connected
joycount = pygame.joystick.get_count()
print("joycount:"+str(joycount))
# Connect to the first controller
joystick = pygame.joystick.Joystick(0)
while True:
# Receive events
pygame.event.get()
axis = get_axis(joystick=joystick)
button = get_button(joystick=joystick)
hats = get_hats(joystick=joystick)
print("_____________")
print(" axis_value:")
print(axis)
print(" button_value")
print(button[3])
print("hat_value")
print(hats)
print("_____________")
time.sleep(3)
| 0
| 0
| 0
|
a33569915e92987b4fd6add1ae4a92db8c9fd19a
| 3,836
|
py
|
Python
|
fourth.py
|
yk7333/DIP
|
7e8df6631d89a6bce61e45d5f9ddc671ed050732
|
[
"MIT"
] | 1
|
2021-06-02T13:28:39.000Z
|
2021-06-02T13:28:39.000Z
|
fourth.py
|
yk7333/DIP
|
7e8df6631d89a6bce61e45d5f9ddc671ed050732
|
[
"MIT"
] | null | null | null |
fourth.py
|
yk7333/DIP
|
7e8df6631d89a6bce61e45d5f9ddc671ed050732
|
[
"MIT"
] | 2
|
2021-03-28T11:15:19.000Z
|
2021-11-08T12:22:17.000Z
|
'''
@author:yk7333
last modified:2021-4-7
language:python
'''
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import os
if __name__ == "__main__":
os.chdir("C:\\Users\\m\\Desktop\\第四次作业")
for i in range(3,8,2): #3,5,7
img=read("test2.tif") #第一问
gaussion=Blur(img,i,"Gaussion")
median=Blur(img,i,"Median")
save("gaussion2{0}x{1}.jpg".format(i,i),gaussion)
save("medium2{0}x{1}.jpg".format(i,i),median)
for i in range(3,8,2):
print(Gaussion(i,1.5)) # Question 2
print("\n")
img3=read("test3_corrupt.pgm")
img4=read("test4 copy.bmp")
# unsharp masking
img3_blur=Blur(img3,5,sigma=1) # Blur with a 5x5 Gaussian filter
img4_blur=Blur(img4,5,sigma=1)
mask3=img3-img3_blur
mask4=img4-img4_blur
save("img3_unmask.jpg",mask3)
save("img4_unmask.jpg",mask4)
#Sobel edge detector
sobelx=cv.Sobel(img3,cv.CV_64F,0,1,ksize=3)
sobelx=cv.convertScaleAbs(sobelx)
sobely=cv.Sobel(img3,cv.CV_64F,1,0,ksize=3)
sobely=cv.convertScaleAbs(sobely)
sobelxy=cv.addWeighted(sobelx,0.5,sobely,0.5,0)
save("img3_sobel.jpg",sobelxy)
sobelx=cv.Sobel(img4,cv.CV_64F,0,1,ksize=3)
sobelx=cv.convertScaleAbs(sobelx)
sobely=cv.Sobel(img4,cv.CV_64F,1,0,ksize=3)
sobely=cv.convertScaleAbs(sobely)
sobelxy=cv.addWeighted(sobelx,0.5,sobely,0.5,0)
save("img4_sobel.jpg",sobelxy)
#laplace edge detection
laplacian = cv.Laplacian(img3,cv.CV_64F)
laplacian = cv.convertScaleAbs(laplacian)
save("img3_lap.jpg",laplacian)
laplacian = cv.Laplacian(img4,cv.CV_64F)
laplacian = cv.convertScaleAbs(laplacian)
save("img4_lap.jpg",laplacian)
#canny algorithm
canny=cv.Canny(img3,50,80)
save("img3_canny.jpg",canny)
canny=cv.Canny(img4,50,80)
save("img4_canny.jpg",canny)
| 33.068966
| 103
| 0.585506
|
'''
@author:yk7333
last modified:2021-4-7
language:python
'''
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import os
def show(img,name="img"):
cv.imshow(name,img)
cv.waitKey(0)
cv.destroyAllWindows()
def read(path):
return cv.imread(path,0)
def save(name,src):
return cv.imwrite(name,src)
def p(x,y,sigma): # Generate the Gaussian value
return 1/(2*np.pi*sigma**2)*np.exp(-(x**2+y**2)/(2*sigma**2))
def norm(arr): # Normalize the kernel
sumary=np.sum(arr)
return arr/sumary
def Gaussion(size,sigma): # Generate the Gaussian kernel
gaussion=np.zeros((size,size))
center_x,center_y=size//2,size//2
for i in range(size):
for j in range(size):
gaussion[i][j]=p(i-center_x,j-center_y,sigma)
gaussion=norm(gaussion)
return gaussion
def BorderProc(img,size): # Zero-padding around the border, size//2 rings in total
M,N=img.shape
arr_x=np.zeros((M,size//2))
arr_y=np.zeros((size//2,N+size-1))
img=np.hstack([arr_x,img,arr_x])
img=np.vstack([arr_y,img,arr_y])
return img
def Calculate(img,size,method,i,j,sigma): # Compute the filtered value at point (i, j)
arr=np.zeros((size,size)) # arr stores the elements around img[i][j] that the filter operates on
i+=size//2;j+=size//2 # Since size//2 rings of zeros were added around the border, both indices are offset by size//2 to land on the first non-padding element
for x in range(-size//2,size//2+1,1): # Iterate from -size/2 to size/2 over the neighborhood of (i, j)
for y in range(-size//2,size//2+1,1):
arr[x+size//2][y+size//2]=img[i+x][j+y]
if method=="Gaussion": #高斯滤波
blur=Gaussion(size,sigma)
return np.sum(arr*blur)
if method=="Median": #中值滤波
return np.median(arr)
def Blur(img,size,method="Gaussion",sigma=1): # Filtering operation
M,N=img.shape
dst=np.zeros(img.shape,dtype=np.uint8)
img=BorderProc(img,size)
for i in range(M):
for j in range(N):
dst[i][j]=Calculate(img,size,method,i,j,sigma)
return dst
if __name__ == "__main__":
os.chdir("C:\\Users\\m\\Desktop\\第四次作业")
for i in range(3,8,2): #3,5,7
img=read("test2.tif") #第一问
gaussion=Blur(img,i,"Gaussion")
median=Blur(img,i,"Median")
save("gaussion2{0}x{1}.jpg".format(i,i),gaussion)
save("medium2{0}x{1}.jpg".format(i,i),median)
for i in range(3,8,2):
print(Gaussion(i,1.5)) # Question 2
print("\n")
img3=read("test3_corrupt.pgm")
img4=read("test4 copy.bmp")
# unsharp masking
img3_blur=Blur(img3,5,sigma=1) # Blur with a 5x5 Gaussian filter
img4_blur=Blur(img4,5,sigma=1)
mask3=img3-img3_blur
mask4=img4-img4_blur
save("img3_unmask.jpg",mask3)
save("img4_unmask.jpg",mask4)
#Sobel edge detector
sobelx=cv.Sobel(img3,cv.CV_64F,0,1,ksize=3)
sobelx=cv.convertScaleAbs(sobelx)
sobely=cv.Sobel(img3,cv.CV_64F,1,0,ksize=3)
sobely=cv.convertScaleAbs(sobely)
sobelxy=cv.addWeighted(sobelx,0.5,sobely,0.5,0)
save("img3_sobel.jpg",sobelxy)
sobelx=cv.Sobel(img4,cv.CV_64F,0,1,ksize=3)
sobelx=cv.convertScaleAbs(sobelx)
sobely=cv.Sobel(img4,cv.CV_64F,1,0,ksize=3)
sobely=cv.convertScaleAbs(sobely)
sobelxy=cv.addWeighted(sobelx,0.5,sobely,0.5,0)
save("img4_sobel.jpg",sobelxy)
#laplace edge detection
laplacian = cv.Laplacian(img3,cv.CV_64F)
laplacian = cv.convertScaleAbs(laplacian)
save("img3_lap.jpg",laplacian)
laplacian = cv.Laplacian(img4,cv.CV_64F)
laplacian = cv.convertScaleAbs(laplacian)
save("img4_lap.jpg",laplacian)
#canny algorithm
canny=cv.Canny(img3,50,80)
save("img3_canny.jpg",canny)
canny=cv.Canny(img4,50,80)
save("img4_canny.jpg",canny)
| 1,918
| 0
| 219
|
0f7190955ffa54728d61c5c858685f6df3393455
| 339
|
py
|
Python
|
gists/forma_incorreta.py
|
cassioeskelsen/precisamos-falar-de-di-ioc-python
|
b7a08f887d4a6c56e8e2465087dc33687e837f34
|
[
"MIT"
] | 2
|
2022-02-03T15:49:09.000Z
|
2022-03-10T01:20:07.000Z
|
gists/forma_incorreta.py
|
cassioeskelsen/precisamos-falar-de-di-ioc-python
|
b7a08f887d4a6c56e8e2465087dc33687e837f34
|
[
"MIT"
] | null | null | null |
gists/forma_incorreta.py
|
cassioeskelsen/precisamos-falar-de-di-ioc-python
|
b7a08f887d4a6c56e8e2465087dc33687e837f34
|
[
"MIT"
] | null | null | null |
from pymongo import MongoClient
if __name__ == '__main__':
print(CustomerRepository().get_customers())
| 24.214286
| 65
| 0.666667
|
from pymongo import MongoClient
class CustomerRepository:
def get_customers(self):
mongo_client = MongoClient('mongodb://localhost:27017/')
collection = mongo_client['erp']["customer"]
return list(collection.find({}))
if __name__ == '__main__':
print(CustomerRepository().get_customers())
| 165
| 4
| 54
|
41e59c4093b29b325db70120ed587aceb09b82c7
| 2,219
|
py
|
Python
|
main.py
|
RememberTheAir/tg-github-updates
|
bad529415f4f2de2748f6c8ea5af5b81ab261a6c
|
[
"MIT"
] | 4
|
2020-03-02T10:13:27.000Z
|
2020-11-11T18:24:19.000Z
|
main.py
|
RememberTheAir/tg-github-updates
|
bad529415f4f2de2748f6c8ea5af5b81ab261a6c
|
[
"MIT"
] | 3
|
2020-03-26T09:52:38.000Z
|
2021-05-17T06:14:50.000Z
|
main.py
|
RememberTheAir/tg-github-updates
|
bad529415f4f2de2748f6c8ea5af5b81ab261a6c
|
[
"MIT"
] | 3
|
2020-05-17T02:25:14.000Z
|
2020-09-30T15:46:47.000Z
|
import os
import json
import logging
import logging.config
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import Filters
from config import config
from jobs import JOBS_CALLBACKS
import utils as u
logger = logging.getLogger(__name__)
load_logging_config()
@u.restricted
@u.restricted
@u.restricted
if __name__ == '__main__':
main()
| 29.586667
| 132
| 0.703921
|
import os
import json
import logging
import logging.config
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import Filters
from config import config
from jobs import JOBS_CALLBACKS
import utils as u
def load_logging_config(config_file_path='logging.json'):
with open(config_file_path, 'r') as f:
logging_config = json.load(f)
logging.config.dictConfig(logging_config)
logger = logging.getLogger(__name__)
load_logging_config()
@u.restricted
def delete_downloads(_, update):
logger.info('cleaning download dir')
files = [f for f in os.listdir('downloads/') if f != '.gitkeep']
for f in files:
os.remove(os.path.join('downloads', f))
update.message.reply_text('Deleted {} files'.format(len(files)))
@u.restricted
def send_db(_, update):
logger.info('sending_db')
with open(config.database.filename, 'rb') as f:
update.message.reply_document(f)
@u.restricted
def help_command(_, update):
logger.info('help')
commands = ['/del', '/db', '/start']
update.message.reply_text('Commands: {}'.format(', '.join(commands)))
def main():
updater = Updater(token=config.telegram.token, workers=config.telegram.run_async_workers)
dispatcher = updater.dispatcher
jobs = updater.job_queue
logger.info('registering %d scheduled jobs', len(JOBS_CALLBACKS))
for callback in JOBS_CALLBACKS:
jobs.run_repeating(callback, interval=config.jobs.run_every, first=config.jobs.start_after)
# dispatcher.add_handler(MessageHandler(~Filters.private & ~Filters.group & Filters.text, on_channel_post))
dispatcher.add_handler(CommandHandler(['del'], delete_downloads, filters=Filters.private))
dispatcher.add_handler(CommandHandler(['db'], send_db, filters=Filters.private))
dispatcher.add_handler(CommandHandler(['start', 'help'], help_command, filters=Filters.private))
logger.info('starting polling loop as @%s (run_async workers: %d)...', updater.bot.username, config.telegram.run_async_workers)
updater.start_polling(clean=False)
updater.idle()
if __name__ == '__main__':
main()
| 1,675
| 0
| 119
|
d4c3e9e40cb29b97068cc11b38b7b15678c17a95
| 10,056
|
py
|
Python
|
scripts/checker.py
|
drycc/stacks
|
11612170603d108451a58254c9ab0064a827c636
|
[
"Apache-2.0"
] | null | null | null |
scripts/checker.py
|
drycc/stacks
|
11612170603d108451a58254c9ab0064a827c636
|
[
"Apache-2.0"
] | 48
|
2022-03-02T02:14:35.000Z
|
2022-03-30T00:07:47.000Z
|
scripts/checker.py
|
drycc/stacks
|
11612170603d108451a58254c9ab0064a827c636
|
[
"Apache-2.0"
] | 2
|
2022-02-14T06:25:55.000Z
|
2022-02-16T05:38:57.000Z
|
import os
import re
import json
import requests
from datetime import datetime
github_headers = {'Authorization': 'token %s' % os.environ.get("GITHUB_TOKEN")}
repo_info_table = {
"vouch-proxy": {
"name": "vouch-proxy",
"type": "github",
"owner": "vouch",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"redis_exporter": {
"name": "redis_exporter",
"type": "github",
"owner": "oliver006",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"mysqld_exporter": {
"name": "mysqld_exporter",
"type": "github",
"owner": "prometheus",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"postgres_exporter": {
"name": "postgres_exporter",
"type": "github",
"owner": "prometheus-community",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"caddy": {
"name": "caddy",
"type": "github",
"owner": "caddyserver",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"envtpl": {
"name": "envtpl",
"type": "github",
"owner": "subfuzion",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"erlang": {
"name": "otp",
"type": "github",
"owner": "erlang",
"match": "^OTP-[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"fluentd": {
"name": "fluentd",
"type": "github",
"owner": "fluent",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"go": {
"name": "go",
"type": "github",
"owner": "golang",
"match": "^go[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"gosu": {
"name": "gosu",
"type": "github",
"owner": "tianon",
"match": "^[0-9]{1,}\.[0-9]{1,}$",
},
"grafana": {
"name": "grafana",
"type": "github",
"owner": "grafana",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"helm": {
"name": "helm",
"type": "github",
"owner": "helm",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"influxdb": {
"name": "influxdb",
"type": "github",
"owner": "influxdata",
"match": "^v[2-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"ini-file": {
"name": "ini-file",
"type": "github",
"owner": "bitnami",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"java": {
"name": "jdk",
"type": "github",
"owner": "openjdk",
"match": "^jdk-[0-9]{1,}\+[0-9]{1,}$",
},
"jq": {
"name": "jq",
"type": "github",
"owner": "stedolan",
"match": "^jq-[0-9]{1,}\.[0-9]{1,}\.?[0-9]{0}$",
},
"kubectl": {
"name": "kubectl",
"type": "github",
"owner": "kubernetes",
"match": "^kubernetes-[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"mariadb": {
"name": "server",
"type": "github",
"owner": "MariaDB",
"match": "^mariadb-[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"mc": {
"name": "mc",
"type": "github",
"owner": "minio",
"match": "^RELEASE\.[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}-[0-9]{2}-[0-9]{2}Z$",
},
"minio": {
"name": "minio",
"type": "github",
"owner": "minio",
"match": "^RELEASE\.[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}-[0-9]{2}-[0-9]{2}Z$",
},
"nginx": {
"name": "nginx",
"type": "github",
"owner": "nginx",
"match": "^release-[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"node": {
"name": "node",
"type": "github",
"owner": "nodejs",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"pack": {
"name": "pack",
"type": "github",
"owner": "buildpacks",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"php": {
"name": "php-src",
"type": "github",
"owner": "php",
"match": "^php-[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"podman": {
"name": "podman",
"type": "github",
"owner": "containers",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"postgresql": {
"name": "postgres",
"type": "github",
"owner": "postgres",
"match": "^REL_[0-9]{1,}_[0-9]{1,}$",
},
"python": {
"name": "cpython",
"type": "github",
"owner": "python",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"rabbitmq": {
"name": "rabbitmq-server",
"type": "github",
"owner": "rabbitmq",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"redis": {
"name": "redis",
"type": "github",
"owner": "redis",
"match": "^[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"redis-sentinel": {
"name": "redis",
"type": "github",
"owner": "redis",
"match": "^[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"registry": {
"name": "distribution",
"type": "github",
"owner": "distribution",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"ruby": {
"name": "ruby",
"type": "github",
"owner": "ruby",
"match": "^v[0-9]{1,}_[0-9]{1,}_[0-9]{1,}$",
},
"rust": {
"name": "rust",
"type": "github",
"owner": "rust-lang",
"match": "^[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"telegraf": {
"name": "telegraf",
"type": "github",
"owner": "influxdata",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"wait-for-port": {
"name": "wait-for-port",
"type": "github",
"owner": "bitnami",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"wal-g": {
"name": "wal-g",
"type": "github",
"owner": "wal-g",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.?[0-9]{0}$",
},
"yj": {
"name": "yj",
"type": "github",
"owner": "sclevine",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
}
github_tags_graphql = """
query {
repository(owner: "{owner}", name: "{name}") {
refs(refPrefix: "refs/tags/", first: 10, orderBy: {field: TAG_COMMIT_DATE, direction: DESC}) {
edges {
node {
name
target {
oid
... on Tag {
commitUrl
tagger {
date
}
}
}
}
}
}
}
}
"""
if __name__ == "__main__":
main()
| 28.731429
| 99
| 0.423528
|
import os
import re
import json
import requests
from datetime import datetime
github_headers = {'Authorization': 'token %s' % os.environ.get("GITHUB_TOKEN")}
repo_info_table = {
"vouch-proxy": {
"name": "vouch-proxy",
"type": "github",
"owner": "vouch",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"redis_exporter": {
"name": "redis_exporter",
"type": "github",
"owner": "oliver006",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"mysqld_exporter": {
"name": "mysqld_exporter",
"type": "github",
"owner": "prometheus",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"postgres_exporter": {
"name": "postgres_exporter",
"type": "github",
"owner": "prometheus-community",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"caddy": {
"name": "caddy",
"type": "github",
"owner": "caddyserver",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"envtpl": {
"name": "envtpl",
"type": "github",
"owner": "subfuzion",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"erlang": {
"name": "otp",
"type": "github",
"owner": "erlang",
"match": "^OTP-[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"fluentd": {
"name": "fluentd",
"type": "github",
"owner": "fluent",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"go": {
"name": "go",
"type": "github",
"owner": "golang",
"match": "^go[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"gosu": {
"name": "gosu",
"type": "github",
"owner": "tianon",
"match": "^[0-9]{1,}\.[0-9]{1,}$",
},
"grafana": {
"name": "grafana",
"type": "github",
"owner": "grafana",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"helm": {
"name": "helm",
"type": "github",
"owner": "helm",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"influxdb": {
"name": "influxdb",
"type": "github",
"owner": "influxdata",
"match": "^v[2-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"ini-file": {
"name": "ini-file",
"type": "github",
"owner": "bitnami",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"java": {
"name": "jdk",
"type": "github",
"owner": "openjdk",
"match": "^jdk-[0-9]{1,}\+[0-9]{1,}$",
},
"jq": {
"name": "jq",
"type": "github",
"owner": "stedolan",
"match": "^jq-[0-9]{1,}\.[0-9]{1,}\.?[0-9]{0}$",
},
"kubectl": {
"name": "kubectl",
"type": "github",
"owner": "kubernetes",
"match": "^kubernetes-[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"mariadb": {
"name": "server",
"type": "github",
"owner": "MariaDB",
"match": "^mariadb-[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"mc": {
"name": "mc",
"type": "github",
"owner": "minio",
"match": "^RELEASE\.[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}-[0-9]{2}-[0-9]{2}Z$",
},
"minio": {
"name": "minio",
"type": "github",
"owner": "minio",
"match": "^RELEASE\.[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}-[0-9]{2}-[0-9]{2}Z$",
},
"nginx": {
"name": "nginx",
"type": "github",
"owner": "nginx",
"match": "^release-[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"node": {
"name": "node",
"type": "github",
"owner": "nodejs",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"pack": {
"name": "pack",
"type": "github",
"owner": "buildpacks",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"php": {
"name": "php-src",
"type": "github",
"owner": "php",
"match": "^php-[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"podman": {
"name": "podman",
"type": "github",
"owner": "containers",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"postgresql": {
"name": "postgres",
"type": "github",
"owner": "postgres",
"match": "^REL_[0-9]{1,}_[0-9]{1,}$",
},
"python": {
"name": "cpython",
"type": "github",
"owner": "python",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"rabbitmq": {
"name": "rabbitmq-server",
"type": "github",
"owner": "rabbitmq",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"redis": {
"name": "redis",
"type": "github",
"owner": "redis",
"match": "^[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"redis-sentinel": {
"name": "redis",
"type": "github",
"owner": "redis",
"match": "^[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"registry": {
"name": "distribution",
"type": "github",
"owner": "distribution",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"ruby": {
"name": "ruby",
"type": "github",
"owner": "ruby",
"match": "^v[0-9]{1,}_[0-9]{1,}_[0-9]{1,}$",
},
"rust": {
"name": "rust",
"type": "github",
"owner": "rust-lang",
"match": "^[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"telegraf": {
"name": "telegraf",
"type": "github",
"owner": "influxdata",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"wait-for-port": {
"name": "wait-for-port",
"type": "github",
"owner": "bitnami",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"wal-g": {
"name": "wal-g",
"type": "github",
"owner": "wal-g",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.?[0-9]{0}$",
},
"yj": {
"name": "yj",
"type": "github",
"owner": "sclevine",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
}
def create_github_tag(stack, tag_name):
sha = requests.get(
"https://api.github.com/repos/drycc/stacks/git/trees/main",
headers=github_headers,
).json()["sha"]
response = requests.post(
"https://api.github.com/repos/drycc/stacks/git/tags",
data = json.dumps({
"tag": tag_name,
"object": sha,
"message": f"new build for {stack}",
"type": "commit",
}),
headers=github_headers,
).json()
params = dict(ref=f"refs/tags/{tag_name}", sha=response['object']['sha'])
response = requests.post(
'https://api.github.com/repos/drycc/stacks/git/refs',
data=json.dumps(params),
headers=github_headers,
)
def create_github_issue(stack, tag_name):
strip_regex = "^[a-zA-Z\-_]{1,}"
replace_regex = "[a-zA-Z\+\-_]{1,}"
version = tag_name
if re.search(strip_regex, tag_name):
version = re.subn(strip_regex, "", tag_name)[0]
if re.search(replace_regex, version):
version = re.subn(replace_regex, ".", version)[0].strip(".")
if requests.get(
f"https://api.github.com/repos/drycc/stacks/git/ref/tags/{stack}@{version}",
headers=github_headers,
).status_code == 404:
create_github_tag(stack, f"{stack}@{version}")
link = f"https://github.com/search?q=org%3Adrycc+install-stack+{stack}&type=code"
requests.post(
"https://api.github.com/repos/drycc/stacks/issues",
data = json.dumps({
"title": f"new build for {stack}@{version}",
"body": f"Please judge whether the [referenced item]({link}) needs to be changed.",
"labels": ["tag"]
}),
headers=github_headers,
)
github_tags_graphql = """
query {
repository(owner: "{owner}", name: "{name}") {
refs(refPrefix: "refs/tags/", first: 10, orderBy: {field: TAG_COMMIT_DATE, direction: DESC}) {
edges {
node {
name
target {
oid
... on Tag {
commitUrl
tagger {
date
}
}
}
}
}
}
}
}
"""
def check_github_version(stack):
info = repo_info_table[stack]
response = requests.post(
"https://api.github.com/graphql",
data=json.dumps({
"query": github_tags_graphql.replace(
"{owner}", info["owner"]).replace("{name}", info["name"]),
}),
headers=github_headers,
)
for tag in response.json()["data"]["repository"]["refs"]["edges"]:
if "tagger" in tag["node"]["target"]:
date = datetime.strptime(
tag["node"]["target"]["tagger"]["date"][:19], "%Y-%m-%dT%H:%M:%S")
else:
date = datetime.strptime(
requests.get(
"https://api.github.com/repos/{}/{}/commits/{}".format(
info["owner"], info["name"], tag["node"]["target"]["oid"]
), headers=github_headers
).json()["commit"]["author"]["date"][:19],
"%Y-%m-%dT%H:%M:%S"
)
if re.match(info["match"], tag["node"]["name"]):
if (datetime.utcnow() - date).days < 5:
create_github_issue(stack, tag["node"]["name"])
else:
break
def main():
for stack in os.listdir(os.path.join(os.path.dirname(__file__), "..", "stacks")):
if stack not in repo_info_table:
raise NotImplementedError(f"{stack} not in repo_info_table")
else:
repo_type = repo_info_table[stack]["type"]
if repo_type != "github":
raise NotImplementedError(f"{repo_type} NotImplemented")
else:
check_github_version(stack)
if __name__ == "__main__":
main()
| 3,325
| 0
| 92
|
2ef6a7ce8c173936acfa269bb9cf748f58336568
| 106
|
py
|
Python
|
utils/__init__.py
|
rshube/music-vae
|
56b9ffdf759aa7d97ce7cf35dee54fd35d6cfc65
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
rshube/music-vae
|
56b9ffdf759aa7d97ce7cf35dee54fd35d6cfc65
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
rshube/music-vae
|
56b9ffdf759aa7d97ce7cf35dee54fd35d6cfc65
|
[
"MIT"
] | null | null | null |
from .args import ArgsWrapper
from .dataset import Dataset
from .consts import DATA_PATH, TRAINING_DATASET
| 35.333333
| 47
| 0.849057
|
from .args import ArgsWrapper
from .dataset import Dataset
from .consts import DATA_PATH, TRAINING_DATASET
| 0
| 0
| 0
|
ed5fa4746cd554bdb85545c30a4da0fac95423f4
| 440
|
py
|
Python
|
Tests/test.py
|
jkruse27/Foxhound
|
08a746a5335c2b71aad2fb08c86d1795ac24d476
|
[
"MIT"
] | null | null | null |
Tests/test.py
|
jkruse27/Foxhound
|
08a746a5335c2b71aad2fb08c86d1795ac24d476
|
[
"MIT"
] | null | null | null |
Tests/test.py
|
jkruse27/Foxhound
|
08a746a5335c2b71aad2fb08c86d1795ac24d476
|
[
"MIT"
] | null | null | null |
from Dataset import *
from datetime import *
import time
dataset = Dataset('TestData/Dados.csv')
begin_date = datetime.strptime('2021-08-2 12:00',"%Y-%m-%d %H:%M")
end_date = datetime.strptime('2021-08-7 12:00',"%Y-%m-%d %H:%M")
main_var = 'TU-11C:SS-HLS-Ax48NW5:Level-Mon'
start = time.time()
delays, corrs, names = dataset.correlate(main_var, begin_date, end_date, 0.2)
end = time.time()
print(end - start)
print(delays)
print(corrs)
| 24.444444
| 77
| 0.7
|
from Dataset import *
from datetime import *
import time
dataset = Dataset('TestData/Dados.csv')
begin_date = datetime.strptime('2021-08-2 12:00',"%Y-%m-%d %H:%M")
end_date = datetime.strptime('2021-08-7 12:00',"%Y-%m-%d %H:%M")
main_var = 'TU-11C:SS-HLS-Ax48NW5:Level-Mon'
start = time.time()
delays, corrs, names = dataset.correlate(main_var, begin_date, end_date, 0.2)
end = time.time()
print(end - start)
print(delays)
print(corrs)
| 0
| 0
| 0
|
99049aa52546cdb02a79df162cd3e4c306b133ba
| 315
|
py
|
Python
|
textX-LS/core/tests/test_utils.py
|
goto40/textX-LS
|
a6b357186f7adaf7c30aed4d543ae8d149c80a25
|
[
"MIT"
] | null | null | null |
textX-LS/core/tests/test_utils.py
|
goto40/textX-LS
|
a6b357186f7adaf7c30aed4d543ae8d149c80a25
|
[
"MIT"
] | null | null | null |
textX-LS/core/tests/test_utils.py
|
goto40/textX-LS
|
a6b357186f7adaf7c30aed4d543ae8d149c80a25
|
[
"MIT"
] | null | null | null |
import pytest
from textx_ls_core import utils
@pytest.mark.parametrize("uri, expected_ext", [
(None, ''),
('', ''),
('/test/path/file.txt', 'txt'),
('Textxfile', 'Textxfile')
])
| 22.5
| 47
| 0.657143
|
import pytest
from textx_ls_core import utils
@pytest.mark.parametrize("uri, expected_ext", [
(None, ''),
('', ''),
('/test/path/file.txt', 'txt'),
('Textxfile', 'Textxfile')
])
def test_get_file_extension(uri, expected_ext):
ext = utils.get_file_extension(uri)
assert ext == expected_ext
| 97
| 0
| 22
|
b75127cedc1c587e657854e2855292dbcfd3ea83
| 387
|
py
|
Python
|
setup.py
|
isears/openmrsapi
|
50e5329b1ecc74f8e4d94f71e4b0e0207ac705d6
|
[
"MIT"
] | 1
|
2018-10-28T10:14:22.000Z
|
2018-10-28T10:14:22.000Z
|
setup.py
|
isears/openmrsapi
|
50e5329b1ecc74f8e4d94f71e4b0e0207ac705d6
|
[
"MIT"
] | null | null | null |
setup.py
|
isears/openmrsapi
|
50e5329b1ecc74f8e4d94f71e4b0e0207ac705d6
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(
name='openmrsapi',
version='0.1',
description='a library for interacting with openmrs api in python',
url='https://github.com/isears/openmrsapi',
author='Isaac Sears',
author_email='isaac.j.sears@gmail.com',
license='MIT',
packages=['openmrsapi'],
zip_safe=False,
install_requires=[
'requests'
]
)
| 22.764706
| 71
| 0.653747
|
from setuptools import setup
setup(
name='openmrsapi',
version='0.1',
description='a library for interacting with openmrs api in python',
url='https://github.com/isears/openmrsapi',
author='Isaac Sears',
author_email='isaac.j.sears@gmail.com',
license='MIT',
packages=['openmrsapi'],
zip_safe=False,
install_requires=[
'requests'
]
)
| 0
| 0
| 0
|
4e019d4a52876ce74dbd2768087cd44e8e77eb8b
| 101
|
py
|
Python
|
run.py
|
SPK-RPI/FlaskBlog
|
45c816a3720988e151b59dd5fbe5abab71e25fcc
|
[
"MIT"
] | null | null | null |
run.py
|
SPK-RPI/FlaskBlog
|
45c816a3720988e151b59dd5fbe5abab71e25fcc
|
[
"MIT"
] | null | null | null |
run.py
|
SPK-RPI/FlaskBlog
|
45c816a3720988e151b59dd5fbe5abab71e25fcc
|
[
"MIT"
] | null | null | null |
from blogposts import app
if __name__ == '__main__':
app.run(host='192.168.43.57',debug=True)
| 25.25
| 45
| 0.683168
|
from blogposts import app
if __name__ == '__main__':
app.run(host='192.168.43.57',debug=True)
| 0
| 0
| 0
|
e6d23aba45b82cb0b235f7c6849de37983d83785
| 6,277
|
py
|
Python
|
workers.py
|
Recursing/MySubredditsBot
|
5ca0f7cf9c210acc82009001cfb9915a2b62a95f
|
[
"MIT"
] | 6
|
2020-05-26T10:02:28.000Z
|
2021-12-07T18:11:51.000Z
|
workers.py
|
Recursing/MySubredditsBot
|
5ca0f7cf9c210acc82009001cfb9915a2b62a95f
|
[
"MIT"
] | 3
|
2022-01-11T10:10:53.000Z
|
2022-01-23T10:50:01.000Z
|
workers.py
|
Recursing/MySubredditsBot
|
5ca0f7cf9c210acc82009001cfb9915a2b62a95f
|
[
"MIT"
] | null | null | null |
import asyncio
import logging
import random
import time
from datetime import datetime
from typing import Any, Dict, Optional, Tuple
import reddit_adapter
import subscriptions_manager
import telegram_adapter
workers: Dict[Tuple[int, str], asyncio.Task[Any]] = {}
async def check_exceptions(refresh_period: int = 24 * 60 * 60):
"""
Check whether private or banned subs are now available
"""
while True:
unavailable_subs = subscriptions_manager.unavailable_subreddits()
for sub in unavailable_subs:
try:
try:
await reddit_adapter.new_posts(sub)
except (
reddit_adapter.SubredditPrivate,
reddit_adapter.SubredditBanned,
):
continue
old_subscribers = subscriptions_manager.get_old_subscribers(sub)
for chat_id in old_subscribers:
subscriptions_manager.subscribe(chat_id, sub, 31)
await telegram_adapter.send_message(
chat_id, f"{sub} is now available again"
)
subscriptions_manager.delete_exception(sub)
except Exception as e:
await telegram_adapter.send_exception(
e, f"Exception while checking unavailability of {sub}"
)
await asyncio.sleep(refresh_period)
| 38.27439
| 93
| 0.641071
|
import asyncio
import logging
import random
import time
from datetime import datetime
from typing import Any, Dict, Optional, Tuple
import reddit_adapter
import subscriptions_manager
import telegram_adapter
async def send_subscription_update(subreddit: str, chat_id: int, per_month: int):
# Send top unsent post from subreddit to chat_id
# per_month is used only to choose where to look for posts (see get_posts)
try:
post_iterator = await reddit_adapter.get_posts(subreddit, per_month)
if per_month > 1000:
post_iterator += await reddit_adapter.new_posts(subreddit)
for post in post_iterator:
if subscriptions_manager.already_sent(chat_id, post["id"]):
continue
if post["created_utc"] < time.time() - 86400 * 40:
continue
await telegram_adapter.send_post(chat_id, post, subreddit)
break
else:
logging.info(f"No post to send from {subreddit} to {chat_id}, {per_month=}")
except reddit_adapter.SubredditBanned:
if not subscriptions_manager.already_sent_exception(
chat_id, subreddit, "banned"
):
await telegram_adapter.send_message(
chat_id, f"r/{subreddit} has been banned"
)
subscriptions_manager.mark_exception_as_sent(chat_id, subreddit, "banned")
subscriptions_manager.unsubscribe(chat_id, subreddit)
except reddit_adapter.SubredditPrivate:
if not subscriptions_manager.already_sent_exception(
chat_id, subreddit, "private"
):
await telegram_adapter.send_message(
chat_id, f"r/{subreddit} has been made private"
)
subscriptions_manager.mark_exception_as_sent(chat_id, subreddit, "private")
subscriptions_manager.unsubscribe(chat_id, subreddit)
except Exception as e:
logging.error(f"{e!r} while sending sub updates, sleeping")
await telegram_adapter.send_exception(
e, f"send_subscription_update({subreddit}, {chat_id}, {per_month})"
)
time.sleep(60 * 2)
async def make_worker(
chat_id: int, subreddit: str, per_month: int, last_message: Optional[datetime]
):
period = 3600 * 24 * 31 / per_month
    # Randomize the period by a few seconds to prevent workers from syncing up
period += random.random() * 10 - 5
# Before the first run sleep randomly a bit to offset the worker
if not last_message:
init_sleep = random.random() * period / 2
else:
already_elapsed = (datetime.utcnow() - last_message).total_seconds()
init_sleep = max(random.random() * 30, period - already_elapsed)
t0 = time.monotonic()
print(f"{chat_id}, {subreddit}, {per_month}, {period=}, {init_sleep=:.2f}")
await asyncio.sleep(init_sleep)
elapsed = time.monotonic() - t0
print(
f"{chat_id}, {subreddit} starting to send, slept {elapsed=:.2f} vs {init_sleep=:.2f}"
)
while True:
t0 = time.monotonic()
await send_subscription_update(subreddit, chat_id, per_month)
elapsed = time.monotonic() - t0
await asyncio.sleep(period - elapsed)
elapsed = time.monotonic() - t0
logging.info(
f"{elapsed=:.2f}s vs {period=:.2f} for woker {chat_id} {subreddit} {per_month}"
)
workers: Dict[Tuple[int, str], asyncio.Task[Any]] = {}
def stop_worker(chat_id: int, subreddit: str):
try:
print(f"stopping {chat_id} {subreddit}")
workers[(chat_id, subreddit)].cancel()
del workers[(chat_id, subreddit)]
except Exception as e:
logging.error(f"Cannot stop worker ({chat_id}, {subreddit}), {e!r}")
asyncio.create_task(
telegram_adapter.send_exception(
e, f"Cannot stop worker ({chat_id}, {subreddit})"
)
)
def start_worker(chat_id: int, subreddit: str, per_month: int):
if (chat_id, subreddit) in workers:
stop_worker(chat_id, subreddit)
last_message = subscriptions_manager.get_last_subscription_message(
chat_id, subreddit
)
workers[(chat_id, subreddit)] = asyncio.create_task(
make_worker(chat_id, subreddit, per_month, last_message)
)
def start_workers():
logging.info(f"{datetime.now()} Starting workers...")
subscriptions = subscriptions_manager.get_subscriptions()
while len(subscriptions) == 0:
print("Waiting for subscriptions...")
subscriptions = subscriptions_manager.get_subscriptions()
time.sleep(10)
random.shuffle(subscriptions)
for chat_id, subreddit, per_month in subscriptions:
last_message = subscriptions_manager.get_last_subscription_message(
chat_id, subreddit
)
print("Making worker: ", chat_id, subreddit, per_month, last_message)
workers[(chat_id, subreddit)] = asyncio.create_task(
make_worker(chat_id, subreddit, per_month, last_message)
)
async def check_exceptions(refresh_period: int = 24 * 60 * 60):
"""
Check whether private or banned subs are now available
"""
while True:
unavailable_subs = subscriptions_manager.unavailable_subreddits()
for sub in unavailable_subs:
try:
try:
await reddit_adapter.new_posts(sub)
except (
reddit_adapter.SubredditPrivate,
reddit_adapter.SubredditBanned,
):
continue
old_subscribers = subscriptions_manager.get_old_subscribers(sub)
for chat_id in old_subscribers:
subscriptions_manager.subscribe(chat_id, sub, 31)
await telegram_adapter.send_message(
chat_id, f"{sub} is now available again"
)
subscriptions_manager.delete_exception(sub)
except Exception as e:
await telegram_adapter.send_exception(
e, f"Exception while checking unavailability of {sub}"
)
await asyncio.sleep(refresh_period)
async def on_startup(_dispatcher: Any):
start_workers()
asyncio.create_task(check_exceptions())
| 4,698
| 0
| 138
|
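The make_worker coroutine above derives its send period from the monthly quota and adds jitter to both the period and the initial sleep so that workers do not fire in lockstep. A minimal standalone sketch of that scheduling arithmetic, with illustrative numbers rather than values from the bot's configuration:
import random
from datetime import datetime, timedelta
from typing import Optional, Tuple

def worker_schedule(per_month: int, last_message: Optional[datetime]) -> Tuple[float, float]:
    # Seconds between posts: a 31-day month divided by the per-month quota,
    # e.g. per_month=31 gives roughly one post per day (~86,400 s).
    period = 3600 * 24 * 31 / per_month
    period += random.random() * 10 - 5  # +/- 5 s jitter to de-synchronize workers
    if last_message is None:
        init_sleep = random.random() * period / 2  # new subscription: random offset
    else:
        already_elapsed = (datetime.utcnow() - last_message).total_seconds()
        init_sleep = max(random.random() * 30, period - already_elapsed)
    return period, init_sleep

print(worker_schedule(31, datetime.utcnow() - timedelta(hours=12)))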
c6b3d328f2d98b34f2cdd6bd4d215c9101ed10e3
| 7,039
|
py
|
Python
|
instruments/triggering.py
|
gronchi/mpts
|
5e33e885b099a337c0cadc78840dd2c216a6d9df
|
[
"MIT"
] | null | null | null |
instruments/triggering.py
|
gronchi/mpts
|
5e33e885b099a337c0cadc78840dd2c216a6d9df
|
[
"MIT"
] | null | null | null |
instruments/triggering.py
|
gronchi/mpts
|
5e33e885b099a337c0cadc78840dd2c216a6d9df
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Implementation of the Trigger Unit communication."""
import logging
import re
import socket
_log = logging.getLogger(__name__)
physical_names = {
'A2_Delay': r'Simmer_delay(1uS)',
'A4_Delay': r'Burst_delay(1uS)',
'A4_Number': r'Burst_number',
'A4_Period': r'Burst_period(1uS)',
'A5_Pulse': r'Trigger_Enable_pulse(1uS)',
'B1_Delay': r'ADC_Enable_delay(1uS)',
'B1_Pulse': r'ADC_Enable_pulse(1uS)',
'B2_Delay': r'CMOS_plasma_delay(1uS)',
'B2_Number': r'CMOS_Plasma_number',
'B2_Period': r'CMOS_Plasma_period(1uS)',
'B2_Pulse': r'CMOS_Plasma_pulse(1uS)',
'B4_Delay': r'CMOS_Laser_delay(0.1uS)',
'B4_Pulse': r'CMOS_Laser_pulse(0.1uS)',
'B5_Delay': r'II_Gate_Plasma_delay(0.1uS)',
'B5_Number': r'II_Gate_Plasma_number',
'B5_Period': r'II_Gate_Plasma_period(0.1uS)',
'B5_Pulse': r'II_Gate_Plasma_pulse(0.1uS)',
'B6_Delay': r'II_Plasma_Delay_delay(0.1uS)',
'B6_Pulse': r'II_Plasma_Delay_pulse(0.1uS)',
'B7_Delay': r'II_Gate_Laser_delay(0.1uS)',
'B7_Pulse': r'II_Gate_Laser_pulse(0.1uS)',
'B8_Delay': r'II_Flash_Bool_delay(1uS)',
'B8_Pulse': r'II_Flash_Bool_pulse(1uS)',
'B9_Delay': r'Flash_delay(1uS)',
'B9_Pulse': r'Flash_pulse(1uS)',
'B12_Delay': r'Pockels_delay(1uS)',
'B12_Number': r'Pockels_number',
'B12_Period': r'Pockels_period(1uS)',
'B12_Pulse': r'Pockels_pulse(1uS)',
'TS0_Delay': r'TS0_Delay(1uS)',
'TS0_Period': r'TS0_Period(1uS)',
'Enable_IOs': r'Enable_IOs',
'A1_SW_enable': r'A1_SW_enable',
'A2_SW_enable': r'A2_SW_enable',
'A4_SW_enable': r'A4_SW_enable',
'CMOSPOn': r'CMOSPOn',
'CMOSLOn': r'CMOSLOn'
}
try:
# For Python 3
logical_names = {v: k for k, v in physical_names.items()}
except:
# For Python 2
logical_names = dict((v, k) for k, v in physical_names.iteritems())
regex = re.compile(r'(\S+)\s*=\s*"(\S+)"')
| 35.913265
| 123
| 0.594687
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Implementation of the Trigger Unit communication."""
import logging
import re
import socket
_log = logging.getLogger(__name__)
physical_names = {
'A2_Delay': r'Simmer_delay(1uS)',
'A4_Delay': r'Burst_delay(1uS)',
'A4_Number': r'Burst_number',
'A4_Period': r'Burst_period(1uS)',
'A5_Pulse': r'Trigger_Enable_pulse(1uS)',
'B1_Delay': r'ADC_Enable_delay(1uS)',
'B1_Pulse': r'ADC_Enable_pulse(1uS)',
'B2_Delay': r'CMOS_plasma_delay(1uS)',
'B2_Number': r'CMOS_Plasma_number',
'B2_Period': r'CMOS_Plasma_period(1uS)',
'B2_Pulse': r'CMOS_Plasma_pulse(1uS)',
'B4_Delay': r'CMOS_Laser_delay(0.1uS)',
'B4_Pulse': r'CMOS_Laser_pulse(0.1uS)',
'B5_Delay': r'II_Gate_Plasma_delay(0.1uS)',
'B5_Number': r'II_Gate_Plasma_number',
'B5_Period': r'II_Gate_Plasma_period(0.1uS)',
'B5_Pulse': r'II_Gate_Plasma_pulse(0.1uS)',
'B6_Delay': r'II_Plasma_Delay_delay(0.1uS)',
'B6_Pulse': r'II_Plasma_Delay_pulse(0.1uS)',
'B7_Delay': r'II_Gate_Laser_delay(0.1uS)',
'B7_Pulse': r'II_Gate_Laser_pulse(0.1uS)',
'B8_Delay': r'II_Flash_Bool_delay(1uS)',
'B8_Pulse': r'II_Flash_Bool_pulse(1uS)',
'B9_Delay': r'Flash_delay(1uS)',
'B9_Pulse': r'Flash_pulse(1uS)',
'B12_Delay': r'Pockels_delay(1uS)',
'B12_Number': r'Pockels_number',
'B12_Period': r'Pockels_period(1uS)',
'B12_Pulse': r'Pockels_pulse(1uS)',
'TS0_Delay': r'TS0_Delay(1uS)',
'TS0_Period': r'TS0_Period(1uS)',
'Enable_IOs': r'Enable_IOs',
'A1_SW_enable': r'A1_SW_enable',
'A2_SW_enable': r'A2_SW_enable',
'A4_SW_enable': r'A4_SW_enable',
'CMOSPOn': r'CMOSPOn',
'CMOSLOn': r'CMOSLOn'
}
try:
# For Python 3
logical_names = {v: k for k, v in physical_names.items()}
except:
# For Python 2
logical_names = dict((v, k) for k, v in physical_names.iteritems())
regex = re.compile(r'(\S+)\s*=\s*"(\S+)"')
class TriggerUnit():
MAX_MESSAGE_SIZE = 65536
def __init__(self, ip=None, port=15000, default_fps=None, debug=False):
"""Set up all communications with the camera.
When a new Phantom object is made it will broadcast out over the
network to find the camera and initialize command and data TCP
connections with it so that future interactions with the camera
work.
"""
self.name = ""
self.ip = ip
self.port = port
self._cmd_sock = None
self.connection_status = False
self.debug = debug
if ip is not None:
self.openConnection(self.ip, self.port)
def openConnection(self, ip=None, port=15000, cmd_timeout=1):
self.ip = ip or self.ip
self.port = port or self.port
try:
# Set up the command connection
print("Trying to connect to the Triggering unit - CompactRio (IP: %s, port: %s)." % (self.ip, self.port))
self._cmd_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._cmd_sock.settimeout(cmd_timeout)
self._cmd_sock.connect((self.ip, self.port))
self.connection_status = True
except:
print("Error trying to connect to the Triggering unit - CompactRio (IP: %s, port: %s)." % (self.ip, self.port))
self.connection_status = False
pass
def isConnected(self):
try:
# Makes a simple request and wait for the answer to check if the connection is working
self._cmd_sock.send(b'Mode = "?"\r\n')
recv = self._cmd_sock.recv(self.MAX_MESSAGE_SIZE).decode('latin-1')
if len(recv) > 0:
self.connection_status = True
return True
else:
self.connection_status = False
return False
except:
self.connection_status = False
return False
def closeConnection(self):
if self._cmd_sock is not None:
self._cmd_sock.close()
self._cmd_sock = None
self.connection_status = False
def _SendCommandAsync(self, cmd):
"""Send command without waiting for the response.
You must call ReceiveCommandResponse before calling this method again.
"""
_log.debug("SEND(%d): %s", len(cmd), cmd)
if self.debug:
print("cRio: >>%s" % cmd)
cmd = bytearray(cmd, 'latin-1') + b"\r\n"
total_sent = 0
while total_sent < len(cmd):
sent = self._cmd_sock.send(cmd[total_sent:])
if sent == 0:
raise Exception("Cannot send command")
total_sent += sent
def _ReceiveCommandResponse(self):
"""Reveice response from a command sent with SendCommandAsync."""
recv = ""
try:
while True:
block = self._cmd_sock.recv(self.MAX_MESSAGE_SIZE).decode('latin-1')
recv += block
if len(block) == 0 or (len(block) > 2 and block[-1] == "\n"):
break
# if "Err" in recv:
# raise Exception("Received error code:" + recv)
_log.debug("RECV(%d): %s", len(recv), recv.strip())
if self.debug:
print("cRio: <<%s" % recv.strip())
return recv.strip()
except ConnectionAbortedError:
self.closeConnection()
print("cRio: Connection Aborted Error")
return ""
except:
return ""
def _SendCommand(self, cmd):
"""Send a command to the camera, and return the response."""
self._SendCommandAsync(cmd)
return self._ReceiveCommandResponse()
def sendSettings(self, name, value):
"""Send a setting to the Triggering system. Returns error flag"""
if self.connection_status:
cmd = '%s = "%d"' % (name, value)
ans = self._SendCommand(cmd)
return False if 'Ok' in ans else True
def readSettings(self, name):
"""Reads a setting from the Triggering system.
Returns the value of the setting, or a blank string is error occurs."""
if self.connection_status:
cmd = '%s = "?"' % name
res = self._SendCommand(cmd)
match = regex.search(res)
if not match:
self.connection_status = False
raise Exception("Invalid response: %s", res)
return int(match.group(2))
else:
return None
def setMode(self, mode):
cmd = 'Mode = "%d"' % mode
ans = self._SendCommand(cmd)
return False if 'Ok' in ans else True
def readMode(self):
ans = self.readSettings("Mode")
return ans
def readStatus(self):
self.io_enabled = bool(self.readSettings("IOs_enabled"))
self.laser_ready = bool(self.readSettings("Laser_Ready_I"))
self.interlock = bool(self.readSettings("Interlock"))
return self.io_enabled, self.laser_ready, self.interlock
| 1,834
| 3,236
| 23
|
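TriggerUnit above speaks a line-oriented TCP protocol in which every parameter is read with 'Name = "?"' and written with 'Name = "<value>"'. A minimal hedged usage sketch built only from the methods defined in the class; the IP address, setting name, and mode number are placeholders rather than values from the MPTS installation, and the import path simply mirrors the file's location in the repository:
from instruments.triggering import TriggerUnit

trig = TriggerUnit(ip="192.168.0.10", port=15000, debug=True)  # placeholder address
if trig.isConnected():
    delay = trig.readSettings("B9_Delay")     # read a delay register (name is a placeholder)
    trig.sendSettings("B9_Delay", delay + 1)  # write it back, increased by one time unit
    trig.setMode(1)                           # mode number is an assumption
trig.closeConnection()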
51d3878e39cd1b290a0eaeb08e3f64757d4ea419
| 108
|
py
|
Python
|
.config/autokey/data/chromium/text-editing/c-e-move-end-of-line.py
|
tonyaldon/uconfig
|
e2966389dc4bdaf10ef86f1625faf1b2caf687c6
|
[
"MIT"
] | 1
|
2021-01-14T19:10:38.000Z
|
2021-01-14T19:10:38.000Z
|
.config/autokey/data/chromium/text-editing/c-e-move-end-of-line.py
|
tonyaldon/uconfig
|
e2966389dc4bdaf10ef86f1625faf1b2caf687c6
|
[
"MIT"
] | null | null | null |
.config/autokey/data/chromium/text-editing/c-e-move-end-of-line.py
|
tonyaldon/uconfig
|
e2966389dc4bdaf10ef86f1625faf1b2caf687c6
|
[
"MIT"
] | 1
|
2021-01-14T19:10:40.000Z
|
2021-01-14T19:10:40.000Z
|
store.set_global_value('hotkey', '<ctrl>+e')
engine.set_return_value('<end>')
engine.run_script('chromium')
| 27
| 44
| 0.75
|
store.set_global_value('hotkey', '<ctrl>+e')
engine.set_return_value('<end>')
engine.run_script('chromium')
| 0
| 0
| 0
|
87ce95ba10a9e75ca88b201d1ff1e27dd415d9db
| 30,083
|
py
|
Python
|
latticegraph_designer/test/test_designer.py
|
luchko/latticegraph_designer
|
7fd25a5bdc0fce070cd1328c5d6f7113b6ec91e2
|
[
"MIT"
] | 24
|
2017-04-23T14:15:59.000Z
|
2021-05-12T04:33:47.000Z
|
latticegraph_designer/test/test_designer.py
|
luchko/latticegraph_designer
|
7fd25a5bdc0fce070cd1328c5d6f7113b6ec91e2
|
[
"MIT"
] | null | null | null |
latticegraph_designer/test/test_designer.py
|
luchko/latticegraph_designer
|
7fd25a5bdc0fce070cd1328c5d6f7113b6ec91e2
|
[
"MIT"
] | 8
|
2017-08-02T12:55:28.000Z
|
2021-10-10T14:54:38.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Very weak testing of the basic functionality using unittest and QTest"""
from __future__ import division
__author__ = "Ivan Luchko (luchko.ivan@gmail.com)"
__version__ = "1.0a1"
__date__ = "Apr 4, 2017"
__copyright__ = "Copyright (c) 2017, Ivan Luchko and Project Contributors "
import sys
import os
import subprocess
import unittest
# define pyQt version
try:
from PyQt4.QtGui import QApplication, QDialogButtonBox, QTextCursor
from PyQt4.QtTest import QTest
from PyQt4.QtCore import Qt
except ImportError:
try:
from PyQt5.QtWidgets import QApplication, QDialogButtonBox
from PyQt5.QtGui import QTextCursor
from PyQt5.QtTest import QTest
from PyQt5.QtCore import Qt
except ImportError:
raise ImportError("neither PyQt4 or PyQt5 is found")
from latticegraph_designer.app.main import MainWindow
from latticegraph_designer.app.dialogs import (DialogImportCryst, DialogDistSearch,
MyDialogPreferences, DialogEditXML)
from mpl_animationmanager import QDialogAnimManager
app = QApplication(sys.argv)
test_folder = "./latticegraph_designer/test/"
from latticegraph_designer.app.core import Vertex, Edge, UnitCell, Lattice, CrystalCluster
from latticegraph_designer.app.mpl_pane import GraphEdgesEditor
from matplotlib.backend_bases import KeyEvent, MouseEvent
import matplotlib.pyplot as plt
import numpy as np
class GeeMethodsTest(unittest.TestCase):
'''Test the mpl_pane GraphEdgesEditor methods'''
def test_USE_COLLECTIONS(self):
'''testing the usage of lineCollection for depicting edges'''
GraphEdgesEditor.USE_COLLECTIONS = True
self.setUp()
try:
self.assertEqual(self.gee.UC.num_vertices, 2)
self.assertEqual(self.gee.UC.num_edges, 6)
self.assertEqual(len(self.ax.artists), 6+1) # arrows + new edge
self.assertEqual(len(self.gee.edges_lines), 6)
# collections: vertices, lattice, edges
self.assertEqual(len(self.ax.collections), 1+1+6)
# select edge
_id = 3
self.gee.select_edge(_id)
self.assertTrue(self.gee.e_active_ind == _id)
# remove edge
self.gee.delete_active_edge_callback()
self.assertEqual(self.gee.UC.num_edges, 5)
self.assertEqual(len(self.gee.edges_lines), 5)
# collections: vertices, lattice, edges
self.assertEqual(len(self.ax.collections), 1+1+5)
# clear edges
self.gee.clearEdges_callback()
self.assertEqual(self.gee.UC.num_edges, 0)
self.assertEqual(len(self.ax.artists), 6+1) # arrows + new edge
self.assertEqual(len(self.gee.edges_lines), 0)
# collections: vertices, lattice, edges
self.assertEqual(len(self.ax.collections), 1+1+0)
# add edge
self.addEdge(0, 4)
self.assertEqual(self.gee.UC.num_edges, 1)
self.assertEqual(len(self.gee.edges_lines), 1)
# collections: vertices, lattice, edges
self.assertEqual(len(self.ax.collections), 1+1+1)
except: # we have to set USE_COLLECTIONS=False for other tests
GraphEdgesEditor.USE_COLLECTIONS = False
raise
finally:
GraphEdgesEditor.USE_COLLECTIONS = False
class GeeInteractionTest(unittest.TestCase):
'''Test the mpl_pane keybounding and mouse manipulation'''
class MainWindowTest(unittest.TestCase):
'''Test the MainWindow GUI'''
def setUp(self):
'''Create the GUI'''
self.mainWindow = MainWindow(TEXT_MODE=True)
# def test_terminalLaunch(self):
#
# p = subprocess.Popen(['graphdesigner','&'],
# stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#
# output, error = p.communicate()
#
## p = subprocess.call("graphdesigner", shell=True)
# p.kill()
#
# if p.returncode == 0:
# return output
# else:
# raise Exception(error)
# return "Error"
class PreferencesTest(unittest.TestCase):
'''Test the Preferences manager'''
def setUp(self):
'''Create the GUI'''
self.mainWindow = MainWindow(TEXT_MODE=False)
class AnimaManagerTest(unittest.TestCase):
'''Test the Animation manager'''
def setUp(self):
'''Create the GUI'''
self.mainWindow = MainWindow(TEXT_MODE=False)
class CodeEditorTest(unittest.TestCase):
'''Test the Animation manager'''
def setUp(self):
'''Create the GUI'''
self.mainWindow = MainWindow(TEXT_MODE=True)
if __name__ == "__main__":
unittest.main()
| 40.164219
| 90
| 0.630123
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Very weak testing of the basic functionality using unittest and QTest"""
from __future__ import division
__author__ = "Ivan Luchko (luchko.ivan@gmail.com)"
__version__ = "1.0a1"
__date__ = "Apr 4, 2017"
__copyright__ = "Copyright (c) 2017, Ivan Luchko and Project Contributors "
import sys
import os
import subprocess
import unittest
# define pyQt version
try:
from PyQt4.QtGui import QApplication, QDialogButtonBox, QTextCursor
from PyQt4.QtTest import QTest
from PyQt4.QtCore import Qt
except ImportError:
try:
from PyQt5.QtWidgets import QApplication, QDialogButtonBox
from PyQt5.QtGui import QTextCursor
from PyQt5.QtTest import QTest
from PyQt5.QtCore import Qt
except ImportError:
raise ImportError("neither PyQt4 or PyQt5 is found")
from latticegraph_designer.app.main import MainWindow
from latticegraph_designer.app.dialogs import (DialogImportCryst, DialogDistSearch,
MyDialogPreferences, DialogEditXML)
from mpl_animationmanager import QDialogAnimManager
app = QApplication(sys.argv)
test_folder = "./latticegraph_designer/test/"
def printgraph(libFile):
try:
import pyalps
except ImportError:
print("ALPS package is not installed.")
return "ALPS package is not installed."
else:
testFile = '''LATTICE_LIBRARY=\"{}\"
LATTICE=\"test\"
L=2
W=2
H=2'''.format(libFile)
p = subprocess.Popen(['printgraph'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate(input=testFile)
if p.returncode == 0:
return output
else:
raise Exception(error)
return "Error"
from latticegraph_designer.app.core import Vertex, Edge, UnitCell, Lattice, CrystalCluster
from latticegraph_designer.app.mpl_pane import GraphEdgesEditor
from matplotlib.backend_bases import KeyEvent, MouseEvent
import matplotlib.pyplot as plt
import numpy as np
class GeeMethodsTest(unittest.TestCase):
'''Test the mpl_pane GraphEdgesEditor methods'''
def setUp(self):
lattice = Lattice(basisMatrix=np.array([[1,0,0],[0,1,0],[0,0,1.3]]).T)
self.UC = UnitCell()
self.UC.add_vertex(Vertex(0,0,[0.2,0.2,0.2]))
self.UC.add_vertex(Vertex(0,0,[0.3,0.3,0.6]))
self.UC.add_edge(Edge(0,1,(1,2),(0,0,0)))
self.UC.add_edge(Edge(0,2,(2,1),(0,0,1)))
self.UC.add_edge(Edge(0,0,(1,1),(1,0,0)))
self.UC.add_edge(Edge(0,0,(1,1),(0,1,0)))
self.UC.add_edge(Edge(0,0,(2,2),(1,0,0)))
self.UC.add_edge(Edge(0,0,(2,2),(0,1,0)))
self.cluster = CrystalCluster(self.UC,lattice,(2,2,2))
self.fig = plt.figure()
self.ax = self.fig.gca(projection='3d') # same as ax = Axes3D(fig)
self.gee = GraphEdgesEditor(self.ax, self.cluster, display_report=True)
def test_setUp(self):
self.setUp()
self.assertEqual(self.gee.UC.num_vertices, 2)
self.assertEqual(self.gee.UC.num_edges, 6)
self.assertEqual(len(self.ax.artists), 6+1+4*4+4*3) # arrows + new edge + edges
self.assertEqual(len(self.gee.edges_lines), 28)
def test_clear(self):
self.setUp()
self.gee.clearEdges_callback()
self.assertEqual(self.gee.UC.num_edges, 0)
self.assertEqual(len(self.ax.artists), 6+1) # arrows + new edge
self.assertEqual(len(self.gee.edges_lines), 0)
def addEdge(self, source, target):
self.gee.v_source_ind = source
self.gee.v_target_ind = target
self.gee.add_edge()
def test_addRemoveEdges(self):
self.setUp()
self.gee.clearEdges_callback()
self.addEdge(0, 8)
self.assertEqual(self.gee.UC.num_edges, 1)
self.assertEqual(len(self.gee.edges_lines), 8)
self.assertEqual(len(self.ax.artists), 6+1+8)
self.addEdge(0, 4)
self.assertEqual(self.gee.UC.num_edges, 2)
self.assertEqual(len(self.gee.edges_lines), 8+4)
self.assertEqual(len(self.ax.artists), 6+1+8+4)
self.gee.select_edge(1)
self.gee.delete_active_edge_callback()
self.assertEqual(self.gee.UC.num_edges, 1)
self.assertEqual(len(self.gee.edges_lines), 4)
self.assertEqual(len(self.ax.artists), 6+1+4)
def test_edgeSelection(self):
self.setUp()
self.gee.clearEdges_callback()
self.addEdge(0, 8)
self.addEdge(0, 4)
# select edge
_id = 2
self.gee.select_edge(_id)
self.assertTrue(self.gee.e_active_ind == _id)
for j in self.gee.edges.array_ind[_id]:
self.assertTrue(self.gee.edges_lines[j].get_color() == self.gee.color_active)
self.assertTrue(self.gee.edges_lines[j].get_linewidth() == self.gee.lw_active)
# test unselect edge
self.gee.select_edge(None)
self.assertTrue(self.gee.e_active_ind is None)
color = self.gee.colors_e[self.UC.edges[_id].type]
for j in self.gee.edges.array_ind[_id]:
self.assertTrue(self.gee.edges_lines[j].get_color() == color)
self.assertTrue(self.gee.edges_lines[j].get_linewidth() == self.gee.lw)
# test edge unselection by selecting another edge
self.gee.select_edge(_id)
id_new = 1
self.gee.select_edge(id_new)
self.assertTrue(self.gee.e_active_ind == id_new)
for j in self.gee.edges.array_ind[id_new]:
self.assertTrue(self.gee.edges_lines[j].get_color() == self.gee.color_active)
self.assertTrue(self.gee.edges_lines[j].get_linewidth() == self.gee.lw_active)
#check if previous active unselected
color = self.gee.colors_e[self.UC.edges[_id].type]
for j in self.gee.edges.array_ind[_id]:
self.assertTrue(self.gee.edges_lines[j].get_color() == color)
self.assertTrue(self.gee.edges_lines[j].get_linewidth() == self.gee.lw)
def test_searchActiveDistEdge(self):
self.setUp()
self.gee.clearEdges_callback()
self.addEdge(0, 8)
self.addEdge(0, 4)
self.assertEqual(self.gee.UC.num_edges, 1+1)
self.assertEqual(len(self.gee.edges_lines), 8+4)
self.assertEqual(len(self.ax.artists), 6+1+8+4)
self.gee.select_edge(2)
self.gee.searchActiveDistEdge_callback()
        self.assertEqual(self.gee.UC.num_edges, 1+4) # 4 edges similar to edge 2 found
self.assertEqual(len(self.gee.edges_lines), 8+4*4)
self.assertEqual(len(self.ax.artists), 6+1+8+4*4)
def test_xml_ImportExport(self):
self.setUp()
# export to lib
fn = test_folder+"test_coreExport.xml"
self.cluster.export_toFile(fileName = fn, LATTICEGRAPH_name = "test")
self.gee.clearEdges_callback()
self.assertEqual(self.gee.UC.num_vertices, 2)
self.assertEqual(self.gee.UC.num_edges, 0)
# import from lib
self.cluster.import_fromFile(fileName = fn, LATTICEGRAPH_name = "test")
self.fig = plt.figure()
self.ax = self.fig.gca(projection='3d') # same as ax = Axes3D(fig)
self.gee = GraphEdgesEditor(self.ax, self.cluster, display_report=True)
# check initialization
self.assertEqual(self.gee.UC.num_vertices, 2)
self.assertEqual(self.gee.UC.num_edges, 6)
self.assertEqual(len(self.ax.artists), 6+1+4*4+4*3) # arrows + new edge + edges
self.assertEqual(len(self.gee.edges_lines), 28)
def test_USE_COLLECTIONS(self):
'''testing the usage of lineCollection for depicting edges'''
GraphEdgesEditor.USE_COLLECTIONS = True
self.setUp()
try:
self.assertEqual(self.gee.UC.num_vertices, 2)
self.assertEqual(self.gee.UC.num_edges, 6)
self.assertEqual(len(self.ax.artists), 6+1) # arrows + new edge
self.assertEqual(len(self.gee.edges_lines), 6)
# collections: vertices, lattice, edges
self.assertEqual(len(self.ax.collections), 1+1+6)
# select edge
_id = 3
self.gee.select_edge(_id)
self.assertTrue(self.gee.e_active_ind == _id)
# remove edge
self.gee.delete_active_edge_callback()
self.assertEqual(self.gee.UC.num_edges, 5)
self.assertEqual(len(self.gee.edges_lines), 5)
# collections: vertices, lattice, edges
self.assertEqual(len(self.ax.collections), 1+1+5)
# clear edges
self.gee.clearEdges_callback()
self.assertEqual(self.gee.UC.num_edges, 0)
self.assertEqual(len(self.ax.artists), 6+1) # arrows + new edge
self.assertEqual(len(self.gee.edges_lines), 0)
# collections: vertices, lattice, edges
self.assertEqual(len(self.ax.collections), 1+1+0)
# add edge
self.addEdge(0, 4)
self.assertEqual(self.gee.UC.num_edges, 1)
self.assertEqual(len(self.gee.edges_lines), 1)
# collections: vertices, lattice, edges
self.assertEqual(len(self.ax.collections), 1+1+1)
except: # we have to set USE_COLLECTIONS=False for other tests
GraphEdgesEditor.USE_COLLECTIONS = False
raise
finally:
GraphEdgesEditor.USE_COLLECTIONS = False
class GeeInteractionTest(unittest.TestCase):
'''Test the mpl_pane keybounding and mouse manipulation'''
def setUp(self):
self.mainWindow = MainWindow(TEXT_MODE=True)
self.ax = self.mainWindow.ax
self.gee = self.mainWindow.gee
self.canvas = self.mainWindow.canvas
def test_keyBindings(self):
self.setUp()
# test ctrl+numKey - change active edge type to numkey
_id, new_type = 2, 5
self.gee.select_edge(_id)
self.canvas.key_press_event('ctrl+{}'.format(new_type))
self.gee.select_edge(None)
self.assertTrue(self.gee.UC.edges[_id].type == new_type)
color = self.gee.colors_e[new_type]
for j in self.gee.edges.array_ind[_id]:
self.assertTrue(self.gee.edges_lines[j].get_color() == color)
self.assertTrue(self.gee.edges_lines[j].get_linewidth() == self.gee.lw)
self.canvas.key_press_event('delete')
self.canvas.key_press_event('shift+delete')
self.canvas.key_press_event('ctrl+d')
# test display_report switch
_bool = self.gee.display_report
self.canvas.key_press_event('t')
self.assertTrue(self.gee.display_report != _bool)
self.canvas.key_press_event('t')
self.assertTrue(self.gee.display_report == _bool)
# test display_lattice switch
_bool = self.gee.display_lattice
self.canvas.key_press_event('n')
self.assertTrue(self.gee.display_lattice != _bool)
self.assertTrue(self.gee.latticeNet.get_visible() != _bool)
self.canvas.key_press_event('n')
self.assertTrue(self.gee.display_lattice == _bool)
self.assertTrue(self.gee.latticeNet.get_visible() == _bool)
# test display_arrows switch
_bool = self.gee.display_arrows
self.canvas.key_press_event('m')
self.assertTrue(self.gee.display_arrows != _bool)
for elem in self.gee.arrows:
self.assertTrue(elem.get_visible() != _bool)
self.canvas.key_press_event('m')
self.assertTrue(self.gee.display_arrows == _bool)
for elem in self.gee.arrows:
self.assertTrue(elem.get_visible() == _bool)
def test_mouseManipulation(self):
self.setUp()
self.gee.clearEdges_callback()
# simulate rotation
self.canvas.motion_notify_event(x=20, y=20)
self.canvas.button_press_event(x=20, y=20, button=1)
self.assertEqual(self.gee.v_source_ind, None)
self.canvas.motion_notify_event(x=20, y=20)
azim, elev = self.gee.ax.azim, self.gee.ax.elev
# rotate
self.assertTrue(self.gee.isRotated)
self.canvas.motion_notify_event(x=30, y=30)
self.assertTrue(self.gee.isRotated)
self.assertTrue(self.gee.ax.elev != elev)
self.assertTrue(self.gee.ax.azim != azim)
azim, elev = self.gee.ax.azim, self.gee.ax.elev
# rotate more
self.canvas.motion_notify_event(x=40, y=40)
self.assertTrue(self.gee.ax.elev != elev)
self.assertTrue(self.gee.ax.azim != azim)
# release button
self.canvas.button_release_event(x=40, y=40, button=1)
self.canvas.motion_notify_event(x=40, y=45)
# check
self.assertEqual(self.gee.UC.num_edges, 0)
self.assertEqual(self.gee.e_active_ind, None)
self.assertEqual(self.gee.isRotated, False)
        # vertex activation/deactivation test
        source_ind = 0
        x_data, y_data = self.gee.x_scr[source_ind], self.gee.y_scr[source_ind]
        # vertex activation
        self.canvas.motion_notify_event(x=x_data, y=y_data)
        self.assertTrue(self.gee.v_active_ind == source_ind)
        # vertex deactivation
        self.canvas.motion_notify_event(x=30, y=30)
        self.assertTrue(self.gee.v_active_ind == None)
        # simulate an unsuccessful edge creation attempt
# activate source vertex
self.canvas.motion_notify_event(x=x_data, y=y_data)
self.canvas.button_press_event(x=x_data, y=x_data, button=1)
self.assertEqual(self.gee.v_source_ind, source_ind)
# draw potential edge
self.canvas.motion_notify_event(x=30, y=30)
# release button at random spot
self.canvas.motion_notify_event(x=40, y=40)
self.canvas.button_release_event(x=40, y=40, button=1)
# check
self.assertEqual(self.gee.UC.num_edges, 0)
self.assertEqual(self.gee.e_active_ind, None)
        # simulate a successful edge creation attempt
_id = 1
# activate source vertex
self.canvas.motion_notify_event(x=x_data, y=y_data)
self.canvas.button_press_event(x=x_data, y=x_data, button=1)
self.assertEqual(self.gee.v_source_ind, source_ind)
# draw potential edge
self.canvas.motion_notify_event(x=30, y=30)
# select target vertex
target_ind = 4
x_data, y_data = self.gee.x_scr[target_ind], self.gee.y_scr[target_ind]
self.canvas.motion_notify_event(x=x_data, y=y_data)
self.canvas.button_release_event(x=x_data, y=y_data, button=1)
# check
self.assertEqual(self.gee.UC.num_edges, 1)
self.assertEqual(self.gee.e_active_ind, _id)
# unselect edge by clicking on the empty spot
self.canvas.motion_notify_event(x=30, y=30)
self.canvas.button_press_event(x=30, y=30, button=1)
self.canvas.button_release_event(x=30, y=30, button=1)
# check
self.assertTrue(self.gee.e_active_ind is None)
color = self.gee.colors_e[self.gee.UC.edges[_id].type]
for j in self.gee.edges.array_ind[_id]:
self.assertTrue(self.gee.edges_lines[j].get_color() == color)
self.assertTrue(self.gee.edges_lines[j].get_linewidth() == self.gee.lw)
# select the edge
source_ind, target_ind = 0, 4
x_data = (self.gee.x_scr[source_ind] + self.gee.x_scr[target_ind])/2
y_data = (self.gee.y_scr[source_ind] + self.gee.y_scr[target_ind])/2
# simulate selection
self.canvas.motion_notify_event(x=x_data, y=y_data)
self.canvas.button_press_event(x=x_data, y=y_data, button=1)
self.canvas.button_release_event(x=x_data, y=y_data, button=1)
self.assertTrue(self.gee.e_active_ind == _id)
for j in self.gee.edges.array_ind[_id]:
self.assertTrue(self.gee.edges_lines[j].get_color() == self.gee.color_active)
self.assertTrue(self.gee.edges_lines[j].get_linewidth() == self.gee.lw_active)
# search for edges having the same length as selected
self.mainWindow.action_AddSimEdges.trigger()
self.assertEqual(self.gee.UC.num_edges, 4)
self.assertEqual(self.gee.e_active_ind, None)
class MainWindowTest(unittest.TestCase):
'''Test the MainWindow GUI'''
def setUp(self):
'''Create the GUI'''
self.mainWindow = MainWindow(TEXT_MODE=True)
# def test_terminalLaunch(self):
#
# p = subprocess.Popen(['graphdesigner','&'],
# stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#
# output, error = p.communicate()
#
## p = subprocess.call("graphdesigner", shell=True)
# p.kill()
#
# if p.returncode == 0:
# return output
# else:
# raise Exception(error)
# return "Error"
def test_ImportXML(self):
fn_input = os.path.abspath(test_folder+"testLib_input.xml")
self.mainWindow.importXML_fromFile(fn_input)
self.assertEqual(self.mainWindow.cluster.UC.num_vertices, 2)
self.assertEqual(self.mainWindow.cluster.UC.num_edges, 6)
def test_ImportFromALPS_lib(self):
fn_input = os.path.abspath(test_folder+"testALPS_lib.xml")
self.mainWindow.importXML_fromFile(fn_input)
listLG = self.mainWindow.dlgSelectLG.list_LG_names
self.assertEqual(listLG.count(), 31)
cluster = None
for j in range(listLG.count()):
listLG.setCurrentItem(listLG.item(j))
self.assertTrue(self.mainWindow.cluster is not cluster)
cluster = self.mainWindow.cluster
def test_ImportCIF(self):
fn_cif = os.path.abspath(test_folder+"test.cif")
self.mainWindow.action_ImportCryst.trigger()
self.dlgImportCryst = self.mainWindow.dlgImportCryst
self.dlgImportCryst.process_cif(fn_cif, TESTING=True)
self.assertAlmostEqual(float(self.dlgImportCryst.lineEdit_a.text()), 20.753)
self.assertAlmostEqual(float(self.dlgImportCryst.lineEdit_b.text()), 7.517)
self.assertAlmostEqual(float(self.dlgImportCryst.lineEdit_c.text()), 6.4475)
self.assertAlmostEqual(float(self.dlgImportCryst.lineEdit_alpha.text()), 90.0)
self.assertAlmostEqual(float(self.dlgImportCryst.lineEdit_beta.text()), 103.21)
self.assertAlmostEqual(float(self.dlgImportCryst.lineEdit_gamma.text()), 90.0)
self.dlgImportCryst.importCrystal_callback()
self.assertEqual(self.mainWindow.cluster.UC.num_vertices, 8)
self.assertEqual(self.mainWindow.cluster.UC.num_edges, 0)
def test_DistSearch(self):
self.test_ImportCIF()
self.assertEqual(self.mainWindow.cluster.UC.num_vertices, 8)
self.assertEqual(self.mainWindow.cluster.UC.num_edges, 0)
# opne "edge length manager"
self.mainWindow.action_AddDistEdges.trigger()
self.dlgDistSearch = self.mainWindow.dlgDistSearch
lw = self.dlgDistSearch.listWidget
# add edges with length 5.514
data = {"bool": True, "type":0, "dist":5.514, "err":1}
lw.itemWidget(lw.item(0)).set_data(data)
self.dlgDistSearch.btnSearch.click()
self.assertEqual(self.mainWindow.cluster.UC.num_edges, 16)
# add edges with length 7.55
data = {"bool": True, "type":1, "dist":7.55, "err":0.1}
lw.itemWidget(lw.item(1)).set_data(data)
self.dlgDistSearch.btnSearch.click()
self.assertEqual(self.mainWindow.cluster.UC.num_edges, 16+4)
# export to XML lib
self.ExportXML(os.path.abspath(test_folder+"testLib_output.xml"))
# test adding new item
self.assertEqual(lw.count(), 3)
self.dlgDistSearch.btnAdd.click()
self.assertEqual(lw.count(), 4)
# select edges
lw.setCurrentItem(lw.item(1))
self.assertEqual(len(self.mainWindow.gee.e_activeDist_ids), 4)
lw.setCurrentItem(lw.item(0))
self.assertEqual(len(self.mainWindow.gee.e_activeDist_ids), 16)
# delete selected edges
self.dlgDistSearch.btnRemove.click()
self.assertEqual(self.mainWindow.cluster.UC.num_edges, 4)
# delete more selected edges
lw.setCurrentItem(lw.item(0))
self.assertEqual(len(self.mainWindow.gee.e_activeDist_ids), 4)
self.mainWindow.action_DelEdge.trigger()
self.assertEqual(self.mainWindow.cluster.UC.num_edges, 0)
self.dlgDistSearch.btnClose.click()
def ExportXML(self, fn_output):
self.mainWindow.fileNameXML = fn_output
self.mainWindow.LATTICEGRAPH_name = "test"
self.mainWindow.saveXML_callback()
self.assertNotEqual(printgraph(fn_output), "Error")
def test_ExportXML(self):
self.test_ImportXML()
fn_output = os.path.abspath(test_folder+"testLib_output.xml")
self.ExportXML(fn_output)
fn_benchmark = os.path.abspath(test_folder+"testLib_output_benchmark.xml")
self.assertEqual(printgraph(fn_output), printgraph(fn_benchmark))
def test_ExportIMG(self):
self.mainWindow.exportIMG(test_folder+"test.png")
self.assertTrue(os.path.exists(test_folder+"test.png"))
def test_resetSize(self):
self.test_ImportXML()
self.assertEqual(len(self.mainWindow.gee.ax.artists), 1+6+4*4+4*3)
self.mainWindow.spinBox_sizeL.setValue(3)
self.assertEqual(len(self.mainWindow.gee.ax.artists), 1+6+4*7+6*3)
self.mainWindow.spinBox_sizeW.setValue(3)
self.assertEqual(len(self.mainWindow.gee.ax.artists), 1+6+4*12+9*3)
self.mainWindow.spinBox_sizeH.setValue(3)
self.assertEqual(len(self.mainWindow.gee.ax.artists), 1+6+6*12+9*5)
def test_changeEdgeType(self):
self.test_ImportXML()
# select edge from listWidget
ind, _id = 1, 2
self.mainWindow.listEdges.setCurrentItem(self.mainWindow.listEdges.item(ind))
self.assertTrue(self.mainWindow.gee.e_active_ind == _id)
new_type = 5
self.mainWindow.spinBox_type.setValue(new_type)
self.mainWindow.btnChangeType.click()
self.assertEqual(self.mainWindow.UC.edges[_id].type, new_type)
# test changing the type using menu action and dialog
# select edge from listWidget
ind, _id = 2, 3
self.mainWindow.listEdges.setCurrentItem(self.mainWindow.listEdges.item(ind))
self.assertTrue(self.mainWindow.gee.e_active_ind == _id)
current_type = self.mainWindow.UC.edges[_id].type
# open change type dialog and test cancel
self.mainWindow.action_ChangeType.trigger()
self.assertEqual(self.mainWindow.dlg.spinBox_current.value(), current_type)
self.assertEqual(self.mainWindow.dlg.spinBox_new.value(), current_type)
new_type = 7
self.mainWindow.dlg.spinBox_new.setValue(new_type)
self.mainWindow.dlg.btnCancel.click()
self.assertEqual(self.mainWindow.UC.edges[_id].type, current_type)
# open change type dialog and test ok
self.mainWindow.action_ChangeType.trigger()
self.assertEqual(self.mainWindow.dlg.spinBox_current.value(), current_type)
self.assertEqual(self.mainWindow.dlg.spinBox_new.value(), current_type)
self.mainWindow.dlg.spinBox_new.setValue(new_type)
self.mainWindow.dlg.btnOk.click()
self.assertEqual(self.mainWindow.UC.edges[_id].type, new_type)
def test_changeTEXT_MODE(self):
_bool = self.mainWindow.TEXT_MODE
self.mainWindow.radioButton_output.toggle()
self.assertTrue(self.mainWindow.TEXT_MODE != _bool)
self.mainWindow.radioButton_output.toggle()
class PreferencesTest(unittest.TestCase):
'''Test the Preferences manager'''
def setUp(self):
'''Create the GUI'''
self.mainWindow = MainWindow(TEXT_MODE=False)
def test_PrefManager(self):
self.mainWindow.action_Pref.trigger()
dlgPref = self.mainWindow.dlgPref
dlgPref.btnDefaults.click()
edgePref = dlgPref.prefWidget.edgePref
lw = edgePref.listWidget
try:
# compare gee pref and dialog widgets values
self.assertEqual(lw.count(), 10)
# edge linewidth
self.assertAlmostEqual(self.mainWindow.gee.lw,
edgePref.sliderSize.value()*7/100)
_type = 4
data = lw.get_itemData(_type)
self.assertEqual(self.mainWindow.gee.colors_e[_type], data["color"])
self.assertEqual(self.mainWindow.gee.visible_e[_type], data["bool"])
#change theme but not apply
dlgPref.comboBox.setCurrentIndex(2)
_type = 4
data = lw.get_itemData(_type)
data['bool'] = False
lw.set_itemData(_type, data)
self.assertNotEqual(self.mainWindow.gee.colors_e[_type], data["color"])
self.assertNotEqual(self.mainWindow.gee.visible_e[_type], data["bool"])
# add new preference item to the list
edgePref.btnAdd.click()
self.assertEqual(lw.count(), 11)
_type = 10
data = lw.get_itemData(_type)
# Apply and check changes in gee prefs
dlgPref.btnApply.click()
self.assertEqual(self.mainWindow.gee.colors_e[_type], data["color"])
self.assertEqual(self.mainWindow.gee.visible_e[_type], data["bool"])
finally:
dlgPref.btnDefaults.click()
# check changes
dlgPref.btnClose.click()
class AnimaManagerTest(unittest.TestCase):
'''Test the Animation manager'''
def setUp(self):
'''Create the GUI'''
self.mainWindow = MainWindow(TEXT_MODE=False)
def test_AnimManager(self):
self.mainWindow.action_ExportAnim.trigger()
self.dlgExportAnim = self.mainWindow.dlgExportAnim
self.dlgExportAnim.btnPause.click()
self.dlgExportAnim.btnStart.click()
# change dpi
self.dlgExportAnim.spinBox_dpi.setValue(50)
self.assertEqual(self.dlgExportAnim.dpi, 50)
# change fps
self.dlgExportAnim.spinBox_fps.setValue(10)
self.assertEqual(self.dlgExportAnim.fps, 10)
# change elevation
self.dlgExportAnim.spinBox_elev.setValue(10)
self.assertEqual(self.dlgExportAnim.elevation, 10)
# change rotation period
self.dlgExportAnim.spinBox_period_rot.setValue(30)
self.assertEqual(self.dlgExportAnim.period_rot, 30)
self.dlgExportAnim.spinBox_period_rot.setValue(3)
self.assertEqual(self.dlgExportAnim.period_rot, 3)
# stop
self.dlgExportAnim.btnStop.click()
# change initial azimut
self.dlgExportAnim.spinBox_azim.setValue(-50)
self.assertEqual(self.dlgExportAnim.zero_azim, -50)
# export animation
path = os.path.abspath(test_folder+"test")
self.dlgExportAnim.lineEdit_name.setText(path)
self.dlgExportAnim.btnExport.click()
self.assertTrue(os.path.exists(test_folder+"test.gif")
or os.path.exists(test_folder+"test.mp4"))
self.dlgExportAnim.btnClose.click()
class CodeEditorTest(unittest.TestCase):
'''Test the Animation manager'''
def setUp(self):
'''Create the GUI'''
self.mainWindow = MainWindow(TEXT_MODE=True)
def test_CodeEditor(self):
self.mainWindow.action_EditXML.trigger()
# insert xml data from another lib and apply
fn = os.path.abspath(test_folder+'triangular_network.xml')
with open(fn) as f:
self.mainWindow.dlgEditXML.codeEditor.setPlainText(f.read())
self.mainWindow.dlgEditXML.buttonBox.button(QDialogButtonBox.Apply).click()
# check changes in main window
self.assertEqual(self.mainWindow.cluster.UC.num_vertices, 96)
self.assertEqual(self.mainWindow.cluster.UC.num_edges, 288)
self.mainWindow.dlgEditXML.buttonBox.button(QDialogButtonBox.Close).click()
def test_edge_selection(self):
self.mainWindow.action_EditXML.trigger()
# select edge from listWidget
ind = 0
self.mainWindow.listEdges.setCurrentItem(self.mainWindow.listEdges.item(ind))
self.assertTrue(self.mainWindow.gee.e_active_ind == ind+1)
# select edge from listWidget
ind = 5
self.mainWindow.listEdges.setCurrentItem(self.mainWindow.listEdges.item(ind))
self.assertTrue(self.mainWindow.gee.e_active_ind == ind+1)
# select edge from listWidget
ind = 6
self.mainWindow.listEdges.setCurrentItem(self.mainWindow.listEdges.item(ind))
self.assertTrue(self.mainWindow.gee.e_active_ind is None)
# move cursor to the bottow of document
self.mainWindow.dlgEditXML.codeEditor.moveCursor(QTextCursor.End)
self.mainWindow.dlgEditXML.close()
if __name__ == "__main__":
unittest.main()
| 24,378
| 0
| 779
|
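The GUI tests above never touch real input devices; they drive the matplotlib pane by injecting synthetic backend events such as key_press_event and motion_notify_event. A minimal sketch of the same technique on a bare figure, independent of the designer; it constructs the KeyEvent explicitly and dispatches it through the callback registry, which also works on recent matplotlib releases where the canvas.key_press_event() shortcut used in the tests is deprecated:
import matplotlib
matplotlib.use("Agg")  # headless backend, as in CI test runs
import matplotlib.pyplot as plt
from matplotlib.backend_bases import KeyEvent

pressed = []
fig, ax = plt.subplots()
fig.canvas.mpl_connect("key_press_event", lambda event: pressed.append(event.key))

# Build the event by hand and dispatch it through the canvas callback registry.
event = KeyEvent("key_press_event", fig.canvas, "t")
fig.canvas.callbacks.process("key_press_event", event)
assert pressed == ["t"]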
0e652ecfaead0fd3fcc5bf2bc1ba76e23ca4705a
| 92
|
py
|
Python
|
config_gen/admin.py
|
lkmhaqer/gtools-python
|
cff6d80525b78a4fadfb686566489fbe1687d889
|
[
"MIT"
] | 5
|
2016-10-31T17:46:17.000Z
|
2022-02-02T00:40:49.000Z
|
config_gen/admin.py
|
lkmhaqer/gtools-python
|
cff6d80525b78a4fadfb686566489fbe1687d889
|
[
"MIT"
] | 33
|
2018-05-09T06:07:50.000Z
|
2021-09-22T17:39:56.000Z
|
config_gen/admin.py
|
lkmhaqer/gtools-python
|
cff6d80525b78a4fadfb686566489fbe1687d889
|
[
"MIT"
] | 1
|
2020-05-14T21:44:25.000Z
|
2020-05-14T21:44:25.000Z
|
# file: config_gen/admin.py
from django.contrib import admin
# Register your models here.
| 15.333333
| 32
| 0.771739
|
# file: config_gen/admin.py
from django.contrib import admin
# Register your models here.
| 0
| 0
| 0
|
f6645b849fb773b521114eed6a43f40cbba427a2
| 8,363
|
py
|
Python
|
tests/integration/schema_registry/test_json_serializers.py
|
woodlee/confluent-kafka-python
|
c8a05dbd73e06ea310de7f28267fd4714f90ef8c
|
[
"Apache-2.0"
] | 1
|
2020-04-22T13:10:31.000Z
|
2020-04-22T13:10:31.000Z
|
tests/integration/schema_registry/test_json_serializers.py
|
woodlee/confluent-kafka-python
|
c8a05dbd73e06ea310de7f28267fd4714f90ef8c
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/schema_registry/test_json_serializers.py
|
woodlee/confluent-kafka-python
|
c8a05dbd73e06ea310de7f28267fd4714f90ef8c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from confluent_kafka import TopicPartition
from confluent_kafka.error import ConsumeError, ValueSerializationError
from confluent_kafka.schema_registry.json_schema import (JSONSerializer,
JSONDeserializer)
def _testProduct_to_dict(product_obj, ctx):
"""
Returns testProduct instance in dict format.
Args:
product_obj (_TestProduct): testProduct instance.
ctx (SerializationContext): Metadata pertaining to the serialization
operation.
Returns:
dict: product_obj as a dictionary.
"""
return {"productId": product_obj.product_id,
"productName": product_obj.name,
"price": product_obj.price,
"tags": product_obj.tags,
"dimensions": product_obj.dimensions,
"warehouseLocation": product_obj.location}
def _testProduct_from_dict(product_dict, ctx):
"""
Returns testProduct instance from its dict format.
Args:
product_dict (dict): testProduct in dict format.
ctx (SerializationContext): Metadata pertaining to the serialization
operation.
Returns:
_TestProduct: product_obj instance.
"""
return _TestProduct(product_dict['productId'],
product_dict['productName'],
product_dict['price'],
product_dict['tags'],
product_dict['dimensions'],
product_dict['warehouseLocation'])
def test_json_record_serialization(kafka_cluster, load_file):
"""
    Tests basic JsonSerializer and JsonDeserializer functionality.
product.json from:
https://json-schema.org/learn/getting-started-step-by-step.html
Args:
kafka_cluster (KafkaClusterFixture): cluster fixture
load_file (callable(str)): JSON Schema file reader
"""
topic = kafka_cluster.create_topic("serialization-json")
sr = kafka_cluster.schema_registry({'url': 'http://localhost:8081'})
schema_str = load_file("product.json")
value_serializer = JSONSerializer(schema_str, sr)
value_deserializer = JSONDeserializer(schema_str)
producer = kafka_cluster.producer(value_serializer=value_serializer)
record = {"productId": 1,
"productName": "An ice sculpture",
"price": 12.50,
"tags": ["cold", "ice"],
"dimensions": {
"length": 7.0,
"width": 12.0,
"height": 9.5
},
"warehouseLocation": {
"latitude": -78.75,
"longitude": 20.4
}}
producer.produce(topic, value=record, partition=0)
producer.flush()
consumer = kafka_cluster.consumer(value_deserializer=value_deserializer)
consumer.assign([TopicPartition(topic, 0)])
msg = consumer.poll()
actual = msg.value()
assert all([actual[k] == v for k, v in record.items()])
def test_json_record_serialization_incompatible(kafka_cluster, load_file):
"""
Tests Serializer validation functionality.
product.json from:
https://json-schema.org/learn/getting-started-step-by-step.html
Args:
kafka_cluster (KafkaClusterFixture): cluster fixture
load_file (callable(str)): JSON Schema file reader
"""
topic = kafka_cluster.create_topic("serialization-json")
sr = kafka_cluster.schema_registry({'url': 'http://localhost:8081'})
schema_str = load_file("product.json")
value_serializer = JSONSerializer(schema_str, sr)
producer = kafka_cluster.producer(value_serializer=value_serializer)
record = {"contractorId": 1,
"contractorName": "David Davidson",
"contractRate": 1250,
"trades": ["mason"]}
with pytest.raises(ValueSerializationError,
match=r"(.*) is a required property"):
producer.produce(topic, value=record, partition=0)
def test_json_record_serialization_no_title(kafka_cluster, load_file):
"""
    Ensures a ValueError is raised if the JSON Schema definition lacks a title annotation.
Args:
kafka_cluster (KafkaClusterFixture): cluster fixture
load_file (callable(str)): JSON Schema file reader
"""
sr = kafka_cluster.schema_registry({'url': 'http://localhost:8081'})
schema_str = load_file('not_title.json')
with pytest.raises(ValueError,
match="Missing required JSON schema annotation title"):
JSONSerializer(schema_str, sr)
def test_json_record_serialization_custom(kafka_cluster, load_file):
"""
Ensures to_dict and from_dict hooks are properly applied by the serializer.
Args:
kafka_cluster (KafkaClusterFixture): cluster fixture
load_file (callable(str)): JSON Schema file reader
"""
topic = kafka_cluster.create_topic("serialization-json")
sr = kafka_cluster.schema_registry({'url': 'http://localhost:8081'})
schema_str = load_file("product.json")
value_serializer = JSONSerializer(schema_str, sr,
to_dict=_testProduct_to_dict)
value_deserializer = JSONDeserializer(schema_str,
from_dict=_testProduct_from_dict)
producer = kafka_cluster.producer(value_serializer=value_serializer)
record = _TestProduct(product_id=1,
name="The ice sculpture",
price=12.50,
tags=["cold", "ice"],
dimensions={"length": 7.0,
"width": 12.0,
"height": 9.5},
location={"latitude": -78.75,
"longitude": 20.4})
producer.produce(topic, value=record, partition=0)
producer.flush()
consumer = kafka_cluster.consumer(value_deserializer=value_deserializer)
consumer.assign([TopicPartition(topic, 0)])
msg = consumer.poll()
actual = msg.value()
assert all([getattr(actual, attribute) == getattr(record, attribute)
for attribute in vars(record)])
def test_json_record_deserialization_mismatch(kafka_cluster, load_file):
"""
    Ensures deserialization against a mismatched schema raises a ConsumeError.
Args:
kafka_cluster (KafkaClusterFixture): cluster fixture
load_file (callable(str)): JSON Schema file reader
"""
topic = kafka_cluster.create_topic("serialization-json")
sr = kafka_cluster.schema_registry({'url': 'http://localhost:8081'})
schema_str = load_file("contractor.json")
schema_str2 = load_file("product.json")
value_serializer = JSONSerializer(schema_str, sr)
value_deserializer = JSONDeserializer(schema_str2)
producer = kafka_cluster.producer(value_serializer=value_serializer)
record = {"contractorId": 2,
"contractorName": "Magnus Edenhill",
"contractRate": 30,
"trades": ["pickling"]}
producer.produce(topic, value=record, partition=0)
producer.flush()
consumer = kafka_cluster.consumer(value_deserializer=value_deserializer)
consumer.assign([TopicPartition(topic, 0)])
with pytest.raises(
ConsumeError,
match="'productId' is a required property"):
consumer.poll()
| 32.667969
| 79
| 0.64044
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from confluent_kafka import TopicPartition
from confluent_kafka.error import ConsumeError, ValueSerializationError
from confluent_kafka.schema_registry.json_schema import (JSONSerializer,
JSONDeserializer)
class _TestProduct(object):
def __init__(self, product_id, name, price, tags, dimensions, location):
self.product_id = product_id
self.name = name
self.price = price
self.tags = tags
self.dimensions = dimensions
self.location = location
def _testProduct_to_dict(product_obj, ctx):
"""
Returns testProduct instance in dict format.
Args:
product_obj (_TestProduct): testProduct instance.
ctx (SerializationContext): Metadata pertaining to the serialization
operation.
Returns:
dict: product_obj as a dictionary.
"""
return {"productId": product_obj.product_id,
"productName": product_obj.name,
"price": product_obj.price,
"tags": product_obj.tags,
"dimensions": product_obj.dimensions,
"warehouseLocation": product_obj.location}
def _testProduct_from_dict(product_dict, ctx):
"""
Returns testProduct instance from its dict format.
Args:
product_dict (dict): testProduct in dict format.
ctx (SerializationContext): Metadata pertaining to the serialization
operation.
Returns:
_TestProduct: product_obj instance.
"""
return _TestProduct(product_dict['productId'],
product_dict['productName'],
product_dict['price'],
product_dict['tags'],
product_dict['dimensions'],
product_dict['warehouseLocation'])
def test_json_record_serialization(kafka_cluster, load_file):
"""
    Tests basic JsonSerializer and JsonDeserializer functionality.
product.json from:
https://json-schema.org/learn/getting-started-step-by-step.html
Args:
kafka_cluster (KafkaClusterFixture): cluster fixture
load_file (callable(str)): JSON Schema file reader
"""
topic = kafka_cluster.create_topic("serialization-json")
sr = kafka_cluster.schema_registry({'url': 'http://localhost:8081'})
schema_str = load_file("product.json")
value_serializer = JSONSerializer(schema_str, sr)
value_deserializer = JSONDeserializer(schema_str)
producer = kafka_cluster.producer(value_serializer=value_serializer)
record = {"productId": 1,
"productName": "An ice sculpture",
"price": 12.50,
"tags": ["cold", "ice"],
"dimensions": {
"length": 7.0,
"width": 12.0,
"height": 9.5
},
"warehouseLocation": {
"latitude": -78.75,
"longitude": 20.4
}}
producer.produce(topic, value=record, partition=0)
producer.flush()
consumer = kafka_cluster.consumer(value_deserializer=value_deserializer)
consumer.assign([TopicPartition(topic, 0)])
msg = consumer.poll()
actual = msg.value()
assert all([actual[k] == v for k, v in record.items()])
def test_json_record_serialization_incompatible(kafka_cluster, load_file):
"""
Tests Serializer validation functionality.
product.json from:
https://json-schema.org/learn/getting-started-step-by-step.html
Args:
kafka_cluster (KafkaClusterFixture): cluster fixture
load_file (callable(str)): JSON Schema file reader
"""
topic = kafka_cluster.create_topic("serialization-json")
sr = kafka_cluster.schema_registry({'url': 'http://localhost:8081'})
schema_str = load_file("product.json")
value_serializer = JSONSerializer(schema_str, sr)
producer = kafka_cluster.producer(value_serializer=value_serializer)
record = {"contractorId": 1,
"contractorName": "David Davidson",
"contractRate": 1250,
"trades": ["mason"]}
with pytest.raises(ValueSerializationError,
match=r"(.*) is a required property"):
producer.produce(topic, value=record, partition=0)
def test_json_record_serialization_no_title(kafka_cluster, load_file):
"""
    Ensures ValueError is raised if the JSON Schema definition lacks a title annotation.
Args:
kafka_cluster (KafkaClusterFixture): cluster fixture
load_file (callable(str)): JSON Schema file reader
"""
sr = kafka_cluster.schema_registry({'url': 'http://localhost:8081'})
schema_str = load_file('not_title.json')
with pytest.raises(ValueError,
match="Missing required JSON schema annotation title"):
JSONSerializer(schema_str, sr)
def test_json_record_serialization_custom(kafka_cluster, load_file):
"""
Ensures to_dict and from_dict hooks are properly applied by the serializer.
Args:
kafka_cluster (KafkaClusterFixture): cluster fixture
load_file (callable(str)): JSON Schema file reader
"""
topic = kafka_cluster.create_topic("serialization-json")
sr = kafka_cluster.schema_registry({'url': 'http://localhost:8081'})
schema_str = load_file("product.json")
value_serializer = JSONSerializer(schema_str, sr,
to_dict=_testProduct_to_dict)
value_deserializer = JSONDeserializer(schema_str,
from_dict=_testProduct_from_dict)
producer = kafka_cluster.producer(value_serializer=value_serializer)
record = _TestProduct(product_id=1,
name="The ice sculpture",
price=12.50,
tags=["cold", "ice"],
dimensions={"length": 7.0,
"width": 12.0,
"height": 9.5},
location={"latitude": -78.75,
"longitude": 20.4})
producer.produce(topic, value=record, partition=0)
producer.flush()
consumer = kafka_cluster.consumer(value_deserializer=value_deserializer)
consumer.assign([TopicPartition(topic, 0)])
msg = consumer.poll()
actual = msg.value()
assert all([getattr(actual, attribute) == getattr(record, attribute)
for attribute in vars(record)])
def test_json_record_deserialization_mismatch(kafka_cluster, load_file):
"""
    Ensures deserialization fails when the consumed record does not match the reader schema.
Args:
kafka_cluster (KafkaClusterFixture): cluster fixture
load_file (callable(str)): JSON Schema file reader
"""
topic = kafka_cluster.create_topic("serialization-json")
sr = kafka_cluster.schema_registry({'url': 'http://localhost:8081'})
schema_str = load_file("contractor.json")
schema_str2 = load_file("product.json")
value_serializer = JSONSerializer(schema_str, sr)
value_deserializer = JSONDeserializer(schema_str2)
producer = kafka_cluster.producer(value_serializer=value_serializer)
record = {"contractorId": 2,
"contractorName": "Magnus Edenhill",
"contractRate": 30,
"trades": ["pickling"]}
producer.produce(topic, value=record, partition=0)
producer.flush()
consumer = kafka_cluster.consumer(value_deserializer=value_deserializer)
consumer.assign([TopicPartition(topic, 0)])
with pytest.raises(
ConsumeError,
match="'productId' is a required property"):
consumer.poll()
| 235
| 6
| 49
|
2207e7f8080442f83f42de7f49c9eb73c34f4d8c
| 2,041
|
py
|
Python
|
pyexcel_io/utils.py
|
kit-cat/pyexcel-io
|
ec62f384ef814d51d95bf2fb15b8a9b239249691
|
[
"BSD-3-Clause"
] | null | null | null |
pyexcel_io/utils.py
|
kit-cat/pyexcel-io
|
ec62f384ef814d51d95bf2fb15b8a9b239249691
|
[
"BSD-3-Clause"
] | null | null | null |
pyexcel_io/utils.py
|
kit-cat/pyexcel-io
|
ec62f384ef814d51d95bf2fb15b8a9b239249691
|
[
"BSD-3-Clause"
] | null | null | null |
"""
pyexcel_io.utils
~~~~~~~~~~~~~~~~~~~
utility functions
:copyright: (c) 2014-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
import pyexcel_io.constants as constants
XLS_PLUGIN = "pyexcel-xls"
XLSX_PLUGIN = "pyexcel-xlsx"
ODS_PLUGIN = "pyexcel-ods"
ODS3_PLUGIN = "pyexcel-ods3"
XLSXW_PLUGIN = "pyexcel-xlsxw"
IO_ITSELF = "pyexcel-io"
AVAILABLE_READERS = {
constants.FILE_FORMAT_XLS: [XLS_PLUGIN],
constants.FILE_FORMAT_XLSX: [XLS_PLUGIN, XLSX_PLUGIN],
constants.FILE_FORMAT_XLSM: [XLS_PLUGIN, XLSX_PLUGIN],
constants.FILE_FORMAT_ODS: [ODS_PLUGIN, ODS3_PLUGIN],
constants.FILE_FORMAT_CSV: [IO_ITSELF],
constants.FILE_FORMAT_TSV: [IO_ITSELF],
constants.FILE_FORMAT_CSVZ: [IO_ITSELF],
constants.FILE_FORMAT_TSVZ: [IO_ITSELF],
}
AVAILABLE_WRITERS = {
constants.FILE_FORMAT_XLS: [XLS_PLUGIN],
constants.FILE_FORMAT_XLSX: [XLSX_PLUGIN, XLSXW_PLUGIN],
constants.FILE_FORMAT_XLSM: [XLSX_PLUGIN],
constants.FILE_FORMAT_ODS: [ODS_PLUGIN, ODS3_PLUGIN],
constants.FILE_FORMAT_CSV: [IO_ITSELF],
constants.FILE_FORMAT_TSV: [IO_ITSELF],
constants.FILE_FORMAT_CSVZ: [IO_ITSELF],
constants.FILE_FORMAT_TSVZ: [IO_ITSELF],
}
def is_empty_array(array):
"""
Check if an array is an array of '' or not
"""
empty_array = [element for element in array if element != ""]
return len(empty_array) == 0
def swap_empty_string_for_none(array):
""" replace empty string fields with None """
def swap(value):
""" change empty string to None """
if value == "":
return None
else:
return value
return [swap(x) for x in array]
| 27.958904
| 65
| 0.687898
|
"""
pyexcel_io.utils
~~~~~~~~~~~~~~~~~~~
utility functions
:copyright: (c) 2014-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
import pyexcel_io.constants as constants
XLS_PLUGIN = "pyexcel-xls"
XLSX_PLUGIN = "pyexcel-xlsx"
ODS_PLUGIN = "pyexcel-ods"
ODS3_PLUGIN = "pyexcel-ods3"
XLSXW_PLUGIN = "pyexcel-xlsxw"
IO_ITSELF = "pyexcel-io"
AVAILABLE_READERS = {
constants.FILE_FORMAT_XLS: [XLS_PLUGIN],
constants.FILE_FORMAT_XLSX: [XLS_PLUGIN, XLSX_PLUGIN],
constants.FILE_FORMAT_XLSM: [XLS_PLUGIN, XLSX_PLUGIN],
constants.FILE_FORMAT_ODS: [ODS_PLUGIN, ODS3_PLUGIN],
constants.FILE_FORMAT_CSV: [IO_ITSELF],
constants.FILE_FORMAT_TSV: [IO_ITSELF],
constants.FILE_FORMAT_CSVZ: [IO_ITSELF],
constants.FILE_FORMAT_TSVZ: [IO_ITSELF],
}
AVAILABLE_WRITERS = {
constants.FILE_FORMAT_XLS: [XLS_PLUGIN],
constants.FILE_FORMAT_XLSX: [XLSX_PLUGIN, XLSXW_PLUGIN],
constants.FILE_FORMAT_XLSM: [XLSX_PLUGIN],
constants.FILE_FORMAT_ODS: [ODS_PLUGIN, ODS3_PLUGIN],
constants.FILE_FORMAT_CSV: [IO_ITSELF],
constants.FILE_FORMAT_TSV: [IO_ITSELF],
constants.FILE_FORMAT_CSVZ: [IO_ITSELF],
constants.FILE_FORMAT_TSVZ: [IO_ITSELF],
}
def _index_filter(current_index, start, limit=-1):
out_range = constants.SKIP_DATA
if current_index >= start:
out_range = constants.TAKE_DATA
if limit > 0 and out_range == constants.TAKE_DATA:
if current_index >= (start + limit):
out_range = constants.STOP_ITERATION
return out_range
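# Illustrative note (editor's addition): the values below follow directly from
# the branches above. With start=2 and limit=3, rows 0-1 are skipped, rows 2-4
# are taken, and iteration stops from row 5 onwards:
#   _index_filter(1, 2, 3) -> constants.SKIP_DATA
#   _index_filter(3, 2, 3) -> constants.TAKE_DATA
#   _index_filter(5, 2, 3) -> constants.STOP_ITERATION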
def is_empty_array(array):
"""
Check if an array is an array of '' or not
"""
empty_array = [element for element in array if element != ""]
return len(empty_array) == 0
def swap_empty_string_for_none(array):
""" replace empty string fields with None """
def swap(value):
""" change empty string to None """
if value == "":
return None
else:
return value
return [swap(x) for x in array]
| 306
| 0
| 23
|
6b829467b894c5cb437c1071179fbb9d72d5b88b
| 11,431
|
py
|
Python
|
test_cube_solution.py
|
noreallyimfine/Rubiks-Cube
|
5d6bdffb2f1554453de94e5ea3efd2fc5db75a8a
|
[
"MIT"
] | 1
|
2020-07-08T14:14:26.000Z
|
2020-07-08T14:14:26.000Z
|
test_cube_solution.py
|
noreallyimfine/Rubiks-Cube
|
5d6bdffb2f1554453de94e5ea3efd2fc5db75a8a
|
[
"MIT"
] | null | null | null |
test_cube_solution.py
|
noreallyimfine/Rubiks-Cube
|
5d6bdffb2f1554453de94e5ea3efd2fc5db75a8a
|
[
"MIT"
] | null | null | null |
import unittest
from cube import RubiksCube
# rename that class
# test solution funcs <- make sure the tests arent interfering with each other
# def test_bottom_layer_robustness(self, n=50):
# for _ in range(n):
# self.cube.initialize_cube()
# self.test_bottom_layer()
# print("Success")
# def test_middle_layer_robustness(self, n=50):
# for _ in range(n):
# self.cube.initialize_cube()
# self.cube._solve_mid_layer()
# def test_top_cross_robustness(self, n=50):
# for _ in range(n):
# self.cube.initialize_cube()
# self.test_top_cross()
# def test_top_face_robustness(self, n=50):
# for _ in range(n):
# self.cube.initialize_cube()
# self.test_top_face()
# def test_top_corners_robustness(self, n=50):
# for _ in range(n):
# self.cube.initialize_cube()
# self.test_top_corners()
if __name__ == '__main__':
unittest.main()
| 47.235537
| 107
| 0.682442
|
import unittest
from cube import RubiksCube
# rename that class
class CubeSolutionTests(unittest.TestCase):
def setUp(self):
self.cube = RubiksCube()
# test solution funcs <- make sure the tests arent interfering with each other
def bottom_face_tester(self):
self.assertTrue(all(self.cube.bot_layer[bottom].sides['bottom'] for bottom in self.cube.bot_layer))
def bottom_layer_tester(self):
right_center = self.cube.mid_layer['right_center'].sides['right']
left_center = self.cube.mid_layer['left_center'].sides['left']
front_center = self.cube.mid_layer['front_center'].sides['front']
back_center = self.cube.mid_layer['back_center'].sides['back']
self.assertTrue(all(self.cube.bot_layer[bottom].sides['bottom'] for bottom in self.cube.bot_layer))
# all 'sides' match their corresponding center
self.assertEqual(self.cube.bot_layer['front_right'].sides['front'], front_center)
self.assertEqual(self.cube.bot_layer['front_middle'].sides['front'], front_center)
self.assertEqual(self.cube.bot_layer['front_left'].sides['front'], front_center)
self.assertEqual(self.cube.bot_layer['front_left'].sides['left'], left_center)
self.assertEqual(self.cube.bot_layer['left_middle'].sides['left'], left_center)
self.assertEqual(self.cube.bot_layer['back_left'].sides['left'], left_center)
self.assertEqual(self.cube.bot_layer['back_left'].sides['back'], back_center)
self.assertEqual(self.cube.bot_layer['back_middle'].sides['back'], back_center)
self.assertEqual(self.cube.bot_layer['back_right'].sides['back'], back_center)
self.assertEqual(self.cube.bot_layer['back_right'].sides['right'], right_center)
self.assertEqual(self.cube.bot_layer['right_middle'].sides['right'], right_center)
self.assertEqual(self.cube.bot_layer['front_right'].sides['right'], right_center)
def middle_layer_tester(self):
front_center = self.cube.mid_layer['front_center'].sides['front']
left_center = self.cube.mid_layer['left_center'].sides['left']
right_center = self.cube.mid_layer['right_center'].sides['right']
back_center = self.cube.mid_layer['back_center'].sides['back']
self.assertEqual(self.cube.mid_layer['front_right'].sides['front'], front_center)
self.assertEqual(self.cube.mid_layer['front_left'].sides['front'], front_center)
self.assertEqual(self.cube.mid_layer['front_left'].sides['left'], left_center)
self.assertEqual(self.cube.mid_layer['back_left'].sides['left'], left_center)
self.assertEqual(self.cube.mid_layer['back_left'].sides['back'], back_center)
self.assertEqual(self.cube.mid_layer['back_right'].sides['back'], back_center)
self.assertEqual(self.cube.mid_layer['back_right'].sides['right'], right_center)
self.assertEqual(self.cube.mid_layer['front_right'].sides['right'], right_center)
def test_make_daisy(self):
bottom_center = self.cube.bot_layer['bottom_center'].sides['bottom']
self.cube._make_daisy()
self.assertEqual(self.cube.top_layer['right_middle'].sides['top'], bottom_center)
self.assertEqual(self.cube.top_layer['left_middle'].sides['top'], bottom_center)
self.assertEqual(self.cube.top_layer['front_middle'].sides['top'], bottom_center)
self.assertEqual(self.cube.top_layer['back_middle'].sides['top'], bottom_center)
def test_bottom_cross(self):
bottom_center = self.cube.bot_layer['bottom_center'].sides['bottom']
self.cube._bottom_cross()
self.assertEqual(self.cube.bot_layer['right_middle'].sides['bottom'], bottom_center)
self.assertEqual(self.cube.bot_layer['left_middle'].sides['bottom'], bottom_center)
self.assertEqual(self.cube.bot_layer['front_middle'].sides['bottom'], bottom_center)
self.assertEqual(self.cube.bot_layer['back_middle'].sides['bottom'], bottom_center)
# also test the other sides match their face
right_center = self.cube.mid_layer['right_center'].sides['right']
left_center = self.cube.mid_layer['left_center'].sides['left']
front_center = self.cube.mid_layer['front_center'].sides['front']
back_center = self.cube.mid_layer['back_center'].sides['back']
self.assertEqual(self.cube.bot_layer['right_middle'].sides['right'], right_center)
self.assertEqual(self.cube.bot_layer['front_middle'].sides['front'], front_center)
self.assertEqual(self.cube.bot_layer['back_middle'].sides['back'], back_center)
self.assertEqual(self.cube.bot_layer['left_middle'].sides['left'], left_center)
def test_bottom_layer(self):
self.cube._solve_bot_layer()
self.bottom_layer_tester()
# def test_bottom_layer_robustness(self, n=50):
# for _ in range(n):
# self.cube.initialize_cube()
# self.test_bottom_layer()
# print("Success")
def test_middle_layer(self):
self.cube._solve_mid_layer()
self.bottom_layer_tester()
self.middle_layer_tester()
# def test_middle_layer_robustness(self, n=50):
# for _ in range(n):
# self.cube.initialize_cube()
# self.cube._solve_mid_layer()
def test_top_cross(self):
self.cube._solve_top_cross()
self.bottom_layer_tester()
self.middle_layer_tester()
self.assertEqual(self.cube.top_layer['left_middle'].sides['top'], 'y')
self.assertEqual(self.cube.top_layer['front_middle'].sides['top'], 'y')
self.assertEqual(self.cube.top_layer['right_middle'].sides['top'], 'y')
self.assertEqual(self.cube.top_layer['back_middle'].sides['top'], 'y')
# def test_top_cross_robustness(self, n=50):
# for _ in range(n):
# self.cube.initialize_cube()
# self.test_top_cross()
def test_top_face(self):
self.cube._solve_top_face()
self.bottom_layer_tester()
self.middle_layer_tester()
self.assertTrue(all(self.cube.top_layer[top].sides['top'] for top in self.cube.top_layer))
# def test_top_face_robustness(self, n=50):
# for _ in range(n):
# self.cube.initialize_cube()
# self.test_top_face()
def test_top_corners(self):
self.cube._solve_top_corners()
self.bottom_layer_tester()
self.middle_layer_tester()
# test all four corners match their centers.
# front
front_center = self.cube.mid_layer['front_center'].sides['front']
self.assertEqual(self.cube.top_layer['front_right'].sides['front'], front_center)
self.assertEqual(self.cube.top_layer['front_left'].sides['front'], front_center)
# left
left_center = self.cube.mid_layer['left_center'].sides['left']
self.assertEqual(self.cube.top_layer['back_left'].sides['left'], left_center)
self.assertEqual(self.cube.top_layer['front_left'].sides['left'], left_center)
# back
back_center = self.cube.mid_layer['back_center'].sides['back']
self.assertEqual(self.cube.top_layer['back_left'].sides['back'], back_center)
self.assertEqual(self.cube.top_layer['back_right'].sides['back'], back_center)
# right
right_center = self.cube.mid_layer['right_center'].sides['right']
self.assertEqual(self.cube.top_layer['back_right'].sides['right'], right_center)
self.assertEqual(self.cube.top_layer['front_right'].sides['right'], right_center)
# def test_top_corners_robustness(self, n=50):
# for _ in range(n):
# self.cube.initialize_cube()
# self.test_top_corners()
def test_solved_cube(self):
self.cube.solve_cube()
front_center = self.cube.mid_layer['front_center'].sides['front']
back_center = self.cube.mid_layer['back_center'].sides['back']
right_center = self.cube.mid_layer['right_center'].sides['right']
left_center = self.cube.mid_layer['left_center'].sides['left']
# top face
self.assertTrue(all(self.cube.top_layer[top].sides['top'] for top in self.cube.top_layer))
# bot face
self.bottom_face_tester()
# front face
self.assertEqual(self.cube.top_layer['front_right'].sides['front'], front_center)
self.assertEqual(self.cube.top_layer['front_middle'].sides['front'], front_center)
self.assertEqual(self.cube.top_layer['front_left'].sides['front'], front_center)
self.assertEqual(self.cube.mid_layer['front_left'].sides['front'], front_center)
self.assertEqual(self.cube.mid_layer['front_right'].sides['front'], front_center)
self.assertEqual(self.cube.bot_layer['front_right'].sides['front'], front_center)
self.assertEqual(self.cube.bot_layer['front_left'].sides['front'], front_center)
self.assertEqual(self.cube.bot_layer['front_middle'].sides['front'], front_center)
# back face
self.assertEqual(self.cube.top_layer['back_right'].sides['back'], back_center)
self.assertEqual(self.cube.top_layer['back_left'].sides['back'], back_center)
self.assertEqual(self.cube.top_layer['back_middle'].sides['back'], back_center)
self.assertEqual(self.cube.mid_layer['back_right'].sides['back'], back_center)
self.assertEqual(self.cube.mid_layer['back_left'].sides['back'], back_center)
self.assertEqual(self.cube.bot_layer['back_left'].sides['back'], back_center)
self.assertEqual(self.cube.bot_layer['back_right'].sides['back'], back_center)
self.assertEqual(self.cube.bot_layer['back_middle'].sides['back'], back_center)
# right face
self.assertEqual(self.cube.top_layer['front_right'].sides['right'], right_center)
self.assertEqual(self.cube.top_layer['back_right'].sides['right'], right_center)
self.assertEqual(self.cube.top_layer['right_middle'].sides['right'], right_center)
self.assertEqual(self.cube.mid_layer['back_right'].sides['right'], right_center)
self.assertEqual(self.cube.mid_layer['front_right'].sides['right'], right_center)
self.assertEqual(self.cube.bot_layer['front_right'].sides['right'], right_center)
self.assertEqual(self.cube.bot_layer['back_right'].sides['right'], right_center)
self.assertEqual(self.cube.bot_layer['right_middle'].sides['right'], right_center)
# left face
self.assertEqual(self.cube.top_layer['front_left'].sides['left'], left_center)
self.assertEqual(self.cube.top_layer['back_left'].sides['left'], left_center)
self.assertEqual(self.cube.top_layer['left_middle'].sides['left'], left_center)
self.assertEqual(self.cube.mid_layer['back_left'].sides['left'], left_center)
self.assertEqual(self.cube.mid_layer['front_left'].sides['left'], left_center)
self.assertEqual(self.cube.bot_layer['front_left'].sides['left'], left_center)
self.assertEqual(self.cube.bot_layer['back_left'].sides['left'], left_center)
self.assertEqual(self.cube.bot_layer['left_middle'].sides['left'], left_center)
def test_solved_cube_robustness(self, n=50):
for _ in range(n):
self.cube.initialize_cube()
self.cube.solve_cube()
if __name__ == '__main__':
unittest.main()
| 9,959
| 22
| 400
|
6c005a2859950869af5dc5c3f85c80d99083f121
| 1,824
|
py
|
Python
|
chap6/mysequence.py
|
marble-git/python-laoqi
|
74c4bb5459113e54ce64443e5da5a9c6a3052d6a
|
[
"MIT"
] | null | null | null |
chap6/mysequence.py
|
marble-git/python-laoqi
|
74c4bb5459113e54ce64443e5da5a9c6a3052d6a
|
[
"MIT"
] | null | null | null |
chap6/mysequence.py
|
marble-git/python-laoqi
|
74c4bb5459113e54ce64443e5da5a9c6a3052d6a
|
[
"MIT"
] | null | null | null |
#coding:utf-8
'''
filename:mysequence.py
chap:6
subject:20
conditions:inherit collections.abc.Sequence
            objects in the new container must be kept in a fixed order
solution:class MySequence
'''
import collections
import numbers
class MySequence(collections.abc.Sequence):
    '''Required methods: __getitem__, __len__'''
@staticmethod
def order(seq):
        '''Return the sequence sorted by element type'''
# print('seq:',seq)
source = list(seq)
# print('source:',source)
number_list = []
str_list = []
tuple_list = []
list_list = []
dict_list = []
set_list = []
other_list = []
d = {'numbers.Real':number_list,
'str':str_list,
'tuple':tuple_list,
'list':list_list,
'dict':dict_list,
'set':set_list}
for item in source:
for cls_string in d.keys():
if isinstance(item,eval(cls_string)):
d[cls_string].append(item)
break
else:
other_list.append(item)
# print('other_list :',other_list)
rst = []
lists = list(d.values())
for lst in lists:
# print('before sort:',lst)
lst.sort()
# print('after sort:',lst)
rst += lst
return rst+other_list
if __name__ == '__main__':
l = [1,2,(3,4,55),{'a','b'},{(11,11):111,'name':'laoqi'},(33,5),62,'python',9,'age']
a = MySequence(l)
print(l)
print(a)
print(len(a))
print(list(a))
| 25.690141
| 88
| 0.514803
|
#coding:utf-8
'''
filename:mysequence.py
chap:6
subject:20
conditions:inherit collections.abc.Sequence
            objects in the new container must be kept in a fixed order
solution:class MySequence
'''
import collections
import numbers
class MySequence(collections.abc.Sequence):
    '''Required methods: __getitem__, __len__'''
def __init__(self,seq):
self.seq = type(self).order(seq)
def __getitem__(self,index):
return self.seq[index]
def __len__(self):
return len(self.seq)
def __repr__(self):
return '{}:{}'.format(type(self).__name__,self.seq)
@staticmethod
def order(seq):
        '''Return the sequence sorted by element type'''
# print('seq:',seq)
source = list(seq)
# print('source:',source)
number_list = []
str_list = []
tuple_list = []
list_list = []
dict_list = []
set_list = []
other_list = []
d = {'numbers.Real':number_list,
'str':str_list,
'tuple':tuple_list,
'list':list_list,
'dict':dict_list,
'set':set_list}
for item in source:
for cls_string in d.keys():
if isinstance(item,eval(cls_string)):
d[cls_string].append(item)
break
else:
other_list.append(item)
# print('other_list :',other_list)
rst = []
lists = list(d.values())
for lst in lists:
# print('before sort:',lst)
lst.sort()
# print('after sort:',lst)
rst += lst
return rst+other_list
if __name__ == '__main__':
l = [1,2,(3,4,55),{'a','b'},{(11,11):111,'name':'laoqi'},(33,5),62,'python',9,'age']
a = MySequence(l)
print(l)
print(a)
print(len(a))
print(list(a))
| 165
| 0
| 105
|
dd363973ab415042f38d53ecd6eb3ea142076116
| 1,924
|
py
|
Python
|
src/syncro/__main__.py
|
cav71/syncro
|
2591dd1bd14b7b4bf2a8b2f0099c1d5140679d10
|
[
"MIT"
] | null | null | null |
src/syncro/__main__.py
|
cav71/syncro
|
2591dd1bd14b7b4bf2a8b2f0099c1d5140679d10
|
[
"MIT"
] | null | null | null |
src/syncro/__main__.py
|
cav71/syncro
|
2591dd1bd14b7b4bf2a8b2f0099c1d5140679d10
|
[
"MIT"
] | null | null | null |
"""starts a sync remote server
"""
import os
import getpass
import pathlib
import logging
import click
from . import cli
import paramiko
import paramiko.sftp_client
import syncro.support as support
import syncro.cli as cli
logger = logging.getLogger(__name__)
@click.command()
@click.argument("host")
@click.option('--password', hide_input=True)
@click.option('--username', default=lambda: getpass.getuser())
@cli.standard(quiet=True)
def main(host, username, password):
"hello world"
logger.debug("A")
logger.info("B")
logger.warning("C")
port = 22
print("one", username, password)
client = paramiko.client.SSHClient()
client.load_system_host_keys()
client.load_host_keys(pathlib.Path("~/.ssh/known_hosts").expanduser())
client.connect(host, port, username=username, password=password)
transport = client.get_transport()
transport.set_keepalive(2)
print(support.remote(transport, ["ls", "-la",])[1])
# @cli.add_logging()
# def two(*args, **kwargs):
# print("two", args, kwargs)
#
# @cli.add_logging(1, b=2)
# def three(*args, **kwargs):
# print("three", args, kwargs)
if __name__ == '__main__':
main()
| 22.904762
| 74
| 0.680873
|
"""starts a sync remote server
"""
import os
import getpass
import pathlib
import logging
import click
from . import cli
import paramiko
import paramiko.sftp_client
import syncro.support as support
import syncro.cli as cli
logger = logging.getLogger(__name__)
def add_arguments(parser):
parser.add_argument("host")
parser.add_argument("-u", "--username", default=getpass.getuser())
parser.add_argument("-p", "--password")
def process_options(options):
pass
def main(options):
    host, port, username = options.host, 22, options.username
    startup_delay_s = 2
    # The SSH transport used below was never created in this legacy entry
    # point; build it the same way the click-based main() further down does.
    client = paramiko.client.SSHClient()
    client.load_system_host_keys()
    client.load_host_keys(pathlib.Path("~/.ssh/known_hosts").expanduser())
    client.connect(host, port, username=username, password=options.password)
    transport = client.get_transport()
    transport.set_keepalive(2)
    print(support.remote(transport, ["ls", "-la",])[1])
#print(support.remote(transport, ["/bin/echo", "$$",]))
#print(support.remote(transport, ["/bin/echo", "$$",]))
sftp = paramiko.sftp_client.SFTPClient.from_transport(transport)
# transfer the remote server
sftp.put(pathlib.Path(__file__).parent / "remote.py", "remote.py")
# connect the secure end points
support.shell(transport)
@click.command()
@click.argument("host")
@click.option('--password', hide_input=True)
@click.option('--username', default=lambda: getpass.getuser())
@cli.standard(quiet=True)
def main(host, username, password):
"hello world"
logger.debug("A")
logger.info("B")
logger.warning("C")
port = 22
print("one", username, password)
client = paramiko.client.SSHClient()
client.load_system_host_keys()
client.load_host_keys(pathlib.Path("~/.ssh/known_hosts").expanduser())
client.connect(host, port, username=username, password=password)
transport = client.get_transport()
transport.set_keepalive(2)
print(support.remote(transport, ["ls", "-la",])[1])
# @cli.add_logging()
# def two(*args, **kwargs):
# print("two", args, kwargs)
#
# @cli.add_logging(1, b=2)
# def three(*args, **kwargs):
# print("three", args, kwargs)
if __name__ == '__main__':
main()
| 674
| 0
| 69
|
487ca8a68ee1d0de5cfebc5f311c950769200bd2
| 2,777
|
py
|
Python
|
maths/3n_plus_1.py
|
zhaiyu-cn/Python
|
30de2de32983524cbdacdc4b8949f381ca2ca347
|
[
"MIT"
] | null | null | null |
maths/3n_plus_1.py
|
zhaiyu-cn/Python
|
30de2de32983524cbdacdc4b8949f381ca2ca347
|
[
"MIT"
] | null | null | null |
maths/3n_plus_1.py
|
zhaiyu-cn/Python
|
30de2de32983524cbdacdc4b8949f381ca2ca347
|
[
"MIT"
] | null | null | null |
from typing import List, Tuple
#fenzhi1xiugai
def n31(a: int) -> Tuple[List[int], int]:
"""
Returns the Collatz sequence and its length of any positive integer.
>>> n31(4)
([4, 2, 1], 3)
"""
if not isinstance(a, int):
raise TypeError("Must be int, not {}".format(type(a).__name__))
if a < 1:
raise ValueError(f"Given integer must be greater than 1, not {a}")
path = [a]
while a != 1:
if a % 2 == 0:
a = a // 2
else:
a = 3 * a + 1
path += [a]
return path, len(path)
def test_n31():
"""
>>> test_n31()
"""
assert n31(4) == ([4, 2, 1], 3)
assert n31(11) == ([11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1], 15)
assert n31(31) == (
[
31,
94,
47,
142,
71,
214,
107,
322,
161,
484,
242,
121,
364,
182,
91,
274,
137,
412,
206,
103,
310,
155,
466,
233,
700,
350,
175,
526,
263,
790,
395,
1186,
593,
1780,
890,
445,
1336,
668,
334,
167,
502,
251,
754,
377,
1132,
566,
283,
850,
425,
1276,
638,
319,
958,
479,
1438,
719,
2158,
1079,
3238,
1619,
4858,
2429,
7288,
3644,
1822,
911,
2734,
1367,
4102,
2051,
6154,
3077,
9232,
4616,
2308,
1154,
577,
1732,
866,
433,
1300,
650,
325,
976,
488,
244,
122,
61,
184,
92,
46,
23,
70,
35,
106,
53,
160,
80,
40,
20,
10,
5,
16,
8,
4,
2,
1,
],
107,
)
if __name__ == "__main__":
num = 4
path, length = n31(num)
print(f"The Collatz sequence of {num} took {length} steps. \nPath: {path}")
| 18.513333
| 83
| 0.28556
|
from typing import List, Tuple
#fenzhi1xiugai
def n31(a: int) -> Tuple[List[int], int]:
"""
Returns the Collatz sequence and its length of any positive integer.
>>> n31(4)
([4, 2, 1], 3)
"""
if not isinstance(a, int):
raise TypeError("Must be int, not {}".format(type(a).__name__))
if a < 1:
raise ValueError(f"Given integer must be greater than 1, not {a}")
path = [a]
while a != 1:
if a % 2 == 0:
a = a // 2
else:
a = 3 * a + 1
path += [a]
return path, len(path)
def test_n31():
"""
>>> test_n31()
"""
assert n31(4) == ([4, 2, 1], 3)
assert n31(11) == ([11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1], 15)
assert n31(31) == (
[
31,
94,
47,
142,
71,
214,
107,
322,
161,
484,
242,
121,
364,
182,
91,
274,
137,
412,
206,
103,
310,
155,
466,
233,
700,
350,
175,
526,
263,
790,
395,
1186,
593,
1780,
890,
445,
1336,
668,
334,
167,
502,
251,
754,
377,
1132,
566,
283,
850,
425,
1276,
638,
319,
958,
479,
1438,
719,
2158,
1079,
3238,
1619,
4858,
2429,
7288,
3644,
1822,
911,
2734,
1367,
4102,
2051,
6154,
3077,
9232,
4616,
2308,
1154,
577,
1732,
866,
433,
1300,
650,
325,
976,
488,
244,
122,
61,
184,
92,
46,
23,
70,
35,
106,
53,
160,
80,
40,
20,
10,
5,
16,
8,
4,
2,
1,
],
107,
)
if __name__ == "__main__":
num = 4
path, length = n31(num)
print(f"The Collatz sequence of {num} took {length} steps. \nPath: {path}")
| 0
| 0
| 0
|
59c21007badf4ffd4068a898ea5b167e3df042ed
| 1,949
|
py
|
Python
|
trees/preprocessing.py
|
tejasvaidhyadev/PCFG_rules
|
3138bb2aa7ea098478b4b318b6b1b1872ec6ecfb
|
[
"MIT"
] | 1
|
2021-04-18T16:35:47.000Z
|
2021-04-18T16:35:47.000Z
|
trees/preprocessing.py
|
tejasvaidhyadev/PCFG_rules
|
3138bb2aa7ea098478b4b318b6b1b1872ec6ecfb
|
[
"MIT"
] | null | null | null |
trees/preprocessing.py
|
tejasvaidhyadev/PCFG_rules
|
3138bb2aa7ea098478b4b318b6b1b1872ec6ecfb
|
[
"MIT"
] | null | null | null |
from nltk import Tree
import nltk
import argparse
import string
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument('--infile', default='./ptb-collins.merge.txt', help="preprocessing tree")
#parser.add_argument('--seed', type=int, default=2004, help="random seed for initialization")
parser.add_argument('--outfile', default='./processed_ptb-collins.merge1.txt', help="file containing logs")
if (__name__ == "__main__"):
args = parser.parse_args()
trees_file = open(args.infile, 'r')
lines = trees_file.readlines()
list_lines = [line for line in lines]
trees_file.close()
processed_lines = []
for list_line in list_lines:
ls=[]
for tokens in list_line.split():
if tokens[0] == "(":
try:
if tokens[1] in string.ascii_letters:
tokens = rmsym('-',tokens)
tokens = rmsym('=', tokens)
tokens = rmsym('|', tokens)
tokens = rmsym('$', tokens)
tokens = rmsym('#', tokens)
tokens = rmsym('+', tokens)
except:
print("some bugs")
ls.append(tokens)
processed_line = " ".join(ls)
processed_lines.append(processed_line)
f=open(args.outfile,'w')
for ele in processed_lines:
f.write(ele+'\n')
f.close()
print("Pre-processing is done")
| 33.033898
| 107
| 0.554643
|
from nltk import Tree
import nltk
import argparse
import string
import pandas as pd
def get_brac_ind(word):
for i, brac in enumerate(word):
if brac ==')':
return(i)
def rmsym(sym, tokens):
if sym in tokens:
if ')'in tokens:
index_barc = get_brac_ind(tokens.split(sym,1)[1])
tokens = tokens.split(sym,1)[0] + tokens.split(sym,1)[1][index_barc:]
return tokens
else:
tokens = tokens.split(sym, 1)[0]
return tokens
return tokens
parser = argparse.ArgumentParser()
parser.add_argument('--infile', default='./ptb-collins.merge.txt', help="preprocessing tree")
#parser.add_argument('--seed', type=int, default=2004, help="random seed for initialization")
parser.add_argument('--outfile', default='./processed_ptb-collins.merge1.txt', help="file containing logs")
if (__name__ == "__main__"):
args = parser.parse_args()
trees_file = open(args.infile, 'r')
lines = trees_file.readlines()
list_lines = [line for line in lines]
trees_file.close()
processed_lines = []
for list_line in list_lines:
ls=[]
for tokens in list_line.split():
if tokens[0] == "(":
try:
if tokens[1] in string.ascii_letters:
tokens = rmsym('-',tokens)
tokens = rmsym('=', tokens)
tokens = rmsym('|', tokens)
tokens = rmsym('$', tokens)
tokens = rmsym('#', tokens)
tokens = rmsym('+', tokens)
except:
print("some bugs")
ls.append(tokens)
processed_line = " ".join(ls)
processed_lines.append(processed_line)
f=open(args.outfile,'w')
for ele in processed_lines:
f.write(ele+'\n')
f.close()
print("Pre-processing is done")
| 408
| 0
| 45
|
c568f9e20ad8595c2d19bead8e9a491e14ea46ac
| 6,792
|
py
|
Python
|
fedjax/aggregators/walsh_hadamard.py
|
dedsec-9/fedjax
|
1f577ab4d9aa494d2a73ed541efbb92eab350551
|
[
"Apache-2.0"
] | null | null | null |
fedjax/aggregators/walsh_hadamard.py
|
dedsec-9/fedjax
|
1f577ab4d9aa494d2a73ed541efbb92eab350551
|
[
"Apache-2.0"
] | null | null | null |
fedjax/aggregators/walsh_hadamard.py
|
dedsec-9/fedjax
|
1f577ab4d9aa494d2a73ed541efbb92eab350551
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Efficient Walsh-Hadamard transform in JAX."""
import math
from typing import Tuple, Union
import jax
import jax.numpy as jnp
import scipy
from fedjax.core.typing import PRNGKey, Params
@jax.jit
def walsh_hadamard_transform(
x: jnp.ndarray,
small_n: int = 2**7,
precision: Union[jax.lax.Precision, str] = 'highest') -> jnp.ndarray:
"""Efficient Walsh-Hadamard transform in JAX.
An accelerator friendly O(n log n) Walsh-Hadamard transform.
Args:
x: A vector. len(x) must be a power of 2.
small_n: Size to break x into. The default value is tuned on TPUv3. Must be
a power of 2 and > 1.
precision: Precision for general dot products.
Returns:
Transformed vector.
"""
if small_n <= 1:
raise ValueError(f'small_n must be > 1, got {small_n}')
# Let
# - A ⊗ B be the Kronecker product of A and B;
# - flat(X) be the vector obtained by flattening the rows of X of shape
# [M, N].
#
# We can show the following:
#
# (A ⊗ B^T) flat(X) = flat(A X B)
#
# Note that the Hadamard matrix H_{2^M 2^N} = H_{2^M} ⊗ H_{2^N}, and
# Hadamard matrices are symmetrical. Therefore, for a [2^M, 2^N] matrix X,
#
# H_{2^M 2^N} flat(X) = flat(H_{2^M} X H_{2^N})
#
# The idea can be generalized by breaking a Hadamard matrix into the Kronecker
# product of many small Hadamard matrices, and reshaping the vector input into
# a many-dimensional array, and running einsum on each dimension.
#
# Let the input vector be of length D, because our "small" Hadamard matrices
# are of size at most small_n x small_n, a constant, each einsum is O(D). We
# need to run log D einsums, thus the overall time complexity is O(D log D),
# same as the classical divide and conquer algorithm.
#
# However, thanks to efficient software & hardware implementations of einsum,
# we can often achieve far better speed than the classical algorithm on
# accelerators, at the same time producing a far simpler XLA HLO graph.
n = len(x)
# Find out the shape to reshape x into.
shape = []
while n > 1:
shape.append(min(n, small_n))
n //= small_n
shape.reverse()
num_dims = len(shape)
if num_dims + 1 >= 10:
# We will run out of dimension names in einsums.
raise ValueError(f'small_n={small_n} is too small for input size {n}')
y = x.reshape(shape)
# Hadamard matrices we will need.
hadamards = dict((d, hadamard_matrix(d, x.dtype)) for d in set(shape))
# einsum on each dimension.
for i, d in enumerate(shape):
y_dims = ''.join(str(j) for j in range(num_dims))
h_dims = f'{i}{num_dims + 1}'
out_dims = y_dims.replace(str(i), str(num_dims + 1), 1)
operands = f'{y_dims},{h_dims}->{out_dims}'
y = jnp.einsum(operands, y, hadamards[d], precision=precision)
return y.flatten()
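# --- Illustrative check (editor's addition, not part of fedjax) ---------------
# The comment block above argues that one einsum per "small" Hadamard factor
# reproduces the dense transform H @ x. The sketch below is one way to verify
# that claim by hand; it is never called at import time.
def _demo_walsh_hadamard_matches_dense(n: int = 2**8) -> bool:
  """Editor's sketch: True iff the fast transform matches the dense H @ x."""
  x = jax.random.normal(jax.random.PRNGKey(0), (n,), dtype=jnp.float32)
  dense = hadamard_matrix(n, jnp.float32) @ x  # O(n^2) reference product
  fast = walsh_hadamard_transform(x)  # O(n log n) blocked einsums
  return bool(jnp.allclose(dense, fast, rtol=1e-3, atol=1e-3))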
def hadamard_matrix(n: int, dtype: jnp.dtype) -> jnp.ndarray:
"""Generates the Hadamard matrix.
Because there are JAX dtypes not supported in numpy, the equivalent function
in scipy can't be used directly.
Args:
n: Number of rows/columns of the Hadamard matrix. Must be a power of 2.
dtype: Output dtype.
Returns:
The Hadamard matrix of the given size and type.
"""
return jnp.array(scipy.linalg.hadamard(n), dtype)
@jax.jit
def structured_rotation(x: jnp.ndarray,
rng: PRNGKey) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Computes HD(x)/sqrt(d).
  Here H is the Walsh-Hadamard matrix, d is the dimensionality of x, and D
is a random Rademacher matrix.
Args:
x: array to be rotated.
rng: PRNGKey used for rotation.
Returns:
Rotated matrix and the original shape.
"""
x_flat = jnp.reshape(x, [-1])
d = 2**math.ceil(math.log2(x_flat.size))
w = jnp.pad(x_flat, (0, d - x.size))
rademacher = jax.random.rademacher(rng, w.shape)
return walsh_hadamard_transform(w * rademacher) / jnp.sqrt(d), jnp.array(
x.shape)
def inverse_structured_rotation(x: jnp.ndarray, rng: PRNGKey,
original_shape: jnp.ndarray) -> jnp.ndarray:
"""Computes (HD)^(-1)(x)/sqrt(d).
  Here H is the Walsh-Hadamard matrix, d is the dimensionality of x, and D
is a random Rademacher matrix.
Args:
x: rotated array, which needs to be unrotated.
rng: PRNGKey used for rotation.
original_shape: desired shape of the output.
Returns:
Output of (HD)^(-1)(x)/sqrt(d) with appropriate shape.
"""
rademacher = jax.random.rademacher(rng, x.shape)
w = walsh_hadamard_transform(x) * rademacher / jnp.sqrt(x.size)
original_size = jnp.prod(original_shape)
y_flat = w.take(jnp.arange(original_size))
return jnp.reshape(y_flat, original_shape)
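# --- Illustrative check (editor's addition, not part of fedjax) ---------------
# The docstrings above describe a random rotation and its inverse. A minimal
# round-trip sketch, reusing the same PRNGKey for both directions:
def _demo_rotation_round_trip() -> bool:
  """Editor's sketch: rotating and then un-rotating recovers the input array."""
  rng = jax.random.PRNGKey(0)
  x = jnp.arange(6, dtype=jnp.float32).reshape(2, 3)  # size 6 is padded to 8
  y, shape = structured_rotation(x, rng)
  x_back = inverse_structured_rotation(y, rng, shape)
  return bool(jnp.allclose(x, x_back, atol=1e-4))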
def structured_rotation_pytree(params: Params,
rng: PRNGKey) -> Tuple[Params, Params]:
"""Applies strucuted rotation to all elements of the pytree.
Args:
params: pytree to be rotated.
rng: jax random key.
Returns:
Pytrees of rotated arrays and shapes.
"""
leaves, tree_def = jax.tree_util.tree_flatten(params)
rngs = jax.random.split(rng, len(leaves))
rotated_leaves = []
shapes = []
for l, r in zip(leaves, rngs):
leaf, shape = structured_rotation(l, r)
rotated_leaves.append(leaf)
shapes.append(shape)
rotated_pytree = jax.tree_util.tree_unflatten(tree_def, rotated_leaves)
original_shapes_pytree = jax.tree_util.tree_unflatten(tree_def, shapes)
return rotated_pytree, original_shapes_pytree
def inverse_structured_rotation_pytree(params: Params, rng: PRNGKey,
shapes: Params) -> Params:
"""Applies inverse structured rotation to all elements of the pytree.
Args:
params: pytree to be rotated.
rng: jax random key.
shapes: pytree of shapes to be rotated.
Returns:
Inversely rotated pytree whose arrays are specified by input shapes.
"""
leaves, tree_def = jax.tree_util.tree_flatten(params)
leaves_shapes, _ = jax.tree_util.tree_flatten(shapes)
rngs = jax.random.split(rng, len(leaves))
new_leaves = []
for l, r, shape in zip(leaves, rngs, leaves_shapes):
new_leaves.append(inverse_structured_rotation(l, r, shape))
return jax.tree_util.tree_unflatten(tree_def, new_leaves)
| 33.294118
| 80
| 0.687574
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Efficient Walsh-Hadamard transform in JAX."""
import math
from typing import Tuple, Union
import jax
import jax.numpy as jnp
import scipy
from fedjax.core.typing import PRNGKey, Params
@jax.jit
def walsh_hadamard_transform(
x: jnp.ndarray,
small_n: int = 2**7,
precision: Union[jax.lax.Precision, str] = 'highest') -> jnp.ndarray:
"""Efficient Walsh-Hadamard transform in JAX.
An accelerator friendly O(n log n) Walsh-Hadamard transform.
Args:
x: A vector. len(x) must be a power of 2.
small_n: Size to break x into. The default value is tuned on TPUv3. Must be
a power of 2 and > 1.
precision: Precision for general dot products.
Returns:
Transformed vector.
"""
if small_n <= 1:
raise ValueError(f'small_n must be > 1, got {small_n}')
# Let
# - A ⊗ B be the Kronecker product of A and B;
# - flat(X) be the vector obtained by flattening the rows of X of shape
# [M, N].
#
# We can show the following:
#
# (A ⊗ B^T) flat(X) = flat(A X B)
#
# Note that the Hadamard matrix H_{2^M 2^N} = H_{2^M} ⊗ H_{2^N}, and
# Hadamard matrices are symmetrical. Therefore, for a [2^M, 2^N] matrix X,
#
# H_{2^M 2^N} flat(X) = flat(H_{2^M} X H_{2^N})
#
# The idea can be generalized by breaking a Hadamard matrix into the Kronecker
# product of many small Hadamard matrices, and reshaping the vector input into
# a many-dimensional array, and running einsum on each dimension.
#
# Let the input vector be of length D, because our "small" Hadamard matrices
# are of size at most small_n x small_n, a constant, each einsum is O(D). We
# need to run log D einsums, thus the overall time complexity is O(D log D),
# same as the classical divide and conquer algorithm.
#
# However, thanks to efficient software & hardware implementations of einsum,
# we can often achieve far better speed than the classical algorithm on
# accelerators, at the same time producing a far simpler XLA HLO graph.
n = len(x)
# Find out the shape to reshape x into.
shape = []
while n > 1:
shape.append(min(n, small_n))
n //= small_n
shape.reverse()
num_dims = len(shape)
if num_dims + 1 >= 10:
# We will run out of dimension names in einsums.
raise ValueError(f'small_n={small_n} is too small for input size {n}')
y = x.reshape(shape)
# Hadamard matrices we will need.
hadamards = dict((d, hadamard_matrix(d, x.dtype)) for d in set(shape))
# einsum on each dimension.
for i, d in enumerate(shape):
y_dims = ''.join(str(j) for j in range(num_dims))
h_dims = f'{i}{num_dims + 1}'
out_dims = y_dims.replace(str(i), str(num_dims + 1), 1)
operands = f'{y_dims},{h_dims}->{out_dims}'
y = jnp.einsum(operands, y, hadamards[d], precision=precision)
return y.flatten()
def hadamard_matrix(n: int, dtype: jnp.dtype) -> jnp.ndarray:
"""Generates the Hadamard matrix.
Because there are JAX dtypes not supported in numpy, the equivalent function
in scipy can't be used directly.
Args:
n: Number of rows/columns of the Hadamard matrix. Must be a power of 2.
dtype: Output dtype.
Returns:
The Hadamard matrix of the given size and type.
"""
return jnp.array(scipy.linalg.hadamard(n), dtype)
@jax.jit
def structured_rotation(x: jnp.ndarray,
rng: PRNGKey) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Computes HD(x)/sqrt(d).
  Here H is the Walsh-Hadamard matrix, d is the dimensionality of x, and D
is a random Rademacher matrix.
Args:
x: array to be rotated.
rng: PRNGKey used for rotation.
Returns:
Rotated matrix and the original shape.
"""
x_flat = jnp.reshape(x, [-1])
d = 2**math.ceil(math.log2(x_flat.size))
w = jnp.pad(x_flat, (0, d - x.size))
rademacher = jax.random.rademacher(rng, w.shape)
return walsh_hadamard_transform(w * rademacher) / jnp.sqrt(d), jnp.array(
x.shape)
def inverse_structured_rotation(x: jnp.ndarray, rng: PRNGKey,
original_shape: jnp.ndarray) -> jnp.ndarray:
"""Computes (HD)^(-1)(x)/sqrt(d).
  Here H is the Walsh-Hadamard matrix, d is the dimensionality of x, and D
is a random Rademacher matrix.
Args:
x: rotated array, which needs to be unrotated.
rng: PRNGKey used for rotation.
original_shape: desired shape of the output.
Returns:
Output of (HD)^(-1)(x)/sqrt(d) with appropriate shape.
"""
rademacher = jax.random.rademacher(rng, x.shape)
w = walsh_hadamard_transform(x) * rademacher / jnp.sqrt(x.size)
original_size = jnp.prod(original_shape)
y_flat = w.take(jnp.arange(original_size))
return jnp.reshape(y_flat, original_shape)
def structured_rotation_pytree(params: Params,
rng: PRNGKey) -> Tuple[Params, Params]:
"""Applies strucuted rotation to all elements of the pytree.
Args:
params: pytree to be rotated.
rng: jax random key.
Returns:
Pytrees of rotated arrays and shapes.
"""
leaves, tree_def = jax.tree_util.tree_flatten(params)
rngs = jax.random.split(rng, len(leaves))
rotated_leaves = []
shapes = []
for l, r in zip(leaves, rngs):
leaf, shape = structured_rotation(l, r)
rotated_leaves.append(leaf)
shapes.append(shape)
rotated_pytree = jax.tree_util.tree_unflatten(tree_def, rotated_leaves)
original_shapes_pytree = jax.tree_util.tree_unflatten(tree_def, shapes)
return rotated_pytree, original_shapes_pytree
def inverse_structured_rotation_pytree(params: Params, rng: PRNGKey,
shapes: Params) -> Params:
"""Applies inverse structured rotation to all elements of the pytree.
Args:
params: pytree to be rotated.
rng: jax random key.
shapes: pytree of shapes to be rotated.
Returns:
Inversely rotated pytree whose arrays are specified by input shapes.
"""
leaves, tree_def = jax.tree_util.tree_flatten(params)
leaves_shapes, _ = jax.tree_util.tree_flatten(shapes)
rngs = jax.random.split(rng, len(leaves))
new_leaves = []
for l, r, shape in zip(leaves, rngs, leaves_shapes):
new_leaves.append(inverse_structured_rotation(l, r, shape))
return jax.tree_util.tree_unflatten(tree_def, new_leaves)
| 0
| 0
| 0
|
4638dc42bbd3edbdee5a59f874300563a19d6c35
| 939
|
py
|
Python
|
sweeper/tests/test_scheduler.py
|
dominoFire/sweeper
|
26c5497b81c8d0c50671f8ab75c1cf5c4c8191c9
|
[
"MIT"
] | null | null | null |
sweeper/tests/test_scheduler.py
|
dominoFire/sweeper
|
26c5497b81c8d0c50671f8ab75c1cf5c4c8191c9
|
[
"MIT"
] | null | null | null |
sweeper/tests/test_scheduler.py
|
dominoFire/sweeper
|
26c5497b81c8d0c50671f8ab75c1cf5c4c8191c9
|
[
"MIT"
] | null | null | null |
import sweeper.utils as utils
import unittest
from pprint import PrettyPrinter
from scheduler.manager import create_schedule_plan
from sweeper import Workflow
pp = PrettyPrinter(indent=1)
if __name__ == '__main__':
unittest.main()
| 33.535714
| 96
| 0.759318
|
import sweeper.utils as utils
import unittest
from pprint import PrettyPrinter
from scheduler.manager import create_schedule_plan
from sweeper import Workflow
pp = PrettyPrinter(indent=1)
class SchedulerTest(unittest.TestCase):
def test(self):
wf = Workflow.read_workflow('examples/weird/workflow.yaml')
sched_plan = create_schedule_plan(wf)
utils.save_gantt_chart_data(sched_plan.schedule_mapping_list, filename='weird.csv')
wf = Workflow.read_workflow('examples/multicore/workflow.yaml')
sched_plan = create_schedule_plan(wf)
utils.save_gantt_chart_data(sched_plan.schedule_mapping_list, filename='multicore.csv')
wf = Workflow.read_workflow('examples/multilayer/workflow.yaml')
sched_plan = create_schedule_plan(wf)
utils.save_gantt_chart_data(sched_plan.schedule_mapping_list, filename='multilayer.csv')
if __name__ == '__main__':
unittest.main()
| 632
| 18
| 49
|
3602939acb4e7b232af47d615a2aed96778a8add
| 167
|
py
|
Python
|
REST API - Tensorflow/config/config.py
|
mauryas/DataScienceTasks
|
78cd4c47067101128de668a641b999d6fb406ab8
|
[
"MIT"
] | null | null | null |
REST API - Tensorflow/config/config.py
|
mauryas/DataScienceTasks
|
78cd4c47067101128de668a641b999d6fb406ab8
|
[
"MIT"
] | null | null | null |
REST API - Tensorflow/config/config.py
|
mauryas/DataScienceTasks
|
78cd4c47067101128de668a641b999d6fb406ab8
|
[
"MIT"
] | null | null | null |
BATCH_SIZE = 128
NUM_CLASSES = 10
EPOCHS = 20
# input image dimensions
IMG_ROWS, IMG_COLS = 28, 28
# set if false if you want to use trained weights
TO_TRAIN = True
| 16.7
| 49
| 0.742515
|
BATCH_SIZE = 128
NUM_CLASSES = 10
EPOCHS = 20
# input image dimensions
IMG_ROWS, IMG_COLS = 28, 28
# set if false if you want to use trained weights
TO_TRAIN = True
| 0
| 0
| 0
|
047a54b42de14b71ab0ea40349265e16014ece76
| 1,782
|
py
|
Python
|
exam/test_mcmc.py
|
odell/pyazr
|
3ea6a1f28b034fb56527b1d5f33b544ea3e53893
|
[
"MIT"
] | null | null | null |
exam/test_mcmc.py
|
odell/pyazr
|
3ea6a1f28b034fb56527b1d5f33b544ea3e53893
|
[
"MIT"
] | null | null | null |
exam/test_mcmc.py
|
odell/pyazr
|
3ea6a1f28b034fb56527b1d5f33b544ea3e53893
|
[
"MIT"
] | null | null | null |
'''
Calculates the 13C(a,n) cross section
"Free" parameters:
* partial width BGP (1/2+, neutron)
* level energy (3/2+)
* partial width (3/2+, neutron)
* partial width (3/2+, alpha)
'''
import os
import sys
from multiprocessing import Pool
import emcee
import numpy as np
from scipy import stats
import model
########################################
# We'll set up the sampler and get it started.
nw = 4*model.nd # number of walkers = 4 * number of sampled parameters
# Pick a point (theta) in parameter space around which we'll start each walker.
theta0 = [1.87, 2.3689, 35000, -0.61, 3.5002, 57500, -0.67, 3.5451, 45200,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
# Each walkers needs its own starting position.
p0 = np.zeros((nw, model.nd))
for i in range(nw):
mu = theta0
sig = np.abs(theta0) * 0.01
p0[i, :] = stats.norm(mu, sig).rvs()
# We'll store the chain in test_mcmc.h5. (See emcee Backends documentation.)
backend = emcee.backends.HDFBackend('test_mcmc.h5')
backend.reset(nw, model.nd)
nsteps = 1000 # How many steps should each walker take?
nthin = 10 # How often should the walker save a step?
nprocs = 4 # How many Python processes do you want to allocate?
# AZURE2 and emcee are both parallelized. We'll restrict AZURE2 to 1 thread to
# simplify things.
os.environ['OMP_NUM_THREADS'] = '1'
# emcee allows the user to specify the way the ensemble generates proposals.
moves = [(emcee.moves.DESnookerMove(), 0.8), (emcee.moves.DEMove(), 0.2)]
with Pool(processes=nprocs) as pool:
sampler = emcee.EnsembleSampler(nw, model.nd, model.lnP, moves=moves, pool=pool,
backend=backend)
state = sampler.run_mcmc(p0, nsteps, thin_by=nthin, progress=True, tune=True)
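# --- Editor's note (sketch, assuming emcee 3.x) -------------------------------
# The chain stored in test_mcmc.h5 can be inspected later without re-running the
# sampler, for example:
#     reader = emcee.backends.HDFBackend('test_mcmc.h5')
#     samples = reader.get_chain(discard=100, flat=True)  # (steps*walkers, model.nd)
# `discard` drops burn-in steps and `flat=True` merges all walkers into one array.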
| 33.622642
| 84
| 0.665544
|
'''
Calculates the 13C(a,n) cross section
"Free" parameters:
* partial width BGP (1/2+, neutron)
* level energy (3/2+)
* partial width (3/2+, neutron)
* partial width (3/2+, alpha)
'''
import os
import sys
from multiprocessing import Pool
import emcee
import numpy as np
from scipy import stats
import model
########################################
# We'll set up the sampler and get it started.
nw = 4*model.nd # number of walkers = 4 * number of sampled parameters
# Pick a point (theta) in parameter space around which we'll start each walker.
theta0 = [1.87, 2.3689, 35000, -0.61, 3.5002, 57500, -0.67, 3.5451, 45200,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
# Each walkers needs its own starting position.
p0 = np.zeros((nw, model.nd))
for i in range(nw):
mu = theta0
sig = np.abs(theta0) * 0.01
p0[i, :] = stats.norm(mu, sig).rvs()
# We'll store the chain in test_mcmc.h5. (See emcee Backends documentation.)
backend = emcee.backends.HDFBackend('test_mcmc.h5')
backend.reset(nw, model.nd)
nsteps = 1000 # How many steps should each walker take?
nthin = 10 # How often should the walker save a step?
nprocs = 4 # How many Python processes do you want to allocate?
# AZURE2 and emcee are both parallelized. We'll restrict AZURE2 to 1 thread to
# simplify things.
os.environ['OMP_NUM_THREADS'] = '1'
# emcee allows the user to specify the way the ensemble generates proposals.
moves = [(emcee.moves.DESnookerMove(), 0.8), (emcee.moves.DEMove(), 0.2)]
with Pool(processes=nprocs) as pool:
sampler = emcee.EnsembleSampler(nw, model.nd, model.lnP, moves=moves, pool=pool,
backend=backend)
state = sampler.run_mcmc(p0, nsteps, thin_by=nthin, progress=True, tune=True)
| 0
| 0
| 0
|
40a3cc3ff516f442d1318816c63a4b6b1747419f
| 886
|
py
|
Python
|
tests/acceptance/test_FuzzerHeaders.py
|
AntonKuzminRussia/web-scout
|
5b8fed2c5917c9ecc210052703a65f1204f4b347
|
[
"MIT"
] | 6
|
2017-10-11T18:56:05.000Z
|
2019-09-29T21:45:05.000Z
|
tests/acceptance/test_FuzzerHeaders.py
|
AntonKuzminRussia/web-scout
|
5b8fed2c5917c9ecc210052703a65f1204f4b347
|
[
"MIT"
] | 3
|
2021-03-31T19:17:30.000Z
|
2021-12-13T20:16:23.000Z
|
tests/acceptance/test_FuzzerHeaders.py
|
AntonKuzminRussia/web-scout
|
5b8fed2c5917c9ecc210052703a65f1204f4b347
|
[
"MIT"
] | null | null | null |
import subprocess
import os
import time
import re
runPath = os.path.realpath(os.path.dirname(os.path.abspath(__file__)) + '/../../')
| 27.6875
| 82
| 0.602709
|
import subprocess
import os
import time
import re
runPath = os.path.realpath(os.path.dirname(os.path.abspath(__file__)) + '/../../')
class Test_FuzzerHeaders(object):
dict_path = '/tmp/wstest.dict'
headers_file_path = '/tmp/wstest.headers_file'
conf_file_path = "/tmp/wstest.conf_file"
def get_results_count(self, output):
return len(re.findall('^(\t.+)', output, re.M))
def test_run(self):
fh = open(self.dict_path, 'w')
fh.write("http://wsat.local/fuzzer-headers.php")
fh.close()
output = subprocess.check_output([
'./ws.py',
'FuzzerHeaders',
'--urls-file',
self.dict_path,
])
print(output)
output = output.decode('utf8')
assert self.get_results_count(output) == 1
assert output.count("http://wsat.local/fuzzer-headers.php") == 1
| 531
| 197
| 23
|
938061deb839871bf82a14d5f17af8030ecc81be
| 1,070
|
py
|
Python
|
benchmarks/test_memory.py
|
TheRakeshPurohit/wasmer-python
|
2375974d9dc50a2caf29fdd9e07d49fd94537e03
|
[
"MIT"
] | 900
|
2019-04-11T01:52:10.000Z
|
2020-09-02T11:09:14.000Z
|
benchmarks/test_memory.py
|
TheRakeshPurohit/wasmer-python
|
2375974d9dc50a2caf29fdd9e07d49fd94537e03
|
[
"MIT"
] | 172
|
2019-04-15T18:04:55.000Z
|
2020-09-01T15:20:06.000Z
|
benchmarks/test_memory.py
|
TheRakeshPurohit/wasmer-python
|
2375974d9dc50a2caf29fdd9e07d49fd94537e03
|
[
"MIT"
] | 28
|
2019-04-11T02:49:04.000Z
|
2020-08-27T09:47:49.000Z
|
from wasmer import engine, wat2wasm, Store, Module, Instance
from wasmer_compiler_cranelift import Compiler
TEST_BYTES = wat2wasm(
"""
(module
(memory 16)
(export "memory" (memory 0)))
"""
)
| 24.318182
| 60
| 0.686916
|
from wasmer import engine, wat2wasm, Store, Module, Instance
from wasmer_compiler_cranelift import Compiler
TEST_BYTES = wat2wasm(
"""
(module
(memory 16)
(export "memory" (memory 0)))
"""
)
def test_benchmark_memory_view_int8_get(benchmark):
store = Store(engine.JIT(Compiler))
module = Module(store, TEST_BYTES)
instance = Instance(module)
memory = instance.exports.memory.uint8_view()
def bench():
_ = memory[0]
benchmark(bench)
def test_benchmark_memory_view_memoryview_get(benchmark):
store = Store(engine.JIT(Compiler))
module = Module(store, TEST_BYTES)
instance = Instance(module)
memory = memoryview(instance.exports.memory.buffer)
def bench():
_ = memory[0]
benchmark(bench)
def test_benchmark_memory_view_bytearray_get(benchmark):
store = Store(engine.JIT(Compiler))
module = Module(store, TEST_BYTES)
instance = Instance(module)
memory = bytearray(instance.exports.memory.buffer)
def bench():
_ = memory[0]
benchmark(bench)
| 781
| 0
| 69
|
cde51dd207e4af6daac2687ccc96ff083f646eea
| 84
|
py
|
Python
|
min_cost.py
|
shahjaidev/xbin_gemini_baselines
|
9476ed3d3877f911368b9156ca1d8801a5fc7307
|
[
"BSD-3-Clause"
] | null | null | null |
min_cost.py
|
shahjaidev/xbin_gemini_baselines
|
9476ed3d3877f911368b9156ca1d8801a5fc7307
|
[
"BSD-3-Clause"
] | 4
|
2020-09-25T22:40:36.000Z
|
2022-02-09T23:38:58.000Z
|
min_cost.py
|
shahjaidev/xbin_gemini_baselines
|
9476ed3d3877f911368b9156ca1d8801a5fc7307
|
[
"BSD-3-Clause"
] | null | null | null |
# Dynamic-programming recurrence (pseudocode sketch; A, f, i, j, N are undefined here):
# for p in range(N):
#     A[p] = max(A[i] + A[i:j] + f(j, p) + f(i, p)) + f(1, p)
| 7.636364
| 52
| 0.369048
|
# Dynamic-programming recurrence (pseudocode sketch; A, f, i, j, N are undefined here):
# for p in range(N):
#     A[p] = max(A[i] + A[i:j] + f(j, p) + f(i, p)) + f(1, p)
| 0
| 0
| 0
|
54a1ba6b641654089ce1618dad8a300fe120c52f
| 586
|
py
|
Python
|
remove.py
|
bopde/Texter
|
5a23a9b8c14a5abb12eac70fabe0e9c6d193ccba
|
[
"MIT"
] | null | null | null |
remove.py
|
bopde/Texter
|
5a23a9b8c14a5abb12eac70fabe0e9c6d193ccba
|
[
"MIT"
] | 1
|
2020-09-06T03:53:52.000Z
|
2020-09-06T03:53:52.000Z
|
remove.py
|
bopde/Texter
|
5a23a9b8c14a5abb12eac70fabe0e9c6d193ccba
|
[
"MIT"
] | 1
|
2020-09-06T03:51:44.000Z
|
2020-09-06T03:51:44.000Z
|
if __name__ == '__main__':
remove_lines()
print ("done")
| 25.478261
| 68
| 0.607509
|
def count_lines(file='no_line_script.txt'):
    # Count the lines in a text file (0 for an empty file).
    index = -1
    with open(file, 'r') as file:
        for index, line in enumerate(file):
            pass
    return index + 1
def remove_lines(original='mormon.txt', empty='no_line_script.txt'):
    # Copy `original` into `empty`, dropping blank lines (newline or spaces only).
    with open(original, 'r') as script, open(empty, 'w') as empty_script:
        script_length = count_lines(original)
        for i in range(script_length):
            line = script.readline()
            if line.replace(" ", "") not in ['\n', '\r\n']:
                empty_script.write(line)
if __name__ == '__main__':
remove_lines()
print ("done")
| 473
| 0
| 46
|
7d096c77e2bec49b6480167c88cbec8fe681cdcf
| 1,689
|
py
|
Python
|
mushroom_rl/utils/pybullet/joints_helper.py
|
PuzeLiu/mushroom-rl
|
99942b425e66b4ddcc26009d7105dde23841e95d
|
[
"MIT"
] | 344
|
2020-01-10T09:45:02.000Z
|
2022-03-30T09:48:28.000Z
|
mushroom_rl/utils/pybullet/joints_helper.py
|
PuzeLiu/mushroom-rl
|
99942b425e66b4ddcc26009d7105dde23841e95d
|
[
"MIT"
] | 44
|
2020-01-23T03:00:56.000Z
|
2022-03-25T17:14:22.000Z
|
mushroom_rl/utils/pybullet/joints_helper.py
|
PuzeLiu/mushroom-rl
|
99942b425e66b4ddcc26009d7105dde23841e95d
|
[
"MIT"
] | 93
|
2020-01-10T21:17:58.000Z
|
2022-03-31T17:58:52.000Z
|
import numpy as np
from .observation import PyBulletObservationType
| 37.533333
| 69
| 0.680876
|
import numpy as np
from .observation import PyBulletObservationType
class JointsHelper(object):
def __init__(self, client, indexer, observation_spec):
self._joint_pos_indexes = list()
self._joint_velocity_indexes = list()
joint_limits_low = list()
joint_limits_high = list()
joint_velocity_limits = list()
for joint_name, obs_type in observation_spec:
joint_idx = indexer.get_index(joint_name, obs_type)
if obs_type == PyBulletObservationType.JOINT_VEL:
self._joint_velocity_indexes.append(joint_idx[0])
model_id, joint_id = indexer.joint_map[joint_name]
joint_info = client.getJointInfo(model_id, joint_id)
joint_velocity_limits.append(joint_info[11])
elif obs_type == PyBulletObservationType.JOINT_POS:
self._joint_pos_indexes.append(joint_idx[0])
model_id, joint_id = indexer.joint_map[joint_name]
joint_info = client.getJointInfo(model_id, joint_id)
joint_limits_low.append(joint_info[8])
joint_limits_high.append(joint_info[9])
self._joint_limits_low = np.array(joint_limits_low)
self._joint_limits_high = np.array(joint_limits_high)
self._joint_velocity_limits = np.array(joint_velocity_limits)
def positions(self, state):
return state[self._joint_pos_indexes]
def velocities(self, state):
return state[self._joint_velocity_indexes]
def limits(self):
return self._joint_limits_low, self._joint_limits_high
def velocity_limits(self):
return self._joint_velocity_limits
| 1,456
| 6
| 157
|
1cfba7204b23ca9912e9c8b564ad860ef2ead703
| 2,622
|
py
|
Python
|
plenum/server/consensus/replica_service.py
|
Toktar/indy-plenum
|
2f1f838332b0506f8dd8837ac341cba0cd3f7ff4
|
[
"Apache-2.0"
] | null | null | null |
plenum/server/consensus/replica_service.py
|
Toktar/indy-plenum
|
2f1f838332b0506f8dd8837ac341cba0cd3f7ff4
|
[
"Apache-2.0"
] | null | null | null |
plenum/server/consensus/replica_service.py
|
Toktar/indy-plenum
|
2f1f838332b0506f8dd8837ac341cba0cd3f7ff4
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
from plenum.server.replica_freshness_checker import FreshnessChecker
from crypto.bls.bls_bft_replica import BlsBftReplica
from plenum.common.config_util import getConfig
from plenum.common.event_bus import InternalBus, ExternalBus
from plenum.common.messages.node_messages import Checkpoint
from plenum.common.stashing_router import StashingRouter
from plenum.common.timer import TimerService
from plenum.server.consensus.checkpoint_service import CheckpointService
from plenum.server.consensus.consensus_shared_data import ConsensusSharedData
from plenum.server.consensus.ordering_service import OrderingService
from plenum.server.consensus.view_change_service import ViewChangeService
from plenum.server.request_managers.write_request_manager import WriteRequestManager
from plenum.test.testing_utils import FakeSomething
class ReplicaService:
"""
This is a wrapper for consensus-related services. For now it is intended mostly for
simulation tests; however, in the future it can replace the actual Replica in plenum.
"""
| 52.44
| 102
| 0.670481
|
from typing import List
from plenum.server.replica_freshness_checker import FreshnessChecker
from crypto.bls.bls_bft_replica import BlsBftReplica
from plenum.common.config_util import getConfig
from plenum.common.event_bus import InternalBus, ExternalBus
from plenum.common.messages.node_messages import Checkpoint
from plenum.common.stashing_router import StashingRouter
from plenum.common.timer import TimerService
from plenum.server.consensus.checkpoint_service import CheckpointService
from plenum.server.consensus.consensus_shared_data import ConsensusSharedData
from plenum.server.consensus.ordering_service import OrderingService
from plenum.server.consensus.view_change_service import ViewChangeService
from plenum.server.request_managers.write_request_manager import WriteRequestManager
from plenum.test.testing_utils import FakeSomething
class ReplicaService:
"""
This is a wrapper for consensus-related services. For now it is intended mostly for
simulation tests; however, in the future it can replace the actual Replica in plenum.
"""
def __init__(self, name: str, validators: List[str], primary_name: str,
timer: TimerService, bus: InternalBus, network: ExternalBus,
write_manager: WriteRequestManager,
bls_bft_replica: BlsBftReplica=None):
self._data = ConsensusSharedData(name, validators, 0)
self._data.primary_name = primary_name
config = getConfig()
stasher = StashingRouter(config.REPLICA_STASH_LIMIT, buses=[bus, network])
self._orderer = OrderingService(data=self._data,
timer=timer,
bus=bus,
network=network,
write_manager=write_manager,
bls_bft_replica=bls_bft_replica,
freshness_checker=FreshnessChecker(
freshness_timeout=config.STATE_FRESHNESS_UPDATE_INTERVAL),
stasher=stasher)
self._checkpointer = CheckpointService(self._data, bus, network, stasher,
write_manager.database_manager)
self._view_changer = ViewChangeService(self._data, timer, bus, network, stasher)
# TODO: This is just for testing purposes only
self._data.checkpoints.append(
Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0,
digest='4F7BsTMVPKFshM1MwLf6y23cid6fL3xMpazVoF9krzUw'))
| 1,544
| 0
| 27
|
75bd145742ea44d33d3e21c667730b8c00552cde
| 126
|
py
|
Python
|
verify/checker/agc050/d.py
|
naskya/testcase-generator
|
02765184a275152e1d8c177f2028ca8db315cfee
|
[
"MIT"
] | 4
|
2020-09-23T07:11:41.000Z
|
2022-02-02T09:08:21.000Z
|
verify/checker/agc050/d.py
|
naskya/testcase-generator
|
02765184a275152e1d8c177f2028ca8db315cfee
|
[
"MIT"
] | 5
|
2021-08-29T18:23:01.000Z
|
2021-11-20T03:53:19.000Z
|
verify/checker/agc050/d.py
|
naskya/testcase-generator
|
02765184a275152e1d8c177f2028ca8db315cfee
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
main()
| 15.75
| 36
| 0.492063
|
def main() -> None:
N, K = map(int, input().split())
assert 1 <= K <= N <= 40
if __name__ == '__main__':
main()
| 64
| 0
| 22
|
08a9caf7d6dc3a83ef5b08f80bf1f2c48969b19c
| 15,333
|
py
|
Python
|
tmglow/utils/parallel.py
|
zabaras/deep-turbulence
|
0daca5daada449d4ba16bce37b703e20b444b6bc
|
[
"MIT"
] | 20
|
2020-12-01T14:58:01.000Z
|
2022-03-15T07:40:10.000Z
|
tmglow/utils/parallel.py
|
zabaras/deep-turbulence
|
0daca5daada449d4ba16bce37b703e20b444b6bc
|
[
"MIT"
] | 2
|
2021-06-05T14:29:42.000Z
|
2022-03-04T15:57:40.000Z
|
tmglow/utils/parallel.py
|
zabaras/deep-turbulence
|
0daca5daada449d4ba16bce37b703e20b444b6bc
|
[
"MIT"
] | 4
|
2020-09-04T06:11:04.000Z
|
2021-09-05T10:47:16.000Z
|
'''
Utilities for training TM-Glow in parallel as well as calculating
the loss in parallel on different GPUs for memory purposes.
Original Implementation by Zhang, Rutgers University
https://medium.com/huggingface/training-larger-batches-practical-tips-on-1-gpu-multi-gpu-distributed-setups-ec88c3e51255
=====
Distributed by: Notre Dame SCAI Lab (MIT License)
- Associated publication:
url: http://aimsciences.org//article/id/3a9f3d14-3421-4947-a45f-a9cc74edd097
doi: https://dx.doi.org/10.3934/fods.2020019
github: https://github.com/zabaras/deep-turbulence
=====
'''
import threading
import functools
from itertools import chain
from typing import Optional
import torch
from torch.autograd import Variable, Function
import torch.cuda.comm as comm
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.parallel.parallel_apply import get_a_var
from torch.nn.parallel.scatter_gather import gather
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
from torch._utils import ExceptionWrapper
from torch.cuda._utils import _get_device_index
torch_ver = torch.__version__[:3]
__all__ = ['allreduce', 'DataParallelCriterion']
def allreduce(*inputs):
"""Cross GPU all reduce autograd operation for calculate mean and
variance in SyncBN.
"""
return AllReduce.apply(*inputs)
class DataParallelINNModel(DataParallel):
"""Implements data parallelism at the module level.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the
batch dimension.
In the forward pass, the module is replicated on each device,
and each replica handles a portion of the input. During the backwards pass,
gradients from each replica are summed into the original module.
Note that the outputs are not gathered, please use compatible
:class:`encoding.parallel.DataParallelCriterion`.
The batch size should be larger than the number of GPUs used. It should
also be an integer multiple of the number of GPUs so that each chunk is
the same size (so that each GPU processes the same number of samples).
Args:
module: module to be parallelized
device_ids: CUDA devices (default: all devices)
Reference:
Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
Amit Agrawal. “Context Encoding for Semantic Segmentation.
*The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
Example::
>>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])
>>> y = net(x)
"""
# def gather(self, outputs, output_device):
# return outputs
def inn_parallel_apply(modules, inputs, kwargs_tup=None, devices=None, forward=True):
r"""Applies each `module` in parallel on arguments
contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword)
on each of :attr:`devices`.
Args:
modules (Module): modules to be parallelized
inputs (tensor): inputs to the modules
devices (list of int or torch.device): CUDA devices
:attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
:attr:`devices` (if given) should all have same length. Moreover, each
element of :attr:`inputs` can either be a single object as the only argument
to a module, or a collection of positional arguments.
"""
assert len(modules) == len(inputs)
if kwargs_tup is not None:
assert len(modules) == len(kwargs_tup)
else:
kwargs_tup = ({},) * len(modules)
if devices is not None:
assert len(modules) == len(devices)
else:
devices = [None] * len(modules)
devices = list(map(lambda x: _get_device_index(x, True), devices))
lock = threading.Lock()
results = {}
grad_enabled = torch.is_grad_enabled()
# Start thread for each GPU worker
# Distribute scattered inputs and arguments to each GPU
if len(modules) > 1:
threads = [threading.Thread(target=_worker,
args=(i, module, input, kwargs, device))
for i, (module, input, kwargs, device) in
enumerate(zip(modules, inputs, kwargs_tup, devices))]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
_worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])
outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, ExceptionWrapper):
output.reraise()
outputs.append(output)
return outputs
class DataParallelCriterion(DataParallel):
"""
Calculate loss on multiple GPUs, which balances the memory usage.
The targets are split across the specified devices by chunking in
the batch dimension.
Reference:
Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
Amit Agrawal. “Context Encoding for Semantic Segmentation.
*The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
Example::
>>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])
>>> criterion = encoding.nn.DataParallelCriterion(criterion, device_ids=[0, 1, 2])
>>> y = net(x)
>>> loss = criterion(y, target)
"""
def execute_replication_callbacks(modules):
"""
Execute a replication callback `__data_parallel_replicate__` on each module created
by original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Note that, as all modules are isomorphic, we assign each sub-module a context
(shared among multiple copies of this module on different devices).
Through this context, different copies can share some information.
We guarantee that the callback on the master copy (the first copy) will be called ahead
of calling the callback of any slave copies.
"""
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for i, module in enumerate(modules):
for j, m in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
| 42.239669
| 120
| 0.640579
|
'''
Utilities for training TM-Glow in parallel as well as calculating
the loss in parallel on different GPUs for memory purposes.
Original Implementation by Zhang, Rutgers University
https://medium.com/huggingface/training-larger-batches-practical-tips-on-1-gpu-multi-gpu-distributed-setups-ec88c3e51255
=====
Distributed by: Notre Dame SCAI Lab (MIT License)
- Associated publication:
url: http://aimsciences.org//article/id/3a9f3d14-3421-4947-a45f-a9cc74edd097
doi: https://dx.doi.org/10.3934/fods.2020019
github: https://github.com/zabaras/deep-turbulence
=====
'''
import threading
import functools
from itertools import chain
from typing import Optional
import torch
from torch.autograd import Variable, Function
import torch.cuda.comm as comm
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.parallel.parallel_apply import get_a_var
from torch.nn.parallel.scatter_gather import gather
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
from torch._utils import ExceptionWrapper
from torch.cuda._utils import _get_device_index
torch_ver = torch.__version__[:3]
__all__ = ['allreduce', 'DataParallelCriterion']
def allreduce(*inputs):
"""Cross GPU all reduce autograd operation for calculate mean and
variance in SyncBN.
"""
return AllReduce.apply(*inputs)
class AllReduce(Function):
@staticmethod
def forward(ctx, num_inputs, *inputs):
ctx.num_inputs = num_inputs
ctx.target_gpus = [inputs[i].get_device() for i in range(0, len(inputs), num_inputs)]
inputs = [inputs[i:i + num_inputs]
for i in range(0, len(inputs), num_inputs)]
# sort before reduce sum
inputs = sorted(inputs, key=lambda i: i[0].get_device())
results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])
outputs = comm.broadcast_coalesced(results, ctx.target_gpus)
return tuple([t for tensors in outputs for t in tensors])
@staticmethod
def backward(ctx, *inputs):
inputs = [i.data for i in inputs]
inputs = [inputs[i:i + ctx.num_inputs]
for i in range(0, len(inputs), ctx.num_inputs)]
results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])
outputs = comm.broadcast_coalesced(results, ctx.target_gpus)
return (None,) + tuple([Variable(t) for tensors in outputs for t in tensors])
class Reduce(Function):
@staticmethod
def forward(ctx, *inputs):
ctx.target_gpus = [inputs[i].get_device() for i in range(len(inputs))]
inputs = sorted(inputs, key=lambda i: i.get_device())
return comm.reduce_add(inputs)
@staticmethod
def backward(ctx, gradOutput):
return Broadcast.apply(ctx.target_gpus, gradOutput)
class DataParallelINNModel(DataParallel):
"""Implements data parallelism at the module level.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the
batch dimension.
In the forward pass, the module is replicated on each device,
and each replica handles a portion of the input. During the backwards pass,
gradients from each replica are summed into the original module.
Note that the outputs are not gathered, please use compatible
:class:`encoding.parallel.DataParallelCriterion`.
The batch size should be larger than the number of GPUs used. It should
also be an integer multiple of the number of GPUs so that each chunk is
the same size (so that each GPU processes the same number of samples).
Args:
module: module to be parallelized
device_ids: CUDA devices (default: all devices)
Reference:
Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
Amit Agrawal. “Context Encoding for Semantic Segmentation.
*The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
Example::
>>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])
>>> y = net(x)
"""
def forward(self, *inputs, **kwargs):
if not self.device_ids:
return self.module(*inputs, **kwargs)
if not 'forward' in kwargs.keys():
forward = True
else:
forward = kwargs['forward']
del kwargs['forward']
# Model needs to be on the source device!
for t in chain(self.module.parameters(), self.module.buffers()):
if t.device != self.src_device_obj:
raise RuntimeError("module must have its parameters and buffers "
"on device {} (device_ids[0]) but found one of "
"them on device: {}".format(self.src_device_obj, t.device))
# Returns a tuple of inputs for each GPU
# If an iterable object is given, it will be recursively searched for a tensor or data object
# All tensors are split in the batch (0) dimension which other types are simply copied between GPUs
# See: pytorch/torch/nn/parallel/scatter_gather.py
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
# If there is only 1 GPU, no need to do anything fancy, just execute the model
if len(self.device_ids) == 1:
if(forward):
return self.module(*inputs[0], **kwargs[0])
else:
return self.module.sample(*inputs[0], **kwargs[0])
# Replicate model's from the source device to all GPUs
# replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
# Model forward or sample in parallel
# Outputs are a list of tupled outputs per GPU
outputs = self.parallel_apply(self.replicas, inputs, kwargs, forward=forward)
# Gather output (This is overridden so outputs are NOT gathered)
# This is because we want to compute the loss on the GPU.
return outputs
def scatterModel(self, n_gpu:Optional[int] = None):
if not self.device_ids:
return
if n_gpu is None:
n_gpu = len(self.device_ids)
# Model needs to be on the source device!
for t in chain(self.module.parameters(), self.module.buffers()):
if t.device != self.src_device_obj:
raise RuntimeError("module must have its parameters and buffers "
"on device {} (device_ids[0]) but found one of "
"them on device: {}".format(self.src_device_obj, t.device))
# Replicate model's from the source device to all GPUs
self.replicas = self.replicate(self.module, self.device_ids[:n_gpu])
def scatterRecurrentStates(self, recFeatures):
recFeatures, _ = self.scatter(recFeatures, None, self.device_ids)
return recFeatures
def gatherLSTMStates(self, *inputs):
return gather(*inputs, self.src_device_obj)
def sample(self, *inputs, **kwargs):
kwargs['forward'] = False
return self.forward(*inputs, **kwargs)
# def gather(self, outputs, output_device):
# return outputs
def replicate(self, module, device_ids):
modules = super(DataParallelINNModel, self).replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
def parallel_apply(self, replicas, inputs, kwargs, forward=True):
return inn_parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)], forward)
def inn_parallel_apply(modules, inputs, kwargs_tup=None, devices=None, forward=True):
r"""Applies each `module` in parallel on arguments
contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword)
on each of :attr:`devices`.
Args:
modules (Module): modules to be parallelized
inputs (tensor): inputs to the modules
devices (list of int or torch.device): CUDA devices
:attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
:attr:`devices` (if given) should all have same length. Moreover, each
element of :attr:`inputs` can either be a single object as the only argument
to a module, or a collection of positional arguments.
"""
assert len(modules) == len(inputs)
if kwargs_tup is not None:
assert len(modules) == len(kwargs_tup)
else:
kwargs_tup = ({},) * len(modules)
if devices is not None:
assert len(modules) == len(devices)
else:
devices = [None] * len(modules)
devices = list(map(lambda x: _get_device_index(x, True), devices))
lock = threading.Lock()
results = {}
grad_enabled = torch.is_grad_enabled()
def _worker(i, module, input, kwargs, device=None):
torch.set_grad_enabled(grad_enabled)
if device is None:
device = get_a_var(input).get_device()
try:
with torch.cuda.device(device):
# this also avoids accidental slicing of `input` if it is a Tensor
if not isinstance(input, (list, tuple)):
input = (input,)
if forward:
output = module(*input, **kwargs)
else:
output = module.sample(*input, **kwargs)
with lock:
results[i] = output
except Exception:
with lock:
results[i] = ExceptionWrapper(
where="in replica {} on device {}".format(i, device))
# Start thread for each GPU worker
# Distribute scattered inputs and arguments to each GPU
if len(modules) > 1:
threads = [threading.Thread(target=_worker,
args=(i, module, input, kwargs, device))
for i, (module, input, kwargs, device) in
enumerate(zip(modules, inputs, kwargs_tup, devices))]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
_worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])
outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, ExceptionWrapper):
output.reraise()
outputs.append(output)
return outputs
class DataParallelCriterion(DataParallel):
"""
Calculate loss on multiple GPUs, which balances the memory usage.
The targets are split across the specified devices by chunking in
the batch dimension.
Reference:
Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
Amit Agrawal. “Context Encoding for Semantic Segmentation.
*The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
Example::
>>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])
>>> criterion = encoding.nn.DataParallelCriterion(criterion, device_ids=[0, 1, 2])
>>> y = net(x)
>>> loss = criterion(y, target)
"""
def forward(self, inputs, *targets, **kwargs):
# input should be already scattered
# scattering the targets instead
if not self.device_ids:
return self.module(inputs, *targets, **kwargs)
targets, kwargs = self.scatter(targets, kwargs, self.device_ids[:len(inputs)])
if len(self.device_ids) == 1:
return self.module(*(inputs + targets[0]), **kwargs[0])
# If the loss class is a nn.Module, replicate it.
# If its just an object class, use scatter.
# nn.Module should be used for a loss if there is device specific components needed for it
if(isinstance(self.module, torch.nn.Module)):
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
else:
replicas,_ = self.scatter(self.module, kwargs, self.device_ids[:len(inputs)])
outputs = _criterion_parallel_apply(replicas, inputs, targets, kwargs)
#return Reduce.apply(*outputs) / len(outputs)
#return self.gather(outputs, self.output_device).mean()
# return self.gather(outputs, self.output_device)
# No gather
return outputs
def _criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None):
assert len(modules) == len(inputs)
assert len(targets) == len(inputs)
if kwargs_tup:
assert len(modules) == len(kwargs_tup)
else:
kwargs_tup = ({},) * len(modules)
if devices is not None:
assert len(modules) == len(devices)
else:
devices = [None] * len(modules)
lock = threading.Lock()
results = {}
if torch_ver != "0.3":
grad_enabled = torch.is_grad_enabled()
def _worker(i, module, input, target, kwargs, device=None):
if torch_ver != "0.3":
torch.set_grad_enabled(grad_enabled)
if device is None:
device = get_a_var(input).get_device()
try:
with torch.cuda.device(device):
# this also avoids accidental slicing of `input` if it is a Tensor
if not isinstance(input, (list, tuple)):
input = (input,)
if not isinstance(target, (list, tuple)):
target = (target,)
output = module(*(input + target), **kwargs)
with lock:
results[i] = output
except Exception as e:
with lock:
results[i] = e
if len(modules) > 1:
threads = [threading.Thread(target=_worker,
args=(i, module, input, target,
kwargs, device),)
for i, (module, input, target, kwargs, device) in
enumerate(zip(modules, inputs, targets, kwargs_tup, devices))]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
_worker(0, modules[0], inputs[0], targets[0], kwargs_tup[0], devices[0])
outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, Exception):
raise output
outputs.append(output)
return outputs
class CallbackContext(object):
pass
def execute_replication_callbacks(modules):
"""
Execute a replication callback `__data_parallel_replicate__` on each module created
by original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Note that, as all modules are isomorphic, we assign each sub-module a context
(shared among multiple copies of this module on different devices).
Through this context, different copies can share some information.
We guarantee that the callback on the master copy (the first copy) will be called ahead
of calling the callback of any slave copies.
"""
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for i, module in enumerate(modules):
for j, m in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
| 8,347
| 203
| 337
|
f16f4e21ca23bc2e9efafc489ea9440792f4eb81
| 692
|
py
|
Python
|
scripts/b2/attack.py
|
wwu-pdx/web-ctf
|
82774af40f613e99568d44cd83ea564e4ee68711
|
[
"MIT"
] | null | null | null |
scripts/b2/attack.py
|
wwu-pdx/web-ctf
|
82774af40f613e99568d44cd83ea564e4ee68711
|
[
"MIT"
] | null | null | null |
scripts/b2/attack.py
|
wwu-pdx/web-ctf
|
82774af40f613e99568d44cd83ea564e4ee68711
|
[
"MIT"
] | null | null | null |
import requests
#possible creds generated by level
#one of them is valid for one of your web apps
from credentials import creds
#url='http://YOUR_INTERNAL_IP/login'
urls=['http://10.138.0.58/login', 'http://10.138.0.59/login','http://10.138.0.60/login']
for url in urls:
for u in creds:
#prepare data for post request
payload={'username':u,'password':creds[u]}
#send username and password through post method to web app url
post=requests.Session().post(url, data=payload)
#check if response text contains invalid credentials
if 'Invalid credentials' not in post.text:
#print valid username and password
print(u+' '+creds[u]+' ' + url )
| 27.68
| 88
| 0.683526
|
import requests
#possible creds generated by level
#one of them is valid for one of your web apps
from credentials import creds
#url='http://YOUR_INTERNAL_IP/login'
urls=['http://10.138.0.58/login', 'http://10.138.0.59/login','http://10.138.0.60/login']
for url in urls:
for u in creds:
#prepare data for post request
payload={'username':u,'password':creds[u]}
#send username and password through post method to web app url
post=requests.Session().post(url, data=payload)
#check if response text contains invalid credentials
if 'Invalid credentials' not in post.text:
#print valid username and password
print(u+' '+creds[u]+' ' + url )
| 0
| 0
| 0
|
24ce65a8b75a7d0851bf08b90abe4613dff6d004
| 508
|
py
|
Python
|
Diena_8_dictionaries/d8_s28_u1.py
|
MarisKuz/Python-RTU
|
12261d06dc81fa0d98190ca0eb5133d43d517070
|
[
"MIT"
] | 8
|
2020-08-31T16:10:54.000Z
|
2021-11-24T06:37:37.000Z
|
Diena_8_dictionaries/d8_s28_u1.py
|
MarisKuz/Python-RTU
|
12261d06dc81fa0d98190ca0eb5133d43d517070
|
[
"MIT"
] | 8
|
2021-06-08T22:30:29.000Z
|
2022-03-12T00:48:55.000Z
|
Diena_8_dictionaries/d8_s28_u1.py
|
MarisKuz/Python-RTU
|
12261d06dc81fa0d98190ca0eb5133d43d517070
|
[
"MIT"
] | 12
|
2020-09-28T17:06:52.000Z
|
2022-02-17T12:12:46.000Z
|
from collections import Counter
text = "hubba bubba"
# def get_char_count(text):
# letters = {}
# for letter in text:
# letters[letter] = text.count(letter) # hidden loop in count
# return letters
print(get_char_count(text))
count = Counter(text)
print(count)
print(count.most_common())
| 20.32
| 74
| 0.596457
|
from collections import Counter
text = "hubba bubba"
# def get_char_count(text):
# letters = {}
# for letter in text:
# letters[letter] = text.count(letter) # hidden loop in count
# return letters
def get_char_count(text):
res_dict = {}
for c in text:
if c in res_dict:
res_dict[c] += 1
else:
res_dict[c] = 1
return res_dict
print(get_char_count(text))
count = Counter(text)
print(count)
print(count.most_common())
| 159
| 0
| 23
|
5a6aa7950ba84c32fc870d83a6d609d56f3f0e05
| 3,665
|
py
|
Python
|
unit_tests/test_file_accessor.py
|
LIN810116/neuroglancer-scripts
|
803768d0d550261efa508896eabf55f964cb16c9
|
[
"MIT"
] | 20
|
2017-09-19T10:18:06.000Z
|
2022-02-28T23:08:48.000Z
|
unit_tests/test_file_accessor.py
|
stephen-zhouyang/neuroglancer-scripts
|
3aa2591f3c9710fa0dae930e6270a3397345bf09
|
[
"MIT"
] | 18
|
2017-09-27T11:20:22.000Z
|
2022-02-08T09:12:18.000Z
|
unit_tests/test_file_accessor.py
|
stephen-zhouyang/neuroglancer-scripts
|
3aa2591f3c9710fa0dae930e6270a3397345bf09
|
[
"MIT"
] | 15
|
2017-03-17T12:56:28.000Z
|
2022-02-16T17:59:58.000Z
|
# Copyright (c) 2018 Forschungszentrum Juelich GmbH
# Author: Yann Leprince <y.leprince@fz-juelich.de>
#
# This software is made available under the MIT licence, see LICENCE.txt.
import pathlib
import pytest
from neuroglancer_scripts.file_accessor import FileAccessor
from neuroglancer_scripts.accessor import (
DataAccessError,
)
@pytest.mark.parametrize("flat", [False, True])
@pytest.mark.parametrize("gzip", [False, True])
| 35.931373
| 73
| 0.690859
|
# Copyright (c) 2018 Forschungszentrum Juelich GmbH
# Author: Yann Leprince <y.leprince@fz-juelich.de>
#
# This software is made available under the MIT licence, see LICENCE.txt.
import pathlib
import pytest
from neuroglancer_scripts.file_accessor import FileAccessor
from neuroglancer_scripts.accessor import (
DataAccessError,
)
@pytest.mark.parametrize("flat", [False, True])
@pytest.mark.parametrize("gzip", [False, True])
def test_file_accessor_roundtrip(tmpdir, gzip, flat):
a = FileAccessor(str(tmpdir), gzip=gzip, flat=flat)
fake_info = b'{"scales": [{"key": "key"}]}'
fake_chunk_buf = b"d a t a"
chunk_coords = (0, 1, 0, 1, 0, 1)
a.store_file("info", fake_info, mime_type="application/json")
assert a.fetch_file("info") == fake_info
a.store_chunk(fake_chunk_buf, "key", chunk_coords,
mime_type="application/octet-stream")
assert a.fetch_chunk("key", chunk_coords) == fake_chunk_buf
chunk_coords2 = (0, 1, 0, 1, 1, 2)
a.store_chunk(fake_chunk_buf, "key", chunk_coords2,
mime_type="image/jpeg")
assert a.fetch_chunk("key", chunk_coords2) == fake_chunk_buf
def test_file_accessor_file_exists(tmpdir):
a = FileAccessor(str(tmpdir))
assert a.file_exists("nonexistent_file") is False
(tmpdir / "real_file").open("w") # create an empty file
assert a.file_exists("real_file") is True
assert a.file_exists("nonexistent_dir/file") is False
def test_file_accessor_nonexistent_directory():
a = FileAccessor("/nonexistent/directory")
with pytest.raises(DataAccessError):
a.fetch_file("info")
with pytest.raises(DataAccessError):
a.store_file("info", b"")
chunk_coords = (0, 1, 0, 1, 0, 1)
with pytest.raises(DataAccessError):
a.fetch_chunk("key", chunk_coords)
with pytest.raises(DataAccessError):
a.store_chunk(b"", "key", chunk_coords)
def test_file_accessor_errors(tmpdir):
# tmpdir from pytest is missing features of pathlib
tmpdir = pathlib.Path(str(tmpdir))
a = FileAccessor(str(tmpdir))
chunk_coords = (0, 1, 0, 1, 0, 1)
with pytest.raises(DataAccessError):
a.fetch_file("info")
with pytest.raises(DataAccessError):
a.fetch_chunk("key", chunk_coords)
inaccessible_file = tmpdir / "inaccessible"
inaccessible_file.touch(mode=0o000, exist_ok=False)
with pytest.raises(DataAccessError):
a.fetch_file("inaccessible")
inaccessible_chunk = tmpdir / "inaccessible_key" / "0-1_0-1_0-1"
inaccessible_chunk.parent.mkdir(mode=0o000)
with pytest.raises(DataAccessError):
a.fetch_chunk("inaccessible_key", chunk_coords)
with pytest.raises(DataAccessError):
a.store_chunk(b"", "inaccessible_key", chunk_coords)
with pytest.raises(DataAccessError):
a.file_exists("inaccessible_key/dummy")
with pytest.raises(DataAccessError):
a.store_file("inaccessible_key/dummy", b"")
# Allow pytest to remove tmpdir with os.rmtree
inaccessible_chunk.parent.chmod(mode=0o755)
invalid_gzip_file = tmpdir / "invalid.gz"
with invalid_gzip_file.open("w") as f:
f.write("not gzip compressed")
with pytest.raises(DataAccessError):
print(a.fetch_file("invalid"))
a.store_file("existing", b"")
with pytest.raises(DataAccessError):
a.store_file("existing", b"", overwrite=False)
a.store_file("existing", b"", overwrite=True)
with pytest.raises(ValueError):
a.file_exists("../forbidden")
with pytest.raises(ValueError):
a.fetch_file("../forbidden")
with pytest.raises(ValueError):
a.store_file("../forbidden", b"")
| 3,135
| 0
| 91
|
fc0bcb37fa10554fd99039b30774a06ba241b342
| 1,761
|
py
|
Python
|
public/shader/220303_1752.py
|
pome-ta/soundShader4twigl
|
abdb42fbda96981e8c2d71696f4f76049796ffad
|
[
"MIT"
] | null | null | null |
public/shader/220303_1752.py
|
pome-ta/soundShader4twigl
|
abdb42fbda96981e8c2d71696f4f76049796ffad
|
[
"MIT"
] | null | null | null |
public/shader/220303_1752.py
|
pome-ta/soundShader4twigl
|
abdb42fbda96981e8c2d71696f4f76049796ffad
|
[
"MIT"
] | null | null | null |
// Shader karate piece
//# https://thebookofshaders.com/05/kynd.png
#define BPM 90.0
const float PI = acos(-1.0);
const float TAU = PI * 2.0;
/* sound common */
float timeToBeat(float t) {return t / 60.0 * BPM;}
float beatToTime(float b) {return b / BPM * 60.0;}
float sine(float phase) {
return sin(TAU * phase);
}
float pitch(float scale) {
return 440.0 * pow(2.0, scale / 12.0);
}
vec2 mainSound(float time) {
float bpm = timeToBeat(time);
float tempo = sine((mod(bpm, 4.0) >= 1.0 ? 440.0 : 880.0) * time) * exp(-1e2 * fract(bpm));
float sound = 0.0;
//#float tone = sin( 6.2831 * 440.0 * time );
//#float env = fract(-bpm);
float f = fract(bpm);
float s = sin(PI * bpm / 2.0);
float tone = 0.0;
float env = 0.0;
//tone = sine(beatToTime(bpm) * pitch(0.0));
tone = sine(beatToTime(bpm) * 64.0);
env = 1.0 - pow(abs(s), 0.5);
//env = 1.0 - pow(abs(s), 1.0);
//env = 1.0 - pow(abs(s), 3.5);
//env = pow(cos(PI * s / 2.0), 0.5);
//env = pow(cos(PI * s / 2.0), 1.0);
//env = pow(cos(PI * s / 2.0), 3.5);
//env = 1.0 - pow(abs(sin(PI * s / 2.0)), 0.5);
//env = 1.0 - pow(abs(sin(PI * s / 2.0)), 1.0);
//env = 1.0 - pow(abs(sin(PI * s / 2.0)), 3.5);
//env = pow(min(cos(PI * s / 2.0), 1.0 - abs(s)), 0.5);
//env = pow(min(cos(PI * s / 2.0), 1.0 - abs(s)), 1.0);
//env = pow(min(cos(PI * s / 2.0), 1.0 - abs(s)), 3.5);
//env = 1.0 - pow(max(0.0, abs(s) * 2.0 - 1.0), 0.5);
//env = 1.0 - pow(max(0.0, abs(s) * 2.0 - 1.0), 1.0);
//env = 1.0 - pow(max(0.0, abs(s) * 2.0 - 1.0), 3.5);
float w = smoothstep(1.0, -1.0, tan(bpm * PI));
env = sin(w * TAU);
sound += tone * env;
sound += tempo;
//#if (abs(sound) > 1.0) sound /= abs(sound);
return vec2(sound);
}
| 24.123288
| 93
| 0.508234
|
// Shader karate piece
//# https://thebookofshaders.com/05/kynd.png
#define BPM 90.0
const float PI = acos(-1.0);
const float TAU = PI * 2.0;
/* sound common */
float timeToBeat(float t) {return t / 60.0 * BPM;}
float beatToTime(float b) {return b / BPM * 60.0;}
float sine(float phase) {
return sin(TAU * phase);
}
float pitch(float scale) {
return 440.0 * pow(2.0, scale / 12.0);
}
vec2 mainSound(float time) {
float bpm = timeToBeat(time);
float tempo = sine((mod(bpm, 4.0) >= 1.0 ? 440.0 : 880.0) * time) * exp(-1e2 * fract(bpm));
float sound = 0.0;
//#float tone = sin( 6.2831 * 440.0 * time );
//#float env = fract(-bpm);
float f = fract(bpm);
float s = sin(PI * bpm / 2.0);
float tone = 0.0;
float env = 0.0;
//tone = sine(beatToTime(bpm) * pitch(0.0));
tone = sine(beatToTime(bpm) * 64.0);
env = 1.0 - pow(abs(s), 0.5);
//env = 1.0 - pow(abs(s), 1.0);
//env = 1.0 - pow(abs(s), 3.5);
//env = pow(cos(PI * s / 2.0), 0.5);
//env = pow(cos(PI * s / 2.0), 1.0);
//env = pow(cos(PI * s / 2.0), 3.5);
//env = 1.0 - pow(abs(sin(PI * s / 2.0)), 0.5);
//env = 1.0 - pow(abs(sin(PI * s / 2.0)), 1.0);
//env = 1.0 - pow(abs(sin(PI * s / 2.0)), 3.5);
//env = pow(min(cos(PI * s / 2.0), 1.0 - abs(s)), 0.5);
//env = pow(min(cos(PI * s / 2.0), 1.0 - abs(s)), 1.0);
//env = pow(min(cos(PI * s / 2.0), 1.0 - abs(s)), 3.5);
//env = 1.0 - pow(max(0.0, abs(s) * 2.0 - 1.0), 0.5);
//env = 1.0 - pow(max(0.0, abs(s) * 2.0 - 1.0), 1.0);
//env = 1.0 - pow(max(0.0, abs(s) * 2.0 - 1.0), 3.5);
float w = smoothstep(1.0, -1.0, tan(bpm * PI));
env = sin(w * TAU);
sound += tone * env;
sound += tempo;
//#if (abs(sound) > 1.0) sound /= abs(sound);
return vec2(sound);
}
| 0
| 0
| 0
|
5b92ff2f553aa52e41855cc180f1b6e34b52287c
| 5,310
|
py
|
Python
|
moto/ebs/responses.py
|
andormarkus/moto
|
67cda6d7d6f42118ccd7e2170e7ff0a1f92fa6a6
|
[
"Apache-2.0"
] | null | null | null |
moto/ebs/responses.py
|
andormarkus/moto
|
67cda6d7d6f42118ccd7e2170e7ff0a1f92fa6a6
|
[
"Apache-2.0"
] | null | null | null |
moto/ebs/responses.py
|
andormarkus/moto
|
67cda6d7d6f42118ccd7e2170e7ff0a1f92fa6a6
|
[
"Apache-2.0"
] | null | null | null |
"""Handles incoming ebs requests, invokes methods, returns responses."""
import json
from moto.core.responses import BaseResponse
from .models import ebs_backends
class EBSResponse(BaseResponse):
"""Handler for EBS requests and responses."""
@property
def ebs_backend(self):
"""Return backend instance specific for this region."""
return ebs_backends[self.region]
def start_snapshot(self):
"""
The following parameters are not yet implemented: ParentSnapshotId, ClientToken, Encrypted, KmsKeyArn, Timeout
"""
params = json.loads(self.body)
volume_size = params.get("VolumeSize")
tags = params.get("Tags")
description = params.get("Description")
snapshot = self.ebs_backend.start_snapshot(
volume_size=volume_size,
tags=tags,
description=description,
)
return 200, {}, json.dumps(snapshot.to_json())
def complete_snapshot(self, request, full_url, headers):
"""
The following parameters are not yet supported: ChangedBlocksCount, Checksum, ChecksumAlgorithm, ChecksumAggregationMethod
"""
self.setup_class(request, full_url, headers)
snapshot_id = full_url.split("/")[-1]
status = self.ebs_backend.complete_snapshot(snapshot_id=snapshot_id)
return 200, {}, json.dumps(status)
def put_snapshot_block(self, full_url, headers):
"""
The following parameters are currently not taken into account: DataLength, Progress.
The Checksum and ChecksumAlgorithm are taken at face-value, but no validation takes place.
"""
snapshot_id = full_url.split("/")[-3]
block_index = full_url.split("/")[-1]
block_data = self.body
headers = {k.lower(): v for k, v in headers.items()}
checksum = headers.get("x-amz-checksum")
checksum_algorithm = headers.get("x-amz-checksum-algorithm")
data_length = headers.get("x-amz-data-length")
checksum, checksum_algorithm = self.ebs_backend.put_snapshot_block(
snapshot_id=snapshot_id,
block_index=block_index,
block_data=block_data,
checksum=checksum,
checksum_algorithm=checksum_algorithm,
data_length=data_length,
)
return (
200,
{
"x-amz-Checksum": checksum,
"x-amz-Checksum-Algorithm": checksum_algorithm,
},
"{}",
)
def list_snapshot_blocks(self):
"""
The following parameters are not yet implemented: NextToken, MaxResults, StartingBlockIndex
"""
snapshot_id = self.path.split("/")[-2]
snapshot = self.ebs_backend.list_snapshot_blocks(
snapshot_id=snapshot_id,
)
blocks = [
{"BlockIndex": idx, "BlockToken": b.block_token}
for idx, b in snapshot.blocks.items()
]
return (
200,
{},
json.dumps(
dict(
Blocks=blocks,
VolumeSize=snapshot.volume_size,
BlockSize=snapshot.block_size,
)
),
)
| 36.122449
| 130
| 0.593597
|
"""Handles incoming ebs requests, invokes methods, returns responses."""
import json
from moto.core.responses import BaseResponse
from .models import ebs_backends
class EBSResponse(BaseResponse):
"""Handler for EBS requests and responses."""
@property
def ebs_backend(self):
"""Return backend instance specific for this region."""
return ebs_backends[self.region]
def snapshots(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == "POST":
return self.start_snapshot()
def snapshot_block(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == "PUT":
return self.put_snapshot_block(full_url, headers)
if request.method == "GET":
return self.get_snapshot_block()
def snapshot_blocks(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == "GET":
return self.list_snapshot_blocks()
def start_snapshot(self):
"""
The following parameters are not yet implemented: ParentSnapshotId, ClientToken, Encrypted, KmsKeyArn, Timeout
"""
params = json.loads(self.body)
volume_size = params.get("VolumeSize")
tags = params.get("Tags")
description = params.get("Description")
snapshot = self.ebs_backend.start_snapshot(
volume_size=volume_size,
tags=tags,
description=description,
)
return 200, {}, json.dumps(snapshot.to_json())
def complete_snapshot(self, request, full_url, headers):
"""
The following parameters are not yet supported: ChangedBlocksCount, Checksum, ChecksumAlgorithm, ChecksumAggregationMethod
"""
self.setup_class(request, full_url, headers)
snapshot_id = full_url.split("/")[-1]
status = self.ebs_backend.complete_snapshot(snapshot_id=snapshot_id)
return 200, {}, json.dumps(status)
def put_snapshot_block(self, full_url, headers):
"""
The following parameters are currently not taken into account: DataLength, Progress.
The Checksum and ChecksumAlgorithm are taken at face-value, but no validation takes place.
"""
snapshot_id = full_url.split("/")[-3]
block_index = full_url.split("/")[-1]
block_data = self.body
headers = {k.lower(): v for k, v in headers.items()}
checksum = headers.get("x-amz-checksum")
checksum_algorithm = headers.get("x-amz-checksum-algorithm")
data_length = headers.get("x-amz-data-length")
checksum, checksum_algorithm = self.ebs_backend.put_snapshot_block(
snapshot_id=snapshot_id,
block_index=block_index,
block_data=block_data,
checksum=checksum,
checksum_algorithm=checksum_algorithm,
data_length=data_length,
)
return (
200,
{
"x-amz-Checksum": checksum,
"x-amz-Checksum-Algorithm": checksum_algorithm,
},
"{}",
)
def get_snapshot_block(self):
snapshot_id = self.path.split("/")[-3]
block_index = self.path.split("/")[-1]
block = self.ebs_backend.get_snapshot_block(
snapshot_id=snapshot_id,
block_index=block_index,
)
headers = {
"x-amz-Checksum": block.checksum,
"x-amz-Checksum-Algorithm": block.checksum_algorithm,
"x-amz-Data-Length": block.data_length,
}
return 200, headers, block.block_data
def snapshot_changed_blocks(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
first_snapshot_id = self._get_params().get("firstSnapshotId")
second_snapshot_id = self.path.split("/")[-2]
changed_blocks, snapshot = self.ebs_backend.list_changed_blocks(
first_snapshot_id=first_snapshot_id,
second_snapshot_id=second_snapshot_id,
)
blocks = [
{"BlockIndex": idx, "FirstBlockToken": x, "SecondBlockToken": y}
for idx, (x, y) in changed_blocks.items()
]
return (
200,
{},
json.dumps(
dict(
ChangedBlocks=blocks,
VolumeSize=snapshot.volume_size,
BlockSize=snapshot.block_size,
)
),
)
def list_snapshot_blocks(self):
"""
The following parameters are not yet implemented: NextToken, MaxResults, StartingBlockIndex
"""
snapshot_id = self.path.split("/")[-2]
snapshot = self.ebs_backend.list_snapshot_blocks(
snapshot_id=snapshot_id,
)
blocks = [
{"BlockIndex": idx, "BlockToken": b.block_token}
for idx, b in snapshot.blocks.items()
]
return (
200,
{},
json.dumps(
dict(
Blocks=blocks,
VolumeSize=snapshot.volume_size,
BlockSize=snapshot.block_size,
)
),
)
| 1,916
| 0
| 135
|
cd1504154b866217ffd7e307fc7fa1ad19e62e68
| 2,285
|
py
|
Python
|
test/test_ac.py
|
RL-Starterpack/rl-starterpack
|
af61d08eb00b0c960b103eb469a73ba2f47b9990
|
[
"MIT"
] | 25
|
2020-11-23T15:26:04.000Z
|
2021-07-21T09:24:16.000Z
|
test/test_ac.py
|
RL-Starterpack/rl-starterpack
|
af61d08eb00b0c960b103eb469a73ba2f47b9990
|
[
"MIT"
] | null | null | null |
test/test_ac.py
|
RL-Starterpack/rl-starterpack
|
af61d08eb00b0c960b103eb469a73ba2f47b9990
|
[
"MIT"
] | 9
|
2021-03-28T07:45:48.000Z
|
2022-03-22T12:43:10.000Z
|
import unittest
import torch.nn
from rl_starterpack import AC, OpenAIGym, experiment
| 40.803571
| 97
| 0.644201
|
import unittest
import torch.nn
from rl_starterpack import AC, OpenAIGym, experiment
class TestAC(unittest.TestCase):
def test_cartpole(self):
env = OpenAIGym(level='CartPole', max_timesteps=100)
hidden_size = 16
actor_fn = (lambda: torch.nn.Sequential(
torch.nn.Linear(in_features=env.state_space['shape'][0], out_features=hidden_size),
torch.nn.Tanh(),
torch.nn.Linear(in_features=hidden_size, out_features=env.action_space['num_values'])
))
critic_fn = (lambda: torch.nn.Sequential(
torch.nn.Linear(in_features=env.state_space['shape'][0], out_features=hidden_size),
torch.nn.Tanh(),
torch.nn.Linear(in_features=hidden_size, out_features=1)
))
agent = AC(
state_space=env.state_space, action_space=env.action_space,
actor_fn=actor_fn, actor_learning_rate=1e-3,
critic_fn=critic_fn, critic_learning_rate=1e-3
)
experiment.train(agent=agent, env=env, num_episodes=10)
experiment.evaluate(agent=agent, env=env, num_episodes=10)
agent.close()
env.close()
def test_pendulum(self):
env = OpenAIGym(level='Pendulum', max_timesteps=100)
hidden_size = 16
actor_fn = (lambda: torch.nn.Sequential(
torch.nn.Linear(in_features=env.state_space['shape'][0], out_features=hidden_size),
torch.nn.Tanh(),
torch.nn.Linear(in_features=hidden_size, out_features=env.action_space['shape'][0])
))
critic_fn = (lambda: torch.nn.Sequential(
torch.nn.Linear(in_features=env.state_space['shape'][0], out_features=hidden_size),
torch.nn.Tanh(),
torch.nn.Linear(in_features=hidden_size, out_features=1)
))
agent = AC(
state_space=env.state_space, action_space=env.action_space,
actor_fn=actor_fn, actor_learning_rate=1e-3,
critic_fn=critic_fn, critic_learning_rate=1e-3,
discount=0.95, compute_advantage=True, normalize_returns=True
)
experiment.train(agent=agent, env=env, num_episodes=10)
experiment.evaluate(agent=agent, env=env, num_episodes=10)
agent.close()
env.close()
| 2,109
| 11
| 77
|
b0411ae28aa8350a4d5efdcf252faf88416873a6
| 795
|
py
|
Python
|
netmiko/centec/centec_os.py
|
josephwhite13/netmiko
|
c08c5ebb3484383f034e22b9576f88be07525f72
|
[
"MIT"
] | 2
|
2021-07-15T17:55:55.000Z
|
2021-07-25T23:56:12.000Z
|
netmiko/centec/centec_os.py
|
rockenwind/netmiko
|
24291029d0cdd5af660475ac1093a2dcd1c08af2
|
[
"MIT"
] | 1
|
2020-12-29T13:14:54.000Z
|
2020-12-29T13:14:54.000Z
|
netmiko/centec/centec_os.py
|
rockenwind/netmiko
|
24291029d0cdd5af660475ac1093a2dcd1c08af2
|
[
"MIT"
] | 1
|
2022-01-28T00:51:41.000Z
|
2022-01-28T00:51:41.000Z
|
"""Centec OS Support"""
from netmiko.cisco_base_connection import CiscoBaseConnection
import time
| 25.645161
| 76
| 0.681761
|
"""Centec OS Support"""
from netmiko.cisco_base_connection import CiscoBaseConnection
import time
class CentecOSBase(CiscoBaseConnection):
def session_preparation(self):
"""Prepare the session after the connection has been established."""
self._test_channel_read(pattern=r"[>#]")
self.set_base_prompt()
self.disable_paging()
# Clear the read buffer
time.sleep(0.3 * self.global_delay_factor)
self.clear_buffer()
def save_config(self, cmd="write", confirm=False, confirm_response=""):
"""Save config: write"""
return super().save_config(
cmd=cmd, confirm=confirm, confirm_response=confirm_response
)
class CentecOSSSH(CentecOSBase):
pass
class CentecOSTelnet(CentecOSBase):
pass
| 0
| 625
| 69
|
23f54cc870cf05d60c13ef1c334edaf8af01defa
| 301
|
py
|
Python
|
40 Algorithm challenge/challenge 3.py
|
T0dCNg/All-In-One
|
f86d7f46d3a4fafde5c5d087cffe1e3414870c48
|
[
"Unlicense"
] | 1
|
2022-01-27T16:28:51.000Z
|
2022-01-27T16:28:51.000Z
|
40 Algorithm challenge/challenge 3.py
|
T0dCNg/All-In-One
|
f86d7f46d3a4fafde5c5d087cffe1e3414870c48
|
[
"Unlicense"
] | null | null | null |
40 Algorithm challenge/challenge 3.py
|
T0dCNg/All-In-One
|
f86d7f46d3a4fafde5c5d087cffe1e3414870c48
|
[
"Unlicense"
] | null | null | null |
#Challenge 3
#The program asks the user to input their surname and then their first name.
#The program then outputs the user’s first name and then their surname separately.
name2 = input("please enter your surname: ")
name1 = input("please enter your first name: ")
print(name2)
print(name1)
| 30.1
| 82
| 0.744186
|
#Challenge 3
#The program asks the user to input their surname and then their first name.
#The program then outputs the user’s first name and then their surname separately.
name2 = input("please enter your surname: ")
name1 = input("please enter your first name: ")
print(name2)
print(name1)
| 0
| 0
| 0
|
0b2070746fa1ea5966242d1e72b2c334bf6327ef
| 37,346
|
py
|
Python
|
importers/instagram.py
|
audacious-software/Passive-Data-Kit-External-Data
|
2cfa93f31f53dcce8bb38f99144d993dd1cd4a9b
|
[
"Apache-2.0"
] | 1
|
2021-07-07T21:13:53.000Z
|
2021-07-07T21:13:53.000Z
|
importers/instagram.py
|
audacious-software/Passive-Data-Kit-External-Data
|
2cfa93f31f53dcce8bb38f99144d993dd1cd4a9b
|
[
"Apache-2.0"
] | 8
|
2020-11-18T15:29:17.000Z
|
2022-02-24T20:38:27.000Z
|
importers/instagram.py
|
audacious-software/Passive-Data-Kit-External-Data
|
2cfa93f31f53dcce8bb38f99144d993dd1cd4a9b
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=line-too-long
from __future__ import print_function
import json
import re
import traceback
import zipfile
import arrow
import pytz
from passive_data_kit.models import DataPoint
from passive_data_kit_external_data.models import annotate_field
from ..utils import hash_content, encrypt_content, create_engagement_event, queue_batch_insert, include_data
# Older format?
| 51.582873
| 270
| 0.695041
|
# pylint: disable=line-too-long
from __future__ import print_function
import json
import re
import traceback
import zipfile
import arrow
import pytz
from passive_data_kit.models import DataPoint
from passive_data_kit_external_data.models import annotate_field
from ..utils import hash_content, encrypt_content, create_engagement_event, queue_batch_insert, include_data
def process_ads_viewed(request_identifier, ads_viewed_raw):
ads_viewed = json.loads(ads_viewed_raw)
if isinstance(ads_viewed, dict) is False:
return
if ('impressions_history_ads_seen' in ads_viewed) is False:
return
for ad_viewed in ads_viewed['impressions_history_ads_seen']:
created = arrow.get(ad_viewed['string_map_data']['Time']['timestamp']).datetime
if include_data(request_identifier, created, ad_viewed):
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-ad-viewed', request_identifier, ad_viewed, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.0, engagement_type='advertising', start=created)
def process_posts_viewed(request_identifier, posts_viewed_raw):
posts_viewed = json.loads(posts_viewed_raw)
if isinstance(posts_viewed, dict) is False:
return
if ('impressions_history_posts_seen' in posts_viewed) is False:
return
for post_viewed in posts_viewed['impressions_history_posts_seen']:
created = arrow.get(post_viewed['string_map_data']['Time']['timestamp']).datetime
if include_data(request_identifier, created, post_viewed):
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-post-viewed', request_identifier, post_viewed, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.0, engagement_type='post', start=created)
def process_suggested_accounts_viewed(request_identifier, suggested_accounts_viewed_raw): # pylint: disable=invalid-name
suggested_accounts_viewed = json.loads(suggested_accounts_viewed_raw)
if isinstance(suggested_accounts_viewed, dict) is False:
return
if ('impressions_history_chaining_seen' in suggested_accounts_viewed) is False:
return
for account_viewed in suggested_accounts_viewed['impressions_history_chaining_seen']:
created = arrow.get(account_viewed['string_map_data']['Time']['timestamp']).datetime
if include_data(request_identifier, created, account_viewed):
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-profile-viewed', request_identifier, account_viewed, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.0, engagement_type='profile', start=created)
def process_videos_watched(request_identifier, videos_watched_raw):
videos_watched = json.loads(videos_watched_raw)
if isinstance(videos_watched, dict) is False:
return
if ('impressions_history_videos_watched' in videos_watched) is False:
return
for video_watched in videos_watched['impressions_history_videos_watched']:
created = arrow.get(video_watched['string_map_data']['Time']['timestamp']).datetime
if include_data(request_identifier, created, video_watched):
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-video-watched', request_identifier, video_watched, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.0, engagement_type='video', start=created)
def process_post_comments(request_identifier, post_comments_raw):
post_comments = json.loads(post_comments_raw)
if isinstance(post_comments, dict) is False:
return
if ('comments_media_comments' in post_comments) is False:
return
for post_comment in post_comments['comments_media_comments']:
post_comment['encrypted_title'] = encrypt_content(post_comment['title'].encode('utf-8'))
del post_comment['title']
post_comment['string_list_data']['encrypted_value'] = encrypt_content(post_comment['string_list_data']['value'].encode('utf-8'))
annotate_field(post_comment['string_list_data'], 'value', post_comment['string_list_data']['value'])
del post_comment['string_list_data']['value']
created = arrow.get(post_comment['string_map_data']['Time']['timestamp']).datetime
if include_data(request_identifier, created, post_comment):
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-comment-posted', request_identifier, post_comment, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=1.0, engagement_type='comment', start=created)
def process_posts_made(request_identifier, posts_made_raw):
posts_made = json.loads(posts_made_raw)
if isinstance(posts_made, list) is False:
return
for post in posts_made:
created = arrow.get(post['media'][0]['creation_timestamp']).datetime
if include_data(request_identifier, created, post):
for media in post['media']:
media['encrypted_title'] = encrypt_content(media['title'].encode('utf-8'))
annotate_field(media, 'title', media['title'])
del media['title']
try:
del media['media_metadata']['photo_metadata']['exif_data']
except KeyError:
pass
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-post', request_identifier, post, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=1.0, engagement_type='post', start=created)
def process_liked_comments(request_identifier, liked_comments_raw):
liked_comments = json.loads(liked_comments_raw)
if isinstance(liked_comments, dict) is False:
return
if ('likes_comment_likes' in liked_comments) is False:
return
for liked_comment in liked_comments['likes_comment_likes']:
created = arrow.get(liked_comment['string_map_data']['timestamp']).datetime
if include_data(request_identifier, created, liked_comment):
liked_comment['encrypted_title'] = encrypt_content(liked_comment['title'].encode('utf-8'))
del liked_comment['title']
liked_comment['string_list_data']['encrypted_href'] = encrypt_content(liked_comment['string_list_data']['href'].encode('utf-8'))
del liked_comment['string_list_data']['href']
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-comment-like', request_identifier, liked_comment, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='reaction', start=created)
def process_liked_posts(request_identifier, liked_posts_raw):
liked_posts = json.loads(liked_posts_raw)
if isinstance(liked_posts, dict) is False:
return
if ('likes_media_likes' in liked_posts) is False:
return
for liked_post in liked_posts['likes_media_likes']:
created = arrow.get(liked_post['string_map_data']['timestamp']).datetime
if include_data(request_identifier, created, liked_post):
liked_post['encrypted_title'] = encrypt_content(liked_post['title'].encode('utf-8'))
del liked_post['title']
liked_post['string_list_data']['encrypted_href'] = encrypt_content(liked_post['string_list_data']['href'].encode('utf-8'))
del liked_post['string_list_data']['href']
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-post-like', request_identifier, liked_post, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='reaction', start=created)
def process_login_activity(request_identifier, login_activity_raw):
login_activity = json.loads(login_activity_raw)
if isinstance(login_activity, dict) is False:
return
if ('account_history_login_history' in login_activity) is False:
return
for login in login_activity['account_history_login_history']:
created = arrow.get(login['string_map_data']['Time']['timestamp']).datetime
if include_data(request_identifier, created, login):
            login['string_map_data']['IP Address']['encrypted_value'] = encrypt_content(login['string_map_data']['IP Address']['value'].encode('utf-8'))
del login['string_map_data']['IP Address']['value']
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-login', request_identifier, login, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.0, engagement_type='login', start=created)
def process_account_history(request_identifier, history_raw):
history = json.loads(history_raw)
if isinstance(history, dict) is False:
return
for login in history['login_history']:
created = arrow.get(login['timestamp']).datetime
if include_data(request_identifier, created, login):
            login['ip_address_encrypted_value'] = encrypt_content(login['ip_address'].encode('utf-8'))
del login['ip_address']
del login['device_id']
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-login', request_identifier, login, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.0, engagement_type='login', start=created)
def process_stories(request_identifier, stories_raw):
stories = json.loads(stories_raw)
if isinstance(stories, dict) is False:
return
for action_type, actions in stories.iteritems():
for action in actions:
created = arrow.get(action[0]).datetime
if include_data(request_identifier, created, action):
payload = {
'action_type': action_type,
'target_encrypted_value': encrypt_content(action[1].encode('utf-8'))
}
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-story-action', request_identifier, payload, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='story', start=created)
def process_logout_activity(request_identifier, logout_activity_raw):
logout_activity = json.loads(logout_activity_raw)
if isinstance(logout_activity, dict) is False:
return
if ('account_history_logout_history' in logout_activity) is False:
return
for logout in logout_activity['account_history_logout_history']:
created = arrow.get(logout['string_map_data']['Time']['timestamp']).datetime
if include_data(request_identifier, created, logout):
            logout['string_map_data']['IP Address']['encrypted_value'] = encrypt_content(logout['string_map_data']['IP Address']['value'].encode('utf-8'))
del logout['string_map_data']['IP Address']['value']
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-login', request_identifier, logout, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.0, engagement_type='logout', start=created)
def process_messages_new(request_identifier, username, messages_raw):
messages = json.loads(messages_raw)
if isinstance(messages, dict) is False:
return
for message in messages['messages']:
created = arrow.get(message['timestamp_ms'] / 1000).datetime
if include_data(request_identifier, created, message):
pdk_message = {
'pdk_recipients_count': len(messages['participants']) - 1,
'pdk_hashed_senderId': hash_content(message['sender_name'].encode('utf-8')),
'pdk_encrypted_sender': encrypt_content(message['sender_name'].encode('utf-8')),
'created_at': message['timestamp_ms']
}
if 'content' in message and message['content'] is not None:
annotate_field(pdk_message, 'content', message['content'])
pdk_message['pdk_encrypted_content'] = encrypt_content(message['content'].encode('utf-8'))
if 'share' in message:
pdk_message['pdk_encrypted_media_url'] = encrypt_content(message['share']['link'].encode('utf-8'))
if 'share_text' in message['share']:
annotate_field(pdk_message, 'share_text', message['share']['share_text'])
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-direct-message', request_identifier, pdk_message, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
if message['sender_name'] == username:
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=1.0, engagement_type='message', start=created)
else:
create_engagement_event(source='instagram', identifier=request_identifier, incoming_engagement=1.0, engagement_type='message', start=created)
# Older format?
def process_comments(request_identifier, comments_raw):
comments = json.loads(comments_raw)
if isinstance(comments, dict) is False:
return
for key in comments:
comment_list = comments[key]
for comment in comment_list:
created = arrow.get(comment[0]).replace(tzinfo=pytz.timezone('US/Pacific')).datetime
if include_data(request_identifier, created, comment):
comment_point = {}
comment_point['pdk_encrypted_comment'] = encrypt_content(comment[1].encode('utf-8'))
annotate_field(comment_point, 'comment', comment[1])
comment_point['pdk_hashed_profile'] = hash_content(comment[2])
comment_point['pdk_encrypted_profile'] = encrypt_content(comment[2].encode('utf-8'))
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-comment', request_identifier, comment_point, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=1.0, engagement_type='comment', start=created)
def process_media(request_identifier, media_raw):
media = json.loads(media_raw)
if 'photos' in media:
for photo in media['photos']:
created = arrow.get(photo['taken_at']).replace(tzinfo=pytz.timezone('US/Pacific')).datetime
if include_data(request_identifier, created, photo):
photo['pdk_encrypted_caption'] = encrypt_content(photo['caption'].encode('utf-8'))
annotate_field(photo, 'caption', photo['caption'])
del photo['caption']
if 'location' in photo:
photo['pdk_encrypted_location'] = encrypt_content(photo['location'].encode('utf-8'))
annotate_field(photo, 'location', photo['location'])
del photo['location']
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-photo', request_identifier, photo, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=1.0, engagement_type='photo', start=created)
if 'videos' in media:
for video in media['videos']:
created = arrow.get(video['taken_at']).replace(tzinfo=pytz.timezone('US/Pacific')).datetime
if include_data(request_identifier, created, video):
video['pdk_encrypted_caption'] = encrypt_content(video['caption'].encode('utf-8'))
annotate_field(video, 'caption', video['caption'])
del video['caption']
if 'location' in video:
video['pdk_encrypted_location'] = encrypt_content(video['location'].encode('utf-8'))
annotate_field(video, 'location', video['location'])
del video['location']
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-video', request_identifier, video, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=1.0, engagement_type='video', start=created)
def process_likes(request_identifier, likes_raw):
likes = json.loads(likes_raw)
if isinstance(likes, dict) is False:
return
keys = ['media', 'comment']
for key in keys:
likes_list = likes[key + '_likes']
for like in likes_list:
created = arrow.get(like[0]).datetime
if include_data(request_identifier, created, like):
reaction = {
'timestamp': like[0],
'pdk_hashed_target': hash_content(like[1].encode('utf-8'))
}
reaction['content_type'] = key
reaction['reaction'] = 'like'
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-reaction', request_identifier, reaction, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='reaction', start=created)
def process_messages(request_identifier, username, messages_raw):
conversations = json.loads(messages_raw)
if isinstance(conversations, list) is False:
return
for conversation in conversations:
hashed_participants = []
for participant in conversation['participants']:
if participant != username:
                hashed_participants.append(hash_content(participant))  # collect one hash per participant other than the account owner
for message in conversation['conversation']:
created = arrow.get(message['created_at']).datetime
if include_data(request_identifier, created, message):
pdk_message = {
'pdk_recipients_count': len(conversation['participants']) - 1,
'pdk_hashed_senderId': hash_content(message['sender']),
'pdk_encrypted_sender': encrypt_content(message['sender'].encode('utf-8')),
'pdk_hashed_participants': hashed_participants,
'created_at': message['created_at']
}
if 'text' in message and message['text'] is not None:
annotate_field(pdk_message, 'text', message['text'])
pdk_message['pdk_encrypted_text'] = encrypt_content(message['text'].encode('utf-8'))
if 'media_url' in message:
pdk_message['pdk_encrypted_media_url'] = encrypt_content(message['media_url'].encode('utf-8'))
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-direct-message', request_identifier, pdk_message, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
if message['sender'] == username:
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=1.0, engagement_type='message', start=created)
else:
create_engagement_event(source='instagram', identifier=request_identifier, incoming_engagement=1.0, engagement_type='message', start=created)
def process_seen_content(request_identifier, seen_raw):
seen = json.loads(seen_raw)
if isinstance(seen, dict) is False:
return
for item_seen in seen['chaining_seen']:
created = arrow.get(item_seen['timestamp']).datetime
if include_data(request_identifier, created, item_seen):
reaction = {
'timestamp': item_seen['timestamp'],
'pdk_hashed_target': hash_content(item_seen['username'].encode('utf-8'))
}
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-page-visit', request_identifier, reaction, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='page', start=created)
if 'ads_clicked' in seen:
for item_seen in seen['ads_clicked']:
created = arrow.get(item_seen['timestamp']).datetime
if include_data(request_identifier, created, item_seen):
ad_clicked = {
'timestamp': item_seen['timestamp'],
'pdk_hashed_target': hash_content(item_seen['caption'].encode('utf-8'))
}
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-ad-clicked', request_identifier, ad_clicked, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='advertising', start=created)
def process_searches(request_identifier, searches_raw):
searches = json.loads(searches_raw)
if isinstance(searches, dict) is False:
return
for search in searches['main_search_history']:
created = arrow.get(search['time']).datetime
if include_data(request_identifier, created, search):
search_click = {
'timestamp': search['time'],
'pdk_hashed_target': hash_content(search['search_click'].encode('utf-8'))
}
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-search-click', request_identifier, search_click, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='search', start=created)
def process_connections_events(request_identifier, json_string): # pylint: disable=too-many-branches
friends_history = json.loads(json_string)
incoming_contact_categories = [
'follow_requests_sent',
'following',
]
for contact_category in incoming_contact_categories:
if contact_category in friends_history:
for contact, contact_date in friends_history[contact_category].iteritems():
created = arrow.get(contact_date).datetime
if include_data(request_identifier, created, contact):
payload = {
'pdk_encrypted_username': encrypt_content(contact.encode('utf-8')),
'connection_date': contact_date
}
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-requested-contact', request_identifier, payload, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='requested-contact', start=created)
if 'following_hashtags' in friends_history:
for contact, contact_date in friends_history['following_hashtags'].iteritems():
created = arrow.get(contact_date).datetime
if include_data(request_identifier, created, contact):
payload = {
'pdk_encrypted_hashtag': encrypt_content(contact.encode('utf-8')),
'connection_date': contact_date
}
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-followed-topic', request_identifier, payload, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='followed-topic', start=created)
if 'followers' in friends_history:
for contact, contact_date in friends_history['followers'].iteritems():
created = arrow.get(contact_date).datetime
if include_data(request_identifier, created, contact):
payload = {
'pdk_encrypted_username': encrypt_content(contact.encode('utf-8')),
'connection_date': contact_date
}
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-added-contact', request_identifier, payload, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, incoming_engagement=0.5, engagement_type='added-contact', start=created)
if 'dismissed_suggested_users' in friends_history:
for contact, contact_date in friends_history['dismissed_suggested_users'].iteritems():
created = arrow.get(contact_date).datetime
if include_data(request_identifier, created, contact):
payload = {
'pdk_encrypted_username': encrypt_content(contact.encode('utf-8')),
'connection_date': contact_date
}
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-deleted-contact', request_identifier, payload, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, incoming_engagement=0.5, engagement_type='deleted-contact', start=created)
if 'blocked_users' in friends_history:
for contact, contact_date in friends_history['blocked_users'].iteritems():
created = arrow.get(contact_date).datetime
if include_data(request_identifier, created, contact):
payload = {
'pdk_encrypted_username': encrypt_content(contact.encode('utf-8')),
'connection_date': contact_date
}
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-blocked-contact', request_identifier, payload, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='blocked-contact', start=created)
def process_save_events(request_identifier, json_string):
save_history = json.loads(json_string)
if 'saved_media' in save_history:
for saved_item in save_history['saved_media']:
created = arrow.get(saved_item[0]).datetime
if include_data(request_identifier, created, saved_item):
payload = {
'pdk_encrypted_username': encrypt_content(saved_item[1].encode('utf-8')),
'save_date': saved_item[0]
}
queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-instagram-saved-media', request_identifier, payload, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
create_engagement_event(source='instagram', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='saved-media', start=created)
def import_data(request_identifier, path): # pylint: disable=too-many-branches, too-many-statements
content_bundle = zipfile.ZipFile(path)
skip_files = [
'autofill.json',
'uploaded_contacts.json',
'checkout.json',
'profile.json',
'settings.json',
'information_about_you.json',
'devices.json',
'shopping.json',
'guides.json',
]
for content_file in content_bundle.namelist():
try:
if content_file.endswith('/'):
pass
elif 'no-data' in content_file:
pass
elif 'media/archived_posts' in content_file:
pass
elif content_file in skip_files:
pass
elif content_file.endswith('.mp4'):
pass
elif content_file.endswith('.m4a'):
pass
elif content_file.endswith('.jpg'):
pass
elif re.match(r'^messages\/.*\/message_.*\.html', content_file):
pass
elif re.match(r'^comments\.json', content_file):
process_comments(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^stories_activities\.json', content_file):
process_stories(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^connections\.json', content_file):
process_connections_events(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^saved\.json', content_file):
process_save_events(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^media\.json', content_file):
process_media(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^likes\.json', content_file):
process_likes(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^seen_content\.json', content_file):
process_seen_content(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^searches\.json', content_file):
process_searches(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^messages\.json', content_file):
profile_json = json.loads(content_bundle.open('profile.json').read())
username = profile_json['username']
process_messages(request_identifier, username, content_bundle.open(content_file).read())
elif re.match(r'^ads_and_content\/ads_viewed\.json', content_file):
process_ads_viewed(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^ads_and_content\/posts_viewed\.json', content_file):
process_posts_viewed(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^ads_and_content\/suggested_accounts_viewed\.json', content_file):
process_suggested_accounts_viewed(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^ads_and_content\/videos_watched\.json', content_file):
process_videos_watched(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^comments\/post_comments\.json', content_file):
process_post_comments(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^posts\/post_.*\.json', content_file):
process_posts_made(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^likes\/liked_comments.json', content_file):
process_liked_comments(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^login_and_account_creation\/login_activity.json', content_file):
process_login_activity(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^account_history.json', content_file):
process_account_history(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^messages\/.*\/message_.*\.json', content_file):
try:
profile_json = json.loads(content_bundle.open('account_information/personal_information.json').read())
username = profile_json['profile_user'][0]['string_map_data']['Name']['value']
process_messages_new(request_identifier, username, content_bundle.open(content_file).read())
except KeyError:
pass
else:
print('INSTAGRAM[' + request_identifier + ']: Unable to process: ' + content_file + ' -- ' + str(content_bundle.getinfo(content_file).file_size))
except: # pylint: disable=bare-except
traceback.print_exc()
return False
return True
def external_data_metadata(generator_identifier, point): # pylint: disable=unused-argument
if generator_identifier.startswith('pdk-external-instagram') is False:
return None
metadata = {}
metadata['service'] = 'Instagram'
metadata['event'] = generator_identifier
if generator_identifier == 'pdk-external-instagram-comment':
metadata['event'] = 'Added Comment'
metadata['direction'] = 'Outgoing'
metadata['media_type'] = 'Text'
elif generator_identifier == 'pdk-external-instagram-photo':
metadata['event'] = 'Photo Upload'
metadata['direction'] = 'Outgoing'
metadata['media_type'] = 'Image'
elif generator_identifier == 'pdk-external-instagram-video':
metadata['event'] = 'Video Upload'
metadata['direction'] = 'Outgoing'
metadata['media_type'] = 'Video'
return metadata
def update_data_type_definition(definition):
if 'pdk-external-instagram-photo' in definition['passive-data-metadata.generator-id']['observed']:
if 'pdk_encrypted_caption' in definition:
del definition['pdk_encrypted_caption']['observed']
definition['pdk_encrypted_caption']['is_freetext'] = True
definition['pdk_encrypted_caption']['pdk_variable_name'] = 'Encrypted photo caption'
definition['pdk_encrypted_caption']['pdk_variable_description'] = 'Encrypted caption of the photo, saved for use later (with proper authorizations and keys).'
if 'taken_at' in definition:
del definition['taken_at']['observed']
definition['taken_at']['is_freetext'] = False
definition['taken_at']['pdk_variable_name'] = 'Photo capture timestamp'
definition['taken_at']['pdk_variable_description'] = 'ISO-8601 timestamp of the time the photo was originally taken.'
if 'path' in definition:
del definition['path']['observed']
definition['path']['is_freetext'] = False
definition['path']['pdk_variable_name'] = 'Photo path in export file'
definition['path']['pdk_variable_description'] = 'File path of the photo file in the uploaded Instagram data export.'
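# Illustrative usage sketch, not part of the source module; the request
# identifier and the archive path below are placeholders.
REQUEST_ID = 'example-request-identifier'    # placeholder PDK data request id
EXPORT_PATH = '/tmp/instagram_export.zip'    # placeholder Instagram export archive

if import_data(REQUEST_ID, EXPORT_PATH):
    print('Import finished; extracted data points were queued for batch insertion.')
else:
    print('Import failed; import_data printed a traceback before returning False.')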
| 36,402
| 0
| 552
|
cbbfc63bb3d1bd594dcc6409654a03119267b06e
| 563
|
py
|
Python
|
setup.py
|
GambitResearch/use_logging
|
6d941fbf4c452fb56a919e5975a373aaaf65123e
|
[
"MIT"
] | 1
|
2018-04-23T14:01:08.000Z
|
2018-04-23T14:01:08.000Z
|
setup.py
|
GambitResearch/use_logging
|
6d941fbf4c452fb56a919e5975a373aaaf65123e
|
[
"MIT"
] | 1
|
2020-11-14T03:37:39.000Z
|
2020-11-14T03:37:39.000Z
|
setup.py
|
GambitResearch/use_logging
|
6d941fbf4c452fb56a919e5975a373aaaf65123e
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
import os
from setuptools import find_packages
DIR = os.path.dirname(__file__)
with open(os.path.join(DIR, "README.md")) as f:
readme = f.read().splitlines()
setup(
name='use_logging',
version='0.0.1',
packages=find_packages(include='use_logging*'),
url='https://github.com/GambitResearch/use_logging',
author='Daniel Royde',
author_email='danielroyde@gmail.com',
description=readme[6],
long_description='\n'.join(readme[3:]).lstrip(),
keywords=['Python', 'Logging'],
scripts=['bin/use_logging'],
license='MIT',
)
| 23.458333
| 53
| 0.726465
|
from distutils.core import setup
import os
from setuptools import find_packages
DIR = os.path.dirname(__file__)
with open(os.path.join(DIR, "README.md")) as f:
readme = f.read().splitlines()
setup(
name='use_logging',
version='0.0.1',
packages=find_packages(include='use_logging*'),
url='https://github.com/GambitResearch/use_logging',
author='Daniel Royde',
author_email='danielroyde@gmail.com',
description=readme[6],
long_description='\n'.join(readme[3:]).lstrip(),
keywords=['Python', 'Logging'],
scripts=['bin/use_logging'],
license='MIT',
)
| 0
| 0
| 0
|
9312ffaac9b56887ad3243e2f099b34e9c902882
| 2,415
|
py
|
Python
|
crawler/crawler_instance/constants/constant.py
|
msmannan00/Genesis-Auto-Crawler
|
c0cf79a0fc7a12e056108ffc24faf0d3baa949ad
|
[
"MIT"
] | 1
|
2020-03-02T17:19:50.000Z
|
2020-03-02T17:19:50.000Z
|
crawler/crawler_instance/constants/constant.py
|
msmannan00/Genesis-Auto-Crawler
|
c0cf79a0fc7a12e056108ffc24faf0d3baa949ad
|
[
"MIT"
] | null | null | null |
crawler/crawler_instance/constants/constant.py
|
msmannan00/Genesis-Auto-Crawler
|
c0cf79a0fc7a12e056108ffc24faf0d3baa949ad
|
[
"MIT"
] | null | null | null |
from pathlib import Path
| 30.961538
| 113
| 0.735404
|
from pathlib import Path
class RAW_PATH_CONSTANTS:
S_SIGWIN_PATH = str(Path(__file__).parent.parent.parent.parent.parent) + "/cygwin64/bin/bash.exe --login"
S_PROJECT_PATH = str(Path(__file__).parent.parent.parent.parent)
S_RAW_PATH = S_PROJECT_PATH + "/"
S_DATASET_PATH = "/crawler/crawler_services/raw/crawled_classifier_websites.csv"
S_CRAWLER_IMAGE_CACHE_PATH = str(Path(__file__).parent.parent.parent.parent.parent) + "/crawler_image_cache/"
S_LOGS_DIRECTORY = str(Path(__file__).parent.parent.parent.parent.parent) +"/logs/"
class TOR_CONSTANTS:
S_SHELL_CONFIG_PATH = RAW_PATH_CONSTANTS.S_PROJECT_PATH + "/crawler/crawler_services/raw/config_script.sh"
S_TOR_PATH = RAW_PATH_CONSTANTS.S_PROJECT_PATH + "/genesis_onion_proxy"
S_TOR_PROXY_PATH = S_TOR_PATH + "/9052"
class CRAWL_SETTINGS_CONSTANTS:
    # Allowed Extensions
S_DOC_TYPES = [".pdf", ".msword", ".document", ".docx", ".doc"]
# Local URL
S_START_URL = "https://drive.google.com/uc?export=download&id=1ZG7D2NsI-NrVyp3SDq9q4zcrgFi3jhaG"
# Total Thread Instances Allowed
S_MAX_THREAD_COUNT_PER_INSTANCE = 30
    # Status Update Timeout and Endpoint
S_UPDATE_STATUS_TIMEOUT = 300
S_UPDATE_STATUS_URL = "https://167.86.99.31/update_status/?pRequest=m_crawler"
# Time Delay to Invoke New Url Requests
S_ICRAWL_INVOKE_DELAY = 2
S_CRAWLER_INVOKE_DELAY = 2
S_ICRAWL_IMAGE_INVOKE_DELAY = 2
S_TOR_NEW_CIRCUIT_INVOKE_DELAY = 300
S_LOCAL_FILE_CRAWLER_INVOKE_DELAY = 1
S_LOCAL_FILE_CRAWLER_INVOKE_DELAY_LONG = 10
# Max Allowed Depth
S_MAX_ALLOWED_DEPTH = 2
S_DEFAULT_DEPTH = 0
# Max URL Timeout
S_URL_TIMEOUT = 11170
S_HEADER_TIMEOUT = 30
# Max Host Queue Size
S_MAX_HOST_QUEUE_SIZE = 100
S_MAX_SUBHOST_QUEUE_SIZE = 100
# Max URL Size
S_MAX_URL_SIZE = 480
# Backup Time
S_BACKUP_TIME_DELAY = 86400
S_BACKUP_FETCH_LIMIT = 50
# Min Image Content Size
S_MIN_CONTENT_LENGTH = 50000
# User Agent
S_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0'
    # Crawl Category
S_THREAD_CATEGORY_GENERAL = "general"
S_THREAD_CATEGORY_UNKNOWN = "unknown"
# Max Static Images
S_STATIC_PARSER_LIST_MAX_SIZE = 10
S_MIN_CONTENT_LENGTH = 50000
# Duplication Fuzzy Logic Score
S_HOST_DATA_FUZZY_SCORE = 75
S_SUB_HOST_DATA_FUZZY_SCORE = 50
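# A minimal sketch (not from the source) of how a crawler worker might consume
# the settings above when issuing a request; the use of `requests` here is an
# assumption, only the constants come from the class above.
import requests

def fetch_page(url):
    headers = {'User-Agent': CRAWL_SETTINGS_CONSTANTS.S_USER_AGENT}
    return requests.get(url, headers=headers,
                        timeout=CRAWL_SETTINGS_CONSTANTS.S_HEADER_TIMEOUT)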
| 0
| 2,320
| 69
|
880060e846ba0c4e439ab8b8472884c5df0da377
| 5,795
|
py
|
Python
|
Bongard-LOGO_Baselines/models/classifier.py
|
simitii/Bongard-LOGO
|
45a0ab244c809b32bcba139b7273b8ec5aa0708c
|
[
"MIT"
] | 5
|
2020-08-19T01:50:11.000Z
|
2020-08-25T13:58:23.000Z
|
Bongard-LOGO_Baselines/models/classifier.py
|
simitii/Bongard-LOGO
|
45a0ab244c809b32bcba139b7273b8ec5aa0708c
|
[
"MIT"
] | null | null | null |
Bongard-LOGO_Baselines/models/classifier.py
|
simitii/Bongard-LOGO
|
45a0ab244c809b32bcba139b7273b8ec5aa0708c
|
[
"MIT"
] | null | null | null |
import math
import torch
import torch.nn as nn
import models
import utils
from .models import register
@register('classifier')
@register('linear-classifier')
@register('nn-classifier')
@register('moco')
class MoCo(nn.Module):
"""
Build a MoCo model with: a query encoder, a key encoder, and a queue
https://arxiv.org/abs/1911.05722
"""
def __init__(self, encoder, encoder_args, K=65536, m=0.999, T=0.07, mlp=False):
"""
        dim: feature dimension, taken from the encoder output (encoder.out_dim)
K: queue size; number of negative keys (default: 65536)
m: moco momentum of updating key encoder (default: 0.999)
T: softmax temperature (default: 0.07)
"""
super(MoCo, self).__init__()
self.K = K
self.m = m
self.T = T
# create the encoders
# feature embedding size is the output fc dimension
self.encoder_q = models.make(encoder, **encoder_args)
self.encoder_k = models.make(encoder, **encoder_args)
dim = self.encoder_q.out_dim
self.encoder = self.encoder_q # use encoder_q for downstream tasks
if mlp: # hack: brute-force replacement
dim_mlp = self.encoder_q.fc.weight.shape[1]
self.encoder_q.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.fc)
self.encoder_k.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.fc)
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
# create the queue
self.register_buffer("queue", torch.randn(dim, K))
self.queue = nn.functional.normalize(self.queue, dim=0)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)
@torch.no_grad()
@torch.no_grad()
def _batch_shuffle_ddp(self, x):
"""
Batch shuffle, for making use of BatchNorm.
"""
batch_size = x.shape[0]
# random shuffle index
idx_shuffle = torch.randperm(batch_size).long().cuda()
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
return x[idx_shuffle], idx_unshuffle
@torch.no_grad()
def _batch_unshuffle_ddp(self, x, idx_unshuffle):
"""
Undo batch shuffle.
"""
return x[idx_unshuffle]
def forward(self, im_q, im_k):
"""
Input:
im_q: a batch of query images
im_k: a batch of key images
Output:
logits, targets
"""
# compute query features
q = self.encoder_q(im_q) # queries: NxC
q = nn.functional.normalize(q, dim=1)
# compute key features
with torch.no_grad(): # no gradient to keys
self._momentum_update_key_encoder() # update the key encoder
# shuffle for making use of BN
im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
k = self.encoder_k(im_k) # keys: NxC
k = nn.functional.normalize(k, dim=1)
# undo shuffle
k = self._batch_unshuffle_ddp(k, idx_unshuffle)
# compute logits
# Einstein sum is more intuitive
# positive logits: Nx1
l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)
# negative logits: NxK
l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()])
# logits: Nx(1+K)
logits = torch.cat([l_pos, l_neg], dim=1)
# apply temperature
logits /= self.T
# labels: positive key indicators
labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda()
# dequeue and enqueue
self._dequeue_and_enqueue(k)
return logits, labels
| 30.824468
| 104
| 0.603451
|
import math
import torch
import torch.nn as nn
import models
import utils
from .models import register
@register('classifier')
class Classifier(nn.Module):
def __init__(self, encoder, encoder_args,
classifier, classifier_args):
super().__init__()
self.encoder = models.make(encoder, **encoder_args)
classifier_args['in_dim'] = self.encoder.out_dim
self.classifier = models.make(classifier, **classifier_args)
def forward(self, x):
x = self.encoder(x)
x = self.classifier(x)
return x
@register('linear-classifier')
class LinearClassifier(nn.Module):
def __init__(self, in_dim, n_classes):
super().__init__()
self.linear = nn.Linear(in_dim, n_classes)
def forward(self, x):
return self.linear(x)
@register('nn-classifier')
class NNClassifier(nn.Module):
def __init__(self, in_dim, n_classes, metric='cos', temp=None):
super().__init__()
self.proto = nn.Parameter(torch.empty(n_classes, in_dim))
nn.init.kaiming_uniform_(self.proto, a=math.sqrt(5))
if temp is None:
if metric == 'cos':
temp = nn.Parameter(torch.tensor(10.))
else:
temp = 1.0
self.metric = metric
self.temp = temp
def forward(self, x):
return utils.compute_logits(x, self.proto, self.metric, self.temp)
@register('moco')
class MoCo(nn.Module):
"""
Build a MoCo model with: a query encoder, a key encoder, and a queue
https://arxiv.org/abs/1911.05722
"""
def __init__(self, encoder, encoder_args, K=65536, m=0.999, T=0.07, mlp=False):
"""
        dim: feature dimension, taken from the encoder output (encoder.out_dim)
K: queue size; number of negative keys (default: 65536)
m: moco momentum of updating key encoder (default: 0.999)
T: softmax temperature (default: 0.07)
"""
super(MoCo, self).__init__()
self.K = K
self.m = m
self.T = T
# create the encoders
# feature embedding size is the output fc dimension
self.encoder_q = models.make(encoder, **encoder_args)
self.encoder_k = models.make(encoder, **encoder_args)
dim = self.encoder_q.out_dim
self.encoder = self.encoder_q # use encoder_q for downstream tasks
if mlp: # hack: brute-force replacement
dim_mlp = self.encoder_q.fc.weight.shape[1]
self.encoder_q.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.fc)
self.encoder_k.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.fc)
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
# create the queue
self.register_buffer("queue", torch.randn(dim, K))
self.queue = nn.functional.normalize(self.queue, dim=0)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)
@torch.no_grad()
def _dequeue_and_enqueue(self, keys):
batch_size = keys.shape[0]
ptr = int(self.queue_ptr)
assert self.K % batch_size == 0, batch_size # for simplicity
# replace the keys at ptr (dequeue and enqueue)
self.queue[:, ptr:ptr + batch_size] = keys.transpose(0, 1)
ptr = (ptr + batch_size) % self.K # move pointer
self.queue_ptr[0] = ptr
@torch.no_grad()
def _batch_shuffle_ddp(self, x):
"""
Batch shuffle, for making use of BatchNorm.
"""
batch_size = x.shape[0]
# random shuffle index
idx_shuffle = torch.randperm(batch_size).long().cuda()
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
return x[idx_shuffle], idx_unshuffle
@torch.no_grad()
def _batch_unshuffle_ddp(self, x, idx_unshuffle):
"""
Undo batch shuffle.
"""
return x[idx_unshuffle]
def forward(self, im_q, im_k):
"""
Input:
im_q: a batch of query images
im_k: a batch of key images
Output:
logits, targets
"""
# compute query features
q = self.encoder_q(im_q) # queries: NxC
q = nn.functional.normalize(q, dim=1)
# compute key features
with torch.no_grad(): # no gradient to keys
self._momentum_update_key_encoder() # update the key encoder
# shuffle for making use of BN
im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
k = self.encoder_k(im_k) # keys: NxC
k = nn.functional.normalize(k, dim=1)
# undo shuffle
k = self._batch_unshuffle_ddp(k, idx_unshuffle)
# compute logits
# Einstein sum is more intuitive
# positive logits: Nx1
l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)
# negative logits: NxK
l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()])
# logits: Nx(1+K)
logits = torch.cat([l_pos, l_neg], dim=1)
# apply temperature
logits /= self.T
# labels: positive key indicators
labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda()
# dequeue and enqueue
self._dequeue_and_enqueue(k)
return logits, labels
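# Self-contained sketch (not from the source) of the shapes produced by the
# einsum-based logit computation in MoCo.forward above, using random CPU
# tensors in place of encoder outputs; N, C and K are illustrative sizes.
import torch
import torch.nn.functional as F

N, C, K = 4, 128, 16
q = F.normalize(torch.randn(N, C), dim=1)       # queries
k = F.normalize(torch.randn(N, C), dim=1)       # positive keys
queue = F.normalize(torch.randn(C, K), dim=0)   # queued negative keys

l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)   # N x 1
l_neg = torch.einsum('nc,ck->nk', [q, queue])            # N x K
logits = torch.cat([l_pos, l_neg], dim=1) / 0.07         # N x (1 + K), temperature-scaled
labels = torch.zeros(N, dtype=torch.long)                # positive key sits at column 0
print(logits.shape, labels.shape)   # torch.Size([4, 17]) torch.Size([4])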
| 1,334
| 29
| 258
|
86930328405a67946a08f93b1eefb3fea203cf53
| 2,328
|
py
|
Python
|
bigtop-packages/src/charm/hive/layer-hive/tests/02-smoke-test.py
|
sekikn2/bigtop
|
f183d9430b7c8c98d475a379d6980b16bcbb6c0b
|
[
"Apache-2.0"
] | 371
|
2015-01-19T05:42:51.000Z
|
2022-03-27T14:46:52.000Z
|
bigtop-packages/src/charm/hive/layer-hive/tests/02-smoke-test.py
|
sekikn2/bigtop
|
f183d9430b7c8c98d475a379d6980b16bcbb6c0b
|
[
"Apache-2.0"
] | 491
|
2015-03-10T17:25:02.000Z
|
2022-03-30T12:22:44.000Z
|
bigtop-packages/src/charm/hive/layer-hive/tests/02-smoke-test.py
|
sekikn2/bigtop
|
f183d9430b7c8c98d475a379d6980b16bcbb6c0b
|
[
"Apache-2.0"
] | 395
|
2015-01-02T20:53:01.000Z
|
2022-03-21T08:49:08.000Z
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet
import re
import unittest
class TestDeploy(unittest.TestCase):
"""
Hadoop/Hive deployment and smoke test for the Apache Bigtop Hive service.
"""
@classmethod
def test_hive(self):
"""
Validate Hive by running the smoke-test action.
"""
uuid = self.hive.run_action('smoke-test')
result = self.d.action_fetch(uuid, full_output=True)
# action status=completed on success
if (result['status'] != "completed"):
self.fail('Hive smoke-test failed: %s' % result)
if __name__ == '__main__':
unittest.main()
| 38.163934
| 81
| 0.672251
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet
import re
import unittest
class TestDeploy(unittest.TestCase):
"""
Hadoop/Hive deployment and smoke test for the Apache Bigtop Hive service.
"""
@classmethod
def setUpClass(cls):
cls.d = amulet.Deployment(series='xenial')
cls.d.add('hive')
cls.d.add('namenode', 'hadoop-namenode')
cls.d.add('plugin', 'hadoop-plugin')
cls.d.add('resourcemanager', 'hadoop-resourcemanager')
cls.d.add('slave', 'hadoop-slave')
cls.d.relate('hive:hadoop', 'plugin:hadoop-plugin')
cls.d.relate('plugin:namenode', 'namenode:namenode')
cls.d.relate('plugin:resourcemanager', 'resourcemanager:resourcemanager')
cls.d.relate('resourcemanager:namenode', 'namenode:namenode')
cls.d.relate('slave:namenode', 'namenode:datanode')
cls.d.relate('slave:resourcemanager', 'resourcemanager:nodemanager')
cls.d.setup(timeout=3600)
cls.d.sentry.wait_for_messages({'hive': re.compile('ready')},
timeout=3600)
cls.hive = cls.d.sentry['hive'][0]
def test_hive(self):
"""
Validate Hive by running the smoke-test action.
"""
uuid = self.hive.run_action('smoke-test')
result = self.d.action_fetch(uuid, full_output=True)
# action status=completed on success
if (result['status'] != "completed"):
self.fail('Hive smoke-test failed: %s' % result)
if __name__ == '__main__':
unittest.main()
| 888
| 0
| 26
|
3bfe0be62167e97ac097f4e3fa534d9db53cec4b
| 1,887
|
py
|
Python
|
BlueKumquatAutoDiff/tests/test_simpleautodiff.py
|
cs107-blue-kumquat/cs107-FinalProject
|
8fba44103ca0c48969b712de8bd8ac0e80ec3806
|
[
"MIT"
] | null | null | null |
BlueKumquatAutoDiff/tests/test_simpleautodiff.py
|
cs107-blue-kumquat/cs107-FinalProject
|
8fba44103ca0c48969b712de8bd8ac0e80ec3806
|
[
"MIT"
] | 10
|
2021-11-18T14:56:04.000Z
|
2021-12-11T23:23:47.000Z
|
BlueKumquatAutoDiff/tests/test_simpleautodiff.py
|
cs107-blue-kumquat/cs107-FinalProject
|
8fba44103ca0c48969b712de8bd8ac0e80ec3806
|
[
"MIT"
] | 1
|
2021-11-18T08:38:35.000Z
|
2021-11-18T08:38:35.000Z
|
import pytest
from BlueKumquatAutoDiff.autodiff import *
| 29.030769
| 69
| 0.585586
|
import pytest
from BlueKumquatAutoDiff.autodiff import *
def test_init():
dict_val = {'x': 1,'y': 2}
list_functs = ['x * 8', 'x*y']
auto_diff_test = SimpleAutoDiff(dict_val, list_functs)
dict_val = {'x': 1}
list_functs = ['x * 8', 2]
with pytest.raises(TypeError):
auto_diff_test = SimpleAutoDiff(dict_val, list_functs)
def test_elem_func():
dict_val = {'x': 0.2}
list_functs = ['log(x)', 'sqrt(x)', 'exp(x)',
'sin(x)', 'cos(x)', 'tan(x)',
'arcsin(x)', 'arccos(x)', 'arctan(x)',
'sinh(x)', 'cosh(x)', 'tanh(x)',
'sigmoid(x)']
auto_diff_test = SimpleAutoDiff(dict_val, list_functs)
def test_elem_func_plus():
dict_val = {'x': 0.2, 'y':0.5}
list_functs = ['log(x+y)', 'sqrt(x+y)', 'exp(x+y)',
'sin(x+y)', 'cos(x+y)', 'tan(x+y)',
'arcsin(x+y)', 'arccos(x+y)', 'arctan(x+y)',
'sinh(x+y)', 'cosh(x+y)', 'tanh(x+y)',
'sigmoid(x+y)']
auto_diff_test = SimpleAutoDiff(dict_val, list_functs)
def test_elem_func_times():
dict_val = {'x': 0.2, 'y':0.5}
list_functs = ['log(x*y)', 'sqrt(x*y)', 'exp(x*y)',
'sin(x*y)', 'cos(x*y)', 'tan(x*y)',
'arcsin(x*y)', 'arccos(x*y)', 'arctan(x*y)',
'sinh(x*y)', 'cosh(x*y)', 'tanh(x*y)',
'sigmoid(x*y)']
auto_diff_test = SimpleAutoDiff(dict_val, list_functs)
def test_repr():
dict_val = {'x': 1}
list_functs = ['x * 8']
auto_diff_test = SimpleAutoDiff(dict_val, list_functs)
temp = "---AutoDifferentiation---\n" \
"Value: {'x': 1}\n\n" \
"Function 1: \nExpression = x * 8\nValue = 8\nGradient = [8.]\n\n"
assert repr(auto_diff_test) == temp
def test_str():
dict_val = {'x': 1}
list_functs = ['x * 8']
auto_diff_test = SimpleAutoDiff(dict_val, list_functs)
temp = "---AutoDifferentiation---\n" \
"Value: {'x': 1}\n\n" \
"Function 1: \nExpression = x * 8\nValue = 8\nGradient = [8.]\n\n"
assert str(auto_diff_test) == temp
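# Usage sketch (assumes the BlueKumquatAutoDiff package imported at the top of
# this test module is installed); the values mirror what test_repr expects.
ad = SimpleAutoDiff({'x': 1}, ['x * 8'])
print(ad)
# ---AutoDifferentiation---
# Value: {'x': 1}
#
# Function 1:
# Expression = x * 8
# Value = 8
# Gradient = [8.]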
| 1,687
| 0
| 138
|
2c283e5675178b2b85b5f4e137d34e9bb198f24c
| 1,512
|
py
|
Python
|
speed_friending_matcher/core/person.py
|
machinekoder/speed-friending-matcher
|
c1d63c44e198497662c561342b64f597cb8623e9
|
[
"MIT"
] | 4
|
2018-05-04T15:44:09.000Z
|
2019-03-17T04:54:20.000Z
|
speed_friending_matcher/core/person.py
|
machinekoder/speed-friending-and-dating-matcher
|
c1d63c44e198497662c561342b64f597cb8623e9
|
[
"MIT"
] | 12
|
2017-10-21T10:42:28.000Z
|
2019-02-27T10:33:04.000Z
|
speed_friending_matcher/core/person.py
|
machinekoder/speed-friending-matcher
|
c1d63c44e198497662c561342b64f597cb8623e9
|
[
"MIT"
] | 8
|
2018-01-02T10:14:19.000Z
|
2022-01-11T03:17:04.000Z
|
# -*- coding: utf-8 -*-
from aenum import Flag
from .results import Results
| 20.432432
| 70
| 0.578042
|
# -*- coding: utf-8 -*-
from aenum import Flag
from .results import Results
class MatchingFlags(Flag):
no_flags = 0
match_all = 1
class Person(object):
def __init__(
self,
name,
number,
marked_numbers,
flags=MatchingFlags.no_flags,
email=None,
phone=None,
):
if type(flags) is not MatchingFlags:
raise TypeError('Must use MatchingOptions')
if type(marked_numbers) is not set:
raise TypeError('marked_numbers must be a set')
self._number = number
self._name = name
self._email = email
self._phone = phone
self._flags = flags
self._marked_numbers = marked_numbers
self._matches = set()
self._results = Results()
@property
def number(self):
return self._number
@property
def name(self):
return self._name
@property
def email(self):
return self._email
@property
def phone(self):
return self._phone
@property
def matches(self):
return self._matches
@property
def marked_numbers(self):
return self._marked_numbers
@property
def flags(self):
return self._flags
@property
def results(self):
return self._results
def __repr__(self):
return 'Person (name: %s, number: %s, marked_numbers: %s)' % (
self._name,
self._number,
self._marked_numbers,
)
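# Usage sketch, not part of the source module; the example data is illustrative.
alice = Person(name='Alice', number=1, marked_numbers={2, 3},
               flags=MatchingFlags.match_all, email='alice@example.com')
print(alice)   # Person (name: Alice, number: 1, marked_numbers: {2, 3})

# Per the type checks in __init__ above, a list instead of a set raises TypeError:
# Person(name='Bob', number=2, marked_numbers=[1])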
| 967
| 421
| 46
|
569055396f68e7bd133e087451933a357b5e7f8c
| 689
|
py
|
Python
|
neura_hs/core/management/commands/update_db.py
|
ysaron/NeuraHS
|
46b77feba459ef04c9adaebb2af11d6c0b5c9c87
|
[
"MIT"
] | null | null | null |
neura_hs/core/management/commands/update_db.py
|
ysaron/NeuraHS
|
46b77feba459ef04c9adaebb2af11d6c0b5c9c87
|
[
"MIT"
] | null | null | null |
neura_hs/core/management/commands/update_db.py
|
ysaron/NeuraHS
|
46b77feba459ef04c9adaebb2af11d6c0b5c9c87
|
[
"MIT"
] | null | null | null |
from django.core.management.base import BaseCommand
import time
from core.services.update import Updater
| 32.809524
| 93
| 0.669086
|
from django.core.management.base import BaseCommand
import time
from core.services.update import Updater
class Command(BaseCommand):
help = 'Update the card database'
def add_arguments(self, parser):
parser.add_argument('-r', '--rewrite', action='store_true', help='Rewrite all cards')
def handle(self, *args, **options):
start = time.perf_counter()
upd = Updater(self.stdout.write, rewrite=options['rewrite'])
upd.update()
end = time.perf_counter()
self.stdout.write(f'Database update took {end - start:.2f}s')
self.stdout.write('Renders need to be updated:')
self.stdout.write(' '.join(upd.to_be_updated))
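# Usage sketch, assuming Django's management-command convention: a file at
# core/management/commands/update_db.py registers an `update_db` command, so
# `python manage.py update_db --rewrite` runs the handler above. The
# programmatic equivalent:
from django.core.management import call_command

call_command('update_db', rewrite=True)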
| 461
| 98
| 23
|
a6d758ca7000afd5d7e09cef42c9adc4d7d8629e
| 347
|
py
|
Python
|
tests/python/test_nltk.py
|
lcnja/modulefiles-1
|
2bbaf48089ef55f0a8247e6c365103f2b852feec
|
[
"Apache-2.0"
] | 6
|
2016-03-17T13:59:43.000Z
|
2021-07-08T06:15:33.000Z
|
tests/python/test_nltk.py
|
lcnja/modulefiles-1
|
2bbaf48089ef55f0a8247e6c365103f2b852feec
|
[
"Apache-2.0"
] | null | null | null |
tests/python/test_nltk.py
|
lcnja/modulefiles-1
|
2bbaf48089ef55f0a8247e6c365103f2b852feec
|
[
"Apache-2.0"
] | 2
|
2016-08-10T16:04:15.000Z
|
2016-10-13T17:38:24.000Z
|
import nltk
import sys
sentence = """At eight o'clock on Thursday morning Arthur didn't feel very good."""
tokens = nltk.word_tokenize(sentence)
if tokens != ['At', 'eight', "o'clock", 'on', 'Thursday', 'morning',
'Arthur', 'did', "n't", 'feel', 'very', 'good', '.']:
sys.stderr.write("Error in tokenization")
sys.exit(1)
| 31.545455
| 83
| 0.613833
|
import nltk
import sys
sentence = """At eight o'clock on Thursday morning Arthur didn't feel very good."""
tokens = nltk.word_tokenize(sentence)
if tokens != ['At', 'eight', "o'clock", 'on', 'Thursday', 'morning',
'Arthur', 'did', "n't", 'feel', 'very', 'good', '.']:
sys.stderr.write("Error in tokenization")
sys.exit(1)
| 0
| 0
| 0
|
82a5222a023194785deed9788f9a013c3fa11c0a
| 84
|
py
|
Python
|
vyperlogix/products/data.py
|
raychorn/chrome_gui
|
f1fade70b61af12ee43c55c075aa9cfd32caa962
|
[
"CC0-1.0"
] | 1
|
2020-09-29T01:36:33.000Z
|
2020-09-29T01:36:33.000Z
|
vyperlogix/products/data.py
|
raychorn/chrome_gui
|
f1fade70b61af12ee43c55c075aa9cfd32caa962
|
[
"CC0-1.0"
] | null | null | null |
vyperlogix/products/data.py
|
raychorn/chrome_gui
|
f1fade70b61af12ee43c55c075aa9cfd32caa962
|
[
"CC0-1.0"
] | null | null | null |
import os
_data_path_prefix = lambda name:os.sep.join(['www.VyperLogix.com',name])
| 21
| 72
| 0.761905
|
import os
_data_path_prefix = lambda name:os.sep.join(['www.VyperLogix.com',name])
| 0
| 0
| 0
|
fb5560a1846af690e1d1431d238f8f3bb488cacc
| 1,139
|
py
|
Python
|
setup.py
|
ideonate/sammy
|
8a8b9fae28899429c1ad1e51e4ff5d6f7f6047ec
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
ideonate/sammy
|
8a8b9fae28899429c1ad1e51e4ff5d6f7f6047ec
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
ideonate/sammy
|
8a8b9fae28899429c1ad1e51e4ff5d6f7f6047ec
|
[
"Apache-2.0"
] | null | null | null |
import pathlib
import pkg_resources
from setuptools import setup, find_packages
with pathlib.Path('requirements.txt').open() as requirements_txt:
install_requires = [
str(requirement)
for requirement
in pkg_resources.parse_requirements(requirements_txt)
]
version = '0.4.2'
setup(
name='sammy',
version=version,
description="Python library for generating AWS SAM "
"(Serverless Application Model) templates with validation.",
classifiers=[
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3 :: Only"
],
keywords='serverless, cloudformation, sam',
author='Brian Jinwright',
author_email='opensource@ipoots.com',
maintainer='Brian Jinwright',
packages=find_packages(),
url='https://github.com/capless/sammy',
license='GNU General Public License v3.0',
install_requires=install_requires,
include_package_data=True,
zip_safe=False,
)
| 31.638889
| 76
| 0.67691
|
import pathlib
import pkg_resources
from setuptools import setup, find_packages
with pathlib.Path('requirements.txt').open() as requirements_txt:
install_requires = [
str(requirement)
for requirement
in pkg_resources.parse_requirements(requirements_txt)
]
version = '0.4.2'
setup(
name='sammy',
version=version,
description="Python library for generating AWS SAM "
"(Serverless Application Model) templates with validation.",
classifiers=[
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3 :: Only"
],
keywords='serverless, cloudformation, sam',
author='Brian Jinwright',
author_email='opensource@ipoots.com',
maintainer='Brian Jinwright',
packages=find_packages(),
url='https://github.com/capless/sammy',
license='GNU General Public License v3.0',
install_requires=install_requires,
include_package_data=True,
zip_safe=False,
)
| 0
| 0
| 0
|
41f3cc563c60d973d5785841494c8b5887325556
| 1,241
|
py
|
Python
|
iii_reproducibility/mlflow_tracking_example.py
|
production-ml-book-team/prod-ml-book
|
b6886f5f2a79ddac9accf6b056041deb08e47217
|
[
"MIT"
] | null | null | null |
iii_reproducibility/mlflow_tracking_example.py
|
production-ml-book-team/prod-ml-book
|
b6886f5f2a79ddac9accf6b056041deb08e47217
|
[
"MIT"
] | null | null | null |
iii_reproducibility/mlflow_tracking_example.py
|
production-ml-book-team/prod-ml-book
|
b6886f5f2a79ddac9accf6b056041deb08e47217
|
[
"MIT"
] | 1
|
2020-03-01T06:04:02.000Z
|
2020-03-01T06:04:02.000Z
|
import numpy
import sklearn.naive_bayes
import sklearn.feature_extraction.text
import sklearn.pipeline
# New additions
import mlflow.sklearn
mlflow.set_tracking_uri("http://atrium.datmo.com")
mlflow.set_experiment("training_module")
...
train_and_evaluate_model()
| 33.540541
| 91
| 0.691378
|
import numpy
import sklearn.naive_bayes
import sklearn.feature_extraction.text
import sklearn.pipeline
# New additions
import mlflow.sklearn
mlflow.set_tracking_uri("http://atrium.datmo.com")
mlflow.set_experiment("training_module")
...
def train_and_evaluate_model():
with mlflow.start_run():
# Load dataset:
docs, labels = load_labeled_data_set()
train_docs, train_labels, test_docs, test_labels = partition_data_set(docs, labels)
# Train classifier:
mlflow.log_param('classifier', "naive bayes")
mlflow.log_param('code commit id', version)
classifier = sklearn.pipeline.Pipeline([
("vect", sklearn.feature_extraction.text.CountVectorizer()),
("tfidf", sklearn.feature_extraction.text.TfidfTransformer()),
("clf", sklearn.naive_bayes.MultinomialNB()),
])
classifier.fit(train_docs, train_labels)
# Evaluate classifier:
predicted_labels = classifier.predict(test_docs)
accuracy = numpy.mean(predicted_labels == test_labels)
print("Accuracy = %s" % (accuracy,))
mlflow.log_metric('accuracy', accuracy)
mlflow.sklearn.log_model(classifier, "model")
train_and_evaluate_model()
| 952
| 0
| 23
|
a669c97df041bcfe9de6ff7b4f23af94ace2cc3f
| 811
|
py
|
Python
|
dice_spider_2/spider/test_mysql_connector.py
|
guanxin0206/dice_crawler
|
0b929b49911dc92d718905b2e1112c91e15ef2cd
|
[
"BSD-2-Clause"
] | 1
|
2017-04-05T23:34:06.000Z
|
2017-04-05T23:34:06.000Z
|
dice_spider_2/spider/test_mysql_connector.py
|
guanxin0206/dice_crawler
|
0b929b49911dc92d718905b2e1112c91e15ef2cd
|
[
"BSD-2-Clause"
] | null | null | null |
dice_spider_2/spider/test_mysql_connector.py
|
guanxin0206/dice_crawler
|
0b929b49911dc92d718905b2e1112c91e15ef2cd
|
[
"BSD-2-Clause"
] | null | null | null |
'''
Created on Jun 14, 2017
@author: xinguan
'''
# import mysql.connector
import mysql.connector
create_dice_jobs = (
"CREATE TABLE IF NOT EXISTS `dice_jobs` ("
" `job_unique_id` varchar(50) NOT NULL,"
" `job_title` text NOT NULL,"
" `job_url` text NOT NULL,"
" `company` text NOT NULL,"
" `post_date` date NOT NULL,"
" `job_description` text NOT NULL,"
" PRIMARY KEY (`job_unique_id`)"
") ENGINE=InnoDB")
cnx = mysql.connector.connect(user='root', password='u6a3pwhe',
host='127.0.0.1',
database='dice_test')
cursor = cnx.cursor()
try:
cursor.execute(create_dice_jobs)
cnx.commit()
except mysql.connector.Error as err:
print err
cnx.rollback()
finally:
cursor.close()
cnx.close()
| 24.575758
| 63
| 0.604192
|
'''
Created on Jun 14, 2017
@author: xinguan
'''
# import mysql.connector
import mysql.connector
create_dice_jobs = (
"CREATE TABLE IF NOT EXISTS `dice_jobs` ("
" `job_unique_id` varchar(50) NOT NULL,"
" `job_title` text NOT NULL,"
" `job_url` text NOT NULL,"
" `company` text NOT NULL,"
" `post_date` date NOT NULL,"
" `job_description` text NOT NULL,"
" PRIMARY KEY (`job_unique_id`)"
") ENGINE=InnoDB")
cnx = mysql.connector.connect(user='root', password='u6a3pwhe',
host='127.0.0.1',
database='dice_test')
cursor = cnx.cursor()
try:
cursor.execute(create_dice_jobs)
cnx.commit()
except mysql.connector.Error as err:
print err
cnx.rollback()
finally:
cursor.close()
cnx.close()
| 0
| 0
| 0
|
36c3ae068b1b22355b24edc20d0a52286b45a4ff
| 1,922
|
py
|
Python
|
src/utils.py
|
showmethecoin/upbit-trader
|
f29f0e6074c4cc2296ca098f7d53cac5dd780881
|
[
"Apache-2.0"
] | 8
|
2021-04-30T08:30:21.000Z
|
2022-03-08T13:35:36.000Z
|
src/utils.py
|
showmethecoin/upbit-trader
|
f29f0e6074c4cc2296ca098f7d53cac5dd780881
|
[
"Apache-2.0"
] | 4
|
2021-04-30T09:39:35.000Z
|
2021-05-08T15:24:08.000Z
|
src/utils.py
|
showmethecoin/upbit-trader
|
f29f0e6074c4cc2296ca098f7d53cac5dd780881
|
[
"Apache-2.0"
] | 4
|
2021-11-24T06:53:40.000Z
|
2022-03-21T04:20:31.000Z
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import logging
import asyncio as aio
from multiprocessing import set_start_method
def get_logger(print_format: str = '[%(asctime)s.%(msecs)03d: %(levelname).1s %(filename)s:%(lineno)s] %(message)s',
date_format: str = '%Y-%m-%d %H:%M:%S',
print: bool = True,
save: bool = True,
save_path: str = 'upbit-trader.log'):
''' Logger Configuration'''
log = logging.getLogger()
# Setup logger level
log.setLevel(logging.INFO)
# Setup logger format
formatter = logging.Formatter(fmt=print_format, datefmt=date_format)
# Setup logger handler
if print:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
if save:
if save_path == 'upbit-trader.log' and not sys.platform.startswith('win'):
file_handler = logging.FileHandler('upbit-trader.log')
else:
file_handler = logging.FileHandler(save_path)
file_handler.setFormatter(formatter)
log.addHandler(file_handler)
return log
| 34.945455
| 116
| 0.664412
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import logging
import asyncio as aio
from multiprocessing import set_start_method
def get_logger(print_format: str = '[%(asctime)s.%(msecs)03d: %(levelname).1s %(filename)s:%(lineno)s] %(message)s',
date_format: str = '%Y-%m-%d %H:%M:%S',
print: bool = True,
save: bool = True,
save_path: str = 'upbit-trader.log'):
''' Logger Configuration'''
log = logging.getLogger()
# Setup logger level
log.setLevel(logging.INFO)
# Setup logger format
formatter = logging.Formatter(fmt=print_format, datefmt=date_format)
# Setup logger handler
if print:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
if save:
if save_path == 'upbit-trader.log' and not sys.platform.startswith('win'):
file_handler = logging.FileHandler('upbit-trader.log')
else:
file_handler = logging.FileHandler(save_path)
file_handler.setFormatter(formatter)
log.addHandler(file_handler)
return log
def get_file_path(filename: str):
if getattr(sys, "frozen", False):
# The application is frozen.
datadir = os.path.dirname(sys.executable)
else:
# The application is not frozen.
datadir = os.path.dirname(__file__)
return os.path.join(datadir, filename)
def set_windows_selector_event_loop_global():
    # NOTE: workaround for the EventLoop RuntimeError that occurs on Windows from Python 3.7+ onwards
py_ver = int(f"{sys.version_info.major}{sys.version_info.minor}")
if py_ver > 37 and sys.platform.startswith('win'):
aio.set_event_loop_policy(aio.WindowsSelectorEventLoopPolicy())
def set_multiprocessing_context():
if sys.platform == 'darwin' and getattr(sys, "frozen", False):
set_start_method('fork')
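# Illustrative usage sketch only, not part of the record above: combining the helpers
# defined in this module at application startup. The import path "utils" is taken from
# the record's file path (src/utils.py) and is an assumption.
from utils import (get_logger, get_file_path,
                   set_windows_selector_event_loop_global, set_multiprocessing_context)

set_windows_selector_event_loop_global()  # Windows / Python 3.8+ event-loop policy workaround
set_multiprocessing_context()             # 'fork' start method for frozen macOS builds
log = get_logger(save_path=get_file_path('upbit-trader.log'))
log.info('logger configured')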
| 719
| 0
| 69
|
4632d1de447aed60d67507867bf7386bdd3879cb
| 1,001
|
py
|
Python
|
multivitamin/applications/utils.py
|
keishinkickback/multivitamin
|
edc49267fc5a25182da0e7c7c8bba398225437e5
|
[
"Apache-2.0"
] | 8
|
2019-05-08T20:27:41.000Z
|
2021-04-19T15:17:22.000Z
|
multivitamin/applications/utils.py
|
keishinkickback/multivitamin
|
edc49267fc5a25182da0e7c7c8bba398225437e5
|
[
"Apache-2.0"
] | 9
|
2019-05-17T19:16:50.000Z
|
2022-03-11T23:46:55.000Z
|
multivitamin/applications/utils.py
|
keishinkickback/multivitamin
|
edc49267fc5a25182da0e7c7c8bba398225437e5
|
[
"Apache-2.0"
] | 4
|
2019-05-07T18:00:51.000Z
|
2019-06-22T02:35:51.000Z
|
import os
def load_idmap(idmap_file):
"""Load tab-separated idmap file containing label index and label string
Args:
idmap_file (str): filepath to idmap
Returns:
dict: labelmap (key=index, value=string)
"""
if not os.path.exists(idmap_file):
raise FileExistsError(idmap_file)
labelmap = {}
with open(idmap_file, "r") as rf:
for row in rf:
row = row.split("\t")
labelmap[int(row[0])] = row[1].strip()
return labelmap
| 26.342105
| 76
| 0.625375
|
import os
def load_idmap(idmap_file):
"""Load tab-separated idmap file containing label index and label string
Args:
idmap_file (str): filepath to idmap
Returns:
dict: labelmap (key=index, value=string)
"""
if not os.path.exists(idmap_file):
raise FileExistsError(idmap_file)
labelmap = {}
with open(idmap_file, "r") as rf:
for row in rf:
row = row.split("\t")
labelmap[int(row[0])] = row[1].strip()
return labelmap
def load_label_prototxt(prototxt_file):
from google.protobuf import text_format
from caffe.proto import caffe_pb2 as cpb2
with open(prototxt_file) as f:
labelmap_aux = cpb2.LabelMap()
text_format.Merge(str(f.read()), labelmap_aux)
num_labels = len(labelmap_aux.item)
labelmap = {}
for item in labelmap_aux.item:
index = item.label
label = item.display_name
labelmap[index] = label
return labelmap
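# Illustrative sketch only, not part of the record above: writing a tiny tab-separated
# idmap file and reading it back with load_idmap() as defined above; the file name and
# labels are invented for the example.
with open('idmap.tsv', 'w') as f:
    f.write('0\tcat\n1\tdog\n')
labelmap = load_idmap('idmap.tsv')
assert labelmap == {0: 'cat', 1: 'dog'}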
| 468
| 0
| 23
|
c43cbd4a0728073a28c019995d9c2e09b5784273
| 5,055
|
py
|
Python
|
gracebot/tests/test_VOEvent.py
|
Roald87/GraceDB
|
5c7e6cc93a33b00c1c30ce040ef26326c003630d
|
[
"Apache-2.0"
] | 7
|
2019-05-16T20:08:11.000Z
|
2021-10-07T03:15:00.000Z
|
gracebot/tests/test_VOEvent.py
|
Roald87/GraceDB
|
5c7e6cc93a33b00c1c30ce040ef26326c003630d
|
[
"Apache-2.0"
] | 24
|
2019-07-07T06:14:12.000Z
|
2021-09-21T18:50:50.000Z
|
gracebot/tests/test_VOEvent.py
|
Roald87/GraceDB
|
5c7e6cc93a33b00c1c30ce040ef26326c003630d
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from unittest.mock import Mock, patch
import pytest
from pytest import approx
from functions import mpc_to_mly
from voevent import VOEventFromXml, VOEventFromEventId
import tests.voevent_test_data as test_data
import ligo
from ligo.gracedb.exceptions import HTTPError
@patch("ligo.gracedb.rest.GraceDb.voevents")
@patch("ligo.gracedb.rest.GraceDb.get")
@patch("ligo.gracedb.rest.GraceDb.get")
@pytest.fixture(scope="class")
@pytest.mark.usefixtures("event_id")
@pytest.fixture(scope="class")
@pytest.mark.usefixtures("mock_event_file")
@pytest.fixture(scope="class")
@pytest.mark.usefixtures("real_event_file")
| 27.774725
| 88
| 0.680317
|
from unittest import TestCase
from unittest.mock import Mock, patch
import pytest
from pytest import approx
from functions import mpc_to_mly
from voevent import VOEventFromXml, VOEventFromEventId
import tests.voevent_test_data as test_data
import ligo
from ligo.gracedb.exceptions import HTTPError
@patch("ligo.gracedb.rest.GraceDb.voevents")
def test_getting_event_from_event_id(mock_get):
mock_get.return_value = Mock(ok=True)
expected_event_json = test_data.expected_event_json
mock_get.return_value.json.return_value = expected_event_json
voevent = VOEventFromEventId()
actual_event_json = voevent._get_voevents_json("S190521r")
assert actual_event_json == expected_event_json["voevents"]
def test_sort_voevent_newest_should_be_first():
voevent = VOEventFromEventId()
unsorted_json = test_data.unsorted_json_S190521r
actual_json = voevent._sort_voevents_newest_first(unsorted_json)
assert actual_json == test_data.sorted_json_S190521r
@patch("ligo.gracedb.rest.GraceDb.get")
def test_get_latest_voevent(mock_get):
mock_get.return_value = Mock(ok=True)
mock_get.side_effect = load_data_S190521r
voevent = VOEventFromEventId()
result = voevent._try_get_latest_voevent(test_data.sorted_json_S190521r)
expected = test_data.read_xml("gracebot/tests/data/S190521r-2-Initial.xml")
assert result == expected
def load_data_S190521r(event_url):
return test_data.voevents_S190521r[event_url]
@patch("ligo.gracedb.rest.GraceDb.get")
def test_get_latest_voevent_should_raise_http_error_on_latest_voevent_and_get_older_one(
mock_get
):
mock_get.return_value = Mock(ok=True)
mock_get.side_effect = (
HTTPError(status=400, reason="Bad Request", message="Bad Request"),
test_data.read_xml("gracebot/tests/data/S190517h-2-Initial.xml"),
)
voevent = VOEventFromEventId()
result = voevent._try_get_latest_voevent(test_data.sorted_json_S190517h)
expected = test_data.read_xml("gracebot/tests/data/S190517h-2-Initial.xml")
assert result == expected
@pytest.fixture(scope="class")
def event_id(request):
with patch.object(
VOEventFromEventId,
"_get_voevents_json",
return_value=test_data.unsorted_json_S190521r,
):
with patch.object(
ligo.gracedb.rest.GraceDb,
"get",
return_value="gracebot/tests/data/S190521r-2-Initial.xml",
):
voe = VOEventFromEventId()
voe.get("S190521r")
request.cls.voe = voe
yield
@pytest.mark.usefixtures("event_id")
class TestFromEventId(TestCase):
def test_distance(self):
assert self.voe.distance == approx(mpc_to_mly(1136.13018), abs=1e-4)
def test_distance_std(self):
assert self.voe.distance_std == approx(mpc_to_mly(279.257795), abs=1e-4)
def test_id(self):
assert self.voe.id.lower() == "s190521r"
def test_short_names(self):
assert self.voe.seen_by_short == ["H1", "L1"]
def test_long_names(self):
assert self.voe.seen_by_long == ["Hanford", "Livingston"]
def test_p_astro(self):
expected = {
"BNS": 0.0,
"NSBH": 0.0,
"BBH": 0.9993323440548098,
"MassGap": 0.0,
"Terrestrial": 0.0006676559451902493,
}
assert self.voe.p_astro == approx(expected, abs=1e-5)
@pytest.fixture(scope="class")
def mock_event_file(request):
voe = VOEventFromXml()
voe.get("gracebot/tests/data/MS181101ab-1-Preliminary.xml")
request.cls.voe = voe
yield
@pytest.mark.usefixtures("mock_event_file")
class EventFromMockEvent(TestCase):
def test_distance(self):
assert self.voe.distance == approx(mpc_to_mly(39.7699960), abs=1e-4)
def test_distance_std(self):
assert self.voe.distance_std == approx(mpc_to_mly(8.30843505), abs=1e-4)
def test_id(self):
assert self.voe.id.lower() == "s190602aq"
def test_p_astro(self):
expected = {
"BNS": 0.95,
"NSBH": 0.01,
"BBH": 0.03,
"MassGap": 0.0,
"Terrestrial": 0.01,
}
assert self.voe.p_astro == approx(expected, abs=1e-5)
@pytest.fixture(scope="class")
def real_event_file(request):
voe = VOEventFromXml()
voe.get("gracebot/tests/data/S190701ah-3-Update.xml")
request.cls.voe = voe
yield
@pytest.mark.usefixtures("real_event_file")
class TestFromRealEventFile(TestCase):
def test_distance(self):
assert self.voe.distance == approx(mpc_to_mly(1848.9383223), abs=1e-4)
def test_distance_std(self):
assert self.voe.distance_std == approx(mpc_to_mly(445.5334849617994), abs=1e-4)
def test_id(self):
assert self.voe.id.lower() == "s190701ah"
def test_p_astro(self):
expected = {
"BNS": 0.0,
"NSBH": 0.0,
"BBH": 0.9343726,
"MassGap": 0.0,
"Terrestrial": 0.0656274,
}
assert self.voe.p_astro == approx(expected, abs=1e-5)
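# Illustrative note only: these are pytest-style tests built around fixtures and
# unittest.TestCase classes, so a typical local invocation (path taken from the
# record's file path) would be:
#   pytest gracebot/tests/test_VOEvent.py -q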
| 3,731
| 42
| 619
|
942215801e0b0bade4c9c10887697c879a9d34b9
| 15,908
|
py
|
Python
|
AccountPickup/tests/test_views.py
|
hhauer/myinfo
|
210a2caeff570650462f8774385aa9223b3bc858
|
[
"MIT"
] | 2
|
2015-08-04T21:17:09.000Z
|
2019-11-16T23:41:56.000Z
|
AccountPickup/tests/test_views.py
|
hhauer/myinfo
|
210a2caeff570650462f8774385aa9223b3bc858
|
[
"MIT"
] | null | null | null |
AccountPickup/tests/test_views.py
|
hhauer/myinfo
|
210a2caeff570650462f8774385aa9223b3bc858
|
[
"MIT"
] | 2
|
2020-11-03T15:45:04.000Z
|
2021-08-30T11:29:44.000Z
|
__author__ = 'Justin McClure'
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from random import choice
from lib.api_calls import APIException
# Note: Wait view will probably be removed in the future
| 43.583562
| 109
| 0.651622
|
__author__ = 'Justin McClure'
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from random import choice
from lib.api_calls import APIException
class AccountPickupViewsTestCase(TestCase):
fixtures = ['AccountPickup_views_test_data.json']
HOST = 'testserver'
INDEX = reverse('AccountPickup:index')
AUP = reverse('AccountPickup:aup')
ODIN = reverse('AccountPickup:odin')
ALIAS = reverse('AccountPickup:alias')
CONTACT = reverse('AccountPickup:contact_info')
NEXT = reverse('AccountPickup:next_step')
WAIT = reverse('AccountPickup:wait_for_provisioning')
PICK = reverse('MyInfo:pick_action')
RAND_IP = ["127.0.0." + str(i) for i in range(1, 256)]
class APIndexTestCase(AccountPickupViewsTestCase):
def test_index_get(self):
r = self.client.get(self.INDEX)
self.assertEqual(r.status_code, 200)
self.assertIn('form', r.context)
self.assertFalse(r.context['form'].is_bound)
self.assertIn('error', r.context)
self.assertEqual(r.context['error'], '')
self.assertNotIn('_auth_user_id', self.client.session)
def test_index_post(self):
# test bad form input
form = {'birth_date': '01/02/1903', 'auth_pass': 'password1'}
r = self.client.post(self.INDEX, data=form)
self.assertEqual(r.status_code, 200)
self.assertIn('form', r.context)
self.assertTrue(r.context['form'].is_bound)
self.assertFalse(r.context['form'].is_valid())
self.assertIn('error', r.context)
self.assertEqual(r.context['error'], '')
self.assertNotIn('_auth_user_id', self.client.session)
# Test known bad user stub
form['id_number'] = '000000000'
r = self.client.post(self.INDEX, data=form)
self.assertEqual(r.status_code, 200)
self.assertIn('form', r.context)
self.assertTrue(r.context['form'].is_bound)
self.assertTrue(r.context['form'].is_valid())
self.assertIn('error', r.context)
self.assertNotEqual(r.context['error'], '')
self.assertNotIn('_auth_user_id', self.client.session)
# Test good stub
form['id_number'] = '123456789'
r = self.client.post(self.INDEX, data=form)
self.assertIn('_auth_user_id', self.client.session)
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
# Test session flushing
s = self.client.session.session_key
_ = self.client.post(self.INDEX, data=form)
self.assertNotEqual(self.client.session.session_key, s)
class APLoggedInTestCase(AccountPickupViewsTestCase):
login_data = {}
@classmethod
def setUpTestData(cls):
super(APLoggedInTestCase, cls).setUpTestData()
cls.class_client = Client(REMOTE_ADDR=choice(cls.RAND_IP))
_ = cls.class_client.post(cls.INDEX, data=cls.login_data, follow=True)
def setUp(self):
self.assertIn('identity', self.class_client.session)
self.client = self.class_client
class APAupTestCase(APLoggedInTestCase):
login_data = {'id_number': '111111111', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
def test_aup_next(self):
# test no agreement
r = self.client.get(self.NEXT, follow=True)
self.assertRedirects(r, self.AUP, host=self.HOST)
# test outdated agreement
        c = Client(REMOTE_ADDR=choice(self.RAND_IP))
data = {'id_number': '111111112', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
r = c.post(self.INDEX, data=data, follow=True)
self.assertRedirects(r, self.AUP, host=self.HOST)
def test_aup_get(self):
# Test get
r = self.client.get(self.AUP)
self.assertEqual(r.status_code, 200)
self.assertIn('form', r.context)
self.assertFalse(r.context['form'].is_bound)
def test_aup_post(self):
# Test bad input
r = self.client.post(self.AUP, {'accepted': False})
self.assertEqual(r.status_code, 200)
self.assertIn('form', r.context)
self.assertTrue(r.context['form'].is_bound)
self.assertFalse(r.context['form'].is_valid())
# Test good input
r = self.client.post(self.AUP, {'accepted': True})
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
# test forwarding for already completed
r = self.client.get(self.AUP)
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
class APOdinTestCase(APLoggedInTestCase):
login_data = {'id_number': '222222222', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
def test_odin_next(self):
r = self.client.get(self.NEXT, follow=True)
self.assertRedirects(r, self.ODIN, host=self.HOST)
def test_odin_get(self):
# Test get
r = self.client.get(self.ODIN)
self.assertEqual(r.status_code, 200)
self.assertIn('form', r.context)
self.assertFalse(r.context['form'].is_bound)
def test_odin_post(self):
# Test bad input
r = self.client.post(self.ODIN, {'name': '9001'})
self.assertEqual(r.status_code, 200)
self.assertIn('form', r.context)
self.assertTrue(r.context['form'].is_bound)
self.assertFalse(r.context['form'].is_valid())
# Test good input
r = self.client.post(self.ODIN, {'name': '1'})
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
# Test forwarding if already completed
r = self.client.get(self.ODIN)
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
def test_odin_api_fail(self):
# Truename down
self.client = Client(REMOTE_ADDR=choice(self.RAND_IP))
data = {'id_number': '000000001', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
_ = self.client.post(self.INDEX, data=data, follow=True)
self.assertRaises(APIException, self.client.get, self.ODIN)
# IIQ down
self.client = Client(REMOTE_ADDR=choice(self.RAND_IP))
data['id_number'] = '000000002'
_ = self.client.post(self.INDEX, data=data, follow=True)
data = {'name': '0'}
self.assertRaises(APIException, self.client.post, self.ODIN, data=data)
class APAliasTestCase(APLoggedInTestCase):
login_data = {'id_number': '333333333', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
def test_alias_next(self):
r = self.client.get(self.NEXT, follow=True)
self.assertRedirects(r, self.ALIAS, host=self.HOST)
def test_alias_get(self):
# Test get
r = self.client.get(self.ALIAS)
self.assertEqual(r.status_code, 200)
self.assertIn('form', r.context)
self.assertFalse(r.context['form'].is_bound)
def test_alias_post(self):
# Test bad input
r = self.client.post(self.ALIAS, {'alias': '9001'})
self.assertEqual(r.status_code, 200)
self.assertIn('form', r.context)
self.assertTrue(r.context['form'].is_bound)
self.assertFalse(r.context['form'].is_valid())
self.assertNotIn('EMAIL_ALIAS', self.client.session['identity'])
# Test good input
r = self.client.post(self.ALIAS, {'alias': '1'})
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
self.assertIn('EMAIL_ALIAS', self.client.session['identity'])
# Test forwarding if step complete
r = self.client.get(self.ALIAS)
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
# Test 'No Alias' post
self.client = Client(REMOTE_ADDR=choice(self.RAND_IP))
data = {'id_number': '333333334', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
r = self.client.post(self.INDEX, data=data, follow=True)
self.assertRedirects(r, self.ALIAS, host=self.HOST)
r = self.client.post(self.ALIAS, {'alias': '0'})
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
self.assertNotIn('EMAIL_ALIAS', self.client.session['identity'])
# Test forwarding if step complete
r = self.client.get(self.ALIAS)
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
def test_alias_api_fail(self):
# Truename down
self.client = Client(REMOTE_ADDR=choice(self.RAND_IP))
data = {'id_number': '000000001', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
_ = self.client.post(self.INDEX, data=data, follow=True)
self.assertRaises(APIException, self.client.get, self.ALIAS)
# IIQ down
self.client = Client(REMOTE_ADDR=choice(self.RAND_IP))
data['id_number'] = '000000002'
_ = self.client.post(self.INDEX, data=data, follow=True)
data = {'alias': '1'}
self.assertRaises(APIException, self.client.post, self.ALIAS, data=data)
def test_alias_skipping(self):
# No alias, first time through myinfo
self.client = Client(REMOTE_ADDR=choice(self.RAND_IP))
data = {'id_number': '000000005', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
r = self.client.post(self.INDEX, data=data, follow=True)
self.assertRedirects(r, self.ALIAS, host=self.HOST)
_ = self.client.post(self.ALIAS, data={'alias': '0'})
# No alias, subsequent visits
r = self.client.post(self.INDEX, data=data, follow=True)
self.assertRedirects(r, reverse('MyInfo:pick_action'), host=self.HOST)
# Alias exists, first time through myinfo
self.client = Client(REMOTE_ADDR=choice(self.RAND_IP))
data = {'id_number': '000000006', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
r = self.client.post(self.INDEX, data=data, follow=True)
self.assertRedirects(r, reverse('MyInfo:pick_action'), host=self.HOST)
class APContactTestCase(APLoggedInTestCase):
login_data = {'id_number': '444444444', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
def test_contact_next(self):
r = self.client.get(self.NEXT, follow=True)
self.assertRedirects(r, self.CONTACT, host=self.HOST)
def test_contact_get(self):
# Test get
r = self.client.get(self.CONTACT)
self.assertEqual(r.status_code, 200)
self.assertIn('form', r.context)
self.assertFalse(r.context['form'].is_bound)
def test_contact_post(self):
# Test bad input
r = self.client.post(self.CONTACT, {'foo': 'bar'})
self.assertEqual(r.status_code, 200)
self.assertIn('form', r.context)
self.assertTrue(r.context['form'].is_bound)
self.assertFalse(r.context['form'].is_valid())
# Test good input
data = {'alternate_email': 'email@test.com', 'cell_phone': '503-867-5309'}
r = self.client.post(self.CONTACT, data)
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
# Test forwarding if step complete
r = self.client.get(self.CONTACT)
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
# Test only one contact method input
# * Only email
# * * Login
self.client = Client(REMOTE_ADDR=choice(self.RAND_IP))
data = {'id_number': '444444445', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
r = self.client.post(self.INDEX, data=data, follow=True)
self.assertRedirects(r, self.CONTACT, host=self.HOST)
# * * Post
data = {'alternate_email': 'email@test.com'}
r = self.client.post(self.CONTACT, data)
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
# * * Test forwarding if step complete
r = self.client.get(self.CONTACT)
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
# * Only Phone
# * * Login
self.client = Client(REMOTE_ADDR=choice(self.RAND_IP))
data = {'id_number': '444444446', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
r = self.client.post(self.INDEX, data=data, follow=True)
self.assertRedirects(r, self.CONTACT, host=self.HOST)
# * * Post
data = {'cell_phone': '503-867-5309'}
r = self.client.post(self.CONTACT, data)
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
# * * Test forwarding if step complete
r = self.client.get(self.CONTACT)
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
class APWaitTestCase(AccountPickupViewsTestCase):
# Note: Wait view will probably be removed in the future
def test_wait(self):
data = {'id_number': '555555555', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
r = self.client.post(self.INDEX, data=data, follow=True)
self.assertRedirects(r, self.WAIT, host=self.HOST)
# _ = self.client.get(self.WAIT, follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# self.assertRedirects(r, self.NEXT, host=self.HOST)
# Test already provisioned
data['id_number'] = '999999999'
r = self.client.post(self.INDEX, data=data)
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
r = self.client.get(self.WAIT)
self.assertRedirects(r, self.NEXT, target_status_code=302, host=self.HOST)
class APNextTestCase(AccountPickupViewsTestCase):
def test_next_directory(self):
data = {'id_number': '666666666', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
r = self.client.post(self.INDEX, data=data, follow=True, REMOTE_ADDR=choice(self.RAND_IP))
self.assertRedirects(r, reverse('MyInfo:set_directory'), host=self.HOST)
def test_next_password(self):
data = {'id_number': '777777777', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
r = self.client.post(self.INDEX, data=data, follow=True, REMOTE_ADDR=choice(self.RAND_IP))
self.assertRedirects(r, reverse('MyInfo:set_password'), host=self.HOST)
def test_next_welcome(self):
data = {'id_number': '888888888', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
r = self.client.post(self.INDEX, data=data, follow=True, REMOTE_ADDR=choice(self.RAND_IP))
# Welcome page should kill session
self.assertNotIn('_auth_user_id', self.client.session)
self.assertRedirects(r, reverse('MyInfo:welcome_landing'), host=self.HOST)
def test_next_complete(self):
data = {'id_number': '999999999', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
r = self.client.post(self.INDEX, data=data, follow=True, REMOTE_ADDR=choice(self.RAND_IP))
self.assertRedirects(r, reverse('MyInfo:pick_action'), host=self.HOST)
def test_next_api_fail(self):
data = {'id_number': '000000003', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
ip = choice(self.RAND_IP)
self.assertRaises(APIException, self.client.post, self.INDEX, data=data, follow=True, REMOTE_ADDR=ip)
class APRateLimitTestCase(AccountPickupViewsTestCase):
def test_rate_limit(self):
self.client = Client(REMOTE_ADDR="127.0.1.1")
for _ in range(30):
self.client.get(self.INDEX)
r = self.client.get(self.INDEX, follow=True)
self.assertListEqual(r.redirect_chain, [])
data = {'id_number': '123456789', 'birth_date': '12/21/2012', 'auth_pass': 'Password1!'}
for _ in range(30):
self.client.post(self.INDEX, data)
r = self.client.get(self.INDEX, follow=True)
self.assertListEqual(r.redirect_chain, [])
r = self.client.post(self.INDEX, data, follow=True)
self.assertRedirects(r, reverse('rate_limited'), host=self.HOST)
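# Illustrative note only: as Django TestCase subclasses these tests would normally be
# run through Django's test runner; the app label below is assumed from the file path:
#   python manage.py test AccountPickup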
| 13,530
| 1,653
| 472
|
12233affe5ac6a1b618d714a3c6910e493a5a18b
| 1,795
|
py
|
Python
|
scrape.py
|
djsnipa1/python-shit
|
b14eddd1f6bbf90063829ab85ed4bde7adb54e55
|
[
"MIT"
] | 3
|
2020-03-03T22:55:19.000Z
|
2021-04-26T02:39:41.000Z
|
scrape.py
|
djsnipa1/python-shit
|
b14eddd1f6bbf90063829ab85ed4bde7adb54e55
|
[
"MIT"
] | 2
|
2020-03-03T22:55:20.000Z
|
2020-04-08T12:33:15.000Z
|
scrape.py
|
djsnipa1/python-shit
|
b14eddd1f6bbf90063829ab85ed4bde7adb54e55
|
[
"MIT"
] | null | null | null |
import requests
import lxml.html
import json
# tutorial from An Intro to Web Scraping With lxml and Python – Python Tips
# https://pythontips.com/2018/06/20/an-intro-to-web-scraping-with-lxml-and-python/
# html = requests.get("https://www.beatport.com/genre/psy-trance/13/top-100")
html = requests.get("https://store.steampowered.com/explore/new/")
doc = lxml.html.fromstring(html.content)
new_releases = doc.xpath('//div[@id="tab_newreleases_content"]')
doc = lxml.html.fromstring(html.content)
print(new_releases)
new_releases = doc.xpath('//div[@id="tab_newreleases_content"]')[0]
titles = new_releases.xpath('.//div[@class="tab_item_name"]/text()')
print(titles)
prices = new_releases.xpath(
'.//div[@class="discount_final_price"]/text()')
print(prices)
# tags = new_releases.xpath('.//div[@class="tab_item_top_tags"]')
# total_tags = []
# for tag in tags:
# total_tags.append(tag.text_content())
#
# print(total_tags)
tags = [tag.text_content() for tag in new_releases.xpath(
'.//div[@class="tab_item_top_tags"]')]
tags = [tag.split(', ') for tag in tags]
print(tags)
platforms_div = new_releases.xpath('.//div[@class="tab_item_details"]')
total_platforms = []
for game in platforms_div:
temp = game.xpath('.//span[contains(@class, "platform_img")]')
platforms = [t.get('class').split(' ')[-1] for t in temp]
if 'hmd_separator' in platforms:
platforms.remove('hmd_separator')
total_platforms.append(platforms)
print(total_platforms)
output = []
for info in zip(titles, prices, tags, total_platforms):
resp = {}
resp['title'] = info[0]
resp['price'] = info[1]
resp['tags'] = info[2]
resp['platforms'] = info[3]
output.append(resp)
print(output)
with open('output.json', 'w') as outfile:
json.dump(output, outfile)
| 29.42623
| 82
| 0.693036
|
import requests
import lxml.html
import json
# tutorial from An Intro to Web Scraping With lxml and Python – Python Tips
# https://pythontips.com/2018/06/20/an-intro-to-web-scraping-with-lxml-and-python/
# html = requests.get("https://www.beatport.com/genre/psy-trance/13/top-100")
html = requests.get("https://store.steampowered.com/explore/new/")
doc = lxml.html.fromstring(html.content)
new_releases = doc.xpath('//div[@id="tab_newreleases_content"]')
doc = lxml.html.fromstring(html.content)
print(new_releases)
new_releases = doc.xpath('//div[@id="tab_newreleases_content"]')[0]
titles = new_releases.xpath('.//div[@class="tab_item_name"]/text()')
print(titles)
prices = new_releases.xpath(
'.//div[@class="discount_final_price"]/text()')
print(prices)
# tags = new_releases.xpath('.//div[@class="tab_item_top_tags"]')
# total_tags = []
# for tag in tags:
# total_tags.append(tag.text_content())
#
# print(total_tags)
tags = [tag.text_content() for tag in new_releases.xpath(
'.//div[@class="tab_item_top_tags"]')]
tags = [tag.split(', ') for tag in tags]
print(tags)
platforms_div = new_releases.xpath('.//div[@class="tab_item_details"]')
total_platforms = []
for game in platforms_div:
temp = game.xpath('.//span[contains(@class, "platform_img")]')
platforms = [t.get('class').split(' ')[-1] for t in temp]
if 'hmd_separator' in platforms:
platforms.remove('hmd_separator')
total_platforms.append(platforms)
print(total_platforms)
output = []
for info in zip(titles, prices, tags, total_platforms):
resp = {}
resp['title'] = info[0]
resp['price'] = info[1]
resp['tags'] = info[2]
resp['platforms'] = info[3]
output.append(resp)
print(output)
with open('output.json', 'w') as outfile:
json.dump(output, outfile)
| 0
| 0
| 0
|
8a70fe6634e3d7a0bb08218dc91468337036413b
| 92
|
py
|
Python
|
aenet/__init__.py
|
simaki/adaptive-enet
|
ecae8ad770b818c0ff51203e5a462e4dbf41beca
|
[
"BSD-3-Clause"
] | null | null | null |
aenet/__init__.py
|
simaki/adaptive-enet
|
ecae8ad770b818c0ff51203e5a462e4dbf41beca
|
[
"BSD-3-Clause"
] | 12
|
2021-01-27T01:43:40.000Z
|
2021-02-03T06:14:43.000Z
|
aenet/__init__.py
|
simaki/adaptive-enet
|
ecae8ad770b818c0ff51203e5a462e4dbf41beca
|
[
"BSD-3-Clause"
] | 1
|
2021-12-26T23:18:34.000Z
|
2021-12-26T23:18:34.000Z
|
# flake8: noqa
from .aen import AdaptiveElasticNet
from .aencv import AdaptiveElasticNetCV
| 18.4
| 39
| 0.826087
|
# flake8: noqa
from .aen import AdaptiveElasticNet
from .aencv import AdaptiveElasticNetCV
| 0
| 0
| 0
|
8963a0c9c703678e1b2a0261227cefe53355d690
| 1,331
|
py
|
Python
|
simianpy/analysis/detectfixations.py
|
jselvan/simianpy
|
5b2b162789e11bc89ca2179358ab682269e7df15
|
[
"MIT"
] | null | null | null |
simianpy/analysis/detectfixations.py
|
jselvan/simianpy
|
5b2b162789e11bc89ca2179358ab682269e7df15
|
[
"MIT"
] | null | null | null |
simianpy/analysis/detectfixations.py
|
jselvan/simianpy
|
5b2b162789e11bc89ca2179358ab682269e7df15
|
[
"MIT"
] | null | null | null |
from ..misc import binary_digitize
import numpy as np
import pandas as pd
| 35.972973
| 113
| 0.676183
|
from ..misc import binary_digitize
import numpy as np
import pandas as pd
def DetectFixations(eye_data, velocity_threshold=2, duration_threshold=None, sampling_rate=1e3, Filter=None):
if Filter is None:
velocity = eye_data.diff().abs()*sampling_rate
else:
velocity = (eye_data.apply(Filter).diff().abs()*sampling_rate).apply(Filter)
fix = (velocity < velocity_threshold).all(axis=1)
onset, offset = binary_digitize(fix)
onset, offset = fix.index[onset], fix.index[offset]
fixation_data = pd.DataFrame({
'onset': onset,
'offset': offset
})
fixation_data['duration_dt'] = fixation_data['offset'] - fixation_data['onset']
if duration_threshold is not None:
fixation_data = fixation_data[fixation_data['duration_dt'] > duration_threshold]
if hasattr(fixation_data['duration_dt'], 'dt') and hasattr(fixation_data['duration_dt'].dt, 'total_seconds'):
fixation_data['duration'] = fixation_data['duration_dt'].dt.total_seconds() * 1e3
if fixation_data.empty:
return fixation_data
fixation_data = fixation_data.join(
fixation_data.apply(
lambda fixation: eye_data.loc[slice(fixation.onset, fixation.offset),['eyeh','eyev']].mean(),
axis=1
)
)
return fixation_data
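# Illustrative sketch only, not part of the record above: calling DetectFixations() on
# synthetic, perfectly still 1 kHz eye traces (numpy/pandas are already imported above).
# Column names 'eyeh'/'eyev' follow the function body, and binary_digitize from
# simianpy.misc is assumed to return onset/offset index positions as used above.
eye = pd.DataFrame({'eyeh': np.zeros(2000), 'eyev': np.zeros(2000)})
fixations = DetectFixations(eye, velocity_threshold=2, sampling_rate=1e3)
print(fixations.head())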
| 1,234
| 0
| 23
|
b4b4904010fb89d55f0da4c6c0ace8c62a596c76
| 680
|
py
|
Python
|
examples/reply.py
|
Mihitoko/pycord
|
137c1474eed5fb4273e542bd22ad76764a8712fc
|
[
"MIT"
] | null | null | null |
examples/reply.py
|
Mihitoko/pycord
|
137c1474eed5fb4273e542bd22ad76764a8712fc
|
[
"MIT"
] | null | null | null |
examples/reply.py
|
Mihitoko/pycord
|
137c1474eed5fb4273e542bd22ad76764a8712fc
|
[
"MIT"
] | 1
|
2022-02-20T09:10:40.000Z
|
2022-02-20T09:10:40.000Z
|
# This example requires the `message_content` privileged intent for access to message content.
import discord
intents = discord.Intents.default()
intents.message_content = True
client = MyClient(intents=intents)
client.run("TOKEN")
| 27.2
| 94
| 0.670588
|
# This example requires the `message_content` privileged intent for access to message content.
import discord
class MyClient(discord.Client):
async def on_ready(self):
print(f"Logged in as {self.user} (ID: {self.user.id})")
print("------")
async def on_message(self, message: discord.Message):
# Make sure we won't be replying to ourselves.
if message.author.id == self.user.id:
return
if message.content.startswith("!hello"):
await message.reply("Hello!", mention_author=True)
intents = discord.Intents.default()
intents.message_content = True
client = MyClient(intents=intents)
client.run("TOKEN")
| 357
| 10
| 76
|
11bcae79dbe3d4ab0b00e4eb06f7e6d850ed2b07
| 1,486
|
py
|
Python
|
targetmodel.py
|
BXuan694/universalAdversarialPerturbation
|
ebca90f76b5d45715c98a1ff0b6f11df753b51c6
|
[
"BSD-2-Clause"
] | 38
|
2019-01-19T09:43:13.000Z
|
2022-01-05T09:47:02.000Z
|
targetmodel.py
|
BXuan694/universalAdversarialPerturbation
|
ebca90f76b5d45715c98a1ff0b6f11df753b51c6
|
[
"BSD-2-Clause"
] | 3
|
2020-02-24T05:56:35.000Z
|
2022-01-07T12:08:33.000Z
|
targetmodel.py
|
BXuan694/universalAdversarialPerturbation
|
ebca90f76b5d45715c98a1ff0b6f11df753b51c6
|
[
"BSD-2-Clause"
] | 10
|
2019-02-19T10:05:57.000Z
|
2021-06-07T08:02:36.000Z
|
import torch.nn as nn
from PIL import Image
from torch.utils.data import Dataset
import numpy as np
from transform_file import cut
root='/home/wang/Dataset/Caltech256/'
#root='/media/this/02ff0572-4aa8-47c6-975d-16c3b8062013/Caltech256/'
| 29.137255
| 93
| 0.611036
|
import torch.nn as nn
from PIL import Image
from torch.utils.data import Dataset
import numpy as np
from transform_file import cut
root='/home/wang/Dataset/Caltech256/'
#root='/media/this/02ff0572-4aa8-47c6-975d-16c3b8062013/Caltech256/'
def default_loader(path):
return Image.open(path).convert('RGB')
class MyDataset(Dataset):
def __init__(self, txt, transform=None, pert=np.zeros(1), loader=default_loader):
fh = open(txt, 'r')
imgs = []
for line in fh:
line = line.rstrip()
line = line.strip('\n')
line = line.rstrip()
words = line.split()
imgs.append((words[0],int(words[1])))
self.imgs = imgs
self.transform = transform
self.loader = loader
self.pert = pert
def __getitem__(self, index):
fn, label = self.imgs[index]
img = Image.fromarray(np.clip(cut(self.loader(fn))+self.pert,0,255).astype(np.uint8))
if self.transform is not None:
img = self.transform(img)
return img,label
def __len__(self):
return len(self.imgs)
class ResNet50_ft(nn.Module):
def __init__(self, model):
super(ResNet50_ft, self).__init__()
self.resnet_layer = nn.Sequential(*list(model.children())[:-1])
self.Linear_layer = nn.Linear(2048, 257)
def forward(self, x):
x = self.resnet_layer(x)
x = x.view(x.size(0), -1)
x = self.Linear_layer(x)
return x
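# Illustrative wiring sketch only, not part of the record above: how MyDataset and
# ResNet50_ft defined in this file might be combined with torchvision and a DataLoader.
# The annotation file name 'train.txt' and the transform are assumptions.
import torchvision
from torch.utils.data import DataLoader

backbone = torchvision.models.resnet50(pretrained=True)
model = ResNet50_ft(backbone)
dataset = MyDataset(root + 'train.txt', transform=torchvision.transforms.ToTensor())
loader = DataLoader(dataset, batch_size=32, shuffle=True)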
| 1,032
| 12
| 202
|
0a58b4051a93fb17bcd48c758537da77b9670348
| 14,519
|
py
|
Python
|
appcenter/models.py
|
danielamitay/appcenter-rest-python
|
240f71ce290d54f79b4e0814e068da6986abb34f
|
[
"MIT"
] | 10
|
2019-10-28T13:33:46.000Z
|
2021-11-26T15:16:04.000Z
|
appcenter/models.py
|
danielamitay/appcenter-rest-python
|
240f71ce290d54f79b4e0814e068da6986abb34f
|
[
"MIT"
] | 12
|
2020-12-09T14:38:31.000Z
|
2022-02-23T11:13:37.000Z
|
appcenter/models.py
|
danielamitay/appcenter-rest-python
|
240f71ce290d54f79b4e0814e068da6986abb34f
|
[
"MIT"
] | 10
|
2020-06-30T16:13:07.000Z
|
2022-01-24T17:01:14.000Z
|
"""Data type models"""
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import datetime
import enum
from typing import Any, Dict, List, Optional
import deserialize
def iso8601parse(date_string: Optional[str]) -> Optional[datetime.datetime]:
"""Parse an ISO8601 date string into a datetime.
:param date_string: The date string to parse
:returns: The parsed datetime
"""
if date_string is None:
return None
try:
return datetime.datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%SZ")
except ValueError:
return datetime.datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S.%fZ")
# pylint: disable=missing-docstring
@deserialize.parser("firstOccurrence", iso8601parse)
@deserialize.parser("lastOccurrence", iso8601parse)
@deserialize.parser("firstOccurrence", iso8601parse)
@deserialize.parser("lastOccurrence", iso8601parse)
@deserialize.parser("timestamp", iso8601parse)
@deserialize.parser("timestamp", iso8601parse)
@deserialize.parser("appLaunchTimestamp", iso8601parse)
@deserialize.key("identifier", "id")
@deserialize.key("store_type", "type")
@deserialize.key("identifier", "id")
@deserialize.parser("uploaded_at", iso8601parse)
@deserialize.key("identifier", "id")
@deserialize.parser("provisioning_profile_expiry_date", iso8601parse)
@deserialize.parser("uploaded_at", iso8601parse)
@deserialize.key("identifier", "id")
@deserialize.key("identifier", "id")
@deserialize.key("identifier", "id")
@deserialize.key("identifier", "id")
@deserialize.key("identifier", "id")
@deserialize.key("identifier", "id")
@deserialize.parser("expiration_date", iso8601parse)
@deserialize.key("identifier", "id")
@deserialize.key("identifier", "id")
@deserialize.parser("created_at", iso8601parse)
| 25.83452
| 99
| 0.693574
|
"""Data type models"""
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import datetime
import enum
from typing import Any, Dict, List, Optional
import deserialize
def iso8601parse(date_string: Optional[str]) -> Optional[datetime.datetime]:
"""Parse an ISO8601 date string into a datetime.
:param date_string: The date string to parse
:returns: The parsed datetime
"""
if date_string is None:
return None
try:
return datetime.datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%SZ")
except ValueError:
return datetime.datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S.%fZ")
# pylint: disable=missing-docstring
class HandledErrorReasonFrame:
class ProgrammingLanguage(enum.Enum):
javascript = "JavaScript"
csharp = "CSharp"
objectivec = "Objective-C"
objectivecpp = "Objective-Cpp"
cpp = "Cpp"
c = "C"
swift = "Swift"
java = "Java"
unknown = "Unknown"
className: Optional[str] # name of the class
method: Optional[str] # name of the method
classMethod: Optional[bool] # is a class method
file: Optional[str] # name of the file
line: Optional[int] # line number
appCode: Optional[bool] # this line isn't from any framework
frameworkName: Optional[str] # Name of the framework
codeFormatted: Optional[str] # Formatted frame string
codeRaw: Optional[str] # Unformatted Frame string
methodParams: Optional[str] # parameters of the frames method
exceptionType: Optional[str] # Exception type.
osExceptionType: Optional[str] # OS exception type. (aka. SIGNAL)
language: Optional[ProgrammingLanguage] # programming language of the frame
class ErrorGroupState(enum.Enum):
open = "Open"
closed = "Closed"
ignored = "Ignored"
@deserialize.parser("firstOccurrence", iso8601parse)
@deserialize.parser("lastOccurrence", iso8601parse)
class ErrorGroupListItem:
state: ErrorGroupState
annotation: Optional[str]
errorGroupId: str
appVersion: str
appBuild: Optional[str]
count: int
deviceCount: int
firstOccurrence: datetime.datetime
lastOccurrence: datetime.datetime
exceptionType: Optional[str]
exceptionMessage: Optional[str]
exceptionClassName: Optional[str]
exceptionClassMethod: Optional[bool]
exceptionMethod: Optional[str]
exceptionAppCode: Optional[bool]
exceptionFile: Optional[str]
exceptionLine: Optional[str]
codeRaw: Optional[str]
reasonFrames: Optional[List[HandledErrorReasonFrame]]
class ErrorGroups:
nextLink: Optional[str]
errorGroups: Optional[List[ErrorGroupListItem]]
@deserialize.parser("firstOccurrence", iso8601parse)
@deserialize.parser("lastOccurrence", iso8601parse)
class ErrorGroup:
state: ErrorGroupState
annotation: Optional[str]
errorGroupId: str
appVersion: str
appBuild: Optional[str]
count: int
deviceCount: int
firstOccurrence: datetime.datetime
lastOccurrence: datetime.datetime
exceptionType: Optional[str]
exceptionMessage: Optional[str]
exceptionClassName: Optional[str]
exceptionClassMethod: Optional[bool]
exceptionMethod: Optional[str]
exceptionAppCode: Optional[bool]
exceptionFile: Optional[str]
exceptionLine: Optional[str]
codeRaw: Optional[str]
reasonFrames: Optional[List[HandledErrorReasonFrame]]
@deserialize.parser("timestamp", iso8601parse)
class HandledError:
errorId: Optional[str]
timestamp: Optional[datetime.datetime]
deviceName: Optional[str]
osVersion: Optional[str]
osType: Optional[str]
country: Optional[str]
language: Optional[str]
userId: Optional[str]
class HandledErrors:
nextLink: Optional[str]
errors: Optional[List[HandledError]]
@deserialize.parser("timestamp", iso8601parse)
@deserialize.parser("appLaunchTimestamp", iso8601parse)
class HandledErrorDetails:
errorId: Optional[str]
timestamp: Optional[datetime.datetime]
deviceName: Optional[str]
osVersion: Optional[str]
osType: Optional[str]
country: Optional[str]
language: Optional[str]
userId: Optional[str]
name: Optional[str]
reasonFrames: Optional[List[HandledErrorReasonFrame]]
appLaunchTimestamp: Optional[datetime.datetime]
carrierName: Optional[str]
jailbreak: Optional[bool]
properties: Optional[Dict[str, str]]
class ReleaseOrigin(enum.Enum):
hockey = "hockeyapp"
appcenter = "appcenter"
class BuildInfo:
branch_name: Optional[str]
commit_hash: Optional[str]
commit_message: Optional[str]
def __init__(
self,
branch_name: Optional[str] = None,
commit_hash: Optional[str] = None,
commit_message: Optional[str] = None,
) -> None:
self.branch_name = branch_name
self.commit_hash = commit_hash
self.commit_message = commit_message
def json(self) -> Dict[str, Any]:
result = {}
if self.branch_name is not None:
result["branch_name"] = self.branch_name
if self.commit_hash is not None:
result["commit_hash"] = self.commit_hash
if self.commit_message is not None:
result["commit_message"] = self.commit_message
return result
class StoreType(enum.Enum):
intune = "intune"
googleplay = "googleplay"
apple = "apple"
none = "none"
class DestinationType(enum.Enum):
group = "group"
store = "store"
tester = "tester"
@deserialize.key("identifier", "id")
@deserialize.key("store_type", "type")
class Destination:
identifier: str
name: Optional[str]
is_latest: Optional[bool]
store_type: Optional[StoreType]
publishing_status: Optional[str]
destination_type: Optional[DestinationType]
display_name: Optional[str]
@deserialize.key("identifier", "id")
@deserialize.parser("uploaded_at", iso8601parse)
class BasicReleaseDetailsResponse:
identifier: int
version: str
origin: Optional[ReleaseOrigin]
short_version: str
enabled: bool
uploaded_at: datetime.datetime
destinations: Optional[List[Destination]]
build: Optional[BuildInfo]
class ProvisioningProfileType(enum.Enum):
adhoc = "adhoc"
enterprise = "enterprise"
other = "other"
@deserialize.key("identifier", "id")
@deserialize.parser("provisioning_profile_expiry_date", iso8601parse)
@deserialize.parser("uploaded_at", iso8601parse)
class ReleaseDetailsResponse:
# ID identifying this unique release.
identifier: int
# The app's name (extracted from the uploaded release).
app_name: str
# The app's display name.
app_display_name: str
# The app's OS.
app_os: Optional[str]
# The release's version.
version: str
# The release's origin
origin: Optional[ReleaseOrigin]
# The release's short version.
short_version: str
# The release's release notes.
release_notes: Optional[str]
# The release's provisioning profile name.
provisioning_profile_name: Optional[str]
# The type of the provisioning profile for the requested app version.
provisioning_profile_type: Optional[ProvisioningProfileType]
# expiration date of provisioning profile in UTC format.
provisioning_profile_expiry_date: Optional[datetime.datetime]
# A flag that determines whether the release's provisioning profile is still extracted or not.
is_provisioning_profile_syncing: Optional[bool]
# The release's size in bytes.
size: Optional[int]
# The release's minimum required operating system.
min_os: Optional[str]
# The release's device family.
device_family: Optional[str]
# The release's minimum required Android API level.
android_min_api_level: Optional[str]
# The identifier of the apps bundle.
bundle_identifier: Optional[str]
# Hashes for the packages
package_hashes: Optional[List[str]]
# MD5 checksum of the release binary.
fingerprint: Optional[str]
# The uploaded time.
uploaded_at: datetime.datetime
# The URL that hosts the binary for this release.
download_url: Optional[str]
# A URL to the app's icon.
app_icon_url: str
# The href required to install a release on a mobile device. On iOS devices will be prefixed
# with itms-services://?action=download-manifest&url=
install_url: Optional[str]
destinations: Optional[List[Destination]]
# In calls that allow passing udid in the query string, this value will hold the provisioning
# status of that UDID in this release. Will be ignored for non-iOS platforms.
is_udid_provisioned: Optional[bool]
# In calls that allow passing udid in the query string, this value determines if a release can
# be re-signed. When true, after a re-sign, the tester will be able to install the release from
# his registered devices. Will not be returned for non-iOS platforms.
can_resign: Optional[bool]
build: Optional[BuildInfo]
    # This value determines whether a release is currently enabled or disabled.
enabled: bool
# Status of the release.
status: Optional[str]
class ReleaseWithDistributionGroup:
release: str # The release ID
distribution_group: str # The distribution group ID
def __init__(self, release: str, distribution_group: str) -> None:
self.release = release
self.distribution_group = distribution_group
class ReleaseCount:
release_id: str
distribution_group: Optional[str]
unique_count: int
total_count: int
class ReleaseCounts:
total: Optional[int]
counts: List[ReleaseCount]
@deserialize.key("identifier", "id")
class SetUploadMetadataResponse:
identifier: str
error: bool
chunk_size: int
resume_restart: bool
chunk_list: List[int]
blob_partitions: int
status_code: str
class ChunkUploadResponse:
error: bool
chunk_num: int
error_code: str
@deserialize.key("identifier", "id")
class CreateReleaseUploadResponse:
identifier: str
upload_domain: str
token: str
url_encoded_token: str
package_asset_id: str
@deserialize.key("identifier", "id")
class CommitUploadResponse:
identifier: str
upload_status: str
release_distinct_id: Optional[int]
@deserialize.key("identifier", "id")
class UploadCompleteResponse:
absolute_uri: str
chunk_num: int
error: bool
error_code: Optional[str]
location: str
message: str
raw_location: str
state: str
@deserialize.key("identifier", "id")
class ReleaseDestinationResponse:
identifier: str
mandatory_update: bool
provisioning_status_url: Optional[str]
@deserialize.key("identifier", "id")
class DestinationId:
name: Optional[str]
identifier: Optional[str]
def __init__(self, *, name: Optional[str] = None, identifier: Optional[str] = None) -> None:
self.name = name
self.identifier = identifier
def json(self) -> Dict[str, Any]:
result: Dict[str, Any] = {}
if self.name is not None:
result["name"] = self.name
if self.identifier is not None:
result["id"] = self.identifier
return result
class ReleaseUpdateRequest:
release_notes: Optional[str]
mandatory_update: Optional[bool]
destinations: Optional[List[DestinationId]]
build: Optional[BuildInfo]
notify_testers: Optional[bool]
def __init__(
self,
*,
release_notes: Optional[str] = None,
mandatory_update: Optional[bool] = None,
destinations: Optional[List[DestinationId]] = None,
build: Optional[BuildInfo] = None,
notify_testers: Optional[bool] = None,
) -> None:
self.release_notes = release_notes
self.mandatory_update = mandatory_update
self.destinations = destinations
self.build = build
self.notify_testers = notify_testers
def json(self) -> Dict[str, Any]:
output: Dict[str, Any] = {}
if self.release_notes is not None:
output["release_notes"] = self.release_notes
if self.mandatory_update is not None:
output["mandatory_update"] = self.mandatory_update
if self.destinations is not None:
output["destinations"] = [destination.json() for destination in self.destinations]
if self.build is not None:
output["build"] = self.build.json()
if self.notify_testers is not None:
output["notify_testers"] = self.notify_testers
return output
class SymbolType(enum.Enum):
apple = "Apple"
javascript = "JavaScript"
breakpad = "Breakpad"
proguard = "AndroidProguard"
uwp = "UWP"
@deserialize.parser("expiration_date", iso8601parse)
class SymbolUploadBeginResponse:
symbol_upload_id: str
upload_url: str
expiration_date: datetime.datetime
class SymbolUploadStatus(enum.Enum):
committed = "committed"
aborted = "aborted"
class SymbolUploadEndRequest:
status: SymbolUploadStatus
class Origin(enum.Enum):
appcenter = "appcenter"
hockeyapp = "hockeyapp"
codepush = "codepush"
class Permission(enum.Enum):
manager = "manager"
developer = "developer"
viewer = "viewer"
tester = "tester"
class Role(enum.Enum):
admin = "admin"
collaborator = "collaborator"
member = "member"
@deserialize.key("identifier", "id")
class User:
# The unique ID of the user
identifier: str
# The avatar URL of the user
avatar_url: Optional[str]
# User is required to send an old password in order to change the password
can_change_password: Optional[bool]
# The full name of the user. Might for example be first and last name
display_name: str
# The email address of the user
email: str
# The unique name that is used to identify the user
name: str
# The permissions the user has for the app
permissions: List[Permission]
# The creation origin of this user
origin: Origin
@deserialize.key("identifier", "id")
@deserialize.parser("created_at", iso8601parse)
class UserToken:
# The unique ID of the token
identifier: str
# The user supplied description for the token
description: str
# The scope the token has
scope: List[str]
# The creation date
created_at: datetime.datetime
    # The value of the token - Only set when creating a new token
api_token: Optional[str]
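# Illustrative sketch only, not part of the record above: parsing an API payload into
# one of the annotated models via the `deserialize` package imported above. The
# deserialize.deserialize(Type, data) call and the sample payload are assumptions.
payload = {
    "id": "abc123",
    "description": "ci token",
    "scope": ["all"],
    "created_at": "2021-01-01T00:00:00Z",
    "api_token": None,
}
token = deserialize.deserialize(UserToken, payload)
print(token.identifier, token.created_at)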
| 2,205
| 9,684
| 812
|
e3f8e1d22bcafa358edf7f3c0244842cba8cf451
| 4,272
|
py
|
Python
|
indexer/index_mapper.py
|
FadedCosine/POS-Guided-Neural-Text-Generation
|
2b5c72d8f2e08cbf4fe0babc4a4f1db09b348505
|
[
"Apache-2.0"
] | 2
|
2021-06-23T08:52:20.000Z
|
2021-06-23T08:52:31.000Z
|
indexer/index_mapper.py
|
FadedCosine/POS-Guided-Neural-Text-Generation
|
2b5c72d8f2e08cbf4fe0babc4a4f1db09b348505
|
[
"Apache-2.0"
] | null | null | null |
indexer/index_mapper.py
|
FadedCosine/POS-Guided-Neural-Text-Generation
|
2b5c72d8f2e08cbf4fe0babc4a4f1db09b348505
|
[
"Apache-2.0"
] | null | null | null |
import os
import pandas as pd
import collections
import re
import pickle
from basic_util.files import *
import argparse
if __name__ =='__main__':
parser = get_parser()
args = parser.parse_args()
imap = IMap(args.dir_path, args.base_name)
imap.learn_dic(args.count_names, args.check_names)
imap.convert_and_save(args.convert_names)
| 32.610687
| 96
| 0.59574
|
import os
import pandas as pd
import collections
import re
import pickle
from basic_util.files import *
import argparse
def count(fl, targets=['input_context'], checks=['input_keyword'], vocab_size=10000):
cnter = collections.Counter()
s = set()
for filename in fl:
cur_df = pd.read_pickle(filename)
for target in targets:
texts = cur_df[target].tolist()
for i in texts:
cnter.update(i[1:])
s.add(i[0])
#check
for filename in fl:
cur_df = pd.read_pickle(filename)
for check in checks:
texts = cur_df[check].tolist()
for i in texts:
s.update(i)
for i in s:
if i not in cnter:
cnter[i] = 1
for i in range(vocab_size):
if i not in cnter:
cnter[i] = 1
tot = 0
cum_prob = [0]
for i in cnter.most_common():
tot += i[1]
for i in cnter.most_common():
cum_prob.append(cum_prob[-1] + i[1] / tot)
cum_prob.pop(0)
new_dict = dict([(int(old[0]), int(new)) for (new, old) in enumerate(cnter.most_common())])
return cum_prob, new_dict
def convert_idx(filename, dic, targets:list):
key_type = type(list(dic)[0])
cur_df = pd.read_pickle(filename)
for target in targets:
new = []
for line in cur_df[target].tolist():
converted = []
for token in line:
if key_type(token) in dic:
converted.append(dic[key_type(token)])
else:
converted.append(len(dic)-1)
new.append(converted)
cur_df[target] = new
return cur_df
class IMap:
def __init__(self, dir_path, prefix):
self.probs_path, self.dic_path, self.file_path = self.get_path(dir_path, prefix)
self.vocab_size = self.get_vocab_size(prefix)
self.dic = self.load_dic(self.dic_path)
@staticmethod
def load_dic(path):
if os.path.exists(path):
return load_pkl(path)
else:
return None
@staticmethod
def get_vocab_size(prefix):
target = prefix.split('_')[-1]
try:
return int(target)
except ValueError:
            print('invalid prefix format; vocab size set to 20000')
return 20000
@staticmethod
def get_path(dir_path, prefix):
probs_path = os.path.join(dir_path, '{}_probs.json'.format(prefix))
dic_path = os.path.join(dir_path, '{}_dic.json'.format(prefix))
file_path = os.path.join(dir_path,'{}_indexed'.format(prefix))
return probs_path, dic_path, file_path
def learn_dic(self, count_name, check_names):
if not self.dic:
print('start imap')
probs, dic = count(get_files(self.file_path),count_name,check_names,self.vocab_size)
self.dic = dic
pickle.dump(probs, open(self.probs_path, 'wb'))
pickle.dump(dic, open(self.dic_path, 'wb'))
else:
print('imap exists')
def convert_and_save(self, targets:list):
fl = get_files(self.file_path)
print(fl)
if not self.dic:
raise ValueError('dictionary is empty')
for filename in fl:
cur_df = convert_idx(filename,self.dic, targets)
new_filename = re.sub(r'indexed/','indexed_new/',filename)
if not os.path.exists(os.path.dirname(new_filename)):
os.makedirs(os.path.dirname(new_filename))
cur_df.to_pickle(new_filename)
def get_parser():
parser=argparse.ArgumentParser()
parser.add_argument("--base-name", type=str,
help='parent directory path')
parser.add_argument("--dir-path", type=str,
help='directory where input data is stored')
parser.add_argument("--count-names", type=str, nargs='*')
parser.add_argument("--check-names", type=str, nargs='*')
parser.add_argument("--convert-names", type=str, nargs='*')
return parser
if __name__ =='__main__':
parser = get_parser()
args = parser.parse_args()
imap = IMap(args.dir_path, args.base_name)
imap.learn_dic(args.count_names, args.check_names)
imap.convert_and_save(args.convert_names)
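# Illustrative command-line sketch only, not part of the record above; the module path,
# data directory and column names are invented to show how the flags from get_parser()
# line up:
#   python -m indexer.index_mapper --dir-path data/ --base-name corpus_20000 \
#       --count-names input_context --check-names input_keyword \
#       --convert-names input_context input_keyword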
| 3,619
| 205
| 92
|
bf31b720b525652222135f120c0a379b010899a7
| 294
|
py
|
Python
|
profiles/migrations/New folder/0041_delete_user.py
|
Rxavio/link
|
a0aa34b89a769cfaa20a92a04980e142e42eaa10
|
[
"MIT"
] | null | null | null |
profiles/migrations/New folder/0041_delete_user.py
|
Rxavio/link
|
a0aa34b89a769cfaa20a92a04980e142e42eaa10
|
[
"MIT"
] | null | null | null |
profiles/migrations/New folder/0041_delete_user.py
|
Rxavio/link
|
a0aa34b89a769cfaa20a92a04980e142e42eaa10
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.3 on 2020-11-03 07:43
from django.db import migrations
| 17.294118
| 48
| 0.602041
|
# Generated by Django 3.0.3 on 2020-11-03 07:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('profiles', '0040_auto_20201103_0801'),
]
operations = [
migrations.DeleteModel(
name='User',
),
]
| 0
| 188
| 23
|
c8677981f8f86e197f62a450212ec26939d05188
| 8,453
|
py
|
Python
|
solaredge_influx/__main__.py
|
c0deaddict/solaredge-influx
|
f258b77f66ad53f67f25b62f4db7c33a935b6a00
|
[
"MIT"
] | null | null | null |
solaredge_influx/__main__.py
|
c0deaddict/solaredge-influx
|
f258b77f66ad53f67f25b62f4db7c33a935b6a00
|
[
"MIT"
] | null | null | null |
solaredge_influx/__main__.py
|
c0deaddict/solaredge-influx
|
f258b77f66ad53f67f25b62f4db7c33a935b6a00
|
[
"MIT"
] | null | null | null |
import argparse
import requests
from os import getenv
import sys
from influxdb import InfluxDBClient
from datetime import datetime, timedelta
solaredge_api_url = "https://monitoringapi.solaredge.com"
required_version = dict(release="1.0.0")
if __name__ == "__main__":
main()
| 32.891051
| 87
| 0.651603
|
import argparse
import requests
from os import getenv
import sys
from influxdb import InfluxDBClient
from datetime import datetime, timedelta
solaredge_api_url = "https://monitoringapi.solaredge.com"
required_version = dict(release="1.0.0")
def api_request(api_key, path, params=dict()):
params = dict(**params, api_key=api_key)
response = requests.get(solaredge_api_url + path, params, timeout=60)
if response.status_code != 200:
raise Exception(f"Solaredge API error {response.status_code}")
return response.json()
def version_check(args, influx):
current = api_request(args.api_key, "/version/current")["version"]
supported = api_request(args.api_key, "/version/supported")["supported"]
print(f"Solaredge API version: {current}")
if required_version not in supported:
print(f"API version {required_version} is NOT supported anymore")
sys.exit(1)
else:
print("API version is supported")
def show_inventory(args, influx):
response = api_request(args.api_key, f"/site/{args.site_id}/inventory")
for inv in response["Inventory"]["inverters"]:
print(inv["name"])
print(f"Model: {inv['manufacturer']} {inv['model']}")
print(f"Serial number: {inv['SN']}")
print(f"Firmware version (CPU): {inv['cpuVersion']}")
print(f"Connected optimizers: {inv['connectedOptimizers']}")
def add_time_period_args(parser):
parser.add_argument(
"--start",
type=lambda s: datetime.strptime(s, "%Y-%m-%d %H:%M:%S"),
help="Start time in format YYYY-MM-DD hh:mm:ss",
default=None,
)
parser.add_argument(
"--end",
type=lambda s: datetime.strptime(s, "%Y-%m-%d %H:%M:%S"),
help="End time in format YYYY-MM-DD hh:mm:ss",
default=None,
)
parser.add_argument(
"--minutes",
type=int,
help="Time period in minutes. Can be used with --start or --end",
)
def time_period_params(args, max_period):
if args.start and args.end:
if args.minutes:
print("Start, end and minutes are given, pick two.")
sys.exit(1)
else:
if not args.minutes:
print("Missing the minutes period.")
sys.exit(1)
if args.start:
args.end = args.start + timedelta(minutes=args.minutes)
elif args.end:
args.start = args.end - timedelta(minutes=args.minutes)
else:
args.end = datetime.now().replace(microsecond=0)
args.start = args.end - timedelta(minutes=args.minutes)
if args.end - args.start > max_period:
print(f"Time period exceeds maximum {max_period}")
sys.exit(1)
return dict(
startTime=args.start.strftime("%Y-%m-%d %H:%M:%S"),
endTime=args.end.strftime("%Y-%m-%d %H:%M:%S"),
)
def convert_inverter_metric(metric, tags):
time = datetime.strptime(metric["date"], "%Y-%m-%d %H:%M:%S")
fields = dict(
totalActivePower=metric["totalActivePower"],
dcVoltage=metric["dcVoltage"],
powerLimit=metric["powerLimit"],
totalEnergy=metric["totalEnergy"],
temperature=metric["temperature"],
operationMode=metric["operationMode"],
acCurrent=metric["L1Data"]["acCurrent"],
acVoltage=metric["L1Data"]["acVoltage"],
acFrequency=metric["L1Data"]["acFrequency"],
apparentPower=metric["L1Data"]["apparentPower"],
activePower=metric["L1Data"]["activePower"],
reactivePower=metric["L1Data"]["reactivePower"],
cosPhi=metric["L1Data"]["cosPhi"],
)
# Not present when inverterMode="SLEEPING"
if "groundFaultResistance" in metric:
fields["groundFaultResistance"] = metric["groundFaultResistance"]
return dict(
measurement="solaredge_inverter",
tags=tags,
time=time.astimezone().isoformat(),
fields=fields,
)
def import_inverter_data(args, influx):
params = time_period_params(args, timedelta(days=7))
url = f"/equipment/{args.site_id}/{args.serial}/data"
response = api_request(args.api_key, url, params)
tags = dict(site_id=args.site_id, serial=args.serial)
influx.write_points(
convert_inverter_metric(metric, tags)
for metric in response["data"]["telemetries"]
)
def convert_power_metric(metric, tags):
time = datetime.strptime(metric["date"], "%Y-%m-%d %H:%M:%S")
power = metric.get("value")
power = float(power if power is not None else 0)
return dict(
measurement="solaredge_power",
tags=tags,
time=time.astimezone().isoformat(),
fields=dict(power=power),
)
def import_power_data(args, influx):
params = time_period_params(args, timedelta(days=30))
response = api_request(args.api_key, f"/site/{args.site_id}/power", params)
tags = dict(site_id=args.site_id)
influx.write_points(
convert_power_metric(metric, tags) for metric in response["power"]["values"]
)
def convert_energy_metric(metric, tags):
time = datetime.strptime(metric["date"], "%Y-%m-%d %H:%M:%S")
energy = metric.get("value")
energy = float(energy if energy is not None else 0)
return dict(
measurement="solaredge_energy",
tags=tags,
time=time.astimezone().isoformat(),
fields=dict(energy=energy),
)
def import_energy_data(args, influx):
params = time_period_params(args, timedelta(days=30))
params = dict(**params, meters="Production", timeUnit="QUARTER_OF_AN_HOUR")
response = api_request(args.api_key, f"/site/{args.site_id}/energyDetails", params)
meter = response["energyDetails"]["meters"][0]
tags = dict(site_id=args.site_id)
influx.write_points(
convert_energy_metric(metric, tags) for metric in meter["values"]
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--api-key",
help="Solaredge API key (env SOLAREDGE_API_KEY)",
default=getenv("SOLAREDGE_API_KEY"),
)
parser.add_argument(
"--site-id",
help="Site ID (env SOLAREDGE_SITE_ID)",
default=getenv("SOLAREDGE_SITE_ID"),
)
parser.add_argument(
"--influx-host",
help="InfluxDB host, defaults to 'localhost' (env INFLUXDB_HOST)",
default=getenv("INFLUX_HOST", "localhost"),
)
parser.add_argument(
"--influx-port",
help="InfluxDB port, defaults to 8086 (env INFLUX_PORT)",
type=int,
default=int(getenv("INFLUX_PORT", "8086")),
)
parser.add_argument(
"--influx-db",
help="InfluxDB database (env INFLUX_DB)",
default=getenv("INFLUX_DB"),
)
subparsers = parser.add_subparsers()
parser_version = subparsers.add_parser("version", help="version check")
parser_version.set_defaults(func=version_check)
parser_inventory = subparsers.add_parser("inventory", help="show inventory")
parser_inventory.set_defaults(func=show_inventory)
parser_import_inventory = subparsers.add_parser(
"inverter", help="import inverter data"
)
parser_import_inventory.add_argument(
"--serial", help="Inverter Serial number", required=True
)
add_time_period_args(parser_import_inventory)
parser_import_inventory.set_defaults(func=import_inverter_data)
parser_import_power = subparsers.add_parser("power", help="import power data")
add_time_period_args(parser_import_power)
parser_import_power.set_defaults(func=import_power_data)
parser_import_energy = subparsers.add_parser("energy", help="import energy data")
add_time_period_args(parser_import_energy)
parser_import_energy.set_defaults(func=import_energy_data)
args = parser.parse_args()
if args.api_key is None:
print(
"No api-key given. Either specify it via the --api-key "
"argument of the SOLAREDGE_API_KEY environment variable"
)
if args.site_id is None:
print(
"No site-id given. Either specify it via the --site-id "
"argument of the SOLAREDGE_SITE_ID environment variable"
)
sys.exit(1)
if "func" not in args:
parser.print_help()
sys.exit(1)
influx = InfluxDBClient(host=args.influx_host, port=args.influx_port)
influx.switch_database(args.influx_db)
influx.ping()
args.func(args, influx)
if __name__ == "__main__":
main()
| 7,881
| 0
| 276
|
eda9d93837af8aac0adf4e04bd2e1c6bad1a9773
| 811
|
py
|
Python
|
QualificationRound/python/r.py
|
NavneelSinghal/HCLHackIITK
|
91ceb865d1ff7c1ff109fbbbcfda8005d3b9cf93
|
[
"MIT"
] | null | null | null |
QualificationRound/python/r.py
|
NavneelSinghal/HCLHackIITK
|
91ceb865d1ff7c1ff109fbbbcfda8005d3b9cf93
|
[
"MIT"
] | null | null | null |
QualificationRound/python/r.py
|
NavneelSinghal/HCLHackIITK
|
91ceb865d1ff7c1ff109fbbbcfda8005d3b9cf93
|
[
"MIT"
] | null | null | null |
import urllib3
print(main())
| 27.965517
| 144
| 0.52651
|
import urllib3
def main():
input1='https://mettl-arq.s3-ap-southeast-1.amazonaws.com/questions/iit-kanpur/cyber-security-hackathon/round1/problem1/defaulttestcase.txt'
try:
r = urllib3.PoolManager().request('GET', input1)
except:
return ({0:0}, 'the URL is incorrect')
# alternative 1:
if r.headers['Content-Type'][:4] != 'text':
return ({0:0}, 'file is not a text file')
# alternative 2:
if input1[-4:] != '.txt':
return ({0:0}, 'file is not a text file')
l = r.data.decode('utf-8').split('\n')
d = {}
for i in range(len(l)):
s = ''.join(l[i].split(' '))
s = s.lower()
        if s == s[-1::-1] and len(s) != 0:
d[i+1] = len(s)
if len(d) == 0:
d = {0:0}
return (d, 'file ok')
print(main())
| 759
| 0
| 22
|
bb248d0321e754a4546fd7e0e3d830f91298fc50
| 1,301
|
py
|
Python
|
src/lib/dynamo/migration.py
|
arnulfojr/sanic-persistance-patterns
|
c3c433014401725ab60f1dde3c35848f9ce3ef88
|
[
"MIT"
] | null | null | null |
src/lib/dynamo/migration.py
|
arnulfojr/sanic-persistance-patterns
|
c3c433014401725ab60f1dde3c35848f9ce3ef88
|
[
"MIT"
] | null | null | null |
src/lib/dynamo/migration.py
|
arnulfojr/sanic-persistance-patterns
|
c3c433014401725ab60f1dde3c35848f9ce3ef88
|
[
"MIT"
] | null | null | null |
from lib.dynamo.client import DynamoClientManager
async def table_exists(name: str) -> bool:
"""Check if table exists."""
async with DynamoClientManager() as dynamodb:
try:
await dynamodb.describe_table(TableName=name)
except dynamodb.exceptions.ResourceNotFoundException:
state = False
else:
state = True
# allow the Context Manager to exit
return state
async def ensure_table(schema: dict):
"""Ensure the table exists."""
table_name = schema.get('TableName')
if not table_name:
return
exists = await table_exists(table_name)
if exists:
return
async with DynamoClientManager() as dynamodb:
await dynamodb.create_table(**schema)
waiter = dynamodb.get_waiter('table_exists')
await waiter.wait(TableName=table_name)
async def delete_table(schema: dict):
"""Deletes the table."""
table_name = schema.get('TableName')
if not table_name:
return
exists = await table_exists(table_name)
if not exists:
return
async with DynamoClientManager() as dynamodb:
await dynamodb.delete_table(TableName=table_name)
waiter = dynamodb.get_waiter('table_not_exists')
await waiter.wait(TableName=table_name)
| 27.680851
| 61
| 0.668716
|
from lib.dynamo.client import DynamoClientManager
async def table_exists(name: str) -> bool:
"""Check if table exists."""
async with DynamoClientManager() as dynamodb:
try:
await dynamodb.describe_table(TableName=name)
except dynamodb.exceptions.ResourceNotFoundException:
state = False
else:
state = True
# allow the Context Manager to exit
return state
async def ensure_table(schema: dict):
"""Ensure the table exists."""
table_name = schema.get('TableName')
if not table_name:
return
exists = await table_exists(table_name)
if exists:
return
async with DynamoClientManager() as dynamodb:
await dynamodb.create_table(**schema)
waiter = dynamodb.get_waiter('table_exists')
await waiter.wait(TableName=table_name)
async def delete_table(schema: dict):
"""Deletes the table."""
table_name = schema.get('TableName')
if not table_name:
return
exists = await table_exists(table_name)
if not exists:
return
async with DynamoClientManager() as dynamodb:
await dynamodb.delete_table(TableName=table_name)
waiter = dynamodb.get_waiter('table_not_exists')
await waiter.wait(TableName=table_name)
| 0
| 0
| 0
|
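A minimal usage sketch for the migration helpers in the row above; the `users` table name and schema are illustrative, and a configured `DynamoClientManager` is assumed to be importable exactly as the module itself imports it.

import asyncio
from lib.dynamo.migration import ensure_table
# Table schema in the shape expected by DynamoDB's create_table call (illustrative values).
USERS_SCHEMA = {
    'TableName': 'users',
    'KeySchema': [{'AttributeName': 'id', 'KeyType': 'HASH'}],
    'AttributeDefinitions': [{'AttributeName': 'id', 'AttributeType': 'S'}],
    'ProvisionedThroughput': {'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5},
}
# Creates the table only if it is missing, then waits until it is ready.
asyncio.run(ensure_table(USERS_SCHEMA))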
47dc8cafe5b06270c0674e3e4e16755e648fcdc1
| 1,140
|
py
|
Python
|
lib/models/backbone/supernet.py
|
Kevoen/LightTrack
|
0f4e0ffe7f4faaf39d637dcaa3a42e032b96f76c
|
[
"MIT"
] | 221
|
2021-04-16T11:23:55.000Z
|
2022-03-30T03:52:44.000Z
|
lib/models/backbone/supernet.py
|
swan2015/LightTrack
|
e94368aa80e924f8720887aa8f4fc23db074d3e7
|
[
"MIT"
] | 25
|
2021-04-16T11:37:20.000Z
|
2021-12-25T17:29:21.000Z
|
lib/models/backbone/supernet.py
|
swan2015/LightTrack
|
e94368aa80e924f8720887aa8f4fc23db074d3e7
|
[
"MIT"
] | 45
|
2021-04-30T07:10:15.000Z
|
2022-03-16T08:15:40.000Z
|
import numpy as np
import torch
from lib.models.backbone.models.hypernet import _gen_supernet
def build_supernet_DP(flops_maximum=600):
"""Backbone with Dynamic output position"""
set_seed()
model, sta_num, size_factor = _gen_supernet(
flops_minimum=0,
flops_maximum=flops_maximum,
DP=True,
num_classes=1000,
drop_rate=0.0,
global_pool='avg',
resunit=False,
dil_conv=False,
slice=4)
return model, sta_num
if __name__ == '__main__':
_, sta_num = build_supernet(flops_maximum=600)
print(sta_num)
| 22.8
| 61
| 0.657895
|
import numpy as np
import torch
from lib.models.backbone.models.hypernet import _gen_supernet
def set_seed():
seed = 42
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def build_supernet(flops_maximum=600):
set_seed()
model, sta_num, size_factor = _gen_supernet(
flops_minimum=0,
flops_maximum=flops_maximum,
num_classes=1000,
drop_rate=0.0,
global_pool='avg',
resunit=False,
dil_conv=False,
slice=4)
return model, sta_num
def build_supernet_DP(flops_maximum=600):
"""Backbone with Dynamic output position"""
set_seed()
model, sta_num, size_factor = _gen_supernet(
flops_minimum=0,
flops_maximum=flops_maximum,
DP=True,
num_classes=1000,
drop_rate=0.0,
global_pool='avg',
resunit=False,
dil_conv=False,
slice=4)
return model, sta_num
if __name__ == '__main__':
_, sta_num = build_supernet(flops_maximum=600)
print(sta_num)
| 497
| 0
| 46
|
130f82519e4ae3275d304a709e3078a731173f91
| 1,377
|
py
|
Python
|
ktane/solverutils/morse.py
|
thecnoNSMB/ktane-mission-solver
|
237d969a8acd21533b5fb18cc5a47dd03ba1cc7d
|
[
"MIT"
] | 1
|
2022-01-04T03:47:34.000Z
|
2022-01-04T03:47:34.000Z
|
ktane/solverutils/morse.py
|
thecnoNSMB/ktane-mission-solver
|
237d969a8acd21533b5fb18cc5a47dd03ba1cc7d
|
[
"MIT"
] | 4
|
2021-04-13T07:03:37.000Z
|
2021-12-15T11:34:05.000Z
|
ktane/solverutils/morse.py
|
thecnoNSMB/ktane-mission-solver
|
237d969a8acd21533b5fb18cc5a47dd03ba1cc7d
|
[
"MIT"
] | null | null | null |
"Utilities for asking for and processing Morse Code signals."
from typing import Final
from ktane import ask
__all__ = ["valid_morse", "decode", "ask_word"]
MORSE_ALPHABET: Final = {
"a": ".-",
"b": "-...",
"c": "-.-.",
"d": "-..",
"e": ".",
"f": "..-.",
"g": "--.",
"h": "....",
"i": "..",
"j": ".---",
"k": "-.-",
"l": ".-..",
"m": "--",
"n": "-.",
"o": "---",
"p": ".--.",
"q": "--.-",
"r": ".-.",
"s": "...",
"t": "-",
"u": "..-",
"v": "...-",
"w": ".--",
"x": "-..-",
"y": "-.--",
"z": "--..",
"0": "-----",
"1": ".----",
"2": "..---",
"3": "...--",
"4": "....-",
"5": ".....",
"6": "-....",
"7": "--...",
"8": "---..",
"9": "----."
}
INVERSE_MORSE_ALPHABET: Final = {v: k for k, v in MORSE_ALPHABET.items()}
def valid_morse(text: str) -> bool:
"Determine whether a string is valid Morse code."
chars = text.split()
return all(c in INVERSE_MORSE_ALPHABET for c in chars)
def decode(code: str) -> str:
"Convert a Morse code string into regular text."
chars = code.split()
return "".join(INVERSE_MORSE_ALPHABET[char] for char in chars)
def ask_word() -> str:
"Get a Morse code string from the user and convert it to a word."
code = ask.str_from_func(valid_morse)
return decode(code)
| 20.552239
| 73
| 0.424837
|
"Utilities for asking for and processing Morse Code signals."
from typing import Final
from ktane import ask
__all__ = ["valid_morse", "decode", "ask_word"]
MORSE_ALPHABET: Final = {
"a": ".-",
"b": "-...",
"c": "-.-.",
"d": "-..",
"e": ".",
"f": "..-.",
"g": "--.",
"h": "....",
"i": "..",
"j": ".---",
"k": "-.-",
"l": ".-..",
"m": "--",
"n": "-.",
"o": "---",
"p": ".--.",
"q": "--.-",
"r": ".-.",
"s": "...",
"t": "-",
"u": "..-",
"v": "...-",
"w": ".--",
"x": "-..-",
"y": "-.--",
"z": "--..",
"0": "-----",
"1": ".----",
"2": "..---",
"3": "...--",
"4": "....-",
"5": ".....",
"6": "-....",
"7": "--...",
"8": "---..",
"9": "----."
}
INVERSE_MORSE_ALPHABET: Final = {v: k for k, v in MORSE_ALPHABET.items()}
def valid_morse(text: str) -> bool:
"Determine whether a string is valid Morse code."
chars = text.split()
return all(c in INVERSE_MORSE_ALPHABET for c in chars)
def decode(code: str) -> str:
"Convert a Morse code string into regular text."
chars = code.split()
return "".join(INVERSE_MORSE_ALPHABET[char] for char in chars)
def ask_word() -> str:
"Get a Morse code string from the user and convert it to a word."
code = ask.str_from_func(valid_morse)
return decode(code)
| 0
| 0
| 0
|
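A minimal round-trip sketch for the Morse helpers in the row above, assuming the `ktane` package is importable; unlike ask_word(), valid_morse() and decode() need no user interaction.

from ktane.solverutils import morse
signal = "... --- ..."           # Morse letters separated by whitespace
if morse.valid_morse(signal):    # every token is a known Morse sequence
    print(morse.decode(signal))  # prints "sos"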
a2dd565565e8f9a9e23f9aca3e491ce17adf4c0a
| 657
|
py
|
Python
|
osc_tui/flexibleGPU.py
|
outscale-mdr/osc-tui
|
7d562c3f3ed88add27586bb0c3c14bee7053c37e
|
[
"BSD-3-Clause"
] | 5
|
2020-06-05T11:46:01.000Z
|
2022-02-01T18:05:11.000Z
|
osc_tui/flexibleGPU.py
|
outscale-mdr/osc-tui
|
7d562c3f3ed88add27586bb0c3c14bee7053c37e
|
[
"BSD-3-Clause"
] | 23
|
2020-04-29T19:26:49.000Z
|
2022-03-07T09:21:34.000Z
|
osc_tui/flexibleGPU.py
|
outscale-mdr/osc-tui
|
7d562c3f3ed88add27586bb0c3c14bee7053c37e
|
[
"BSD-3-Clause"
] | 10
|
2020-02-21T14:00:28.000Z
|
2021-09-23T12:06:39.000Z
|
import npyscreen
import pyperclip
import createVm
import main
import popup
import selectableGrid
import virtualMachine
| 29.863636
| 78
| 0.643836
|
import npyscreen
import pyperclip
import createVm
import main
import popup
import selectableGrid
import virtualMachine
class Grid(selectableGrid.SelectableGrid):
def __init__(self, screen, *args, **keywords):
super().__init__(screen, *args, **keywords)
self.col_titles = ["Id", "Generation", "Model Name", "State"]
def refresh(self, name_filter=None):
groups = main.GATEWAY.ReadFlexibleGpus(form=self.form)['FlexibleGpus']
values = list()
for g in groups:
values.append([g['FlexibleGpuId'], g['Generation'],
g['ModelName'], g["State"]])
self.values = values
| 440
| 21
| 76
|
62095d061138f576a9cac1b1bf00dee00d53a7c1
| 8,026
|
py
|
Python
|
tests/acceptance/rfc1350_test.py
|
pedrudehuere/py3tftp
|
b43d993f73a9f9617f22f886a9d2d6b91884ed1c
|
[
"MIT"
] | 41
|
2016-02-28T08:01:30.000Z
|
2022-03-12T14:53:34.000Z
|
tests/acceptance/rfc1350_test.py
|
pedrudehuere/otftp
|
b43d993f73a9f9617f22f886a9d2d6b91884ed1c
|
[
"MIT"
] | 15
|
2017-03-04T04:04:42.000Z
|
2021-05-19T03:33:46.000Z
|
tests/acceptance/rfc1350_test.py
|
pedrudehuere/otftp
|
b43d993f73a9f9617f22f886a9d2d6b91884ed1c
|
[
"MIT"
] | 22
|
2017-03-29T07:50:09.000Z
|
2021-12-24T22:02:27.000Z
|
import hashlib
import socket
import unittest
from io import BytesIO
from os import remove as rm
from os.path import exists
from time import sleep
import tests.test_helpers as h
if __name__ == '__main__':
unittest.main()
| 31.849206
| 79
| 0.567157
|
import hashlib
import socket
import unittest
from io import BytesIO
from os import remove as rm
from os.path import exists
from time import sleep
import tests.test_helpers as h
class TestRRQ(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open('LICENSE', 'rb') as f:
cls.license = f.read()
cls.license_md5 = hashlib.md5(cls.license).hexdigest()
cls.server_addr = ('127.0.0.1', 9069,)
cls.rrq = h.RRQ + b'LICENSE\x00binary\x00'
def setUp(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.counter = 1
self.output = []
self.data = None
self.s.sendto(self.rrq, self.server_addr)
def tearDown(self):
self.s.close()
def test_perfect_scenario(self):
while True:
self.data, server = self.s.recvfrom(1024)
self.output += self.data[4:]
msg = h.ACK + self.counter.to_bytes(2, byteorder='big')
self.s.sendto(msg, server)
self.counter += 1
if len(self.data[4:]) < 512:
break
received = bytes(self.output)
received_md5 = hashlib.md5(received).hexdigest()
self.assertEqual(len(self.license), len(received))
self.assertTrue(self.license_md5 == received_md5)
def test_no_acks(self):
no_ack = True
while True:
self.data, server = self.s.recvfrom(1024)
if self.counter % 5 == 0 and no_ack:
# dont ack, discard data
no_ack = False
else:
no_ack = True
self.output += self.data[4:]
msg = h.ACK + self.counter.to_bytes(2, byteorder='big')
self.s.sendto(msg, server)
self.counter += 1
if len(self.data[4:]) < 512:
break
received = bytes(self.output)
received_md5 = hashlib.md5(received).hexdigest()
self.assertEqual(len(self.license), len(received))
self.assertTrue(self.license_md5 == received_md5)
def test_total_timeout(self):
max_msgs = 2
while True:
self.data, server = self.s.recvfrom(1024)
if self.counter >= max_msgs:
break
self.output += self.data[4:]
msg = h.ACK + self.counter.to_bytes(2, byteorder='big')
self.s.sendto(msg, server)
self.counter += 1
if len(self.data[4:]) < 512:
break
received = bytes(self.output)
self.assertEqual((max_msgs - 1) * 512, len(received))
class TestWRQ(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.license_buf = BytesIO()
with open('LICENSE', 'rb') as f:
license = f.read()
cls.license_buf.write(license)
cls.license_buf.seek(0)
cls.license_md5 = hashlib.md5(license).hexdigest()
cls.server_addr = ('127.0.0.1', 9069,)
cls.wrq = h.WRQ + b'LICENSE_TEST\x00binary\x00'
def setUp(self):
if exists('LICENSE_TEST'):
rm('LICENSE_TEST')
self.license = iter(lambda: self.license_buf.read(512), b'')
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.s.sendto(self.wrq, self.server_addr)
def tearDown(self):
self.license_buf.seek(0)
self.s.close()
def test_perfect_transfer(self):
for i, chunk in enumerate(self.license):
ack, server = self.s.recvfrom(1024)
self.assertEqual(ack, h.ACK + i.to_bytes(2, byteorder='big'))
self.s.sendto(h.DAT + (i + 1).to_bytes(2, byteorder='big') + chunk,
server)
sleep(1)
with open('LICENSE_TEST', 'rb') as f:
license_test = f.read()
license_test_md5 = hashlib.md5(license_test).hexdigest()
self.assertEqual(len(license_test), self.license_buf.tell())
self.assertEqual(self.license_md5, license_test_md5)
def test_lost_data_packet(self):
last_pkt = None
pkt = None
counter = 0
outbound_data = self.license
while True:
ack, server = self.s.recvfrom(1024)
if counter > 0 and counter % 10 == 0 and pkt != last_pkt:
pkt = last_pkt
else:
try:
pkt = next(outbound_data)
except StopIteration:
break
counter += 1
self.s.sendto(h.DAT +
(counter).to_bytes(2,
byteorder='big') + pkt,
server)
last_pkt = pkt
sleep(1)
with open('LICENSE_TEST', 'rb') as f:
license_test = f.read()
license_test_md5 = hashlib.md5(license_test).hexdigest()
self.assertEqual(len(license_test), self.license_buf.tell())
self.assertEqual(self.license_md5, license_test_md5)
def test_drop_client_connection(self):
PKTS_BEFORE_DISCONNECT = 1
for i, chunk in enumerate(self.license):
ack, server = self.s.recvfrom(1024)
if i >= PKTS_BEFORE_DISCONNECT:
break
self.s.sendto(h.DAT + (i + 1).to_bytes(2, byteorder='big') + chunk,
server)
# wait for timeout to close file
sleep(5.1)
with open('LICENSE_TEST', 'rb') as f:
license_test = f.read()
self.assertEqual(len(license_test), self.license_buf.tell() - 512)
class TestTFTPErrors(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.server_addr = ('127.0.0.1', 9069,)
def setUp(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def tearDown(self):
self.s.close()
def test_file_not_found(self):
no_such_file = h.RRQ + b'NOSUCHFILE\x00binary\x00'
self.s.sendto(no_such_file, self.server_addr)
data, server = self.s.recvfrom(512)
self.assertEqual(h.ERR + h.NOFOUND, data[:4])
def test_file_already_exists(self):
dup_file = h.WRQ + b'LICENSE\x00octet\x00'
self.s.sendto(dup_file, self.server_addr)
data, server = self.s.recvfrom(512)
self.assertEqual(h.ERR + h.EEXISTS, data[:4])
def test_unknown_transfer_id_rrq(self):
legit_transfer = h.RRQ + b'LICENSE\x00octet\x00'
self.s.sendto(legit_transfer, self.server_addr)
data, server = self.s.recvfrom(1024)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.sendto(h.ACK + (1).to_bytes(2, byteorder='big'), server)
err, server = s.recvfrom(32)
finally:
s.close()
self.assertEqual(h.ERR + h.UNKNTID, err[:4])
def test_unknown_transfer_id_wrq(self):
if exists('LICENSE_TEST'):
rm('LICENSE_TEST')
legit_transfer = h.WRQ + b'LICENSE_TEST\x00octet\x00'
self.s.sendto(legit_transfer, self.server_addr)
ack, server = self.s.recvfrom(16)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.sendto(h.DAT +
(1).to_bytes(2, byteorder='big') + b'\x41\x41\x41',
server)
err, server = s.recvfrom(32)
finally:
s.close()
self.assertEqual(h.ERR + h.UNKNTID, err[:4])
@unittest.skip('Gotta think of a way to test this')
def test_access_violation(self):
no_perms = h.RRQ + b'NOPERMS\x00binary\x00'
self.s.sendto(no_perms, self.server_addr)
data, server = self.s.recvfrom(512)
self.assertEqual(h.ERR + h.ACCVIOL, data[:4])
@unittest.skip('')
def test_illegal_tftp_operation(self):
pass
@unittest.skip('')
def test_undefined_error(self):
pass
@unittest.skip('')
def test_disk_full(self):
pass
if __name__ == '__main__':
unittest.main()
| 6,890
| 837
| 69
|
b5f117895fcef3c40425dd579c4ad9c9fc4abb1b
| 1,561
|
py
|
Python
|
rvo/cli.py
|
noqqe/rvo
|
423e1ea1aea0a2dc849ceae838e18896a13e7771
|
[
"MIT"
] | 14
|
2016-05-04T13:56:10.000Z
|
2019-08-01T14:31:33.000Z
|
rvo/cli.py
|
noqqe/rvo
|
423e1ea1aea0a2dc849ceae838e18896a13e7771
|
[
"MIT"
] | 12
|
2016-08-01T12:42:53.000Z
|
2022-02-16T09:37:47.000Z
|
rvo/cli.py
|
noqqe/rvo
|
423e1ea1aea0a2dc849ceae838e18896a13e7771
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2.7
import sys
import pymongo
import os
import click
import datetime
import rvo.utils as utils
from rvo import __version__
import rvo.config
command_folder = os.path.join(os.path.dirname(__file__), 'commands')
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
# rvo command class
# base help message
@click.command(cls=rvoCommands, context_settings=CONTEXT_SETTINGS,
help="""
Manage text data on commandline
\b
888,8, Y8b Y888P e88 88e
888 " Y8b Y8P d888 888b
888 Y8b " Y888 888P
888 Y8P "88 88"
For the sake of your own data being managed
by you and only you!
""")
@click.version_option(version=__version__, prog_name="rvo")
@click.pass_context
if __name__ == '__main__':
cli()
| 24.777778
| 80
| 0.627162
|
#!/usr/bin/env python2.7
import sys
import pymongo
import os
import click
import datetime
import rvo.utils as utils
from rvo import __version__
import rvo.config
command_folder = os.path.join(os.path.dirname(__file__), 'commands')
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
# rvo command class
class rvoCommands(click.MultiCommand):
def list_commands(self, ctx):
rv = []
for filename in os.listdir(command_folder):
#if filename.endswith('.py'):
if filename.endswith('.py') and not filename.startswith('__init__'):
rv.append(filename[:-3])
rv.sort()
return rv
def get_command(self, ctx, name):
ns = {}
fn = os.path.join(command_folder, name + '.py')
try:
with open(fn) as f:
code = compile(f.read(), fn, 'exec')
eval(code, ns, ns)
return ns[name]
except IOError:
click.help_option()
# base help message
@click.command(cls=rvoCommands, context_settings=CONTEXT_SETTINGS,
help="""
Manage text data on commandline
\b
888,8, Y8b Y888P e88 88e
888 " Y8b Y8P d888 888b
888 Y8b " Y888 888P
888 Y8P "88 88"
For the sake of your own data being managed
by you and only you!
""")
@click.version_option(version=__version__, prog_name="rvo")
@click.pass_context
def cli(ctx):
ctx.obj = {}
ctx.obj['config'] = rvo.config.parse_config()
ctx.obj['db'] = pymongo.MongoClient(ctx.obj["config"]["uri"])
if __name__ == '__main__':
cli()
| 702
| 17
| 98
|
3ff966cd61af5b1670e1b17560b12849204e6a21
| 2,423
|
py
|
Python
|
python/config.py
|
NREL/dgen_globetrotter
|
afef63c75a0721e18d2c3748a73a6f5300d2054b
|
[
"BSD-3-Clause"
] | null | null | null |
python/config.py
|
NREL/dgen_globetrotter
|
afef63c75a0721e18d2c3748a73a6f5300d2054b
|
[
"BSD-3-Clause"
] | null | null | null |
python/config.py
|
NREL/dgen_globetrotter
|
afef63c75a0721e18d2c3748a73a6f5300d2054b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
This module contains variables that can be changed, but are not exposed to non-expert users.
"""
import os
import multiprocessing
#==============================================================================
#==============================================================================
SCENARIOS = ['india_base']
SECTORS = ['res','com','ind']
SECTOR_NAMES = {'res':'Residential','com':'Commercial','ind':'Industrial'}
TECHS = [['solar']]
TECH_MODES = ['elec']
BA_COLUMN = 'state_id' #geo id column that data is available at such as control_reg_id, state_id, district_id etc.
#==============================================================================
# get the path of the current file
#==============================================================================
MODEL_PATH = os.path.dirname(os.path.abspath(__file__))
#==============================================================================
# model start year
#==============================================================================
START_YEAR = 2016
#==============================================================================
# local cores
#==============================================================================
LOCAL_CORES = int(multiprocessing.cpu_count() / 2)
#==============================================================================
# silence some output
#==============================================================================
VERBOSE = False
#==============================================================================
# run a smaller agent_df for debugging
#==============================================================================
SAMPLE_PCT = 1
#==============================================================================
# Runtime Tests
#==============================================================================
NULL_COLUMN_EXCEPTIONS = ['state_incentives', 'pct_state_incentives', 'batt_dispatch_profile', 'export_tariff_results','carbon_price_cents_per_kwh']
# 'market_share_last_year', 'max_market_share_last_year', 'adopters_cum_last_year', 'market_value_last_year', 'initial_number_of_adopters', 'initial_pv_kw', 'initial_market_share', 'initial_market_value', 'system_kw_cum_last_year', 'new_system_kw', 'batt_kw_cum_last_year', 'batt_kwh_cum_last_year',
CHANGED_DTYPES_EXCEPTIONS = []
MISSING_COLUMN_EXCEPTIONS = []
| 49.44898
| 323
| 0.401156
|
# -*- coding: utf-8 -*-
"""
This module contains variables that can be changed, but are not exposed to non-expert users.
"""
import os
import multiprocessing
#==============================================================================
#==============================================================================
SCENARIOS = ['india_base']
SECTORS = ['res','com','ind']
SECTOR_NAMES = {'res':'Residential','com':'Commercial','ind':'Industrial'}
TECHS = [['solar']]
TECH_MODES = ['elec']
BA_COLUMN = 'state_id' #geo id column that data is available at such as control_reg_id, state_id, district_id etc.
#==============================================================================
# get the path of the current file
#==============================================================================
MODEL_PATH = os.path.dirname(os.path.abspath(__file__))
#==============================================================================
# model start year
#==============================================================================
START_YEAR = 2016
#==============================================================================
# local cores
#==============================================================================
LOCAL_CORES = int(multiprocessing.cpu_count() / 2)
#==============================================================================
# silence some output
#==============================================================================
VERBOSE = False
#==============================================================================
# run a smaller agent_df for debugging
#==============================================================================
SAMPLE_PCT = 1
#==============================================================================
# Runtime Tests
#==============================================================================
NULL_COLUMN_EXCEPTIONS = ['state_incentives', 'pct_state_incentives', 'batt_dispatch_profile', 'export_tariff_results','carbon_price_cents_per_kwh']
# 'market_share_last_year', 'max_market_share_last_year', 'adopters_cum_last_year', 'market_value_last_year', 'initial_number_of_adopters', 'initial_pv_kw', 'initial_market_share', 'initial_market_value', 'system_kw_cum_last_year', 'new_system_kw', 'batt_kw_cum_last_year', 'batt_kwh_cum_last_year',
CHANGED_DTYPES_EXCEPTIONS = []
MISSING_COLUMN_EXCEPTIONS = []
| 0
| 0
| 0
|
1d7828e5aaee6bca7ce83a37cb7af43c58b1cbd9
| 4,240
|
py
|
Python
|
tsdata/models.py
|
caktus/Traffic-Stops
|
2c6eda9477f1770c5ad1208a1937c3e828fbfb28
|
[
"MIT"
] | 1
|
2021-12-10T14:58:11.000Z
|
2021-12-10T14:58:11.000Z
|
tsdata/models.py
|
caktus/Traffic-Stops
|
2c6eda9477f1770c5ad1208a1937c3e828fbfb28
|
[
"MIT"
] | 5
|
2020-08-12T15:20:31.000Z
|
2021-06-10T13:43:02.000Z
|
tsdata/models.py
|
caktus/Traffic-Stops
|
2c6eda9477f1770c5ad1208a1937c3e828fbfb28
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
STATE_CHOICES = ((settings.NC_KEY, "North Carolina"),)
STATUS_CHOICES = (
("running", "Running"),
("error", "Error"),
("finished", "Finished"),
)
GEOGRAPHY_CHOICES = (
("county", "County"),
("place", "Place"),
)
| 34.754098
| 92
| 0.700708
|
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
STATE_CHOICES = ((settings.NC_KEY, "North Carolina"),)
STATUS_CHOICES = (
("running", "Running"),
("error", "Error"),
("finished", "Finished"),
)
GEOGRAPHY_CHOICES = (
("county", "County"),
("place", "Place"),
)
class Dataset(models.Model):
state = models.CharField(choices=STATE_CHOICES, max_length=2)
name = models.CharField(max_length=255, unique=True)
date_added = models.DateTimeField(auto_now_add=True)
date_received = models.DateField()
url = models.URLField("URL", unique=True)
destination = models.CharField(
blank=True,
max_length=1024,
help_text="Absolute path to destination directory (helpful for testing)",
) # noqa
report_email_1 = models.EmailField(blank=True)
report_email_2 = models.EmailField(blank=True)
def __str__(self):
return "{}: {}".format(self.get_state_display(), self.name)
@property
def agency_model(self):
"""Return the appropriate Agency model for this Dataset's state.
"""
from nc.models import Agency as NCAgency
agencies = {
settings.NC_KEY: NCAgency,
}
return agencies.get(self.state)
class Import(models.Model):
dataset = models.ForeignKey(Dataset, on_delete=models.CASCADE)
date_started = models.DateTimeField(auto_now_add=True)
date_finished = models.DateTimeField(null=True)
successful = models.BooleanField(default=False)
def __str__(self):
return "Import of {}".format(self.dataset)
class CensusProfile(models.Model):
id = models.CharField("ID", primary_key=True, max_length=16)
location = models.CharField(max_length=255)
geography = models.CharField(max_length=16, choices=GEOGRAPHY_CHOICES)
state = models.CharField(max_length=2)
source = models.CharField(max_length=255)
white = models.PositiveIntegerField(default=0)
black = models.PositiveIntegerField(default=0)
native_american = models.PositiveIntegerField(default=0)
asian = models.PositiveIntegerField(default=0)
native_hawaiian = models.PositiveIntegerField(default=0)
other = models.PositiveIntegerField(default=0)
two_or_more_races = models.PositiveIntegerField(default=0)
hispanic = models.PositiveIntegerField(default=0)
non_hispanic = models.PositiveIntegerField(default=0)
total = models.PositiveIntegerField(default=0)
def __str__(self):
return self.location
def get_census_dict(self):
return dict(
white=self.white,
black=self.black,
native_american=self.native_american,
asian=self.asian,
other=self.other + self.native_hawaiian + self.two_or_more_races,
hispanic=self.hispanic,
non_hispanic=self.non_hispanic,
total=self.total,
)
class StateFacts(models.Model):
state_key = models.CharField(choices=STATE_CHOICES, max_length=2, unique=True)
total_stops = models.PositiveIntegerField(default=0)
total_stops_millions = models.PositiveIntegerField(default=0)
total_searches = models.PositiveIntegerField(default=0)
total_agencies = models.PositiveIntegerField(default=0)
start_date = models.CharField(max_length=20, default="")
end_date = models.CharField(max_length=20, default="")
def __str__(self):
return "Facts for state %s" % self.state_key
class Meta:
verbose_name_plural = "state facts"
class TopAgencyFacts(models.Model):
state_facts = models.ForeignKey(StateFacts, on_delete=models.CASCADE)
rank = models.SmallIntegerField(validators=[MinValueValidator(1), MaxValueValidator(5)])
agency_id = models.PositiveIntegerField(default=0)
stops = models.PositiveIntegerField(default=0)
name = models.CharField(max_length=255, default="")
def __str__(self):
return "Facts for state %s agency %s" % (self.state_facts.state_key, self.name)
class Meta:
unique_together = (("state_facts", "rank"),)
verbose_name_plural = "top agency facts"
ordering = ["state_facts__state_key", "rank"]
| 638
| 3,113
| 115
|
08aa3d0ae0cea2ca611adaf32c844e2e0178cc1a
| 12,695
|
py
|
Python
|
pyrehol/__init__.py
|
fakeNetflix/uber-repo-pyrehol
|
7c8b75e1780c7e85cb77748fbdf3cf6fa6c14294
|
[
"MIT"
] | null | null | null |
pyrehol/__init__.py
|
fakeNetflix/uber-repo-pyrehol
|
7c8b75e1780c7e85cb77748fbdf3cf6fa6c14294
|
[
"MIT"
] | null | null | null |
pyrehol/__init__.py
|
fakeNetflix/uber-repo-pyrehol
|
7c8b75e1780c7e85cb77748fbdf3cf6fa6c14294
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import cStringIO
import types
__name__ = 'pyrehol'
__author__ = 'James Brown <jbrown@uber.com>'
version_info = (0, 3)
__version__ = '.'.join(map(str, version_info))
INDENT_CHAR = ' '
PREDEFINED_SERVICES = frozenset([
'any', 'anystateless', 'all',
'AH', 'apcupsd', 'apcupsdnis', 'aptproxy', 'asterisk', 'cups',
'cvspserver', 'darkstat', 'daytime', 'dcc', 'dcpp', 'dhcprelay', 'dict',
'distcc', 'dns', 'echo', 'eserver', 'ESP', 'finger', 'gift', 'giftui',
'gkrellmd', 'GRE', 'h323', 'heartbeat', 'http', 'https', 'iax', 'iax2',
'icmp', 'ICMP', 'icp', 'ident', 'imap', 'imaps', 'irc', 'isakmp',
'jabber', 'jabberd', 'ldap', 'ldaps', 'lpd', 'mms', 'msn', 'msnp',
'mysql', 'netbackup', 'nfs', 'nntp', 'nntps', 'ntp', 'nut', 'nxserver', 'openvpn',
'oracle', 'OSPF', 'pop3', 'pop3s', 'portmap', 'postgres', 'privoxy',
'radius', 'radiusold', 'radiusoldproxy', 'radiusproxy', 'rdp', 'rndc',
'rsync', 'rtp', 'sip', 'smtp', 'smtps', 'snmp', 'snmptrap', 'socks',
'squid', 'ssh', 'stun', 'submission', 'sunrpc', 'swat', 'syslog', 'telnet',
'time', 'upnp', 'uucp', 'vmware', 'vmwareauth', 'vmwareweb', 'vnc',
'webcache', 'webmin', 'whois', 'xdmcp',
])
class Pyrehol(object):
"""Top-level wrapper for a Firehol config"""
def emit(self, out_fo=None):
"""Write out to a file descriptor. If one isn't passed, prints to standard out.
:param out_fo: A file-like object or None
"""
print_it = False
if out_fo is None:
out_fo = cStringIO.StringIO()
print_it = True
out_fo.write('version %d\n\n' % self.version)
if self.leader_lines:
out_fo.write('\n'.join(self.leader_lines))
out_fo.write('\n\n')
for thing in sorted(self.service_defines.values()):
thing.emit(out_fo)
out_fo.write('\n')
for thing in self.contents:
thing.emit(out_fo)
out_fo.write('\n')
if self.trailer_lines:
out_fo.write('\n'.join(self.trailer_lines))
out_fo.write('\n\n')
if print_it:
print out_fo.getvalue()
def define_service(self, service_name, server_portspec,
client_portspec='default'):
"""Add a new service to Firehol (for use in server/client blocks later).
:param service_name: Name for the service, suitable for use as a bash variable name
:param server_portspec: Port specification for the server side (example: "tcp/80 tcp/443")
:param client_portspec: Port specification for the client side (example: "any")
"""
new_define = _PyreholService(
service_name, server_portspec, client_portspec, root=self
)
if service_name in self.services:
assert new_define == self.service_defines[service_name],\
'%s != %s' % (new_define, self.service_defines[service_name])
else:
self.service_defines[service_name] = new_define
self.services.add(service_name)
| 35.460894
| 110
| 0.589681
|
from __future__ import absolute_import
import cStringIO
import types
__name__ = 'pyrehol'
__author__ = 'James Brown <jbrown@uber.com>'
version_info = (0, 3)
__version__ = '.'.join(map(str, version_info))
INDENT_CHAR = ' '
PREDEFINED_SERVICES = frozenset([
'any', 'anystateless', 'all',
'AH', 'apcupsd', 'apcupsdnis', 'aptproxy', 'asterisk', 'cups',
'cvspserver', 'darkstat', 'daytime', 'dcc', 'dcpp', 'dhcprelay', 'dict',
'distcc', 'dns', 'echo', 'eserver', 'ESP', 'finger', 'gift', 'giftui',
'gkrellmd', 'GRE', 'h323', 'heartbeat', 'http', 'https', 'iax', 'iax2',
'icmp', 'ICMP', 'icp', 'ident', 'imap', 'imaps', 'irc', 'isakmp',
'jabber', 'jabberd', 'ldap', 'ldaps', 'lpd', 'mms', 'msn', 'msnp',
'mysql', 'netbackup', 'nfs', 'nntp', 'nntps', 'ntp', 'nut', 'nxserver', 'openvpn',
'oracle', 'OSPF', 'pop3', 'pop3s', 'portmap', 'postgres', 'privoxy',
'radius', 'radiusold', 'radiusoldproxy', 'radiusproxy', 'rdp', 'rndc',
'rsync', 'rtp', 'sip', 'smtp', 'smtps', 'snmp', 'snmptrap', 'socks',
'squid', 'ssh', 'stun', 'submission', 'sunrpc', 'swat', 'syslog', 'telnet',
'time', 'upnp', 'uucp', 'vmware', 'vmwareauth', 'vmwareweb', 'vnc',
'webcache', 'webmin', 'whois', 'xdmcp',
])
def listify(string_or_list):
if isinstance(string_or_list, basestring):
return [string_or_list]
else:
return string_or_list
def nameify(name):
if name is None:
return
    assert '-' not in name, 'Name may not contain the "-" character'
    assert len(name) < 28, 'For dumb reasons, iptables variables must be < 28 chars'
class Pyrehol(object):
"""Top-level wrapper for a Firehol config"""
def __init__(self):
self.contents = []
self.service_defines = {}
self.services = set(PREDEFINED_SERVICES)
self.version = 5
self.leader_lines = []
self.trailer_lines = []
def emit(self, out_fo=None):
"""Write out to a file descriptor. If one isn't passed, prints to standard out.
:param out_fo: A file-like object or None
"""
print_it = False
if out_fo is None:
out_fo = cStringIO.StringIO()
print_it = True
out_fo.write('version %d\n\n' % self.version)
if self.leader_lines:
out_fo.write('\n'.join(self.leader_lines))
out_fo.write('\n\n')
for thing in sorted(self.service_defines.values()):
thing.emit(out_fo)
out_fo.write('\n')
for thing in self.contents:
thing.emit(out_fo)
out_fo.write('\n')
if self.trailer_lines:
out_fo.write('\n'.join(self.trailer_lines))
out_fo.write('\n\n')
if print_it:
print out_fo.getvalue()
def define_service(self, service_name, server_portspec,
client_portspec='default'):
"""Add a new service to Firehol (for use in server/client blocks later).
:param service_name: Name for the service, suitable for use as a bash variable name
:param server_portspec: Port specification for the server side (example: "tcp/80 tcp/443")
:param client_portspec: Port specification for the client side (example: "any")
"""
new_define = _PyreholService(
service_name, server_portspec, client_portspec, root=self
)
if service_name in self.services:
assert new_define == self.service_defines[service_name],\
'%s != %s' % (new_define, self.service_defines[service_name])
else:
self.service_defines[service_name] = new_define
self.services.add(service_name)
class _PyreholChainable(type):
def __new__(cls, name, bases, dct):
cls_obj = type.__new__(cls, name, bases, dct)
if cls_obj.label is not None:
for kls in cls_obj._addable_from:
if cls_obj._is_setter:
function_name = 'set_%s' % cls_obj.label
else:
function_name = 'add_%s' % cls_obj.label
if not getattr(kls, function_name, None):
def add_thing(self, *args, **kwargs):
if isinstance(self, Pyrehol):
kwargs['root'] = self
else:
kwargs['root'] = self.root
if cls_obj._is_setter and getattr(self, 'did_set_%s' % cls_obj.label, False):
raise ValueError('Cannot set %s on the same block more than once' % cls_obj.label)
o = cls_obj(*args, **kwargs)
setattr(self, 'set_%s' % cls_obj.label, True)
self.contents.append(o)
return o
add_thing.__name__ = function_name
add_thing.__doc__ = '%s %s on this %s. Returns the %s.\n\n' % (
'Set the' if cls_obj._is_setter else 'Add a new',
cls_obj.label, kls.__name__, name.replace('_', '', 1),
)
if cls_obj.__init__.__doc__:
add_thing.__doc__ += cls_obj.__init__.__doc__
setattr(kls, function_name, types.UnboundMethodType(add_thing, None, kls))
return cls_obj
class _PyreholObject(object):
__metaclass__ = _PyreholChainable
_addable_from = tuple()
_is_setter = False
label = None
def __init__(self, root=None):
self.root = root
def _w(self, file_object, indent, line):
file_object.write(INDENT_CHAR * indent + line + '\n')
def emit(self, fo):
for indent, line in self.lines:
self._w(fo, indent, line)
class _PyreholBlock(_PyreholObject):
def __init__(self, name, root=None):
super(_PyreholBlock, self).__init__(root=root)
nameify(name)
self.name = name
self.contents = []
@property
def lines(self):
for thing in self.contents:
for indent, line in thing.lines:
yield indent + 1, line
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
return False
class _PyreholTopLevelBlock(_PyreholBlock):
_addable_from = (Pyrehol,)
def __init__(self, name, root=None):
super(_PyreholTopLevelBlock, self).__init__(name, root=root)
self._before_name = ''
self._after_name = ''
@property
def lines(self):
yield (0, '%s%s%s %s%s%s' % (
self.label,
' ' if self._before_name else '', self._before_name,
self.name,
' ' if self._after_name else '', self._after_name
))
for line in super(_PyreholTopLevelBlock, self).lines:
yield line
class _PyreholRouter(_PyreholTopLevelBlock):
label = 'router'
def __init__(self, name, rule_params, root=None):
"""Construct a router block
:param name: Name of this block. Should be suitable to use as a bash variable name
        :param rule_params: A list of rule parameters (e.g., "inface eth0" or "src 10.0.0.0/8")
        """
        super(_PyreholRouter, self).__init__(name, root=root)
self.rule_params = listify(rule_params)
self._after_name = ' '.join(self.rule_params)
class _PyreholInterface(_PyreholTopLevelBlock):
label = 'interface'
def __init__(self, name, interfaces, root=None):
"""Construct an interface block
:param name: Name of this block. Should be suitable to use as a bash variable name
:param interfaces: List of interface devices (e.g., "eth0")
"""
super(_PyreholInterface, self).__init__(name, root=root)
self.interfaces = listify(interfaces)
self._before_name = '"%s"' % ' '.join(self.interfaces)
class _PyreholGroup(_PyreholBlock):
_addable_from = (_PyreholBlock,)
label = 'group'
def __init__(self, rule_params, root=None):
"""An arbitrary grouping of rules, for efficiency
:param rule_params: A list of mutating parameters to group by (e.g., "src 10.0.0.0/8")
"""
super(_PyreholGroup, self).__init__(name=None, root=root)
self.rule_params = listify(rule_params)
@property
def lines(self):
if not self.contents:
return
yield (0, 'group with %s' % ' '.join(self.rule_params))
for thing in self.contents:
for indent, line in thing.lines:
yield indent + 1, line
yield (0, 'group end')
class _PyreholStanza(_PyreholObject):
_addable_from = (_PyreholBlock,)
@property
def lines(self):
yield 0, self.text
class _PyreholPolicy(_PyreholStanza):
label = 'policy'
_is_setter = True
def __init__(self, action, root=None):
"""Set the default policy for this block.
:param action: The default action to take (accept, drop, reject, etc.)
"""
super(_PyreholPolicy, self).__init__(root=root)
self.text = '%s %s' % (self.label, action)
class _PyreholService(_PyreholStanza):
_addable_from = (Pyrehol,)
def __init__(self, name, server_portspec, client_portspec, root=None):
"""A single service
:param name: A name suitable for use as a bash variable name
:param server_portspec: Server portspec (e.g., "tcp/80")
:param client_portspec: Client portspec (e.g., "default")
"""
super(_PyreholService, self).__init__(root=root)
nameify(name)
self.name = name
self.server_portspec = tuple(sorted(listify(server_portspec)))
self.client_portspec = tuple(sorted(listify(client_portspec)))
@property
def _tuple(self):
return (self.name, self.server_portspec, self.client_portspec)
def __cmp__(self, other):
return cmp(self._tuple, other._tuple)
def __repr__(self):
return 'PyreholService(%s, %s, %s)' % (self.name, self.server_portspec, self.client_portspec)
@property
def lines(self):
yield 0, 'server_%s_ports="%s"' % (
self.name, ' '.join(self.server_portspec)
)
yield 0, 'client_%s_ports="%s"' % (
self.name, ' '.join(self.client_portspec)
)
class _PyreholServer(_PyreholStanza):
label = 'server'
def __init__(self, services, action, rule_params=[], root=None):
"""A server stanza. For communication INPUT to this host.
:param services: Service name or list of service names (e.g., "http")
:param action: Action to take for these services (e.g., "accept")
:param rule_params: A list of modifying rule parameters (e.g, "src 10.0.0.0/8")
"""
super(_PyreholServer, self).__init__(root=root)
services = listify(services)
for service in services:
assert service in self.root.services, \
'%s not defined (missing .define_service call?)' % service
self.text = '%s %s%s%s %s%s%s' % (
self.label,
'"' if len(services) > 1 else '',
' '.join(services),
'"' if len(services) > 1 else '',
action,
' ' if rule_params else '',
' '.join(rule_params),
)
class _PyreholClient(_PyreholStanza):
label = 'client'
def __init__(self, services, action, rule_params=[], root=None):
"""A client stanza. For communication OUTPUT from this host.
:param services: Service name or list of service names (e.g., "http")
:param action: Action to take for these services (e.g., "accept")
:param rule_params: A list of modifying rule parameters (e.g, "src 10.0.0.0/8")
"""
super(_PyreholClient, self).__init__(root=root)
services = listify(services)
for service in services:
assert service in self.root.services, \
'%s not defined (missing .define_service call?)' % service
self.text = '%s %s%s%s %s %s' % (
self.label,
'"' if len(services) > 1 else '',
' '.join(services),
'"' if len(services) > 1 else '',
action,
' '.join(rule_params),
)
class _PyreholProtection(_PyreholStanza):
label = 'protection'
_is_setter = True
def __init__(self, protection_level, root=None):
"""The flood/invalid packet protection level for this block
:param protection_level: http://firehol.org/firehol-manual/firehol-protection/
"""
super(_PyreholProtection, self).__init__(root=root)
self.text = '%s %s' % (self.label, protection_level)
| 3,780
| 5,408
| 397
|
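A hypothetical usage sketch for the Pyrehol wrapper in the row above (the package targets Python 2); the service name, interface, and rules are illustrative, and add_interface/set_policy/add_server are the helpers generated from the class labels by the _PyreholChainable metaclass.

from pyrehol import Pyrehol
fw = Pyrehol()
fw.define_service("myapp", "tcp/8080")      # register a custom service (illustrative portspec)
lan = fw.add_interface("lan", "eth0")       # interface block for eth0
lan.set_policy("drop")                      # default policy for the block
lan.add_server(["ssh", "myapp"], "accept")  # allow inbound ssh and the custom service
fw.emit()                                   # prints the firehol config to stdout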
6b990586083411f6986cff94ef38575c8d9185de
| 691
|
py
|
Python
|
Lib/site-packages/win32/test/test_win32print.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | 1
|
2022-02-25T13:46:54.000Z
|
2022-02-25T13:46:54.000Z
|
Lib/site-packages/win32/test/test_win32print.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/win32/test/test_win32print.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | 1
|
2022-02-25T13:47:47.000Z
|
2022-02-25T13:47:47.000Z
|
# Tests (scarce) for win32print module
import os
import unittest
import win32print as wprn
if __name__ == "__main__":
unittest.main()
| 26.576923
| 81
| 0.677279
|
# Tests (scarce) for win32print module
import os
import unittest
import win32print as wprn
class Win32PrintTestCase(unittest.TestCase):
def setUp(self):
self.printer_idx = 0
self.printer_levels_all = list(range(1, 10))
self.local_printers = wprn.EnumPrinters(wprn.PRINTER_ENUM_LOCAL, None, 1)
def test_printer_levels_read_dummy(self):
if not self.local_printers:
print("Test didn't run (no local printers)!")
return
ph = wprn.OpenPrinter(self.local_printers[self.printer_idx][2])
for level in self.printer_levels_all:
wprn.GetPrinter(ph, level)
if __name__ == "__main__":
unittest.main()
| 449
| 23
| 76
|
1a6e55efc63a6e46a26098f90cb96d56d38f48aa
| 7,070
|
py
|
Python
|
modelvshuman/datasets/create_dataset.py
|
TizianThieringer/model-vs-human
|
17729b8167520f682d93d55c340c27de07bb2681
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 158
|
2021-06-04T15:19:58.000Z
|
2022-03-30T00:31:28.000Z
|
modelvshuman/datasets/create_dataset.py
|
TizianThieringer/model-vs-human
|
17729b8167520f682d93d55c340c27de07bb2681
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 7
|
2021-07-20T03:57:34.000Z
|
2022-02-01T11:00:47.000Z
|
modelvshuman/datasets/create_dataset.py
|
TizianThieringer/model-vs-human
|
17729b8167520f682d93d55c340c27de07bb2681
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 14
|
2021-06-16T13:33:11.000Z
|
2022-03-29T15:04:09.000Z
|
#!/usr/bin/env python3
"""
Create dataset and experiments.
A dataset is a directory with subdirectories, one subdir per class.
An experiment is a directory with subdirectories, one subdir per participant.
"""
import os
from os.path import join as pjoin
from os import listdir as ld
import numpy as np
import shutil
import sys
from PIL import Image
import numpy as np
import math
from torchvision import transforms
from ..helper import human_categories as hc
from .. import constants as consts
def resize_crop_image(input_file,
resize_size,
crop_size):
"""Replace input_file with resized and cropped version (png)."""
img = Image.open(input_file)
t = transforms.Compose([transforms.Resize(resize_size),
transforms.CenterCrop(crop_size)])
new_img = t(img)
os.remove(input_file)
new_img.save(input_file.replace(".JPEG", ".png"), 'png')
def create_experiment(expt_name,
expt_abbreviation,
expt_source_dir,
expt_target_dir,
only_dnn=True,
num_subjects=1,
rng=None):
"""Create human / CNN experiment.
parameters:
- only_dnn: boolean indicating whether this is a DNN experiment
or not (if not, a human experiment will be created.)
"""
if not only_dnn:
assert rng is not None, "Please specify random number generator (rng)!"
assert("_" not in expt_name), "no '_' in experiment name!"
assert(os.path.exists(expt_source_dir)), "directory "+expt_source_dir+" does not exist."
for i in range(0, num_subjects+1):
if i==0:
subject_abbreviation = "dnn"
subject_name="dnn"
else:
subject_abbreviation = "s"+get_leading_zeros(i, 2)
subject_name = "subject-"+get_leading_zeros(i, 2)
print("Creating experiment for subject: '"+subject_name+"'")
target_dir = pjoin(expt_target_dir, expt_name,
subject_name, "session-1")
if os.path.exists(target_dir):
print("Error: target directory "+target_dir+" does already exist.")
sys.exit(1)
else:
os.makedirs(target_dir)
img_list = []
for c in sorted(hc.get_human_object_recognition_categories()):
for x in sorted(ld(pjoin(expt_source_dir, c))):
input_file = pjoin(expt_source_dir, c, x)
img_list.append(input_file)
order = np.arange(len(img_list))
if i != 0:
rng.shuffle(order)
for i, img_index in enumerate(order):
input_file = img_list[img_index]
imgname = input_file.split("/")[-1]
correct_category = input_file.split("/")[-2]
condition = "0"
target_image_path = pjoin(target_dir,
(get_leading_zeros(i+1)+"_"+
expt_abbreviation+"_"+
subject_abbreviation+"_"+
condition+"_"+
correct_category+"_"+
"00_"+
imgname))
shutil.copyfile(input_file, target_image_path)
| 36.632124
| 121
| 0.599717
|
#!/usr/bin/env python3
"""
Create dataset and experiments.
A dataset is a directory with subdirectories, one subdir per class.
An experiment is a directory with subdirectories, one subdir per participant.
"""
import os
from os.path import join as pjoin
from os import listdir as ld
import numpy as np
import shutil
import sys
from PIL import Image
import numpy as np
import math
from torchvision import transforms
from ..helper import human_categories as hc
from .. import constants as consts
def resize_crop_image(input_file,
resize_size,
crop_size):
"""Replace input_file with resized and cropped version (png)."""
img = Image.open(input_file)
t = transforms.Compose([transforms.Resize(resize_size),
transforms.CenterCrop(crop_size)])
new_img = t(img)
os.remove(input_file)
new_img.save(input_file.replace(".JPEG", ".png"), 'png')
def create_dataset(original_dataset_path,
target_dataset_path,
rng,
min_num_imgs_per_class,
max_num_imgs_per_class,
target_resize_size,
target_crop_size):
"Create a balanced dataset from a larger (potentially unbalanced) dataset."""
categories = hc.HumanCategories()
class_count_dict = dict()
image_path_dict = dict()
for human_category in sorted(hc.get_human_object_recognition_categories()):
class_count_dict[human_category] = 0
image_path_dict[human_category] = list()
for c in sorted(os.listdir(original_dataset_path)):
human_category = categories.get_human_category_from_WNID(c)
if human_category is not None:
class_count_dict[human_category] += len(os.listdir(pjoin(original_dataset_path,
c)))
for image_name in sorted(os.listdir(pjoin(original_dataset_path, c))):
image_path_dict[human_category].append(pjoin(original_dataset_path,
c, image_name))
count = 0
maximum = 0
minimum = np.Inf
for c in sorted(os.listdir(original_dataset_path)):
num = len(os.listdir(pjoin(original_dataset_path, c)))
count += num
if num > maximum:
maximum = num
if num < minimum:
minimum = num
min_16_classes = np.Inf
for k, v in class_count_dict.items():
if v < min_16_classes:
min_16_classes = v
print("Total image count: "+str(count))
print("Max #images per class: "+str(maximum))
print("Min #images per class: "+str(minimum))
print("Min #images within 16 classes: "+str(min_16_classes))
print(class_count_dict)
assert min_16_classes >= min_num_imgs_per_class, "not enough images"
num_imgs_per_target_class = max_num_imgs_per_class
if min_16_classes < num_imgs_per_target_class:
num_imgs_per_target_class = min_16_classes
if not os.path.exists(target_dataset_path):
print("Creating directory "+target_dataset_path)
os.makedirs(target_dataset_path)
else:
raise OSError("target dataset already exists: "+target_dataset_path)
for human_category in sorted(hc.get_human_object_recognition_categories()):
print("Creating category "+human_category)
category_dir = pjoin(target_dataset_path, human_category)
if not os.path.exists(category_dir):
os.makedirs(category_dir)
num_images = class_count_dict[human_category]
assert num_images >= min_16_classes, "not enough images found"
choice = rng.choice(num_images, num_imgs_per_target_class, replace=False)
assert len(choice) <= len(image_path_dict[human_category])
assert len(choice) == num_imgs_per_target_class
for image_index in choice:
image_index_str = str(image_index+1)
while len(image_index_str) < 4:
image_index_str = "0"+image_index_str
image_path = image_path_dict[human_category][image_index]
target_image_path = pjoin(target_dataset_path, human_category,
human_category+"-"+image_index_str+"-"+image_path.split("/")[-1].replace("_", "-"))
shutil.copyfile(image_path, target_image_path)
resize_crop_image(target_image_path, target_resize_size,
target_crop_size)
def create_experiment(expt_name,
expt_abbreviation,
expt_source_dir,
expt_target_dir,
only_dnn=True,
num_subjects=1,
rng=None):
"""Create human / CNN experiment.
parameters:
- only_dnn: boolean indicating whether this is a DNN experiment
or not (if not, a human experiment will be created.)
"""
if not only_dnn:
assert rng is not None, "Please specify random number generator (rng)!"
assert("_" not in expt_name), "no '_' in experiment name!"
assert(os.path.exists(expt_source_dir)), "directory "+expt_source_dir+" does not exist."
for i in range(0, num_subjects+1):
if i==0:
subject_abbreviation = "dnn"
subject_name="dnn"
else:
subject_abbreviation = "s"+get_leading_zeros(i, 2)
subject_name = "subject-"+get_leading_zeros(i, 2)
print("Creating experiment for subject: '"+subject_name+"'")
target_dir = pjoin(expt_target_dir, expt_name,
subject_name, "session-1")
if os.path.exists(target_dir):
print("Error: target directory "+target_dir+" does already exist.")
sys.exit(1)
else:
os.makedirs(target_dir)
img_list = []
for c in sorted(hc.get_human_object_recognition_categories()):
for x in sorted(ld(pjoin(expt_source_dir, c))):
input_file = pjoin(expt_source_dir, c, x)
img_list.append(input_file)
order = np.arange(len(img_list))
if i != 0:
rng.shuffle(order)
for i, img_index in enumerate(order):
input_file = img_list[img_index]
imgname = input_file.split("/")[-1]
correct_category = input_file.split("/")[-2]
condition = "0"
target_image_path = pjoin(target_dir,
(get_leading_zeros(i+1)+"_"+
expt_abbreviation+"_"+
subject_abbreviation+"_"+
condition+"_"+
correct_category+"_"+
"00_"+
imgname))
shutil.copyfile(input_file, target_image_path)
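# The copied stimuli follow the pattern
# <index>_<expt_abbreviation>_<subject_abbreviation>_<condition>_<category>_00_<imgname>,
# e.g. (hypothetical filename) "0001_expt_s01_0_dog_00_dog-0001-example.png".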
def get_leading_zeros(num, length=4):
return ("0"*length+str(num))[-length:]
| 3,615
| 0
| 46
|
989c61bb976af823ae52ec40394e5d1ec9e1e616
| 18,061
|
py
|
Python
|
roxbot/cogs/voice.py
|
TBTerra/roxbot
|
14ae7524201d6f795eefd33041c7a97b8868a521
|
[
"MIT"
] | null | null | null |
roxbot/cogs/voice.py
|
TBTerra/roxbot
|
14ae7524201d6f795eefd33041c7a97b8868a521
|
[
"MIT"
] | null | null | null |
roxbot/cogs/voice.py
|
TBTerra/roxbot
|
14ae7524201d6f795eefd33041c7a97b8868a521
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2017-2018 Roxanne Gibson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import asyncio
import discord
import datetime
import youtube_dl
from math import ceil
from discord.ext import commands
import roxbot
from roxbot import guild_settings
def _clear_cache():
"""Clears the cache folder for the music bot. Ignores the ".gitignore" file to avoid deleting versioned files."""
for file in os.listdir("roxbot/cache"):
if file != ".gitignore":
os.remove("roxbot/cache/{}".format(file))
# Suppress noise about console usage from errors
youtube_dl.utils.bug_reports_message = lambda: ''
ytdl_format_options = {
'format': 'bestaudio/best',
'outtmpl': './roxbot/cache/%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
}
ffmpeg_options = {
'before_options': '-nostdin',
'options': '-vn -loglevel panic --force-ipv4'
}
ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
class ModifiedFFmpegPMCAudio(discord.FFmpegPCMAudio):
"""Modifies the read function of FFmpegPCMAudio to add a timer.
Thanks to eliza(nearlynon#3292) for teaching me how to do this"""
| 38.509595
| 173
| 0.71253
|
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2017-2018 Roxanne Gibson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import asyncio
import discord
import datetime
import youtube_dl
from math import ceil
from discord.ext import commands
import roxbot
from roxbot import guild_settings
def _clear_cache():
"""Clears the cache folder for the music bot. Ignores the ".gitignore" file to avoid deleting versioned files."""
for file in os.listdir("roxbot/cache"):
if file != ".gitignore":
os.remove("roxbot/cache/{}".format(file))
def volume_perms():
def predicate(ctx):
gs = guild_settings.get(ctx.guild)
if gs.voice["need_perms"]: # Had to copy the admin or mod code cause it wouldn't work ;-;
if ctx.message.author.id == roxbot.owner:
return True
else:
admin_roles = gs.perm_roles["admin"]
mod_roles = gs.perm_roles["mod"]
for role in ctx.author.roles:
if role.id in mod_roles or role.id in admin_roles:
return True
return False
else:
return True
return commands.check(predicate)
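# Usage sketch (assumption, mirroring how the other checks in this cog are applied):
# decorate a command with @volume_perms() so that, when the guild's voice "need_perms"
# setting is enabled, only admins/mods (or the bot owner) may invoke it.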
# Suppress noise about console usage from errors
youtube_dl.utils.bug_reports_message = lambda: ''
ytdl_format_options = {
'format': 'bestaudio/best',
'outtmpl': './roxbot/cache/%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
}
ffmpeg_options = {
'before_options': '-nostdin',
'options': '-vn -loglevel panic --force-ipv4'
}
ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
class ModifiedFFmpegPMCAudio(discord.FFmpegPCMAudio):
"""Modifies the read function of FFmpegPCMAudio to add a timer.
Thanks to eliza(nearlynon#3292) for teaching me how to do this"""
def __init__(self, source, options):
super().__init__(source, **options)
self.timer = 0
def read(self):
self.timer += 20
return super().read()
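# Note (added for clarity): discord.py requests one 20 ms audio frame per read() call,
# so self.timer approximates the elapsed playback time in milliseconds.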
class YTDLSource(discord.PCMVolumeTransformer):
def __init__(self, source, *, data, volume):
self.source = source
super().__init__(self.source, volume)
self.data = data
self.title = data.get('title')
self.uploader = data.get("uploader")
self.uploader_url = data.get("uploader_url")
self.url = data.get('url')
self.duration = data.get("duration")
self.host = data.get("extractor_key")
self.webpage_url = data.get('webpage_url')
self.thumbnail_url = data.get("thumbnail", "")
@classmethod
async def from_url(cls, url, *, loop=None, stream=False, volume=0.2):
loop = loop or asyncio.get_event_loop()
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
if 'entries' in data:
# take first item from a playlist. This shouldn't need to happen but in case it does.
data = data['entries'][0]
filename = data['url'] if stream else ytdl.prepare_filename(data)
return cls(ModifiedFFmpegPMCAudio(filename, ffmpeg_options), data=data, volume=volume)
class Voice:
def __init__(self, bot):
# Auto Cleanup cache files on boot
_clear_cache()
# Setup variables and then add dictionary entries for all guilds the bot can see on boot-up.
self.bot = bot
self._volume = {}
self.playlist = {} # All audio to be played
self.skip_votes = {}
self.am_queuing = {}
self.now_playing = {} # Currently playing audio
self.queue_logic = {}
for guild in bot.guilds:
self._volume[guild.id] = 0.2
self.playlist[guild.id] = []
self.skip_votes[guild.id] = []
self.am_queuing[guild.id] = False
self.now_playing[guild.id] = None
self.queue_logic[guild.id] = None
@staticmethod
def _format_duration(duration):
"""Static method to turn the duration of a file (in seconds) into something presentable for the user"""
if not duration:
return duration
hours = duration // 3600
minutes = (duration % 3600) // 60
seconds = duration % 60
format_me = {"second": int(seconds), "minute": int(minutes), "hour": int(hours)}
formatted = datetime.time(**format_me)
output = "{:%M:%S}".format(formatted)
if formatted.hour >= 1:
output = "{:%H:}".format(formatted) + output
return output
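# e.g. _format_duration(125) == "02:05" and _format_duration(3750) == "01:02:30".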
async def _queue_logic(self, ctx):
"""Background task designed to help the bot move on to the next video in the queue"""
sleep_for = 0.5
try:
while ctx.voice_client.is_playing() or ctx.voice_client.is_paused():
await asyncio.sleep(sleep_for)
except AttributeError:
pass # This is to stop any errors appearing if the bot suddenly leaves voice chat.
self.now_playing[ctx.guild.id] = None
self.skip_votes[ctx.guild.id] = []
if self.playlist[ctx.guild.id] and ctx.voice_client:
player = self.playlist[ctx.guild.id].pop(0)
await ctx.invoke(self.play, url=player, stream=player.get("stream", False), from_queue=True)
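# In short: this coroutine polls every 0.5 s until playback stops, then resets the
# now-playing/skip-vote state and re-invokes play with the next queued entry, if any.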
def _queue_song(self, ctx, video, stream):
"""Fuction to queue up a video into the playlist."""
video["stream"] = stream
video["queued_by"] = ctx.author
self.playlist[ctx.guild.id].append(video)
return video
def _generate_np_embed(self, guild, playing_status):
np = self.now_playing[guild.id]
title = "{0}: '{1.title}' from {1.host}".format(playing_status, np)
duration = self._format_duration(np.duration)
time_played = self._format_duration(np.source.timer/1000)
embed = discord.Embed(title=title, colour=roxbot.EmbedColours.pink, url=np.webpage_url)
embed.description = "Uploaded by: [{0.uploader}]({0.uploader_url})\nURL: [Here]({0.webpage_url})\nDuration: {1}\nQueued by: {0.queued_by}".format(np, duration)
embed.set_image(url=np.thumbnail_url)
embed.set_footer(text="{}/{} | Volume: {}%".format(time_played, duration, int(self.now_playing[guild.id].volume*100)))
return embed
async def on_guild_join(self, guild):
"""Makes sure that when the bot joins a guild it won't need to reboot for the music bot to work."""
self.playlist[guild.id] = []
self.skip_votes[guild.id] = []
self.am_queuing[guild.id] = False
self.now_playing[guild.id] = None
self.queue_logic[guild.id] = None
@roxbot.checks.is_admin_or_mod()
@commands.command()
async def join(self, ctx, *, channel: discord.VoiceChannel = None):
"""Joins the voice channel your in."""
# Get channel
if channel is None:
try:
channel = ctx.author.voice.channel
except AttributeError:
raise commands.CommandError("Failed to join voice channel. Please specify a channel or join one for Roxbot to join.")
# Join VoiceChannel
if ctx.voice_client is not None:
await ctx.voice_client.move_to(channel)
else:
await channel.connect()
return await ctx.send("Joined {0.name} :ok_hand:".format(channel))
@commands.command(hidden=True, enabled=False)
async def play_local(self, ctx, *, query):
"""Plays a file from the local filesystem."""
source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(query))
ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)
await ctx.send('Now playing: {}'.format(query))
@commands.cooldown(1, 0.5, commands.BucketType.guild)
@commands.command(aliases=["yt"])
async def play(self, ctx, *, url, stream=False, from_queue=False):
"""Plays from a url or search query (almost anything youtube_dl supports)"""
voice = guild_settings.get(ctx.guild).voice
guild = ctx.guild
# Checks if invoker is in voice with the bot. Skips admins and mods and owner.
if not roxbot.checks._is_admin_or_mod(ctx) or from_queue:
if not ctx.author.voice:
raise commands.CommandError("You're not in the same voice channel as Roxbot.")
if ctx.author.voice.channel != ctx.voice_client.channel:
raise commands.CommandError("You're not in the same voice channel as Roxbot.")
# For internal speed. This should make the playlist management quicker when play is being invoked internally.
if isinstance(url, dict):
video = url
url = video.get("webpage_url")
else:
video = ytdl.extract_info(url, download=False)
# Playlist and search handling.
if 'entries' in video and video.get("extractor_key") != "YoutubeSearch":
await ctx.send("Looks like you have given me a playlist. I will que up all {} videos in the playlist.".format(len(video.get("entries"))))
data = dict(video)
video = data["entries"].pop(0)
for entry in data["entries"]:
self._queue_song(ctx, entry, stream)
elif 'entries' in video and video.get("extractor_key") == "YoutubeSearch":
video = video["entries"][0]
# Duration limiter handling
if video.get("duration", 1) > voice["max_length"] and not roxbot.checks._is_admin_or_mod(ctx):
raise commands.CommandError("Cannot play video, duration is bigger than the max duration allowed.")
# Actual playing stuff section.
# If not playing and not queuing, and not paused, play the song. Otherwise queue it.
if (not ctx.voice_client.is_playing() and self.am_queuing[guild.id] is False) and not ctx.voice_client.is_paused():
self.am_queuing[guild.id] = True
async with ctx.typing():
player = await YTDLSource.from_url(url, loop=self.bot.loop, stream=stream, volume=self._volume[ctx.guild.id])
player.stream = stream
player.queued_by = ctx.author
self.now_playing[guild.id] = player
self.am_queuing[guild.id] = False
ctx.voice_client.play(player, after=lambda e: print('Player error: %s' % e) if e else None)
# Create task to deal with what to do when the video ends or is skipped and how to handle the queue
self.queue_logic[ctx.guild.id] = self.bot.loop.create_task(self._queue_logic(ctx))
embed = self._generate_np_embed(ctx.guild, "Now Playing")
await ctx.send(embed=embed)
else:
# Queue the song as there is already a song playing or paused.
self._queue_song(ctx, video, stream)
# Sleep because if not, queued up things will send first and probably freak out users or something
while self.am_queuing[guild.id] is True:
await asyncio.sleep(0.5)
embed = discord.Embed(description='Added "{}" to queue'.format(video.get("title")), colour=roxbot.EmbedColours.pink)
await ctx.send(embed=embed)
@commands.cooldown(1, 0.5, commands.BucketType.guild)
@commands.command()
async def stream(self, ctx, *, url):
"""Streams given link. Good for Twitch. (same as play, but doesn't predownload)"""
# Just invoke the play command with the stream argument as true. Deals with everything else anyway.
return await ctx.invoke(self.play, url=url, stream=True)
@play.before_invoke
@stream.before_invoke
@play_local.before_invoke
async def ensure_voice(self, ctx):
"""Ensures the bot is in a voice channel before continuing and if it cannot auto join, raise an error."""
if ctx.voice_client is None:
if ctx.author.voice:
await ctx.author.voice.channel.connect()
else:
raise commands.CommandError("Roxbot is not connected to a voice channel and couldn't auto-join a voice channel.")
@volume_perms()
@commands.command()
async def volume(self, ctx, volume):
"""Changes the player's volume. Only accepts integers representing x% between 0-100% or "show", which will show the current volume."""
if ctx.voice_client is None:
raise commands.CommandError("Roxbot is not in a voice channel.")
try:
volume = int(volume)
except ValueError:
pass
if volume != "show" and not isinstance(volume, int):
raise commands.BadArgument("Not int or 'show'")
elif volume == "show":
return await ctx.send("Volume: {}%".format(self._volume[ctx.guild.id]*100))
if 0 < volume <= 100:
ctx.voice_client.source.volume = volume / 100 # Volume needs to be a float between 0 and 1... kinda
self._volume[ctx.guild.id] = volume / 100 # Volume needs to be a float between 0 and 1... kinda
else:
raise commands.CommandError("Volume needs to be between 0-100%")
return await ctx.send("Changed volume to {}%".format(volume))
@commands.command()
async def pause(self, ctx):
"""Pauses the current video, if playing."""
if ctx.voice_client is None:
raise commands.CommandError("Roxbot is not in a voice channel.")
else:
if not ctx.voice_client.is_playing():
return await ctx.send("Nothing is playing.")
elif ctx.voice_client.is_paused():
return await ctx.send("I already am paused!")
else:
ctx.voice_client.pause()
return await ctx.send("Paused '{}'".format(ctx.voice_client.source.title))
@commands.command()
async def resume(self, ctx):
"""Resumes the bot if paused. Also will play the next thing in the queue if the bot is stuck."""
if ctx.voice_client is None:
if len(self.playlist[ctx.guild.id]) < 1:
raise commands.CommandError("Roxbot is not in a voice channel.")
else:
video = self.playlist[ctx.guild.id].pop(0)
await ctx.invoke(self.play, url=video)
else:
if ctx.voice_client.is_paused():
ctx.voice_client.resume()
return await ctx.send("Resumed '{}'".format(ctx.voice_client.source.title))
else:
if ctx.voice_client.is_playing():
return await ctx.send("Can't resume if I'm already playing something!")
else:
return await ctx.send("Nothing to resume.")
@commands.command()
async def skip(self, ctx, option=""):
"""Skips or votes to skip the current video. Use option "--force" if your an admin and """
voice = guild_settings.get(ctx.guild).voice
if ctx.voice_client.is_playing():
if voice["skip_voting"] and not (option == "--force" and roxbot.checks._is_admin_or_mod(ctx)): # Admin force skipping
if ctx.author in self.skip_votes[ctx.guild.id]:
return await ctx.send("You have already voted to skip the current track.")
else:
self.skip_votes[ctx.guild.id].append(ctx.author)
# -1 due to the bot being counted in the members generator
ratio = len(self.skip_votes[ctx.guild.id]) / (len(ctx.voice_client.channel.members) - 1)
needed_users = ceil((len(ctx.voice_client.channel.members) - 1) * voice["skip_ratio"])
if ratio >= voice["skip_ratio"]:
await ctx.send("{} voted the skip the video.".format(ctx.author))
await ctx.send("Votes to skip now playing has been met. Skipping video...")
self.skip_votes[ctx.guild.id] = []
else:
await ctx.send("{} voted the skip the song.".format(ctx.author))
return await ctx.send("{}/{} votes required to skip the video. To vote, use the command `{}skip`".format(len(self.skip_votes[ctx.guild.id]), needed_users, ctx.prefix))
else:
await ctx.send("Skipped video")
# This should be fine as the queue_logic function should handle moving to the next song and all that.
self.now_playing[ctx.guild.id] = None
ctx.voice_client.stop()
else:
await ctx.send("I'm not playing anything.")
@commands.command(aliases=["np"])
async def nowplaying(self, ctx):
"""Displays the video now playing."""
if self.now_playing[ctx.guild.id] is None:
return await ctx.send("Nothing is playing.")
else:
if ctx.voice_client.is_paused():
x = "Paused"
else:
x = "Now Playing"
embed = self._generate_np_embed(ctx.guild, x)
return await ctx.send(embed=embed)
@commands.command()
async def queue(self, ctx):
"""Displays what videos are queued up and waiting to be played."""
output = ""
index = 1
for video in self.playlist[ctx.guild.id]:
output += "{}) '{}' queued by {}\n".format(index, video["title"], video["queued_by"])
index += 1
if output == "":
output = "Nothing is up next. Maybe you should add something!"
embed = discord.Embed(title="Queue", description=output, colour=roxbot.EmbedColours.pink)
return await ctx.send(embed=embed)
@roxbot.checks.is_admin_or_mod()
@commands.command()
async def remove(self, ctx, index):
"""Removes a item from the queue with the given index. Can also input all to delete all queued items."""
# Try and convert index into an into. If not possible, just move forward
try:
index = int(index)
except ValueError:
pass
# If not str "all" or an int, raise error.
if index != "all" and not isinstance(index, int):
raise commands.CommandError("No valid option given.")
elif index == "all":
# Remove all queued items
length = len(self.playlist[ctx.guild.id])
self.playlist[ctx.guild.id] = []
return await ctx.send("Removed all queued videos. ({})".format(length))
else:
try:
# Try and remove item using index.
removed = self.playlist[ctx.guild.id].pop(index-1) # -1 because queue index shown starts from 1, not 0
return await ctx.send("Removed '{}' from the queue.".format(removed.get("title", index)))
except IndexError:
raise commands.CommandError("Valid Index not given.")
@roxbot.checks.is_admin_or_mod()
@commands.command(aliases=["disconnect"])
async def stop(self, ctx):
"""Stops and disconnects the bot from voice."""
if ctx.voice_client is None:
raise commands.CommandError("Roxbot is not in a voice channel.")
else:
# Clear up variables before stopping.
self.playlist[ctx.guild.id] = []
self.now_playing[ctx.guild.id] = None
self.queue_logic[ctx.guild.id].cancel()
await ctx.voice_client.disconnect()
return await ctx.send(":wave:")
def setup(bot_client):
bot_client.add_cog(Voice(bot_client))
| 2,803
| 12,821
| 139
|
4b499df6f92f959afcd92c59e28a63a941fc08bc
| 729
|
py
|
Python
|
ebr_trackerbot/command/untrack.py
|
tomtom-international/ebr-trackerbot
|
327e43a1708aa01ef2022a3d786a72eab9274872
|
[
"Apache-2.0"
] | null | null | null |
ebr_trackerbot/command/untrack.py
|
tomtom-international/ebr-trackerbot
|
327e43a1708aa01ef2022a3d786a72eab9274872
|
[
"Apache-2.0"
] | 12
|
2019-08-02T12:31:47.000Z
|
2019-08-16T11:45:42.000Z
|
ebr_trackerbot/command/untrack.py
|
LaudateCorpus1/ebr-trackerbot
|
327e43a1708aa01ef2022a3d786a72eab9274872
|
[
"Apache-2.0"
] | 1
|
2021-09-14T04:04:36.000Z
|
2021-09-14T04:04:36.000Z
|
"""
Slack Bot Untrack Command
"""
import logging
from ebr_trackerbot.bot import register_command, get_storage
def untrack_command(text, result, payload, config, commands):
"""
Slack Bot Untrack Command
"""
logging.debug("Untrack command")
test = result.group(1)
get_storage()["delete_for_user"](payload["data"]["user"], test)
payload["web_client"].chat_postMessage(
channel=payload["data"]["channel"],
text="Tracking was stopped for test *" + test + "*",
thread_ts=payload["data"]["ts"],
)
register_command(
"untrack", "Stops test tracking. Command syntax: untrack full_testname", "^untrack ([^ ]+)$", untrack_command
)
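# Example (sketch; the test name below is hypothetical): a message like
# "untrack com.example.MyTest" matches "^untrack ([^ ]+)$" above and stops tracking that one test.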
logging.info("Untrack command registered")
| 26.035714
| 113
| 0.672154
|
"""
Slack Bot Untrack Command
"""
import logging
from ebr_trackerbot.bot import register_command, get_storage
def untrack_command(text, result, payload, config, commands):
"""
Slack Bot Untrack Command
"""
logging.debug("Untrack command")
test = result.group(1)
get_storage()["delete_for_user"](payload["data"]["user"], test)
payload["web_client"].chat_postMessage(
channel=payload["data"]["channel"],
text="Tracking was stopped for test *" + test + "*",
thread_ts=payload["data"]["ts"],
)
register_command(
"untrack", "Stops test tracking. Command syntax: untrack full_testname", "^untrack ([^ ]+)$", untrack_command
)
logging.info("Untrack command registered")
| 0
| 0
| 0
|
a4744648f99aed2d6c2a7a6e702f3a4e944b14f0
| 7,651
|
py
|
Python
|
RecoEgamma/EgammaMCTools/test/pfClusterForCalibration.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
RecoEgamma/EgammaMCTools/test/pfClusterForCalibration.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
RecoEgamma/EgammaMCTools/test/pfClusterForCalibration.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
# EGM skimmer
# Author: Rafael Lopes de Sa
import FWCore.ParameterSet.Config as cms
# Run with the 2017 detector
from Configuration.Eras.Era_Run2_2017_cff import Run2_2017
process = cms.Process('SKIM',Run2_2017)
# Import the standard packages for reconstruction and digitization
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.Digi_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('RecoEgamma.EgammaMCTools.pfClusterMatchedToPhotonsSelector_cfi')
# Global Tag configuration ... just using the same as in the RelVal
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '81X_upgrade2017_realistic_v26', '')
process.MessageLogger.cerr.threshold = 'ERROR'
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.options = cms.untracked.PSet( allowUnscheduled = cms.untracked.bool(True) )
# This is where users have some control.
# Define which collections to save and which data format we are using
savedCollections = cms.untracked.vstring('drop *',
# The commented ones are large collections that can be kept for debug
# 'keep EcalRecHitsSorted_*_*_*',
# 'keep recoPFClusters_*_*_*',
# 'keep recoCaloClusters_*_*_*',
# 'keep recoSuperClusters_*_*_*',
# 'keep recoGsfElectron*_*_*_*',
# 'keep recoPhoton*_*_*_*',
# 'keep *_mix_MergedTrackTruth_*',
'keep *_reducedEcalRecHits*_*_*',
'keep double_fixedGridRho*_*_*',
'keep recoGenParticles_*_*_*',
'keep GenEventInfoProduct_*_*_*',
'keep PileupSummaryInfos_*_*_*',
'keep *_ecalDigis_*_*',
'keep *_offlinePrimaryVertices_*_*',
'keep *_particleFlowCluster*_*_*')
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(15))
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/AODSIM/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/005AB6CE-27ED-E611-98CA-E0071B7A8590.root'
),
secondaryFileNames = cms.untracked.vstring(
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/0416D6B7-04ED-E611-B342-E0071B7A8550.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/14829DD8-04ED-E611-8049-A0000420FE80.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/54AFE9C4-04ED-E611-952D-A0000420FE80.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/5A32C6B9-04ED-E611-B1EB-E0071B7A8550.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/60E162B8-04ED-E611-898D-E0071B7A58F0.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/6A47DD1A-FEEC-E611-81EB-A0000420FE80.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/92B923B6-04ED-E611-9DC9-24BE05C48821.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/B40E77B4-04ED-E611-9E30-E0071B7A45D0.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/C48157B5-04ED-E611-BEC1-E0071B7A45D0.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/CAED3A16-FEEC-E611-8262-24BE05CEFB41.root'
)
)
process.PFCLUSTERoutput = cms.OutputModule("PoolOutputModule",
dataset = cms.untracked.PSet(dataTier = cms.untracked.string('RECO'),
filterName = cms.untracked.string('')
),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
fileName = cms.untracked.string('skimEGMobjects_fromRAW.root'),
outputCommands = savedCollections,
splitLevel = cms.untracked.int32(0)
)
# Run the digitizer to make the trackingparticles
process.mix.digitizers = cms.PSet(process.theDigitizersValid)
process.trackingtruth_step = cms.Path(process.pdigi_valid)
# Remake the PFClusters
process.pfclusters_step = cms.Path(process.bunchSpacingProducer *
process.ecalDigis *
process.ecalPreshowerDigis *
process.ecalPreshowerRecHit *
process.ecalMultiFitUncalibRecHit *
process.ecalDetIdToBeRecovered *
process.ecalRecHit *
process.particleFlowRecHitPS *
process.particleFlowRecHitECAL *
process.particleFlowClusterECALUncorrected *
process.particleFlowClusterPS *
process.particleFlowClusterECAL)
# Select the PFClusters we want to calibrate
process.particleFlowClusterECALMatchedToPhotons = process.pfClusterMatchedToPhotonsSelector.clone()
process.selection_step = cms.Path(process.particleFlowClusterECALMatchedToPhotons)
# Ends job and writes our output
process.endjob_step = cms.EndPath(process.endOfProcess)
process.output_step = cms.EndPath(process.PFCLUSTERoutput)
# Schedule definition, rebuilding rechits
process.schedule = cms.Schedule(process.trackingtruth_step,process.pfclusters_step,process.selection_step,process.endjob_step,process.output_step)
| 67.114035
| 191
| 0.66671
|
# EGM skimmer
# Author: Rafael Lopes de Sa
import FWCore.ParameterSet.Config as cms
# Run with the 2017 detector
from Configuration.Eras.Era_Run2_2017_cff import Run2_2017
process = cms.Process('SKIM',Run2_2017)
# Import the standard packages for reconstruction and digitization
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.Digi_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('RecoEgamma.EgammaMCTools.pfClusterMatchedToPhotonsSelector_cfi')
# Global Tag configuration ... just using the same as in the RelVal
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '81X_upgrade2017_realistic_v26', '')
process.MessageLogger.cerr.threshold = 'ERROR'
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.options = cms.untracked.PSet( allowUnscheduled = cms.untracked.bool(True) )
# This is where users have some control.
# Define which collections to save and which data format we are using
savedCollections = cms.untracked.vstring('drop *',
# The commented ones are large collections that can be kept for debug
# 'keep EcalRecHitsSorted_*_*_*',
# 'keep recoPFClusters_*_*_*',
# 'keep recoCaloClusters_*_*_*',
# 'keep recoSuperClusters_*_*_*',
# 'keep recoGsfElectron*_*_*_*',
# 'keep recoPhoton*_*_*_*',
# 'keep *_mix_MergedTrackTruth_*',
'keep *_reducedEcalRecHits*_*_*',
'keep double_fixedGridRho*_*_*',
'keep recoGenParticles_*_*_*',
'keep GenEventInfoProduct_*_*_*',
'keep PileupSummaryInfos_*_*_*',
'keep *_ecalDigis_*_*',
'keep *_offlinePrimaryVertices_*_*',
'keep *_particleFlowCluster*_*_*')
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(15))
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/AODSIM/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/005AB6CE-27ED-E611-98CA-E0071B7A8590.root'
),
secondaryFileNames = cms.untracked.vstring(
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/0416D6B7-04ED-E611-B342-E0071B7A8550.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/14829DD8-04ED-E611-8049-A0000420FE80.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/54AFE9C4-04ED-E611-952D-A0000420FE80.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/5A32C6B9-04ED-E611-B1EB-E0071B7A8550.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/60E162B8-04ED-E611-898D-E0071B7A58F0.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/6A47DD1A-FEEC-E611-81EB-A0000420FE80.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/92B923B6-04ED-E611-9DC9-24BE05C48821.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/B40E77B4-04ED-E611-9E30-E0071B7A45D0.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/C48157B5-04ED-E611-BEC1-E0071B7A45D0.root',
'/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/CAED3A16-FEEC-E611-8262-24BE05CEFB41.root'
)
)
process.PFCLUSTERoutput = cms.OutputModule("PoolOutputModule",
dataset = cms.untracked.PSet(dataTier = cms.untracked.string('RECO'),
filterName = cms.untracked.string('')
),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
fileName = cms.untracked.string('skimEGMobjects_fromRAW.root'),
outputCommands = savedCollections,
splitLevel = cms.untracked.int32(0)
)
# Run the digitizer to make the trackingparticles
process.mix.digitizers = cms.PSet(process.theDigitizersValid)
process.trackingtruth_step = cms.Path(process.pdigi_valid)
# Remake the PFClusters
process.pfclusters_step = cms.Path(process.bunchSpacingProducer *
process.ecalDigis *
process.ecalPreshowerDigis *
process.ecalPreshowerRecHit *
process.ecalMultiFitUncalibRecHit *
process.ecalDetIdToBeRecovered *
process.ecalRecHit *
process.particleFlowRecHitPS *
process.particleFlowRecHitECAL *
process.particleFlowClusterECALUncorrected *
process.particleFlowClusterPS *
process.particleFlowClusterECAL)
# Select the PFClusters we want to calibrate
process.particleFlowClusterECALMatchedToPhotons = process.pfClusterMatchedToPhotonsSelector.clone()
process.selection_step = cms.Path(process.particleFlowClusterECALMatchedToPhotons)
# Ends job and writes our output
process.endjob_step = cms.EndPath(process.endOfProcess)
process.output_step = cms.EndPath(process.PFCLUSTERoutput)
# Schedule definition, rebuilding rechits
process.schedule = cms.Schedule(process.trackingtruth_step,process.pfclusters_step,process.selection_step,process.endjob_step,process.output_step)
| 0
| 0
| 0
|
e86d0fe71bb121a1115b9c3ad10fc4dbf87a3968
| 409
|
py
|
Python
|
runesanalyzer/tests.py
|
clemsciences/runes-analyzer
|
0af8baaf604179c31dcbf28b1a023ca650a9ff34
|
[
"MIT"
] | 5
|
2018-10-17T15:35:51.000Z
|
2022-01-23T10:57:55.000Z
|
runesanalyzer/tests.py
|
clemsciences/runes-analyzer
|
0af8baaf604179c31dcbf28b1a023ca650a9ff34
|
[
"MIT"
] | 5
|
2018-06-25T16:06:00.000Z
|
2018-09-09T13:50:16.000Z
|
runesanalyzer/tests.py
|
clemsciences/runes-analyzer
|
0af8baaf604179c31dcbf28b1a023ca650a9ff34
|
[
"MIT"
] | null | null | null |
"""
"""
import unittest
from runesanalyzer import data
__author__ = ["Clément Besnier <clemsciences@aol.com>", ]
| 27.266667
| 119
| 0.513447
|
"""
"""
import unittest
from runesanalyzer import data
__author__ = ["Clément Besnier <clemsciences@aol.com>", ]
class Tests(unittest.TestCase):
def test_elder_futhark(self):
self.assertListEqual(['ᚠ', 'ᚢ', 'ᚦ', 'ᚨ', 'ᚱ', 'ᚲ', 'ᚷ', 'ᚹ', 'ᚺ', 'ᚾ', 'ᛁ', 'ᛃ', 'ᛇ', 'ᛈ', 'ᛉ', 'ᛊ', 'ᛏ', 'ᛒ',
'ᛖ', 'ᛗ', 'ᛚ', 'ᛜ', 'ᛟ', 'ᛞ'], [str(rune) for rune in data.ELDER_FUTHARK])
| 281
| 10
| 49
|
baf69027186fe85ed22eec3cf5fd7e5276dfca83
| 3,759
|
py
|
Python
|
spec_uploader.py
|
uk-gov-mirror/nhsdigital.api-platform
|
4e634b70268a3509b3f53ea2c87e41eeececfeda
|
[
"MIT"
] | null | null | null |
spec_uploader.py
|
uk-gov-mirror/nhsdigital.api-platform
|
4e634b70268a3509b3f53ea2c87e41eeececfeda
|
[
"MIT"
] | 56
|
2020-03-31T09:03:19.000Z
|
2021-07-28T02:07:50.000Z
|
spec_uploader.py
|
uk-gov-mirror/nhsdigital.api-platform
|
4e634b70268a3509b3f53ea2c87e41eeececfeda
|
[
"MIT"
] | 1
|
2021-04-11T07:33:15.000Z
|
2021-04-11T07:33:15.000Z
|
"""
spec_uploader.py
A tool for uploading apigee specs
Usage:
spec_uploader.py <apigee_org> <specs_dir> -u <username> -p <password> [-t <apigee_token>]
spec_uploader.py (-h | --help)
Options:
-h --help Show this screen
-u Which username to log in with
-p Password for login
-t Access Token from apigee
"""
import os
from docopt import docopt
from apigee_client import ApigeeClient
ENV_NAMES = {
'nhsd-prod': ['sandbox', 'dev', 'int', 'prod'],
'nhsd-nonprod': ['internal-dev', 'internal-qa-sandbox', 'internal-qa', 'ref']
}
FRIENDLY_ENV_NAMES = {
'prod': '(Production)',
'int': '(Integration Testing)',
'dev': '(Development)',
'ref': '(Reference)',
'internal-qa': '(Internal QA)',
'internal-dev': '(Internal Development)'
}
FRIENDLY_API_NAMES = {
'personal-demographics': 'Personal Demographics Service API'
}
if __name__ == "__main__":
args = docopt(__doc__)
client = ApigeeClient(args['<apigee_org>'], args['<username>'], args['<password>'], args['<apigee_token>'])
upload_specs(ENV_NAMES[args['<apigee_org>']], args['<specs_dir>'], client)
| 32.973684
| 124
| 0.597233
|
"""
spec_uploader.py
A tool for uploading apigee specs
Usage:
spec_uploader.py <apigee_org> <specs_dir> -u <username> -p <password> [-t <apigee_token>]
spec_uploader.py (-h | --help)
Options:
-h --help Show this screen
-u Which username to log in with
-p Password for login
-t Access Token from apigee
"""
import os
from docopt import docopt
from apigee_client import ApigeeClient
ENV_NAMES = {
'nhsd-prod': ['sandbox', 'dev', 'int', 'prod'],
'nhsd-nonprod': ['internal-dev', 'internal-qa-sandbox', 'internal-qa', 'ref']
}
FRIENDLY_ENV_NAMES = {
'prod': '(Production)',
'int': '(Integration Testing)',
'dev': '(Development)',
'ref': '(Reference)',
'internal-qa': '(Internal QA)',
'internal-dev': '(Internal Development)'
}
FRIENDLY_API_NAMES = {
'personal-demographics': 'Personal Demographics Service API'
}
def to_friendly_name(spec_name, env):
friendly_env = FRIENDLY_ENV_NAMES.get(env, env)
friendly_api = FRIENDLY_API_NAMES.get(spec_name, spec_name.replace('-', ' ').title())
return f'{friendly_api} {friendly_env}'
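# Illustrative result (sketch, assuming the FRIENDLY_API_NAMES lookup above):
# to_friendly_name("personal-demographics", "int")
# -> "Personal Demographics Service API (Integration Testing)".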
def upload_specs(envs, specs_dir, client):
# Grab a list of local specs
local_specs = os.listdir(specs_dir)
# Grab a list of remote specs
folder = client.list_specs()
folder_id = folder['id']
existing_specs = {v['name']: v['id'] for v in folder['contents']}
# Figure out where the portal is
portal_id = client.get_portals().json()['data'][0]['id']
print(f'portal is {portal_id}')
portal_specs = {i['specId']: i for i in client.get_apidocs(portal_id).json()['data']}
print(f'grabbed apidocs')
# Run through the list of local specs -- if it's on the portal, update it;
# otherwise, add a new one
for spec in local_specs:
spec_name = os.path.splitext(spec)[0]
if spec_name in existing_specs:
print(f'{spec} exists')
spec_id = existing_specs[spec_name]
else:
print(f'{spec} does not exist, creating')
response = client.create_spec(spec_name, folder_id)
spec_id = response.json()['id']
print(f'{spec} id is {spec_id}')
with open(os.path.join(specs_dir, spec), 'r') as f:
response = client.update_spec(spec_id, f.read())
print(f'{spec} updated')
# Note: product refs sometimes change between deploys; in that case the old portal doc may need to be deleted and recreated rather than updated.
for env in envs:
if 'sandbox' in env: # we don't want to publish stuff for sandbox
continue
print(f'checking if this spec is on the portal in {env}')
ns_spec_name = f'{spec_name}-{env}'
if ns_spec_name in portal_specs:
print(f'{ns_spec_name} is on the portal, updating')
apidoc_id = portal_specs[ns_spec_name]['id']
client.update_portal_api(
apidoc_id,
to_friendly_name(spec_name, env),
ns_spec_name,
spec_id,
portal_id
)
client.update_spec_snapshot(portal_id, apidoc_id)
else:
print(f'{ns_spec_name} is not on the portal, adding it')
client.create_portal_api(
to_friendly_name(spec_name, env),
ns_spec_name,
spec_id,
portal_id
)
print('done.')
if __name__ == "__main__":
args = docopt(__doc__)
client = ApigeeClient(args['<apigee_org>'], args['<username>'], args['<password>'], args['<apigee_token>'])
upload_specs(ENV_NAMES[args['<apigee_org>']], args['<specs_dir>'], client)
| 2,575
| 0
| 46
|
2012588b8fb7495ab120e653910b9eb717841ec4
| 119
|
py
|
Python
|
ekorpkit/models/transformer/__init__.py
|
entelecheia/eKorpKit
|
9521ae4c4749419fa2b088d1b9e518e5927b7cb8
|
[
"CC-BY-4.0"
] | 4
|
2022-02-26T10:54:16.000Z
|
2022-02-26T11:01:56.000Z
|
ekorpkit/models/transformer/__init__.py
|
entelecheia/eKorpKit
|
9521ae4c4749419fa2b088d1b9e518e5927b7cb8
|
[
"CC-BY-4.0"
] | 1
|
2022-03-25T06:37:12.000Z
|
2022-03-25T06:45:53.000Z
|
ekorpkit/models/transformer/__init__.py
|
entelecheia/eKorpKit
|
9521ae4c4749419fa2b088d1b9e518e5927b7cb8
|
[
"CC-BY-4.0"
] | null | null | null |
from .simple import (
SimpleNER,
SimpleMultiLabel,
SimpleClassification,
)
from .simple_t5 import SimpleT5
| 17
| 31
| 0.739496
|
from .simple import (
SimpleNER,
SimpleMultiLabel,
SimpleClassification,
)
from .simple_t5 import SimpleT5
| 0
| 0
| 0
|
6ef6bb588ed00ad965b37321174f6dabafe870a9
| 1,001
|
py
|
Python
|
target_to_lines.py
|
crazydigger/ru_summarization_mbart
|
9da680bd67f869de9094c26f7a8a8e6b4c1b06bf
|
[
"Apache-2.0"
] | 1
|
2021-09-02T06:46:31.000Z
|
2021-09-02T06:46:31.000Z
|
target_to_lines.py
|
AlexKay28/summarus
|
9da680bd67f869de9094c26f7a8a8e6b4c1b06bf
|
[
"Apache-2.0"
] | null | null | null |
target_to_lines.py
|
AlexKay28/summarus
|
9da680bd67f869de9094c26f7a8a8e6b4c1b06bf
|
[
"Apache-2.0"
] | 1
|
2021-01-04T02:22:57.000Z
|
2021-01-04T02:22:57.000Z
|
import argparse
from allennlp.common.params import Params
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.models.archival import load_archive
from summarus.readers import *
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--archive-file', type=str, required=True)
parser.add_argument('--input-file', type=str, required=True)
parser.add_argument('--output-file', type=str, required=True)
args = parser.parse_args()
target_to_lines(**vars(args))
| 35.75
| 76
| 0.705295
|
import argparse
from allennlp.common.params import Params
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.models.archival import load_archive
from summarus.readers import *
def target_to_lines(archive_file, input_file, output_file, lowercase=True):
archive = load_archive(archive_file)
reader = DatasetReader.from_params(archive.config.pop("dataset_reader"))
with open(output_file, "w") as w:
for t in reader.parse_set(input_file):
target = t[1]
target = target.strip()
target = target.lower() if lowercase else target
w.write(target.replace("\n", " ") + "\n")
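# i.e. output_file ends up with one target summary per line, lowercased by default and
# with internal newlines flattened to spaces.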
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--archive-file', type=str, required=True)
parser.add_argument('--input-file', type=str, required=True)
parser.add_argument('--output-file', type=str, required=True)
args = parser.parse_args()
target_to_lines(**vars(args))
| 434
| 0
| 23
|
46d7bd2bfb36cc2efd9ae401ded0e3b065c81bac
| 1,029
|
py
|
Python
|
tempest/services/orchestration/json/dns_client.py
|
BeenzSyed/tempest
|
7a64ee1216d844f6b99928b53f5c665b84cb8719
|
[
"Apache-2.0"
] | null | null | null |
tempest/services/orchestration/json/dns_client.py
|
BeenzSyed/tempest
|
7a64ee1216d844f6b99928b53f5c665b84cb8719
|
[
"Apache-2.0"
] | null | null | null |
tempest/services/orchestration/json/dns_client.py
|
BeenzSyed/tempest
|
7a64ee1216d844f6b99928b53f5c665b84cb8719
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'sabe6191'
import json
import datetime
from tempest.common import rest_client
| 33.193548
| 77
| 0.60447
|
__author__ = 'sabe6191'
import json
import datetime
from tempest.common import rest_client
class DnsClient(rest_client.RestClient):
def __init__(self, config, username, password, auth_url,
token_url ,tenant_name=None):
super(DnsClient, self).__init__(config, username, password, auth_url,
token_url, tenant_name)
self.url = self.config.dns.url
self.service = self.config.dns.catalog_type
def list_domain_id(self, domain_id, region):
url = "https://dns.api.rackspacecloud.com/v1" \
".0/%s/domains/%s" % (self.tenant_name, domain_id)
resp, body = self.get(url, region)
if resp['status'] == ('200'):
body = json.loads(body)
return resp, body
def datehandler(obj):
if isinstance(obj, datetime.date):
return str(obj)
else:
raise TypeError('Object of type %s with value of %s is not '
'JSON serializable' % (type(obj), repr(obj)))
| 817
| 19
| 100
|
983fe14532b3a5a93ae1262c586285af92006ebf
| 505
|
py
|
Python
|
list_remove_duplicate.py
|
xiaoledeng/python-tiny-functions
|
bf31421f4ea06b71c26fcc3de7b2134060ed34dc
|
[
"MIT"
] | 1
|
2019-09-19T15:09:14.000Z
|
2019-09-19T15:09:14.000Z
|
list_remove_duplicate.py
|
xiaoledeng/python-tiny-functions
|
bf31421f4ea06b71c26fcc3de7b2134060ed34dc
|
[
"MIT"
] | null | null | null |
list_remove_duplicate.py
|
xiaoledeng/python-tiny-functions
|
bf31421f4ea06b71c26fcc3de7b2134060ed34dc
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
"""
Date: 2019/11/27
Author: Xiao-Le Deng
Email: xiaoledeng at gmail.com
Function: remove duplicates in a given list
"""
# List1 = [1,1,1]
# List2 = ["John","John","John","Mark","David","David","Shalom","Shalom","Shalom"]
# print(list_remove_duplicate(List1))
# print(list_remove_duplicate(List2))
| 26.578947
| 82
| 0.706931
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
"""
Date: 2019/11/27
Author: Xiao-Le Deng
Email: xiaoledeng at gmail.com
Function: remove duplicates in a given list
"""
def list_remove_duplicate(original_list):
format_list = list(set(original_list))
format_list.sort(key=original_list.index)
return format_list
# List1 = [1,1,1]
# List2 = ["John","John","John","Mark","David","David","Shalom","Shalom","Shalom"]
# print(list_remove_duplicate(List1))
# print(list_remove_duplicate(List2))
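# Expected output of the commented examples above (order of first appearance is kept):
# [1]
# ['John', 'Mark', 'David', 'Shalom']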
| 132
| 0
| 23
|
d1d3d70a9689a9253aab1fb4196730cfab689e31
| 618
|
py
|
Python
|
neighbor/tests.py
|
olesigilai/neighborhood
|
d188323f45ea000320903cfc6b871abcf346e326
|
[
"MIT",
"Unlicense"
] | null | null | null |
neighbor/tests.py
|
olesigilai/neighborhood
|
d188323f45ea000320903cfc6b871abcf346e326
|
[
"MIT",
"Unlicense"
] | null | null | null |
neighbor/tests.py
|
olesigilai/neighborhood
|
d188323f45ea000320903cfc6b871abcf346e326
|
[
"MIT",
"Unlicense"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth.models import User
from .models import healthservices,neighbourhood
import datetime as dt
# Create your tests here.
| 28.090909
| 63
| 0.726537
|
from django.test import TestCase
from django.contrib.auth.models import User
from .models import healthservices,neighbourhood
import datetime as dt
# Create your tests here.
class neighbourhoodTestClass(TestCase):
def setUp(self):
self.kataret = neighbourhood(neighbourhood='kataret')
def test_instance(self):
self.assertTrue(isinstance(self.kataret,neighbourhood))
def tearDown(self):
neighbourhood.objects.all().delete()
def test_save_method(self):
self.kataret.save_neighbourhood()
hood = neighbourhood.objects.all()
self.assertTrue(len(hood)>0)
| 295
| 18
| 129
|
e09e9e063bc8a78a97ab704726d573106da2d907
| 1,223
|
py
|
Python
|
setup.py
|
park-sungmoo/morfessor
|
b6356c877f4af28d90bda2373b3b71bd371a3273
|
[
"BSD-2-Clause"
] | 2
|
2021-03-28T00:12:00.000Z
|
2021-03-28T20:38:57.000Z
|
setup.py
|
park-sungmoo/morfessor
|
b6356c877f4af28d90bda2373b3b71bd371a3273
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
park-sungmoo/morfessor
|
b6356c877f4af28d90bda2373b3b71bd371a3273
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
from codecs import open
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
import re
main_py = open('morfessor/__init__.py', encoding='utf-8').read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", main_py))
requires = [
# 'progressbar',
]
setup(name='Morfessor',
version=metadata['version'],
author=metadata['author'],
author_email='morpho@aalto.fi',
url='http://morpho.aalto.fi',
description='Morfessor',
packages=['morfessor', 'morfessor.test'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
],
license="BSD",
scripts=['scripts/morfessor',
'scripts/morfessor-train',
'scripts/morfessor-segment',
'scripts/morfessor-evaluate',
],
install_requires=requires,
extras_require={
'docs': [l.strip() for l in open('docs/build_requirements.txt')]
}
)
| 28.44186
| 74
| 0.591169
|
#!/usr/bin/env python
from codecs import open
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
import re
main_py = open('morfessor/__init__.py', encoding='utf-8').read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", main_py))
requires = [
# 'progressbar',
]
setup(name='Morfessor',
version=metadata['version'],
author=metadata['author'],
author_email='morpho@aalto.fi',
url='http://morpho.aalto.fi',
description='Morfessor',
packages=['morfessor', 'morfessor.test'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
],
license="BSD",
scripts=['scripts/morfessor',
'scripts/morfessor-train',
'scripts/morfessor-segment',
'scripts/morfessor-evaluate',
],
install_requires=requires,
extras_require={
'docs': [l.strip() for l in open('docs/build_requirements.txt')]
}
)
| 0
| 0
| 0
|
5c07f3f58be4aba3c39cf1aebdf84f5e6d58c7f0
| 4,918
|
py
|
Python
|
tests/docs_test.py
|
loganasherjones/yapconf
|
0199d8b232fa50a590aac7dd5cc772efc1f7872c
|
[
"MIT"
] | 20
|
2018-01-30T14:32:40.000Z
|
2020-08-04T21:31:37.000Z
|
tests/docs_test.py
|
loganasherjones/yapconf
|
0199d8b232fa50a590aac7dd5cc772efc1f7872c
|
[
"MIT"
] | 95
|
2018-02-01T05:06:06.000Z
|
2022-03-30T00:41:33.000Z
|
tests/docs_test.py
|
loganasherjones/yapconf
|
0199d8b232fa50a590aac7dd5cc772efc1f7872c
|
[
"MIT"
] | 7
|
2018-02-06T21:15:24.000Z
|
2021-11-04T16:39:20.000Z
|
# -*- coding: utf-8 -*-
from yapconf.docs import build_markdown_table
# flake8: noqa
| 27.943182
| 104
| 0.530907
|
# -*- coding: utf-8 -*-
from yapconf.docs import build_markdown_table
# flake8: noqa
def test_build_markdown_table():
headers = {'foo': 'Foo', 'bar': 'Bar'}
rows = [
{
'bar': 'bar_value',
'foo': 'x',
},
{
'bar': 'bar_value2',
'foo': 'y',
},
]
table = build_markdown_table(headers, rows, ['foo', 'bar'])
assert table == ("| Foo | Bar |\n"
"| --- | ---------- |\n"
"| x | bar_value |\n"
"| y | bar_value2 |\n")
def test_generate_markdown_doc_simple(simple_spec):
doc = simple_spec.generate_documentation('My App Name')
assert doc == (
"""# My App Name Configuration
This document describes the configuration for My App Name. Each section will
document a particular configuration value and its description. First,
though, we start with the possible sources.
This documentation was auto-generated by [yapconf.](https://github.com/loganasherjones/yapconf)
## Sources
My App Name configuration can be loaded from the below sources:
## Configuration
This section outlines the various configuration items My App Name supports.
| Name | Type | Default | Description |
| ------------------------- | ------- | ------- | ----------- |
| [my_bool](#my_bool) | bool | None | None |
| [my_complex](#my_complex) | complex | None | None |
| [my_float](#my_float) | float | None | None |
| [my_int](#my_int) | int | None | None |
| [my_long](#my_long) | long | None | None |
| [my_string](#my_string) | str | None | None |
### my_bool
No description provided.
| Attribute | Value |
| ------------- | ----------- |
| **item_type** | `bool` |
| **default** | `None` |
| **env_name** | `MY_BOOL` |
| **required** | `True` |
| **cli_name** | `--my-bool` |
| **fallback** | `None` |
| **choices** | `None` |
You can set my_bool from the environment by setting the environment variable `MY_BOOL`
You can set `my_bool` from the command-line by specifying `--my-bool` at My App Name's entrypoint.
### my_complex
No description provided.
| Attribute | Value |
| ------------- | -------------- |
| **item_type** | `complex` |
| **default** | `None` |
| **env_name** | `MY_COMPLEX` |
| **required** | `True` |
| **cli_name** | `--my-complex` |
| **fallback** | `None` |
| **choices** | `None` |
You can set my_complex from the environment by setting the environment variable `MY_COMPLEX`
You can set `my_complex` from the command-line by specifying `--my-complex` at My App Name's entrypoint.
### my_float
No description provided.
| Attribute | Value |
| ------------- | ------------ |
| **item_type** | `float` |
| **default** | `None` |
| **env_name** | `MY_FLOAT` |
| **required** | `True` |
| **cli_name** | `--my-float` |
| **fallback** | `None` |
| **choices** | `None` |
You can set my_float from the environment by setting the environment variable `MY_FLOAT`
You can set `my_float` from the command-line by specifying `--my-float` at My App Name's entrypoint.
### my_int
No description provided.
| Attribute | Value |
| ------------- | ---------- |
| **item_type** | `int` |
| **default** | `None` |
| **env_name** | `MY_INT` |
| **required** | `True` |
| **cli_name** | `--my-int` |
| **fallback** | `None` |
| **choices** | `None` |
You can set my_int from the environment by setting the environment variable `MY_INT`
You can set `my_int` from the command-line by specifying `--my-int` at My App Name's entrypoint.
### my_long
No description provided.
| Attribute | Value |
| ------------- | ----------- |
| **item_type** | `long` |
| **default** | `None` |
| **env_name** | `MY_LONG` |
| **required** | `True` |
| **cli_name** | `--my-long` |
| **fallback** | `None` |
| **choices** | `None` |
You can set my_long from the environment by setting the environment variable `MY_LONG`
You can set `my_long` from the command-line by specifying `--my-long` at My App Name's entrypoint.
### my_string
No description provided.
| Attribute | Value |
| ------------- | ------------- |
| **item_type** | `str` |
| **default** | `None` |
| **env_name** | `MY_STRING` |
| **required** | `True` |
| **cli_name** | `--my-string` |
| **fallback** | `None` |
| **choices** | `None` |
You can set my_string from the environment by setting the environment variable `MY_STRING`
You can set `my_string` from the command-line by specifying `--my-string` at My App Name's entrypoint.
"""
)
| 4,784
| 0
| 45
|
168c20682896b2ca672a403ebe967cd8636d6ded
| 10,132
|
py
|
Python
|
src/core_modules/config.py
|
dngfx/MagicBot
|
56abfce2aac28f36e24ebe00229625196b269907
|
[
"WTFPL"
] | 1
|
2020-12-31T03:10:42.000Z
|
2020-12-31T03:10:42.000Z
|
src/core_modules/config.py
|
dngfx/MagicBot
|
56abfce2aac28f36e24ebe00229625196b269907
|
[
"WTFPL"
] | 3
|
2020-10-12T21:27:28.000Z
|
2021-08-12T09:46:55.000Z
|
src/core_modules/config.py
|
dngfx/MagicBot
|
56abfce2aac28f36e24ebe00229625196b269907
|
[
"WTFPL"
] | 1
|
2020-10-12T21:17:58.000Z
|
2020-10-12T21:17:58.000Z
|
# --depends-on channel_access
# --depends-on check_mode
# --depends-on commands
# --depends-on permissions
import enum
from src import ModuleManager, utils
| 37.947566
| 85
| 0.554777
|
# --depends-on channel_access
# --depends-on check_mode
# --depends-on commands
# --depends-on permissions
import enum
from src import ModuleManager, utils
class ConfigInvalidValue(Exception):
def __init__(self, message: str = None):
self.message = message
class ConfigSettingInexistent(Exception):
pass
class ConfigResults(enum.Enum):
Changed = 1
Retrieved = 2
Removed = 3
Unchanged = 4
class ConfigResult(object):
def __init__(self, result, data=None):
self.result = result
self.data = data
class ConfigChannelTarget(object):
def __init__(self, bot, server, channel_name):
self._bot = bot
self._server = server
self._channel_name = channel_name
def _get_id(self):
return self._server.channels.get_id(self._channel_name)
def set_setting(self, setting, value):
channel_id = self._get_id()
self._bot.database.channel_settings.set(channel_id, setting, value)
def get_setting(self, setting, default=None):
channel_id = self._get_id()
return self._bot.database.channel_settings.get(channel_id, setting, default)
def del_setting(self, setting):
channel_id = self._get_id()
self._bot.database.channel_settings.delete(channel_id, setting)
def get_user_setting(self, user_id, setting, default=None):
return self._bot.database.user_channel_settings.get(
user_id, self._get_id(), setting, default
)
class Module(ModuleManager.BaseModule):
def _to_context(self, server, channel, user, context_desc):
context_desc_lower = context_desc.lower()
if context_desc == "*":
if channel == user:
# we're in PM
return user, "set", None
else:
# we're in a channel
return channel, "channelset", None
elif server.is_channel(context_desc):
return context_desc, "channelset", context_desc
elif server.irc_lower(context_desc) == user.nickname_lower:
return user, "set", None
elif "user".startswith(context_desc_lower):
return user, "set", None
elif "channel".startswith(context_desc_lower):
return channel, "channelset", None
elif "server".startswith(context_desc_lower):
return server, "serverset", None
elif "bot".startswith(context_desc_lower):
return self.bot, "botset", None
else:
raise ValueError()
@utils.hook("preprocess.command")
def preprocess_command(self, event):
require_setting = event["hook"].get_kwarg("require_setting", None)
if not require_setting == None:
require_setting_unless = event["hook"].get_kwarg(
"require_setting_unless", None
)
if not require_setting_unless == None:
require_setting_unless = int(require_setting_unless)
if len(event["args_split"]) >= require_setting_unless:
return
context, _, require_setting = require_setting.rpartition(":")
require_setting = require_setting.lower()
channel = None
if event["is_channel"]:
channel = event["target"]
context = context or "user"
target, setting_context, _ = self._to_context(
event["server"], channel, event["user"], context
)
export_settings = self._get_export_setting(setting_context)
setting_info = export_settings.get(require_setting, None)
if setting_info:
value = target.get_setting(require_setting, None)
if value == None:
example = setting_info.example or "<value>"
if context == "user":
context = event["user"].nickname
elif context == "channel" and not channel == None:
context = channel.name
else:
context = context[0]
error = "Please set %s, e.g.: %sconfig %s %s %s" % (
require_setting,
event["command_prefix"],
context,
require_setting,
example,
)
return utils.consts.PERMISSION_ERROR, error
def _get_export_setting(self, context):
settings = self.exports.get_all(context)
return {setting.name.lower(): setting for setting in settings}
def _config(self, export_settings, target, setting, value=None):
if not value == None:
setting_object = export_settings[setting]
try:
validated_value = setting_object.parse(value)
except utils.settings.SettingParseException as e:
raise ConfigInvalidValue(str(e))
if not validated_value == None:
existing_value = target.get_setting(setting, None)
if existing_value == validated_value:
return ConfigResult(ConfigResults.Unchanged)
else:
target.set_setting(setting, validated_value)
formatted_value = setting_object.format(validated_value)
return ConfigResult(ConfigResults.Changed, formatted_value)
else:
raise ConfigInvalidValue()
else:
unset = False
if setting.startswith("-"):
setting = setting[1:]
unset = True
existing_value = target.get_setting(setting, None)
if not existing_value == None:
if unset:
target.del_setting(setting)
return ConfigResult(ConfigResults.Removed)
else:
formatted = export_settings[setting].format(existing_value)
return ConfigResult(ConfigResults.Retrieved, formatted)
else:
raise ConfigSettingInexistent()
@utils.hook("received.command.c", alias_of="config")
@utils.hook("received.command.config")
@utils.kwarg("min_args", 1)
@utils.kwarg("help", "Change config options")
@utils.kwarg("usage", "[context][:name] [-][setting [value]]")
def config(self, event):
arg_count = len(event["args_split"])
context_desc, _, name = event["args_split"][0].partition(":")
setting = None
value = None
if arg_count > 1:
setting = event["args_split"][1].lower()
if arg_count > 2:
value = " ".join(event["args_split"][2:])
try:
target, context, name_override = self._to_context(
event["server"], event["target"], event["user"], context_desc
)
except ValueError:
raise utils.EventError(
"Unknown context '%s'. Please provide "
"'user', 'channel', 'server' or 'bot'" % context_desc
)
name = name_override or name
permission_check = utils.Check("permission", "config")
if context == "set":
if name:
event["check_assert"](utils.Check("self", name) | permission_check)
target = event["server"].get_user(name)
else:
target = event["user"]
elif context == "channelset":
if name:
if name in event["server"].channels:
target = event["server"].channels.get(name)
else:
target = ConfigChannelTarget(self.bot, event["server"], name)
else:
if event["is_channel"]:
target = event["target"]
else:
raise utils.EventError(
"Cannot change config for current channel when in "
"private message"
)
event["check_assert"](
permission_check
| utils.Check("channel-access", target, "high,config")
| utils.Check("channel-mode", target, "o")
)
elif context == "serverset" or context == "botset":
event["check_assert"](permission_check)
export_settings = self._get_export_setting(context)
if not setting == None:
if not setting.lstrip("-") in export_settings:
raise utils.EventError("Setting not found")
try:
result = self._config(export_settings, target, setting, value)
except ConfigInvalidValue as e:
if not e.message == None:
raise utils.EventError("Invalid value: %s" % e.message)
example = export_settings[setting].get_example()
if not example == None:
raise utils.EventError("Invalid value. %s" % example)
else:
raise utils.EventError("Invalid value")
except ConfigSettingInexistent:
raise utils.EventError("Setting not set")
for_str = ""
if name_override:
for_str = " for %s" % name_override
if result.result == ConfigResults.Changed:
event["stdout"].write(
"Config '%s'%s set to %s" % (setting, for_str, result.data)
)
elif result.result == ConfigResults.Retrieved:
event["stdout"].write("%s%s: %s" % (setting, for_str, result.data))
elif result.result == ConfigResults.Removed:
event["stdout"].write(
"Unset setting '%s'%s" % (setting.lstrip("-"), for_str)
)
elif result.result == ConfigResults.Unchanged:
event["stdout"].write("Config '%s'%s unchanged" % (setting, for_str))
else:
event["stdout"].write(
"Available config: %s" % ", ".join(export_settings.keys())
)
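The `config` command above accepts arguments shaped as `[context][:name] [-][setting [value]]`: giving a value writes the setting, giving only the setting name reads it, and prefixing the setting with `-` unsets it, with the outcome reported as one of the `ConfigResults` variants. The following is a stripped-down, self-contained sketch of that get/set/unset dispatch against a plain dict; it only illustrates the pattern and is not MagicBot's API.

# Illustrative sketch, not MagicBot code: the Changed/Retrieved/Removed/Unchanged
# dispatch that Module._config performs, reduced to a dict-backed "target".
import enum


class Results(enum.Enum):
    Changed = 1
    Retrieved = 2
    Removed = 3
    Unchanged = 4


def config_op(store, setting, value=None):
    if value is not None:
        if store.get(setting) == value:
            return Results.Unchanged, value
        store[setting] = value
        return Results.Changed, value
    if setting.startswith("-"):              # leading '-' means "unset"
        return Results.Removed, store.pop(setting[1:], None)
    return Results.Retrieved, store.get(setting)


settings = {}
print(config_op(settings, "greeting", "hello"))  # (Results.Changed, 'hello')
print(config_op(settings, "greeting"))           # (Results.Retrieved, 'hello')
print(config_op(settings, "-greeting"))          # (Results.Removed, 'hello')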
| 9,037
| 580
| 351
|
41dbe518bd2b960e579c5e5fae4ea30bac2b30e3
| 6,597
|
py
|
Python
|
src/pytest_mock_resources/patch/redshift/mock_s3_copy.py
|
schireson/pytest-mock-resources
|
a09fc18eeeac06c5589854ce200fa45f64c81cb5
|
[
"MIT"
] | 49
|
2020-01-24T21:08:43.000Z
|
2022-03-31T23:55:21.000Z
|
src/pytest_mock_resources/patch/redshift/mock_s3_copy.py
|
schireson/pytest-mock-resources
|
a09fc18eeeac06c5589854ce200fa45f64c81cb5
|
[
"MIT"
] | 29
|
2020-03-11T19:07:50.000Z
|
2022-03-30T16:49:06.000Z
|
src/pytest_mock_resources/patch/redshift/mock_s3_copy.py
|
schireson/pytest-mock-resources
|
a09fc18eeeac06c5589854ce200fa45f64c81cb5
|
[
"MIT"
] | 10
|
2020-01-23T19:04:09.000Z
|
2022-02-22T19:57:54.000Z
|
import binascii
import csv
import gzip
import io
import sys
from sqlalchemy import MetaData, Table
from pytest_mock_resources.compat import boto3
def _parse_s3_command(statement):
"""Format, Parse and call patched 'COPY' command."""
statement = strip(statement)
params = dict()
# deleting copy
tokens = statement.split()[1:]
# Fetching table name
params["schema_name"], params["table_name"] = _split_table_name(tokens.pop(0))
# Checking for columns
if tokens[0][0] == "(":
ending_index = 0
for index, arg in enumerate(tokens):
if arg.endswith(")"):
ending_index = index
break
ending_index += 1
columns = tokens[0:ending_index]
columns[0] = columns[0].replace("(", "")
columns[-1] = columns[-1].replace(")", "")
columns = [x.replace(",", "") for x in columns]
columns = [x for x in columns if x != ""]
tokens = tokens[ending_index:]
params["columns"] = columns
# Fetching s3_uri
if tokens.pop(0).lower() != "from":
raise ValueError(
(
"Possibly malformed S3 URI Format. "
"Statement = {statement}"
"Redshift fixture only supports S3 Copy statments with the following syntax: "
"COPY <table_name> FROM [(column 1, [column2, [..]])] '<file path on S3 bucket>' "
"credentials 'aws_access_key_id=<aws_access_key_id>;"
"aws_secret_access_key=<aws_secret_access_key>'"
).format(statement=statement)
)
params["s3_uri"] = strip(tokens.pop(0))
# Fetching credentials
for token in tokens:
if "aws_access_key_id" in token.lower() or "aws_secret_access_key" in token.lower():
# This is because of the following possibilities:
# ... [with ]credentials[ AS] 'aws_access_key_id=x;aws_secret_access_key=y'
# OR
# ... [with ]credentials[ AS] 'aws_secret_access_key=y;aws_access_key_id=x'
# OR
# ... [with ]credentials[ AS] 'aws_secret_access_key=y;\naws_access_key_id=x'
# OR
# ... [with ]credentials[ AS] 'aws_secret_access_key=y; aws_access_key_id=x'
# Supported AWS credentials format:
# [with ]credentials[ AS] 'aws_secret_access_key=y; aws_access_key_id=x'
# No support for additional credential formats, e.g. IAM roles, yet.
credentials_list = token.split(";")
for credentials in credentials_list:
if "aws_access_key_id" in credentials:
params["aws_access_key_id"] = credentials.split("=")[-1]
elif "aws_secret_access_key" in credentials:
params["aws_secret_access_key"] = credentials.split("=")[-1]
else:
raise ValueError(
(
"Possibly malformed AWS Credentials Format. "
"Statement = {statement}"
"Redshift fixture only supports S3 Copy statments with the following "
"syntax: COPY <table_name> FROM [(column 1, [column2, [..]])] '"
"<file path on S3 bucket>' "
"credentials 'aws_access_key_id=<aws_access_key_id>;"
"aws_secret_access_key=<aws_secret_access_key>' "
"Supportred AWS credentials format: "
"[with ]credentials[ AS] 'aws_secret_access_key=y; aws_access_key_id=x'"
" No Support for additional credential formats, eg IAM roles, etc, yet."
).format(statement=statement)
)
return params
def _split_table_name(table_name):
"""Split 'schema_name.table_name' to (schema_name, table_name)."""
table_name_items = table_name.split(".")
if len(table_name_items) == 1:
schema_name = None
elif len(table_name_items) == 2:
schema_name, table_name = table_name_items
else:
raise ValueError("Cannot determine schema/table name from input {}".format(table_name))
return schema_name, table_name
def _mock_s3_copy(
table_name, s3_uri, schema_name, aws_secret_access_key, aws_access_key_id, columns, engine
):
"""Execute patched 'copy' command."""
s3 = boto3.client(
"s3", aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key
)
ending_index = len(s3_uri)
path_to_file = s3_uri[5:ending_index]
bucket, key = path_to_file.split("/", 1)
response = s3.get_object(Bucket=bucket, Key=key)
# The following lines of code check whether the file is gzipped.
# To do so we use magic numbers.
# A magic number is a constant numerical or text value used to identify a file format or protocol.
# The magic number for gzip-compressed files is 1f 8b.
is_gzipped = binascii.hexlify(response["Body"].read(2)) == b"1f8b"
response = s3.get_object(Bucket=bucket, Key=key)
data = read_data_csv(response["Body"].read(), is_gzipped, columns)
meta = MetaData()
table = Table(table_name, meta, autoload=True, schema=schema_name, autoload_with=engine)
engine.execute(table.insert(data))
def strip(input_string):
"""Strip trailing whitespace, single/double quotes."""
return input_string.strip().rstrip(";").strip('"').strip("'")
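`_parse_s3_command` above turns a Redshift-style `COPY <table> FROM '<s3 uri>' credentials '...'` statement into a dict of schema/table name, optional column list, S3 URI and AWS keys, which `_mock_s3_copy` then consumes. A small usage sketch follows; the table, bucket and credential values are invented placeholders.

# Hypothetical input values; only the statement shape matches what the parser expects.
statement = (
    "COPY my_schema.my_table (id, name) "
    "FROM 's3://example-bucket/data.csv' "
    "credentials 'aws_access_key_id=AKIAEXAMPLE;aws_secret_access_key=examplesecret'"
)
params = _parse_s3_command(statement)
# params now carries schema_name, table_name, columns, s3_uri,
# aws_access_key_id and aws_secret_access_key for _mock_s3_copy().
print(params["schema_name"], params["table_name"], params["columns"], params["s3_uri"])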
| 38.578947
| 100
| 0.609974
|
import binascii
import csv
import gzip
import io
import sys
from sqlalchemy import MetaData, Table
from pytest_mock_resources.compat import boto3
def execute_mock_s3_copy_command(statement, engine):
params = _parse_s3_command(statement)
_mock_s3_copy(
table_name=params["table_name"],
schema_name=params["schema_name"],
s3_uri=params["s3_uri"],
aws_secret_access_key=params["aws_secret_access_key"],
aws_access_key_id=params["aws_access_key_id"],
columns=params.get("columns", None),
engine=engine,
)
def _parse_s3_command(statement):
"""Format, Parse and call patched 'COPY' command."""
statement = strip(statement)
params = dict()
# deleting copy
tokens = statement.split()[1:]
# Fetching table name
params["schema_name"], params["table_name"] = _split_table_name(tokens.pop(0))
# Checking for columns
if tokens[0][0] == "(":
ending_index = 0
for index, arg in enumerate(tokens):
if arg.endswith(")"):
ending_index = index
break
ending_index += 1
columns = tokens[0:ending_index]
columns[0] = columns[0].replace("(", "")
columns[-1] = columns[-1].replace(")", "")
columns = [x.replace(",", "") for x in columns]
columns = [x for x in columns if x != ""]
tokens = tokens[ending_index:]
params["columns"] = columns
# Fetching s3_uri
if tokens.pop(0).lower() != "from":
raise ValueError(
(
"Possibly malformed S3 URI Format. "
"Statement = {statement}"
"Redshift fixture only supports S3 Copy statments with the following syntax: "
"COPY <table_name> FROM [(column 1, [column2, [..]])] '<file path on S3 bucket>' "
"credentials 'aws_access_key_id=<aws_access_key_id>;"
"aws_secret_access_key=<aws_secret_access_key>'"
).format(statement=statement)
)
params["s3_uri"] = strip(tokens.pop(0))
# Fetching credentials
for token in tokens:
if "aws_access_key_id" in token.lower() or "aws_secret_access_key" in token.lower():
# This is because of the following possibilities:
# ... [with ]credentials[ AS] 'aws_access_key_id=x;aws_secret_access_key=y'
# OR
# ... [with ]credentials[ AS] 'aws_secret_access_key=y;aws_access_key_id=x'
# OR
# ... [with ]credentials[ AS] 'aws_secret_access_key=y;\naws_access_key_id=x'
# OR
# ... [with ]credentials[ AS] 'aws_secret_access_key=y; aws_access_key_id=x'
# Supported AWS credentials format:
# [with ]credentials[ AS] 'aws_secret_access_key=y; aws_access_key_id=x'
# No support for additional credential formats, e.g. IAM roles, yet.
credentials_list = token.split(";")
for credentials in credentials_list:
if "aws_access_key_id" in credentials:
params["aws_access_key_id"] = credentials.split("=")[-1]
elif "aws_secret_access_key" in credentials:
params["aws_secret_access_key"] = credentials.split("=")[-1]
else:
raise ValueError(
(
"Possibly malformed AWS Credentials Format. "
"Statement = {statement}"
"Redshift fixture only supports S3 Copy statments with the following "
"syntax: COPY <table_name> FROM [(column 1, [column2, [..]])] '"
"<file path on S3 bucket>' "
"credentials 'aws_access_key_id=<aws_access_key_id>;"
"aws_secret_access_key=<aws_secret_access_key>' "
"Supportred AWS credentials format: "
"[with ]credentials[ AS] 'aws_secret_access_key=y; aws_access_key_id=x'"
" No Support for additional credential formats, eg IAM roles, etc, yet."
).format(statement=statement)
)
return params
def _split_table_name(table_name):
"""Split 'schema_name.table_name' to (schema_name, table_name)."""
table_name_items = table_name.split(".")
if len(table_name_items) == 1:
schema_name = None
elif len(table_name_items) == 2:
schema_name, table_name = table_name_items
else:
raise ValueError("Cannot determine schema/table name from input {}".format(table_name))
return schema_name, table_name
def _mock_s3_copy(
table_name, s3_uri, schema_name, aws_secret_access_key, aws_access_key_id, columns, engine
):
"""Execute patched 'copy' command."""
s3 = boto3.client(
"s3", aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key
)
ending_index = len(s3_uri)
path_to_file = s3_uri[5:ending_index]
bucket, key = path_to_file.split("/", 1)
response = s3.get_object(Bucket=bucket, Key=key)
# The following lines of code check whether the file is gzipped.
# To do so we use magic numbers.
# A magic number is a constant numerical or text value used to identify a file format or protocol.
# The magic number for gzip-compressed files is 1f 8b.
is_gzipped = binascii.hexlify(response["Body"].read(2)) == b"1f8b"
response = s3.get_object(Bucket=bucket, Key=key)
data = read_data_csv(response["Body"].read(), is_gzipped, columns)
meta = MetaData()
table = Table(table_name, meta, autoload=True, schema=schema_name, autoload_with=engine)
engine.execute(table.insert(data))
def read_data_csv(file, is_gzipped=False, columns=None, delimiter="|"):
buffer = io.BytesIO(file)
if is_gzipped:
buffer = gzip.GzipFile(fileobj=buffer, mode="rb")
# Python 2/3 compatibility: Python 3 needs a text wrapper around the buffer,
# while Python 2 reads bytes and needs a byte-encoded delimiter.
wrapper = buffer
if sys.version_info.major >= 3:
wrapper = io.TextIOWrapper(buffer)
else:
delimiter = delimiter.encode("utf-8")
reader = csv.DictReader(
wrapper,
delimiter=delimiter,
quoting=csv.QUOTE_MINIMAL,
quotechar='"',
lineterminator="\n",
skipinitialspace=True,
doublequote=True,
)
return [dict(row) for row in reader]
def strip(input_string):
"""Strip trailing whitespace, single/double quotes."""
return input_string.strip().rstrip(";").strip('"').strip("'")
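The comments in `_mock_s3_copy` explain that gzipped input is detected by the gzip magic number `1f 8b` in the first two bytes. The standalone snippet below just demonstrates that check in isolation; it is independent of S3 and of this module.

# Standalone illustration of the gzip magic-number check used above.
import binascii
import gzip

gzipped = gzip.compress(b"id|name\n1|alice\n")   # gzip output always starts with 0x1f 0x8b
assert binascii.hexlify(gzipped[:2]) == b"1f8b"

plain = b"id|name\n1|alice\n"
assert binascii.hexlify(plain[:2]) != b"1f8b"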
| 1,028
| 0
| 46
|
bd3b17572e5745406f81307baf82b51be3e01f17
| 682
|
py
|
Python
|
src/iotools/__init__.py
|
chinarjoshi/polar-rover
|
b46ffc89352a0fb1520d86e00f5c24983fdbf79c
|
[
"MIT"
] | null | null | null |
src/iotools/__init__.py
|
chinarjoshi/polar-rover
|
b46ffc89352a0fb1520d86e00f5c24983fdbf79c
|
[
"MIT"
] | null | null | null |
src/iotools/__init__.py
|
chinarjoshi/polar-rover
|
b46ffc89352a0fb1520d86e00f5c24983fdbf79c
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_bcrypt import Bcrypt
from iotools.config import Config
db = SQLAlchemy()
bcrypt = Bcrypt()
login_manager = LoginManager()
login_manager.login_view = 'sessions.login'
login_manager.login_message_category = 'info'
| 23.517241
| 48
| 0.777126
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_bcrypt import Bcrypt
from iotools.config import Config
db = SQLAlchemy()
bcrypt = Bcrypt()
login_manager = LoginManager()
login_manager.login_view = 'sessions.login'
login_manager.login_message_category = 'info'
def create_app(config_class=Config):
app = Flask(__name__)
app.config.from_object(Config)
db.init_app(app)
bcrypt.init_app(app)
login_manager.init_app(app)
from iotools.sessions.routes import sessions
from iotools.main.routes import main
app.register_blueprint(sessions)
app.register_blueprint(main)
return app
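`create_app` above is the standard Flask application-factory pattern: the extensions are instantiated at module level and bound to a freshly created `Flask` instance inside the factory. A typical way to run such an app is sketched below; the `debug` flag is an arbitrary illustrative choice, not a setting from this repository.

# Minimal runner for the application factory; run() arguments are illustrative.
from iotools import create_app

app = create_app()

if __name__ == "__main__":
    app.run(debug=True)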
| 332
| 0
| 23
|