Dataset schema (column: type, observed range):

body: string, length 26 to 98.2k
body_hash: int64, range -9,222,864,604,528,158,000 to 9,221,803,474B
docstring: string, length 1 to 16.8k
path: string, length 5 to 230
name: string, length 1 to 96
repository_name: string, length 7 to 89
lang: string, 1 class (python)
body_without_docstring: string, length 20 to 98.2k
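The records below repeat this schema in column order: body, body_hash, docstring, path, name, repository_name, lang, body_without_docstring. As a minimal sketch, such a dump can be inspected with the Hugging Face datasets library; the dataset id below is a placeholder, not the real one:

# Hypothetical example: replace the placeholder dataset id with the real one.
from datasets import load_dataset

ds = load_dataset("user/python-code-docstrings", split="train")  # placeholder id
row = ds[0]
print(row["repository_name"], row["path"], row["name"])
print(row["docstring"])
print(row["body_without_docstring"][:120])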
def registration_list_status_filter_sql(): 'SQL to filter for whitelisted or null registration_list statuses.' return sql.SQL("(status IS NULL OR status = 'whitelist')")
-2,651,173,938,659,543,600
SQL to filter for whitelisted or null registration_list statuses.
src/dirbs/utils.py
registration_list_status_filter_sql
nealmadhu/DIRBS-Core
python
def registration_list_status_filter_sql(): return sql.SQL("(status IS NULL OR status = 'whitelist')")
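A minimal usage sketch for the record above, assuming psycopg2 and an open connection named conn; the table name registration_list is inferred from the function name, not confirmed by the source:

from psycopg2 import sql

def registration_list_status_filter_sql():
    return sql.SQL("(status IS NULL OR status = 'whitelist')")

# Compose the reusable filter fragment into a larger query (table name assumed).
query = sql.SQL('SELECT COUNT(*) FROM registration_list WHERE {f}').format(
    f=registration_list_status_filter_sql())
# with conn.cursor() as cur:
#     cur.execute(query)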
def compute_amnesty_flags(app_config, curr_date): 'Helper function to determine whether the date falls within amnesty eval or amnesty period.' in_amnesty_eval_period = (True if (app_config.amnesty_config.amnesty_enabled and (curr_date <= app_config.amnesty_config.evaluation_period_end_date)) else False) in_...
4,268,033,186,415,836,700
Helper function to determine whether the date falls within amnesty eval or amnesty period.
src/dirbs/utils.py
compute_amnesty_flags
nealmadhu/DIRBS-Core
python
def compute_amnesty_flags(app_config, curr_date): in_amnesty_eval_period = (True if (app_config.amnesty_config.amnesty_enabled and (curr_date <= app_config.amnesty_config.evaluation_period_end_date)) else False) in_amnesty_period = (True if (app_config.amnesty_config.amnesty_enabled and (curr_date > app_co...
def table_exists_sql(any_schema=False): 'SQL to check for existence of a table. Note that for temp tables, any_schema should be set to True.' if (not any_schema): schema_filter_sql = sql.SQL('AND schemaname = current_schema()') else: schema_filter_sql = sql.SQL('') return sql.SQL('SELECT...
-2,982,755,632,233,627,000
SQL to check for existence of a table. Note that for temp tables, any_schema should be set to True.
src/dirbs/utils.py
table_exists_sql
nealmadhu/DIRBS-Core
python
def table_exists_sql(any_schema=False):     if (not any_schema):         schema_filter_sql = sql.SQL('AND schemaname = current_schema()')     else:         schema_filter_sql = sql.SQL('')     return sql.SQL('SELECT EXISTS (SELECT 1\n                         FROM pg_tables\n  ...
def is_table_partitioned(conn, tbl_name): 'Function to determine whether a table is partitioned.' with conn.cursor() as cursor: cursor.execute('SELECT EXISTS (SELECT 1\n FROM pg_class\n JOIN pg_partitioned_table\n ...
5,915,633,380,111,043,000
Function to determine whether a table is partitioned.
src/dirbs/utils.py
is_table_partitioned
nealmadhu/DIRBS-Core
python
def is_table_partitioned(conn, tbl_name): with conn.cursor() as cursor: cursor.execute('SELECT EXISTS (SELECT 1\n FROM pg_class\n JOIN pg_partitioned_table\n ON pg_partitio...
def __init__(self, msg): 'Constructor.' super().__init__('DB schema check failure: {0}'.format(msg))
-4,235,430,209,384,187,000
Constructor.
src/dirbs/utils.py
__init__
nealmadhu/DIRBS-Core
python
def __init__(self, msg): super().__init__('DB schema check failure: {0}'.format(msg))
def __init__(self, msg): 'Constructor.' super().__init__('DB role check failure: {0}'.format(msg))
4,190,923,084,369,278,000
Constructor.
src/dirbs/utils.py
__init__
nealmadhu/DIRBS-Core
python
def __init__(self, msg): super().__init__('DB role check failure: {0}'.format(msg))
def default(self, obj): 'Overrides JSONEncoder.default.' if isinstance(obj, datetime.date): return obj.isoformat() return JSONEncoder.default(self, obj)
6,396,015,363,180,159,000
Overrides JSONEncoder.default.
src/dirbs/utils.py
default
nealmadhu/DIRBS-Core
python
def default(self, obj): if isinstance(obj, datetime.date): return obj.isoformat() return JSONEncoder.default(self, obj)
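The default override above is the standard pattern for making dates JSON-serializable; a self-contained sketch of the same pattern (the class name here is illustrative, not from the source):

import datetime
import json
from json import JSONEncoder

class DateAwareEncoder(JSONEncoder):  # illustrative name
    def default(self, obj):
        # Dates become ISO-8601 strings; everything else defers to the base class.
        if isinstance(obj, datetime.date):
            return obj.isoformat()
        return JSONEncoder.default(self, obj)

print(json.dumps({'day': datetime.date(2020, 1, 3)}, cls=DateAwareEncoder))
# -> {"day": "2020-01-03"}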
def __init__(self, *args, **kwargs): 'Constructor.' super().__init__(*args, **kwargs) if (self.name is not None): self.itersize = 100000
1,331,113,109,789,704,000
Constructor.
src/dirbs/utils.py
__init__
nealmadhu/DIRBS-Core
python
def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if (self.name is not None): self.itersize = 100000
def execute(self, query, params=None): 'Overrides NamedTupleCursor.execute.' try: return super(LoggingNamedTupleCursor, self).execute(query, params) finally: if (self.query is not None): logging.getLogger('dirbs.sql').log(logging.DEBUG, str(self.query, encoding='utf-8'))
8,880,579,946,259,191,000
Overrides NamedTupleCursor.execute.
src/dirbs/utils.py
execute
nealmadhu/DIRBS-Core
python
def execute(self, query, params=None): try: return super(LoggingNamedTupleCursor, self).execute(query, params) finally: if (self.query is not None): logging.getLogger('dirbs.sql').log(logging.DEBUG, str(self.query, encoding='utf-8'))
def callproc(self, procname, params=None): 'Overrides NamedTupleCursor.callproc.' try: return super(LoggingNamedTupleCursor, self).callproc(procname, params) finally: if (self.query is not None): logging.getLogger('dirbs.sql').log(logging.DEBUG, str(self.query, encoding='utf-8'))
3,671,090,875,776,687,000
Overrides NamedTupleCursor.callproc.
src/dirbs/utils.py
callproc
nealmadhu/DIRBS-Core
python
def callproc(self, procname, params=None): try: return super(LoggingNamedTupleCursor, self).callproc(procname, params) finally: if (self.query is not None): logging.getLogger('dirbs.sql').log(logging.DEBUG, str(self.query, encoding='utf-8'))
def __enter__(self): 'Python context manager support for use in with statement (on enter).' self.start = time.time() return self
-5,373,665,672,555,719,000
Python context manager support for use in with statement (on enter).
src/dirbs/utils.py
__enter__
nealmadhu/DIRBS-Core
python
def __enter__(self): self.start = time.time() return self
def __exit__(self, *args): 'Python context manager support for use in with statement (on exit).' self.duration = int(((time.time() - self.start) * 1000))
2,386,409,817,478,090,000
Python context manager support for use in with statement (on exit).
src/dirbs/utils.py
__exit__
nealmadhu/DIRBS-Core
python
def __exit__(self, *args): self.duration = int(((time.time() - self.start) * 1000))
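The __enter__/__exit__ pair above forms a millisecond timer for use in a with statement; a minimal self-contained sketch, with an illustrative class name:

import time

class MillisTimer:  # illustrative name for the class these methods belong to
    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        # Duration in whole milliseconds, as in the __exit__ record above.
        self.duration = int((time.time() - self.start) * 1000)

with MillisTimer() as t:
    time.sleep(0.05)
print(t.duration)  # roughly 50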
def test_get_backupdir_path(tmp_path): 'Returns backups Path named for default working directory.' os.chdir(tmp_path) Path(CONFIGFILE_NAME).write_text('config stuff') backdir = '_backups' datestr = '2020-01-03_1646' workingdir = Path('agenda') workingdir.mkdir() os.chdir(workingdir) ...
5,251,406,337,909,453,000
Returns backups Path named for default working directory.
tests/returns/test_get_backupdir_path.py
test_get_backupdir_path
tombaker/mklists_old
python
def test_get_backupdir_path(tmp_path): os.chdir(tmp_path) Path(CONFIGFILE_NAME).write_text('config stuff') backdir = '_backups' datestr = '2020-01-03_1646' workingdir = Path('agenda') workingdir.mkdir() os.chdir(workingdir) actual = get_backupdir_path(backdir=backdir, now=datestr) ...
def test_get_backupdir_path_given_datadir(tmp_path): 'Returns backups Path named for specified working directory.' os.chdir(tmp_path) Path(CONFIGFILE_NAME).write_text('config stuff') workingdir = Path(tmp_path).joinpath('todolists/a') workingdir.mkdir(parents=True, exist_ok=True) workingdir_shor...
-8,056,138,804,211,980,000
Returns backups Path named for specified working directory.
tests/returns/test_get_backupdir_path.py
test_get_backupdir_path_given_datadir
tombaker/mklists_old
python
def test_get_backupdir_path_given_datadir(tmp_path): os.chdir(tmp_path) Path(CONFIGFILE_NAME).write_text('config stuff') workingdir = Path(tmp_path).joinpath('todolists/a') workingdir.mkdir(parents=True, exist_ok=True) workingdir_shortname_expected = 'todolists_a' backdir = '_backups' d...
def test_get_backupdir_path_given_datadir_with_slash(tmp_path): 'Returns backups Path named for specified working directory ending with slash.' os.chdir(tmp_path) Path(CONFIGFILE_NAME).write_text('config stuff') workingdir = Path(tmp_path).joinpath('todolists/a/') workingdir.mkdir(parents=True, exis...
-3,118,882,342,985,879,600
Returns backups Path named for specified working directory ending with slash.
tests/returns/test_get_backupdir_path.py
test_get_backupdir_path_given_datadir_with_slash
tombaker/mklists_old
python
def test_get_backupdir_path_given_datadir_with_slash(tmp_path): os.chdir(tmp_path) Path(CONFIGFILE_NAME).write_text('config stuff') workingdir = Path(tmp_path).joinpath('todolists/a/') workingdir.mkdir(parents=True, exist_ok=True) workingdir_shortname_expected = 'todolists_a' backdir = '_ba...
def test_get_backupdir_path_raise_exception_if_rootdir_not_found(tmp_path): 'Raises exception if no rootdir is found (rootdir is None).' os.chdir(tmp_path) with pytest.raises(SystemExit): get_backupdir_path()
1,102,953,894,251,443,700
Raises exception if no rootdir is found (rootdir is None).
tests/returns/test_get_backupdir_path.py
test_get_backupdir_path_raise_exception_if_rootdir_not_found
tombaker/mklists_old
python
def test_get_backupdir_path_raise_exception_if_rootdir_not_found(tmp_path): os.chdir(tmp_path) with pytest.raises(SystemExit): get_backupdir_path()
def __init__(self, host='127.0.0.1', port=9200): 'Create an Elasticsearch client.' super().__init__() self._error_container = {} self.user = current_app.config.get('ELASTIC_USER', 'user') self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass') self.ssl = current_app.config.get('ELASTIC...
3,857,736,299,582,721,500
Create an Elasticsearch client.
timesketch/lib/datastores/elastic.py
__init__
stevengoossensB/timesketch
python
def __init__(self, host='127.0.0.1', port=9200): super().__init__() self._error_container = {} self.user = current_app.config.get('ELASTIC_USER', 'user') self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass') self.ssl = current_app.config.get('ELASTIC_SSL', False) self.verify = ...
@staticmethod def _build_labels_query(sketch_id, labels): 'Build Elasticsearch query for Timesketch labels.\n\n Args:\n sketch_id: Integer of sketch primary key.\n labels: List of label names.\n\n Returns:\n Elasticsearch query as a dictionary.\n ' label_que...
-5,654,028,270,528,403,000
Build Elasticsearch query for Timesketch labels. Args: sketch_id: Integer of sketch primary key. labels: List of label names. Returns: Elasticsearch query as a dictionary.
timesketch/lib/datastores/elastic.py
_build_labels_query
stevengoossensB/timesketch
python
@staticmethod def _build_labels_query(sketch_id, labels): 'Build Elasticsearch query for Timesketch labels.\n\n Args:\n sketch_id: Integer of sketch primary key.\n labels: List of label names.\n\n Returns:\n Elasticsearch query as a dictionary.\n ' label_que...
@staticmethod def _build_events_query(events): 'Build Elasticsearch query for one or more document ids.\n\n Args:\n events: List of Elasticsearch document IDs.\n\n Returns:\n Elasticsearch query as a dictionary.\n ' events_list = [event['event_id'] for event in events]...
8,328,508,765,477,211,000
Build Elasticsearch query for one or more document ids. Args: events: List of Elasticsearch document IDs. Returns: Elasticsearch query as a dictionary.
timesketch/lib/datastores/elastic.py
_build_events_query
stevengoossensB/timesketch
python
@staticmethod def _build_events_query(events): 'Build Elasticsearch query for one or more document ids.\n\n Args:\n events: List of Elasticsearch document IDs.\n\n Returns:\n Elasticsearch query as a dictionary.\n ' events_list = [event['event_id'] for event in events]...
@staticmethod def _build_query_dsl(query_dsl, timeline_ids): 'Build Elastic Search DSL query by adding in timeline filtering.\n\n Args:\n query_dsl: A dict with the current query_dsl\n timeline_ids: Either a list of timeline IDs (int) or None.\n\n Returns:\n Elasticsea...
-3,096,211,081,514,344,400
Build Elastic Search DSL query by adding in timeline filtering. Args: query_dsl: A dict with the current query_dsl timeline_ids: Either a list of timeline IDs (int) or None. Returns: Elasticsearch query DSL as a dictionary.
timesketch/lib/datastores/elastic.py
_build_query_dsl
stevengoossensB/timesketch
python
@staticmethod def _build_query_dsl(query_dsl, timeline_ids): 'Build Elastic Search DSL query by adding in timeline filtering.\n\n Args:\n query_dsl: A dict with the current query_dsl\n timeline_ids: Either a list of timeline IDs (int) or None.\n\n Returns:\n Elasticsea...
@staticmethod def _convert_to_time_range(interval): 'Convert an interval timestamp into start and end dates.\n\n Args:\n interval: Time frame representation\n\n Returns:\n Start timestamp in string format.\n End timestamp in string format.\n ' TS_FORMAT = '%...
4,055,374,866,093,789,000
Convert an interval timestamp into start and end dates. Args: interval: Time frame representation Returns: Start timestamp in string format. End timestamp in string format.
timesketch/lib/datastores/elastic.py
_convert_to_time_range
stevengoossensB/timesketch
python
@staticmethod def _convert_to_time_range(interval): 'Convert an interval timestamp into start and end dates.\n\n Args:\n interval: Time frame representation\n\n Returns:\n Start timestamp in string format.\n End timestamp in string format.\n ' TS_FORMAT = '%...
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None, aggregations=None, timeline_ids=None): 'Build Elasticsearch DSL query.\n\n Args:\n sketch_id: Integer of sketch primary key\n query_string: Query string\n query_filter: Dictionary containing filters ...
8,189,367,095,946,872,000
Build Elasticsearch DSL query. Args: sketch_id: Integer of sketch primary key query_string: Query string query_filter: Dictionary containing filters to apply query_dsl: Dictionary containing Elasticsearch DSL query aggregations: Dict of Elasticsearch aggregations timeline_ids: Optional list of ...
timesketch/lib/datastores/elastic.py
build_query
stevengoossensB/timesketch
python
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None, aggregations=None, timeline_ids=None): 'Build Elasticsearch DSL query.\n\n Args:\n sketch_id: Integer of sketch primary key\n query_string: Query string\n query_filter: Dictionary containing filters ...
def search(self, sketch_id, query_string, query_filter, query_dsl, indices, count=False, aggregations=None, return_fields=None, enable_scroll=False, timeline_ids=None): 'Search ElasticSearch. This will take a query string from the UI\n together with a filter definition. Based on this it will execute the\n ...
-7,302,113,754,087,591,000
Search ElasticSearch. This will take a query string from the UI together with a filter definition. Based on this it will execute the search request on ElasticSearch and get result back. Args: sketch_id: Integer of sketch primary key query_string: Query string query_filter: Dictionary containing filters to ...
timesketch/lib/datastores/elastic.py
search
stevengoossensB/timesketch
python
def search(self, sketch_id, query_string, query_filter, query_dsl, indices, count=False, aggregations=None, return_fields=None, enable_scroll=False, timeline_ids=None): 'Search ElasticSearch. This will take a query string from the UI\n together with a filter definition. Based on this it will execute the\n ...
def search_stream(self, sketch_id=None, query_string=None, query_filter=None, query_dsl=None, indices=None, return_fields=None, enable_scroll=True, timeline_ids=None): 'Search ElasticSearch. This will take a query string from the UI\n together with a filter definition. Based on this it will execute the\n ...
-2,000,918,080,028,975,000
Search ElasticSearch. This will take a query string from the UI together with a filter definition. Based on this it will execute the search request on ElasticSearch and get result back. Args: sketch_id: Integer of sketch primary key query_string: Query string query_filter: Dictionary containing filters to...
timesketch/lib/datastores/elastic.py
search_stream
stevengoossensB/timesketch
python
def search_stream(self, sketch_id=None, query_string=None, query_filter=None, query_dsl=None, indices=None, return_fields=None, enable_scroll=True, timeline_ids=None): 'Search ElasticSearch. This will take a query string from the UI\n together with a filter definition. Based on this it will execute the\n ...
def get_filter_labels(self, sketch_id, indices): 'Aggregate labels for a sketch.\n\n Args:\n sketch_id: The Sketch ID\n indices: List of indices to aggregate on\n\n Returns:\n List with label names.\n ' max_labels = 10000 aggregation = {'aggs': {'nested'...
714,276,077,707,961,900
Aggregate labels for a sketch. Args: sketch_id: The Sketch ID indices: List of indices to aggregate on Returns: List with label names.
timesketch/lib/datastores/elastic.py
get_filter_labels
stevengoossensB/timesketch
python
def get_filter_labels(self, sketch_id, indices): 'Aggregate labels for a sketch.\n\n Args:\n sketch_id: The Sketch ID\n indices: List of indices to aggregate on\n\n Returns:\n List with label names.\n ' max_labels = 10000 aggregation = {'aggs': {'nested'...
def get_event(self, searchindex_id, event_id): 'Get one event from the datastore.\n\n Args:\n searchindex_id: String of ElasticSearch index id\n event_id: String of ElasticSearch event id\n\n Returns:\n Event document in JSON format\n ' METRICS['search_get_e...
4,496,177,488,117,825,500
Get one event from the datastore. Args: searchindex_id: String of ElasticSearch index id event_id: String of ElasticSearch event id Returns: Event document in JSON format
timesketch/lib/datastores/elastic.py
get_event
stevengoossensB/timesketch
python
def get_event(self, searchindex_id, event_id): 'Get one event from the datastore.\n\n Args:\n searchindex_id: String of ElasticSearch index id\n event_id: String of ElasticSearch event id\n\n Returns:\n Event document in JSON format\n ' METRICS['search_get_e...
def count(self, indices): 'Count number of documents.\n\n Args:\n indices: List of indices.\n\n Returns:\n Tuple containing number of documents and size on disk.\n ' if (not indices): return (0, 0) try: es_stats = self.client.indices.stats(index=ind...
6,281,411,345,004,881,000
Count number of documents. Args: indices: List of indices. Returns: Tuple containing number of documents and size on disk.
timesketch/lib/datastores/elastic.py
count
stevengoossensB/timesketch
python
def count(self, indices): 'Count number of documents.\n\n Args:\n indices: List of indices.\n\n Returns:\n Tuple containing number of documents and size on disk.\n ' if (not indices): return (0, 0) try: es_stats = self.client.indices.stats(index=ind...
def set_label(self, searchindex_id, event_id, event_type, sketch_id, user_id, label, toggle=False, remove=False, single_update=True): 'Set label on event in the datastore.\n\n Args:\n searchindex_id: String of ElasticSearch index id\n event_id: String of ElasticSearch event id\n ...
-3,900,731,638,094,000,600
Set label on event in the datastore. Args: searchindex_id: String of ElasticSearch index id event_id: String of ElasticSearch event id event_type: String of ElasticSearch document type sketch_id: Integer of sketch primary key user_id: Integer of user primary key label: String with the name of t...
timesketch/lib/datastores/elastic.py
set_label
stevengoossensB/timesketch
python
def set_label(self, searchindex_id, event_id, event_type, sketch_id, user_id, label, toggle=False, remove=False, single_update=True): 'Set label on event in the datastore.\n\n Args:\n searchindex_id: String of ElasticSearch index id\n event_id: String of ElasticSearch event id\n ...
def create_index(self, index_name=uuid4().hex, doc_type='generic_event', mappings=None): 'Create index with Timesketch settings.\n\n        Args:\n            index_name: Name of the index. Default is a generated UUID.\n            doc_type: Name of the document type. Default is generic_event.\n            mappings...
-8,882,026,856,317,529,000
Create index with Timesketch settings. Args: index_name: Name of the index. Default is a generated UUID. doc_type: Name of the document type. Default is generic_event. mappings: Optional dict with the document mapping for Elastic. Returns: Index name in string format. Document type in string forma...
timesketch/lib/datastores/elastic.py
create_index
stevengoossensB/timesketch
python
def create_index(self, index_name=uuid4().hex, doc_type='generic_event', mappings=None): 'Create index with Timesketch settings.\n\n        Args:\n            index_name: Name of the index. Default is a generated UUID.\n            doc_type: Name of the document type. Default is generic_event.\n            mappings...
def delete_index(self, index_name): 'Delete Elasticsearch index.\n\n Args:\n index_name: Name of the index to delete.\n ' if self.client.indices.exists(index_name): try: self.client.indices.delete(index=index_name) except ConnectionError as e: rai...
8,613,442,976,308,407,000
Delete Elasticsearch index. Args: index_name: Name of the index to delete.
timesketch/lib/datastores/elastic.py
delete_index
stevengoossensB/timesketch
python
def delete_index(self, index_name): 'Delete Elasticsearch index.\n\n Args:\n index_name: Name of the index to delete.\n ' if self.client.indices.exists(index_name): try: self.client.indices.delete(index=index_name) except ConnectionError as e: rai...
def import_event(self, index_name, event_type, event=None, event_id=None, flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None): 'Add event to Elasticsearch.\n\n Args:\n index_name: Name of the index in Elasticsearch\n event_type: Type of event (e.g. plaso_event)\n event: ...
8,753,995,590,469,953,000
Add event to Elasticsearch. Args: index_name: Name of the index in Elasticsearch event_type: Type of event (e.g. plaso_event) event: Event dictionary event_id: Event Elasticsearch ID flush_interval: Number of events to queue up before indexing timeline_id: Optional ID number of a Timeline objec...
timesketch/lib/datastores/elastic.py
import_event
stevengoossensB/timesketch
python
def import_event(self, index_name, event_type, event=None, event_id=None, flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None): 'Add event to Elasticsearch.\n\n Args:\n index_name: Name of the index in Elasticsearch\n event_type: Type of event (e.g. plaso_event)\n event: ...
def flush_queued_events(self, retry_count=0): 'Flush all queued events.\n\n Returns:\n dict: A dict object that contains the number of events\n that were sent to Elastic as well as information\n on whether there were any errors, and what the\n details o...
-8,373,796,784,467,723,000
Flush all queued events. Returns: dict: A dict object that contains the number of events that were sent to Elastic as well as information on whether there were any errors, and what the details of these errors if any. retry_count: optional int indicating whether this is a retry.
timesketch/lib/datastores/elastic.py
flush_queued_events
stevengoossensB/timesketch
python
def flush_queued_events(self, retry_count=0): 'Flush all queued events.\n\n Returns:\n dict: A dict object that contains the number of events\n that were sent to Elastic as well as information\n on whether there were any errors, and what the\n details o...
@property def version(self): 'Get Elasticsearch version.\n\n Returns:\n Version number as a string.\n ' version_info = self.client.info().get('version') return version_info.get('number')
2,982,666,308,491,461,600
Get Elasticsearch version. Returns: Version number as a string.
timesketch/lib/datastores/elastic.py
version
stevengoossensB/timesketch
python
@property def version(self): 'Get Elasticsearch version.\n\n Returns:\n Version number as a string.\n ' version_info = self.client.info().get('version') return version_info.get('number')
def render(game, current): ' Displays the current room ' print(('You are in the ' + game['rooms'][current]['name'])) print(game['rooms'][current]['desc'])
3,437,695,610,613,276,000
Displays the current room
main.py
render
BraffordHunter/03-Text-Adventure-2
python
def render(game, current):     print(('You are in the ' + game['rooms'][current]['name']))     print(game['rooms'][current]['desc'])
def getInput(): ' Asks the user for input and returns a stripped, uppercase version of what they typed ' response = input('What would you like to do? ').strip().upper() return response
-8,819,435,133,751,094,000
Asks the user for input and returns a stripped, uppercase version of what they typed
main.py
getInput
BraffordHunter/03-Text-Adventure-2
python
def getInput():     response = input('What would you like to do? ').strip().upper()     return response
def update(response, game, current): ' Process the input and update the state of the world ' for e in game['rooms'][current]['exits']: if (response == e['verb']): current = e['target'] return current
4,104,156,395,958,741,000
Process the input and update the state of the world
main.py
update
BraffordHunter/03-Text-Adventure-2
python
def update(response, game, current):     for e in game['rooms'][current]['exits']:         if (response == e['verb']):             current = e['target']     return current
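render, getInput, and update together form a classic read-eval game loop; a hypothetical wiring of the three functions above, with a tiny game dict whose shape is inferred from how render() and update() index it:

# Assumes render, getInput, and update from the records above are in scope.
game = {'rooms': {
    'start': {'name': 'Foyer', 'desc': 'A small entry room.',
              'exits': [{'verb': 'NORTH', 'target': 'hall'}]},
    'hall': {'name': 'Hall', 'desc': 'A long hallway.',
             'exits': [{'verb': 'SOUTH', 'target': 'start'}]}}}

current = 'start'
while True:
    render(game, current)
    response = getInput()
    if response == 'QUIT':
        break
    current = update(response, game, current)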
def _post_clients(self, client, user_ids, token_generator): '\n Helper function that creates (and tests creating) a collection of Clients.\n ' headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'} client_ids = [] for (i, api_client) ...
-5,197,643,749,388,798,000
Helper function that creates (and tests creating) a collection of Clients.
tests/api/test_all_apis.py
_post_clients
brighthive/authserver
python
def _post_clients(self, client, user_ids, token_generator):     headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}     client_ids = []     for (i, api_client) in enumerate(CLIENTS):         api_client['user_id'] = user_ids[i]    ...
def multicrop_collate_fn(samples): 'Multi-crop collate function for VISSL integration.\n\n Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT\n ' result = vissl_collate_helper(samples) inputs = [[] for _ in range(len(samples[0][DefaultDataKeys.INPUT]))] for...
4,826,671,954,298,192,000
Multi-crop collate function for VISSL integration. Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT
flash/image/embedding/vissl/transforms/utilities.py
multicrop_collate_fn
Darktex/lightning-flash
python
def multicrop_collate_fn(samples): 'Multi-crop collate function for VISSL integration.\n\n Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT\n ' result = vissl_collate_helper(samples) inputs = [[] for _ in range(len(samples[0][DefaultDataKeys.INPUT]))] for...
def simclr_collate_fn(samples): 'Multi-crop collate function for VISSL integration.\n\n Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT\n ' result = vissl_collate_helper(samples) inputs = [] num_views = len(samples[0][DefaultDataKeys.INPUT]) view_idx...
1,590,668,760,028,334,600
Multi-crop collate function for VISSL integration. Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT
flash/image/embedding/vissl/transforms/utilities.py
simclr_collate_fn
Darktex/lightning-flash
python
def simclr_collate_fn(samples): 'Multi-crop collate function for VISSL integration.\n\n Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT\n ' result = vissl_collate_helper(samples) inputs = [] num_views = len(samples[0][DefaultDataKeys.INPUT]) view_idx...
def moco_collate_fn(samples): 'MOCO collate function for VISSL integration.\n\n Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT\n ' result = vissl_collate_helper(samples) inputs = [] for batch_ele in samples: inputs.append(torch.stack(batch_ele[D...
-102,752,008,453,979,340
MOCO collate function for VISSL integration. Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT
flash/image/embedding/vissl/transforms/utilities.py
moco_collate_fn
Darktex/lightning-flash
python
def moco_collate_fn(samples): 'MOCO collate function for VISSL integration.\n\n Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT\n ' result = vissl_collate_helper(samples) inputs = [] for batch_ele in samples: inputs.append(torch.stack(batch_ele[D...
@abstractmethod def __call__(self, location): 'Evaluate the time-continuous posterior for a given location\n\n Parameters\n ----------\n location : float\n Location, or time, at which to evaluate the posterior.\n\n Returns\n -------\n rv : `RandomVariable`\n ...
2,588,504,303,512,299,000
Evaluate the time-continuous posterior for a given location Parameters ---------- location : float Location, or time, at which to evaluate the posterior. Returns ------- rv : `RandomVariable`
src/probnum/filtsmooth/filtsmoothposterior.py
__call__
admdev8/probnum
python
@abstractmethod def __call__(self, location): 'Evaluate the time-continuous posterior for a given location\n\n Parameters\n ----------\n location : float\n Location, or time, at which to evaluate the posterior.\n\n Returns\n -------\n rv : `RandomVariable`\n ...
@abstractmethod def __len__(self): 'Length of the discrete-time solution\n\n Corresponds to the number of filtering/smoothing steps\n ' raise NotImplementedError
7,496,453,161,260,714,000
Length of the discrete-time solution Corresponds to the number of filtering/smoothing steps
src/probnum/filtsmooth/filtsmoothposterior.py
__len__
admdev8/probnum
python
@abstractmethod def __len__(self): 'Length of the discrete-time solution\n\n Corresponds to the number of filtering/smoothing steps\n ' raise NotImplementedError
@abstractmethod def __getitem__(self, idx): 'Return the corresponding index/slice of the discrete-time solution' raise NotImplementedError
-1,963,588,614,465,622,800
Return the corresponding index/slice of the discrete-time solution
src/probnum/filtsmooth/filtsmoothposterior.py
__getitem__
admdev8/probnum
python
@abstractmethod def __getitem__(self, idx): raise NotImplementedError
def sample(self, locations=None, size=()): '\n Draw samples from the filtering/smoothing posterior.\n\n If nothing is specified, a single sample is drawn (supported on self.locations).\n If locations are specified, the samples are drawn on those locations.\n If size is specified, more th...
4,466,780,101,186,818,600
Draw samples from the filtering/smoothing posterior. If nothing is specified, a single sample is drawn (supported on self.locations). If locations are specified, the samples are drawn on those locations. If size is specified, more than a single sample is drawn. Parameters ---------- locations : array_like, optional ...
src/probnum/filtsmooth/filtsmoothposterior.py
sample
admdev8/probnum
python
def sample(self, locations=None, size=()): '\n Draw samples from the filtering/smoothing posterior.\n\n If nothing is specified, a single sample is drawn (supported on self.locations).\n If locations are specified, the samples are drawn on those locations.\n If size is specified, more th...
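Taken together, __call__, __len__, __getitem__, and sample describe probnum's abstract posterior interface; a toy concrete sketch under simplifying assumptions (plain numpy arrays stand in for RandomVariable objects, and interpolation stands in for real posterior evaluation):

import numpy as np

class ToyDiscretePosterior:
    # Toy stand-in: stores locations/states and interpolates between them.
    def __init__(self, locations, states):
        self.locations = np.asarray(locations)
        self.states = np.asarray(states)

    def __call__(self, location):
        # Evaluate the "posterior" at an arbitrary time by interpolation.
        return np.interp(location, self.locations, self.states)

    def __len__(self):
        # Number of filtering/smoothing steps.
        return len(self.locations)

    def __getitem__(self, idx):
        return self.states[idx]

post = ToyDiscretePosterior([0.0, 1.0, 2.0], [0.0, 2.0, 4.0])
print(len(post), post(0.5), post[1])  # 3 1.0 2.0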
def create_app(config_object='code_runner.settings'): 'Creates and returns flask app instance as well as register all the extensions and blueprints' app = Flask(__name__) register_environment() app.config.from_object(config_object) register_blueprints(app=app) register_views(app=app) registe...
3,818,114,167,602,064,400
Creates and returns flask app instance as well as register all the extensions and blueprints
code_runner/app.py
create_app
thephilomaths/code-runner-as-a-service
python
def create_app(config_object='code_runner.settings'): app = Flask(__name__) register_environment() app.config.from_object(config_object) register_blueprints(app=app) register_views(app=app) register_extensions(app=app) configure_logger(app=app) return app
def register_blueprints(app): 'Registers the blueprints' app.register_blueprint(code.views.blueprint)
-6,392,716,567,037,836,000
Registers the blueprints
code_runner/app.py
register_blueprints
thephilomaths/code-runner-as-a-service
python
def register_blueprints(app): app.register_blueprint(code.views.blueprint)
def register_views(app): 'Registers the pluggable views' run_view = code.views.RunCode.as_view('run') run_async_view = code.views.RunCodeAsync.as_view('run-async') app.add_url_rule('/run', view_func=run_view, methods=['POST']) app.add_url_rule('/run-async', view_func=run_async_view, methods=['POST']...
2,637,482,684,825,603,000
Registers the pluggable views
code_runner/app.py
register_views
thephilomaths/code-runner-as-a-service
python
def register_views(app): run_view = code.views.RunCode.as_view('run') run_async_view = code.views.RunCodeAsync.as_view('run-async') app.add_url_rule('/run', view_func=run_view, methods=['POST']) app.add_url_rule('/run-async', view_func=run_async_view, methods=['POST']) app.add_url_rule('/get-re...
def register_extensions(app): 'Register Flask extensions' with app.app_context(): db.init_app(app=app) db.create_all() limiter.init_app(app=app)
1,989,962,585,448,259,600
Register Flask extensions
code_runner/app.py
register_extensions
thephilomaths/code-runner-as-a-service
python
def register_extensions(app): with app.app_context(): db.init_app(app=app) db.create_all() limiter.init_app(app=app)
def register_environment(): 'Register environment' dotenv_path = (Path('./') / '.env.development.local') load_dotenv(dotenv_path=dotenv_path)
4,229,727,122,486,207,000
Register environment
code_runner/app.py
register_environment
thephilomaths/code-runner-as-a-service
python
def register_environment(): dotenv_path = (Path('./') / '.env.development.local') load_dotenv(dotenv_path=dotenv_path)
def configure_logger(app): 'Configure loggers.' handler = logging.StreamHandler(sys.stdout) if (not app.logger.handlers): app.logger.addHandler(handler)
3,422,815,523,629,059,000
Configure loggers.
code_runner/app.py
configure_logger
thephilomaths/code-runner-as-a-service
python
def configure_logger(app): handler = logging.StreamHandler(sys.stdout) if (not app.logger.handlers): app.logger.addHandler(handler)
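A minimal entry-point sketch for the app factory above, assuming the code_runner package from these records is importable:

# Hypothetical entry point; assumes code_runner is installed/importable.
from code_runner.app import create_app

app = create_app()

if __name__ == '__main__':
    app.run(host='127.0.0.1', port=5000, debug=True)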
def computeLPPTransitMetric(data, mapInfo): '\n This function takes a data class with light curve info\n and the mapInfo with information about the mapping to use.\n It then returns a lpp metric value.\n ' (binFlux, binPhase) = foldBinLightCurve(data, mapInfo.ntrfr, mapInfo.npts) (rawTLpp, trans...
7,073,059,742,202,364,000
This function takes a data class with light curve info and the mapInfo with information about the mapping to use. It then returns a lpp metric value.
lpp/newlpp/lppTransform.py
computeLPPTransitMetric
barentsen/dave
python
def computeLPPTransitMetric(data, mapInfo): '\n This function takes a data class with light curve info\n and the mapInfo with information about the mapping to use.\n It then returns a lpp metric value.\n ' (binFlux, binPhase) = foldBinLightCurve(data, mapInfo.ntrfr, mapInfo.npts) (rawTLpp, trans...
def runningMedian(t, y, dt, runt): '\n Take a running median of size dt\n Return values at times given in runt\n ' newy = np.zeros(len(y)) newt = np.zeros(len(y)) srt = np.argsort(t) newt = t[srt] newy = y[srt] runy = [] for i in range(len(runt)): tmp = [] for j ...
-5,922,158,501,723,082,000
Take a running median of size dt Return values at times given in runt
lpp/newlpp/lppTransform.py
runningMedian
barentsen/dave
python
def runningMedian(t, y, dt, runt): '\n Take a running median of size dt\n Return values at times given in runt\n ' newy = np.zeros(len(y)) newt = np.zeros(len(y)) srt = np.argsort(t) newt = t[srt] newy = y[srt] runy = [] for i in range(len(runt)): tmp = [] for j ...
def foldBinLightCurve(data, ntrfr, npts): '\n    Fold and bin light curve for input to LPP metric calculation\n    \n    data contains time, tzero, dur, period, mes and flux (centered around zero)\n    \n    ntrfr -- number of transit fraction for binning around transit ~1.5\n    npts -- number of points in the final...
281,194,665,893,503,000
Fold and bin light curve for input to LPP metric calculation data contains time, tzero, dur, period, mes and flux (centered around zero) ntrfr -- number of transit fraction for binning around transit ~1.5 npts -- number of points in the final binning.
lpp/newlpp/lppTransform.py
foldBinLightCurve
barentsen/dave
python
def foldBinLightCurve(data, ntrfr, npts): '\n    Fold and bin light curve for input to LPP metric calculation\n    \n    data contains time, tzero, dur, period, mes and flux (centered around zero)\n    \n    ntrfr -- number of transit fraction for binning around transit ~1.5\n    npts -- number of points in the final...
def computeRawLPPTransitMetric(binFlux, mapInfo): '\n Perform the matrix transformation with LPP\n Do the knn test to get a raw LPP transit metric number.\n ' Yorig = mapInfo.YmapMapped lpp = LocalityPreservingProjection(n_components=mapInfo.n_dim) lpp.projection_ = mapInfo.YmapM normBinFlu...
8,917,899,535,312,045,000
Perform the matrix transformation with LPP Do the knn test to get a raw LPP transit metric number.
lpp/newlpp/lppTransform.py
computeRawLPPTransitMetric
barentsen/dave
python
def computeRawLPPTransitMetric(binFlux, mapInfo): '\n Perform the matrix transformation with LPP\n Do the knn test to get a raw LPP transit metric number.\n ' Yorig = mapInfo.YmapMapped lpp = LocalityPreservingProjection(n_components=mapInfo.n_dim) lpp.projection_ = mapInfo.YmapM normBinFlu...
def knnDistance_fromKnown(knownTransits, new, knn): '\n For a group of known transits and a new one.\n Use knn to determine how close the new one is to the known transits\n using knn minkowski p = 3 ()\n Using scipy signal to do this.\n ' nbrs = NearestNeighbors(n_neighbors=int(knn), algorithm='k...
-6,694,463,733,298,679,000
For a group of known transits and a new one. Use knn to determine how close the new one is to the known transits using knn minkowski p = 3. Using scipy signal to do this.
lpp/newlpp/lppTransform.py
knnDistance_fromKnown
barentsen/dave
python
def knnDistance_fromKnown(knownTransits, new, knn): '\n For a group of known transits and a new one.\n Use knn to determine how close the new one is to the known transits\n using knn minkowski p = 3 ()\n Using scipy signal to do this.\n ' nbrs = NearestNeighbors(n_neighbors=int(knn), algorithm='k...
def periodNormalLPPTransitMetric(rawTLpp, newPerMes, mapInfo): '\n Normalize the rawTransitMetric value by those with the closest period.\n This part removes the period dependence of the metric at short periods.\n Plus it makes a value near one be the threshold between good and bad.\n \n newPerMes is...
-4,829,747,934,316,969,000
Normalize the rawTransitMetric value by those with the closest period. This part removes the period dependence of the metric at short periods. Plus it makes a value near one be the threshold between good and bad. newPerMes is the np.array([period, mes]) of the new sample
lpp/newlpp/lppTransform.py
periodNormalLPPTransitMetric
barentsen/dave
python
def periodNormalLPPTransitMetric(rawTLpp, newPerMes, mapInfo): '\n Normalize the rawTransitMetric value by those with the closest period.\n This part removes the period dependence of the metric at short periods.\n Plus it makes a value near one be the threshold between good and bad.\n \n newPerMes is...
def lpp_onetransit(tcedata, mapInfo, ntransit): '\n Chop down the full time series to one orbital period.\n Then gather the lpp value for that one transit.\n ' startTime = (tcedata.time[0] + (ntransit * tcedata.period)) endTime = ((tcedata.time[0] + ((ntransit + 1) * tcedata.period)) + (3 / 24.0)) ...
-5,569,252,872,100,213,000
Chop down the full time series to one orbital period. Then gather the lpp value for that one transit.
lpp/newlpp/lppTransform.py
lpp_onetransit
barentsen/dave
python
def lpp_onetransit(tcedata, mapInfo, ntransit): '\n Chop down the full time series to one orbital period.\n Then gather the lpp value for that one transit.\n ' startTime = (tcedata.time[0] + (ntransit * tcedata.period)) endTime = ((tcedata.time[0] + ((ntransit + 1) * tcedata.period)) + (3 / 24.0)) ...
def lpp_averageIndivTransit(tcedata, mapInfo): '\n \n Create the loop over individual transits and return \n array normalized lpp values, mean and std.\n Input TCE object and mapInfo object.\n \n It is unclear that this individual transit approach\n separates out several new false positives.\n ...
4,539,365,381,711,080,400
Create the loop over individual transits and return array normalized lpp values, mean and std. Input TCE object and mapInfo object. It is unclear whether this individual transit approach separates out several new false positives. It probably would require retuning for low SNR signals.
lpp/newlpp/lppTransform.py
lpp_averageIndivTransit
barentsen/dave
python
def lpp_averageIndivTransit(tcedata, mapInfo): '\n \n Create the loop over individual transits and return \n array normalized lpp values, mean and std.\n Input TCE object and mapInfo object.\n \n It is unclear that this individual transit approach\n separates out several new false positives.\n ...
def get_pkg_details(in_file): 'For the new pkg format, we return the size and hashes of the inner pkg part of the file' for ext in SUPPORTED_EXTENSIONS: if in_file.endswith(ext): details = SUPPORTED_EXTENSIONS[ext].get_pkg_details(in_file) break else: raise ValueError...
-1,385,206,209,404,265,200
For the new pkg format, we return the size and hashes of the inner pkg part of the file
src/conda_package_handling/api.py
get_pkg_details
katietz/conda-package-handling
python
def get_pkg_details(in_file): for ext in SUPPORTED_EXTENSIONS: if in_file.endswith(ext): details = SUPPORTED_EXTENSIONS[ext].get_pkg_details(in_file) break else: raise ValueError("Don't know what to do with file {}".format(in_file)) return details
def __init__(self, cfg, vis_highest_scoring=True, output_dir='./vis'): '\n Args:\n cfg (CfgNode):\n vis_highest_scoring (bool): If set to True visualizes only\n the highest scoring prediction\n ' self.metadata = MetadataCatalog.get(cfg.D...
281,400,471,534,412,000
Args: cfg (CfgNode): vis_highest_scoring (bool): If set to True visualizes only the highest scoring prediction
demo/demo.py
__init__
ishanic/MeshRCNN-keypoints
python
def __init__(self, cfg, vis_highest_scoring=True, output_dir='./vis'): '\n Args:\n cfg (CfgNode):\n vis_highest_scoring (bool): If set to True visualizes only\n the highest scoring prediction\n ' self.metadata = MetadataCatalog.get(cfg.D...
def run_on_image(self, image, focal_length=10.0): '\n Args:\n image (np.ndarray): an image of shape (H, W, C) (in BGR order).\n This is the format used by OpenCV.\n focal_length (float): the focal_length of the image\n\n Returns:\n predictions (dict): th...
7,762,340,422,223,548,000
Args: image (np.ndarray): an image of shape (H, W, C) (in BGR order). This is the format used by OpenCV. focal_length (float): the focal_length of the image Returns: predictions (dict): the output of the model.
demo/demo.py
run_on_image
ishanic/MeshRCNN-keypoints
python
def run_on_image(self, image, focal_length=10.0): '\n Args:\n image (np.ndarray): an image of shape (H, W, C) (in BGR order).\n This is the format used by OpenCV.\n focal_length (float): the focal_length of the image\n\n Returns:\n predictions (dict): th...
def __init__(self, host, port=9000, schema=hdfs_schema): 'Only host and port are needed for now.' self.host = host     self.port = port     self.schema = schema     self._path = '/'     self._status = None
4,427,913,885,585,468,000
Only host and port are needed for now.
hdfshell/cluster.py
__init__
alingse/hdfshell
python
def __init__(self, host, port=9000, schema=hdfs_schema):     self.host = host     self.port = port     self.schema = schema     self._path = '/'     self._status = None
@property def uri_head(self): 'Return the head of the URI.' head = (self.schema + '{}:{}'.format(self.host, self.port))     return head
-5,477,964,233,584,933,000
Return the head of the URI.
hdfshell/cluster.py
uri_head
alingse/hdfshell
python
@property def uri_head(self):     head = (self.schema + '{}:{}'.format(self.host, self.port))     return head
@property def uri(self): 'Return the current path as a URI.' _uri = (self.schema + '{}:{}{}'.format(self.host, self.port, self._path))     return _uri
-1,669,485,123,415,073,300
Return the current path as a URI.
hdfshell/cluster.py
uri
alingse/hdfshell
python
@property def uri(self):     _uri = (self.schema + '{}:{}{}'.format(self.host, self.port, self._path))     return _uri
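Consolidated, the three hdfshell snippets above amount to a small URI-building class; a self-contained sketch with the docstrings translated, assuming hdfs_schema is the 'hdfs://' prefix:

hdfs_schema = 'hdfs://'  # assumed value of the module-level default

class Cluster:  # name taken from hdfshell/cluster.py
    def __init__(self, host, port=9000, schema=hdfs_schema):
        'Only host and port are needed for now.'
        self.host = host
        self.port = port
        self.schema = schema
        self._path = '/'
        self._status = None

    @property
    def uri_head(self):
        'Return the head of the URI.'
        return self.schema + '{}:{}'.format(self.host, self.port)

    @property
    def uri(self):
        'Return the current path as a URI.'
        return self.schema + '{}:{}{}'.format(self.host, self.port, self._path)

print(Cluster('namenode').uri)  # hdfs://namenode:9000/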
@click.command(epilog='\x08\nExamples:\n bdt gitlab update-bob -vv\n bdt gitlab update-bob -vv --stable\n') @click.option('--stable/--beta', help='To use the stable versions in the list and pin packages.') @verbosity_option() @bdt.raise_on_error def update_bob(stable): 'Updates the Bob meta package with new p...
-5,205,953,134,817,273,000
Updates the Bob meta package with new packages.
bob/devtools/scripts/update_bob.py
update_bob
bioidiap/bob.devtools
python
@click.command(epilog='\x08\nExamples:\n bdt gitlab update-bob -vv\n bdt gitlab update-bob -vv --stable\n') @click.option('--stable/--beta', help='To use the stable versions in the list and pin packages.') @verbosity_option() @bdt.raise_on_error def update_bob(stable): import tempfile from ..ci impor...
def _testUploadFileToItem(self, item, name, user, contents): '\n Uploads a non-empty file to the server.\n ' resp = self.request(path='/file', method='POST', user=user, params={'parentType': 'item', 'parentId': item['_id'], 'name': name, 'size': len(contents)}) self.assertStatusOk(resp) up...
7,241,848,628,900,935,000
Uploads a non-empty file to the server.
tests/cases/item_test.py
_testUploadFileToItem
RemiCecchinato/girder
python
def _testUploadFileToItem(self, item, name, user, contents):     resp = self.request(path='/file', method='POST', user=user, params={'parentType': 'item', 'parentId': item['_id'], 'name': name, 'size': len(contents)})     self.assertStatusOk(resp)     uploadId = resp.json['_id']     resp = se...
def _testDownloadSingleFileItem(self, item, user, contents): '\n Downloads a single-file item from the server\n :param item: The item to download.\n :type item: dict\n :param contents: The expected contents.\n :type contents: str\n ' resp = self.request(path=('/item/%s/...
-7,198,199,060,866,721,000
Downloads a single-file item from the server :param item: The item to download. :type item: dict :param contents: The expected contents. :type contents: str
tests/cases/item_test.py
_testDownloadSingleFileItem
RemiCecchinato/girder
python
def _testDownloadSingleFileItem(self, item, user, contents): '\n Downloads a single-file item from the server\n :param item: The item to download.\n :type item: dict\n :param contents: The expected contents.\n :type contents: str\n ' resp = self.request(path=('/item/%s/...
def testItemCrud(self): '\n Test Create, Read, Update, and Delete of items.\n ' self.ensureRequiredParams(path='/item', method='POST', required=('folderId',), user=self.users[1]) params = {'name': ' ', 'description': ' a description ', 'folderId': self.publicFolder['_id']} resp = self.requ...
648,732,367,353,630,800
Test Create, Read, Update, and Delete of items.
tests/cases/item_test.py
testItemCrud
RemiCecchinato/girder
python
def testItemCrud(self):     self.ensureRequiredParams(path='/item', method='POST', required=('folderId',), user=self.users[1])     params = {'name': ' ', 'description': ' a description ', 'folderId': self.publicFolder['_id']}     resp = self.request(path='/item', method='POST', params=params,...
def testItemMetadataCrud(self): '\n Test CRUD of metadata.\n ' params = {'name': 'item with metadata', 'description': ' a description ', 'folderId': self.privateFolder['_id']} resp = self.request(path='/item', method='POST', params=params, user=self.users[0]) self.assertStatusOk(resp) ...
-7,451,827,177,975,639,000
Test CRUD of metadata.
tests/cases/item_test.py
testItemMetadataCrud
RemiCecchinato/girder
python
def testItemMetadataCrud(self):     params = {'name': 'item with metadata', 'description': ' a description ', 'folderId': self.privateFolder['_id']}     resp = self.request(path='/item', method='POST', params=params, user=self.users[0])     self.assertStatusOk(resp)     item = resp.json     r...
def testItemFiltering(self): '\n Test filtering private metadata from items.\n ' params = {'name': 'item with metadata', 'description': ' a description ', 'folderId': self.privateFolder['_id']} resp = self.request(path='/item', method='POST', params=params, user=self.users[0]) self.assertS...
-5,949,870,336,204,574,000
Test filtering private metadata from items.
tests/cases/item_test.py
testItemFiltering
RemiCecchinato/girder
python
def testItemFiltering(self):     params = {'name': 'item with metadata', 'description': ' a description ', 'folderId': self.privateFolder['_id']}     resp = self.request(path='/item', method='POST', params=params, user=self.users[0])     self.assertStatusOk(resp)     item = Item().load(resp.j...
def testLazyFieldComputation(self): '\n        Demonstrate that an item that is saved in the database without\n        derived fields (like lowerName or baseParentId) gets those values\n        computed at load() time.\n        ' item = Item().createItem('My Item Name', creator=self.users[0], folder=self.publicF...
5,206,396,976,131,922,000
Demonstrate that an item that is saved in the database without derived fields (like lowerName or baseParentId) gets those values computed at load() time.
tests/cases/item_test.py
testLazyFieldComputation
RemiCecchinato/girder
python
def testLazyFieldComputation(self): '\n        Demonstrate that an item that is saved in the database without\n        derived fields (like lowerName or baseParentId) gets those values\n        computed at load() time.\n        ' item = Item().createItem('My Item Name', creator=self.users[0], folder=self.publicF...
def testParentsToRoot(self): '\n Demonstrate that forcing parentsToRoot will cause it to skip the\n filtering process.\n ' item = Item().createItem('My Item Name', creator=self.users[0], folder=self.publicFolder) parents = Item().parentsToRoot(item, force=True) for parent in parents...
4,241,321,206,045,767,700
Demonstrate that forcing parentsToRoot will cause it to skip the filtering process.
tests/cases/item_test.py
testParentsToRoot
RemiCecchinato/girder
python
def testParentsToRoot(self): '\n Demonstrate that forcing parentsToRoot will cause it to skip the\n filtering process.\n ' item = Item().createItem('My Item Name', creator=self.users[0], folder=self.publicFolder) parents = Item().parentsToRoot(item, force=True) for parent in parents...
def testCookieAuth(self): "\n We make sure a cookie is sufficient for authentication for the item\n download endpoint. Also, while we're at it, we make sure it's not\n sufficient for other endpoints.\n " item = self._createItem(self.privateFolder['_id'], 'cookie_auth_download', '', s...
-6,878,945,270,122,662,000
We make sure a cookie is sufficient for authentication for the item download endpoint. Also, while we're at it, we make sure it's not sufficient for other endpoints.
tests/cases/item_test.py
testCookieAuth
RemiCecchinato/girder
python
def testCookieAuth(self): "\n We make sure a cookie is sufficient for authentication for the item\n download endpoint. Also, while we're at it, we make sure it's not\n sufficient for other endpoints.\n " item = self._createItem(self.privateFolder['_id'], 'cookie_auth_download', , sel...
def subtract_signal(t, signal, fit_params=3): '\n\n Returns the subtracted signal\n\n ' coef = np.polynomial.polynomial.polyfit(t, signal, (fit_params - 1)) delta_signal = np.einsum('n,nj->j', coef, np.asarray([np.power(t, n) for n in range(fit_params)])) ht = (signal - delta_signal) return ht
-3,028,313,951,607,885,000
Returns the subtracted signal
src/signals.py
subtract_signal
delos/dm-pta-mc
python
def subtract_signal(t, signal, fit_params=3):     coef = np.polynomial.polynomial.polyfit(t, signal, (fit_params - 1))     delta_signal = np.einsum('n,nj->j', coef, np.asarray([np.power(t, n) for n in range(fit_params)]))     ht = (signal - delta_signal)     return ht
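A quick usage sketch for subtract_signal above: remove a quadratic trend from synthetic data (the function is restated so the snippet runs standalone):

import numpy as np

def subtract_signal(t, signal, fit_params=3):
    # As in the record above: fit a degree (fit_params - 1) polynomial,
    # evaluate it at t, and return the residual.
    coef = np.polynomial.polynomial.polyfit(t, signal, fit_params - 1)
    delta = np.einsum('n,nj->j', coef,
                      np.asarray([np.power(t, n) for n in range(fit_params)]))
    return signal - delta

t = np.linspace(0.0, 10.0, 200)
signal = 0.5 + 0.3 * t - 0.02 * t ** 2 + 0.01 * np.sin(2 * np.pi * t)
residual = subtract_signal(t, signal)
print(round(float(residual.std()), 4))  # ~0.007: trend removed, sine survives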
def dphi_dop_chunked(t, profile, r0_vec, v_vec, d_hat, use_form=False, use_chunk=False, chunk_size=10000, verbose=False, form_fun=None, interp_table=None, time_end=np.inf): '\n\n Compute dphi but in chunks over the subhalos, use when Nt x N is too large an array to\n store in memory\n\n ' num_objects =...
-6,609,646,367,769,294,000
Compute dphi but in chunks over the subhalos, use when Nt x N is too large an array to store in memory
src/signals.py
dphi_dop_chunked
delos/dm-pta-mc
python
def dphi_dop_chunked(t, profile, r0_vec, v_vec, d_hat, use_form=False, use_chunk=False, chunk_size=10000, verbose=False, form_fun=None, interp_table=None, time_end=np.inf): '\n\n Compute dphi but in chunks over the subhalos, use when Nt x N is too large an array to\n store in memory\n\n ' num_objects =...
def dphi_dop_chunked_vec(t, profile, r0_vec, v_vec, use_form=False, use_chunk=False, chunk_size=10000, verbose=False, form_fun=None, interp_table=None, time_end=np.inf): '\n\n Compute dphi but in chunks over the subhalos, use when Nt x N is too large an array to\n store in memory\n\n ' num_objects = le...
2,014,472,338,040,164,900
Compute dphi but in chunks over the subhalos, use when Nt x N is too large an array to store in memory
src/signals.py
dphi_dop_chunked_vec
delos/dm-pta-mc
python
def dphi_dop_chunked_vec(t, profile, r0_vec, v_vec, use_form=False, use_chunk=False, chunk_size=10000, verbose=False, form_fun=None, interp_table=None, time_end=np.inf): '\n\n Compute dphi but in chunks over the subhalos, use when Nt x N is too large an array to\n store in memory\n\n ' num_objects = le...
def dphi_dop_vec(t, profile, r0_vec, v_vec, use_form=False, form_fun=None, interp_table=None): '\n\n Returns the vector phase shift due to the Doppler delay for subhalos of mass, mass.\n Dot with d_hat to get dphi_I\n\n TODO: add use_closest option\n\n ' v_mag = np.linalg.norm(v_vec, axis=1) r0_...
-1,128,212,848,609,852,800
Returns the vector phase shift due to the Doppler delay for subhalos of a given mass. Dot with d_hat to get dphi_I TODO: add use_closest option
src/signals.py
dphi_dop_vec
delos/dm-pta-mc
python
def dphi_dop_vec(t, profile, r0_vec, v_vec, use_form=False, form_fun=None, interp_table=None): '\n\n Returns the vector phase shift due to the Doppler delay for subhalos of mass, mass.\n Dot with d_hat to get dphi_I\n\n TODO: add use_closest option\n\n ' v_mag = np.linalg.norm(v_vec, axis=1) r0_...
def dphi_dop(t, profile, r0_vec, v_vec, d_hat, use_form=False, form_fun=None, interp_table=None): '\n\n Returns the phase shift due to the Doppler delay for subhalos of mass, mass\n\n TODO: add use_closest option\n\n ' v_mag = np.linalg.norm(v_vec, axis=1) r0_v = np.einsum('ij, ij -> i', r0_vec, v_...
1,391,388,544,967,027,700
Returns the phase shift due to the Doppler delay for subhalos of a given mass TODO: add use_closest option
src/signals.py
dphi_dop
delos/dm-pta-mc
python
def dphi_dop(t, profile, r0_vec, v_vec, d_hat, use_form=False, form_fun=None, interp_table=None): '\n\n Returns the phase shift due to the Doppler delay for subhalos of mass, mass\n\n TODO: add use_closest option\n\n ' v_mag = np.linalg.norm(v_vec, axis=1) r0_v = np.einsum('ij, ij -> i', r0_vec, v_...
@property def inserted(self): 'Provide the "inserted" namespace for an ON DUPLICATE KEY UPDATE statement\n\n MySQL\'s ON DUPLICATE KEY UPDATE clause allows reference to the row\n that would be inserted, via a special function called ``VALUES()``.\n This attribute provides all columns in this ro...
-8,385,649,932,417,646,000
Provide the "inserted" namespace for an ON DUPLICATE KEY UPDATE statement MySQL's ON DUPLICATE KEY UPDATE clause allows reference to the row that would be inserted, via a special function called ``VALUES()``. This attribute provides all columns in this row to be referenceable such that they will render within a ``VALU...
virtual/lib/python3.8/site-packages/sqlalchemy/dialects/mysql/dml.py
inserted
Ag-nes/Blog
python
@property def inserted(self): 'Provide the "inserted" namespace for an ON DUPLICATE KEY UPDATE statement\n\n MySQL\'s ON DUPLICATE KEY UPDATE clause allows reference to the row\n that would be inserted, via a special function called ``VALUES()``.\n This attribute provides all columns in this ro...
@_generative @_exclusive_against('_post_values_clause', msgs={'_post_values_clause': 'This Insert construct already has an ON DUPLICATE KEY clause present'}) def on_duplicate_key_update(self, *args, **kw): '\n Specifies the ON DUPLICATE KEY UPDATE clause.\n\n :param \\**kw: Column keys linked to UPDA...
7,189,407,818,811,196,000
Specifies the ON DUPLICATE KEY UPDATE clause. :param \**kw: Column keys linked to UPDATE values. The values may be any SQL expression or supported literal Python values. .. warning:: This dictionary does **not** take into account Python-specified default UPDATE values or generation functions, e.g. those spe...
virtual/lib/python3.8/site-packages/sqlalchemy/dialects/mysql/dml.py
on_duplicate_key_update
Ag-nes/Blog
python
@_generative @_exclusive_against('_post_values_clause', msgs={'_post_values_clause': 'This Insert construct already has an ON DUPLICATE KEY clause present'}) def on_duplicate_key_update(self, *args, **kw): '\n Specifies the ON DUPLICATE KEY UPDATE clause.\n\n :param \\**kw: Column keys linked to UPDA...
def _compare_text_filters(self, first: TextFilter, second: TextFilter): '\n\n :param first: TextFilter\n :param second: TextFilter\n :return: bool\n ' self.assertEqual(str(first.x), str(second.x)) self.assertEqual(str(first.y), str(second.y)) self.assertEqual(first.text, seco...
2,669,470,008,177,207,300
:param first: TextFilter :param second: TextFilter :return: bool
tests/bitmovin/services/filters/text_filter_tests.py
_compare_text_filters
bitmovin/bitmovin-python
python
def _compare_text_filters(self, first: TextFilter, second: TextFilter): '\n\n :param first: TextFilter\n :param second: TextFilter\n :return: bool\n ' self.assertEqual(str(first.x), str(second.x)) self.assertEqual(str(first.y), str(second.y)) self.assertEqual(first.text, seco...
@classmethod def what_cached(self, model_name: str, path=None, learn=None): '\n Shows what keys are cached\n ' if (isNone(path) and isNone(learn)): print('path and learn cannot be None at the same time') return elif isNone(path): path = learn.path name = f'{model_na...
208,750,910,187,950,180
Shows what keys are cached
fastinference/tabular/pd.py
what_cached
floleuerer/fastinference
python
@classmethod def what_cached(self, model_name: str, path=None, learn=None): '\n \n ' if (isNone(path) and isNone(learn)): print('path and learn cannot be None at the same time') return elif isNone(path): path = learn.path name = f'{model_name}_part_dep' folder =...
@classmethod def empty_cache(self, model_name: str, path=None, learn=None): '\n deletes the cache file\n ' if (isNone(path) and isNone(learn)): print('path and learn cannot be None at the same time') return elif isNone(path): path = learn.path name = f'{model_name}_...
5,048,076,303,994,254,000
deletes the cache file
fastinference/tabular/pd.py
empty_cache
floleuerer/fastinference
python
@classmethod def empty_cache(self, model_name: str, path=None, learn=None): '\n \n ' if (isNone(path) and isNone(learn)): print('path and learn cannot be None at the same time') return elif isNone(path): path = learn.path name = f'{model_name}_part_dep' folder =...
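what_cached and empty_cache above share one pattern: resolve path from learn when it is not given, derive the file name as f'{model_name}_part_dep', then inspect or delete the pickle at that location. A condensed sketch of that shared logic (the record bodies are truncated, so the file suffix and pickle layout are assumptions):

import pickle
from pathlib import Path

def _cache_file(model_name: str, path: Path) -> Path:
    # Same naming convention as the helpers above.
    return Path(path) / f'{model_name}_part_dep.pkl'

def what_cached(model_name: str, path: Path):
    f = _cache_file(model_name, path)
    if f.exists():
        with f.open('rb') as fh:
            print(list(pickle.load(fh).keys()))  # show cached keys

def empty_cache(model_name: str, path: Path):
    f = _cache_file(model_name, path)
    if f.exists():
        f.unlink()  # delete the cache file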
def _cont_into_buckets(self, df_init, CONT_COLS): "\n Categorical values can be easily distiguished one from another\n But that doesn't work with continious values, we have to divede it's\n values into buckets and then use all values in a bucket as a single value\n that avarages the buck...
2,281,857,222,200,926,000
Categorical values can be easily distinguished one from another, but that doesn't work with continuous values: we have to divide their range into buckets and then use all values in a bucket as a single value that averages the bucket. This way we convert a continuous feature into a pseudo-categorical one and are able to apply partial de...
fastinference/tabular/pd.py
_cont_into_buckets
floleuerer/fastinference
python
def _cont_into_buckets(self, df_init, CONT_COLS): "\n Categorical values can be easily distiguished one from another\n But that doesn't work with continious values, we have to divede it's\n values into buckets and then use all values in a bucket as a single value\n that avarages the buck...
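The bucketing idea in _cont_into_buckets can be written compactly with pandas: quantile-cut the continuous column and replace every value with its bucket mean, so the column behaves like a categorical one. A minimal sketch under that reading (pd.qcut is my choice of binning; the source's exact rule is truncated):

import pandas as pd

def cont_into_buckets(df: pd.DataFrame, col: str, n_buckets: int = 10) -> pd.DataFrame:
    df = df.copy()
    # Assign each row to a quantile bucket...
    buckets = pd.qcut(df[col], q=n_buckets, duplicates='drop')
    # ...then replace each value with the mean of its bucket.
    df[col] = df.groupby(buckets, observed=True)[col].transform('mean')
    return df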
def _get_field_uniq_x_coef(self, df: pd.DataFrame, fields: list, coef: float) -> list: "\n This function outputs threshold to number of occurrences different variants of list of columns (fields)\n In short if coef for ex. is 0.9, then function outputs number of occurrences for all but least 10%\n ...
5,400,113,811,807,088,000
This function outputs an occurrence-count threshold for the different variants of a list of columns (fields). In short, if coef is e.g. 0.9, then the function outputs the number of occurrences that keeps all but the least-used 10%. If coef is more than 1.0, then 'coef' itself is used as the threshold
fastinference/tabular/pd.py
_get_field_uniq_x_coef
floleuerer/fastinference
python
def _get_field_uniq_x_coef(self, df: pd.DataFrame, fields: list, coef: float) -> list: "\n This function outputs threshold to number of occurrences different variants of list of columns (fields)\n In short if coef for ex. is 0.9, then function outputs number of occurrences for all but least 10%\n ...
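Read literally, the threshold rule above is a quantile over occurrence counts: with coef = 0.9 the cut-off sits at the 10th percentile of how often each variant appears, and with coef > 1.0 the coef is used directly. A sketch of that interpretation (mine, since the record body is truncated):

import numpy as np
import pandas as pd

def occurrence_threshold(df: pd.DataFrame, fields: list, coef: float) -> float:
    if coef > 1.0:
        return coef  # coef is already an absolute count threshold
    counts = df.groupby(fields).size()
    # Keep the most-used `coef` fraction of variants; the threshold is
    # therefore the (1 - coef) quantile of the occurrence counts.
    return float(np.quantile(counts, 1.0 - coef))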
def _get_part_dep_one(self, fields: list, masterbar=None) -> pd.DataFrame: "\n Function calculate partial dependency for column in fields.\n Fields is a list of lists of what columns we want to test. The inner items are treated as connected fields.\n For ex. fields = [['Store','StoreType']] mea...
2,957,460,990,702,026,000
Function calculates partial dependency for the columns in fields. Fields is a list of lists of the columns we want to test. The inner items are treated as connected fields. For ex. fields = [['Store','StoreType']] means that Store and StoreType are treated as one entity (their values are substituted as a pair, not as separate v...
fastinference/tabular/pd.py
_get_part_dep_one
floleuerer/fastinference
python
def _get_part_dep_one(self, fields: list, masterbar=None) -> pd.DataFrame: "\n Function calculate partial dependency for column in fields.\n Fields is a list of lists of what columns we want to test. The inner items are treated as connected fields.\n For ex. fields = [['Store','StoreType']] mea...
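The loop _get_part_dep_one describes is the classic partial-dependence recipe, with the twist that connected fields are substituted as one unit. A model-agnostic sketch (fastai specifics are truncated in the record, so model.predict here is a stand-in returning an array of predictions):

import pandas as pd

def part_dep_one(model, df: pd.DataFrame, fields: list) -> pd.DataFrame:
    rows = []
    # Each distinct combination of the connected fields is one candidate value.
    for combo, times in df.groupby(fields).size().items():
        combo = combo if isinstance(combo, tuple) else (combo,)
        probe = df.copy()
        for col, val in zip(fields, combo):
            probe[col] = val  # substitute the pair/tuple as a unit
        rows.append({'value': combo,
                     'pred': model.predict(probe).mean(),
                     'times': times})
    return pd.DataFrame(rows)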
def _get_part_dep(self): '\n Makes a datafreme with partial dependencies for every pair of columns in fields\n ' fields = self.fields learn = self.learn cache_path = self.cache_path dep_name = self._get_dep_var() is_continue = self.is_continue l2k = self._list_to_key result...
-488,242,411,160,210,560
Makes a dataframe with partial dependencies for every pair of columns in fields
fastinference/tabular/pd.py
_get_part_dep
floleuerer/fastinference
python
def _get_part_dep(self): '\n \n ' fields = self.fields learn = self.learn cache_path = self.cache_path dep_name = self._get_dep_var() is_continue = self.is_continue l2k = self._list_to_key result = [] to_save = {} from_saved = {} if (is_continue == True): ...
def _save_cached(self): '\n Saves calculated PartDep df into path.\n Can be saved more than one with as an dict with fields as key\n ' path = self.cache_path path.mkdir(parents=True, exist_ok=True) name = self.save_name sv_dict = self._load_dict(name=name, path=path) key = s...
7,031,681,797,881,425,000
Saves the calculated PartDep df into path. More than one can be saved, as a dict with fields as the key
fastinference/tabular/pd.py
_save_cached
floleuerer/fastinference
python
def _save_cached(self): '\n Saves calculated PartDep df into path.\n Can be saved more than one with as an dict with fields as key\n ' path = self.cache_path path.mkdir(parents=True, exist_ok=True) name = self.save_name sv_dict = self._load_dict(name=name, path=path) key = s...
def _load_cached(self): '\n Load calculated PartDep df if hash exist.\n ' name = self.save_name path = self.cache_path if (not Path(f'{(path / name)}.pkl').exists()): return None ld_dict = self._ld_var(name=name, path=path) key = self._list_to_key((self.fields + [self.coef]...
-5,927,804,199,348,323,000
Loads the calculated PartDep df if its hash exists.
fastinference/tabular/pd.py
_load_cached
floleuerer/fastinference
python
def _load_cached(self): '\n \n ' name = self.save_name path = self.cache_path if (not Path(f'{(path / name)}.pkl').exists()): return None ld_dict = self._ld_var(name=name, path=path) key = self._list_to_key((self.fields + [self.coef])) if (key not in ld_dict): r...
def _load_or_calculate(self): '\n Calculates part dep or load it from cache if possible\n ' if ((self.is_use_cache == False) or isNone(self._load_cached())): self._get_part_dep() return self._save_cached() else: self.part_dep_df = self._load_cached()
4,629,582,466,019,069,000
Calculates part dep or loads it from cache if possible
fastinference/tabular/pd.py
_load_or_calculate
floleuerer/fastinference
python
def _load_or_calculate(self): '\n \n ' if ((self.is_use_cache == False) or isNone(self._load_cached())): self._get_part_dep() return self._save_cached() else: self.part_dep_df = self._load_cached()
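_load_or_calculate is a compact cache-or-compute gate; the same control flow, written generically (the callables are illustrative stand-ins for the methods above):

def load_or_calculate(load_cached, calculate, save, use_cache=True):
    cached = load_cached() if use_cache else None
    if cached is None:
        result = calculate()
        save(result)
        return result
    return cached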
def plot_raw(self, field, sample=1.0): '\n Plot dependency graph from data itself\n field must be list of exactly one feature\n sample is a coef to len(df). Lower if kernel use to shut down on that\n ' df = self.df df = df.sample(int((len(df) * sample))) field = field[0] ...
-5,214,618,900,920,584,000
Plots the dependency graph from the data itself. field must be a list of exactly one feature. sample is a coefficient of len(df); lower it if the kernel tends to shut down on that
fastinference/tabular/pd.py
plot_raw
floleuerer/fastinference
python
def plot_raw(self, field, sample=1.0): '\n Plot dependency graph from data itself\n field must be list of exactly one feature\n sample is a coef to len(df). Lower if kernel use to shut down on that\n ' df = self.df df = df.sample(int((len(df) * sample))) field = field[0] ...
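plot_raw's approach reduces to a groupby-and-plot over the (optionally subsampled) raw data. A minimal sketch under that reading (dep_var is an assumed parameter name; the record obtains it from object state):

import pandas as pd

def plot_raw(df: pd.DataFrame, field: str, dep_var: str, sample: float = 1.0):
    # Subsample first: very large frames can crash the kernel,
    # which is exactly what the docstring's `sample` knob is for.
    df = df.sample(int(len(df) * sample))
    df.groupby(field)[dep_var].mean().plot(kind='bar')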
def plot_model(self, field, strict_recalc=False, sample=1.0): '\n Plot dependency graph from the model.\n It also take into account times, so plot becomes much more resilient, cause not every value treats as equal\n (more occurences means more power)\n field must be list of exactly one f...
1,374,911,187,912,990,700
Plots the dependency graph from the model. It also takes times into account, so the plot becomes much more resilient, because not every value is treated as equal (more occurrences means more power). field must be a list of exactly one feature. strict_recalc=True ignores the precalculated `part_dep_df` and calculates it anyway. sample is a coef t...
fastinference/tabular/pd.py
plot_model
floleuerer/fastinference
python
def plot_model(self, field, strict_recalc=False, sample=1.0): '\n Plot dependency graph from the model.\n It also take into account times, so plot becomes much more resilient, cause not every value treats as equal\n (more occurences means more power)\n field must be list of exactly one f...
def get_pd(self, feature, min_tm=1): '\n Gets particular feature subtable from the whole one (min times is optional parameter)\n ' if isNone(self.part_dep_df): return None df = self.part_dep_df.query(f'(feature == "{feature}") and (times > {min_tm})') return self._general2partial(d...
8,928,306,377,913,288,000
Gets a particular feature's subtable from the whole one (min times is an optional parameter)
fastinference/tabular/pd.py
get_pd
floleuerer/fastinference
python
def get_pd(self, feature, min_tm=1): '\n \n ' if isNone(self.part_dep_df): return None df = self.part_dep_df.query(f'(feature == "{feature}") and (times > {min_tm})') return self._general2partial(df=df)
def get_pd_main_chained_feat(self, main_feat_idx=0, show_min=1): '\n Transforms whole features table to get_part_dep_one output table format\n ' def get_xth_el(str_list: str, indexes: list): lst = (str_list if is_listy(str_list) else ast.literal_eval(str_list)) lst = listify(lst) ...
-4,872,721,693,105,727,000
Transforms the whole features table to the get_part_dep_one output table format
fastinference/tabular/pd.py
get_pd_main_chained_feat
floleuerer/fastinference
python
def get_pd_main_chained_feat(self, main_feat_idx=0, show_min=1): '\n \n ' def get_xth_el(str_list: str, indexes: list): lst = (str_list if is_listy(str_list) else ast.literal_eval(str_list)) lst = listify(lst) if (len(lst) == 1): return lst[0] elif (len...
def plot_part_dep(self, fields, limit=20, asc=False): '\n Plots partial dependency plot for sublist of connected `fields`\n `fields` must be sublist of `fields` given on initalization calculation\n ' def prepare_colors(df_pd: pd.DataFrame): heat_min = df_pd['times'].min() h...
8,386,247,731,553,893,000
Plots the partial dependency plot for a sublist of connected `fields`. `fields` must be a sublist of the `fields` given at initialization
fastinference/tabular/pd.py
plot_part_dep
floleuerer/fastinference
python
def plot_part_dep(self, fields, limit=20, asc=False): '\n Plots partial dependency plot for sublist of connected `fields`\n `fields` must be sublist of `fields` given on initalization calculation\n ' def prepare_colors(df_pd: pd.DataFrame): heat_min = df_pd['times'].min() h...
def _parse_content(response): 'parse the response body as JSON, raise on errors' if (response.status_code != 200): raise ApiError(f'unknown error: {response.content.decode()}') result = json.loads(response.content) if (not result['ok']): raise ApiError(f"{result['error']}: {result.get('d...
-553,374,406,510,625,340
parse the response body as JSON, raise on errors
examples/slack/query.py
_parse_content
ariebovenberg/snug
python
def _parse_content(response): if (response.status_code != 200): raise ApiError(f'unknown error: {response.content.decode()}') result = json.loads(response.content) if (not result['ok']): raise ApiError(f"{result['error']}: {result.get('detail')}") return result
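_parse_content above is shown in full, so its two-stage check (HTTP status first, then the in-band Slack 'ok' flag) can be exercised directly with a stubbed response object (FakeResponse is invented for the demo):

import json

class ApiError(Exception):
    pass

def _parse_content(response):
    # verbatim logic from the record above
    if response.status_code != 200:
        raise ApiError(f'unknown error: {response.content.decode()}')
    result = json.loads(response.content)
    if not result['ok']:
        raise ApiError(f"{result['error']}: {result.get('detail')}")
    return result

class FakeResponse:
    def __init__(self, status_code, payload):
        self.status_code = status_code
        self.content = json.dumps(payload).encode()

assert _parse_content(FakeResponse(200, {'ok': True}))['ok'] is True
try:
    _parse_content(FakeResponse(200, {'ok': False, 'error': 'invalid_auth'}))
except ApiError as exc:
    print(exc)  # invalid_auth: None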
def paginated_retrieval(methodname, itemtype): 'decorator factory for retrieval queries from query params' return compose(reusable, basic_interaction, map_yield(partial(_params_as_get, methodname)))
-6,033,415,841,283,409,000
decorator factory for retrieval queries from query params
examples/slack/query.py
paginated_retrieval
ariebovenberg/snug
python
def paginated_retrieval(methodname, itemtype): return compose(reusable, basic_interaction, map_yield(partial(_params_as_get, methodname)))
def json_post(methodname, rtype, key): 'decorator factory for json POST queries' return compose(reusable, map_return(registry(rtype), itemgetter(key)), basic_interaction, map_yield(partial(_json_as_post, methodname)), oneyield)
4,652,402,797,051,923,000
decorator factory for json POST queries
examples/slack/query.py
json_post
ariebovenberg/snug
python
def json_post(methodname, rtype, key): return compose(reusable, map_return(registry(rtype), itemgetter(key)), basic_interaction, map_yield(partial(_json_as_post, methodname)), oneyield)
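Both decorator factories above are compose() chains: each stage wraps the generator-based query one layer deeper. The shape of compose, plus a toy map_yield-style stage, as a re-creation of the idea (these are my re-creations, not snug's actual internals):

import functools

def compose(*funcs):
    # compose(f, g, h)(x) == f(g(h(x)))
    return functools.reduce(lambda f, g: lambda x: f(g(x)), funcs)

def map_yield(func):
    # Stage that transforms every value the wrapped generator yields,
    # passing responses sent by the caller back down unchanged.
    def decorator(genfunc):
        @functools.wraps(genfunc)
        def wrapper(*args, **kwargs):
            gen = genfunc(*args, **kwargs)
            sent = None
            while True:
                try:
                    sent = yield func(gen.send(sent))
                except StopIteration as stop:
                    return stop.value
        return wrapper
    return decorator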
def retinanet_target_assign(bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, gt_labels, is_crowd, im_info, num_classes=1, positive_overlap=0.5, negative_overlap=0.4): "\n **Target Assign Layer for the detector RetinaNet.**\n\n This OP finds out positive and negative samples from all anchors\n for t...
4,884,496,934,939,049,000
**Target Assign Layer for the detector RetinaNet.** This OP finds out positive and negative samples from all anchors for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , and assigns target labels for classification along with target locations for regression to each sample, then takes out the par...
python/paddle/fluid/layers/detection.py
retinanet_target_assign
92lqllearning/Paddle
python
def retinanet_target_assign(bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, gt_labels, is_crowd, im_info, num_classes=1, positive_overlap=0.5, negative_overlap=0.4): "\n **Target Assign Layer for the detector RetinaNet.**\n\n This OP finds out positive and negative samples from all anchors\n for t...
def rpn_target_assign(bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info, rpn_batch_size_per_im=256, rpn_straddle_thresh=0.0, rpn_fg_fraction=0.5, rpn_positive_overlap=0.7, rpn_negative_overlap=0.3, use_random=True): "\n **Target Assign Layer for region proposal network (RPN) in Faster-RC...
-5,902,719,678,806,247,000
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.** Given the Intersection-over-Union (IoU) overlap between anchors and ground truth boxes, this layer assigns classification and regression targets to each anchor; these target labels are used to train the RPN. The classi...
python/paddle/fluid/layers/detection.py
rpn_target_assign
92lqllearning/Paddle
python
def rpn_target_assign(bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info, rpn_batch_size_per_im=256, rpn_straddle_thresh=0.0, rpn_fg_fraction=0.5, rpn_positive_overlap=0.7, rpn_negative_overlap=0.3, use_random=True): "\n **Target Assign Layer for region proposal network (RPN) in Faster-RC...
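Both target-assign records above implement variants of one IoU rule: anchors whose best overlap with any ground-truth box exceeds the positive threshold become foreground, those below the negative threshold become background, and the band in between is ignored. A small NumPy sketch of just that decision logic (not the Paddle op itself):

import numpy as np

def iou(anchors, gt):
    # anchors: (N, 4), gt: (M, 4), boxes as (x1, y1, x2, y2); returns (N, M).
    ix1 = np.maximum(anchors[:, None, 0], gt[None, :, 0])
    iy1 = np.maximum(anchors[:, None, 1], gt[None, :, 1])
    ix2 = np.minimum(anchors[:, None, 2], gt[None, :, 2])
    iy2 = np.minimum(anchors[:, None, 3], gt[None, :, 3])
    inter = np.clip(ix2 - ix1, 0, None) * np.clip(iy2 - iy1, 0, None)
    area_a = (anchors[:, 2] - anchors[:, 0]) * (anchors[:, 3] - anchors[:, 1])
    area_g = (gt[:, 2] - gt[:, 0]) * (gt[:, 3] - gt[:, 1])
    return inter / (area_a[:, None] + area_g[None, :] - inter)

def assign_labels(anchors, gt, pos_overlap=0.5, neg_overlap=0.4):
    overlaps = iou(anchors, gt).max(axis=1)  # best overlap per anchor
    labels = np.full(len(anchors), -1)       # -1 = ignored
    labels[overlaps >= pos_overlap] = 1      # foreground
    labels[overlaps < neg_overlap] = 0       # background
    return labels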