body stringlengths 26 98.2k | body_hash int64 -9,222,864,604,528,158,000 9,221,803,474B | docstring stringlengths 1 16.8k | path stringlengths 5 230 | name stringlengths 1 96 | repository_name stringlengths 7 89 | lang stringclasses 1 value | body_without_docstring stringlengths 20 98.2k |
|---|---|---|---|---|---|---|---|
def clizefy(obj=None, helper_class=DocutilizeClizeHelp, **kwargs):
'Decorator for creating CLI objects.\n '
if (obj is None):
return partial(clizefy, helper_class=helper_class, **kwargs)
if hasattr(obj, 'cli'):
return obj
if (not callable(obj)):
return Clize.get_cli(obj, **kwa... | 3,488,118,777,556,546,000 | Decorator for creating CLI objects. | improver/cli/__init__.py | clizefy | anja-bom/improver | python | def clizefy(obj=None, helper_class=DocutilizeClizeHelp, **kwargs):
'\n '
if (obj is None):
return partial(clizefy, helper_class=helper_class, **kwargs)
if hasattr(obj, 'cli'):
return obj
if (not callable(obj)):
return Clize.get_cli(obj, **kwargs)
return Clize.keep(obj, hel... |
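The `clizefy` record above shows the standard idiom for a decorator that accepts optional keyword arguments: when invoked without a target, it returns a `functools.partial` of itself. A minimal self-contained sketch of the same pattern (the `tag`/`label` names are illustrative, not from the improver codebase):

```python
from functools import partial

def tag(obj=None, label="cli", **kwargs):
    # Used as @tag(label=...): obj is None, so hand back a decorator
    # that re-enters tag with the keyword arguments already bound.
    if obj is None:
        return partial(tag, label=label, **kwargs)
    # Used as @tag (or called by the partial above): decorate in place.
    obj.label = label
    return obj

@tag(label="command")
def hello():
    return "hi"

print(hello.label)  # -> command
```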
@clizefy(help_names=())
def improver_help(prog_name: parameters.pass_name, command=None, *, usage=False):
'Show command help.'
prog_name = prog_name.split()[0]
args = filter(None, [command, '--help', (usage and '--usage')])
result = execute_command(SUBCOMMANDS_DISPATCHER, prog_name, *args)
if ((not ... | 2,624,931,623,418,684,400 | Show command help. | improver/cli/__init__.py | improver_help | anja-bom/improver | python | @clizefy(help_names=())
def improver_help(prog_name: parameters.pass_name, command=None, *, usage=False):
prog_name = prog_name.split()[0]
args = filter(None, [command, '--help', (usage and '--usage')])
result = execute_command(SUBCOMMANDS_DISPATCHER, prog_name, *args)
if ((not command) and usage):... |
def _cli_items():
'Dynamically discover CLIs.'
import importlib
import pkgutil
from improver.cli import __path__ as improver_cli_pkg_path
(yield ('help', improver_help))
for minfo in pkgutil.iter_modules(improver_cli_pkg_path):
mod_name = minfo.name
if (mod_name != '__main__'):
... | 2,685,931,208,095,483,400 | Dynamically discover CLIs. | improver/cli/__init__.py | _cli_items | anja-bom/improver | python | def _cli_items():
import importlib
import pkgutil
from improver.cli import __path__ as improver_cli_pkg_path
(yield ('help', improver_help))
for minfo in pkgutil.iter_modules(improver_cli_pkg_path):
mod_name = minfo.name
if (mod_name != '__main__'):
mcli = importlib.... |
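`_cli_items` discovers subcommand modules by walking the package path with `pkgutil.iter_modules`. A hedged, generic version of that discovery loop (any importable package works; the improver-specific yield of the `help` command is omitted):

```python
import importlib
import pkgutil

def iter_package_modules(package_name):
    """Yield (name, module) for each module in a package,
    skipping __main__, as the loop above does."""
    package = importlib.import_module(package_name)
    for minfo in pkgutil.iter_modules(package.__path__):
        if minfo.name != "__main__":
            module = importlib.import_module(
                f"{package_name}.{minfo.name}")
            yield minfo.name, module
```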
def unbracket(args):
"Convert input list with bracketed items into nested lists.\n\n >>> unbracket('foo [ bar a b ] [ baz c ] -o z'.split())\n ['foo', ['bar', 'a', 'b'], ['baz', 'c'], '-o', 'z']\n\n "
outargs = []
stack = []
mismatch_msg = 'Mismatched bracket at position %i.'
for (i, arg) i... | 1,415,948,892,958,881,000 | Convert input list with bracketed items into nested lists.
>>> unbracket('foo [ bar a b ] [ baz c ] -o z'.split())
['foo', ['bar', 'a', 'b'], ['baz', 'c'], '-o', 'z'] | improver/cli/__init__.py | unbracket | anja-bom/improver | python | def unbracket(args):
"Convert input list with bracketed items into nested lists.\n\n >>> unbracket('foo [ bar a b ] [ baz c ] -o z'.split())\n ['foo', ['bar', 'a', 'b'], ['baz', 'c'], '-o', 'z']\n\n "
outargs = []
stack = []
mismatch_msg = 'Mismatched bracket at position %i.'
for (i, arg) i... |
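The `unbracket` body is cut off after the `outargs`/`stack` setup, but the doctest pins down the behaviour. A reconstruction consistent with that doctest (the loop body is inferred, not the verbatim improver source):

```python
def unbracket(args):
    """Convert input list with bracketed items into nested lists.

    >>> unbracket('foo [ bar a b ] [ baz c ] -o z'.split())
    ['foo', ['bar', 'a', 'b'], ['baz', 'c'], '-o', 'z']
    """
    outargs = []
    stack = []
    mismatch_msg = 'Mismatched bracket at position %i.'
    for i, arg in enumerate(args):
        if arg == '[':
            stack.append(outargs)  # remember the enclosing list
            outargs = []           # start collecting the nested items
        elif arg == ']':
            if not stack:
                raise ValueError(mismatch_msg % i)
            inner = outargs
            outargs = stack.pop()
            outargs.append(inner)  # attach nested list to its parent
        else:
            outargs.append(arg)
    if stack:
        raise ValueError(mismatch_msg % len(args))
    return outargs
```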
def execute_command(dispatcher, prog_name, *args, verbose=False, dry_run=False):
'Common entry point for command execution.'
args = list(args)
for (i, arg) in enumerate(args):
if isinstance(arg, (list, tuple)):
arg = execute_command(dispatcher, prog_name, *arg, verbose=verbose, dry_run=d... | -4,196,092,585,398,877,700 | Common entry point for command execution. | improver/cli/__init__.py | execute_command | anja-bom/improver | python | def execute_command(dispatcher, prog_name, *args, verbose=False, dry_run=False):
args = list(args)
for (i, arg) in enumerate(args):
if isinstance(arg, (list, tuple)):
arg = execute_command(dispatcher, prog_name, *arg, verbose=verbose, dry_run=dry_run)
if isinstance(arg, pathlib.... |
@clizefy()
def main(prog_name: parameters.pass_name, command: LAST_OPTION, *args, profile: value_converter((lambda _: _), name='FILENAME')=None, memprofile: value_converter((lambda _: _), name='FILENAME')=None, verbose=False, dry_run=False):
'IMPROVER NWP post-processing toolbox\n\n Results from commands can be ... | 8,911,020,094,292,437,000 | IMPROVER NWP post-processing toolbox
Results from commands can be passed into file-like arguments
of other commands by surrounding them by square brackets::
improver command [ command ... ] ...
Spaces around brackets are mandatory.
Args:
prog_name:
The program name from argv[0].
command (str):
... | improver/cli/__init__.py | main | anja-bom/improver | python | @clizefy()
def main(prog_name: parameters.pass_name, command: LAST_OPTION, *args, profile: value_converter((lambda _: _), name='FILENAME')=None, memprofile: value_converter((lambda _: _), name='FILENAME')=None, verbose=False, dry_run=False):
'IMPROVER NWP post-processing toolbox\n\n Results from commands can be ... |
def run_main(argv=None):
"Overrides argv[0] to be 'improver' then runs main.\n\n Args:\n argv (list of str):\n Arguments that were from the command line.\n\n "
import sys
from clize import run
if (argv is None):
argv = sys.argv[:]
argv[0] = 'improver'
run(main... | -5,774,211,158,403,693,000 | Overrides argv[0] to be 'improver' then runs main.
Args:
argv (list of str):
Arguments that were from the command line. | improver/cli/__init__.py | run_main | anja-bom/improver | python | def run_main(argv=None):
"Overrides argv[0] to be 'improver' then runs main.\n\n Args:\n argv (list of str):\n Arguments that were from the command line.\n\n "
import sys
from clize import run
if (argv is None):
argv = sys.argv[:]
argv[0] = 'improver'
run(main... |
def add_docstring(self, docstring, *args, **kwargs):
'Adds the updated docstring.'
docstring = docutilize(docstring)
super().add_docstring(docstring, *args, **kwargs) | 3,922,062,559,688,741,000 | Adds the updated docstring. | improver/cli/__init__.py | add_docstring | anja-bom/improver | python | def add_docstring(self, docstring, *args, **kwargs):
docstring = docutilize(docstring)
super().add_docstring(docstring, *args, **kwargs) |
@staticmethod
def obj_to_name(obj, cls=None):
'Helper function to create the string.'
if (cls is None):
cls = type(obj)
try:
obj_id = hash(obj)
except TypeError:
obj_id = id(obj)
return ('<%s.%s@%i>' % (cls.__module__, cls.__name__, obj_id)) | 6,488,412,296,716,952,000 | Helper function to create the string. | improver/cli/__init__.py | obj_to_name | anja-bom/improver | python | @staticmethod
def obj_to_name(obj, cls=None):
if (cls is None):
cls = type(obj)
try:
obj_id = hash(obj)
except TypeError:
obj_id = id(obj)
return ('<%s.%s@%i>' % (cls.__module__, cls.__name__, obj_id)) |
@value_converter
def constrained_inputcubelist_converter(to_convert):
'Passes the cube and constraints onto maybe_coerce_with.\n\n Args:\n to_convert (str or iris.cube.CubeList):\n A CubeList or a filename to be loaded into a CubeList.\n\n Returns:\n iris.cube.Cube... | -6,835,541,266,756,884,000 | Passes the cube and constraints onto maybe_coerce_with.
Args:
to_convert (str or iris.cube.CubeList):
A CubeList or a filename to be loaded into a CubeList.
Returns:
iris.cube.CubeList:
The loaded cubelist of constrained cubes. | improver/cli/__init__.py | constrained_inputcubelist_converter | anja-bom/improver | python | @value_converter
def constrained_inputcubelist_converter(to_convert):
'Passes the cube and constraints onto maybe_coerce_with.\n\n Args:\n to_convert (str or iris.cube.CubeList):\n A CubeList or a filename to be loaded into a CubeList.\n\n Returns:\n iris.cube.Cube... |
def get_symbol(self, cfg, is_train=True):
'\n return a generated symbol, it also need to be assigned to self.sym\n '
raise NotImplementedError() | 2,051,318,806,614,280,400 | return a generated symbol, it also need to be assigned to self.sym | lib/utils/symbol.py | get_symbol | 571502680/mx-DeepIM | python | def get_symbol(self, cfg, is_train=True):
'\n \n '
raise NotImplementedError() |
def art_search(art):
"\n Function to retrieve the information about collections in the Art institute of Chicago\n\n Parameters:\n -------------\n The key word that users want to search,\n for example: the artist's name, the title of the artwork.\n\n Returns:\n -------------\n Status code: st... | 5,937,697,452,561,424,000 | Function to retrieve the information about collections in the Art institute of Chicago
Parameters:
-------------
The key word that users want to search,
for example: the artist's name, the title of the artwork.
Returns:
-------------
Status code: str
if the API request went through... | src/aicapi_yw3760/aicapi_yw3760.py | art_search | nicolewang97/AICAPI_YW3760 | python | def art_search(art):
"\n Function to retrieve the information about collections in the Art institute of Chicago\n\n Parameters:\n -------------\n The key word that users want to search,\n for example: the artist's name, the title of the artwork.\n\n Returns:\n -------------\n Status code: st... |
def tour_search(tour):
"\n Function to retrieve the information about tour in the Art institute of Chicago\n\n Parameters:\n -------------\n The key word that users want to search,\n for example: the artist's name, the title of the artwork.\n\n Returns:\n -------------\n Status code: str\n ... | -7,110,535,691,618,277,000 | Function to retrieve the information about tour in the Art institute of Chicago
Parameters:
-------------
The key word that users want to search,
for example: the artist's name, the title of the artwork.
Returns:
-------------
Status code: str
if the API request went through
Dataframe: df
includes the related... | src/aicapi_yw3760/aicapi_yw3760.py | tour_search | nicolewang97/AICAPI_YW3760 | python | def tour_search(tour):
"\n Function to retrieve the information about tour in the Art institute of Chicago\n\n Parameters:\n -------------\n The key word that users want to search,\n for example: the artist's name, the title of the artwork.\n\n Returns:\n -------------\n Status code: str\n ... |
def pic_search(pic, artist):
"\n Function to retrieve the images of artworks collected in the Art institute of Chicago\n\n Parameters:\n -------------\n pic: the title of the artwork\n artist: the full name of the artist\n\n Returns:\n -------------\n Status code: str\n if the API req... | 4,188,129,160,413,554,700 | Function to retrieve the images of artworks collected in the Art institute of Chicago
Parameters:
-------------
pic: the title of the artwork
artist: the full name of the artist
Returns:
-------------
Status code: str
if the API request went through
Image: jpg
The image of the searched atwork
Error Message:
... | src/aicapi_yw3760/aicapi_yw3760.py | pic_search | nicolewang97/AICAPI_YW3760 | python | def pic_search(pic, artist):
"\n Function to retrieve the images of artworks collected in the Art institute of Chicago\n\n Parameters:\n -------------\n pic: the title of the artwork\n artist: the full name of the artist\n\n Returns:\n -------------\n Status code: str\n if the API req... |
def product_search(product_art, product_category):
"\n Function to retrieve the information about products sold in the Art institute of Chicago\n\n Parameters:\n -------------\n pic: the title of the artwork\n artist: the full name of the artist\n\n Returns:\n -------------\n Status code: st... | 130,723,497,960,180,690 | Function to retrieve the information about products sold in the Art institute of Chicago
Parameters:
-------------
pic: the title of the artwork
artist: the full name of the artist
Returns:
-------------
Status code: str
if the API request went through
DataFrame: a dataframe include related info about the product... | src/aicapi_yw3760/aicapi_yw3760.py | product_search | nicolewang97/AICAPI_YW3760 | python | def product_search(product_art, product_category):
"\n Function to retrieve the information about products sold in the Art institute of Chicago\n\n Parameters:\n -------------\n pic: the title of the artwork\n artist: the full name of the artist\n\n Returns:\n -------------\n Status code: st... |
def product_show(product_art_show):
"\n Function to retrieve the information about top10 products sold in the Art institute of Chicago\n\n Parameters:\n -------------\n Type in any random word\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n DataFram... | 8,153,859,559,080,861,000 | Function to retrieve the information about top10 products sold in the Art institute of Chicago
Parameters:
-------------
Type in any random word
Returns:
-------------
Status code: str
if the API request went through
DataFrame: a dataframe include related info about the top 10 products and images of the products
... | src/aicapi_yw3760/aicapi_yw3760.py | product_show | nicolewang97/AICAPI_YW3760 | python | def product_show(product_art_show):
"\n Function to retrieve the information about top10 products sold in the Art institute of Chicago\n\n Parameters:\n -------------\n Type in any random word\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n DataFram... |
def run_mx_unary_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=25, runs=100):
"Runs benchmarks with the given context and precision (dtype)for all the unary\n operators in MXNet.\n\n Parameters\n ----------\n ctx: mx.ctx\n Context to run benchmarks\n dtype: str, default 'float32'\... | -1,756,838,878,309,694,200 | Runs benchmarks with the given context and precision (dtype)for all the unary
operators in MXNet.
Parameters
----------
ctx: mx.ctx
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
Nu... | benchmark/opperf/nd_operations/unary_operators.py | run_mx_unary_operators_benchmarks | Angzz/DeformableV2 | python | def run_mx_unary_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=25, runs=100):
"Runs benchmarks with the given context and precision (dtype)for all the unary\n operators in MXNet.\n\n Parameters\n ----------\n ctx: mx.ctx\n Context to run benchmarks\n dtype: str, default 'float32'\... |
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, columns=None, sparse=False, drop_first=False, dtype=np.uint8, **kwargs):
"\n Convert categorical variable into dummy/indicator variables.\n\n Data must have category dtype to infer result's ``columns``.\n\n Parameters\n ----------\n d... | -4,451,911,244,835,267,000 | Convert categorical variable into dummy/indicator variables.
Data must have category dtype to infer result's ``columns``.
Parameters
----------
data : Series, or DataFrame
For Series, the dtype must be categorical.
For DataFrame, at least one column must be categorical.
prefix : string, list of strings, or di... | dask/dataframe/reshape.py | get_dummies | Kirito1397/dask | python | def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, columns=None, sparse=False, drop_first=False, dtype=np.uint8, **kwargs):
"\n Convert categorical variable into dummy/indicator variables.\n\n Data must have category dtype to infer result's ``columns``.\n\n Parameters\n ----------\n d... |
def pivot_table(df, index=None, columns=None, values=None, aggfunc='mean'):
"\n Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``\n must have category dtype to infer result's ``columns``.\n ``index``, ``columns``, and ``aggfunc`` must be all scalar.\n ``values`` can be scalar or ... | -1,692,296,265,472,854,300 | Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``
must have category dtype to infer result's ``columns``.
``index``, ``columns``, and ``aggfunc`` must be all scalar.
``values`` can be scalar or list-like.
Parameters
----------
df : DataFrame
index : scalar
column to be index
columns : scala... | dask/dataframe/reshape.py | pivot_table | Kirito1397/dask | python | def pivot_table(df, index=None, columns=None, values=None, aggfunc='mean'):
"\n Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``\n must have category dtype to infer result's ``columns``.\n ``index``, ``columns``, and ``aggfunc`` must be all scalar.\n ``values`` can be scalar or ... |
def melt(frame, id_vars=None, value_vars=None, var_name=None, value_name='value', col_level=None):
'\n Unpivots a DataFrame from wide format to long format, optionally leaving identifier variables set.\n\n This function is useful to massage a DataFrame into a format where one or more columns are identifier va... | -2,040,048,766,582,793,500 | Unpivots a DataFrame from wide format to long format, optionally leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one or more columns are identifier variables
(``id_vars``), while all other columns, considered measured variables (``value_vars``), are "unpivoted" to t... | dask/dataframe/reshape.py | melt | Kirito1397/dask | python | def melt(frame, id_vars=None, value_vars=None, var_name=None, value_name='value', col_level=None):
'\n Unpivots a DataFrame from wide format to long format, optionally leaving identifier variables set.\n\n This function is useful to massage a DataFrame into a format where one or more columns are identifier va... |
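A common thread in the three dask reshape records above is that the target columns must have category dtype so the result's columns can be inferred without computing the data. A minimal usage sketch (assumes dask and pandas are installed; the sample frame is invented):

```python
import pandas as pd
import dask.dataframe as dd

pdf = pd.DataFrame({'key': pd.Categorical(['a', 'b', 'a', 'c']),
                    'idx': [1, 1, 2, 2],
                    'val': [1.0, 2.0, 3.0, 4.0]})
ddf = dd.from_pandas(pdf, npartitions=2)

# Result columns are known up front because 'key' is categorical.
dummies = dd.get_dummies(ddf, columns=['key']).compute()

# index, columns, and aggfunc must all be scalars.
table = dd.pivot_table(ddf, index='idx', columns='key',
                       values='val', aggfunc='mean').compute()
```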
def get_latest_questionnaire_with_concept(self, codeId):
'Find the questionnaire most recently modified that has the specified concept code.'
with self.session() as session:
return session.query(Questionnaire).join(Questionnaire.concepts).filter((QuestionnaireConcept.codeId == codeId)).order_by(Question... | -8,409,286,402,782,696,000 | Find the questionnaire most recently modified that has the specified concept code. | rdr_service/dao/questionnaire_dao.py | get_latest_questionnaire_with_concept | all-of-us/raw-data-repository | python | def get_latest_questionnaire_with_concept(self, codeId):
with self.session() as session:
return session.query(Questionnaire).join(Questionnaire.concepts).filter((QuestionnaireConcept.codeId == codeId)).order_by(Questionnaire.lastModified.desc()).options(subqueryload(Questionnaire.questions)).first() |
def _validate_update(self, session, obj, existing_obj):
'Validates that an update is OK before performing it. (Not applied on insert.)\n By default, validates that the object already exists, and if an expected semanticVersion ID is provided,\n that it matches.\n '
if (not existing_obj):
... | 6,419,327,074,788,434,000 | Validates that an update is OK before performing it. (Not applied on insert.)
By default, validates that the object already exists, and if an expected semanticVersion ID is provided,
that it matches. | rdr_service/dao/questionnaire_dao.py | _validate_update | all-of-us/raw-data-repository | python | def _validate_update(self, session, obj, existing_obj):
'Validates that an update is OK before performing it. (Not applied on insert.)\n By default, validates that the object already exists, and if an expected semanticVersion ID is provided,\n that it matches.\n '
if (not existing_obj):
... |
@classmethod
def _populate_questions(cls, group, code_map, questions):
'Recursively populate questions under this group.'
if group.question:
for question in group.question:
if (question.linkId and question.concept and (len(question.concept) == 1)):
concept = question.concept[... | 1,074,332,531,278,272 | Recursively populate questions under this group. | rdr_service/dao/questionnaire_dao.py | _populate_questions | all-of-us/raw-data-repository | python | @classmethod
def _populate_questions(cls, group, code_map, questions):
if group.question:
for question in group.question:
if (question.linkId and question.concept and (len(question.concept) == 1)):
concept = question.concept[0]
if (concept.system and concept.... |
def convert_to_int(s):
'Turn ANES data entry into an integer.\n \n >>> convert_to_int("1. Govt should provide many fewer services")\n 1\n >>> convert_to_int("2")\n 2\n '
try:
return int(s.partition('.')[0])
except ValueError:
warnings.warn(("Couldn't convert: " + s))
... | 6,199,727,918,395,590,000 | Turn ANES data entry into an integer.
>>> convert_to_int("1. Govt should provide many fewer services")
1
>>> convert_to_int("2")
2 | notebooks/as_script/1.0-adm-load-data-2012-Copy1.py | convert_to_int | aryamccarthy/ANES | python | def convert_to_int(s):
'Turn ANES data entry into an integer.\n \n >>> convert_to_int("1. Govt should provide many fewer services")\n 1\n >>> convert_to_int("2")\n 2\n '
try:
return int(s.partition('.')[0])
except ValueError:
warnings.warn(("Couldn't convert: " + s))
... |
def negative_to_nan(value):
'Convert negative values to missing.\n \n ANES codes various non-answers as negative numbers.\n For instance, if a question does not pertain to the \n respondent.\n '
return (value if (value >= 0) else np.nan) | -8,710,119,248,488,928,000 | Convert negative values to missing.
ANES codes various non-answers as negative numbers.
For instance, if a question does not pertain to the
respondent. | notebooks/as_script/1.0-adm-load-data-2012-Copy1.py | negative_to_nan | aryamccarthy/ANES | python | def negative_to_nan(value):
'Convert negative values to missing.\n \n ANES codes various non-answers as negative numbers.\n For instance, if a question does not pertain to the \n respondent.\n '
return (value if (value >= 0) else np.nan) |
def lib1_cons2_neutral3(x):
'Rearrange questions where 3 is neutral.'
return (((- 3) + x) if (x != 1) else x) | 4,934,245,784,321,438,000 | Rearrange questions where 3 is neutral. | notebooks/as_script/1.0-adm-load-data-2012-Copy1.py | lib1_cons2_neutral3 | aryamccarthy/ANES | python | def lib1_cons2_neutral3(x):
return (((- 3) + x) if (x != 1) else x) |
def liblow_conshigh(x):
'Reorder questions where the liberal response is low.'
return (- x) | -7,838,750,325,311,162,000 | Reorder questions where the liberal response is low. | notebooks/as_script/1.0-adm-load-data-2012-Copy1.py | liblow_conshigh | aryamccarthy/ANES | python | def liblow_conshigh(x):
return (- x) |
def dem_edu_special_treatment(x):
'Eliminate negative numbers and {95. Other}'
return (np.nan if ((x == 95) or (x < 0)) else x) | 705,192,464,560,746,200 | Eliminate negative numbers and {95. Other} | notebooks/as_script/1.0-adm-load-data-2012-Copy1.py | dem_edu_special_treatment | aryamccarthy/ANES | python | def dem_edu_special_treatment(x):
return (np.nan if ((x == 95) or (x < 0)) else x) |
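The ANES helpers in the records above form a small per-column cleaning pipeline. A sketch of how they chain with `pandas.Series.map` (the sample strings are invented):

```python
import numpy as np
import pandas as pd

def convert_to_int(s):
    # "1. Govt should provide many fewer services" -> 1
    return int(s.partition('.')[0])

def negative_to_nan(value):
    # ANES codes non-answers (e.g. -8 "don't know") as negatives.
    return value if value >= 0 else np.nan

raw = pd.Series(["1. Many fewer services", "-8. Don't know", "3"])
clean = raw.map(convert_to_int).map(negative_to_nan)
print(clean.tolist())  # [1.0, nan, 3.0]
```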
def merge(first, second):
'\n Recursively merges two dictionaries.\n\n Second dictionary values will take precedence over those from the first one.\n Nested dictionaries are merged too.\n\n :param dict first: The first dictionary\n :param dict second: The second dictionary\n :return: the resulting... | -7,877,707,461,125,681,000 | Recursively merges two dictionaries.
Second dictionary values will take precedence over those from the first one.
Nested dictionaries are merged too.
:param dict first: The first dictionary
:param dict second: The second dictionary
:return: the resulting merged dictionary
:rtype: dict | sanic_restplus/utils.py | merge | oliverpain/sanic-restplus | python | def merge(first, second):
'\n Recursively merges two dictionaries.\n\n Second dictionary values will take precedence over those from the first one.\n Nested dictionaries are merged too.\n\n :param dict first: The first dictionary\n :param dict second: The second dictionary\n :return: the resulting... |
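A reconstruction of `merge` consistent with its docstring (the real sanic-restplus implementation may differ in detail, e.g. whether it copies or mutates `first`):

```python
def merge(first, second):
    """Recursively merge two dicts; second wins on conflicts."""
    result = dict(first)
    for key, value in second.items():
        if (key in result and isinstance(result[key], dict)
                and isinstance(value, dict)):
            result[key] = merge(result[key], value)  # merge nested dicts
        else:
            result[key] = value  # second dict takes precedence
    return result

merge({'a': {'x': 1}, 'b': 2}, {'a': {'y': 3}, 'b': 4})
# -> {'a': {'x': 1, 'y': 3}, 'b': 4}
```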
def camel_to_dash(value):
'\n Transform a CamelCase string into a low_dashed one\n\n :param str value: a CamelCase string to transform\n :return: the low_dashed string\n :rtype: str\n '
first_cap = FIRST_CAP_RE.sub('\\1_\\2', value)
return ALL_CAP_RE.sub('\\1_\\2', first_cap).lower() | 258,975,552,515,779,330 | Transform a CamelCase string into a low_dashed one
:param str value: a CamelCase string to transform
:return: the low_dashed string
:rtype: str | sanic_restplus/utils.py | camel_to_dash | oliverpain/sanic-restplus | python | def camel_to_dash(value):
'\n Transform a CamelCase string into a low_dashed one\n\n :param str value: a CamelCase string to transform\n :return: the low_dashed string\n :rtype: str\n '
first_cap = FIRST_CAP_RE.sub('\\1_\\2', value)
return ALL_CAP_RE.sub('\\1_\\2', first_cap).lower() |
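`camel_to_dash` relies on two module-level regexes that the excerpt does not show. The classic pair below reproduces the documented behaviour; treat the exact patterns as an assumption rather than the package's verbatim constants:

```python
import re

FIRST_CAP_RE = re.compile('(.)([A-Z][a-z]+)')  # assumed definition
ALL_CAP_RE = re.compile('([a-z0-9])([A-Z])')   # assumed definition

def camel_to_dash(value):
    first_cap = FIRST_CAP_RE.sub(r'\1_\2', value)
    return ALL_CAP_RE.sub(r'\1_\2', first_cap).lower()

camel_to_dash('MyAPIResource')  # -> 'my_api_resource'
```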
def default_id(resource, method):
'Default operation ID generator'
return '{0}_{1}'.format(method, camel_to_dash(resource)) | 2,429,005,458,864,332,300 | Default operation ID generator | sanic_restplus/utils.py | default_id | oliverpain/sanic-restplus | python | def default_id(resource, method):
return '{0}_{1}'.format(method, camel_to_dash(resource)) |
def not_none(data):
'\n Remove all keys where value is None\n\n :param dict data: A dictionary with potentially some values set to None\n :return: The same dictionary without the keys with values to ``None``\n :rtype: dict\n '
return dict(((k, v) for (k, v) in data.items() if (v is not None))) | 531,670,141,387,802,300 | Remove all keys where value is None
:param dict data: A dictionary with potentially some values set to None
:return: The same dictionary without the keys with values to ``None``
:rtype: dict | sanic_restplus/utils.py | not_none | oliverpain/sanic-restplus | python | def not_none(data):
'\n Remove all keys where value is None\n\n :param dict data: A dictionary with potentially some values set to None\n :return: The same dictionary without the keys with values to ``None``\n :rtype: dict\n '
return dict(((k, v) for (k, v) in data.items() if (v is not None))) |
def not_none_sorted(data):
'\n Remove all keys where value is None\n\n :param OrderedDict data: A dictionary with potentially some values set to None\n :return: The same dictionary without the keys with values to ``None``\n :rtype: OrderedDict\n '
return OrderedDict(((k, v) for (k, v) in sorted(d... | -6,087,313,675,626,550,000 | Remove all keys where value is None
:param OrderedDict data: A dictionary with potentially some values set to None
:return: The same dictionary without the keys with values to ``None``
:rtype: OrderedDict | sanic_restplus/utils.py | not_none_sorted | oliverpain/sanic-restplus | python | def not_none_sorted(data):
'\n Remove all keys where value is None\n\n :param OrderedDict data: A dictionary with potentially some values set to None\n :return: The same dictionary without the keys with values to ``None``\n :rtype: OrderedDict\n '
return OrderedDict(((k, v) for (k, v) in sorted(d... |
def unpack(response, default_code=HTTPStatus.OK):
'\n Unpack a Flask standard response.\n\n Flask response can be:\n - a single value\n - a 2-tuple ``(value, code)``\n - a 3-tuple ``(value, code, headers)``\n\n .. warning::\n\n When using this function, you must ensure that the tuple is not... | 7,547,906,597,733,319,000 | Unpack a Flask standard response.
Flask response can be:
- a single value
- a 2-tuple ``(value, code)``
- a 3-tuple ``(value, code, headers)``
.. warning::
When using this function, you must ensure that the tuple is not the response data.
To do so, prefer returning list instead of tuple for listings.
:param... | sanic_restplus/utils.py | unpack | oliverpain/sanic-restplus | python | def unpack(response, default_code=HTTPStatus.OK):
'\n Unpack a Flask standard response.\n\n Flask response can be:\n - a single value\n - a 2-tuple ``(value, code)``\n - a 3-tuple ``(value, code, headers)``\n\n .. warning::\n\n When using this function, you must ensure that the tuple is not... |
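A hedged reconstruction of `unpack`, following the docstring's three accepted shapes (single value, 2-tuple, 3-tuple); the exact error handling is a guess:

```python
from http import HTTPStatus

def unpack(response, default_code=HTTPStatus.OK):
    """Normalise a Flask-style response to (data, code, headers)."""
    if not isinstance(response, tuple):
        return response, default_code, {}
    if len(response) == 1:
        return response[0], default_code, {}
    if len(response) == 2:
        data, code = response
        return data, code, {}
    if len(response) == 3:
        data, code, headers = response
        return data, code or default_code, headers
    raise ValueError('Too many response values')

unpack(({'ok': True}, 201))  # -> ({'ok': True}, 201, {})
```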
def parse_rule(parameter_string):
"Parse a parameter string into its constituent name, type, and\n pattern\n\n For example:\n `parse_parameter_string('<param_one:[A-z]>')` ->\n ('param_one', str, '[A-z]')\n\n :param parameter_string: String to parse\n :return: tuple containing\n (parame... | -719,285,102,467,803,600 | Parse a parameter string into its constituent name, type, and
pattern
For example:
`parse_parameter_string('<param_one:[A-z]>')` ->
('param_one', str, '[A-z]')
:param parameter_string: String to parse
:return: tuple containing
(parameter_name, parameter_type, parameter_pattern) | sanic_restplus/utils.py | parse_rule | oliverpain/sanic-restplus | python | def parse_rule(parameter_string):
"Parse a parameter string into its constituent name, type, and\n pattern\n\n For example:\n `parse_parameter_string('<param_one:[A-z]>')` ->\n ('param_one', str, '[A-z]')\n\n :param parameter_string: String to parse\n :return: tuple containing\n (parame... |
def get_instance_seg_v1_net(point_cloud, one_hot_vec, is_training, bn_decay, end_points):
' 3D instance segmentation PointNet v1 network.\n Input:\n point_cloud: TF tensor in shape (B,N,4)\n frustum point clouds with XYZ and intensity in point channels\n XYZs are in frustum coordinat... | -5,711,673,160,838,257,000 | 3D instance segmentation PointNet v1 network.
Input:
point_cloud: TF tensor in shape (B,N,4)
frustum point clouds with XYZ and intensity in point channels
XYZs are in frustum coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
is_trainin... | models/frustum_pointnets_v1.py | get_instance_seg_v1_net | BPMJG/annotated-F-pointnet | python | def get_instance_seg_v1_net(point_cloud, one_hot_vec, is_training, bn_decay, end_points):
' 3D instance segmentation PointNet v1 network.\n Input:\n point_cloud: TF tensor in shape (B,N,4)\n frustum point clouds with XYZ and intensity in point channels\n XYZs are in frustum coordinat... |
def get_3d_box_estimation_v1_net(object_point_cloud, one_hot_vec, is_training, bn_decay, end_points):
' 3D Box Estimation PointNet v1 network.\n Input:\n object_point_cloud: TF tensor in shape (B,M,C)\n point clouds in object coordinate\n one_hot_vec: TF tensor in shape (B,3)\n ... | 698,406,314,337,023,000 | 3D Box Estimation PointNet v1 network.
Input:
object_point_cloud: TF tensor in shape (B,M,C)
point clouds in object coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
Output:
output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)... | models/frustum_pointnets_v1.py | get_3d_box_estimation_v1_net | BPMJG/annotated-F-pointnet | python | def get_3d_box_estimation_v1_net(object_point_cloud, one_hot_vec, is_training, bn_decay, end_points):
' 3D Box Estimation PointNet v1 network.\n Input:\n object_point_cloud: TF tensor in shape (B,M,C)\n point clouds in object coordinate\n one_hot_vec: TF tensor in shape (B,3)\n ... |
def get_model(point_cloud, one_hot_vec, is_training, bn_decay=None):
' Frustum PointNets model. The model predict 3D object masks and\n amodel bounding boxes for objects in frustum point clouds.\n\n Input:\n point_cloud: TF tensor in shape (B,N,4)\n frustum point clouds with XYZ and intensit... | 4,313,505,580,426,850,300 | Frustum PointNets model. The model predict 3D object masks and
amodel bounding boxes for objects in frustum point clouds.
Input:
point_cloud: TF tensor in shape (B,N,4)
frustum point clouds with XYZ and intensity in point channels
XYZs are in frustum coordinate
one_hot_vec: TF tensor in shape (... | models/frustum_pointnets_v1.py | get_model | BPMJG/annotated-F-pointnet | python | def get_model(point_cloud, one_hot_vec, is_training, bn_decay=None):
' Frustum PointNets model. The model predict 3D object masks and\n amodel bounding boxes for objects in frustum point clouds.\n\n Input:\n point_cloud: TF tensor in shape (B,N,4)\n frustum point clouds with XYZ and intensit... |
@commands.group(invoke_without_command=True)
@commands.is_owner()
async def profanity(self, ctx, mode: bool):
'Disable or enable the profanity filter.\n \n Usage: `profanity enable` / `profanity disable` \n '
self.enabled = mode
(await self.coll.update_one({'_id': 'config'}, {'$set': {'... | -6,509,972,796,830,525,000 | Disable or enable the profanity filter.
Usage: `profanity enable` / `profanity disable` | profanity-filter/profanity-filter.py | profanity | officialpiyush/modmail-plugins-2 | python | @commands.group(invoke_without_command=True)
@commands.is_owner()
async def profanity(self, ctx, mode: bool):
'Disable or enable the profanity filter.\n \n Usage: `profanity enable` / `profanity disable` \n '
self.enabled = mode
(await self.coll.update_one({'_id': 'config'}, {'$set': {'... |
@commands.is_owner()
@profanity.command()
async def whitelist(ctx, target: Union[(Member, Role, TextChannel)]):
'Whitelist a user, role or channel from the profanity filter.\n \n Usage: `profanity whitelist @dude`\n '
self = ctx.bot.get_cog('ProfanityFilter')
if (target.id in self.white... | 3,735,439,529,420,118,500 | Whitelist a user, role or channel from the profanity filter.
Usage: `profanity whitelist @dude` | profanity-filter/profanity-filter.py | whitelist | officialpiyush/modmail-plugins-2 | python | @commands.is_owner()
@profanity.command()
async def whitelist(ctx, target: Union[(Member, Role, TextChannel)]):
'Whitelist a user, role or channel from the profanity filter.\n \n Usage: `profanity whitelist @dude`\n '
self = ctx.bot.get_cog('ProfanityFilter')
if (target.id in self.white... |
@classmethod
def __init__(self, *args, **kwargs):
' original request string '
self.req_obj = kwargs.pop('req_obj')
self.request = self.req_obj.get('request', '')
self.req_from = self.req_obj.get('from', '')
self.response = '' | -7,662,516,164,373,698,000 | original request string | core/brain/remind/me/every/reaction.py | __init__ | vsilent/smarty-bot | python | @classmethod
def __init__(self, *args, **kwargs):
' '
self.req_obj = kwargs.pop('req_obj')
self.request = self.req_obj.get('request', )
self.req_from = self.req_obj.get('from', )
self.response = |
@classmethod
def run(self):
'default method'
sess = Session()
sender = self.req_obj.get('sender', '')
if sender:
email = sender.split('/')[0]
profile = sess.query(Profile).filter((Profile.email == email)).one()
cron = CronTab(getuser())
DAYS = {'sunday': 'SUN', 'monday': 'MON', 'tues... | -439,311,795,631,872,600 | default method | core/brain/remind/me/every/reaction.py | run | vsilent/smarty-bot | python | @classmethod
def run(self):
sess = Session()
sender = self.req_obj.get('sender', )
if sender:
email = sender.split('/')[0]
profile = sess.query(Profile).filter((Profile.email == email)).one()
cron = CronTab(getuser())
DAYS = {'sunday': 'SUN', 'monday': 'MON', 'tuesday': 'TUE', 'wedn... |
async def status_by_coordinates(self, latitude: float, longitude: float) -> Dict[(str, Any)]:
"Get symptom data for the location nearest to the user's lat/lon."
return (await self.nearest_by_coordinates(latitude, longitude)) | -322,262,233,440,783,360 | Get symptom data for the location nearest to the user's lat/lon. | pyflunearyou/user.py | status_by_coordinates | bachya/pyflunearyou | python | async def status_by_coordinates(self, latitude: float, longitude: float) -> Dict[(str, Any)]:
return (await self.nearest_by_coordinates(latitude, longitude)) |
async def status_by_zip(self, zip_code: str) -> Dict[(str, Any)]:
'Get symptom data for the provided ZIP code.'
try:
location = next((d for d in (await self.user_reports()) if (d['zip'] == zip_code)))
except StopIteration:
return {}
return (await self.status_by_coordinates(float(location... | -159,002,280,710,509,020 | Get symptom data for the provided ZIP code. | pyflunearyou/user.py | status_by_zip | bachya/pyflunearyou | python | async def status_by_zip(self, zip_code: str) -> Dict[(str, Any)]:
try:
location = next((d for d in (await self.user_reports()) if (d['zip'] == zip_code)))
except StopIteration:
return {}
return (await self.status_by_coordinates(float(location['latitude']), float(location['longitude']))) |
def query(self, document_number):
'Query the "attachment page" endpoint and set the results to self.response.\n\n :param document_number: The internal PACER document ID for the item.\n :return: a request response object\n '
assert (self.session is not None), 'session attribute of DocketRepo... | 54,769,705,627,306,050 | Query the "attachment page" endpoint and set the results to self.response.
:param document_number: The internal PACER document ID for the item.
:return: a request response object | juriscraper/pacer/attachment_page.py | query | johnhawkinson/juriscraper | python | def query(self, document_number):
'Query the "attachment page" endpoint and set the results to self.response.\n\n :param document_number: The internal PACER document ID for the item.\n :return: a request response object\n '
assert (self.session is not None), 'session attribute of DocketRepo... |
@property
def data(self):
"Get data back from the query for the matching document entry.\n\n :return: If lookup fails, an empty dict. Else, a dict containing the\n following fields:\n - document_number: The document number we're working with.\n - page_count: The number of pages o... | 8,756,024,237,014,436,000 | Get data back from the query for the matching document entry.
:return: If lookup fails, an empty dict. Else, a dict containing the
following fields:
- document_number: The document number we're working with.
- page_count: The number of pages of the item
- pacer_doc_id: The doc ID for the main document.
... | juriscraper/pacer/attachment_page.py | data | johnhawkinson/juriscraper | python | @property
def data(self):
"Get data back from the query for the matching document entry.\n\n :return: If lookup fails, an empty dict. Else, a dict containing the\n following fields:\n - document_number: The document number we're working with.\n - page_count: The number of pages o... |
def _get_document_number(self, row):
"Return the document number for an item.\n\n In district court attachment pages, this is easy to extract with an\n XPath. In bankruptcy cases, it's simply not there.\n "
if self.is_bankruptcy:
return None
else:
return int(row.xpath('.... | 6,541,885,969,443,189,000 | Return the document number for an item.
In district court attachment pages, this is easy to extract with an
XPath. In bankruptcy cases, it's simply not there. | juriscraper/pacer/attachment_page.py | _get_document_number | johnhawkinson/juriscraper | python | def _get_document_number(self, row):
"Return the document number for an item.\n\n In district court attachment pages, this is easy to extract with an\n XPath. In bankruptcy cases, it's simply not there.\n "
if self.is_bankruptcy:
return None
else:
return int(row.xpath('.... |
def _get_attachment_number(self, row):
'Return the attachment number for an item.\n\n In district courts, this can be easily extracted. In bankruptcy courts,\n you must extract it, then subtract 1 from the value since these are\n tallied and include the main document.\n '
number = in... | -6,560,878,718,265,846,000 | Return the attachment number for an item.
In district courts, this can be easily extracted. In bankruptcy courts,
you must extract it, then subtract 1 from the value since these are
tallied and include the main document. | juriscraper/pacer/attachment_page.py | _get_attachment_number | johnhawkinson/juriscraper | python | def _get_attachment_number(self, row):
'Return the attachment number for an item.\n\n In district courts, this can be easily extracted. In bankruptcy courts,\n you must extract it, then subtract 1 from the value since these are\n tallied and include the main document.\n '
number = in... |
def _get_description_from_tr(self, row):
'Get the description from the row'
if (not self.is_bankruptcy):
index = 2
else:
index = 3
description_text_nodes = row.xpath(('./td[%s]//text()' % index))
if (len(description_text_nodes) == 0):
return u''
else:
description ... | 7,887,229,052,828,198,000 | Get the description from the row | juriscraper/pacer/attachment_page.py | _get_description_from_tr | johnhawkinson/juriscraper | python | def _get_description_from_tr(self, row):
if (not self.is_bankruptcy):
index = 2
else:
index = 3
description_text_nodes = row.xpath(('./td[%s]//text()' % index))
if (len(description_text_nodes) == 0):
return u
else:
description = description_text_nodes[0].strip()
... |
@staticmethod
def _get_page_count_from_tr(tr):
'Take a row from the attachment table and return the page count as an\n int extracted from the cell specified by index.\n '
pg_cnt_str_nodes = tr.xpath('./td[contains(., "page")]/text()')
if (len(pg_cnt_str_nodes) == 0):
return None
el... | 6,936,813,819,514,453,000 | Take a row from the attachment table and return the page count as an
int extracted from the cell specified by index. | juriscraper/pacer/attachment_page.py | _get_page_count_from_tr | johnhawkinson/juriscraper | python | @staticmethod
def _get_page_count_from_tr(tr):
'Take a row from the attachment table and return the page count as an\n int extracted from the cell specified by index.\n '
pg_cnt_str_nodes = tr.xpath('./td[contains(., "page")]/text()')
if (len(pg_cnt_str_nodes) == 0):
return None
el... |
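`_get_page_count_from_tr` pulls the count out of a table cell whose text mentions "page". A self-contained lxml sketch of that XPath-plus-regex extraction (the sample markup is invented):

```python
import re
from lxml import html

def get_page_count(tr):
    nodes = tr.xpath('./td[contains(., "page")]/text()')
    if not nodes:
        return None
    match = re.search(r'(\d+)', nodes[0])   # "14 pages" -> 14
    return int(match.group(1)) if match else None

doc = html.fromstring('<table><tr><td>Exhibit A</td>'
                      '<td>14 pages</td></tr></table>')
print(get_page_count(doc.xpath('//tr')[0]))  # -> 14
```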
@staticmethod
def _get_pacer_doc_id(row):
'Take in a row from the attachment table and return the pacer_doc_id\n for the item in that row. Return None if the ID cannot be found.\n '
try:
url = row.xpath(u'.//a')[0]
except IndexError:
return None
else:
doc1_url = url... | -902,098,361,262,620,000 | Take in a row from the attachment table and return the pacer_doc_id
for the item in that row. Return None if the ID cannot be found. | juriscraper/pacer/attachment_page.py | _get_pacer_doc_id | johnhawkinson/juriscraper | python | @staticmethod
def _get_pacer_doc_id(row):
'Take in a row from the attachment table and return the pacer_doc_id\n for the item in that row. Return None if the ID cannot be found.\n '
try:
url = row.xpath(u'.//a')[0]
except IndexError:
return None
else:
doc1_url = url... |
def _get_pacer_case_id(self):
'Get the pacer_case_id value by inspecting the HTML\n\n :returns str: The pacer_case_id value\n '
urls = self.tree.xpath('//a')
for url in urls:
try:
onclick = url.xpath('./@onclick')[0]
except IndexError:
continue
e... | -7,252,420,284,333,202,000 | Get the pacer_case_id value by inspecting the HTML
:returns str: The pacer_case_id value | juriscraper/pacer/attachment_page.py | _get_pacer_case_id | johnhawkinson/juriscraper | python | def _get_pacer_case_id(self):
'Get the pacer_case_id value by inspecting the HTML\n\n :returns str: The pacer_case_id value\n '
urls = self.tree.xpath('//a')
for url in urls:
try:
onclick = url.xpath('./@onclick')[0]
except IndexError:
continue
e... |
def import_data_stage02_physiology_pairWiseTest_add(self, filename):
'table adds'
data = base_importData()
data.read_csv(filename)
data.format_data()
self.add_data_stage02_physiology_pairWiseTest(data.data)
data.clear_data() | -3,367,785,456,156,156,000 | table adds | SBaaS_COBRA/stage02_physiology_pairWiseTest_io.py | import_data_stage02_physiology_pairWiseTest_add | dmccloskey/SBaaS_COBRA | python | def import_data_stage02_physiology_pairWiseTest_add(self, filename):
data = base_importData()
data.read_csv(filename)
data.format_data()
self.add_data_stage02_physiology_pairWiseTest(data.data)
data.clear_data() |
def export_dataStage02PhysiologyPairWiseTest_js(self, analysis_id_I, data_dir_I='tmp'):
'Export data for a volcano plot\n Visuals:\n 1. volcano plot\n 2. sample vs. sample (FC)\n 3. sample vs. sample (concentration)\n 4. sample vs. sample (p-value)'
data_O = []
data_O = se... | -4,705,089,115,005,355,000 | Export data for a volcano plot
Visuals:
1. volcano plot
2. sample vs. sample (FC)
3. sample vs. sample (concentration)
4. sample vs. sample (p-value) | SBaaS_COBRA/stage02_physiology_pairWiseTest_io.py | export_dataStage02PhysiologyPairWiseTest_js | dmccloskey/SBaaS_COBRA | python | def export_dataStage02PhysiologyPairWiseTest_js(self, analysis_id_I, data_dir_I='tmp'):
'Export data for a volcano plot\n Visuals:\n 1. volcano plot\n 2. sample vs. sample (FC)\n 3. sample vs. sample (concentration)\n 4. sample vs. sample (p-value)'
data_O = []
data_O = se... |
def export_dataStage02PhysiologyPairWiseTestMetabolites_js(self, analysis_id_I, data_dir_I='tmp'):
'Export data for a volcano plot\n Visuals:\n 1. volcano plot\n 2. sample vs. sample (FC)\n 3. sample vs. sample (concentration)\n 4. sample vs. sample (p-value)'
data_O = []
... | 4,520,182,110,218,128,400 | Export data for a volcano plot
Visuals:
1. volcano plot
2. sample vs. sample (FC)
3. sample vs. sample (concentration)
4. sample vs. sample (p-value) | SBaaS_COBRA/stage02_physiology_pairWiseTest_io.py | export_dataStage02PhysiologyPairWiseTestMetabolites_js | dmccloskey/SBaaS_COBRA | python | def export_dataStage02PhysiologyPairWiseTestMetabolites_js(self, analysis_id_I, data_dir_I='tmp'):
'Export data for a volcano plot\n Visuals:\n 1. volcano plot\n 2. sample vs. sample (FC)\n 3. sample vs. sample (concentration)\n 4. sample vs. sample (p-value)'
data_O = []
... |
def export_dataStage02PhysiologyPairWiseTestSubsystems_js(self, analysis_id_I, data_dir_I='tmp'):
'Export data for a volcano plot\n Visuals:\n 1. volcano plot\n 2. sample vs. sample (FC)\n 3. sample vs. sample (concentration)\n 4. sample vs. sample (p-value)'
data_O = []
d... | 8,564,029,590,552,739,000 | Export data for a volcano plot
Visuals:
1. volcano plot
2. sample vs. sample (FC)
3. sample vs. sample (concentration)
4. sample vs. sample (p-value) | SBaaS_COBRA/stage02_physiology_pairWiseTest_io.py | export_dataStage02PhysiologyPairWiseTestSubsystems_js | dmccloskey/SBaaS_COBRA | python | def export_dataStage02PhysiologyPairWiseTestSubsystems_js(self, analysis_id_I, data_dir_I='tmp'):
'Export data for a volcano plot\n Visuals:\n 1. volcano plot\n 2. sample vs. sample (FC)\n 3. sample vs. sample (concentration)\n 4. sample vs. sample (p-value)'
data_O = []
d... |
def advance_euler(gridc, gridx, gridy, scalars, grid_var_list, predcorr):
"\n Subroutine for the fractional step euler explicit time advancement of Navier Stokes equations\n \n Arguments\n ---------\n gridc : object\n Grid object for cell centered variables\n\n gridx : object\n Grid... | -1,970,600,925,795,049,200 | Subroutine for the fractional step euler explicit time advancement of Navier Stokes equations
Arguments
---------
gridc : object
Grid object for cell centered variables
gridx : object
Grid object for x-face variables
gridy : object
Grid object for y-face variables
scalars: object
Scalars ob... | flowx/ins/euler.py | advance_euler | AbhilashReddyM/flowX | python | def advance_euler(gridc, gridx, gridy, scalars, grid_var_list, predcorr):
"\n Subroutine for the fractional step euler explicit time advancement of Navier Stokes equations\n \n Arguments\n ---------\n gridc : object\n Grid object for cell centered variables\n\n gridx : object\n Grid... |
def stackplot(axes, x, *args, **kwargs):
"Draws a stacked area plot.\n\n *x* : 1d array of dimension N\n\n *y* : 2d array of dimension MxN, OR any number 1d arrays each of dimension\n 1xN. The data is assumed to be unstacked. Each of the following\n calls is legal::\n\n stackplot(... | -4,823,916,398,877,042,000 | Draws a stacked area plot.
*x* : 1d array of dimension N
*y* : 2d array of dimension MxN, OR any number 1d arrays each of dimension
1xN. The data is assumed to be unstacked. Each of the following
calls is legal::
stackplot(x, y) # where y is MxN
stackplot(x, y1, y2, y3, y4) ... | lib/matplotlib/stackplot.py | stackplot | Owen-Gillespie/BeachHacks-ShowerSuite | python | def stackplot(axes, x, *args, **kwargs):
"Draws a stacked area plot.\n\n *x* : 1d array of dimension N\n\n *y* : 2d array of dimension MxN, OR any number 1d arrays each of dimension\n 1xN. The data is assumed to be unstacked. Each of the following\n calls is legal::\n\n stackplot(... |
@staticmethod
def create(gltf, mesh_idx, skin_idx):
'Mesh creation.'
pymesh = gltf.data.meshes[mesh_idx]
bme = bmesh.new()
materials = []
for prim in pymesh.primitives:
if (prim.material is None):
material_idx = None
else:
pymaterial = gltf.data.materials[prim... | 6,512,850,218,290,860,000 | Mesh creation. | addons/io_scene_gltf2/blender/imp/gltf2_blender_mesh.py | create | MrTheRich/glTF-Blender-IO | python | @staticmethod
def create(gltf, mesh_idx, skin_idx):
pymesh = gltf.data.meshes[mesh_idx]
bme = bmesh.new()
materials = []
for prim in pymesh.primitives:
if (prim.material is None):
material_idx = None
else:
pymaterial = gltf.data.materials[prim.material]
... |
def testV1beta1CustomResourceSubresources(self):
'Test V1beta1CustomResourceSubresources'
pass | -7,919,106,972,520,173,000 | Test V1beta1CustomResourceSubresources | test/test_v1beta1_custom_resource_subresources.py | testV1beta1CustomResourceSubresources | olitheolix/aiokubernetes | python | def testV1beta1CustomResourceSubresources(self):
pass |
def _write_locks(self):
'\n Write racefile and ADCC Startup Report\n\n '
dotadcc = get_adcc_dir()
vals = {'http_port': self.http_port, 'pid': os.getpid()}
rfile = os.path.join(dotadcc, self.racefile)
with open(rfile, 'w') as ports:
ports.write(repr(vals))
sr = os.path.join(... | -4,548,844,850,677,403,600 | Write racefile and ADCC Startup Report | recipe_system/adcc/adcclib.py | _write_locks | Luke-Ludwig/DRAGONS | python | def _write_locks(self):
'\n \n\n '
dotadcc = get_adcc_dir()
vals = {'http_port': self.http_port, 'pid': os.getpid()}
rfile = os.path.join(dotadcc, self.racefile)
with open(rfile, 'w') as ports:
ports.write(repr(vals))
sr = os.path.join(dotadcc, self.sreport)
write_adcc_... |
def _extend_control_events_default(control_events, events, state):
'Default function for extending control event sequence.\n\n This function extends a control event sequence by duplicating the final event\n in the sequence. The control event sequence will be extended to have length\n one longer than the generat... | 8,439,013,635,635,939,000 | Default function for extending control event sequence.
This function extends a control event sequence by duplicating the final event
in the sequence. The control event sequence will be extended to have length
one longer than the generated event sequence.
Args:
control_events: The control event sequence to extend.
... | magenta/models/shared/events_rnn_model.py | _extend_control_events_default | Surya130499/magenta | python | def _extend_control_events_default(control_events, events, state):
'Default function for extending control event sequence.\n\n This function extends a control event sequence by duplicating the final event\n in the sequence. The control event sequence will be extended to have length\n one longer than the generat... |
def __init__(self, config):
'Initialize the EventSequenceRnnModel.\n\n Args:\n config: An EventSequenceRnnConfig containing the encoder/decoder and\n HParams to use.\n '
super(EventSequenceRnnModel, self).__init__()
self._config = config | 3,753,918,035,922,203,000 | Initialize the EventSequenceRnnModel.
Args:
config: An EventSequenceRnnConfig containing the encoder/decoder and
HParams to use. | magenta/models/shared/events_rnn_model.py | __init__ | Surya130499/magenta | python | def __init__(self, config):
'Initialize the EventSequenceRnnModel.\n\n Args:\n config: An EventSequenceRnnConfig containing the encoder/decoder and\n HParams to use.\n '
super(EventSequenceRnnModel, self).__init__()
self._config = config |
def _batch_size(self):
'Extracts the batch size from the graph.'
return self._session.graph.get_collection('inputs')[0].shape[0].value | -3,716,962,435,938,537,000 | Extracts the batch size from the graph. | magenta/models/shared/events_rnn_model.py | _batch_size | Surya130499/magenta | python | def _batch_size(self):
return self._session.graph.get_collection('inputs')[0].shape[0].value |
def _generate_step_for_batch(self, event_sequences, inputs, initial_state, temperature):
'Extends a batch of event sequences by a single step each.\n\n This method modifies the event sequences in place.\n\n Args:\n event_sequences: A list of event sequences, each of which is a Python\n list-like... | 3,157,986,383,050,230,300 | Extends a batch of event sequences by a single step each.
This method modifies the event sequences in place.
Args:
event_sequences: A list of event sequences, each of which is a Python
list-like object. The list of event sequences should have length equal
to `self._batch_size()`. These are extended by t... | magenta/models/shared/events_rnn_model.py | _generate_step_for_batch | Surya130499/magenta | python | def _generate_step_for_batch(self, event_sequences, inputs, initial_state, temperature):
'Extends a batch of event sequences by a single step each.\n\n This method modifies the event sequences in place.\n\n Args:\n event_sequences: A list of event sequences, each of which is a Python\n list-like... |
def _generate_step(self, event_sequences, model_states, logliks, temperature, extend_control_events_callback=None, modify_events_callback=None):
'Extends a list of event sequences by a single step each.\n\n This method modifies the event sequences in place. It also returns the\n modified event sequences and u... | 495,595,974,226,170,000 | Extends a list of event sequences by a single step each.
This method modifies the event sequences in place. It also returns the
modified event sequences and updated model states and log-likelihoods.
Args:
event_sequences: A list of event sequence objects, which are extended by
this method.
model_states: A l... | magenta/models/shared/events_rnn_model.py | _generate_step | Surya130499/magenta | python | def _generate_step(self, event_sequences, model_states, logliks, temperature, extend_control_events_callback=None, modify_events_callback=None):
'Extends a list of event sequences by a single step each.\n\n This method modifies the event sequences in place. It also returns the\n modified event sequences and u... |
def _generate_events(self, num_steps, primer_events, temperature=1.0, beam_size=1, branch_factor=1, steps_per_iteration=1, control_events=None, control_state=None, extend_control_events_callback=_extend_control_events_default, modify_events_callback=None):
'Generate an event sequence from a primer sequence.\n\n ... | -9,193,433,878,735,797,000 | Generate an event sequence from a primer sequence.
Args:
num_steps: The integer length in steps of the final event sequence, after
generation. Includes the primer.
primer_events: The primer event sequence, a Python list-like object.
temperature: A float specifying how much to divide the logits by
befo... | magenta/models/shared/events_rnn_model.py | _generate_events | Surya130499/magenta | python | def _generate_events(self, num_steps, primer_events, temperature=1.0, beam_size=1, branch_factor=1, steps_per_iteration=1, control_events=None, control_state=None, extend_control_events_callback=_extend_control_events_default, modify_events_callback=None):
'Generate an event sequence from a primer sequence.\n\n ... |
def _evaluate_batch_log_likelihood(self, event_sequences, inputs, initial_state):
'Evaluates the log likelihood of a batch of event sequences.\n\n Args:\n event_sequences: A list of event sequences, each of which is a Python\n list-like object. The list of event sequences should have length equal\n... | 4,642,773,188,067,565,000 | Evaluates the log likelihood of a batch of event sequences.
Args:
event_sequences: A list of event sequences, each of which is a Python
list-like object. The list of event sequences should have length equal
to `self._batch_size()`.
inputs: A Python list of model inputs, with length equal to
`self... | magenta/models/shared/events_rnn_model.py | _evaluate_batch_log_likelihood | Surya130499/magenta | python | def _evaluate_batch_log_likelihood(self, event_sequences, inputs, initial_state):
'Evaluates the log likelihood of a batch of event sequences.\n\n Args:\n event_sequences: A list of event sequences, each of which is a Python\n list-like object. The list of event sequences should have length equal\n... |
def _evaluate_log_likelihood(self, event_sequences, control_events=None):
'Evaluate log likelihood for a list of event sequences of the same length.\n\n Args:\n event_sequences: A list of event sequences for which to evaluate the log\n likelihood.\n control_events: A sequence of control events... | 3,431,351,879,014,282,000 | Evaluate log likelihood for a list of event sequences of the same length.
Args:
event_sequences: A list of event sequences for which to evaluate the log
likelihood.
control_events: A sequence of control events upon which to condition the
event sequences. If not None, the encoder/decoder should be a
... | magenta/models/shared/events_rnn_model.py | _evaluate_log_likelihood | Surya130499/magenta | python | def _evaluate_log_likelihood(self, event_sequences, control_events=None):
'Evaluate log likelihood for a list of event sequences of the same length.\n\n Args:\n event_sequences: A list of event sequences for which to evaluate the log\n likelihood.\n control_events: A sequence of control events... |
def move_media(origin_server, file_id, src_paths, dest_paths):
'Move the given file, and any thumbnails, to the dest repo\n\n Args:\n origin_server (str):\n file_id (str):\n src_paths (MediaFilePaths):\n dest_paths (MediaFilePaths):\n '
logger.info('%s/%s', origin_server, file_... | 7,045,278,995,300,920,000 | Move the given file, and any thumbnails, to the dest repo
Args:
origin_server (str):
file_id (str):
src_paths (MediaFilePaths):
dest_paths (MediaFilePaths): | scripts/move_remote_media_to_new_store.py | move_media | AP-whitehat/synapse | python | def move_media(origin_server, file_id, src_paths, dest_paths):
'Move the given file, and any thumbnails, to the dest repo\n\n Args:\n origin_server (str):\n file_id (str):\n src_paths (MediaFilePaths):\n dest_paths (MediaFilePaths):\n '
logger.info('%s/%s', origin_server, file_... |
def __init__(self, **kwargs):
'\n Initializes a new ComputeInstanceGroupSelectorCollection object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param items:\n The value to assign to the i... | -1,884,853,907,500,217,300 | Initializes a new ComputeInstanceGroupSelectorCollection object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param items:
The value to assign to the items property of this ComputeInstanceGroupSelectorCollection.
:type items... | src/oci/devops/models/compute_instance_group_selector_collection.py | __init__ | CentroidChef/oci-python-sdk | python | def __init__(self, **kwargs):
'\n Initializes a new ComputeInstanceGroupSelectorCollection object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param items:\n The value to assign to the i... |
@property
def items(self):
'\n **[Required]** Gets the items of this ComputeInstanceGroupSelectorCollection.\n A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.\n\n\n :return: The items of this ComputeInstanceGroupSelectorC... | 3,002,309,167,187,873,300 | **[Required]** Gets the items of this ComputeInstanceGroupSelectorCollection.
A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.
:return: The items of this ComputeInstanceGroupSelectorCollection.
:rtype: list[oci.devops.models.ComputeInstanceGroup... | src/oci/devops/models/compute_instance_group_selector_collection.py | items | CentroidChef/oci-python-sdk | python | @property
def items(self):
'\n **[Required]** Gets the items of this ComputeInstanceGroupSelectorCollection.\n A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.\n\n\n :return: The items of this ComputeInstanceGroupSelectorC... |
@items.setter
def items(self, items):
'\n Sets the items of this ComputeInstanceGroupSelectorCollection.\n A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.\n\n\n :param items: The items of this ComputeInstanceGroupSelector... | 3,188,827,730,419,083,000 | Sets the items of this ComputeInstanceGroupSelectorCollection.
A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.
:param items: The items of this ComputeInstanceGroupSelectorCollection.
:type: list[oci.devops.models.ComputeInstanceGroupSelector] | src/oci/devops/models/compute_instance_group_selector_collection.py | items | CentroidChef/oci-python-sdk | python | @items.setter
def items(self, items):
'\n Sets the items of this ComputeInstanceGroupSelectorCollection.\n A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.\n\n\n :param items: The items of this ComputeInstanceGroupSelector... |
def updateAdresPosistie(cur, X, Y, herkomst, ADRESID):
'herkomst: 2 = parcel (perceel), 3 = building (gebouw)'
cur.execute("UPDATE ADRESPOSITIES\n SET X=?, Y=?, BEGINORGANISATIE=1, BEGINBEWERKING=3, BEGINTIJD=strftime('%Y-%m-%dT%H:%M:%S','now'),\n HERKOMSTADRESPOSITIE=? WHERE ID=? ;", (X, Y, herkomst,... | -7,462,245,121,174,849,000 | herkomst: 2 = parcel (perceel), 3 = building (gebouw) | update_terrein_adrespositie.py | updateAdresPosistie | warrieka/xgrab2db | python | def updateAdresPosistie(cur, X, Y, herkomst, ADRESID):
cur.execute("UPDATE ADRESPOSITIES\n SET X=?, Y=?, BEGINORGANISATIE=1, BEGINBEWERKING=3, BEGINTIJD=strftime('%Y-%m-%dT%H:%M:%S','now'),\n HERKOMSTADRESPOSITIE=? WHERE ID=? ;", (X, Y, herkomst, ADRESID)) |
def compute_contacts_matrix(coords, mode='binary', thresh=7.8, min_seq_separation=8):
"\n much faster computation\n min_seq_separation is minimum distnce the two residues must be apart in sequence for them to be counted\n \n You can specify either of two modes:\n \n 1. 'binary': Returns 1 at posit... | 2,108,503,413,948,878,800 | much faster computation
min_seq_separation is minimum distnce the two residues must be apart in sequence for them to be counted
You can specify either of two modes:
1. 'binary': Returns 1 at positions where distance is less than or equal to thresh
2. 'distances': Returns the inter-residue distance wherever this distance... | dbfold/analyze_structures.py | compute_contacts_matrix | amirbitran/dbfold | python | def compute_contacts_matrix(coords, mode='binary', thresh=7.8, min_seq_separation=8):
"\n much faster computation\n min_seq_separation is minimum distnce the two residues must be apart in sequence for them to be counted\n \n You can specify either of two modes:\n \n 1. 'binary': Returns 1 at posit... |
def compute_RG(snapshot, atom='CA'):
'\n Radius of gyration...\n '
(coords, resis) = read_PDB(snapshot, atom)
R_cm = np.mean(coords, axis=0)
dR = (coords - R_cm)
mag_R = np.sum((dR * dR), axis=1)
RG = np.sqrt(np.mean(mag_R))
return RG | -5,014,992,422,287,519,000 | Radius of gyration... | dbfold/analyze_structures.py | compute_RG | amirbitran/dbfold | python | def compute_RG(snapshot, atom='CA'):
'\n \n '
(coords, resis) = read_PDB(snapshot, atom)
R_cm = np.mean(coords, axis=0)
dR = (coords - R_cm)
mag_R = np.sum((dR * dR), axis=1)
RG = np.sqrt(np.mean(mag_R))
return RG |
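As a quick worked check of the radius-of-gyration arithmetic in this row: for two CA atoms at (0, 0, 0) and (2, 0, 0), the center of mass is (1, 0, 0) and each atom lies 1 unit from it, so RG = 1.

```python
import numpy as np

coords = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]])
R_cm = np.mean(coords, axis=0)                  # [1, 0, 0]
dR = coords - R_cm
RG = np.sqrt(np.mean(np.sum(dR * dR, axis=1)))
print(RG)                                       # 1.0
```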
def create_substructure_PML(PDB_path, subs_to_plot, d_cutoff, min_clustersize, contact_sep_thresh, min_seq_separation=8, substructures=[], colours=[]):
"\n Identifies substructures, then creates a pymol .pml script that draws those substructures as colored contacts directly on the pymol\n \n Ex. Create_sub... | -9,167,810,917,161,769,000 | Identifies substructures, then creates a pymol .pml script that draws those substructures as colored contacts directly on the pymol
Ex. Create_substructure_PML('MARR_umbrella3/marr_0.100_Emin.pdb', ['a','b','c','d','e','f'], 7.8, 7, 3)
You can also pre-enter the substructures as an optional argument
Otherwise, it wil... | dbfold/analyze_structures.py | create_substructure_PML | amirbitran/dbfold | python | def create_substructure_PML(PDB_path, subs_to_plot, d_cutoff, min_clustersize, contact_sep_thresh, min_seq_separation=8, substructures=[], colours=[]):
"\n Identifies substructures, then creates a pymol .pml script that draws those substructures as colored contacts directly on the pymol\n \n Ex. Create_sub... |
def find_native_contacts(native_file, thresh, min_seq_separation, mode='binary'):
'\n finds all native contacts from native PDB file\n '
(native_coords, resis) = read_PDB(native_file, atom='CA')
native_contacts = compute_contacts_matrix(native_coords, thresh=thresh, min_seq_separation=min_seq_separati... | -7,727,523,894,113,354,000 | finds all native contacts from native PDB file | dbfold/analyze_structures.py | find_native_contacts | amirbitran/dbfold | python | def find_native_contacts(native_file, thresh, min_seq_separation, mode='binary'):
'\n \n '
(native_coords, resis) = read_PDB(native_file, atom='CA')
native_contacts = compute_contacts_matrix(native_coords, thresh=thresh, min_seq_separation=min_seq_separation, mode=mode)
return native_contacts |
def identify_native_substructures(native_file, d_cutoff, min_seq_separation, contact_sep_thresh, min_clustersize, atom='CA', labelsize=30, fontsize=30, max_res=None, plot=True, ax=None, native_contacts=[], verbose=False):
'\n Identify substructures within native file contact map\n Using the following strategy... | 8,276,949,333,124,320,000 | Identify substructures within native file contact map
Using the following strategy
We produce a contact map which is a bunch of dots
Contacts correspond to pairs of residues that are less than d_cutoff apart
6 Angstroms is generally a good value, but you may want a larger value for helical proteins where residues interact... | dbfold/analyze_structures.py | identify_native_substructures | amirbitran/dbfold | python | def identify_native_substructures(native_file, d_cutoff, min_seq_separation, contact_sep_thresh, min_clustersize, atom='CA', labelsize=30, fontsize=30, max_res=None, plot=True, ax=None, native_contacts=[], verbose=False):
'\n Identify substructures within native file contact map\n Using the following strategy... |
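The clustering step this docstring outlines (group native contacts whose positions on the map are within contact_sep_thresh of each other, then keep clusters with at least min_clustersize members) can be approximated with single-linkage clustering from SciPy. This is a hypothetical helper under those assumptions, not the dbfold code:

```python
import numpy as np
from scipy.cluster.hierarchy import fcluster, linkage

def cluster_contacts_sketch(contact_map, contact_sep_thresh=3, min_clustersize=7):
    # (i, j) coordinates of every contact in the binary map.
    points = np.argwhere(contact_map > 0).astype(float)
    # Single linkage with a Chebyshev metric: contacts within
    # contact_sep_thresh of each other on the map join the same cluster.
    Z = linkage(points, method='single', metric='chebyshev')
    labels = fcluster(Z, t=contact_sep_thresh, criterion='distance')
    clusters = [points[labels == lab] for lab in np.unique(labels)]
    return [c for c in clusters if len(c) >= min_clustersize]  # drop small clusters
```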
def PDB_contacts_matrix(PDB_file, thresh=7.8, min_seq_separation=8, plot=True, mode='binary'):
'\n Input PDB file, plots contacts matrix\n \n '
(coords, resis) = read_PDB(PDB_file, 'CA')
M = metrics.pairwise.pairwise_distances(coords)
M = np.tril(M, (- min_seq_separation))
if (mode == 'bina... | -1,211,378,378,583,712,500 | Input PDB file, plots contacts matrix | dbfold/analyze_structures.py | PDB_contacts_matrix | amirbitran/dbfold | python | def PDB_contacts_matrix(PDB_file, thresh=7.8, min_seq_separation=8, plot=True, mode='binary'):
'\n \n \n '
(coords, resis) = read_PDB(PDB_file, 'CA')
M = metrics.pairwise.pairwise_distances(coords)
M = np.tril(M, (- min_seq_separation))
if (mode == 'binary'):
contacts = np.zeros(np.... |
def read_PDB(file, atom):
"\n extracts coordinates for some side chain atom in some PDB file\n For instance, atom will have value 'CA' if you care about the alpha carbons\n \n TODO: Fix this so it can deal with chain labels\n Right now if the PDB has a chain label in the fifth column, this will give ... | -4,223,119,219,618,408,000 | extracts coordinates for some side chain atom in some PDB file
For instance, atom will have value 'CA' if you care about the alpha carbons
TODO: Fix this so it can deal with chain labels
Right now if the PDB has a chain label in the fifth column, this will give nonsense results | dbfold/analyze_structures.py | read_PDB | amirbitran/dbfold | python | def read_PDB(file, atom):
"\n extracts coordinates for some side chain atom in some PDB file\n For instance, atom will have value 'CA' if you care about the alpha carbons\n \n TODO: Fix this so it can deal with chain labels\n Right now if the PDB has a chain label in the fifth column, this will give ... |
def score_snapshot(snapshot, substructures, atom='CA', min_seq_separation=8):
'\n Assigns a set of scores for a snapshot\n the ith score tells you the average distance between pairs of residues that participate in the ith substructure, in this snapshot\n If the score is close to the charac... | 1,263,311,548,412,532,700 | Assigns a set of scores for a snapshot
the ith score tells you the average distance between pairs of residues that participate in the ith substructure, in this snapshot
If the score is close to the characteristic contact distance, then the substructure should be mostly formed | dbfold/analyze_structures.py | score_snapshot | amirbitran/dbfold | python | def score_snapshot(snapshot, substructures, atom='CA', min_seq_separation=8):
'\n Assigns a set of scores for a snapshot\n the ith score tells you the average distance between pairs of residues that participate in the ith substructure, in this snapshot\n If the score is close to the charac...
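Per the docstring, a substructure's score is just the mean inter-residue distance over its participating pairs in the snapshot. A minimal sketch, assuming substructures arrive as lists of (i, j) residue-index pairs (the real code may store them as boolean contact maps instead):

```python
import numpy as np

def score_sketch(coords, substructures):
    # coords: (N, 3) CA coordinates; substructures: list of lists of (i, j) pairs.
    scores = []
    for pairs in substructures:
        d = [np.linalg.norm(coords[i] - coords[j]) for i, j in pairs]
        scores.append(np.mean(d))  # near the contact distance => mostly formed
    return np.array(scores)
```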
def visualize_nonnatives(nonnatives_path, native_file, d_cutoff=6.5, cmap='Greys', Return=False, cbar=True, filter_natives=True, filter_distance=2, vmax=1, alpha=1, custom_filter=None, ax=None, labelsize=40):
"\n Reads a file of the form Distance_maps.dat and makes a contact map of nonnative contacts with shadin... | 7,431,308,676,125,319,000 | Reads a file of the form Distance_maps.dat and makes a contact map of nonnative contacts with shading according to frequency with whcih
that contact is observed
d_cutoff is the distance cutoff with which you identify NATIVE structures to subtract off from the nonnatives...should be
the same as whatever was used to identi... | dbfold/analyze_structures.py | visualize_nonnatives | amirbitran/dbfold | python | def visualize_nonnatives(nonnatives_path, native_file, d_cutoff=6.5, cmap='Greys', Return=False, cbar=True, filter_natives=True, filter_distance=2, vmax=1, alpha=1, custom_filter=None, ax=None, labelsize=40):
"\n Reads a file of the form Distance_maps.dat and makes a contact map of nonnative contacts with shadin... |
def visualize_substructures(native_contacts, substructures, max_res=None, ax=None, labelsize=30, fontsize=30):
'\n Visualizes substructures as follows\n Everything that is a native contact but not part of any substructure will have value -1 on the shown image\n (Update 10/1/18, actually will only show contacts... | -7,961,988,868,218,634,000 | Visualizes substructures as follows
Everything that is a native contact but not part of any substructure will have value -1 on the shown image
(Update 10/1/18, actually will only show contacts that are part of substructures)
Meanwhile, everything that is part of substructure i (i ranges from 0 to N_substructures-1) will ha... | dbfold/analyze_structures.py | visualize_substructures | amirbitran/dbfold | python | def visualize_substructures(native_contacts, substructures, max_res=None, ax=None, labelsize=30, fontsize=30):
'\n Visualizes substructures as follows\n Everything that is a native contact but not part of any substructure will have value -1 on the shown image\n (Update 10/1/18, actually will only show contacts...
@property
def data_field(self) -> str:
'\n Field of the response containing data.\n By default the value self.name will be used if this property is empty or None\n '
return None | -4,658,865,884,078,469,000 | Field of the response containing data.
By default the value self.name will be used if this property is empty or None | airbyte-integrations/connectors/source-cart/source_cart/streams.py | data_field | 52-entertainment/airbyte | python | @property
def data_field(self) -> str:
'\n Field of the response containing data.\n By default the value self.name will be used if this property is empty or None\n '
return None |
def backoff_time(self, response: requests.Response) -> Optional[float]:
'\n We do not need to check the response.status_code == 429 since this header exists only in this case.\n '
retry_after = response.headers.get('Retry-After')
if retry_after:
return float(retry_after) | -4,580,624,153,590,376,400 | We do not need to check the response.status_code == 429 since this header exists only in this case. | airbyte-integrations/connectors/source-cart/source_cart/streams.py | backoff_time | 52-entertainment/airbyte | python | def backoff_time(self, response: requests.Response) -> Optional[float]:
'\n \n '
retry_after = response.headers.get('Retry-After')
if retry_after:
return float(retry_after) |
def request_params(self, stream_state: Mapping[(str, Any)], **kwargs) -> MutableMapping[(str, Any)]:
'\n Generates a query for incremental logic\n\n Docs: https://developers.cart.com/docs/rest-api/docs/query_syntax.md\n '
params = super().request_params(stream_state=stream_state, **kwargs)
... | -5,349,757,518,194,969,000 | Generates a query for incremental logic
Docs: https://developers.cart.com/docs/rest-api/docs/query_syntax.md | airbyte-integrations/connectors/source-cart/source_cart/streams.py | request_params | 52-entertainment/airbyte | python | def request_params(self, stream_state: Mapping[(str, Any)], **kwargs) -> MutableMapping[(str, Any)]:
'\n Generates a query for incremental logic\n\n Docs: https://developers.cart.com/docs/rest-api/docs/query_syntax.md\n '
params = super().request_params(stream_state=stream_state, **kwargs)
... |
def get_updated_state(self, current_stream_state: MutableMapping[(str, Any)], latest_record: Mapping[(str, Any)]) -> Mapping[(str, Any)]:
"\n Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object\n and returning an updated state object.\n... | 3,993,137,306,451,256,300 | Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object
and returning an updated state object. | airbyte-integrations/connectors/source-cart/source_cart/streams.py | get_updated_state | 52-entertainment/airbyte | python | def get_updated_state(self, current_stream_state: MutableMapping[(str, Any)], latest_record: Mapping[(str, Any)]) -> Mapping[(str, Any)]:
"\n Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object\n and returning an updated state object.\n... |
def printchapter28note():
'\n print chapter28 note.\n '
print('Run main : single chapter twenty-eight!')
chapter28_1.note()
chapter28_2.note()
chapter28_3.note()
chapter28_4.note()
chapter28_5.note() | 6,848,029,913,907,678,000 | print chapter28 note. | src/chapter28/chapter28note.py | printchapter28note | HideLakitu/IntroductionToAlgorithm.Python | python | def printchapter28note():
'\n \n '
print('Run main : single chapter twenty-eight!')
chapter28_1.note()
chapter28_2.note()
chapter28_3.note()
chapter28_4.note()
chapter28_5.note() |
def note(self):
'\n Summary\n ====\n Print chapter28.1 note\n\n Example\n ====\n ```python\n Chapter28_1().note()\n ```\n '
print('chapter28.1 note as follows')
print('28.1 Properties of matrices')
print('Matrix operations are very important in scientific computing')
print('A matrix is a rectangular array of numbers; in python, use np.... | -1,073,519,064,665,087,400 | Summary
====
Print chapter28.1 note
Example
====
```python
Chapter28_1().note()
``` | src/chapter28/chapter28note.py | note | HideLakitu/IntroductionToAlgorithm.Python | python | def note(self):
'\n Summary\n ====\n Print chapter28.1 note\n\n Example\n ====\n ```python\n Chapter28_1().note()\n ```\n '
print('chapter28.1 note as follows')
print('28.1 Properties of matrices')
print('Matrix operations are very important in scientific computing')
print('A matrix is a rectangular array of numbers; in python, use np....
def note(self):
'\n Summary\n ====\n Print chapter28.2 note\n\n Example\n ====\n ```python\n Chapter28_2().note()\n ```\n '
print('chapter28.2 note as follows')
print('28.2 The Strassen algorithm for matrix multiplication')
print('The famous Strassen recursive algorithm for the product of two n*n matrices runs in time Θ(n^lg7)=Θ(n... | -924,092,662,678,552,300 | Summary
====
Print chapter28.2 note
Example
====
```python
Chapter28_2().note()
``` | src/chapter28/chapter28note.py | note | HideLakitu/IntroductionToAlgorithm.Python | python | def note(self):
'\n Summary\n ====\n Print chapter28.2 note\n\n Example\n ====\n ```python\n Chapter28_2().note()\n ```\n '
print('chapter28.2 note as follows')
print('28.2 The Strassen algorithm for matrix multiplication')
print('The famous Strassen recursive algorithm for the product of two n*n matrices runs in time Θ(n^lg7)=Θ(n...
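The Θ(n^lg 7) bound cited in this row comes from replacing the eight recursive block products of naive divide-and-conquer with seven. A textbook NumPy sketch for n a power of two (illustrative; this is not the chapter's own code):

```python
import numpy as np

def strassen(A, B):
    n = A.shape[0]
    if n == 1:
        return A * B
    h = n // 2  # split into four h x h blocks; assumes n is a power of 2
    A11, A12, A21, A22 = A[:h, :h], A[:h, h:], A[h:, :h], A[h:, h:]
    B11, B12, B21, B22 = B[:h, :h], B[:h, h:], B[h:, :h], B[h:, h:]
    # Seven recursive products instead of eight -- the source of the lg 7.
    M1 = strassen(A11 + A22, B11 + B22)
    M2 = strassen(A21 + A22, B11)
    M3 = strassen(A11, B12 - B22)
    M4 = strassen(A22, B21 - B11)
    M5 = strassen(A11 + A12, B22)
    M6 = strassen(A21 - A11, B11 + B12)
    M7 = strassen(A12 - A22, B21 + B22)
    C = np.empty_like(A)
    C[:h, :h] = M1 + M4 - M5 + M7
    C[:h, h:] = M3 + M5
    C[h:, :h] = M2 + M4
    C[h:, h:] = M1 - M2 + M3 + M6
    return C
```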
def note(self):
'\n Summary\n ====\n Print chapter28.3 note\n\n Example\n ====\n ```python\n Chapter28_3().note()\n ```\n '
print('chapter28.3 note as follows')
print('28.3 Solving systems of linear equations')
print('Solving a set of simultaneous linear equations Ax=b is a fundamental problem that arises in many applications. A linear system can be expressed as a matrix equation... | -2,181,509,471,979,686,400 | Summary
====
Print chapter28.3 note
Example
====
```python
Chapter28_3().note()
``` | src/chapter28/chapter28note.py | note | HideLakitu/IntroductionToAlgorithm.Python | python | def note(self):
'\n Summary\n ====\n Print chapter28.3 note\n\n Example\n ====\n ```python\n Chapter28_3().note()\n ```\n '
print('chapter28.3 note as follows')
print('28.3 Solving systems of linear equations')
print('Solving a set of simultaneous linear equations Ax=b is a fundamental problem that arises in many applications. A linear system can be expressed as a matrix equation...
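A short sketch of the LUP route to Ax = b that this note describes, using SciPy's factor/solve pair: factor once in O(n^3), then each right-hand side costs only forward and back substitution.

```python
import numpy as np
from scipy.linalg import lu_factor, lu_solve

A = np.array([[1.0, 2.0, 0.0],
              [3.0, 4.0, 4.0],
              [5.0, 6.0, 3.0]])
b = np.array([3.0, 7.0, 8.0])

lu, piv = lu_factor(A)       # PA = LU factorization
x = lu_solve((lu, piv), b)   # forward/back substitution
print(x)                     # [-1.4  2.2  0.6]
```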
def note(self):
'\n Summary\n ====\n Print chapter28.4 note\n\n Example\n ====\n ```python\n Chapter28_4().note()\n ```\n '
print('chapter28.4 note as follows')
print('28.4 Matrix inversion')
print('In practice, the inverse matrix is generally not used to solve systems of linear equations; instead, more numerically stable techniques such as LUP decomposition are used to solve linear... | -5,879,448,951,198,253,000 | Summary
====
Print chapter28.4 note
Example
====
```python
Chapter28_4().note()
``` | src/chapter28/chapter28note.py | note | HideLakitu/IntroductionToAlgorithm.Python | python | def note(self):
'\n Summary\n ====\n Print chapter28.4 note\n\n Example\n ====\n ```python\n Chapter28_4().note()\n ```\n '
print('chapter28.4 note as follows')
print('28.4 Matrix inversion')
print('In practice, the inverse matrix is generally not used to solve systems of linear equations; instead, more numerically stable techniques such as LUP decomposition are used to solve linear...
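The preference this note states, solving rather than explicitly inverting, is easy to demonstrate: np.linalg.solve factorizes internally, while forming the inverse does strictly more work and is less numerically stable.

```python
import numpy as np

A = np.array([[4.0, 3.0], [6.0, 3.0]])
b = np.array([10.0, 12.0])

x_solve = np.linalg.solve(A, b)     # preferred: factorize and substitute
x_inv = np.linalg.inv(A) @ b        # works, but avoid for solving systems
print(np.allclose(x_solve, x_inv))  # True here; solve is still the safer habit
```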
def note(self):
'\n Summary\n ====\n Print chapter28.5 note\n\n Example\n ====\n ```python\n Chapter28_5().note()\n ```\n '
print('chapter28.5 note as follows')
print('28.5 Symmetric positive-definite matrices and least-squares approximation')
print('Symmetric positive-definite matrices have many interesting and desirable properties. For example, they are all nonsingular, and LU decomposition can be performed on them without... | 1,030,241,814,436,185,900 | Summary
====
Print chapter28.5 note
Example
====
```python
Chapter28_5().note()
``` | src/chapter28/chapter28note.py | note | HideLakitu/IntroductionToAlgorithm.Python | python | def note(self):
'\n Summary\n ====\n Print chapter28.5 note\n\n Example\n ====\n ```python\n Chapter28_5().note()\n ```\n '
print('chapter28.5 note as follows')
print('28.5 Symmetric positive-definite matrices and least-squares approximation')
print('Symmetric positive-definite matrices have many interesting and desirable properties. For example, they are all nonsingular, and LU decomposition can be performed on them without...
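The least-squares tie-in: with a full-column-rank design matrix A, the normal equations (AᵀA)c = Aᵀy have a symmetric positive-definite coefficient matrix, so a Cholesky factorization applies. A minimal sketch fitting a line:

```python
import numpy as np
from scipy.linalg import cho_factor, cho_solve

# Fit y = c0 + c1*x by least squares via the normal equations.
x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([1.1, 1.9, 3.2, 3.9])
A = np.column_stack([np.ones_like(x), x])  # design matrix, full column rank

AtA = A.T @ A                              # symmetric positive definite
c = cho_solve(cho_factor(AtA), A.T @ y)    # Cholesky solve
print(c)                                   # approx [1.07, 0.97]
```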
def readin():
'Reading from stdin and displaying menu'
global positionrequest, ptz
selection = sys.stdin.readline().strip('\n')
lov = [x for x in selection.split(' ') if (x != '')]
if lov:
if (lov[0].lower() in ['u', 'up']):
move_up(ptz, positionrequest)
elif (lov[0].lowe... | -3,910,166,043,833,411,000 | Reading from stdin and displaying menu | examples/AbsoluteMove.py | readin | intflow/python-onvif-zeep | python | def readin():
global positionrequest, ptz
selection = sys.stdin.readline().strip('\n')
lov = [x for x in selection.split(' ') if (x != '')]
if lov:
if (lov[0].lower() in ['u', 'up']):
move_up(ptz, positionrequest)
elif (lov[0].lower() in ['d', 'do', 'dow', 'down']):
... |
def create_inactive_user(self, form):
'\n Create the inactive user account and send an email containing\n activation instructions.\n\n '
new_user = form.save(commit=False)
new_user.is_active = False
new_user.save()
self.send_activation_email(new_user)
return new_user | 3,215,324,053,063,257,000 | Create the inactive user account and send an email containing
activation instructions. | polyaxon/api/users/views.py | create_inactive_user | AntoineToubhans/polyaxon | python | def create_inactive_user(self, form):
'\n Create the inactive user account and send an email containing\n activation instructions.\n\n '
new_user = form.save(commit=False)
new_user.is_active = False
new_user.save()
self.send_activation_email(new_user)
return new_user |
def get_activation_key(self, user):
'\n Generate the activation key which will be emailed to the user.\n\n '
return signing.dumps(obj=getattr(user, user.USERNAME_FIELD), salt=self.key_salt) | 6,403,821,274,535,462,000 | Generate the activation key which will be emailed to the user. | polyaxon/api/users/views.py | get_activation_key | AntoineToubhans/polyaxon | python | def get_activation_key(self, user):
'\n \n\n '
return signing.dumps(obj=getattr(user, user.USERNAME_FIELD), salt=self.key_salt) |
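The counterpart to this signed key is validation, which Django performs with signing.loads against the same salt plus an expiry window. A hedged sketch of the check an activation view would run (assumes a configured Django settings module; the parameter names are illustrative):

```python
from django.core import signing

def validate_activation_key(activation_key, key_salt, activation_days):
    try:
        # Raises BadSignature (or SignatureExpired) if tampered with or too old.
        return signing.loads(activation_key,
                             salt=key_salt,
                             max_age=activation_days * 86400)
    except signing.BadSignature:
        return None  # invalid or expired key: do not activate
```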
def get_email_context(self, activation_key):
'\n Build the template context used for the activation email.\n\n '
return {'activation_key': activation_key, 'expiration_days': conf.get('ACCOUNT_ACTIVATION_DAYS'), 'site': get_current_site(self.request)} | -3,627,736,984,849,235,500 | Build the template context used for the activation email. | polyaxon/api/users/views.py | get_email_context | AntoineToubhans/polyaxon | python | def get_email_context(self, activation_key):
'\n \n\n '
return {'activation_key': activation_key, 'expiration_days': conf.get('ACCOUNT_ACTIVATION_DAYS'), 'site': get_current_site(self.request)} |
def send_activation_email(self, user):
'\n Send the activation email. The activation key is the username,\n signed using TimestampSigner.\n\n '
activation_key = self.get_activation_key(user)
context = self.get_email_context(activation_key)
context.update({'user': user})
subject ... | -7,646,028,755,603,774,000 | Send the activation email. The activation key is the username,
signed using TimestampSigner. | polyaxon/api/users/views.py | send_activation_email | AntoineToubhans/polyaxon | python | def send_activation_email(self, user):
'\n Send the activation email. The activation key is the username,\n signed using TimestampSigner.\n\n '
activation_key = self.get_activation_key(user)
context = self.get_email_context(activation_key)
context.update({'user': user})
subject ... |
def create_inactive_user(self, form):
'Create the inactive user account and wait for validation from superuser'
new_user = form.save(commit=False)
new_user.is_active = False
new_user.save()
return new_user | 5,030,324,730,589,924,000 | Create the inactive user account and wait for validation from superuser | polyaxon/api/users/views.py | create_inactive_user | AntoineToubhans/polyaxon | python | def create_inactive_user(self, form):
new_user = form.save(commit=False)
new_user.is_active = False
new_user.save()
return new_user |