Columns (each record below repeats these eight fields in this order):

- body: string, length 26 to 98.2k
- body_hash: int64, -9,222,864,604,528,158,000 to 9,221,803,474B
- docstring: string, length 1 to 16.8k
- path: string, length 5 to 230
- name: string, length 1 to 96
- repository_name: string, length 7 to 89
- lang: string, 1 class (every record in this section has lang = python)
- body_without_docstring: string, length 20 to 98.2k
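A minimal sketch of what one record looks like under this schema, assuming a Python TypedDict; the field names come from the column list above, while the concrete str/int annotations and the per-field comments are inferred from the records below rather than stated by the dataset itself:

from typing import TypedDict

class Record(TypedDict):
    body: str                    # full function source, docstring included
    body_hash: int               # 64-bit integer hash associated with the body
    docstring: str               # the docstring text on its own
    path: str                    # file path within the repository
    name: str                    # function name
    repository_name: str         # "owner/repo" identifier
    lang: str                    # "python" in every record shown here
    body_without_docstring: str  # the same source with the docstring stripped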
def create_corp_faucet_config(self): 'Create Faucet config for corp network' setup_vlan = SETUP_VLAN switch = 'corp' dps = {} interfaces = self._build_dp_interfaces(CORP_DP_ID, tagged_vlans=[setup_vlan], access_ports=1, access_port_start=1, native_vlan=setup_vlan, egress_port=CORP_EGRESS_PORT) d...
-8,864,064,651,153,820,000
Create Faucet config for corp network
testing/python_lib/build_config.py
create_corp_faucet_config
henry54809/forch
python
def create_corp_faucet_config(self): setup_vlan = SETUP_VLAN switch = 'corp' dps = {} interfaces = self._build_dp_interfaces(CORP_DP_ID, tagged_vlans=[setup_vlan], access_ports=1, access_port_start=1, native_vlan=setup_vlan, egress_port=CORP_EGRESS_PORT) dps[switch] = self._build_datapath_confi...
def scan(): 'Caller function that tries to scans the file and write the report.' spec_path = settings['spec_path'] try: api_spec = load_config_file(spec_path) except FileNotFoundError as e: error_message = f'Could not find API spec file: {spec_path}. {str(e)}' logger.error(error_...
-2,529,913,209,857,200,000
Caller function that tries to scans the file and write the report.
scanapi/scan.py
scan
hebertjulio/scanapi
python
def scan(): spec_path = settings['spec_path'] try: api_spec = load_config_file(spec_path) except FileNotFoundError as e: error_message = f'Could not find API spec file: {spec_path}. {str(e)}' logger.error(error_message) raise SystemExit(ExitCode.USAGE_ERROR) except E...
def write_report(results): 'Constructs a Reporter object and calls the write method of Reporter to\n push the results to a file.\n ' reporter = Reporter(settings['output_path'], settings['template']) reporter.write(results)
-3,180,117,976,623,210,500
Constructs a Reporter object and calls the write method of Reporter to push the results to a file.
scanapi/scan.py
write_report
hebertjulio/scanapi
python
def write_report(results): 'Constructs a Reporter object and calls the write method of Reporter to\n push the results to a file.\n ' reporter = Reporter(settings['output_path'], settings['template']) reporter.write(results)
def _ConvertBoxToCOCOFormat(box): 'Converts a box in [ymin, xmin, ymax, xmax] format to COCO format.\n\n This is a utility function for converting from our internal\n [ymin, xmin, ymax, xmax] convention to the convention used by the COCO API\n i.e., [xmin, ymin, width, height].\n\n Args:\n box: a [ymin, xmin...
-6,747,070,920,789,550,000
Converts a box in [ymin, xmin, ymax, xmax] format to COCO format. This is a utility function for converting from our internal [ymin, xmin, ymax, xmax] convention to the convention used by the COCO API i.e., [xmin, ymin, width, height]. Args: box: a [ymin, xmin, ymax, xmax] numpy array Returns: a list of floats r...
research/object_detection/metrics/coco_tools.py
_ConvertBoxToCOCOFormat
1911590204/models
python
def _ConvertBoxToCOCOFormat(box): 'Converts a box in [ymin, xmin, ymax, xmax] format to COCO format.\n\n This is a utility function for converting from our internal\n [ymin, xmin, ymax, xmax] convention to the convention used by the COCO API\n i.e., [xmin, ymin, width, height].\n\n Args:\n box: a [ymin, xmin...
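The docstring above fully specifies the coordinate conversion even though the record's code body is truncated; a minimal sketch of that arithmetic (the function name and the sample box are illustrative, not the dataset's implementation):

import numpy as np

def box_to_coco(box):
    # [ymin, xmin, ymax, xmax] -> COCO's [xmin, ymin, width, height]
    ymin, xmin, ymax, xmax = box
    return [float(xmin), float(ymin), float(xmax - xmin), float(ymax - ymin)]

print(box_to_coco(np.array([10.0, 20.0, 50.0, 80.0])))  # [20.0, 10.0, 60.0, 40.0]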
def _RleCompress(masks): 'Compresses mask using Run-length encoding provided by pycocotools.\n\n Args:\n masks: uint8 numpy array of shape [mask_height, mask_width] with values in\n {0, 1}.\n\n Returns:\n A pycocotools Run-length encoding of the mask.\n ' rle = mask.encode(np.asfortranarray(masks)) ...
-4,503,842,151,480,810,000
Compresses mask using Run-length encoding provided by pycocotools. Args: masks: uint8 numpy array of shape [mask_height, mask_width] with values in {0, 1}. Returns: A pycocotools Run-length encoding of the mask.
research/object_detection/metrics/coco_tools.py
_RleCompress
1911590204/models
python
def _RleCompress(masks): 'Compresses mask using Run-length encoding provided by pycocotools.\n\n Args:\n masks: uint8 numpy array of shape [mask_height, mask_width] with values in\n {0, 1}.\n\n Returns:\n A pycocotools Run-length encoding of the mask.\n ' rle = mask.encode(np.asfortranarray(masks)) ...
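The body above reduces to a single call into pycocotools; a small self-contained usage sketch of that same call (the toy mask is made up for illustration):

import numpy as np
from pycocotools import mask as mask_util

m = np.zeros((4, 4), dtype=np.uint8)   # binary mask with values in {0, 1}
m[1:3, 1:3] = 1

# pycocotools expects a Fortran-contiguous array, hence np.asfortranarray
rle = mask_util.encode(np.asfortranarray(m))
print(rle['size'], rle['counts'])      # run-length encoding as {'size': [h, w], 'counts': ...}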
def ExportSingleImageGroundtruthToCoco(image_id, next_annotation_id, category_id_set, groundtruth_boxes, groundtruth_classes, groundtruth_keypoints=None, groundtruth_keypoint_visibilities=None, groundtruth_masks=None, groundtruth_is_crowd=None, groundtruth_area=None): 'Export groundtruth of a single image to COCO f...
-6,087,324,160,309,731,000
Export groundtruth of a single image to COCO format. This function converts groundtruth detection annotations represented as numpy arrays to dictionaries that can be ingested by the COCO evaluation API. Note that the image_ids provided here must match the ones given to ExportSingleImageDetectionsToCoco. We assume that...
research/object_detection/metrics/coco_tools.py
ExportSingleImageGroundtruthToCoco
1911590204/models
python
def ExportSingleImageGroundtruthToCoco(image_id, next_annotation_id, category_id_set, groundtruth_boxes, groundtruth_classes, groundtruth_keypoints=None, groundtruth_keypoint_visibilities=None, groundtruth_masks=None, groundtruth_is_crowd=None, groundtruth_area=None): 'Export groundtruth of a single image to COCO f...
def ExportGroundtruthToCOCO(image_ids, groundtruth_boxes, groundtruth_classes, categories, output_path=None): 'Export groundtruth detection annotations in numpy arrays to COCO API.\n\n This function converts a set of groundtruth detection annotations represented\n as numpy arrays to dictionaries that can be inges...
-3,856,544,612,097,964,000
Export groundtruth detection annotations in numpy arrays to COCO API. This function converts a set of groundtruth detection annotations represented as numpy arrays to dictionaries that can be ingested by the COCO API. Inputs to this function are three lists: image ids for each groundtruth image, groundtruth boxes for ...
research/object_detection/metrics/coco_tools.py
ExportGroundtruthToCOCO
1911590204/models
python
def ExportGroundtruthToCOCO(image_ids, groundtruth_boxes, groundtruth_classes, categories, output_path=None): 'Export groundtruth detection annotations in numpy arrays to COCO API.\n\n This function converts a set of groundtruth detection annotations represented\n as numpy arrays to dictionaries that can be inges...
def ExportSingleImageDetectionBoxesToCoco(image_id, category_id_set, detection_boxes, detection_scores, detection_classes, detection_keypoints=None, detection_keypoint_visibilities=None): 'Export detections of a single image to COCO format.\n\n This function converts detections represented as numpy arrays to dicti...
3,486,113,173,692,428,300
Export detections of a single image to COCO format. This function converts detections represented as numpy arrays to dictionaries that can be ingested by the COCO evaluation API. Note that the image_ids provided here must match the ones given to the ExporSingleImageDetectionBoxesToCoco. We assume that boxes, and class...
research/object_detection/metrics/coco_tools.py
ExportSingleImageDetectionBoxesToCoco
1911590204/models
python
def ExportSingleImageDetectionBoxesToCoco(image_id, category_id_set, detection_boxes, detection_scores, detection_classes, detection_keypoints=None, detection_keypoint_visibilities=None): 'Export detections of a single image to COCO format.\n\n This function converts detections represented as numpy arrays to dicti...
def ExportSingleImageDetectionMasksToCoco(image_id, category_id_set, detection_masks, detection_scores, detection_classes): 'Export detection masks of a single image to COCO format.\n\n This function converts detections represented as numpy arrays to dictionaries\n that can be ingested by the COCO evaluation API....
38,152,405,171,328,380
Export detection masks of a single image to COCO format. This function converts detections represented as numpy arrays to dictionaries that can be ingested by the COCO evaluation API. We assume that detection_masks, detection_scores, and detection_classes are in correspondence - that is: detection_masks[i, :], detecti...
research/object_detection/metrics/coco_tools.py
ExportSingleImageDetectionMasksToCoco
1911590204/models
python
def ExportSingleImageDetectionMasksToCoco(image_id, category_id_set, detection_masks, detection_scores, detection_classes): 'Export detection masks of a single image to COCO format.\n\n This function converts detections represented as numpy arrays to dictionaries\n that can be ingested by the COCO evaluation API....
def ExportDetectionsToCOCO(image_ids, detection_boxes, detection_scores, detection_classes, categories, output_path=None): "Export detection annotations in numpy arrays to COCO API.\n\n This function converts a set of predicted detections represented\n as numpy arrays to dictionaries that can be ingested by the C...
-1,430,712,689,237,600,800
Export detection annotations in numpy arrays to COCO API. This function converts a set of predicted detections represented as numpy arrays to dictionaries that can be ingested by the COCO API. Inputs to this function are lists, consisting of boxes, scores and classes, respectively, corresponding to each image for whic...
research/object_detection/metrics/coco_tools.py
ExportDetectionsToCOCO
1911590204/models
python
def ExportDetectionsToCOCO(image_ids, detection_boxes, detection_scores, detection_classes, categories, output_path=None): "Export detection annotations in numpy arrays to COCO API.\n\n This function converts a set of predicted detections represented\n as numpy arrays to dictionaries that can be ingested by the C...
def ExportSegmentsToCOCO(image_ids, detection_masks, detection_scores, detection_classes, categories, output_path=None): "Export segmentation masks in numpy arrays to COCO API.\n\n This function converts a set of predicted instance masks represented\n as numpy arrays to dictionaries that can be ingested by the CO...
-927,010,710,476,147,200
Export segmentation masks in numpy arrays to COCO API. This function converts a set of predicted instance masks represented as numpy arrays to dictionaries that can be ingested by the COCO API. Inputs to this function are lists, consisting of segments, scores and classes, respectively, corresponding to each image for ...
research/object_detection/metrics/coco_tools.py
ExportSegmentsToCOCO
1911590204/models
python
def ExportSegmentsToCOCO(image_ids, detection_masks, detection_scores, detection_classes, categories, output_path=None): "Export segmentation masks in numpy arrays to COCO API.\n\n This function converts a set of predicted instance masks represented\n as numpy arrays to dictionaries that can be ingested by the CO...
def ExportKeypointsToCOCO(image_ids, detection_keypoints, detection_scores, detection_classes, categories, output_path=None): "Exports keypoints in numpy arrays to COCO API.\n\n This function converts a set of predicted keypoints represented\n as numpy arrays to dictionaries that can be ingested by the COCO API.\...
7,840,153,829,423,577,000
Exports keypoints in numpy arrays to COCO API. This function converts a set of predicted keypoints represented as numpy arrays to dictionaries that can be ingested by the COCO API. Inputs to this function are lists, consisting of keypoints, scores and classes, respectively, corresponding to each image for which detect...
research/object_detection/metrics/coco_tools.py
ExportKeypointsToCOCO
1911590204/models
python
def ExportKeypointsToCOCO(image_ids, detection_keypoints, detection_scores, detection_classes, categories, output_path=None): "Exports keypoints in numpy arrays to COCO API.\n\n This function converts a set of predicted keypoints represented\n as numpy arrays to dictionaries that can be ingested by the COCO API.\...
def __init__(self, dataset, detection_type='bbox'): "COCOWrapper constructor.\n\n See http://mscoco.org/dataset/#format for a description of the format.\n By default, the coco.COCO class constructor reads from a JSON file.\n This function duplicates the same behavior but loads from a dictionary,\n allow...
3,777,113,071,917,594,000
COCOWrapper constructor. See http://mscoco.org/dataset/#format for a description of the format. By default, the coco.COCO class constructor reads from a JSON file. This function duplicates the same behavior but loads from a dictionary, allowing us to perform evaluation without writing to external storage. Args: dat...
research/object_detection/metrics/coco_tools.py
__init__
1911590204/models
python
def __init__(self, dataset, detection_type='bbox'): "COCOWrapper constructor.\n\n See http://mscoco.org/dataset/#format for a description of the format.\n By default, the coco.COCO class constructor reads from a JSON file.\n This function duplicates the same behavior but loads from a dictionary,\n allow...
def LoadAnnotations(self, annotations): "Load annotations dictionary into COCO datastructure.\n\n See http://mscoco.org/dataset/#format for a description of the annotations\n format. As above, this function replicates the default behavior of the API\n but does not require writing to external storage.\n\n ...
8,178,324,416,221,913,000
Load annotations dictionary into COCO datastructure. See http://mscoco.org/dataset/#format for a description of the annotations format. As above, this function replicates the default behavior of the API but does not require writing to external storage. Args: annotations: python list holding object detection result...
research/object_detection/metrics/coco_tools.py
LoadAnnotations
1911590204/models
python
def LoadAnnotations(self, annotations): "Load annotations dictionary into COCO datastructure.\n\n See http://mscoco.org/dataset/#format for a description of the annotations\n format. As above, this function replicates the default behavior of the API\n but does not require writing to external storage.\n\n ...
def __init__(self, groundtruth=None, detections=None, agnostic_mode=False, iou_type='bbox', oks_sigmas=None): "COCOEvalWrapper constructor.\n\n Note that for the area-based metrics to be meaningful, detection and\n groundtruth boxes must be in image coordinates measured in pixels.\n\n Args:\n groundtr...
-4,644,386,061,494,226,000
COCOEvalWrapper constructor. Note that for the area-based metrics to be meaningful, detection and groundtruth boxes must be in image coordinates measured in pixels. Args: groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding groundtruth annotations detections: a coco.COCO (or coco_tools.COCOWrap...
research/object_detection/metrics/coco_tools.py
__init__
1911590204/models
python
def __init__(self, groundtruth=None, detections=None, agnostic_mode=False, iou_type='bbox', oks_sigmas=None): "COCOEvalWrapper constructor.\n\n Note that for the area-based metrics to be meaningful, detection and\n groundtruth boxes must be in image coordinates measured in pixels.\n\n Args:\n groundtr...
def GetCategory(self, category_id): "Fetches dictionary holding category information given category id.\n\n Args:\n category_id: integer id\n Returns:\n dictionary holding 'id', 'name'.\n " return self.cocoGt.cats[category_id]
-3,998,284,783,981,275,000
Fetches dictionary holding category information given category id. Args: category_id: integer id Returns: dictionary holding 'id', 'name'.
research/object_detection/metrics/coco_tools.py
GetCategory
1911590204/models
python
def GetCategory(self, category_id): "Fetches dictionary holding category information given category id.\n\n Args:\n category_id: integer id\n Returns:\n dictionary holding 'id', 'name'.\n " return self.cocoGt.cats[category_id]
def GetAgnosticMode(self): 'Returns true if COCO Eval is configured to evaluate in agnostic mode.' return (self.params.useCats == 0)
-4,317,986,916,639,350,300
Returns true if COCO Eval is configured to evaluate in agnostic mode.
research/object_detection/metrics/coco_tools.py
GetAgnosticMode
1911590204/models
python
def GetAgnosticMode(self): return (self.params.useCats == 0)
def GetCategoryIdList(self): 'Returns list of valid category ids.' return self.params.catIds
-2,981,913,091,674,385,400
Returns list of valid category ids.
research/object_detection/metrics/coco_tools.py
GetCategoryIdList
1911590204/models
python
def GetCategoryIdList(self): return self.params.catIds
def ComputeMetrics(self, include_metrics_per_category=False, all_metrics_per_category=False): "Computes detection/keypoint metrics.\n\n Args:\n include_metrics_per_category: If True, will include metrics per category.\n all_metrics_per_category: If true, include all the summery metrics for\n eac...
5,216,740,938,967,259,000
Computes detection/keypoint metrics. Args: include_metrics_per_category: If True, will include metrics per category. all_metrics_per_category: If true, include all the summery metrics for each category in per_category_ap. Be careful with setting it to true if you have more than handful of categories, becau...
research/object_detection/metrics/coco_tools.py
ComputeMetrics
1911590204/models
python
def ComputeMetrics(self, include_metrics_per_category=False, all_metrics_per_category=False): "Computes detection/keypoint metrics.\n\n Args:\n include_metrics_per_category: If True, will include metrics per category.\n all_metrics_per_category: If true, include all the summery metrics for\n eac...
def accept(self): '\n Override the accept method so that we can confirm saving an\n invalid configuration.\n ' result = QtWidgets.QMessageBox.Yes if (not self.validate()): result = QtWidgets.QMessageBox.warning(self, 'Invalid Configuration', "This configuration is invalid. Unpr...
7,433,577,860,333,540,000
Override the accept method so that we can confirm saving an invalid configuration.
mapclientplugins/filechooserstep/configuredialog.py
accept
mapclient-plugins/mapclientplugins.filechooserstep
python
def accept(self): '\n Override the accept method so that we can confirm saving an\n invalid configuration.\n ' result = QtWidgets.QMessageBox.Yes if (not self.validate()): result = QtWidgets.QMessageBox.warning(self, 'Invalid Configuration', "This configuration is invalid. Unpr...
def validate(self): '\n Validate the configuration dialog fields. For any field that is not valid\n set the style sheet to the INVALID_STYLE_SHEET. Return the outcome of the\n overall validity of the configuration.\n ' value = self.identifierOccursCount(self._ui.lineEdit0.text()) ...
441,426,544,836,570,000
Validate the configuration dialog fields. For any field that is not valid set the style sheet to the INVALID_STYLE_SHEET. Return the outcome of the overall validity of the configuration.
mapclientplugins/filechooserstep/configuredialog.py
validate
mapclient-plugins/mapclientplugins.filechooserstep
python
def validate(self): '\n Validate the configuration dialog fields. For any field that is not valid\n set the style sheet to the INVALID_STYLE_SHEET. Return the outcome of the\n overall validity of the configuration.\n ' value = self.identifierOccursCount(self._ui.lineEdit0.text()) ...
def getConfig(self): '\n Get the current value of the configuration from the dialog. Also\n set the _previousIdentifier value so that we can check uniqueness of the\n identifier over the whole of the workflow.\n ' self._previousIdentifier = self._ui.lineEdit0.text() config = {'i...
-1,545,015,863,487,636,500
Get the current value of the configuration from the dialog. Also set the _previousIdentifier value so that we can check uniqueness of the identifier over the whole of the workflow.
mapclientplugins/filechooserstep/configuredialog.py
getConfig
mapclient-plugins/mapclientplugins.filechooserstep
python
def getConfig(self): '\n Get the current value of the configuration from the dialog. Also\n set the _previousIdentifier value so that we can check uniqueness of the\n identifier over the whole of the workflow.\n ' self._previousIdentifier = self._ui.lineEdit0.text() config = {'i...
def setConfig(self, config): '\n Set the current value of the configuration for the dialog. Also\n set the _previousIdentifier value so that we can check uniqueness of the\n identifier over the whole of the workflow.\n ' self._previousIdentifier = config['identifier'] self._ui.l...
5,738,320,274,872,744,000
Set the current value of the configuration for the dialog. Also set the _previousIdentifier value so that we can check uniqueness of the identifier over the whole of the workflow.
mapclientplugins/filechooserstep/configuredialog.py
setConfig
mapclient-plugins/mapclientplugins.filechooserstep
python
def setConfig(self, config): '\n Set the current value of the configuration for the dialog. Also\n set the _previousIdentifier value so that we can check uniqueness of the\n identifier over the whole of the workflow.\n ' self._previousIdentifier = config['identifier'] self._ui.l...
def verify(self, hash, sig): 'Verify a DER signature' return (ssl.AMBKSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1)
-2,046,395,906,615,599,900
Verify a DER signature
test/functional/test_framework/key.py
verify
Alonewolf-123/AmbankCoin-Core
python
def verify(self, hash, sig): return (ssl.AMBKSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1)
def parse_python_version(output): "Parse a Python version output returned by `python --version`.\n\n Return a dict with three keys: major, minor, and micro. Each value is a\n string containing a version part.\n\n Note: The micro part would be `'0'` if it's missing from the input string.\n " version_...
-7,576,245,133,647,391,000
Parse a Python version output returned by `python --version`. Return a dict with three keys: major, minor, and micro. Each value is a string containing a version part. Note: The micro part would be `'0'` if it's missing from the input string.
pipenv/utils.py
parse_python_version
bryant1410/pipenv
python
def parse_python_version(output): "Parse a Python version output returned by `python --version`.\n\n Return a dict with three keys: major, minor, and micro. Each value is a\n string containing a version part.\n\n Note: The micro part would be `'0'` if it's missing from the input string.\n " version_...
def escape_grouped_arguments(s): 'Prepares a string for the shell (on Windows too!)\n\n Only for use on grouped arguments (passed as a string to Popen)\n ' if (s is None): return None if (os.name == 'nt'): s = '{}'.format(s.replace('\\', '\\\\')) return (('"' + s.replace("'", "'\\'...
2,562,507,320,774,941,700
Prepares a string for the shell (on Windows too!) Only for use on grouped arguments (passed as a string to Popen)
pipenv/utils.py
escape_grouped_arguments
bryant1410/pipenv
python
def escape_grouped_arguments(s): 'Prepares a string for the shell (on Windows too!)\n\n Only for use on grouped arguments (passed as a string to Popen)\n ' if (s is None): return None if (os.name == 'nt'): s = '{}'.format(s.replace('\\', '\\\\')) return (('"' + s.replace("'", "'\\"...
def clean_pkg_version(version): 'Uses pip to prepare a package version string, from our internal version.' return six.u(pep440_version(str(version).replace('==', '')))
1,798,999,973,971,679,200
Uses pip to prepare a package version string, from our internal version.
pipenv/utils.py
clean_pkg_version
bryant1410/pipenv
python
def clean_pkg_version(version): return six.u(pep440_version(str(version).replace('==', '')))
def resolve_deps(deps, which, project, sources=None, verbose=False, python=False, clear=False, pre=False, allow_global=False): 'Given a list of dependencies, return a resolved list of dependencies,\n using pip-tools -- and their hashes, using the warehouse API / pip9.\n ' index_lookup = {} markers_loo...
5,097,824,507,640,910,000
Given a list of dependencies, return a resolved list of dependencies, using pip-tools -- and their hashes, using the warehouse API / pip9.
pipenv/utils.py
resolve_deps
bryant1410/pipenv
python
def resolve_deps(deps, which, project, sources=None, verbose=False, python=False, clear=False, pre=False, allow_global=False): 'Given a list of dependencies, return a resolved list of dependencies,\n using pip-tools -- and their hashes, using the warehouse API / pip9.\n ' index_lookup = {} markers_loo...
def multi_split(s, split): 'Splits on multiple given separators.' for r in split: s = s.replace(r, '|') return [i for i in s.split('|') if (len(i) > 0)]
-6,995,361,326,840,965,000
Splits on multiple given separators.
pipenv/utils.py
multi_split
bryant1410/pipenv
python
def multi_split(s, split): for r in split: s = s.replace(r, '|') return [i for i in s.split('|') if (len(i) > 0)]
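The multi_split body is shown in full above, so its behaviour can be traced directly; a short self-contained usage sketch with illustrative inputs:

def multi_split(s, split):
    # as shown above: map every separator to '|', then drop empty pieces
    for r in split:
        s = s.replace(r, '|')
    return [i for i in s.split('|') if len(i) > 0]

print(multi_split('1.2.3-rc1', ['.', '-']))  # ['1', '2', '3', 'rc1']
print(multi_split('a,,b', [',']))            # ['a', 'b'] -- the empty middle piece is dropped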
def convert_deps_from_pip(dep): '"Converts a pip-formatted dependency to a Pipfile-formatted one.' dependency = {} req = get_requirement(dep) extras = {'extras': req.extras} if ((req.uri or req.path or is_installable_file(req.name)) and (not req.vcs)): if ((not req.uri) and (not req.path)): ...
6,363,460,669,016,941,000
"Converts a pip-formatted dependency to a Pipfile-formatted one.
pipenv/utils.py
convert_deps_from_pip
bryant1410/pipenv
python
def convert_deps_from_pip(dep): dependency = {} req = get_requirement(dep) extras = {'extras': req.extras} if ((req.uri or req.path or is_installable_file(req.name)) and (not req.vcs)): if ((not req.uri) and (not req.path)): req.path = os.path.abspath(req.name) hashable_...
def convert_deps_to_pip(deps, project=None, r=True, include_index=False): '"Converts a Pipfile-formatted dependency to a pip-formatted one.' dependencies = [] for dep in deps.keys(): extra = (deps[dep] if isinstance(deps[dep], six.string_types) else '') version = '' index = '' ...
3,140,597,842,437,439,500
"Converts a Pipfile-formatted dependency to a pip-formatted one.
pipenv/utils.py
convert_deps_to_pip
bryant1410/pipenv
python
def convert_deps_to_pip(deps, project=None, r=True, include_index=False): dependencies = [] for dep in deps.keys(): extra = (deps[dep] if isinstance(deps[dep], six.string_types) else '') version = '' index = '' if (is_star(deps[dep]) or (str(extra) == '{}')): extra = ...
def mkdir_p(newdir): 'works the way a good mkdir should :)\n - already exists, silently complete\n - regular file in the way, raise an exception\n - parent directory(ies) does not exist, make them as well\n From: http://code.activestate.com/recipes/82465-a-friendly-mkdir/\n ' if o...
-8,025,579,765,829,738,000
works the way a good mkdir should :) - already exists, silently complete - regular file in the way, raise an exception - parent directory(ies) does not exist, make them as well From: http://code.activestate.com/recipes/82465-a-friendly-mkdir/
pipenv/utils.py
mkdir_p
bryant1410/pipenv
python
def mkdir_p(newdir): 'works the way a good mkdir should :)\n - already exists, silently complete\n - regular file in the way, raise an exception\n - parent directory(ies) does not exist, make them as well\n From: http://code.activestate.com/recipes/82465-a-friendly-mkdir/\n ' if o...
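The docstring above describes the contract rather than the (truncated) implementation: create missing parents, succeed silently if the directory already exists, raise if a regular file is in the way. On current Python the standard library covers the same contract, which this sketch uses in place of the dataset's own body:

import os

os.makedirs(os.path.join('build', 'cache', 'wheels'), exist_ok=True)  # parents created as needed
os.makedirs(os.path.join('build', 'cache', 'wheels'), exist_ok=True)  # already exists: no error
# a regular file sitting at any of those path components would raise an OSError subclass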
def is_required_version(version, specified_version): "Check to see if there's a hard requirement for version\n number provided in the Pipfile.\n " if isinstance(specified_version, dict): specified_version = specified_version.get('version', '') if specified_version.startswith('=='): ret...
3,528,375,736,170,234,000
Check to see if there's a hard requirement for version number provided in the Pipfile.
pipenv/utils.py
is_required_version
bryant1410/pipenv
python
def is_required_version(version, specified_version): "Check to see if there's a hard requirement for version\n number provided in the Pipfile.\n " if isinstance(specified_version, dict): specified_version = specified_version.get('version', '') if specified_version.startswith('=='): retur...
def strip_ssh_from_git_uri(uri): 'Return git+ssh:// formatted URI to git+git@ format' if isinstance(uri, six.string_types): uri = uri.replace('git+ssh://', 'git+') return uri
-5,153,976,107,256,773,000
Return git+ssh:// formatted URI to git+git@ format
pipenv/utils.py
strip_ssh_from_git_uri
bryant1410/pipenv
python
def strip_ssh_from_git_uri(uri): if isinstance(uri, six.string_types): uri = uri.replace('git+ssh://', 'git+') return uri
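strip_ssh_from_git_uri's body appears in full above; a self-contained traced example (the URI is illustrative):

import six  # the helper tests against six.string_types

def strip_ssh_from_git_uri(uri):
    # as shown above
    if isinstance(uri, six.string_types):
        uri = uri.replace('git+ssh://', 'git+')
    return uri

print(strip_ssh_from_git_uri('git+ssh://git@github.com/pypa/pipenv.git'))
# git+git@github.com/pypa/pipenv.git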
def clean_git_uri(uri): 'Cleans VCS uris from pip9 format' if isinstance(uri, six.string_types): if (uri.startswith('git+') and ('://' not in uri)): uri = uri.replace('git+', 'git+ssh://') return uri
8,837,214,570,924,101,000
Cleans VCS uris from pip9 format
pipenv/utils.py
clean_git_uri
bryant1410/pipenv
python
def clean_git_uri(uri): if isinstance(uri, six.string_types): if (uri.startswith('git+') and ('://' not in uri)): uri = uri.replace('git+', 'git+ssh://') return uri
def is_installable_file(path): 'Determine if a path can potentially be installed' from .vendor.pip9.utils import is_installable_dir from .vendor.pip9.utils.packaging import specifiers if (hasattr(path, 'keys') and any((key for key in path.keys() if (key in ['file', 'path'])))): path = (urlparse(...
-8,326,956,013,517,452,000
Determine if a path can potentially be installed
pipenv/utils.py
is_installable_file
bryant1410/pipenv
python
def is_installable_file(path): from .vendor.pip9.utils import is_installable_dir from .vendor.pip9.utils.packaging import specifiers if (hasattr(path, 'keys') and any((key for key in path.keys() if (key in ['file', 'path'])))): path = (urlparse(path['file']).path if ('file' in path) else path['...
def is_file(package): 'Determine if a package name is for a File dependency.' if hasattr(package, 'keys'): return any((key for key in package.keys() if (key in ['file', 'path']))) if os.path.exists(str(package)): return True for start in SCHEME_LIST: if str(package).startswith(st...
1,091,657,782,702,303,400
Determine if a package name is for a File dependency.
pipenv/utils.py
is_file
bryant1410/pipenv
python
def is_file(package): if hasattr(package, 'keys'): return any((key for key in package.keys() if (key in ['file', 'path']))) if os.path.exists(str(package)): return True for start in SCHEME_LIST: if str(package).startswith(start): return True return False
def pep440_version(version): 'Normalize version to PEP 440 standards' from .vendor.pip9.index import parse_version return str(parse_version(version))
5,361,031,010,979,994,000
Normalize version to PEP 440 standards
pipenv/utils.py
pep440_version
bryant1410/pipenv
python
def pep440_version(version): from .vendor.pip9.index import parse_version return str(parse_version(version))
def pep423_name(name): 'Normalize package name to PEP 423 style standard.' name = name.lower() if any(((i not in name) for i in (VCS_LIST + SCHEME_LIST))): return name.replace('_', '-') else: return name
6,748,167,606,597,170,000
Normalize package name to PEP 423 style standard.
pipenv/utils.py
pep423_name
bryant1410/pipenv
python
def pep423_name(name): name = name.lower() if any(((i not in name) for i in (VCS_LIST + SCHEME_LIST))): return name.replace('_', '-') else: return name
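pep423_name's body is shown in full above but references VCS_LIST and SCHEME_LIST, which this record does not include; the tuples below are stand-ins supplied only so the traced example runs, not the dataset's actual constants:

VCS_LIST = ('git', 'hg', 'svn', 'bzr')                      # stand-in
SCHEME_LIST = ('http://', 'https://', 'ftp://', 'file://')  # stand-in

def pep423_name(name):
    # as shown above: lowercase, then use '-' instead of '_' for plain package names
    name = name.lower()
    if any((i not in name) for i in (VCS_LIST + SCHEME_LIST)):
        return name.replace('_', '-')
    else:
        return name

print(pep423_name('Flask_SQLAlchemy'))  # flask-sqlalchemy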
def proper_case(package_name): 'Properly case project name from pypi.org.' r = requests.get('https://pypi.org/pypi/{0}/json'.format(package_name), timeout=0.3, stream=True) if (not r.ok): raise IOError('Unable to find package {0} in PyPI repository.'.format(package_name)) r = parse.parse('https:...
5,332,965,172,988,998,000
Properly case project name from pypi.org.
pipenv/utils.py
proper_case
bryant1410/pipenv
python
def proper_case(package_name): r = requests.get('https://pypi.org/pypi/{0}/json'.format(package_name), timeout=0.3, stream=True) if (not r.ok): raise IOError('Unable to find package {0} in PyPI repository.'.format(package_name)) r = parse.parse('https://pypi.org/pypi/{name}/json', r.url) go...
def split_section(input_file, section_suffix, test_function): '\n Split a pipfile or a lockfile section out by section name and test function\n\n :param dict input_file: A dictionary containing either a pipfile or lockfile\n :param str section_suffix: A string of the name of the section\n :p...
3,888,405,553,536,379,400
Split a pipfile or a lockfile section out by section name and test function :param dict input_file: A dictionary containing either a pipfile or lockfile :param str section_suffix: A string of the name of the section :param func test_function: A test function to test against the value in the key/value pair ...
pipenv/utils.py
split_section
bryant1410/pipenv
python
def split_section(input_file, section_suffix, test_function): '\n Split a pipfile or a lockfile section out by section name and test function\n\n :param dict input_file: A dictionary containing either a pipfile or lockfile\n :param str section_suffix: A string of the name of the section\n :p...
def split_file(file_dict): 'Split VCS and editable dependencies out from file.' sections = {'vcs': is_vcs, 'editable': (lambda x: (hasattr(x, 'keys') and x.get('editable')))} for (k, func) in sections.items(): file_dict = split_section(file_dict, k, func) return file_dict
1,330,811,071,559,589,000
Split VCS and editable dependencies out from file.
pipenv/utils.py
split_file
bryant1410/pipenv
python
def split_file(file_dict): sections = {'vcs': is_vcs, 'editable': (lambda x: (hasattr(x, 'keys') and x.get('editable')))} for (k, func) in sections.items(): file_dict = split_section(file_dict, k, func) return file_dict
def merge_deps(file_dict, project, dev=False, requirements=False, ignore_hashes=False, blocking=False, only=False): '\n Given a file_dict, merges dependencies and converts them to pip dependency lists.\n :param dict file_dict: The result of calling :func:`pipenv.utils.split_file`\n :param :class:`p...
6,053,193,627,376,801,000
Given a file_dict, merges dependencies and converts them to pip dependency lists. :param dict file_dict: The result of calling :func:`pipenv.utils.split_file` :param :class:`pipenv.project.Project` project: Pipenv project :param bool dev=False: Flag indicating whether dev dependencies are to be installed ...
pipenv/utils.py
merge_deps
bryant1410/pipenv
python
def merge_deps(file_dict, project, dev=False, requirements=False, ignore_hashes=False, blocking=False, only=False): '\n Given a file_dict, merges dependencies and converts them to pip dependency lists.\n :param dict file_dict: The result of calling :func:`pipenv.utils.split_file`\n :param :class:`p...
def recase_file(file_dict): 'Recase file before writing to output.' if (('packages' in file_dict) or ('dev-packages' in file_dict)): sections = ('packages', 'dev-packages') elif (('default' in file_dict) or ('develop' in file_dict)): sections = ('default', 'develop') for section in secti...
-392,200,137,092,393,150
Recase file before writing to output.
pipenv/utils.py
recase_file
bryant1410/pipenv
python
def recase_file(file_dict): if (('packages' in file_dict) or ('dev-packages' in file_dict)): sections = ('packages', 'dev-packages') elif (('default' in file_dict) or ('develop' in file_dict)): sections = ('default', 'develop') for section in sections: file_section = file_dict.g...
def get_windows_path(*args): 'Sanitize a path for windows environments\n\n Accepts an arbitrary list of arguments and makes a clean windows path' return os.path.normpath(os.path.join(*args))
-5,803,461,582,242,583,000
Sanitize a path for windows environments Accepts an arbitrary list of arguments and makes a clean windows path
pipenv/utils.py
get_windows_path
bryant1410/pipenv
python
def get_windows_path(*args): 'Sanitize a path for windows environments\n\n Accepts an arbitrary list of arguments and makes a clean windows path' return os.path.normpath(os.path.join(*args))
def find_windows_executable(bin_path, exe_name): 'Given an executable name, search the given location for an executable' requested_path = get_windows_path(bin_path, exe_name) if os.path.exists(requested_path): return requested_path exe_name = os.path.splitext(exe_name)[0] files = ['{0}.{1}'....
-2,987,833,260,518,996,000
Given an executable name, search the given location for an executable
pipenv/utils.py
find_windows_executable
bryant1410/pipenv
python
def find_windows_executable(bin_path, exe_name): requested_path = get_windows_path(bin_path, exe_name) if os.path.exists(requested_path): return requested_path exe_name = os.path.splitext(exe_name)[0] files = ['{0}.{1}'.format(exe_name, ext) for ext in ['', 'py', 'exe', 'bat']] exec_paths...
def get_converted_relative_path(path, relative_to=os.curdir): 'Given a vague relative path, return the path relative to the given location' return os.path.join('.', os.path.relpath(path, start=relative_to))
-8,656,903,140,058,767,000
Given a vague relative path, return the path relative to the given location
pipenv/utils.py
get_converted_relative_path
bryant1410/pipenv
python
def get_converted_relative_path(path, relative_to=os.curdir): return os.path.join('.', os.path.relpath(path, start=relative_to))
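get_converted_relative_path's one-line body is shown above; a traced example (paths are illustrative, output shown for POSIX separators):

import os

def get_converted_relative_path(path, relative_to=os.curdir):
    # as shown above
    return os.path.join('.', os.path.relpath(path, start=relative_to))

print(get_converted_relative_path('/home/user/project/Pipfile', '/home/user/project'))
# ./Pipfile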
def walk_up(bottom): "Mimic os.walk, but walk 'up' instead of down the directory tree.\n From: https://gist.github.com/zdavkeos/1098474\n " bottom = os.path.realpath(bottom) try: names = os.listdir(bottom) except Exception: return (dirs, nondirs) = ([], []) for name in name...
-7,195,392,152,588,847,000
Mimic os.walk, but walk 'up' instead of down the directory tree. From: https://gist.github.com/zdavkeos/1098474
pipenv/utils.py
walk_up
bryant1410/pipenv
python
def walk_up(bottom): "Mimic os.walk, but walk 'up' instead of down the directory tree.\n From: https://gist.github.com/zdavkeos/1098474\n " bottom = os.path.realpath(bottom) try: names = os.listdir(bottom) except Exception: return (dirs, nondirs) = ([], []) for name in name...
def find_requirements(max_depth=3): 'Returns the path of a Pipfile in parent directories.' i = 0 for (c, d, f) in walk_up(os.getcwd()): i += 1 if (i < max_depth): if 'requirements.txt': r = os.path.join(c, 'requirements.txt') if os.path.isfile(r): ...
-8,605,925,904,386,501,000
Returns the path of a Pipfile in parent directories.
pipenv/utils.py
find_requirements
bryant1410/pipenv
python
def find_requirements(max_depth=3): i = 0 for (c, d, f) in walk_up(os.getcwd()): i += 1 if (i < max_depth): if 'requirements.txt': r = os.path.join(c, 'requirements.txt') if os.path.isfile(r): return r raise RuntimeError('N...
@contextmanager def temp_environ(): 'Allow the ability to set os.environ temporarily' environ = dict(os.environ) try: (yield) finally: os.environ.clear() os.environ.update(environ)
-5,083,302,786,420,072,000
Allow the ability to set os.environ temporarily
pipenv/utils.py
temp_environ
bryant1410/pipenv
python
@contextmanager def temp_environ(): environ = dict(os.environ) try: (yield) finally: os.environ.clear() os.environ.update(environ)
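temp_environ's body is shown in full above; a self-contained usage sketch (the environment variable name is illustrative):

import os
from contextlib import contextmanager

@contextmanager
def temp_environ():
    # as shown above: snapshot os.environ, restore it on exit
    environ = dict(os.environ)
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(environ)

with temp_environ():
    os.environ['PIPENV_EXAMPLE'] = '1'       # visible only inside the block
assert 'PIPENV_EXAMPLE' not in os.environ    # restored afterwards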
def is_valid_url(url): 'Checks if a given string is an url' pieces = urlparse(url) return all([pieces.scheme, pieces.netloc])
-4,789,592,044,157,309,000
Checks if a given string is an url
pipenv/utils.py
is_valid_url
bryant1410/pipenv
python
def is_valid_url(url): pieces = urlparse(url) return all([pieces.scheme, pieces.netloc])
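is_valid_url's body is shown in full above; a traced example, assuming Python 3's urllib.parse for the unshown urlparse import:

from urllib.parse import urlparse

def is_valid_url(url):
    # as shown above: a URL needs both a scheme and a network location
    pieces = urlparse(url)
    return all([pieces.scheme, pieces.netloc])

print(is_valid_url('https://pypi.org/simple'))  # True
print(is_valid_url('not-a-url'))                # False: no scheme, no netloc
print(is_valid_url('file:///tmp/data.json'))    # False: file URLs have an empty netloc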
def download_file(url, filename): 'Downloads file from url to a path with filename' r = requests.get(url, stream=True) if (not r.ok): raise IOError('Unable to download file') with open(filename, 'wb') as f: f.write(r.content)
-7,474,985,168,864,853,000
Downloads file from url to a path with filename
pipenv/utils.py
download_file
bryant1410/pipenv
python
def download_file(url, filename): r = requests.get(url, stream=True) if (not r.ok): raise IOError('Unable to download file') with open(filename, 'wb') as f: f.write(r.content)
def need_update_check(): 'Determines whether we need to check for updates.' mkdir_p(PIPENV_CACHE_DIR) p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check')) if (not os.path.exists(p)): return True out_of_date_time = (time() - ((24 * 60) * 60)) if (os.path.isfile(p) and (os.path.getm...
-8,032,898,415,673,751,000
Determines whether we need to check for updates.
pipenv/utils.py
need_update_check
bryant1410/pipenv
python
def need_update_check(): mkdir_p(PIPENV_CACHE_DIR) p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check')) if (not os.path.exists(p)): return True out_of_date_time = (time() - ((24 * 60) * 60)) if (os.path.isfile(p) and (os.path.getmtime(p) <= out_of_date_time)): return True...
def touch_update_stamp(): 'Touches PIPENV_CACHE_DIR/.pipenv_update_check' mkdir_p(PIPENV_CACHE_DIR) p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check')) try: os.utime(p, None) except OSError: with open(p, 'w') as fh: fh.write('')
-4,278,246,743,979,614,000
Touches PIPENV_CACHE_DIR/.pipenv_update_check
pipenv/utils.py
touch_update_stamp
bryant1410/pipenv
python
def touch_update_stamp(): mkdir_p(PIPENV_CACHE_DIR) p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check')) try: os.utime(p, None) except OSError: with open(p, 'w') as fh: fh.write('')
def normalize_drive(path): 'Normalize drive in path so they stay consistent.\n\n This currently only affects local drives on Windows, which can be\n identified with either upper or lower cased drive names. The case is\n always converted to uppercase because it seems to be preferred.\n\n See: <https://gi...
7,206,725,071,959,051,000
Normalize drive in path so they stay consistent. This currently only affects local drives on Windows, which can be identified with either upper or lower cased drive names. The case is always converted to uppercase because it seems to be preferred. See: <https://github.com/pypa/pipenv/issues/1218>
pipenv/utils.py
normalize_drive
bryant1410/pipenv
python
def normalize_drive(path): 'Normalize drive in path so they stay consistent.\n\n This currently only affects local drives on Windows, which can be\n identified with either upper or lower cased drive names. The case is\n always converted to uppercase because it seems to be preferred.\n\n See: <https://gi...
def is_readonly_path(fn): 'Check if a provided path exists and is readonly.\n\n Permissions check is `bool(path.stat & stat.S_IREAD)` or `not os.access(path, os.W_OK)`\n ' if os.path.exists(fn): return ((os.stat(fn).st_mode & stat.S_IREAD) or (not os.access(fn, os.W_OK))) return False
4,072,325,937,409,912,000
Check if a provided path exists and is readonly. Permissions check is `bool(path.stat & stat.S_IREAD)` or `not os.access(path, os.W_OK)`
pipenv/utils.py
is_readonly_path
bryant1410/pipenv
python
def is_readonly_path(fn): 'Check if a provided path exists and is readonly.\n\n Permissions check is `bool(path.stat & stat.S_IREAD)` or `not os.access(path, os.W_OK)`\n ' if os.path.exists(fn): return ((os.stat(fn).st_mode & stat.S_IREAD) or (not os.access(fn, os.W_OK))) return False
def handle_remove_readonly(func, path, exc): 'Error handler for shutil.rmtree.\n\n Windows source repo folders are read-only by default, so this error handler\n attempts to set them as writeable and then proceed with deletion.' default_warning_message = 'Unable to remove file due to permissions restrictio...
-2,753,335,397,450,273,000
Error handler for shutil.rmtree. Windows source repo folders are read-only by default, so this error handler attempts to set them as writeable and then proceed with deletion.
pipenv/utils.py
handle_remove_readonly
bryant1410/pipenv
python
def handle_remove_readonly(func, path, exc): 'Error handler for shutil.rmtree.\n\n Windows source repo folders are read-only by default, so this error handler\n attempts to set them as writeable and then proceed with deletion.' default_warning_message = 'Unable to remove file due to permissions restrictio...
def _deduplicate(data): 'Remove duplicated records.' cnt = collections.Counter((row['id'] for row in data)) nonuniq_ids = set((id for (id, count) in cnt.items() if (count > 1))) nonuniq_data = [row for row in data if (row['id'] in nonuniq_ids)] unique_data = [row for row in data if (row['id'] not in...
4,788,760,498,953,770,000
Remove duplicated records.
tensorflow_datasets/text/reddit_disentanglement.py
_deduplicate
Ak0303/datasets
python
def _deduplicate(data): cnt = collections.Counter((row['id'] for row in data)) nonuniq_ids = set((id for (id, count) in cnt.items() if (count > 1))) nonuniq_data = [row for row in data if (row['id'] in nonuniq_ids)] unique_data = [row for row in data if (row['id'] not in nonuniq_ids)] nonuniq_d...
def _split_generators(self, dl_manager): 'Returns SplitGenerators.' return [tfds.core.SplitGenerator(name=tfds.Split.TRAIN, gen_kwargs={'path': os.path.join(dl_manager.manual_dir, 'train.csv')}), tfds.core.SplitGenerator(name=tfds.Split.VALIDATION, gen_kwargs={'path': os.path.join(dl_manager.manual_dir, 'val.cs...
-2,188,673,168,850,584,000
Returns SplitGenerators.
tensorflow_datasets/text/reddit_disentanglement.py
_split_generators
Ak0303/datasets
python
def _split_generators(self, dl_manager): return [tfds.core.SplitGenerator(name=tfds.Split.TRAIN, gen_kwargs={'path': os.path.join(dl_manager.manual_dir, 'train.csv')}), tfds.core.SplitGenerator(name=tfds.Split.VALIDATION, gen_kwargs={'path': os.path.join(dl_manager.manual_dir, 'val.csv')}), tfds.core.SplitGene...
def _generate_examples(self, path): 'Yields examples.' data = list(_read_csv(path)) data = _deduplicate(data) for (link_id, one_topic_data) in itertools.groupby(data, (lambda row: row['link_id'])): one_topic_data = list(one_topic_data) for row in one_topic_data: row['text'] =...
6,543,013,553,364,795,000
Yields examples.
tensorflow_datasets/text/reddit_disentanglement.py
_generate_examples
Ak0303/datasets
python
def _generate_examples(self, path): data = list(_read_csv(path)) data = _deduplicate(data) for (link_id, one_topic_data) in itertools.groupby(data, (lambda row: row['link_id'])): one_topic_data = list(one_topic_data) for row in one_topic_data: row['text'] = row.pop('body') ...
def detect(image: str, verbose: bool=False): 'Detects faces on a given image using dlib and returns matches.\n\n :param image: Path to access the image to be searched\n :type image: [string]\n :param verbose: Wether or not command should output informations\n :type image: [bool], default to False\n\n ...
-7,453,832,317,566,232,000
Detects faces on a given image using dlib and returns matches. :param image: Path to access the image to be searched :type image: [string] :param verbose: Wether or not command should output informations :type image: [bool], default to False :raises RuntimeError: When the provided image_path is invalid :return: The ...
face_cropper/core/detector.py
detect
Dave-Lopper/face_cropper
python
def detect(image: str, verbose: bool=False): 'Detects faces on a given image using dlib and returns matches.\n\n :param image: Path to access the image to be searched\n :type image: [string]\n :param verbose: Wether or not command should output informations\n :type image: [bool], default to False\n\n ...
def download_progress_hook(count, blockSize, totalSize): 'A hook to report the progress of a download. This is mostly intended for users with\n slow internet connections. Reports every 5% change in download progress.\n ' global last_percent_reported percent = int((((count * blockSize) * 100) / totalSi...
2,470,292,000,998,774,300
A hook to report the progress of a download. This is mostly intended for users with slow internet connections. Reports every 5% change in download progress.
udacity_deep_learning/download_data.py
download_progress_hook
fcarsten/ai_playground
python
def download_progress_hook(count, blockSize, totalSize): 'A hook to report the progress of a download. This is mostly intended for users with\n slow internet connections. Reports every 5% change in download progress.\n ' global last_percent_reported percent = int((((count * blockSize) * 100) / totalSi...
def maybe_download(filename, expected_bytes, force=False): "Download a file if not present, and make sure it's the right size." dest_filename = os.path.join(data_root, filename) if (force or (not os.path.exists(dest_filename))): print('Attempting to download:', filename) (filename, _) = urlr...
2,058,923,476,989,784,600
Download a file if not present, and make sure it's the right size.
udacity_deep_learning/download_data.py
maybe_download
fcarsten/ai_playground
python
def maybe_download(filename, expected_bytes, force=False): dest_filename = os.path.join(data_root, filename) if (force or (not os.path.exists(dest_filename))): print('Attempting to download:', filename) (filename, _) = urlretrieve((url + filename), dest_filename, reporthook=download_progres...
def channel_split_naive(r, channel_ranges): 'Slower but simpler implementation of straxen.split_channel_ranges' results = [] for (left, right) in channel_ranges: results.append(r[np.in1d(r['channel'], np.arange(left, (right + 1)))]) return results
-3,514,169,492,615,701,500
Slower but simpler implementation of straxen.split_channel_ranges
tests/test_channel_split.py
channel_split_naive
AlexElykov/straxen
python
def channel_split_naive(r, channel_ranges): results = [] for (left, right) in channel_ranges: results.append(r[np.in1d(r['channel'], np.arange(left, (right + 1)))]) return results
def __init__(self, obs_space, action_space, config, loss_fn, stats_fn=None, grad_stats_fn=None, before_loss_init=None, make_model=None, action_sampler_fn=None, existing_inputs=None, existing_model=None, get_batch_divisibility_req=None, obs_include_prev_action_reward=True): 'Initialize a dynamic TF policy.\n\n ...
5,892,416,507,873,919,000
Initialize a dynamic TF policy. Arguments: observation_space (gym.Space): Observation space of the policy. action_space (gym.Space): Action space of the policy. config (dict): Policy-specific configuration data. loss_fn (func): function that returns a loss tensor the policy graph, and dict of e...
rllib/policy/dynamic_tf_policy.py
__init__
lisadunlap/ray
python
def __init__(self, obs_space, action_space, config, loss_fn, stats_fn=None, grad_stats_fn=None, before_loss_init=None, make_model=None, action_sampler_fn=None, existing_inputs=None, existing_model=None, get_batch_divisibility_req=None, obs_include_prev_action_reward=True): 'Initialize a dynamic TF policy.\n\n ...
@override(TFPolicy) def copy(self, existing_inputs): 'Creates a copy of self using existing input placeholders.' if self._state_inputs: num_state_inputs = (len(self._state_inputs) + 1) else: num_state_inputs = 0 if ((len(self._loss_inputs) + num_state_inputs) != len(existing_inputs)): ...
-2,234,550,702,876,427,800
Creates a copy of self using existing input placeholders.
rllib/policy/dynamic_tf_policy.py
copy
lisadunlap/ray
python
@override(TFPolicy) def copy(self, existing_inputs): if self._state_inputs: num_state_inputs = (len(self._state_inputs) + 1) else: num_state_inputs = 0 if ((len(self._loss_inputs) + num_state_inputs) != len(existing_inputs)): raise ValueError('Tensor list mismatch', self._loss_i...
def main(): "The entry point for the console script xbmcswift2.\n\n The 'xbcmswift2' script is command bassed, so the second argument is always\n the command to execute. Each command has its own parser options and usages.\n If no command is provided or the -h flag is used without any other\n commands, t...
-6,102,954,789,046,832,000
The entry point for the console script xbmcswift2. The 'xbcmswift2' script is command bassed, so the second argument is always the command to execute. Each command has its own parser options and usages. If no command is provided or the -h flag is used without any other commands, the general help message is shown.
resources/lib/xbmcswift2/cli/cli.py
main
liberty-developer/plugin.video.metalliq-forqed
python
def main(): "The entry point for the console script xbmcswift2.\n\n The 'xbcmswift2' script is command bassed, so the second argument is always\n the command to execute. Each command has its own parser options and usages.\n If no command is provided or the -h flag is used without any other\n commands, t...
def compute_benchmark(synthesizer, datasets=DEFAULT_DATASETS, iterations=3): 'Compute the scores of a synthesizer over a list of datasets.\n\n The results are returned in a raw format as a ``pandas.DataFrame`` containing:\n - One row for each dataset+scoring method (for example, a classifier)\n - O...
6,867,888,405,591,949,000
Compute the scores of a synthesizer over a list of datasets. The results are returned in a raw format as a ``pandas.DataFrame`` containing: - One row for each dataset+scoring method (for example, a classifier) - One column for each computed metric - The columns: - dataset - distance ...
sdgym/benchmark.py
compute_benchmark
csala/SDGym
python
def compute_benchmark(synthesizer, datasets=DEFAULT_DATASETS, iterations=3): 'Compute the scores of a synthesizer over a list of datasets.\n\n The results are returned in a raw format as a ``pandas.DataFrame`` containing:\n - One row for each dataset+scoring method (for example, a classifier)\n - O...
def _summarize_scores(scores): 'Computes a summary of the scores obtained by a synthesizer.\n\n The raw scores returned by the ``compute_benchmark`` function are summarized\n by grouping them by dataset and computing the average.\n\n The results are then put in a ``pandas.Series`` object with one value per...
-9,160,691,643,630,375,000
Computes a summary of the scores obtained by a synthesizer. The raw scores returned by the ``compute_benchmark`` function are summarized by grouping them by dataset and computing the average. The results are then put in a ``pandas.Series`` object with one value per dataset and metric. As an example, the summary of a...
sdgym/benchmark.py
_summarize_scores
csala/SDGym
python
def _summarize_scores(scores): 'Computes a summary of the scores obtained by a synthesizer.\n\n The raw scores returned by the ``compute_benchmark`` function are summarized\n by grouping them by dataset and computing the average.\n\n The results are then put in a ``pandas.Series`` object with one value per...
def _get_synthesizer_name(synthesizer): 'Get the name of the synthesizer function or class.\n\n If the given synthesizer is a function, return its name.\n If it is a method, return the name of the class to which\n the method belongs.\n\n Args:\n synthesizer (function or method):\n The ...
6,233,313,625,423,672,000
Get the name of the synthesizer function or class. If the given synthesizer is a function, return its name. If it is a method, return the name of the class to which the method belongs. Args: synthesizer (function or method): The synthesizer function or method. Returns: str: Name of the functi...
sdgym/benchmark.py
_get_synthesizer_name
csala/SDGym
python
def _get_synthesizer_name(synthesizer): 'Get the name of the synthesizer function or class.\n\n If the given synthesizer is a function, return its name.\n If it is a method, return the name of the class to which\n the method belongs.\n\n Args:\n synthesizer (function or method):\n The ...
def _get_synthesizers(synthesizers): 'Get the dict of synthesizers from the input value.\n\n If the input is a synthesizer or an iterable of synthesizers, get their names\n and put them on a dict.\n\n Args:\n synthesizers (function, class, list, tuple or dict):\n A synthesizer (function o...
256,732,817,812,438,270
Get the dict of synthesizers from the input value. If the input is a synthesizer or an iterable of synthesizers, get their names and put them on a dict. Args: synthesizers (function, class, list, tuple or dict): A synthesizer (function or method or class) or an iterable of synthesizers or a dict c...
sdgym/benchmark.py
_get_synthesizers
csala/SDGym
python
def _get_synthesizers(synthesizers): 'Get the dict of synthesizers from the input value.\n\n If the input is a synthesizer or an iterable of synthesizers, get their names\n and put them on a dict.\n\n Args:\n synthesizers (function, class, list, tuple or dict):\n A synthesizer (function o...
def benchmark(synthesizers, datasets=DEFAULT_DATASETS, iterations=3, add_leaderboard=True, leaderboard_path=LEADERBOARD_PATH, replace_existing=True): 'Compute the benchmark scores for the synthesizers and return a leaderboard.\n\n The ``synthesizers`` object can either be a single synthesizer or, an iterable of\...
-6,008,760,859,194,131,000
Compute the benchmark scores for the synthesizers and return a leaderboard. The ``synthesizers`` object can either be a single synthesizer or, an iterable of synthesizers or a dict containing synthesizer names as keys and synthesizers as values. If ``add_leaderboard`` is ``True``, append the obtained scores to the le...
sdgym/benchmark.py
benchmark
csala/SDGym
python
def benchmark(synthesizers, datasets=DEFAULT_DATASETS, iterations=3, add_leaderboard=True, leaderboard_path=LEADERBOARD_PATH, replace_existing=True): 'Compute the benchmark scores for the synthesizers and return a leaderboard.\n\n The ``synthesizers`` object can either be a single synthesizer or, an iterable of\...
def hello(): '\n This is a docstring\n ' print('hello')
-6,392,466,694,877,974,000
This is a docstring
tests/example.py
hello
bwohlberg/py2jn
python
def hello(): '\n \n ' print('hello')
def parse_env(config_schema, env): 'Parse the values from a given environment against a given config schema\n\n Args:\n config_schema: A dict which maps the variable name to a Schema object\n that describes the requested value.\n env: A dict which represents the value of each variable in...
2,493,724,030,623,137,300
Parse the values from a given environment against a given config schema Args: config_schema: A dict which maps the variable name to a Schema object that describes the requested value. env: A dict which represents the value of each variable in the environment.
envpy/parser.py
parse_env
jonathanlloyd/envpy
python
def parse_env(config_schema, env): 'Parse the values from a given environment against a given config schema\n\n Args:\n config_schema: A dict which maps the variable name to a Schema object\n that describes the requested value.\n env: A dict which represents the value of each variable in...
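A generic, self-contained sketch of the parse-env pattern the docstring describes; it is not envpy's API, and the (caster, default) schema entries are illustrative only.

def parse_env_sketch(config_schema, env):
    # each schema entry maps a variable name to (caster, default)
    parsed = {}
    for key, (caster, default) in config_schema.items():
        raw = env.get(key)
        parsed[key] = caster(raw) if raw is not None else default
    return parsed

schema = {'PORT': (int, 8080), 'DEBUG': (lambda v: v == 'true', False)}
print(parse_env_sketch(schema, {'PORT': '9000'}))  # {'PORT': 9000, 'DEBUG': False}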
def parse(self, key, value): 'Parse the environment value for a given key against the schema.\n\n Args:\n key: The name of the environment variable.\n value: The value to be parsed.\n ' if (value is not None): try: return self._parser(value) except...
-3,832,913,277,313,911,300
Parse the environment value for a given key against the schema. Args: key: The name of the environment variable. value: The value to be parsed.
envpy/parser.py
parse
jonathanlloyd/envpy
python
def parse(self, key, value): 'Parse the environment value for a given key against the schema.\n\n Args:\n key: The name of the environment variable.\n value: The value to be parsed.\n ' if (value is not None): try: return self._parser(value) except...
def global_scope(): '\n Get the global/default scope instance. There are a lot of APIs use\n :code:`global_scope` as its default value, e.g., :code:`Executor.run`\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n fluid.global_scope(...
-2,561,556,626,074,283,000
Get the global/default scope instance. There are a lot of APIs use :code:`global_scope` as its default value, e.g., :code:`Executor.run` Examples: .. code-block:: python import paddle.fluid as fluid import numpy fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlac...
python/paddle/fluid/executor.py
global_scope
AnKingOne/Paddle
python
def global_scope(): '\n Get the global/default scope instance. There are a lot of APIs use\n :code:`global_scope` as its default value, e.g., :code:`Executor.run`\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n fluid.global_scope(...
@signature_safe_contextmanager def scope_guard(scope): '\n Change the global/default scope instance by Python `with` statement. All\n variable in runtime will assigned to the new scope.\n\n Args:\n scope: The new global/default scope.\n\n Examples:\n .. code-block:: python\n\n i...
1,367,163,491,478,758,700
Change the global/default scope instance by Python `with` statement. All variable in runtime will assigned to the new scope. Args: scope: The new global/default scope. Examples: .. code-block:: python import paddle.fluid as fluid import numpy new_scope = fluid.Scope() with fl...
python/paddle/fluid/executor.py
scope_guard
AnKingOne/Paddle
python
@signature_safe_contextmanager def scope_guard(scope): '\n Change the global/default scope instance by Python `with` statement. All\n variable in runtime will assigned to the new scope.\n\n Args:\n scope: The new global/default scope.\n\n Examples:\n .. code-block:: python\n\n i...
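Pieced together from the usage fragments visible in the global_scope and scope_guard docstrings above, a short sketch of switching the default scope; it assumes a PaddlePaddle 1.x installation where paddle.fluid is available.

import numpy
import paddle.fluid as fluid

new_scope = fluid.Scope()
with fluid.scope_guard(new_scope):
    # while the guard is active, new_scope is the global/default scope
    fluid.global_scope().var('data').get_tensor().set(
        numpy.ones((2, 2)), fluid.CPUPlace())

# after the guard exits, the variable lives in new_scope, not the old default
print(numpy.array(new_scope.find_var('data').get_tensor()))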
def as_numpy(tensor): '\n Convert a Tensor to a numpy.ndarray, its only support Tensor without LoD information.\n For higher dimensional sequence data, please use LoDTensor directly.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n ...
-7,444,017,813,485,285,000
Convert a Tensor to a numpy.ndarray, its only support Tensor without LoD information. For higher dimensional sequence data, please use LoDTensor directly. Examples: .. code-block:: python import paddle.fluid as fluid import numpy new_scope = fluid.Scope() with fluid.scope_guard(new_scope)...
python/paddle/fluid/executor.py
as_numpy
AnKingOne/Paddle
python
def as_numpy(tensor): '\n Convert a Tensor to a numpy.ndarray, its only support Tensor without LoD information.\n For higher dimensional sequence data, please use LoDTensor directly.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n ...
def has_feed_operators(block, feed_targets, feed_holder_name): ' Check whether the block already has feed operators.\n\n Return false if the block does not have any feed operators.\n If some feed operators have been prepended to the block, check that\n the info contained in these feed operators matches the...
-4,258,719,829,844,028,000
Check whether the block already has feed operators. Return false if the block does not have any feed operators. If some feed operators have been prepended to the block, check that the info contained in these feed operators matches the feed_targets and feed_holder_name. Raise exception when any mismatch is found. Retur...
python/paddle/fluid/executor.py
has_feed_operators
AnKingOne/Paddle
python
def has_feed_operators(block, feed_targets, feed_holder_name): ' Check whether the block already has feed operators.\n\n Return false if the block does not have any feed operators.\n If some feed operators have been prepended to the block, check that\n the info contained in these feed operators matches the...
def has_fetch_operators(block, fetch_targets, fetch_holder_name): ' Check whether the block already has fetch operators.\n\n Return false if the block does not have any fetch operators.\n If some fetch operators have been appended to the block, check that\n the info contained in these fetch operators match...
-1,140,413,373,672,059,300
Check whether the block already has fetch operators. Return false if the block does not have any fetch operators. If some fetch operators have been appended to the block, check that the info contained in these fetch operators matches the fetch_targets and fetch_holder_name. Raise exception when any mismatch is found. ...
python/paddle/fluid/executor.py
has_fetch_operators
AnKingOne/Paddle
python
def has_fetch_operators(block, fetch_targets, fetch_holder_name): ' Check whether the block already has fetch operators.\n\n Return false if the block does not have any fetch operators.\n If some fetch operators have been appended to the block, check that\n the info contained in these fetch operators match...
def _fetch_var(name, scope=None, return_numpy=True): '\n Fetch the value of the variable with the given name from the\n given scope.\n\n Args:\n name(str): name of the variable. Typically, only persistable variables\n can be found in the scope used for running the program.\n scope(...
-6,382,690,931,197,901,000
Fetch the value of the variable with the given name from the given scope. Args: name(str): name of the variable. Typically, only persistable variables can be found in the scope used for running the program. scope(core.Scope|None): scope object. It should be the scope where you pass to Executor....
python/paddle/fluid/executor.py
_fetch_var
AnKingOne/Paddle
python
def _fetch_var(name, scope=None, return_numpy=True): '\n Fetch the value of the variable with the given name from the\n given scope.\n\n Args:\n name(str): name of the variable. Typically, only persistable variables\n can be found in the scope used for running the program.\n scope(...
def _as_lodtensor(data, place): '\n Convert numpy.ndarray to Tensor, its only support Tensor without LoD information.\n For higher dimensional sequence data, please use LoDTensor directly.\n\n Examples:\n >>> import paddle.fluid as fluid\n >>> place = fluid.CPUPlace()\n ...
-7,073,756,137,704,113,000
Convert numpy.ndarray to Tensor, its only support Tensor without LoD information. For higher dimensional sequence data, please use LoDTensor directly. Examples: >>> import paddle.fluid as fluid >>> place = fluid.CPUPlace() >>> exe = fluid.executor(place) >>> data = np.array(size=(100, 200, 300)) >>...
python/paddle/fluid/executor.py
_as_lodtensor
AnKingOne/Paddle
python
def _as_lodtensor(data, place): '\n Convert numpy.ndarray to Tensor, its only support Tensor without LoD information.\n For higher dimensional sequence data, please use LoDTensor directly.\n\n Examples:\n >>> import paddle.fluid as fluid\n >>> place = fluid.CPUPlace()\n ...
def close(self): '\n Close this executor.\n\n You can no longer use this executor after calling this method.\n For the distributed training, this method would free the resource\n on PServers related to the current Trainer.\n\n Examples:\n .. code-block:: python\n\n ...
-7,197,737,734,027,222,000
Close this executor. You can no longer use this executor after calling this method. For the distributed training, this method would free the resource on PServers related to the current Trainer. Examples: .. code-block:: python import paddle.fluid as fluid cpu = fluid.CPUPlace() exe = fluid.Exe...
python/paddle/fluid/executor.py
close
AnKingOne/Paddle
python
def close(self): '\n Close this executor.\n\n You can no longer use this executor after calling this method.\n For the distributed training, this method would free the resource\n on PServers related to the current Trainer.\n\n Examples:\n .. code-block:: python\n\n ...
def run(self, program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_name='fetch', scope=None, return_numpy=True, use_program_cache=False): '\n Run program by this Executor. Feed data by feed map, fetch result by\n fetch_list. Python executor takes a program, add feed operators and\...
-8,958,766,470,868,862,000
Run program by this Executor. Feed data by feed map, fetch result by fetch_list. Python executor takes a program, add feed operators and fetch operators to this program according to feed map and fetch_list. Feed map provides input data for the program. fetch_list provides the variables(or names) that user want to get a...
python/paddle/fluid/executor.py
run
AnKingOne/Paddle
python
def run(self, program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_name='fetch', scope=None, return_numpy=True, use_program_cache=False): '\n Run program by this Executor. Feed data by feed map, fetch result by\n fetch_list. Python executor takes a program, add feed operators and\...
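A minimal end-to-end sketch of the feed/fetch flow the Executor.run docstring describes, assuming a PaddlePaddle 1.x graph-mode setup; layer names and shapes are illustrative.

import numpy
import paddle.fluid as fluid

place = fluid.CPUPlace()
exe = fluid.Executor(place)

# build a tiny program: one input, one fully connected layer, a mean loss
data = fluid.layers.data(name='X', shape=[1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)

exe.run(fluid.default_startup_program())  # initialize parameters
x = numpy.random.random(size=(10, 1)).astype('float32')
outs = exe.run(feed={'X': x}, fetch_list=[loss.name])
print(outs[0])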
def infer_from_dataset(self, program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100): '\n The document of infer_from_dataset is almost the same as\n train_from_dataset, except that in distributed training,\n push gradients will be disab...
5,420,110,943,490,376,000
The document of infer_from_dataset is almost the same as train_from_dataset, except that in distributed training, push gradients will be disabled in infer_from_dataset. infer_from_dataset() can be used for evaluation in multi-thread very easily. Args: program(Program|CompiledProgram): the program that needs to be ...
python/paddle/fluid/executor.py
infer_from_dataset
AnKingOne/Paddle
python
def infer_from_dataset(self, program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100): '\n The document of infer_from_dataset is almost the same as\n train_from_dataset, except that in distributed training,\n push gradients will be disab...
def train_from_dataset(self, program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100): '\n Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.\n Given a program, either a program or compiled program, train_from_dataset...
-4,721,268,134,907,001,000
Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset. Given a program, either a program or compiled program, train_from_dataset will consume all data samples in dataset. Input scope can be given by users. By default, scope is global_scope(). The total number of thread run in training is `thread`...
python/paddle/fluid/executor.py
train_from_dataset
AnKingOne/Paddle
python
def train_from_dataset(self, program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100): '\n Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.\n Given a program, either a program or compiled program, train_from_dataset...
def placeholder_inputs(batch_size): 'Generate placeholder variables to represent the input tensors.\n These placeholders are used as inputs by the rest of the model building\n code and will be fed from the downloaded data in the .run() loop, below.\n Args:\n batch_size: The batch size will be baked ...
4,792,516,056,658,818,000
Generate placeholder variables to represent the input tensors. These placeholders are used as inputs by the rest of the model building code and will be fed from the downloaded data in the .run() loop, below. Args: batch_size: The batch size will be baked into both placeholders. Returns: images_placeholder: Imag...
c3d_model/predict_c3d_ucf101.py
placeholder_inputs
b-safwat/multi_action_recognition
python
def placeholder_inputs(batch_size): 'Generate placeholder variables to represent the input tensors.\n These placeholders are used as inputs by the rest of the model building\n code and will be fed from the downloaded data in the .run() loop, below.\n Args:\n batch_size: The batch size will be baked ...
def GenerateCSRFToken(user_id, time): 'Generates a CSRF token based on a secret key, id and time.' precondition.AssertType(user_id, Text) precondition.AssertOptionalType(time, int) time = (time or rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()) secret = config.CONFIG.Get('AdminUI.csrf_secret_...
6,125,651,692,541,662,000
Generates a CSRF token based on a secret key, id and time.
grr/server/grr_response_server/gui/wsgiapp.py
GenerateCSRFToken
Codehardt/grr
python
def GenerateCSRFToken(user_id, time): precondition.AssertType(user_id, Text) precondition.AssertOptionalType(time, int) time = (time or rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()) secret = config.CONFIG.Get('AdminUI.csrf_secret_key', None) if (secret is None): raise ValueErro...
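The record above describes a token derived from a secret key, a user id and a timestamp. A generic, hypothetical HMAC-based sketch of that idea (not GRR's actual implementation) looks like this:

import base64
import hashlib
import hmac
import time

def make_csrf_token(secret, user_id, now_us=None):
    # bind the token to the user and a microsecond timestamp so expiry can be checked
    now_us = now_us if now_us is not None else int(time.time() * 1e6)
    msg = ('%s:%d' % (user_id, now_us)).encode('utf-8')
    digest = hmac.new(secret, msg, hashlib.sha256).digest()
    return '%s:%d' % (base64.urlsafe_b64encode(digest).decode('ascii'), now_us)

print(make_csrf_token(b'example-secret', 'admin'))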
def StoreCSRFCookie(user, response): 'Decorator for WSGI handler that inserts CSRF cookie into response.' csrf_token = GenerateCSRFToken(user, None) response.set_cookie('csrftoken', csrf_token, max_age=CSRF_TOKEN_DURATION.seconds)
4,536,204,827,103,691,300
Decorator for WSGI handler that inserts CSRF cookie into response.
grr/server/grr_response_server/gui/wsgiapp.py
StoreCSRFCookie
Codehardt/grr
python
def StoreCSRFCookie(user, response): csrf_token = GenerateCSRFToken(user, None) response.set_cookie('csrftoken', csrf_token, max_age=CSRF_TOKEN_DURATION.seconds)
def ValidateCSRFTokenOrRaise(request): 'Decorator for WSGI handler that checks CSRF cookie against the request.' if (request.method in ('GET', 'HEAD')): return csrf_token = request.headers.get('X-CSRFToken', '').encode('ascii') if (not csrf_token): logging.info('Did not find headers CSRF...
-7,794,270,443,633,931,000
Decorator for WSGI handler that checks CSRF cookie against the request.
grr/server/grr_response_server/gui/wsgiapp.py
ValidateCSRFTokenOrRaise
Codehardt/grr
python
def ValidateCSRFTokenOrRaise(request): if (request.method in ('GET', 'HEAD')): return csrf_token = request.headers.get('X-CSRFToken', '').encode('ascii') if (not csrf_token): logging.info('Did not find headers CSRF token for: %s', request.path) raise werkzeug_exceptions.Forbidden(...
def LogAccessWrapper(func): 'Decorator that ensures that HTTP access is logged.' def Wrapper(request, *args, **kwargs): 'Wrapping function.' try: response = func(request, *args, **kwargs) server_logging.LOGGER.LogHttpAdminUIAccess(request, response) except Except...
-115,557,866,535,678,200
Decorator that ensures that HTTP access is logged.
grr/server/grr_response_server/gui/wsgiapp.py
LogAccessWrapper
Codehardt/grr
python
def LogAccessWrapper(func): def Wrapper(request, *args, **kwargs): 'Wrapping function.' try: response = func(request, *args, **kwargs) server_logging.LOGGER.LogHttpAdminUIAccess(request, response) except Exception: response = werkzeug_wrappers.Respon...
def Wrapper(request, *args, **kwargs): 'Wrapping function.' try: response = func(request, *args, **kwargs) server_logging.LOGGER.LogHttpAdminUIAccess(request, response) except Exception: response = werkzeug_wrappers.Response('', status=500) server_logging.LOGGER.LogHttpAdminU...
-986,668,722,510,930,300
Wrapping function.
grr/server/grr_response_server/gui/wsgiapp.py
Wrapper
Codehardt/grr
python
def Wrapper(request, *args, **kwargs): try: response = func(request, *args, **kwargs) server_logging.LOGGER.LogHttpAdminUIAccess(request, response) except Exception: response = werkzeug_wrappers.Response('', status=500) server_logging.LOGGER.LogHttpAdminUIAccess(request, respo...
def _BuildToken(self, request, execution_time): 'Build an ACLToken from the request.' token = access_control.ACLToken(username=request.user, reason=request.args.get('reason', ''), process='GRRAdminUI', expiry=(rdfvalue.RDFDatetime.Now() + execution_time)) for field in ['Remote_Addr', 'X-Forwarded-For']: ...
3,942,364,055,699,815,400
Build an ACLToken from the request.
grr/server/grr_response_server/gui/wsgiapp.py
_BuildToken
Codehardt/grr
python
def _BuildToken(self, request, execution_time): token = access_control.ACLToken(username=request.user, reason=request.args.get('reason', ''), process='GRRAdminUI', expiry=(rdfvalue.RDFDatetime.Now() + execution_time)) for field in ['Remote_Addr', 'X-Forwarded-For']: remote_addr = request.headers.get(...
def _HandleHomepage(self, request): 'Renders GRR home page by rendering base.html Jinja template.' _ = request env = jinja2.Environment(loader=jinja2.FileSystemLoader(config.CONFIG['AdminUI.template_root']), autoescape=True) create_time = psutil.Process(os.getpid()).create_time() context = {'heading...
6,814,278,485,432,112,000
Renders GRR home page by rendering base.html Jinja template.
grr/server/grr_response_server/gui/wsgiapp.py
_HandleHomepage
Codehardt/grr
python
def _HandleHomepage(self, request): _ = request env = jinja2.Environment(loader=jinja2.FileSystemLoader(config.CONFIG['AdminUI.template_root']), autoescape=True) create_time = psutil.Process(os.getpid()).create_time() context = {'heading': config.CONFIG['AdminUI.heading'], 'report_url': config.CONF...
def _HandleApi(self, request): 'Handles API requests.' ValidateCSRFTokenOrRaise(request) response = http_api.RenderHttpResponse(request) if (('csrftoken' not in request.cookies) or (response.headers.get('X-API-Method', '') == 'GetPendingUserNotificationsCount')): StoreCSRFCookie(request.user, re...
6,756,775,622,371,802,000
Handles API requests.
grr/server/grr_response_server/gui/wsgiapp.py
_HandleApi
Codehardt/grr
python
def _HandleApi(self, request): ValidateCSRFTokenOrRaise(request) response = http_api.RenderHttpResponse(request) if (('csrftoken' not in request.cookies) or (response.headers.get('X-API-Method', '') == 'GetPendingUserNotificationsCount')): StoreCSRFCookie(request.user, response) return respon...
def _RedirectToRemoteHelp(self, path): 'Redirect to GitHub-hosted documentation.' allowed_chars = set(((string.ascii_letters + string.digits) + '._-/')) if (not (set(path) <= allowed_chars)): raise RuntimeError(('Unusual chars in path %r - possible exploit attempt.' % path)) target_path = os.pat...
-4,929,114,115,641,130,000
Redirect to GitHub-hosted documentation.
grr/server/grr_response_server/gui/wsgiapp.py
_RedirectToRemoteHelp
Codehardt/grr
python
def _RedirectToRemoteHelp(self, path): allowed_chars = set(((string.ascii_letters + string.digits) + '._-/')) if (not (set(path) <= allowed_chars)): raise RuntimeError(('Unusual chars in path %r - possible exploit attempt.' % path)) target_path = os.path.join(config.CONFIG['AdminUI.docs_locatio...
def _HandleHelp(self, request): 'Handles help requests.' help_path = request.path.split('/', 2)[(- 1)] if (not help_path): raise werkzeug_exceptions.Forbidden('Error: Invalid help path.') return self._RedirectToRemoteHelp(help_path)
-810,152,685,980,187,800
Handles help requests.
grr/server/grr_response_server/gui/wsgiapp.py
_HandleHelp
Codehardt/grr
python
def _HandleHelp(self, request): help_path = request.path.split('/', 2)[(- 1)] if (not help_path): raise werkzeug_exceptions.Forbidden('Error: Invalid help path.') return self._RedirectToRemoteHelp(help_path)
@werkzeug_wsgi.responder def __call__(self, environ, start_response): 'Dispatches a request.' request = self._BuildRequest(environ) matcher = self.routing_map.bind_to_environ(environ) try: (endpoint, _) = matcher.match(request.path, request.method) return endpoint(request) except wer...
-6,936,825,454,743,817,000
Dispatches a request.
grr/server/grr_response_server/gui/wsgiapp.py
__call__
Codehardt/grr
python
@werkzeug_wsgi.responder def __call__(self, environ, start_response): request = self._BuildRequest(environ) matcher = self.routing_map.bind_to_environ(environ) try: (endpoint, _) = matcher.match(request.path, request.method) return endpoint(request) except werkzeug_exceptions.NotFou...
def WSGIHandler(self): "Returns GRR's WSGI handler." sdm = werkzeug_wsgi.SharedDataMiddleware(self, {'/': config.CONFIG['AdminUI.document_root']}) return werkzeug_wsgi.DispatcherMiddleware(self, {'/static': sdm})
-4,133,702,679,565,647,400
Returns GRR's WSGI handler.
grr/server/grr_response_server/gui/wsgiapp.py
WSGIHandler
Codehardt/grr
python
def WSGIHandler(self): sdm = werkzeug_wsgi.SharedDataMiddleware(self, {'/': config.CONFIG['AdminUI.document_root']}) return werkzeug_wsgi.DispatcherMiddleware(self, {'/static': sdm})
def scope_vars(scope, trainable_only=False): '\n Get variables inside a scope\n The scope can be specified as a string\n Parameters\n ----------\n scope: str or VariableScope\n scope in which the variables reside.\n trainable_only: bool\n whether or not to return only the variables t...
-3,037,051,232,383,622,000
Get variables inside a scope The scope can be specified as a string Parameters ---------- scope: str or VariableScope scope in which the variables reside. trainable_only: bool whether or not to return only the variables that were marked as trainable. Returns ------- vars: [tf.Variable] list of variables in ...
baselines/deepq/build_graph.py
scope_vars
rwill128/baselines
python
def scope_vars(scope, trainable_only=False): '\n Get variables inside a scope\n The scope can be specified as a string\n Parameters\n ----------\n scope: str or VariableScope\n scope in which the variables reside.\n trainable_only: bool\n whether or not to return only the variables t...
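The body of scope_vars is truncated above, so here is a hedged sketch of one common TensorFlow 1.x way to achieve what its docstring describes, using graph collections; treat it as illustrative rather than baselines' exact code.

import tensorflow as tf  # assumes the TF 1.x graph-mode API

def scope_vars_sketch(scope, trainable_only=False):
    # pick the collection, then filter it by scope name
    collection = (tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only
                  else tf.GraphKeys.GLOBAL_VARIABLES)
    name = scope if isinstance(scope, str) else scope.name
    return tf.get_collection(collection, scope=name)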