code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
@classmethod
def from_slice(cls, coord):
    """Return a value fetching callable given a slice of coordinate strings."""
    if coord.step is not None:
        raise NotImplementedError('no slice step support')
    if coord.start is not None and coord.stop is not None:
        # Fully bounded slice: delegate to the two-ended fetcher.
        return DoubleSlice.from_slice(coord)
    if coord.start is not None:
        xcol, xrow, col, row = cls._parse(coord.start)
        if xcol is not None:
            return StartCell(cls._cint(xcol), cls._rint(xrow))
        if col is not None:
            return StartCol(cls._cint(col))
        return StartRow(cls._rint(row))
    if coord.stop is not None:
        # Stop bounds are exclusive, hence the +1 adjustments.
        xcol, xrow, col, row = cls._parse(coord.stop)
        if xcol is not None:
            return StopCell(cls._cint(xcol) + 1, cls._rint(xrow) + 1)
        if col is not None:
            return StopCol(cls._cint(col) + 1)
        return StopRow(cls._rint(row) + 1)
    # Unbounded slice: fall back to the whole-range fetcher.
    return cls()
2.160717
2.114063
1.022068
def find(self, title):
    """Return the first worksheet with the given title.

    Args:
        title(str): title/name of the worksheet to return
    Returns:
        WorkSheet: contained worksheet object
    Raises:
        KeyError: if the spreadsheet has no worksheet with the given ``title``
    """
    matches = self._titles.get(title)
    if matches is None:
        raise KeyError(title)
    return matches[0]
4.035687
5.791496
0.69683
def findall(self, title=None):
    """Return a list of worksheets with the given title.

    Args:
        title(str): title/name of the worksheets to return, or ``None`` for all
    Returns:
        list: list of contained worksheet instances (possibly empty)
    """
    if title is None:
        return list(self._sheets)
    # Unknown titles yield an empty list rather than a KeyError.
    return list(self._titles.get(title, []))
3.864478
3.781441
1.021959
def to_csv(self, encoding=export.ENCODING, dialect=export.DIALECT, make_filename=export.MAKE_FILENAME):
    """Dump all worksheets of the spreadsheet to individual CSV files.

    Args:
        encoding (str): result string encoding
        dialect (str): :mod:`csv` dialect name or object to use
        make_filename: template or one-argument callable returning the filename

    If ``make_filename`` is a string, it is string-interpolated with an
    infos-dictionary with the fields ``id`` (spreadsheet id), ``title``
    (spreadsheet title), ``sheet`` (worksheet title), ``gid`` (worksheet id),
    ``index`` (worksheet index), and ``dialect`` CSV dialect to generate the
    filename: ``filename = make_filename % infos``.

    If ``make_filename`` is a callable, it will be called with the
    infos-dictionary to generate the filename:
    ``filename = make_filename(infos)``.
    """
    # Passing filename=None lets each worksheet derive its own name.
    for sheet in self._sheets:
        sheet.to_csv(None, encoding, dialect, make_filename)
4.450481
6.824598
0.652124
def titles(self, unique=False):
    """Return a list of contained worksheet titles.

    Args:
        unique (bool): drop duplicates
    Returns:
        list: list of titles/name strings
    """
    names = (sheet.title for sheet in self._items)
    if unique:
        return tools.uniqued(names)
    return list(names)
5.077573
6.955988
0.729957
def at(self, row, col):
    """Return the value at the given cell position.

    Args:
        row (int): zero-based row number
        col (int): zero-based column number
    Returns:
        cell value
    Raises:
        TypeError: if ``row`` or ``col`` is not an ``int``
        IndexError: if the position is out of range
    """
    # Reject non-int indices up front (e.g. slices or strings).
    for index in (row, col):
        if not isinstance(index, int):
            raise TypeError(row, col)
    return self._values[row][col]
3.443781
3.864201
0.891201
def values(self, column_major=False):
    """Return a nested list with the worksheet values.

    Args:
        column_major (bool): as list of columns (default list of rows)
    Returns:
        list: list of lists with values
    """
    if column_major:
        # zip(*rows) transposes; wrap each tuple back into a list.
        return [list(column) for column in zip(*self._values)]
    # Shallow-copy each row so callers cannot mutate internal state.
    return [list(row) for row in self._values]
3.756261
4.005173
0.937852
def to_csv(self, filename=None, encoding=export.ENCODING, dialect=export.DIALECT, make_filename=export.MAKE_FILENAME):
    """Dump the worksheet to a CSV file.

    Args:
        filename (str): result filename (if ``None`` use ``make_filename``)
        encoding (str): result string encoding
        dialect (str): :mod:`csv` dialect name or object to use
        make_filename: template or one-argument callable returning the filename

    If ``make_filename`` is a string, it is string-interpolated with an
    infos-dictionary with the fields ``id`` (spreadsheet id), ``title``
    (spreadsheet title), ``sheet`` (worksheet title), ``gid`` (worksheet id),
    ``index`` (worksheet index), and ``dialect`` CSV dialect to generate the
    filename: ``filename = make_filename % infos``.

    If ``make_filename`` is a callable, it will be called with the
    infos-dictionary to generate the filename:
    ``filename = make_filename(infos)``.
    """
    if filename is None:
        if make_filename is None:
            make_filename = export.MAKE_FILENAME
        info = {
            'id': self._spreadsheet._id,
            'title': self._spreadsheet._title,
            'sheet': self._title,
            'gid': self._id,
            'index': self._index,
            'dialect': dialect,
        }
        if isinstance(make_filename, string_types):
            filename = make_filename % info
        else:
            filename = make_filename(info)
    with export.open_csv(filename, 'w', encoding=encoding) as csvfile:
        export.write_csv(csvfile, self._values, encoding, dialect)
2.948634
2.234381
1.319665
def to_frame(self, **kwargs):
    r"""Return a pandas DataFrame loaded from the worksheet data.

    Args:
        \**kwargs: passed to ``pandas.read_csv()`` (e.g. ``header``, ``index_col``)
    Returns:
        pandas.DataFrame: new ``DataFrame`` instance
    """
    frame = export.write_dataframe(self._values, **kwargs)
    # Label the frame with the worksheet title for convenience.
    frame.name = self.title
    return frame
17.546984
22.249298
0.788653
@classmethod
def from_files(cls, secrets=None, storage=None, scopes=None, no_webserver=False):
    """Return a spreadsheet collection making OAuth 2.0 credentials.

    Args:
        secrets (str): location of secrets file
        storage (str): location of storage file
        scopes: scope URL(s) or ``'read'`` or ``'write'``
        no_webserver (bool): URL/code prompt instead of webbrowser auth
    Returns:
        Sheets: new Sheets instance with OAuth 2.0 credentials
    """
    credentials = oauth2.get_credentials(scopes, secrets, storage, no_webserver)
    return cls(credentials)
5.524098
8.042366
0.686875
def get(self, id_or_url, default=None):
    """Fetch and return the spreadsheet with the given id or url.

    Args:
        id_or_url (str): unique alphanumeric id or URL of the spreadsheet
    Returns:
        New SpreadSheet instance or given default if none is found
    Raises:
        ValueError: if an URL is given from which no id could be extracted
    """
    if '/' in id_or_url:
        # Looks like a URL: extract the spreadsheet id from it.
        spreadsheet_id = urls.SheetUrl.from_string(id_or_url).id
    else:
        spreadsheet_id = id_or_url
    try:
        return self[spreadsheet_id]
    except KeyError:
        return default
3.183855
3.549749
0.896924
def find(self, title):
    """Fetch and return the first spreadsheet with the given title.

    Args:
        title(str): title/name of the spreadsheet to return
    Returns:
        SpreadSheet: new SpreadSheet instance
    Raises:
        KeyError: if no spreadsheet with the given ``title`` is found
    """
    files = backend.iterfiles(self._drive, name=title)
    try:
        return next(self[file_id] for file_id, _ in files)
    except StopIteration:
        # No match at all: surface the title as a KeyError.
        raise KeyError(title)
7.583538
8.054119
0.941573
def findall(self, title=None):
    """Fetch and return a list of spreadsheets with the given title.

    Args:
        title(str): title/name of the spreadsheets to return, or ``None`` for all
    Returns:
        list: list of new SpreadSheet instances (possibly empty)
    """
    if title is None:
        return list(self)
    files = backend.iterfiles(self._drive, name=title)
    return [self[file_id] for file_id, _ in files]
8.073072
8.279346
0.975086
def titles(self, unique=False):
    """Return a list of all available spreadsheet titles.

    Args:
        unique (bool): drop duplicates
    Returns:
        list: list of title/name strings
    """
    names = (title for _, title in self.iterfiles())
    if unique:
        return tools.uniqued(names)
    return list(names)
6.009467
7.689212
0.781545
def create(self, name, description, data_source_type, url, credential_user=None, credential_pass=None, is_public=None, is_protected=None, s3_credentials=None):
    """Create a Data Source."""
    data = {
        'name': name,
        'description': description,
        'type': data_source_type,
        'url': url,
    }
    # Swift-style user/password credentials take precedence over S3 ones.
    credentials = {}
    self._copy_if_defined(credentials,
                          user=credential_user,
                          password=credential_pass)
    credentials = credentials or s3_credentials
    self._copy_if_defined(data,
                          is_public=is_public,
                          is_protected=is_protected,
                          credentials=credentials)
    return self._create('/data-sources', data, 'data_source')
2.237131
2.251049
0.993817
def update(self, data_source_id, update_data):
    """Update a Data Source.

    :param dict update_data: dict that contains fields that should be
        updated with new values.

    Fields that can be updated:

    * name
    * description
    * type
    * url
    * is_public
    * is_protected
    * credentials - dict with the keys `user` and `password` for data
      source in Swift, or with the keys `accesskey`, `secretkey`,
      `endpoint`, `ssl`, and `bucket_in_path` for data source in S3
    """
    # APIv2 uses PATCH; older versions use PUT.
    update_func = self._patch if self.version >= 2 else self._update
    return update_func('/data-sources/%s' % data_source_id, update_data)
5.109373
6.015604
0.849353
def getitem_by_path(d, path):
    """Access item in d using path.

    a = {0: {1: 'item'}}
    getitem_by_path(a, [0, 1]) == 'item'
    """
    current = d
    for key in path:
        current = current[key]
    return current
3.614007
6.475816
0.558077
def clean_empty(self, d=DEFAULT):
    """Returns a copy of d without empty leaves.

    https://stackoverflow.com/questions/27973988/python-how-to-remove-all-empty-fields-in-a-nested-dict/35263074
    """
    if d is DEFAULT:
        d = self
    # A leaf is kept when it is truthy or exactly zero (0 is not "empty").
    if isinstance(d, list):
        cleaned = (self.clean_empty(item) for item in d)
        return [item for item in cleaned if item or item == 0]
    if isinstance(d, type(self)):
        pairs = ((key, self.clean_empty(value)) for key, value in d.items())
        return type(self)({key: value for key, value in pairs if value or value == 0})
    if isinstance(d, dict):
        pairs = ((key, self.clean_empty(value)) for key, value in d.items())
        return {key: value for key, value in pairs if value or value == 0}
    return d
1.684691
1.557446
1.081701
def compress(self, d=DEFAULT):
    """Returns a copy of d with compressed leaves."""
    if d is DEFAULT:
        d = self
    if isinstance(d, list):
        compressed = [self.compress(item) for item in d]
        try:
            return list(set(compressed))
        except TypeError:
            # list contains unhashables: dedupe with an O(n^2) scan instead
            deduped = []
            for item in compressed:
                if item not in deduped:
                    deduped.append(item)
            return deduped
    if isinstance(d, type(self)):
        return type(self)({key: self.compress(value) for key, value in d.items()})
    if isinstance(d, dict):
        return {key: self.compress(value) for key, value in d.items()}
    return d
2.254221
2.107525
1.069606
def cast_dicts(self, to=DEFAULT, d=DEFAULT):
    """Returns a copy of d with all dicts casted to the type 'to'."""
    if to is DEFAULT:
        to = type(self)
    if d is DEFAULT:
        d = self
    if isinstance(d, list):
        return [self.cast_dicts(to, item) for item in d]
    if isinstance(d, dict):
        return to({key: self.cast_dicts(to, value) for key, value in d.items()})
    return d
2.055612
1.858657
1.105966
def merge(self, b, a=DEFAULT):
    """Merges b into a recursively, if a is not given: merges into self.

    also merges lists and:

    * merge({a:a},{a:b}) = {a:[a,b]}
    * merge({a:[a]},{a:b}) = {a:[a,b]}
    * merge({a:a},{a:[b]}) = {a:[a,b]}
    * merge({a:[a]},{a:[b]}) = {a:[a,b]}
    """
    if a is DEFAULT:
        a = self
    for key in b:
        if key not in a:
            a[key] = b[key]
        elif isinstance(a[key], dict) and isinstance(b[key], dict):
            # Both sides are mappings: merge them in place.
            self.merge(b[key], a[key])
        else:
            # Scalar/list collision: promote both sides to lists and join.
            left_is_list = type(a[key]) is list
            right_is_list = type(b[key]) is list
            if left_is_list and right_is_list:
                a[key] += b[key]
            elif left_is_list:
                a[key] += [b[key]]
            elif right_is_list:
                a[key] = [a[key]] + b[key]
            else:
                a[key] = [a[key]] + [b[key]]
    return a
1.375367
1.401362
0.98145
def create(self, name, url, description=None, extra=None, is_public=None, is_protected=None):
    """Create a Job Binary.

    :param dict extra: authentication info needed for some job binaries,
        containing the keys `user` and `password` for job binary in Swift
        or the keys `accesskey`, `secretkey`, and `endpoint` for job
        binary in S3
    """
    data = {'name': name, 'url': url}
    self._copy_if_defined(data,
                          description=description,
                          extra=extra,
                          is_public=is_public,
                          is_protected=is_protected)
    return self._create('/job-binaries', data, 'job_binary')
3.114176
3.304671
0.942356
def get_file(self, job_binary_id):
    """Download a Job Binary."""
    response = self.api.get('/job-binaries/%s/data' % job_binary_id)
    if response.status_code != 200:
        # Non-200 responses are turned into API exceptions.
        self._raise_api_exception(response)
    return response.content
3.160381
3.080362
1.025977
def update(self, job_binary_id, data):
    """Update Job Binary.

    :param dict data: dict that contains fields that should be updated
        with new values.

    Fields that can be updated:

    * name
    * description
    * url
    * is_public
    * is_protected
    * extra - dict with the keys `user` and `password` for job binary
      in Swift, or with the keys `accesskey`, `secretkey`, and
      `endpoint` for job binary in S3
    """
    # APIv2 uses PATCH; older versions use PUT.
    update_func = self._patch if self.version >= 2 else self._update
    return update_func('/job-binaries/%s' % job_binary_id, data, 'job_binary')
4.736733
5.696411
0.831529
def set(conf):
    """Applies a configuration to the global config object."""
    for name, value in conf.items():
        # None means "not given": keep the existing setting.
        if value is None:
            continue
        setattr(Conf, name.upper(), value)
4.325369
4.529655
0.9549
def get():
    """Gets the configuration as a dict."""
    # Keep only plain data attributes: skip callables and dunder names.
    return {
        attr: getattr(Conf, attr)
        for attr in dir(Conf())
        if not callable(getattr(Conf, attr)) and not attr.startswith('__')
    }
4.900907
3.81083
1.286047
def load(description, add_arguments_cb=lambda x: None, postprocess_conf_cb=lambda x: None):
    """Loads the global Conf object from command line arguments.

    Encode the next argument after +plugin to ensure that it does not
    start with a prefix_char.
    """
    argparser = ArgumentParser(
        description=description,
        prefix_chars='-+'
    )
    argparser.add_argument(
        '--version',
        dest='PRINT_VERSION',
        action='store_true',
        help='Print version and exit'
    )
    add_arguments_cb(argparser)
    # set up plugin argument argparser
    plugin_argparser = argparser.add_argument_group('Plugins')
    plugins = {}

    def load_plugin_group(group):
        for entry_point in iter_entry_points(group=group):
            name = str(entry_point).split(' =', 1)[0]
            plugin = entry_point.load()
            if isclass(plugin) \
                    and not plugin in Conf.SUPPORTED_PLUGIN_INTERFACES \
                    and any([
                        issubclass(plugin, supported_plugin_interface)
                        for supported_plugin_interface in Conf.SUPPORTED_PLUGIN_INTERFACES
                    ]):
                plugin_argparser.add_argument(
                    '+{}'.format(name),
                    dest='PLUGIN_{}'.format(name),
                    type=str,
                    nargs='?',
                    default=DEFAULT,
                    metavar='args'.format(name),
                    help=make_argparse_help_safe(
                        call_plugin(plugin, 'help')
                    )
                )
                # register plugin
                plugins[name] = plugin
            else:
                warning('Plugin not supported: {}'.format(name))

    load_plugin_group(Conf.PLUGIN_GROUP_BASE)
    if Conf.LOAD_PLUGINS:
        load_plugin_group(Conf.PLUGIN_GROUP)
    # Base32-encode each argument that follows a +plugin flag so it cannot
    # be mistaken for another option (it might start with a prefix char).
    conf = vars(
        argparser.parse_args([
            v if i == 0 or v[0] == '+' or Conf.ARGS[i - 1][0] != '+'
            else b32encode(v.encode()).decode()
            for i, v in enumerate(Conf.ARGS)
        ])
    )
    postprocess_conf_cb(conf)
    # apply configuration
    Conf.set(conf)
    if Conf.PRINT_VERSION:
        print(
            'pdml2flow version {}'.format(Conf.VERSION),
            file=Conf.OUT
        )
        sys.exit(0)
    # initialize plugins
    Conf.PLUGINS = []
    for conf_name, args in conf.items():
        if conf_name.startswith('PLUGIN_') and args != DEFAULT:
            plugin_name = conf_name[7:]
            Conf.PLUGINS.append(
                # instantiate plugin
                plugins[plugin_name](
                    *split(
                        b32decode(args.encode()).decode()
                        if args is not None
                        else ''
                    )
                )
            )
3.358346
3.31021
1.014542
fmt = parser._get_formatter() fmt.add_usage(parser.usage, parser._actions, parser._mutually_exclusive_groups, prefix='') return fmt.format_help().strip()
def _format_usage_without_prefix(parser)
Use private argparse APIs to get the usage string without the 'usage: ' prefix.
2.831112
2.670039
1.060326
def update(self, ng_template_id, name=NotUpdated, plugin_name=NotUpdated,
           hadoop_version=NotUpdated, flavor_id=NotUpdated,
           description=NotUpdated, volumes_per_node=NotUpdated,
           volumes_size=NotUpdated, node_processes=NotUpdated,
           node_configs=NotUpdated, floating_ip_pool=NotUpdated,
           security_groups=NotUpdated, auto_security_group=NotUpdated,
           availability_zone=NotUpdated, volumes_availability_zone=NotUpdated,
           volume_type=NotUpdated, image_id=NotUpdated,
           is_proxy_gateway=NotUpdated, volume_local_to_instance=NotUpdated,
           use_autoconfig=NotUpdated, shares=NotUpdated, is_public=NotUpdated,
           is_protected=NotUpdated, volume_mount_prefix=NotUpdated):
    """Update a Node Group Template."""
    # Only fields whose value differs from the NotUpdated sentinel are sent.
    data = {}
    self._copy_if_updated(
        data, name=name, plugin_name=plugin_name,
        hadoop_version=hadoop_version, flavor_id=flavor_id,
        description=description, volumes_per_node=volumes_per_node,
        volumes_size=volumes_size, node_processes=node_processes,
        node_configs=node_configs, floating_ip_pool=floating_ip_pool,
        security_groups=security_groups,
        auto_security_group=auto_security_group,
        availability_zone=availability_zone,
        volumes_availability_zone=volumes_availability_zone,
        volume_type=volume_type, image_id=image_id,
        is_proxy_gateway=is_proxy_gateway,
        volume_local_to_instance=volume_local_to_instance,
        use_autoconfig=use_autoconfig, shares=shares,
        is_public=is_public, is_protected=is_protected,
        volume_mount_prefix=volume_mount_prefix
    )
    return self._update('/node-group-templates/%s' % ng_template_id,
                        data, 'node_group_template')
1.345173
1.33058
1.010968
def create(self, name, plugin_name, plugin_version, flavor_id,
           description=None, volumes_per_node=None, volumes_size=None,
           node_processes=None, node_configs=None, floating_ip_pool=None,
           security_groups=None, auto_security_group=None,
           availability_zone=None, volumes_availability_zone=None,
           volume_type=None, image_id=None, is_proxy_gateway=None,
           volume_local_to_instance=None, use_autoconfig=None, shares=None,
           is_public=None, is_protected=None, volume_mount_prefix=None,
           boot_from_volume=None, boot_volume_type=None,
           boot_volume_availability_zone=None,
           boot_volume_local_to_instance=None):
    """Create a Node Group Template."""
    # Mandatory fields go straight into the request body; the rest is
    # filtered by the shared _do_create helper.
    data = {
        'name': name,
        'plugin_name': plugin_name,
        'plugin_version': plugin_version,
        'flavor_id': flavor_id,
        'node_processes': node_processes
    }
    return self._do_create(data, description, volumes_per_node, volumes_size,
                           node_configs, floating_ip_pool, security_groups,
                           auto_security_group, availability_zone,
                           volumes_availability_zone, volume_type, image_id,
                           is_proxy_gateway, volume_local_to_instance,
                           use_autoconfig, shares, is_public, is_protected,
                           volume_mount_prefix, boot_from_volume,
                           boot_volume_type, boot_volume_availability_zone,
                           boot_volume_local_to_instance)
1.356871
1.400851
0.968605
def update(self, ng_template_id, name=NotUpdated, plugin_name=NotUpdated,
           plugin_version=NotUpdated, flavor_id=NotUpdated,
           description=NotUpdated, volumes_per_node=NotUpdated,
           volumes_size=NotUpdated, node_processes=NotUpdated,
           node_configs=NotUpdated, floating_ip_pool=NotUpdated,
           security_groups=NotUpdated, auto_security_group=NotUpdated,
           availability_zone=NotUpdated, volumes_availability_zone=NotUpdated,
           volume_type=NotUpdated, image_id=NotUpdated,
           is_proxy_gateway=NotUpdated, volume_local_to_instance=NotUpdated,
           use_autoconfig=NotUpdated, shares=NotUpdated, is_public=NotUpdated,
           is_protected=NotUpdated, volume_mount_prefix=NotUpdated,
           boot_from_volume=NotUpdated, boot_volume_type=NotUpdated,
           boot_volume_availability_zone=NotUpdated,
           boot_volume_local_to_instance=NotUpdated):
    """Update a Node Group Template."""
    # Only fields whose value differs from the NotUpdated sentinel are sent.
    data = {}
    self._copy_if_updated(
        data, name=name, plugin_name=plugin_name,
        plugin_version=plugin_version, flavor_id=flavor_id,
        description=description, volumes_per_node=volumes_per_node,
        volumes_size=volumes_size, node_processes=node_processes,
        node_configs=node_configs, floating_ip_pool=floating_ip_pool,
        security_groups=security_groups,
        auto_security_group=auto_security_group,
        availability_zone=availability_zone,
        volumes_availability_zone=volumes_availability_zone,
        volume_type=volume_type, image_id=image_id,
        is_proxy_gateway=is_proxy_gateway,
        volume_local_to_instance=volume_local_to_instance,
        use_autoconfig=use_autoconfig, shares=shares,
        is_public=is_public, is_protected=is_protected,
        volume_mount_prefix=volume_mount_prefix,
        boot_from_volume=boot_from_volume,
        boot_volume_type=boot_volume_type,
        boot_volume_availability_zone=boot_volume_availability_zone,
        boot_volume_local_to_instance=boot_volume_local_to_instance
    )
    return self._patch('/node-group-templates/%s' % ng_template_id,
                       data, 'node_group_template')
1.292683
1.277984
1.011502
def update_image(self, image_id, user_name, desc=None):
    """Create or update an Image in Image Registry."""
    # A missing description is sent as an empty string, not None.
    data = {"username": user_name, "description": desc or ''}
    return self._post('/images/%s' % image_id, data)
3.94608
3.71284
1.06282
def update_tags(self, image_id, new_tags):
    """Update an Image tags.

    :param new_tags: list of tags that will replace currently assigned tags
    """
    # Do not add :param list in the docstring above until this is solved:
    # https://github.com/sphinx-doc/sphinx/issues/2549
    current = frozenset(self.get(image_id).tags)
    desired = frozenset(new_tags)
    to_add = list(desired - current)
    to_remove = list(current - desired)
    add_response = remove_response = None
    if to_add:
        add_response = self._post('/images/%s/tag' % image_id,
                                  {'tags': to_add}, 'image')
    if to_remove:
        remove_response = self._post('/images/%s/untag' % image_id,
                                     {'tags': to_remove}, 'image')
    # Prefer the most recent server response; re-fetch when nothing changed.
    return remove_response or add_response or self.get(image_id)
2.649098
2.73961
0.966962
def build_option_parser(parser):
    """Hook to add global options."""
    parser.add_argument(
        "--os-data-processing-api-version",
        metavar="<data-processing-api-version>",
        default=utils.env(
            'OS_DATA_PROCESSING_API_VERSION',
            default=DEFAULT_DATA_PROCESSING_API_VERSION),
        help=("Data processing API version, default=" +
              DEFAULT_DATA_PROCESSING_API_VERSION +
              ' (Env: OS_DATA_PROCESSING_API_VERSION)'))
    parser.add_argument(
        "--os-data-processing-url",
        default=utils.env(
            "OS_DATA_PROCESSING_URL"),
        help=("Data processing API URL, "
              "(Env: OS_DATA_PROCESSING_API_URL)"))
    return parser
1.963923
1.949737
1.007276
def create(self, name, plugin_name, hadoop_version, description=None,
           cluster_configs=None, node_groups=None, anti_affinity=None,
           net_id=None, default_image_id=None, use_autoconfig=None,
           shares=None, is_public=None, is_protected=None, domain_name=None):
    """Create a Cluster Template."""
    # Mandatory fields first; optional ones are filtered by _do_create.
    data = {
        'name': name,
        'plugin_name': plugin_name,
        'hadoop_version': hadoop_version,
    }
    return self._do_create(data, description, cluster_configs, node_groups,
                           anti_affinity, net_id, default_image_id,
                           use_autoconfig, shares, is_public, is_protected,
                           domain_name)
1.63844
1.721439
0.951785
def update(self, cluster_template_id, name=NotUpdated, plugin_name=NotUpdated,
           plugin_version=NotUpdated, description=NotUpdated,
           cluster_configs=NotUpdated, node_groups=NotUpdated,
           anti_affinity=NotUpdated, net_id=NotUpdated,
           default_image_id=NotUpdated, use_autoconfig=NotUpdated,
           shares=NotUpdated, is_public=NotUpdated, is_protected=NotUpdated,
           domain_name=NotUpdated):
    """Update a Cluster Template."""
    # Only fields whose value differs from the NotUpdated sentinel are sent.
    data = {}
    self._copy_if_updated(data, name=name, plugin_name=plugin_name,
                          plugin_version=plugin_version,
                          description=description,
                          cluster_configs=cluster_configs,
                          node_groups=node_groups,
                          anti_affinity=anti_affinity,
                          neutron_management_network=net_id,
                          default_image_id=default_image_id,
                          use_autoconfig=use_autoconfig, shares=shares,
                          is_public=is_public, is_protected=is_protected,
                          domain_name=domain_name)
    return self._patch('/cluster-templates/%s' % cluster_template_id,
                       data, 'cluster_template')
1.622629
1.645172
0.986297
def get(self, cluster_id, show_progress=False):
    """Get information about a Cluster."""
    params = parse.urlencode({"show_progress": show_progress})
    url = '/clusters/%(cluster_id)s?%(params)s' % {
        "cluster_id": cluster_id,
        "params": params,
    }
    return self._get(url, 'cluster')
3.298888
3.066595
1.07575
def update(self, cluster_id, name=NotUpdated, description=NotUpdated,
           is_public=NotUpdated, is_protected=NotUpdated, shares=NotUpdated):
    """Update a Cluster."""
    # Only fields whose value differs from the NotUpdated sentinel are sent.
    data = {}
    self._copy_if_updated(data, name=name, description=description,
                          is_public=is_public, is_protected=is_protected,
                          shares=shares)
    return self._patch('/clusters/%s' % cluster_id, data)
2.437259
2.458746
0.991261
def verification_update(self, cluster_id, status):
    """Start a verification for a Cluster."""
    body = {'verification': {'status': status}}
    return self._patch("/clusters/%s" % cluster_id, body)
4.035614
3.888997
1.037701
def create(self, name, plugin_name, plugin_version,
           cluster_template_id=None, default_image_id=None,
           is_transient=None, description=None, cluster_configs=None,
           node_groups=None, user_keypair_id=None, anti_affinity=None,
           net_id=None, count=None, use_autoconfig=None, shares=None,
           is_public=None, is_protected=None):
    """Launch a Cluster."""
    # Mandatory fields first; optional ones are filtered by _do_create.
    data = {
        'name': name,
        'plugin_name': plugin_name,
        'plugin_version': plugin_version,
    }
    return self._do_create(data, cluster_template_id, default_image_id,
                           is_transient, description, cluster_configs,
                           node_groups, user_keypair_id, anti_affinity,
                           net_id, count, use_autoconfig, shares,
                           is_public, is_protected, api_ver=2)
1.728167
1.840298
0.939069
def _get_command(classes):
    """Associates each command class with command depending on setup.cfg

    Scans the project's ``setup.cfg`` entry points; the command name is the
    left-hand side of ``name = module:Class`` with underscores turned into
    spaces.
    """
    commands = {}
    root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
    setup_file = os.path.join(root, 'setup.cfg')
    # BUG FIX: the original iterated `open(...)` without ever closing the
    # file handle; use a context manager so it is always released.
    with open(setup_file, 'r') as handle:
        for line in handle:
            for cl in classes:
                if cl in line:
                    commands[cl] = line.split(' = ')[0].strip().replace('_', ' ')
    return commands
2.900124
2.559907
1.132902
def get_json(response):
    """Provide backward compatibility with old versions of requests library."""
    # In old requests versions `json` was a field; in new ones it is a method.
    json_attr = getattr(response, 'json', None)
    if callable(json_attr):
        return response.json()
    return jsonutils.loads(response.content)
4.116695
3.322224
1.239138
def create(self, name, data):
    """Create a Job Binary Internal.

    :param str data: raw data of script text
    """
    # The name is URL-quoted (UTF-8) since it becomes part of the path.
    quoted_name = urlparse.quote(name.encode('utf-8'))
    return self._update('/job-binary-internals/%s' % quoted_name,
                        data, 'job_binary_internal', dump_json=False)
10.103166
9.295247
1.086917
def update(self, job_binary_id, name=NotUpdated, is_public=NotUpdated,
           is_protected=NotUpdated):
    """Update a Job Binary Internal."""
    # Only fields whose value differs from the NotUpdated sentinel are sent.
    data = {}
    self._copy_if_updated(data, name=name, is_public=is_public,
                          is_protected=is_protected)
    return self._patch('/job-binary-internals/%s' % job_binary_id, data)
3.220246
2.96811
1.084948
def autoconvert(string):
    """Try to convert variables into datatypes.

    Tries bool, then int, then float; returns the original string when no
    conversion succeeds.
    """
    for converter in (boolify, int, float):
        try:
            return converter(string)
        except ValueError:
            pass
    return string
4.659544
5.547068
0.840001
def call_plugin(plugin, f, *args, **kwargs):
    """Calls function f from plugin, returns None if plugin does not
    implement f.
    """
    try:
        func = getattr(plugin, f)
    except AttributeError:
        # Plugin does not implement f.
        return None
    # BUG FIX: the original only returned the result when no kwargs were
    # given, silently dropping the callee's return value otherwise.
    return func(*args, **kwargs)
3.163886
2.883931
1.097074
def create(self, job_id, cluster_id, input_id=None, output_id=None,
           configs=None, interface=None, is_public=None, is_protected=None):
    """Launch a Job."""
    url = "/jobs/%s/execute" % job_id
    data = {
        "cluster_id": cluster_id,
    }
    # Optional fields are only included when actually given.
    self._copy_if_defined(data, input_id=input_id, output_id=output_id,
                          job_configs=configs, interface=interface,
                          is_public=is_public, is_protected=is_protected)
    return self._create(url, data, 'job_execution')
2.431196
2.410787
1.008466
def update(self, obj_id, is_public=NotUpdated, is_protected=NotUpdated):
    """Update a Job Execution."""
    # Only fields whose value differs from the NotUpdated sentinel are sent.
    data = {}
    self._copy_if_updated(data, is_public=is_public,
                          is_protected=is_protected)
    return self._patch('/job-executions/%s' % obj_id, data)
4.55797
3.284139
1.387874
def visitTerminal(self, ctx):
    """Converts case insensitive keywords and identifiers to lowercase

    Identifiers in quotes are not lowercased even though there is case
    sensitivity in quotes for identifiers, to prevent lowercasing quoted
    values.
    """
    text = str(super().visitTerminal(ctx))
    quotes = ("'", '"')
    is_quoted = text[0] in quotes and text[-1] in quotes
    if not is_quoted:
        text = text.lower()
    return Terminal.from_text(text, ctx)
4.819185
4.048171
1.19046
def list(self, search_opts=None):
    """Get a list of Plugins."""
    query = base.get_query_string(search_opts)
    return self._list('/plugins%s' % query, 'plugins')
5.260734
3.442747
1.528063
def convert_to_cluster_template(self, plugin_name, hadoop_version,
                                template_name, filecontent):
    """Convert to cluster template

    Create Cluster Template directly, avoiding Cluster Template mechanism.
    """
    url = '/plugins/%s/%s/convert-config/%s' % (
        plugin_name, hadoop_version, urlparse.quote(template_name))
    resp = self.api.post(url, data=filecontent)
    if resp.status_code != 202:
        raise RuntimeError('Failed to upload template file for plugin "%s"'
                           ' and version "%s"' % (plugin_name,
                                                  hadoop_version))
    return base.get_json(resp)['cluster_template']
3.435166
3.74104
0.918238
def create(self, name, type, mains=None, libs=None, description=None,
           interface=None, is_public=None, is_protected=None):
    """Create a Job."""
    data = {
        'name': name,
        'type': type,
    }
    # Optional fields are only included when actually given.
    self._copy_if_defined(data, description=description, mains=mains,
                          libs=libs, interface=interface,
                          is_public=is_public, is_protected=is_protected)
    return self._create('/jobs', data, 'job')
2.55478
2.394829
1.06679
def list(self, search_opts=None, limit=None, marker=None,
         sort_by=None, reverse=None):
    """Get a list of Jobs."""
    query = base.get_query_string(search_opts, limit=limit, marker=marker,
                                  sort_by=sort_by, reverse=reverse)
    return self._page("/jobs%s" % query, 'jobs', limit)
3.199689
2.831036
1.130219
def update(self, job_id, name=NotUpdated, description=NotUpdated,
           is_public=NotUpdated, is_protected=NotUpdated):
    """Update a Job."""
    # Only fields whose value differs from the NotUpdated sentinel are sent.
    data = {}
    self._copy_if_updated(data, name=name, description=description,
                          is_public=is_public, is_protected=is_protected)
    return self._patch('/jobs/%s' % job_id, data)
2.675376
2.672319
1.001144
# have to do some stupid f/Decimal/str stuff to (a) ensure we get as much # decimal places as the user already specified and (b) to ensure we don't # get e-5 stuff return "{0:f},{1:f}".format(Decimal(str(lat)), Decimal(str(lng)))
def _query_for_reverse_geocoding(lat, lng)
Given a lat & lng, what's the string search query. If the API changes, change this function. Only for internal use.
12.69206
13.539177
0.937432
def floatify_latlng(input_value):
    """Work around a JSON dict with string, not float, lat/lngs.

    Given anything (list/dict/etc) it will return that thing again, *but* any
    dict (at any level) that has only 2 elements lat & lng, will be replaced
    with the lat & lng turned into floats.

    If the API returns the lat/lng as strings, and not numbers, then this
    function will 'clean them up' to be floats.
    """
    # BUG FIX: collections.Mapping / collections.MutableSequence aliases were
    # removed in Python 3.10; use collections.abc instead. Local import keeps
    # the fix self-contained.
    import collections.abc
    if isinstance(input_value, collections.abc.Mapping):
        if len(input_value) == 2 and sorted(input_value.keys()) == ['lat', 'lng']:
            # This dict has only 2 keys 'lat' & 'lng'
            return {'lat': float_if_float(input_value["lat"]),
                    'lng': float_if_float(input_value["lng"])}
        return {key: floatify_latlng(value)
                for key, value in input_value.items()}
    if isinstance(input_value, collections.abc.MutableSequence):
        return [floatify_latlng(item) for item in input_value]
    return input_value
2.221924
2.156483
1.030346
def geocode(self, query, **kwargs):
    """Given a string to search for, return the results from OpenCage's
    Geocoder.

    :param string query: String to search for

    :returns: Dict results
    :raises InvalidInputError: if the query string is not a unicode string
    :raises RateLimitExceededError: if you have exceeded the number of
        queries you can make. Exception says when you can try again
    :raises UnknownError: if something goes wrong with the OpenCage API
    """
    if six.PY2:
        # py3 doesn't have unicode() function, and instead we check the
        # text_type later
        try:
            query = unicode(query)
        except UnicodeDecodeError:
            raise InvalidInputError(bad_value=query)
    if not isinstance(query, six.text_type):
        raise InvalidInputError(bad_value=query)
    params = {'q': query, 'key': self.key}
    # Add user parameters
    params.update(kwargs)
    response = requests.get(self.url, params=params)
    if response.status_code in (402, 429):
        # Rate limit exceeded
        reset_time = datetime.utcfromtimestamp(
            response.json()['rate']['reset'])
        raise RateLimitExceededError(
            reset_to=int(response.json()['rate']['limit']),
            reset_time=reset_time)
    if response.status_code == 500:
        raise UnknownError("500 status code from API")
    try:
        response_json = response.json()
    except ValueError:
        raise UnknownError("Non-JSON result from server")
    if 'results' not in response_json:
        raise UnknownError("JSON from API doesn't have a 'results' key")
    return floatify_latlng(response_json['results'])
3.350262
3.174498
1.055368
def reverse_geocode(self, lat, lng, **kwargs):
    """Given a latitude & longitude, return an address for that point from
    OpenCage's Geocoder.

    :param lat: Latitude
    :param lng: Longitude

    :return: Results from OpenCageData
    :rtype: dict
    :raises RateLimitExceededError: if you have exceeded the number of
        queries you can make. Exception says when you can try again
    :raises UnknownError: if something goes wrong with the OpenCage API
    """
    query = _query_for_reverse_geocoding(lat, lng)
    return self.geocode(query, **kwargs)
7.532845
14.031994
0.536834
'''
Get a new connection from the pool. This will return an existing
connection, if one is available in the pool, or create a new connection.

.. warning:: If the pool was created with `maxsize` and `block=True`,
    this method may block until a connection is available in the pool.
'''
self._condition.acquire()
try:
    # Wait for a connection if there is an upper bound to the pool.
    if self._maxsize is not None and self._block:
        # Block until either a pooled connection is returned or we are
        # below the cap and may create a fresh one.
        while not self._pool and self._nconnections == self._maxsize:
            self._condition.wait(timeout=None) # block indefinitely

    # Check the pool for a non-stale connection.
    while self._pool:
        pooledconn = self._pool.pop(0) # get least recently used connection
        # Discard connections idle longer than the idle TTL.
        if self._idlettl is not None and (pooledconn.released + self._idlettl) < time.time():
            pooledconn.connection.close()
            self._nconnections -= 1
        else:
            return pooledconn.connection

    # Pool empty (or all entries stale): open a brand-new DBAPI connection.
    connection = self._dbapi2.connect(*(), **self._connection_args.copy())
    self._nconnections += 1
    return connection
finally:
    self._condition.release()
def acquire(self)
Get a new connection from the pool. This will return an existing connection, if one is available in the pool, or create a new connection. .. warning:: If the pool was created with `maxsize` and `block=True`, this method may block until a connection is available in the pool.
4.107451
2.96418
1.385696
'''
Return a connection back to the pool.

Prior to release, :py:meth:`ctds.Connection.rollback()` is called to
rollback any pending transaction.

.. note:: This must be called once for every successful call to
    :py:meth:`.acquire()`.

:param connection: The connection object returned by :py:meth:`.acquire()`.
'''
try:
    # Rollback the existing connection, closing on failure.
    connection.rollback()
except self._dbapi2.Error:
    # A failed rollback means the connection is unusable; drop it.
    self._close(connection)
    return

self._condition.acquire()
try:
    # Re-pool only while under the cap; otherwise close the surplus.
    if self._maxsize is None or self._maxsize > len(self._pool):
        self._pool.append(PooledConnection(connection, time.time()))
        # Wake one waiter blocked in acquire().
        self._condition.notify()
    else:
        self._close(connection)
finally:
    self._condition.release()
def release(self, connection)
Return a connection back to the pool. Prior to release, :py:meth:`ctds.Connection.rollback()` is called to rollback any pending transaction. .. note:: This must be called once for every successful call to :py:meth:`.acquire()`. :param connection: The connection object returned by :py:meth:`.acquire()`.
4.726261
2.463603
1.918434
'''
Release all connections contained in the pool.

.. note:: This should be called to cleanly shutdown the pool, i.e. on
    process exit.
'''
self._condition.acquire()
try:
    # Connections still checked out cannot be closed here; warn the caller.
    if self._nconnections != len(self._pool):
        warnings.warn('finalize() called with unreleased connections', RuntimeWarning, 2)
    while self._pool:
        self._close(self._pool.pop().connection)
    self._nconnections = 0
finally:
    self._condition.release()
def finalize(self)
Release all connections contained in the pool. .. note:: This should be called to cleanly shutdown the pool, i.e. on process exit.
5.159991
2.908761
1.773948
# Recursively fold merge_dct into dct: nested dicts are merged in place,
# everything else overwrites the existing key.
for key, value in merge_dct.items():
    is_nested = (key in dct and isinstance(dct[key], dict)
                 and isinstance(value, collections.Mapping))
    if is_nested:
        _dict_merge(dct[key], value)
    else:
        dct[key] = value
def _dict_merge(dct, merge_dct)
Recursive dict merge. Inspired by :meth:``dict.update()``, instead of updating only top-level keys, dict_merge recurses down into dicts nested to an arbitrary depth, updating keys. The ``merge_dct`` is merged into ``dct``. From https://gist.github.com/angstwad/bf22d1822c38a92ec0a9 Arguments: dct: dict onto which the merge is executed merge_dct: dct merged into dct
1.433203
1.497966
0.956766
# Recursively build an example Python object from an OpenAPI Schema Object.
# When `method` is set (request context), readOnly properties are elided
# via the _READONLY_PROPERTY sentinel.
if method and schema.get('readOnly', False):
    return _READONLY_PROPERTY

# allOf: Must be valid against all of the subschemas
if 'allOf' in schema:
    # Deep-merge all subschemas into one, then parse the result.
    schema_ = copy.deepcopy(schema['allOf'][0])
    for x in schema['allOf'][1:]:
        _dict_merge(schema_, x)
    return _parse_schema(schema_, method)

# anyOf: Must be valid against any of the subschemas
# TODO(stephenfin): Handle anyOf

# oneOf: Must be valid against exactly one of the subschemas
if 'oneOf' in schema:
    # we only show the first one since we can't show everything
    return _parse_schema(schema['oneOf'][0], method)

if 'enum' in schema:
    # we only show the first one since we can't show everything
    return schema['enum'][0]

schema_type = schema.get('type', 'object')

if schema_type == 'array':
    # special case oneOf so that we can show examples for all possible
    # combinations
    if 'oneOf' in schema['items']:
        return [
            _parse_schema(x, method) for x in schema['items']['oneOf']]

    return [_parse_schema(schema['items'], method)]

if schema_type == 'object':
    # An object whose every property is readOnly is itself readOnly
    # in a request context.
    if method and all(v.get('readOnly', False) for v in schema['properties'].values()):
        return _READONLY_PROPERTY

    results = []
    for name, prop in schema.get('properties', {}).items():
        result = _parse_schema(prop, method)
        if result != _READONLY_PROPERTY:
            results.append((name, result))

    return collections.OrderedDict(results)

# Leaf: map (type, format) to a canned example value, falling back to
# the format-less entry for the type.
if (schema_type, schema.get('format')) in _TYPE_MAPPING:
    return _TYPE_MAPPING[(schema_type, schema.get('format'))]

return _TYPE_MAPPING[(schema_type, None)]
def _parse_schema(schema, method)
Convert a Schema Object to a Python object. Args: schema: An ``OrderedDict`` representing the schema object.
2.579024
2.628465
0.98119
# Generator: yields reST/sourcecode lines rendering example HTTP requests
# (when `method` is given) or responses (when `status` is given).
indent = '   '
extra_indent = indent * nb_indent

if method is not None:
    method = method.upper()
else:
    try:
        # one of possible values for status might be 'default'.
        # in the case, just fallback to '-'
        status_text = http_status_codes[int(status)]
    except (ValueError, KeyError):
        status_text = '-'

for content_type, content in media_type_objects.items():
    examples = content.get('examples')
    example = content.get('example')

    # Synthesize an example from the schema when none is supplied.
    if examples is None:
        examples = {}
        if not example:
            if content_type != 'application/json':
                LOG.info('skipping non-JSON example generation.')
                continue
            example = _parse_schema(content['schema'], method=method)
        if method is None:
            examples['Example response'] = {
                'value': example,
            }
        else:
            examples['Example request'] = {
                'value': example,
            }

    # Non-string example values are serialized to pretty-printed JSON.
    for example in examples.values():
        if not isinstance(example['value'], six.string_types):
            example['value'] = json.dumps(
                example['value'], indent=4, separators=(',', ': '))

    for example_name, example in examples.items():
        if 'summary' in example:
            example_title = '{example_name} - {example[summary]}'.format(
                **locals())
        else:
            example_title = example_name

        yield ''
        yield '{extra_indent}**{example_title}:**'.format(**locals())
        yield ''
        yield '{extra_indent}.. sourcecode:: http'.format(**locals())
        yield ''

        # Print http request example
        if method:
            yield '{extra_indent}{indent}{method} {endpoint} HTTP/1.1' \
                .format(**locals())
            yield '{extra_indent}{indent}Host: example.com' \
                .format(**locals())
            yield '{extra_indent}{indent}Content-Type: {content_type}' \
                .format(**locals())

        # Print http response example
        else:
            yield '{extra_indent}{indent}HTTP/1.1 {status} {status_text}' \
                .format(**locals())
            yield '{extra_indent}{indent}Content-Type: {content_type}' \
                .format(**locals())

        yield ''
        for example_line in example['value'].splitlines():
            yield '{extra_indent}{indent}{example_line}'.format(**locals())
        yield ''
def _example(media_type_objects, method=None, endpoint=None, status=None, nb_indent=0)
Format examples in `Media Type Object` openapi v3 to HTTP request or HTTP response example. If method and endpoint are provided, this function prints a request example; otherwise status should be provided to print a response example. Arguments: media_type_objects (Dict[str, Dict]): Dict containing Media Type Objects. method: The HTTP method to use in example. endpoint: The HTTP route to use in example. status: The HTTP status to use in example.
2.548114
2.626427
0.970183
# Replace every {'$ref': ...} node in `spec` (in place) with the object it
# points at, resolving relative to `uri`.
# NOTE(review): collections.Mapping was removed in py3.10; confirm runtime.
resolver = jsonschema.RefResolver(uri, spec)

def _do_resolve(node):
    if isinstance(node, collections.Mapping) and '$ref' in node:
        with resolver.resolving(node['$ref']) as resolved:
            return resolved
    elif isinstance(node, collections.Mapping):
        # Recurse into mapping values.
        for k, v in node.items():
            node[k] = _do_resolve(v)
    elif isinstance(node, (list, tuple)):
        # Recurse into sequence elements by index (mutates lists).
        for i in range(len(node)):
            node[i] = _do_resolve(node[i])
    return node

return _do_resolve(spec)
def _resolve_refs(uri, spec)
Resolve JSON references in a given dictionary. OpenAPI spec may contain JSON references to its nodes or external sources, so any attempt to rely that there's some expected attribute in the spec may fail. So we need to resolve JSON references before we use it (i.e. replace with referenced object). For details see: https://tools.ietf.org/html/draft-pbryan-zyp-json-ref-02 The input spec is modified in-place despite being returned from the function.
1.843155
2.016328
0.914115
myList = list() myList.append(int(doubleValue & 0x0000FFFF)) #Append Least Significant Word myList.append(int((doubleValue & 0xFFFF0000)>>16)) #Append Most Significant Word return myList
def convert_double_to_two_registers(doubleValue)
Convert 32 Bit Value to two 16 Bit Value to send as Modbus Registers doubleValue: Value to be converted return: 16 Bit Register values int[]
4.326924
3.757234
1.151625
myList = list() s = bytearray(struct.pack('<f', floatValue) ) #little endian myList.append(s[0] | (s[1]<<8)) #Append Least Significant Word myList.append(s[2] | (s[3]<<8)) #Append Most Significant Word return myList
def convert_float_to_two_registers(floatValue)
Convert 32 Bit real Value to two 16 Bit Value to send as Modbus Registers floatValue: Value to be converted return: 16 Bit Register values int[]
4.419186
4.321434
1.02262
# Reassemble two 16-bit register words (low word first) into the four
# little-endian bytes of an IEEE-754 float.
b = bytearray(4)
b [0] = registers[0] & 0xff
b [1] = (registers[0] & 0xff00)>>8
b [2] = (registers[1] & 0xff)
b [3] = (registers[1] & 0xff00)>>8
# NOTE(review): struct.unpack returns a 1-tuple, so this function returns
# (value,) rather than a bare float, despite the docstring. Callers may
# already index [0]; confirm before "fixing" the return type.
returnValue = struct.unpack('<f', b) #little Endian
return returnValue
def convert_registers_to_float(registers)
Convert two 16 Bit Registers to 32 Bit real value - Used to receive float values from Modbus (Modbus Registers are 16 Bit long) registers: 16 Bit Registers return: 32 bit value real
2.780149
2.746413
1.012284
# Serial (Modbus-RTU) path: translate the numeric stopbits/parity config
# codes into pyserial constants, then (re)open the port.
if (self.__ser is not None):
    serial = importlib.import_module("serial")
    if self.__stopbits == 0:
        self.__ser.stopbits = serial.STOPBITS_ONE
    elif self.__stopbits == 1:
        self.__ser.stopbits = serial.STOPBITS_TWO
    elif self.__stopbits == 2:
        self.__ser.stopbits = serial.STOPBITS_ONE_POINT_FIVE
    if self.__parity == 0:
        self.__ser.parity = serial.PARITY_EVEN
    elif self.__parity == 1:
        self.__ser.parity = serial.PARITY_ODD
    elif self.__parity == 2:
        self.__ser.parity = serial.PARITY_NONE
    self.__ser = serial.Serial(self.serialPort, self.__baudrate, timeout=self.__timeout, parity=self.__ser.parity, stopbits=self.__ser.stopbits, xonxoff=0, rtscts=0)
    self.__ser.writeTimeout = self.__timeout
#print (self.ser)
# TCP (Modbus-TCP) path: connect with a 5-second timeout and start the
# background listener thread.
if (self.__tcpClientSocket is not None):
    self.__tcpClientSocket.settimeout(5)
    self.__tcpClientSocket.connect((self.__ipAddress, self.__port))
    self.__connected = True
    self.__thread = threading.Thread(target=self.__listen, args=())
    self.__thread.start()
def connect(self)
Connects to a Modbus-TCP Server or a Modbus-RTU Slave with the given Parameters
2.149854
2.056387
1.045452
# Close whichever transport is in use (serial port and/or TCP socket)
# and mark the client disconnected.
if (self.__ser is not None):
    self.__ser.close()
if (self.__tcpClientSocket is not None):
    # Signal the listener thread to stop before tearing down the socket.
    self.__stoplistening = True
    self.__tcpClientSocket.shutdown(socket.SHUT_RDWR)
    self.__tcpClientSocket.close()
self.__connected = False
def close(self)
Closes Serial port, or TCP-Socket connection
3.266548
2.707277
1.206581
# Specific service group requested if service_group is not None: if service_group not in EFConfig.SERVICE_GROUPS: raise RuntimeError("service registry: {} doesn't have '{}' section listed in EFConfig".format( self._service_registry_file, service_group)) else: return self.service_registry_json[service_group] # Specific service group not requested - flatten and return all service records else: result = dict() for service_group in EFConfig.SERVICE_GROUPS: result.update(self.service_registry_json[service_group]) return result
def services(self, service_group=None)
Args: service_group: optional name of service group Returns: if service_group is omitted or None, flattened dict of all service records in the service registry if service_group is present, dict of service records in that group
4.87066
4.221815
1.153689
# Iterate (name, record) pairs, either for one validated group or for the
# flattened registry. NOTE: iteritems() is py2-only.
if service_group is not None:
    if service_group not in EFConfig.SERVICE_GROUPS:
        raise RuntimeError("service registry: {} doesn't have '{}' section listed in EFConfig".format(
            self._service_registry_file, service_group))
    return self.service_registry_json[service_group].iteritems()
else:
    return self.services().iteritems()
def iter_services(self, service_group=None)
Args: service_group: optional name of service group Returns: if service_group is omitted or None, an Iterator over all flattened service records in the service registry if service_group is present, an Iterator over all service records in that group
7.752981
6.992028
1.108831
service_record = self.service_record(service_name) if service_record is None: raise RuntimeError("service registry doesn't have service: {}".format(service_name)) # Return empty list if service has no "environments" section if not (service_record.has_key("environments")): return [] # Otherwise gather up the envs service_record_envs = service_record["environments"] result = [] for service_env in service_record_envs: if service_env not in EFConfig.PROTECTED_ENVS and service_env in EFConfig.EPHEMERAL_ENVS: result.extend((lambda env=service_env: [env + str(x) for x in range(EFConfig.EPHEMERAL_ENVS[env])])()) else: result.append(service_env) return result
def valid_envs(self, service_name)
Args: service_name: the name of the service in the service registry Returns: a list of strings - all the valid environments for 'service' Raises: RuntimeError if the service wasn't found
3.945972
3.726597
1.058867
if not self.services().has_key(service_name): return None return self.services()[service_name]
def service_record(self, service_name)
Args: service_name: the name of the service in the service registry Returns: the entire service record from the service registry or None if the record was not found
3.453815
3.614364
0.95558
for group in EFConfig.SERVICE_GROUPS: if self.services(group).has_key(service_name): return group return None
def service_group(self, service_name)
Args: service_name: the name of the service in the service registry Returns: the name of the group the service is in, or None if the service was not found
7.711739
7.557097
1.020463
if not self.services()[service_name].has_key("region"): return EFConfig.DEFAULT_REGION else: return self.services()[service_name]["region"]
def service_region(self, service_name)
Args: service_name: the name of the service in the service registry Returns: the region the service is in, or EFConfig.DEFAULT_REGION if the region was not found
5.349444
3.055806
1.750584
# Pick the latest checkpoint when none is given.
# NOTE(review): sorted() is lexicographic -- assumes checkpoint names sort
# chronologically (e.g. zero-padded or timestamped); confirm naming scheme.
if checkpoint_path is None:
    chkpts = sorted([name for name in os.listdir(hyper_params.train.checkpoint_path)])
    checkpoint_path = hyper_params.train.checkpoint_path + "/" + chkpts[-1]
    print("Latest found checkpoint: {}".format(checkpoint_path))

# No loss is needed for a prediction-only estimator spec.
estimator_spec = create_tf_estimator_spec(checkpoint_path, model, create_loss=None)

# Create the estimator.
estimator = tf.estimator.Estimator(estimator_spec, model_dir=checkpoint_path, params=hyper_params)
return estimator
def create_prediction_estimator(hyper_params, model, checkpoint_path=None)
Create an estimator for prediction purpose only. :param hyper_params: The hyper params file. :param model: The keras model. :param checkpoint_path: (Optional) Path to the specific checkpoint to use. :return:
3.22769
3.437268
0.939028
# Decorator factory: wraps the plugin class in an EFPlugin proxy that
# records the target service name and the runtime context/clients.
def class_rebuilder(cls):
    class EFPlugin(cls):
        def __init__(self, context, clients):
            self.service = service_name
            self.context = context
            self.clients = clients
            # Separate instance of the undecorated class, used as an
            # attribute-lookup fallback below.
            self.oInstance = cls()
        def __getattribute__(self, s):
            # Prefer attributes on the proxy itself; fall back to the
            # wrapped instance when the proxy lacks the attribute.
            try:
                x = super(EFPlugin, self).__getattribute__(s)
            except AttributeError:
                pass
            else:
                return x
            return self.oInstance.__getattribute__(s)
    return EFPlugin
return class_rebuilder
def ef_plugin(service_name)
Decorator for ef plugin classes. Any wrapped classes should contain a run() method which executes the plugin code. Args: service_name (str): The name of the service being extended. Example: @ef_plugin('ef-generate') class NewRelicPlugin(object): def run(self): exec_code()
3.891577
4.320184
0.900789
# Discover plugin packages under the local `plugins` package and run every
# EFPlugin whose `service` matches the currently executing script name.
def print_if_verbose(message):
    if context_obj.verbose:
        print(message)

# Service name is derived from the running script, e.g. "ef-generate".
service_name = os.path.basename(sys.argv[0]).replace(".py", "")
try:
    import plugins
except ImportError:
    print_if_verbose("no plugins detected.")
    return
else:
    for plugin_importer, plugin_name, plugin_ispkg in pkgutil.iter_modules(plugins.__path__):
        if plugin_ispkg:
            plugin_package = importlib.import_module("plugins.{}".format(plugin_name))
            for importer, modname, ispkg in pkgutil.iter_modules(plugin_package.__path__):
                plugin_module = importlib.import_module("plugins.{}.{}".format(plugin_name, modname))
                # Only classes literally named "EFPlugin" (produced by the
                # ef_plugin decorator) are candidates.
                for name, obj in inspect.getmembers(plugin_module):
                    if inspect.isclass(obj) and obj.__name__ == "EFPlugin":
                        plugin_class = getattr(plugin_module, name)
                        plugin_instance = plugin_class(context=context_obj, clients=boto3_clients)
                        if plugin_instance.service == service_name:
                            print_if_verbose("plugin '{}' loaded".format(plugin_name))
                            if not context_obj.commit:
                                print_if_verbose("dryrun: skipping plugin execution.")
                            else:
                                try:
                                    plugin_instance.run()
                                except AttributeError:
                                    # Plugin lacks a run() method (or run() raised AttributeError).
                                    print("error executing plugin '{}'".format(modname))
def run_plugins(context_obj, boto3_clients)
Executes all loaded plugins designated for the service calling the function. Args: context_obj (obj:EFContext): The EFContext object created by the service. boto3_clients (dict): Dictionary of boto3 clients created by ef_utils.create_aws_clients()
2.469487
2.361377
1.045782
return img[start_y:start_y + h, start_x:start_x + w, :].copy()
def crop(img, start_y, start_x, h, w)
Crop an image given the top left corner. :param img: The image :param start_y: The top left corner y coord :param start_x: The top left corner x coord :param h: The result height :param w: The result width :return: The cropped image.
2.131152
2.966486
0.71841
h, w = target_height, target_width
max_h, max_w, c = img.shape

# crop -- shrink any dimension larger than the target.
# NOTE(review): despite the docstring's "NO CENTER CROP", this calls
# crop_center (defined elsewhere); confirm which behavior is intended.
img = crop_center(img, min(max_h, h), min(max_w, w))

# pad -- zero-fill bottom/right to reach the target size.
padded_img = np.zeros(shape=(h, w, c), dtype=img.dtype)
padded_img[:img.shape[0], :img.shape[1], :img.shape[2]] = img

return padded_img
def resize_image_with_crop_or_pad(img, target_height, target_width)
Crops and/or pads an image to a target width and height. Resizes an image to a target width and height by either cropping the image or padding it with zeros. NO CENTER CROP. NO CENTER PAD. (Just fill bottom right or crop bottom right) :param img: Numpy array representing the image. :param target_height: Target height. :param target_width: Target width. :return: The cropped and padded image.
2.168764
2.298622
0.943506
# Degenerate rectangle: nothing to inscribe.
if w <= 0 or h <= 0:
    return 0,0

width_is_longer = w >= h
side_long, side_short = (w,h) if width_is_longer else (h,w)

# since the solutions for angle, -angle and 180-angle are all the same,
# if suffices to look at the first quadrant and the absolute values of sin,cos:
sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
if side_short <= 2.*sin_a*cos_a*side_long or abs(sin_a-cos_a) < 1e-10:
    # half constrained case: two crop corners touch the longer side,
    # the other two corners are on the mid-line parallel to the longer line
    x = 0.5*side_short
    wr,hr = (x/sin_a,x/cos_a) if width_is_longer else (x/cos_a,x/sin_a)
else:
    # fully constrained case: crop touches all 4 sides
    cos_2a = cos_a*cos_a - sin_a*sin_a
    wr,hr = (w*cos_a - h*sin_a)/cos_2a, (h*cos_a - w*sin_a)/cos_2a

return wr,hr
def _rotatedRectWithMaxArea(w, h, angle)
Given a rectangle of size wxh that has been rotated by 'angle' (in radians), computes the width and height of the largest possible axis-aligned rectangle (maximal area) within the rotated rectangle. Answer from: https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
1.852882
1.571882
1.178767
h, w, _ = img.shape
# Rotate (angle in degrees); scipy expands the canvas, adding black borders.
img = scipy.ndimage.interpolation.rotate(img, angle)
# Compute the largest axis-aligned rectangle free of border (radians here).
w, h = _rotatedRectWithMaxArea(w, h, math.radians(angle))
return crop_center(img, int(h), int(w))
def rotate_img_and_crop(img, angle)
Rotate an image and then crop it so that there is no black area. :param img: The image to rotate. :param angle: The rotation angle in degrees. :return: The rotated and cropped image.
4.224468
4.602922
0.91778
s1 = string_a.strip().splitlines() s2 = string_b.strip().splitlines() diffs = unified_diff(s2, s1, fromfile='deployed', tofile='local', lineterm='') return '\n'.join(diffs)
def diff_string_templates(string_a, string_b)
Determine the diff of two strings. Return an empty string if the strings are identical, and the diff output string if they are not.
3.125587
3.075147
1.016402
# Shell out to ef-cf to render the template for this environment.
cmd = 'cd {} && ef-cf {} {} --devel --verbose'.format(repo_root, template_file, environment)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
    stderr = indentify('\n{}'.format(stderr))
    stdout = indentify('\n{}'.format(stdout))
    raise Exception('Service: `{}`, Env: `{}`, Msg: `{}{}`'
                    .format(service_name, environment, stderr, stdout))
logger.debug('Rendered template for `%s` in `%s`', template_file, environment)
# Extract the JSON document (the last top-level {...}) from the verbose output.
r = re.match(r".*(^{.*^})$", stdout, re.MULTILINE | re.DOTALL)
return jsonify(json.loads(r.group(1)))
def render_local_template(service_name, environment, repo_root, template_file)
Render a given service's template for a given environment and return it
3.67766
3.626489
1.01411
stack_name = get_stack_name(environment, service_name) logger.debug('Fetching template for `%s`', stack_name) result = cf_client.get_template(StackName=stack_name) return jsonify(result['TemplateBody'])
def fetch_current_cloudformation_template(service_name, environment, cf_client)
Fetch the currently-deployed template for the given service in the given environment and return it.
2.900817
2.925718
0.991489
# Textual comparison: render locally, fetch the deployed template, and
# unified-diff the two. ret_code: 2 = render/fetch error, 1 = differs.
global ret_code
logger.info('Investigating textual diff for `%s`:`%s` in environment `%s`',
            service['type'], service_name, environment)
try:
    local_template = render_local_template(service_name, environment,
                                           repo_root, service['template_file'])
    current_template = fetch_current_cloudformation_template(
        service_name, environment, cf_client)
except Exception as e:
    # Render or fetch failed; record the error and move on.
    ret_code = 2
    logger.error(e)
    return
ret = diff_string_templates(local_template, current_template)
if not ret:
    # Empty diff string means the templates match.
    logger.info('Deployed service `%s` in environment `%s` matches '
                'the local template.', service_name, environment)
else:
    ret_code = 1
    logger.error('Service `%s` in environment `%s` differs from '
                 'the local template.', service_name, environment)
    logger.info('Change details:\n %s', indentify(ret))
def diff_sevice_by_text(service_name, service, environment, cf_client, repo_root)
Render the local template and compare it to the template that was last applied in the target environment.
3.620631
3.46279
1.045582
# Changeset comparison: create a CloudFormation changeset from the local
# template and inspect whether it is empty. ret_code: 2 = generation error,
# 1 = non-empty changeset (templates differ).
global ret_code
logger.info('Investigating changeset for `%s`:`%s` in environment `%s`',
            service['type'], service_name, environment)
# Clear leftovers from earlier runs before generating a new changeset.
delete_any_existing_changesets(cf_client, service_name, environment)
try:
    changeset = generate_changeset(service_name, environment, repo_root,
                                   service['template_file'])
except Exception as e:
    ret_code = 2
    logger.error(e)
    return
wait_for_changeset_creation(cf_client, changeset['Id'], changeset['StackId'])
logger.info('Created Changeset ID: `%s`', changeset['Id'])
desc = cf_client.describe_change_set(
    ChangeSetName=changeset['Id'], StackName=changeset['StackId'])
# The changeset is only needed for its description; delete it right away.
cf_client.delete_change_set(
    ChangeSetName=changeset['Id'], StackName=changeset['StackId'])
if changeset_is_empty(desc):
    logger.info('Deployed service `%s` in environment `%s` matches '
                'the local template.', service_name, environment)
else:
    ret_code = 1
    logger.error('Service `%s` in environment `%s` differs from '
                 'the local template.', service_name, environment)
    details = jsonify(desc['Changes'])
    logger.info('Change details:\n %s', indentify(details))
def diff_sevice_by_changeset(service_name, service, environment, cf_client, repo_root)
If an ef-cf call fails, the error will be logged, the retcode set to 2, but the function will run to completion and return the list of non-error results.
3.224478
3.199561
1.007788
region = service_registry.service_region(service_name) if whereami() == 'ec2': profile = None else: profile = get_account_alias(environment_name) clients = create_aws_clients(region, profile, 'cloudformation') return clients['cloudformation']
def get_cloudformation_client(service_name, environment_name)
Given a service name and an environment name, return a boto CloudFormation client object.
5.983041
5.985887
0.999525
# For every (service, declared env category) pair in scope, build a CF
# client and apply `func`. NOTE: iteritems() is py2-only.
for service_name, service in services.iteritems():
    for env_category in service['environments']:
        # Skip env categories not selected by the caller-provided list.
        if env_category not in get_env_categories(envs):
            logger.debug('Skipping not-included environment `%s` for service `%s`',
                         env_category, service_name)
            continue
        environment = generate_test_environment_name(env_category)
        cf_client = get_cloudformation_client(service_name, environment)
        func(service_name, service, environment, cf_client, repo_root)
def evaluate_service_changes(services, envs, repo_root, func)
Given a dict of services, and a list of environments, apply the diff function to evaluate the differences between the target environments and the rendered templates. Sub-services (names with '.' in them) are skipped.
3.924307
4.251355
0.923072
# If this is a subservice, use the parent service's template service_name = service_name.split('.')[0] if service_name in template_files: return template_files[service_name] return None
def get_matching_service_template_file(service_name, template_files)
Return the template file that goes with the given service name, or return None if there's no match. Subservices return the parent service's file.
3.462314
2.772177
1.248951
# Load the JSON registry and map each service name to its group type,
# template file path, and environment list. NOTE: iteritems() is py2-only.
with open(registry) as fr:
    parsed_registry = json.load(fr)
services = {}
for type, type_services in parsed_registry.iteritems():
    for name, service in type_services.iteritems():
        # First occurrence of a name wins; duplicates are warned and skipped.
        if name in services:
            logger.warning("Template name appears twice, ignoring later items: `%s`", name)
            continue
        template_file = get_matching_service_template_file(name, template_files)
        # Services without a template file are omitted from the result.
        if not template_file:
            if warn_missing_files:
                logger.warning("No template file for `%s` (%s) `%s`", type, service['type'], name)
            continue
        services[name] = {
            'type': type,
            'template_file': template_file,
            'environments': service['environments']
        }
return services
def get_dict_registry_services(registry, template_files, warn_missing_files=True)
Return a dict mapping service name to a dict containing the service's type ('fixtures', 'platform_services', 'application_services', 'internal_services'), the template file's absolute path, and a list of environments to which the service is intended to deploy. Service names that appear twice in the output list will emit a warning and ignore the latter records. Services which have no template file will not appear in the returned dict. If the `warn_missing_files` boolean is True these files will emit a warning.
3.058358
2.984477
1.024755
template_files = {} cf_dir = os.path.join(search_dir, 'cloudformation') for type in os.listdir(cf_dir): template_dir = os.path.join(cf_dir, type, 'templates') for x in os.listdir(template_dir): name = os.path.splitext(x)[0] template_files[name] = os.path.join(template_dir, x) return template_files
def scan_dir_for_template_files(search_dir)
Return a map of "likely service/template name" to "template file". This includes all the template files in fixtures and in services.
2.187738
2.108498
1.037581
alphabet = string.ascii_letters + string.digits random_bytes = os.urandom(length) indices = [int(len(alphabet) * (ord(byte) / 256.0)) for byte in random_bytes] return "".join([alphabet[index] for index in indices])
def generate_secret(length=32)
Generate a random secret consisting of mixed-case letters and numbers Args: length (int): Length of the generated password Returns: a randomly generated secret string Raises: None
3.234962
3.185519
1.015521
# Encrypt every matching, not-yet-encrypted parameter value in place,
# rewriting the file only if something changed.
changed = False

with open(file_path) as json_file:
    # OrderedDict preserves the file's key order across the rewrite.
    data = json.load(json_file, object_pairs_hook=OrderedDict)

try:
    for key, value in data["params"][environment].items():
        if pattern in key:
            # Already-encrypted values carry a kms:decrypt marker; skip them.
            if "aws:kms:decrypt" in value:
                print("Found match, key {} but value is encrypted already; skipping...".format(key))
            else:
                print("Found match, encrypting key {}".format(key))
                encrypted_password = ef_utils.kms_encrypt(clients['kms'], service, environment, value)
                data["params"][environment][key] = format_secret(encrypted_password)
                changed = True
except KeyError:
    ef_utils.fail("Error env: {} does not exist in parameters file".format(environment))

if changed:
    with open(file_path, "w") as encrypted_file:
        json.dump(data, encrypted_file, indent=2, separators=(',', ': '))
        # Writing new line here so it conforms to WG14 N1256 5.1.1.1 (so github doesn't complain)
        encrypted_file.write("\n")
def generate_secret_file(file_path, pattern, service, environment, clients)
Generate a parameter files with it's secrets encrypted in KMS Args: file_path (string): Path to the parameter file to be encrypted pattern (string): Pattern to do fuzzy string matching service (string): Service to use KMS key to encrypt file environment (string): Environment to encrypt values clients (dict): KMS AWS client that has been instantiated Returns: None Raises: IOError: If the file does not exist
4.690775
4.557752
1.029186
# Parse CLI args into an EFPWContext. --decrypt / --plaintext /
# --secret_file are mutually exclusive; --match requires --secret_file.
parser = argparse.ArgumentParser()
parser.add_argument("service", help="name of service password is being generated for")
parser.add_argument("env", help=", ".join(EFConfig.ENV_LIST))
group = parser.add_mutually_exclusive_group()
group.add_argument("--decrypt", help="encrypted string to be decrypted", default="")
group.add_argument("--plaintext", help="secret to be encrypted rather than a randomly generated one", default="")
group.add_argument("--secret_file", help="json file containing secrets to be encrypted", default="")
parser.add_argument("--match", help="used in conjunction with --secret_file to match against keys to be encrypted",
                    default="")
parser.add_argument("--length", help="length of generated password (default 32)", default=32)
parsed_args = vars(parser.parse_args(args))
context = EFPWContext()
try:
    # The env setter validates against the configured environment list.
    context.env = parsed_args["env"]
except ValueError as e:
    # NOTE(review): e.message is py2-only; py3 would need str(e).
    ef_utils.fail("Error in env: {}".format(e.message))
context.service = parsed_args["service"]
context.decrypt = parsed_args["decrypt"]
context.length = parsed_args["length"]
context.plaintext = parsed_args["plaintext"]
context.secret_file = parsed_args["secret_file"]
context.match = parsed_args["match"]
if context.match or context.secret_file:
    # The two flags only make sense together.
    if not context.match or not context.secret_file:
        raise ValueError("Must have both --match and --secret_file flag")
return context
def handle_args_and_set_context(args)
Args: args: the command line args, probably passed from main() as sys.argv[1:] Returns: a populated EFPWContext object Raises: RuntimeError: if repo or branch isn't as spec'd in ef_config.EF_REPO and ef_config.EF_REPO_BRANCH ValueError: if a parameter is invalid
3.022749
2.759291
1.09548
# Rearranges channels into a spatially tiled layout: (B, H, W, C) ->
# (B, H*k_y, W*k_x, C/(k_x*k_y)). Requires C divisible by k_x*k_y.
size = input.get_shape().as_list()
c, h, w = size[3], size[1], size[2]
batch_size = size[0]
if batch_size is None:
    # -1 lets tf.reshape infer the (dynamic) batch dimension.
    batch_size = -1

# Check if tiling is possible and define output shape.
assert c % (k_x * k_y) == 0

tmp = input

if reorder_required:
    # Permute channels so that channel i of output tile o sits at
    # index o + i*output_channels (huval-style ordering).
    output_channels = int(c / (k_x * k_y))
    channels = tf.unstack(tmp, axis=-1)
    reordered_channels = [None for _ in range(len(channels))]
    for o in range(output_channels):
        for i in range(k_x * k_y):
            target = o + i * output_channels
            source = o * (k_x * k_y) + i
            reordered_channels[target] = channels[source]
    tmp = tf.stack(reordered_channels, axis=-1)

# Actual tilining
with tf.variable_scope(name) as scope:
    # Interleave k_y channel groups into height, then k_x into width,
    # via transpose/reshape pairs.
    tmp = tf.transpose(tmp, [0, 2, 1, 3])
    tmp = tf.reshape(tmp, (batch_size, w, int(h * k_y), int(c / (k_y))))
    tmp = tf.transpose(tmp, [0, 2, 1, 3])
    tmp = tf.reshape(tmp, (batch_size, int(h * k_y), int(w * k_x), int(c / (k_y * k_x))))
return tmp
def tile_2d(input, k_x, k_y, name, reorder_required=True)
A tiling layer as introduced in the overfeat and huval papers. :param input: Your input tensor. :param k_x: The tiling factor in x direction. :param k_y: The tiling factor in y direction. :param name: The name of the layer. :param reorder_required: To implement an exact huval tiling you need reordering. However, not using it is more efficient, and when training from scratch setting this to False is highly recommended. :return: The output tensor.
2.196095
2.210443
0.993509
batch_size, h, w, c = input.get_shape().as_list() if batch_size is None: batch_size = -1 # Check if tiling is possible and define output shape. assert w % k_x == 0 and h % k_y == 0 # Actual inverse tilining with tf.variable_scope(name) as scope: tmp = input tmp = tf.reshape(tmp, (batch_size, int(h * k_y), w, int(c * k_x))) tmp = tf.transpose(tmp, [0, 2, 1, 3]) tmp = tf.reshape(tmp, (batch_size, w, h, int(c * k_y * k_x))) tmp = tf.transpose(tmp, [0, 2, 1, 3]) return tmp
def inverse_tile_2d(input, k_x, k_y, name)
An inverse tiling layer. An inverse to the tiling layer can be of great use, since you can keep the resolution of your output low, but harness the benefits of the resolution of a higher level feature layer. If you insist on a source, you can call it lightly inspired by the yolo9000 "passthrough layer". :param input: Your input tensor. (Requires input.shape[1] % k_y == 0 and input.shape[2] % k_x == 0) :param k_x: The tiling factor in x direction [int]. :param k_y: The tiling factor in y direction [int]. :param name: The name of the layer. :return: The output tensor of shape [batch_size, inp.height / k_y, inp.width / k_x, inp.channels * k_x * k_y].
2.408649
2.495896
0.965044
_, h_early, w_early, c_early = early_feat.get_shape().as_list() _, h_late, w_late, c_late = late_feat.get_shape().as_list() s_x = int(w_early / w_late) s_y = int(h_early / h_late) assert h_late * s_y == h_early and w_late * s_x == w_early with tf.variable_scope(name) as scope: early_conv = tf.layers.conv2d(early_feat, filters=filters, kernel_size=(s_x * kernel_size[0], s_y * kernel_size[1]), strides=(s_x, s_y), padding="same") late_conv = tf.layers.conv2d(late_feat, filters=filters, kernel_size=kernel_size, strides=(1, 1), padding="same") return early_conv + late_conv
def feature_passthrough(early_feat, late_feat, filters, name, kernel_size=(1, 1))
A feature passthrough layer inspired by yolo9000 and the inverse tiling layer. It can be proven, that this layer does the same as conv(concat(inverse_tile(early_feat), late_feat)). This layer has no activation function. :param early_feat: The early feature layer of shape [batch_size, h * s_y, w * s_x, _]. s_x (width) and s_y (height) are integers computed internally describing the scale between the layers. :param late_feat: The late feature layer of shape [batch_size, h, w, _]. :param filters: The number of convolution filters. :param name: The name of the layer. :param kernel_size: The size of the kernel. Default (1x1). :return: The output tensor of shape [batch_size, h, w, outputs]
1.689942
1.633702
1.034425
_, h_early, w_early, c_early = early_feat.get_shape().as_list() _, h_late, w_late, c_late = late_feat.get_shape().as_list() s_x = int(w_early / w_late) s_y = int(h_early / h_late) assert h_late * s_y == h_early and w_late * s_x == w_early with tf.variable_scope(name) as scope: tiled = tile_2d(late_feat, s_x, s_y, "tile_2d", reorder_required=False) concated = tf.concat([early_feat, tiled], axis=-1) return tf.layers.conv2d(concated, filters=filters, kernel_size=kernel_size, strides=(1, 1), padding="same")
def upsampling_feature_passthrough(early_feat, late_feat, filters, name, kernel_size=(1, 1))
An upsampling feature passthrough layer inspired by yolo9000 and the tiling layer. It can be proven, that this layer does the same as conv(concat(early_feat, tile_2d(late_feat))). This layer has no activation function. :param early_feat: The early feature layer of shape [batch_size, h * s_y, w * s_x, _]. s_x (width) and s_y (height) are integers computed internally describing the scale between the layers. :param late_feat: The late feature layer of shape [batch_size, h, w, _]. :param filters: The number of convolution filters. :param name: The name of the layer. :param kernel_size: The size of the kernel. Default (1x1). :return: The output tensor of shape [batch_size, h * s_y, w * s_x, outputs]
2.069358
1.899311
1.089531