code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
if host in self.hosts: # this check is for when user passes a host at request time
# Keep this thread safe: set hosts atomically and update it before the timestamp
self.hosts = [host] + [h for h in self.hosts if h != host]
self._last_time_recorded_active = time.time() | def _record_last_active(self, host) | Put host first in our host list, so we try it first next time
The implementation of get_active_namenode relies on this reordering. | 11.70417 | 10.375149 | 1.128097 |
hosts, path = self._parse_path(path)
_transform_user_name_key(kwargs)
kwargs.setdefault('user.name', self.user_name)
formatted_args = ' '.join('{}={}'.format(*t) for t in kwargs.items())
_logger.info("%s %s %s %s", op, path, formatted_args, ','.join(hosts))
kwargs['op'] = op
for i in range(self.max_tries):
log_level = logging.DEBUG if i < self.max_tries - 1 else logging.WARNING
for host in hosts:
try:
response = self._requests_session.request(
method,
'http://{}{}{}'.format(host, WEBHDFS_PATH, url_quote(path.encode('utf-8'))),
params=kwargs, timeout=self.timeout, allow_redirects=False,
**self._requests_kwargs
)
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
_logger.log(log_level, "Failed to reach to %s (attempt %d/%d)",
host, i + 1, self.max_tries, exc_info=True)
continue
try:
_check_response(response, expected_status)
except (HdfsRetriableException, HdfsStandbyException):
_logger.log(log_level, "%s is in startup or standby mode (attempt %d/%d)",
host, i + 1, self.max_tries, exc_info=True)
continue
# Note: standby NN can still return basic validation errors, so non-StandbyException
# does not necessarily mean we have the active NN.
self._record_last_active(host)
return response
if i != self.max_tries - 1:
time.sleep(self.retry_delay)
raise HdfsNoServerException("Could not use any of the given hosts") | def _request(self, method, path, op, expected_status=httplib.OK, **kwargs) | Make a WebHDFS request against the NameNodes
This function handles NameNode failover and error checking.
All kwargs are passed as query params to the WebHDFS server. | 3.681216 | 3.504338 | 1.050474 |
metadata_response = self._put(
path, 'CREATE', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs)
assert not metadata_response.content
data_response = self._requests_session.put(
metadata_response.headers['location'], data=data, **self._requests_kwargs)
_check_response(data_response, expected_status=httplib.CREATED)
assert not data_response.content | def create(self, path, data, **kwargs) | Create a file at the given path.
:param data: ``bytes`` or a ``file``-like object to upload
:param overwrite: If a file already exists, should it be overwritten?
:type overwrite: bool
:param blocksize: The block size of a file.
:type blocksize: long
:param replication: The number of replications of a file.
:type replication: short
:param permission: The permission of a file/directory. Any radix-8 integer (leading zeros
may be omitted.)
:type permission: octal
:param buffersize: The size of the buffer used in transferring data.
:type buffersize: int | 4.228209 | 4.520259 | 0.935391 |
metadata_response = self._post(
path, 'APPEND', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs)
data_response = self._requests_session.post(
metadata_response.headers['location'], data=data, **self._requests_kwargs)
_check_response(data_response)
assert not data_response.content | def append(self, path, data, **kwargs) | Append to the given file.
:param data: ``bytes`` or a ``file``-like object
:param buffersize: The size of the buffer used in transferring data.
:type buffersize: int | 4.862481 | 5.423862 | 0.896498 |
if isinstance(sources, basestring):
raise ValueError("sources should be a list")
if any(',' in s for s in sources):
raise NotImplementedError("WebHDFS does not support commas in concat")
response = self._post(target, 'CONCAT', sources=','.join(sources), **kwargs)
assert not response.content | def concat(self, target, sources, **kwargs) | Concat existing files together.
For preconditions, see
https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/filesystem/filesystem.html#void_concatPath_p_Path_sources
:param target: the path to the target destination.
:param sources: the paths to the sources to use for the concatenation.
:type sources: list | 6.266521 | 6.622466 | 0.946252 |
metadata_response = self._get(
path, 'OPEN', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs)
data_response = self._requests_session.get(
metadata_response.headers['location'], stream=True, **self._requests_kwargs)
_check_response(data_response)
return data_response.raw | def open(self, path, **kwargs) | Return a file-like object for reading the given HDFS path.
:param offset: The starting byte position.
:type offset: long
:param length: The number of bytes to be processed.
:type length: long
:param buffersize: The size of the buffer used in transferring data.
:type buffersize: int
:rtype: file-like object | 4.474115 | 4.696819 | 0.952584 |
response = self._put(link, 'CREATESYMLINK', destination=destination, **kwargs)
assert not response.content | def create_symlink(self, link, destination, **kwargs) | Create a symbolic link at ``link`` pointing to ``destination``.
:param link: the path to be created that points to target
:param destination: the target of the symbolic link
:param createParent: If the parent directories do not exist, should they be created?
:type createParent: bool
:raises HdfsUnsupportedOperationException: This feature doesn't actually work, at least on
CDH 5.3.0. | 9.086759 | 10.710733 | 0.848379 |
return _json(self._put(path, 'RENAME', destination=destination, **kwargs))['boolean'] | def rename(self, path, destination, **kwargs) | Renames Path src to Path dst.
:returns: true if rename is successful
:rtype: bool | 14.454544 | 18.088623 | 0.799096 |
return FileStatus(**_json(self._get(path, 'GETFILESTATUS', **kwargs))['FileStatus']) | def get_file_status(self, path, **kwargs) | Return a :py:class:`FileStatus` object that represents the path. | 11.33782 | 10.119334 | 1.120412 |
return [
FileStatus(**item) for item in
_json(self._get(path, 'LISTSTATUS', **kwargs))['FileStatuses']['FileStatus']
] | def list_status(self, path, **kwargs) | List the statuses of the files/directories in the given path if the path is a directory.
:rtype: ``list`` of :py:class:`FileStatus` objects | 6.199879 | 7.563718 | 0.819687 |
return ContentSummary(
**_json(self._get(path, 'GETCONTENTSUMMARY', **kwargs))['ContentSummary']) | def get_content_summary(self, path, **kwargs) | Return the :py:class:`ContentSummary` of a given Path. | 15.048624 | 13.417403 | 1.121575 |
metadata_response = self._get(
path, 'GETFILECHECKSUM', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs)
assert not metadata_response.content
data_response = self._requests_session.get(
metadata_response.headers['location'], **self._requests_kwargs)
_check_response(data_response)
return FileChecksum(**_json(data_response)['FileChecksum']) | def get_file_checksum(self, path, **kwargs) | Get the checksum of a file.
:rtype: :py:class:`FileChecksum` | 5.573108 | 5.494647 | 1.01428 |
response = self._put(path, 'SETPERMISSION', **kwargs)
assert not response.content | def set_permission(self, path, **kwargs) | Set permission of a path.
:param permission: The permission of a file/directory. Any radix-8 integer (leading zeros
may be omitted.)
:type permission: octal | 12.611188 | 21.558929 | 0.584964 |
response = self._put(path, 'SETOWNER', **kwargs)
assert not response.content | def set_owner(self, path, **kwargs) | Set owner of a path (i.e. a file or a directory).
The parameters owner and group cannot both be null.
:param owner: user
:param group: group | 14.279933 | 22.808199 | 0.626088 |
response = self._put(path, 'SETTIMES', **kwargs)
assert not response.content | def set_times(self, path, **kwargs) | Set access time of a file.
:param modificationtime: Set the modification time of this file. The number of milliseconds
since Jan 1, 1970.
:type modificationtime: long
:param accesstime: Set the access time of this file. The number of milliseconds since Jan 1
1970.
:type accesstime: long | 12.135879 | 15.902881 | 0.763125 |
kwargs['xattr.name'] = xattr_name
kwargs['xattr.value'] = xattr_value
response = self._put(path, 'SETXATTR', flag=flag, **kwargs)
assert not response.content | def set_xattr(self, path, xattr_name, xattr_value, flag, **kwargs) | Set an xattr of a file or directory.
:param xattr_name: The name must be prefixed with the namespace followed by ``.``. For
example, ``user.attr``.
:param flag: ``CREATE`` or ``REPLACE`` | 4.033381 | 5.108488 | 0.789545 |
kwargs['xattr.name'] = xattr_name
response = self._put(path, 'REMOVEXATTR', **kwargs)
assert not response.content | def remove_xattr(self, path, xattr_name, **kwargs) | Remove an xattr of a file or directory. | 6.732898 | 6.393389 | 1.053103 |
kwargs['xattr.name'] = xattr_name
json = _json(self._get(path, 'GETXATTRS', encoding=encoding, **kwargs))['XAttrs']
# Decode the result
result = {}
for attr in json:
k = attr['name']
v = attr['value']
if v is None:
result[k] = None
elif encoding == 'text':
assert attr['value'].startswith('"') and attr['value'].endswith('"')
result[k] = v[1:-1]
elif encoding == 'hex':
assert attr['value'].startswith('0x')
# older python demands bytes, so we have to ascii encode
result[k] = binascii.unhexlify(v[2:].encode('ascii'))
elif encoding == 'base64':
assert attr['value'].startswith('0s')
# older python demands bytes, so we have to ascii encode
result[k] = base64.b64decode(v[2:].encode('ascii'))
else:
warnings.warn("Unexpected encoding {}".format(encoding))
result[k] = v
return result | def get_xattrs(self, path, xattr_name=None, encoding='text', **kwargs) | Get one or more xattr values for a file or directory.
:param xattr_name: ``str`` to get one attribute, ``list`` to get multiple attributes,
``None`` to get all attributes.
:param encoding: ``text`` | ``hex`` | ``base64``, defaults to ``text``
:returns: Dictionary mapping xattr name to value. With text encoding, the value will be a
unicode string. With hex or base64 encoding, the value will be a byte array.
:rtype: dict | 2.848919 | 2.936401 | 0.970208 |
return simplejson.loads(_json(self._get(path, 'LISTXATTRS', **kwargs))['XAttrNames']) | def list_xattrs(self, path, **kwargs) | Get all of the xattr names for a file or directory.
:rtype: list | 15.538024 | 16.542467 | 0.939281 |
response = self._delete(path, 'DELETESNAPSHOT', snapshotname=snapshotname, **kwargs)
assert not response.content | def delete_snapshot(self, path, snapshotname, **kwargs) | Delete a snapshot of a directory | 6.520678 | 7.589077 | 0.859219 |
response = self._put(path, 'RENAMESNAPSHOT',
oldsnapshotname=oldsnapshotname, snapshotname=snapshotname, **kwargs)
assert not response.content | def rename_snapshot(self, path, oldsnapshotname, snapshotname, **kwargs) | Rename a snapshot | 5.208474 | 6.296414 | 0.827213 |
statuses = self.list_status(path, **kwargs)
if len(statuses) == 1 and statuses[0].pathSuffix == '' and statuses[0].type == 'FILE':
raise NotADirectoryError('Not a directory: {!r}'.format(path))
return [f.pathSuffix for f in statuses] | def listdir(self, path, **kwargs) | Return a list containing names of files in the given path | 3.691749 | 3.617167 | 1.020619 |
try:
self.get_file_status(path, **kwargs)
return True
except HdfsFileNotFoundException:
return False | def exists(self, path, **kwargs) | Return true if the given path exists | 4.625756 | 4.85169 | 0.953432 |
try:
listing = self.list_status(top, **kwargs)
except HdfsException as e:
if onerror is not None:
onerror(e)
return
dirnames, filenames = [], []
for f in listing:
if f.type == 'DIRECTORY':
dirnames.append(f.pathSuffix)
elif f.type == 'FILE':
filenames.append(f.pathSuffix)
else: # pragma: no cover
raise AssertionError("Unexpected type {}".format(f.type))
if topdown:
yield top, dirnames, filenames
for name in dirnames:
new_path = posixpath.join(top, name)
for x in self.walk(new_path, topdown, onerror, **kwargs):
yield x
if not topdown:
yield top, dirnames, filenames | def walk(self, top, topdown=True, onerror=None, **kwargs) | See ``os.walk`` for documentation | 1.943838 | 1.988218 | 0.977678 |
with io.open(localsrc, 'rb') as f:
self.create(dest, f, **kwargs) | def copy_from_local(self, localsrc, dest, **kwargs) | Copy a single file from the local file system to ``dest``
Takes all arguments that :py:meth:`create` takes. | 3.910012 | 3.801065 | 1.028662 |
with self.open(src, **kwargs) as fsrc:
with io.open(localdest, 'wb') as fdst:
shutil.copyfileobj(fsrc, fdst) | def copy_to_local(self, src, localdest, **kwargs) | Copy a single file from ``src`` to the local file system
Takes all arguments that :py:meth:`open` takes. | 2.360248 | 2.428472 | 0.971907 |
if (max_staleness is None or
self._last_time_recorded_active is None or
self._last_time_recorded_active < time.time() - max_staleness):
# Make a cheap request and rely on the reordering in self._record_last_active
self.get_file_status('/')
return self.hosts[0] | def get_active_namenode(self, max_staleness=None) | Return the address of the currently active NameNode.
:param max_staleness: This function caches the active NameNode. If this age of this cached
result is less than ``max_staleness`` seconds, return it. Otherwise, or if this
parameter is None, do a lookup.
:type max_staleness: float
:raises HdfsNoServerException: can't find an active NameNode | 6.668209 | 6.893003 | 0.967388 |
if not self.__has_more():
raise StopIteration()
else:
return javabridge.get_env().get_string(self.__next()) | def next(self) | Reads the next dataset row.
:return: the next row
:rtype: Instance | 5.907434 | 7.726441 | 0.764574 |
parser = argparse.ArgumentParser(
description='Executes an associator from the command-line. Calls JVM start/stop automatically.')
parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m")
parser.add_argument("-t", metavar="train", dest="train", required=True, help="training set file")
parser.add_argument("associator", help="associator classname, e.g., weka.associations.Apriori")
parser.add_argument("option", nargs=argparse.REMAINDER, help="additional associator options")
parsed = parser.parse_args()
jars = []
if parsed.classpath is not None:
jars = parsed.classpath.split(os.pathsep)
jvm.start(jars, max_heap_size=parsed.heap, packages=True)
logger.debug("Commandline: " + join_options(sys.argv[1:]))
try:
associator = Associator(classname=parsed.associator)
if len(parsed.option) > 0:
associator.options = parsed.option
loader = converters.loader_for_file(parsed.train)
data = loader.load_file(parsed.train)
associator.build_associations(data)
print(str(associator))
except Exception, e:
print(e)
finally:
jvm.stop() | def main() | Runs a associator from the command-line. Calls JVM start/stop automatically.
Use -h to see all options. | 3.377922 | 2.846719 | 1.186602 |
if self.index < self.length:
index = self.index
self.index += 1
return self.rules[index]
else:
raise StopIteration() | def next(self) | Returns the next rule.
:return: the next rule object
:rtype: AssociationRule | 3.228509 | 3.171474 | 1.017984 |
depth = actor.depth
row = ""
for i in xrange(depth - 1):
row += "| "
if depth > 0:
row += "|-"
name = actor.name
if name != actor.__class__.__name__:
name = actor.__class__.__name__ + " '" + name + "'"
row += name
quickinfo = actor.quickinfo
if quickinfo is not None:
row += " [" + quickinfo + "]"
content.append(row)
if isinstance(actor, ActorHandler):
for sub in actor.actors:
self._build_tree(sub, content) | def _build_tree(self, actor, content) | Builds the tree for the given actor.
:param actor: the actor to process
:type actor: Actor
:param content: the rows of the tree collected so far
:type content: list | 3.125882 | 3.161208 | 0.988825 |
actors = []
for actor in self.owner.actors:
if actor.skip:
continue
actors.append(actor)
if len(actors) == 0:
return
if not self.allow_source and base.is_source(actors[0]):
raise Exception("Actor '" + actors[0].full_name + "' is a source, but no sources allowed!")
for i in xrange(1, len(actors)):
if not isinstance(actors[i], InputConsumer):
raise Exception("Actor does not accept any input: " + actors[i].full_name) | def check_actors(self) | Checks the actors of the owner. Raises an exception if invalid. | 4.704778 | 3.968757 | 1.185454 |
result = None
try:
f = open(fname, 'w')
f.write(flow.to_json())
f.close()
except Exception, e:
result = str(e)
return result | def save(cls, flow, fname) | Saves the flow to a JSON file.
:param flow: the flow to save
:type flow: Flow
:param fname: the file to load
:type fname: str
:return: None if successful, otherwise error message
:rtype: str | 2.642681 | 2.287561 | 1.155239 |
result = super(BranchDirector, self).setup()
if result is None:
try:
self.check_actors()
except Exception, e:
result = str(e)
return result | def setup(self) | Performs some checks.
:return: None if successful, otherwise error message.
:rtype: str | 6.725204 | 6.03866 | 1.113692 |
matrix = javabridge.call(self.jobject, "rankedAttributes", "()[[D")
if matrix is None:
return None
else:
return arrays.double_matrix_to_ndarray(matrix) | def ranked_attributes(self) | Returns the matrix of ranked attributes from the last run.
:return: the Numpy matrix
:rtype: ndarray | 6.253707 | 5.748587 | 1.087869 |
if self.index < self.length:
index = self.index
self.index += 1
return self.data[index]
else:
raise StopIteration() | def next(self) | Returns the next element from the array.
:return: the next array element object, wrapped as JavaObject if not null
:rtype: JavaObject or None | 2.666869 | 3.486972 | 0.76481 |
if self.is_optionhandler:
return types.string_array_to_list(javabridge.call(self.jobject, "getOptions", "()[Ljava/lang/String;"))
else:
return [] | def options(self) | Obtains the currently set options as list.
:return: the list of options
:rtype: list | 7.700003 | 8.38497 | 0.91831 |
if self.is_optionhandler:
javabridge.call(self.jobject, "setOptions", "([Ljava/lang/String;)V", types.string_list_to_array(options)) | def options(self, options) | Sets the command-line options (as list).
:param options: the list of command-line options to set
:type options: list | 6.779332 | 7.627575 | 0.888792 |
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
fig = plt.figure()
if data.class_index == -1:
c = None
else:
c = []
for i in xrange(data.num_instances):
inst = data.get_instance(i)
c.append(inst.get_value(inst.class_index))
if atts is None:
atts = []
for i in xrange(data.num_attributes):
atts.append(i)
num_plots = len(atts)
if inst_no:
num_plots += 1
clusters = evl.cluster_assignments
for index, att in enumerate(atts):
x = data.values(att)
ax = fig.add_subplot(
1, num_plots, index + 1)
if c is None:
ax.scatter(clusters, x, s=size, alpha=0.5)
else:
ax.scatter(clusters, x, c=c, s=size, alpha=0.5)
ax.set_xlabel("Clusters")
ax.set_title(data.attribute(att).name)
ax.get_xaxis().set_ticks(list(set(clusters)))
ax.grid(True)
if inst_no:
x = []
for i in xrange(data.num_instances):
x.append(i+1)
ax = fig.add_subplot(
1, num_plots, num_plots)
if c is None:
ax.scatter(clusters, x, s=size, alpha=0.5)
else:
ax.scatter(clusters, x, c=c, s=size, alpha=0.5)
ax.set_xlabel("Clusters")
ax.set_title("Instance number")
ax.get_xaxis().set_ticks(list(set(clusters)))
ax.grid(True)
if title is None:
title = data.relationname
fig.canvas.set_window_title(title)
plt.draw()
if not outfile is None:
plt.savefig(outfile)
if wait:
plt.show() | def plot_cluster_assignments(evl, data, atts=None, inst_no=False, size=10, title=None, outfile=None, wait=True) | Plots the cluster assignments against the specified attributes.
TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
:param evl: the cluster evaluation to obtain the cluster assignments from
:type evl: ClusterEvaluation
:param data: the dataset the clusterer was evaluated against
:type data: Instances
:param atts: the list of attribute indices to plot, None for all
:type atts: list
:param inst_no: whether to include a fake attribute with the instance number
:type inst_no: bool
:param size: the size of the circles in point
:type size: int
:param title: an optional title
:type title: str
:param outfile: the (optional) file to save the generated plot to. The extension determines the file format.
:type outfile: str
:param wait: whether to wait for the user to close the plot
:type wait: bool | 2.015813 | 1.943392 | 1.037266 |
array = javabridge.get_env().make_object_array(len(jobjects), javabridge.get_env().find_class("java/lang/Object"))
for i in xrange(len(jobjects)):
obj = jobjects[i]
if isinstance(obj, JavaObject):
obj = obj.jobject
javabridge.get_env().set_object_array_element(array, i, obj)
javabridge.static_call(
"Lweka/core/SerializationHelper;", "writeAll",
"(Ljava/lang/String;[Ljava/lang/Object;)V",
filename, array) | def write_all(filename, jobjects) | Serializes the list of objects to disk. JavaObject instances get automatically unwrapped.
:param filename: the file to serialize the object to
:type filename: str
:param jobjects: the list of objects to serialize
:type jobjects: list | 2.365447 | 2.39596 | 0.987265 |
parser = argparse.ArgumentParser(
description='Executes a filter from the command-line. Calls JVM start/stop automatically.')
parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m")
parser.add_argument("-i", metavar="input1", dest="input1", required=True, help="input file 1")
parser.add_argument("-o", metavar="output1", dest="output1", required=True, help="output file 1")
parser.add_argument("-r", metavar="input2", dest="input2", help="input file 2")
parser.add_argument("-s", metavar="output2", dest="output2", help="output file 2")
parser.add_argument("-c", metavar="classindex", default="-1", dest="classindex",
help="1-based class attribute index")
parser.add_argument("filter", help="filter classname, e.g., weka.filters.AllFilter")
parser.add_argument("option", nargs=argparse.REMAINDER, help="additional filter options")
parsed = parser.parse_args()
if parsed.input2 is None and parsed.output2 is not None:
raise Exception("No second input file provided ('-r ...')!")
jars = []
if parsed.classpath is not None:
jars = parsed.classpath.split(os.pathsep)
params = []
if parsed.input1 is not None:
params.extend(["-i", parsed.input1])
if parsed.output1 is not None:
params.extend(["-o", parsed.output1])
if parsed.input2 is not None:
params.extend(["-r", parsed.input2])
if parsed.output2 is not None:
params.extend(["-s", parsed.output2])
if parsed.classindex is not None:
params.extend(["-c", parsed.classindex])
jvm.start(jars, max_heap_size=parsed.heap, packages=True)
logger.debug("Commandline: " + join_options(sys.argv[1:]))
try:
flter = Filter(parsed.filter)
if len(parsed.option) > 0:
flter.options = parsed.option
loader = Loader(classname="weka.core.converters.ArffLoader")
in1 = loader.load_file(parsed.input1)
cls = parsed.classindex
if str(parsed.classindex) == "first":
cls = "0"
if str(parsed.classindex) == "last":
cls = str(in1.num_attributes - 1)
in1.class_index = int(cls)
flter.inputformat(in1)
out1 = flter.filter(in1)
saver = Saver(classname="weka.core.converters.ArffSaver")
saver.save_file(out1, parsed.output1)
if parsed.input2 is not None:
in2 = loader.load_file(parsed.input2)
in2.class_index = int(cls)
out2 = flter.filter(in2)
saver.save_file(out2, parsed.output2)
except Exception, e:
print(e)
finally:
jvm.stop() | def main() | Runs a filter from the command-line. Calls JVM start/stop automatically.
Use -h to see all options. | 2.227221 | 2.077321 | 1.07216 |
result = javabridge.call(
self.loader.jobject, "getNextInstance",
"(Lweka/core/Instances;)Lweka/core/Instance;", self.structure.jobject)
if result is None:
raise StopIteration()
else:
return Instance(result) | def next(self) | Reads the next dataset row.
:return: the next row
:rtype: Instance | 5.556921 | 5.255363 | 1.057381 |
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
actual = []
predicted = []
error = None
cls = None
for pred in predictions:
actual.append(pred.actual)
predicted.append(pred.predicted)
if isinstance(pred, NumericPrediction):
if error is None:
error = []
error.append(abs(pred.error))
elif isinstance(pred, NominalPrediction):
if cls is None:
cls = []
if pred.actual != pred.predicted:
cls.append(1)
else:
cls.append(0)
fig, ax = plt.subplots()
if error is None and cls is None:
ax.scatter(actual, predicted, s=absolute_size, alpha=0.5)
elif cls is not None:
ax.scatter(actual, predicted, c=cls, s=absolute_size, alpha=0.5)
elif error is not None:
if not absolute:
min_err = min(error)
max_err = max(error)
factor = (max_err - min_err) / max_relative_size
for i in xrange(len(error)):
error[i] = error[i] / factor * max_relative_size
ax.scatter(actual, predicted, s=error, alpha=0.5)
ax.set_xlabel("actual")
ax.set_ylabel("predicted")
if title is None:
title = "Classifier errors"
ax.set_title(title)
ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c="0.3")
ax.grid(True)
fig.canvas.set_window_title(title)
plt.draw()
if outfile is not None:
plt.savefig(outfile)
if wait:
plt.show() | def plot_classifier_errors(predictions, absolute=True, max_relative_size=50, absolute_size=50, title=None,
outfile=None, wait=True) | Plots the classifers for the given list of predictions.
TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
:param predictions: the predictions to plot
:type predictions: list
:param absolute: whether to use absolute errors as size or relative ones
:type absolute: bool
:param max_relative_size: the maximum size in point in case of relative mode
:type max_relative_size: int
:param absolute_size: the size in point in case of absolute mode
:type absolute_size: int
:param title: an optional title
:type title: str
:param outfile: the output file, ignored if None
:type outfile: str
:param wait: whether to wait for the user to close the plot
:type wait: bool | 1.971167 | 1.962975 | 1.004173 |
parser = argparse.ArgumentParser(
description='Performs classification/regression from the command-line. Calls JVM start/stop automatically.')
parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m")
parser.add_argument("-t", metavar="train", dest="train", required=True, help="Training set file")
parser.add_argument("-T", metavar="test", dest="test", help="Test set file")
parser.add_argument("-c", metavar="class index", dest="classindex", help="1-based class attribute index")
parser.add_argument("-d", metavar="outmodel", dest="outmodel", help="model output file name")
parser.add_argument("-l", metavar="inmodel", dest="inmodel", help="model input file name")
parser.add_argument("-x", metavar="num folds", dest="numfolds", help="number of folds for cross-validation")
parser.add_argument("-s", metavar="seed", dest="seed", help="seed value for randomization")
parser.add_argument("-v", action="store_true", dest="notrainstats", help="no statistics for training")
parser.add_argument("-o", action="store_true", dest="onlystats", help="only statistics, don't output model")
parser.add_argument("-i", action="store_true", dest="irstats", help="output information retrieval statistics")
parser.add_argument("-k", action="store_true", dest="itstats", help="output information theoretic statistics")
parser.add_argument("-m", metavar="costmatrix", dest="costmatrix", help="cost matrix file")
parser.add_argument("-g", metavar="graph", dest="graph", help="output file for graph (if supported)")
parser.add_argument("classifier", help="classifier classname, e.g., weka.classifiers.trees.J48")
parser.add_argument("option", nargs=argparse.REMAINDER, help="additional classifier options")
parsed = parser.parse_args()
jars = []
if parsed.classpath is not None:
jars = parsed.classpath.split(os.pathsep)
params = []
if parsed.train is not None:
params.extend(["-t", parsed.train])
if parsed.test is not None:
params.extend(["-T", parsed.test])
if parsed.classindex is not None:
params.extend(["-c", parsed.classindex])
if parsed.outmodel is not None:
params.extend(["-d", parsed.outmodel])
if parsed.inmodel is not None:
params.extend(["-l", parsed.inmodel])
if parsed.numfolds is not None:
params.extend(["-x", parsed.numfolds])
if parsed.seed is not None:
params.extend(["-s", parsed.seed])
if parsed.notrainstats:
params.append("-v")
if parsed.onlystats:
params.append("-o")
if parsed.irstats:
params.append("-i")
if parsed.itstats:
params.append("-k")
if parsed.costmatrix is not None:
params.extend(["-m", parsed.costmatrix])
if parsed.graph is not None:
params.extend(["-g", parsed.graph])
jvm.start(jars, max_heap_size=parsed.heap, packages=True)
logger.debug("Commandline: " + join_options(sys.argv[1:]))
try:
classifier = Classifier(classname=parsed.classifier)
if len(parsed.option) > 0:
classifier.options = parsed.option
print(Evaluation.evaluate_model(classifier, params))
except Exception, e:
print(e)
finally:
jvm.stop() | def main() | Runs a classifier from the command-line. Calls JVM start/stop automatically.
Use -h to see all options. | 2.129813 | 2.021639 | 1.053508 |
if self.is_batchpredictor:
return arrays.double_matrix_to_ndarray(self.__distributions(data.jobject))
else:
return None | def distributions_for_instances(self, data) | Peforms predictions, returning the class distributions.
:param data: the Instances to get the class distributions for
:type data: Instances
:return: the class distribution matrix, None if not a batch predictor
:rtype: ndarray | 20.908936 | 12.737967 | 1.641466 |
values = []
for i in xrange(self.num_instances):
inst = self.get_instance(i)
values.append(inst.get_value(index))
return numpy.array(values) | def values(self, index) | Returns the internal values of this attribute from all the instance objects.
:return: the values as numpy array
:rtype: list | 3.093112 | 3.126681 | 0.989264 |
msg = inst1.equal_headers(inst2)
if msg is not None:
raise Exception("Cannot appent instances: " + msg)
result = cls.copy_instances(inst1)
for i in xrange(inst2.num_instances):
result.add_instance(inst2.get_instance(i))
return result | def append_instances(cls, inst1, inst2) | Merges the two datasets (one-after-the-other). Throws an exception if the datasets aren't compatible.
:param inst1: the first dataset
:type inst1: Instances
:param inst2: the first dataset
:type inst2: Instances
:return: the combined dataset
:rtype: Instances | 4.011071 | 3.707952 | 1.081748 |
enm = javabridge.call(self.jobject, "enumerateValues", "()Ljava/util/Enumeration;")
if enm is None:
return None
else:
return types.enumeration_to_list(enm) | def values(self) | Returns the labels, strings or relation-values.
:return: all the values, None if not NOMINAL, STRING, or RELATION
:rtype: list | 5.733923 | 6.350035 | 0.902975 |
if self.row < self.data.num_instances:
index = self.row
self.row += 1
return self.data.get_instance(index)
else:
raise StopIteration() | def next(self) | Returns the next row from the Instances object.
:return: the next Instance object
:rtype: Instance | 3.488614 | 3.265104 | 1.068454 |
if self.col < self.data.num_attributes:
index = self.col
self.col += 1
return self.data.attribute(index)
else:
raise StopIteration() | def next(self) | Returns the next attribute from the Instances object.
:return: the next Attribute object
:rtype: Attribute | 4.231844 | 3.910264 | 1.08224 |
path = diff.b_path
assert any(
path.endswith(ext)
for ext in importlib.machinery.SOURCE_SUFFIXES
) | def check(self, diff) | Check that the new file introduced is a python source file | 9.333321 | 6.378719 | 1.463197 |
path = diff.b_path
contrib_path = self.project.contrib_module_path
assert pathlib.Path(contrib_path) in pathlib.Path(path).parents | def check(self, diff) | Check that the new file is within the contrib subdirectory | 8.715844 | 5.733418 | 1.520183 |
relative_path = relative_to_contrib(diff, self.project)
subpackage_name = relative_path.parts[0]
assert re_test(SUBPACKAGE_NAME_REGEX, subpackage_name) | def check(self, diff) | Check that the name of the subpackage within contrib is valid
The package name must match ``user_[a-zA-Z0-9_]+``. | 10.458794 | 6.997032 | 1.494747 |
relative_path = relative_to_contrib(diff, self.project)
assert len(relative_path.parts) == 2 | def check(self, diff) | Check that the new file introduced is at the proper depth
The proper depth is 2 (contrib/user_example/new_file.py) | 12.773604 | 7.213597 | 1.770768 |
r
filename = pathlib.Path(diff.b_path).parts[-1]
is_valid_feature_module_name = re_test(
FEATURE_MODULE_NAME_REGEX, filename)
is_valid_init_module_name = filename == '__init__.py'
assert is_valid_feature_module_name or is_valid_init_module_name | def check(self, diff) | r"""Check that the new file introduced has a valid name
The module can either be an __init__.py file or must
match ``feature_[a-zA-Z0-9_]+\.\w+``. | 5.571055 | 3.861629 | 1.442669 |
path = pathlib.Path(diff.b_path)
filename = path.parts[-1]
if filename == '__init__.py':
abspath = self.project.path.joinpath(path)
assert isemptyfile(abspath) | def check(self, diff) | Check that if the new file is __init__.py, then it is empty | 5.973634 | 4.571628 | 1.306675 |
global _handler
if _handler is None:
_handler = logging.StreamHandler()
formatter = logging.Formatter(format)
_handler.setFormatter(formatter)
level = logging._checkLevel(level)
levelName = logging._levelToName[level]
logger.setLevel(level)
_handler.setLevel(level)
if _handler not in logger.handlers:
logger.addHandler(_handler)
if echo:
logger.log(
level, 'Logging enabled at level {name}.'.format(name=levelName)) | def enable(logger=logger,
level=logging.INFO,
format=DETAIL_LOG_FORMAT,
echo=True) | Enable simple console logging for this module | 2.473035 | 2.561723 | 0.96538 |
base_dir = os.environ.get('OS_REFRESH_CONFIG_BASE_DIR')
if base_dir is None:
# NOTE(bnemec): Prefer the new location, but still allow the old one.
if os.path.isdir(OLD_BASE_DIR) and not os.path.isdir(DEFAULT_BASE_DIR):
logging.warning('Base directory %s is deprecated. The recommended '
'base directory is %s',
OLD_BASE_DIR, DEFAULT_BASE_DIR)
base_dir = OLD_BASE_DIR
else:
base_dir = DEFAULT_BASE_DIR
return base_dir | def default_base_dir() | Determine the default base directory path
If the OS_REFRESH_CONFIG_BASE_DIR environment variable is set,
use its value.
Otherwise, prefer the new default path, but still allow the old one for
backwards compatibility. | 3.120305 | 2.327373 | 1.340698 |
if black is None:
raise NotImplementedError
major, minor, _ = platform.python_version_tuple()
pyversion = 'py{major}{minor}'.format(major=major, minor=minor)
target_versions = [black.TargetVersion[pyversion.upper()]]
line_length = black.DEFAULT_LINE_LENGTH
string_normalization = True
mode = black.FileMode(
target_versions=target_versions,
line_length=line_length,
string_normalization=string_normalization,
)
return black.format_file_contents(code, fast=False, mode=mode) | def blacken_code(code) | Format code content using Black
Args:
code (str): code as string
Returns:
str | 2.921165 | 3.052467 | 0.956985 |
name = func.__name__
return property(fget=lambda self: getattr(self, '_%s' % name), fset=func) | def _writer(func) | Decorator for a custom writer, but a default reader | 4.967979 | 4.435412 | 1.120072 |
if not isinstance(certification_request, csr.CertificationRequest):
raise TypeError(_pretty_message(
'''
certification_request must be an instance of
asn1crypto.csr.CertificationRequest, not %s
''',
_type_name(certification_request)
))
return pem.armor(
'CERTIFICATE REQUEST',
certification_request.dump()
) | def pem_armor_csr(certification_request) | Encodes a CSR into PEM format
:param certification_request:
An asn1crypto.csr.CertificationRequest object of the CSR to armor.
Typically this is obtained from CSRBuilder.build().
:return:
A byte string of the PEM-encoded CSR | 2.417424 | 2.201413 | 1.098124 |
output = textwrap.dedent(string)
# Unwrap lines, taking into account bulleted lists, ordered lists and
# underlines consisting of = signs
if output.find('\n') != -1:
output = re.sub('(?<=\\S)\n(?=[^ \n\t\\d\\*\\-=])', ' ', output)
if params:
output = output % params
output = output.strip()
return output | def _pretty_message(string, *params) | Takes a multi-line string and does the following:
- dedents
- converts newlines with text before and after into a single line
- strips leading and trailing whitespace
:param string:
The string to format
:param *params:
Params to interpolate into the string
:return:
The formatted string | 6.175002 | 6.429454 | 0.960424 |
is_dict = isinstance(value, dict)
if not isinstance(value, x509.Name) and not is_dict:
raise TypeError(_pretty_message(
'''
subject must be an instance of asn1crypto.x509.Name or a dict,
not %s
''',
_type_name(value)
))
if is_dict:
value = x509.Name.build(value)
self._subject = value | def subject(self, value) | An asn1crypto.x509.Name object, or a dict with at least the
following keys:
- country_name
- state_or_province_name
- locality_name
- organization_name
- common_name
Less common keys include:
- organizational_unit_name
- email_address
- street_address
- postal_code
- business_category
- incorporation_locality
- incorporation_state_or_province
- incorporation_country
Uncommon keys include:
- surname
- title
- serial_number
- name
- given_name
- initials
- generation_qualifier
- dn_qualifier
- pseudonym
- domain_component
All values should be unicode strings | 2.582122 | 2.367939 | 1.090451 |
is_oscrypto = isinstance(value, asymmetric.PublicKey)
if not isinstance(value, keys.PublicKeyInfo) and not is_oscrypto:
raise TypeError(_pretty_message(
'''
subject_public_key must be an instance of
asn1crypto.keys.PublicKeyInfo or oscrypto.asymmetric.PublicKey,
not %s
''',
_type_name(value)
))
if is_oscrypto:
value = value.asn1
self._subject_public_key = value
self._key_identifier = self._subject_public_key.sha1
self._authority_key_identifier = None | def subject_public_key(self, value) | An asn1crypto.keys.PublicKeyInfo or oscrypto.asymmetric.PublicKey
object of the subject's public key. | 3.080806 | 2.480852 | 1.241834 |
if value not in set(['sha1', 'sha256', 'sha512']):
raise ValueError(_pretty_message(
'''
hash_algo must be one of "sha1", "sha256", "sha512", not %s
''',
repr(value)
))
self._hash_algo = value | def hash_algo(self, value) | A unicode string of the hash algorithm to use when signing the
request - "sha1" (not recommended), "sha256" or "sha512" | 2.536635 | 2.341401 | 1.083383 |
if self._subject_alt_name is None:
return []
output = []
for general_name in self._subject_alt_name:
if general_name.name == name:
output.append(general_name.native)
return output | def _get_subject_alt(self, name) | Returns the native value for each value in the subject alt name
extension reqiest that is an asn1crypto.x509.GeneralName of the type
specified by the name param
:param name:
A unicode string use to filter the x509.GeneralName objects by -
is the choice name x509.GeneralName
:return:
A list of unicode strings. Empty list indicates no subject alt
name extension request. | 3.384122 | 2.451023 | 1.380698 |
if self._subject_alt_name is not None:
filtered_general_names = []
for general_name in self._subject_alt_name:
if general_name.name != name:
filtered_general_names.append(general_name)
self._subject_alt_name = x509.GeneralNames(filtered_general_names)
else:
self._subject_alt_name = x509.GeneralNames()
if values is not None:
for value in values:
new_general_name = x509.GeneralName(name=name, value=value)
self._subject_alt_name.append(new_general_name)
if len(self._subject_alt_name) == 0:
self._subject_alt_name = None | def _set_subject_alt(self, name, values) | Replaces all existing asn1crypto.x509.GeneralName objects of the choice
represented by the name parameter with the values
:param name:
A unicode string of the choice name of the x509.GeneralName object
:param values:
A list of unicode strings to use as the values for the new
x509.GeneralName objects | 1.875385 | 1.692736 | 1.107902 |
extension = x509.Extension({
'extn_id': name
})
# We use native here to convert OIDs to meaningful names
name = extension['extn_id'].native
spec = extension.spec('extn_value')
if not isinstance(value, spec) and value is not None:
raise TypeError(_pretty_message(
'''
value must be an instance of %s, not %s
''',
_type_name(spec),
_type_name(value)
))
if name in self._special_extensions:
setattr(self, '_%s' % name, value)
else:
if value is None:
if name in self._other_extensions:
del self._other_extensions[name]
else:
self._other_extensions[name] = value | def set_extension(self, name, value) | Sets the value for an extension using a fully constructed Asn1Value
object from asn1crypto. Normally this should not be needed, and the
convenience attributes should be sufficient.
See the definition of asn1crypto.x509.Extension to determine the
appropriate object type for a given extension. Extensions are marked
as critical when RFC5280 or RFC6960 indicate so. If an extension is
validly marked as critical or not (such as certificate policies and
extended key usage), this class will mark it as non-critical.
:param name:
A unicode string of an extension id name from
asn1crypto.x509.ExtensionId
:param value:
A value object per the specs defined by asn1crypto.x509.Extension | 3.806094 | 3.429168 | 1.109918 |
if name == 'subject_alt_name':
return len(self._subject) == 0
if name == 'basic_constraints':
return self.ca is True
return {
'subject_directory_attributes': False,
'key_usage': True,
'issuer_alt_name': False,
'name_constraints': True,
# Based on example EV certificates, non-CA certs have this marked
# as non-critical, most likely because existing browsers don't
# seem to support policies or name constraints
'certificate_policies': False,
'policy_mappings': True,
'policy_constraints': True,
'extended_key_usage': False,
'inhibit_any_policy': True,
'subject_information_access': False,
'tls_feature': False,
'ocsp_no_check': False,
}.get(name, False) | def _determine_critical(self, name) | :return:
A boolean indicating the correct value of the critical flag for
an extension, based on information from RFC5280 and RFC 6960. The
correct value is based on the terminology SHOULD or MUST. | 4.747773 | 4.302285 | 1.103547 |
is_oscrypto = isinstance(signing_private_key, asymmetric.PrivateKey)
if not isinstance(signing_private_key, keys.PrivateKeyInfo) and not is_oscrypto:
raise TypeError(_pretty_message(
'''
signing_private_key must be an instance of
asn1crypto.keys.PrivateKeyInfo or
oscrypto.asymmetric.PrivateKey, not %s
''',
_type_name(signing_private_key)
))
signature_algo = signing_private_key.algorithm
if signature_algo == 'ec':
signature_algo = 'ecdsa'
signature_algorithm_id = '%s_%s' % (self._hash_algo, signature_algo)
def _make_extension(name, value):
return {
'extn_id': name,
'critical': self._determine_critical(name),
'extn_value': value
}
extensions = []
for name in sorted(self._special_extensions):
value = getattr(self, '_%s' % name)
if value is not None:
extensions.append(_make_extension(name, value))
for name in sorted(self._other_extensions.keys()):
extensions.append(_make_extension(name, self._other_extensions[name]))
attributes = []
if extensions:
attributes.append({
'type': 'extension_request',
'values': [extensions]
})
certification_request_info = csr.CertificationRequestInfo({
'version': 'v1',
'subject': self._subject,
'subject_pk_info': self._subject_public_key,
'attributes': attributes
})
if signing_private_key.algorithm == 'rsa':
sign_func = asymmetric.rsa_pkcs1v15_sign
elif signing_private_key.algorithm == 'dsa':
sign_func = asymmetric.dsa_sign
elif signing_private_key.algorithm == 'ec':
sign_func = asymmetric.ecdsa_sign
if not is_oscrypto:
signing_private_key = asymmetric.load_private_key(signing_private_key)
signature = sign_func(signing_private_key, certification_request_info.dump(), self._hash_algo)
return csr.CertificationRequest({
'certification_request_info': certification_request_info,
'signature_algorithm': {
'algorithm': signature_algorithm_id,
},
'signature': signature
}) | def build(self, signing_private_key) | Validates the certificate information, constructs an X.509 certificate
and then signs it
:param signing_private_key:
An asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey
object for the private key to sign the request with. This should be
the private key that matches the public key.
:return:
An asn1crypto.csr.CertificationRequest object of the request | 2.071244 | 1.91145 | 1.083598 |
'''Compute cross-validated metrics.
Trains this model on data X with labels y.
Returns a list of dict with keys name, scoring_name, value.
Args:
X (Union[np.array, pd.DataFrame]): data
y (Union[np.array, pd.DataFrame, pd.Series]): labels
'''
# compute scores
results = self.cv_score_mean(X, y)
return results | def compute_metrics_cv(self, X, y, **kwargs) | Compute cross-validated metrics.
Trains this model on data X with labels y.
Returns a list of dict with keys name, scoring_name, value.
Args:
X (Union[np.array, pd.DataFrame]): data
y (Union[np.array, pd.DataFrame, pd.Series]): labels | 6.204637 | 2.215659 | 2.800358 |
'''Compute mean score across cross validation folds.
Split data and labels into cross validation folds and fit the model for
each fold. Then, for each scoring type in scorings, compute the score.
Finally, average the scores across folds. Returns a dictionary mapping
scoring to score.
Args:
X (np.array): data
y (np.array): labels
scorings (List[str]): scoring types
'''
X, y = self._format_inputs(X, y)
if self.problem_type.binary_classification:
kf = StratifiedKFold(
shuffle=True, random_state=RANDOM_STATE + 3)
elif self.problem_type.multi_classification:
self.target_type_transformer.inverse_transform(y)
transformer = self.target_type_transformer
kf = StratifiedKFoldMultiClassIndicator(
transformer, shuffle=True, n_splits=3,
random_state=RANDOM_STATE + 3)
elif self.problem_type.regression:
kf = KFold(shuffle=True, n_splits=3, random_state=RANDOM_STATE + 4)
else:
raise NotImplementedError
scoring = {
scorer_info.name: scorer_info.scorer
for scorer_info in self.scorers_info
}
cv_results = cross_validate(
self.estimator, X, y,
scoring=scoring, cv=kf, return_train_score=False)
# post-processing
results = self._process_cv_results(cv_results)
return results | def cv_score_mean(self, X, y) | Compute mean score across cross validation folds.
Split data and labels into cross validation folds and fit the model for
each fold. Then, for each scoring type in scorings, compute the score.
Finally, average the scores across folds. Returns a dictionary mapping
scoring to score.
Args:
X (np.array): data
y (np.array): labels
scorings (List[str]): scoring types | 3.313266 | 2.329186 | 1.422499 |
# TODO Project should require ModuleType
project = Project(project_root)
contrib = project._resolve('.features.contrib')
return _get_contrib_features(contrib) | def get_contrib_features(project_root) | Get contributed features for a project at project_root
For a project ``foo``, walks modules within the ``foo.features.contrib``
subpackage. A single object that is an instance of ``ballet.Feature`` is
imported if present in each module. The resulting ``Feature`` objects are
collected.
Args:
project_root (str, path-like): Path to project root
Returns:
List[ballet.Feature]: list of Feature objects | 14.476258 | 12.850444 | 1.126518 |
if isinstance(module, types.ModuleType):
# any module that has a __path__ attribute is also a package
if hasattr(module, '__path__'):
yield from _get_contrib_features_from_package(module)
else:
yield _get_contrib_feature_from_module(module)
else:
raise ValueError('Input is not a module') | def _get_contrib_features(module) | Get contributed features from within given module
Be very careful with untrusted code. The module/package will be
walked, every submodule will be imported, and all the code therein will be
executed. But why would you be trying to import from an untrusted package
anyway?
Args:
contrib (module): module (standalone or package) that contains feature
definitions
Returns:
List[Feature]: list of features | 3.507909 | 3.494766 | 1.003761 |
import ballet.templating
import ballet.util.log
ballet.util.log.enable(level='INFO',
format=ballet.util.log.SIMPLE_LOG_FORMAT,
echo=False)
ballet.templating.render_project_template() | def quickstart() | Generate a brand-new ballet project | 5.938248 | 4.822457 | 1.231374 |
import ballet.update
import ballet.util.log
ballet.util.log.enable(level='INFO',
format=ballet.util.log.SIMPLE_LOG_FORMAT,
echo=False)
ballet.update.update_project_template(push=push) | def update_project_template(push) | Update an existing ballet project from the upstream template | 5.119717 | 4.074466 | 1.256537 |
import ballet.templating
import ballet.util.log
ballet.util.log.enable(level='INFO',
format=ballet.util.log.SIMPLE_LOG_FORMAT,
echo=False)
ballet.templating.start_new_feature() | def start_new_feature() | Start working on a new feature from a template | 5.540109 | 5.143829 | 1.07704 |
_, fn, ext = splitext2(filepath)
if ext == '.h5':
_write_tabular_h5(obj, filepath)
elif ext == '.pkl':
_write_tabular_pickle(obj, filepath)
else:
raise NotImplementedError | def write_tabular(obj, filepath) | Write tabular object in HDF5 or pickle format
Args:
obj (array or DataFrame): tabular object to write
filepath (path-like): path to write to; must end in '.h5' or '.pkl' | 2.994327 | 2.976576 | 1.005964 |
_, fn, ext = splitext2(filepath)
if ext == '.h5':
return _read_tabular_h5(filepath)
elif ext == '.pkl':
return _read_tabular_pickle(filepath)
else:
raise NotImplementedError | def read_tabular(filepath) | Read tabular object in HDF5 or pickle format
Args:
filepath (path-like): path to read to; must end in '.h5' or '.pkl' | 3.101803 | 3.029192 | 1.02397 |
path = pathlib.Path(input_dir).joinpath(config['path'])
kwargs = config['pd_read_kwargs']
return pd.read_csv(path, **kwargs) | def load_table_from_config(input_dir, config) | Load table from table config dict
Args:
input_dir (path-like): directory containing input files
config (dict): mapping with keys 'name', 'path', and 'pd_read_kwargs'.
Returns:
pd.DataFrame | 4.103184 | 3.024456 | 1.356668 |
if not force and not project.on_pr():
raise SkippedValidationTest('Not on PR')
validator = FeatureApiValidator(project)
result = validator.validate()
if not result:
raise InvalidFeatureApi | def validate_feature_api(project, force=False) | Validate feature API | 8.780883 | 8.210912 | 1.069416 |
if not force and not project.on_pr():
raise SkippedValidationTest('Not on PR')
out = project.build()
X_df, y, features = out['X_df'], out['y'], out['features']
proposed_feature = get_proposed_feature(project)
accepted_features = get_accepted_features(features, proposed_feature)
evaluator = GFSSFAcceptanceEvaluator(X_df, y, accepted_features)
accepted = evaluator.judge(proposed_feature)
if not accepted:
raise FeatureRejected | def evaluate_feature_performance(project, force=False) | Evaluate feature performance | 6.831073 | 6.736783 | 1.013996 |
if not force and not project.on_master_after_merge():
raise SkippedValidationTest('Not on master')
out = project.build()
X_df, y, features = out['X_df'], out['y'], out['features']
proposed_feature = get_proposed_feature(project)
accepted_features = get_accepted_features(features, proposed_feature)
evaluator = GFSSFPruningEvaluator(
X_df, y, accepted_features, proposed_feature)
redundant_features = evaluator.prune()
# propose removal
for feature in redundant_features:
logger.debug(PRUNER_MESSAGE + feature.source)
return redundant_features | def prune_existing_features(project, force=False) | Prune existing features | 8.501538 | 8.220052 | 1.034244 |
project = Project(package)
if test_target_type is None:
test_target_type = detect_target_type()
if test_target_type == BalletTestTypes.PROJECT_STRUCTURE_VALIDATION:
check_project_structure(project)
elif test_target_type == BalletTestTypes.FEATURE_API_VALIDATION:
validate_feature_api(project)
elif test_target_type == BalletTestTypes.FEATURE_ACCEPTANCE_EVALUTION:
evaluate_feature_performance(project)
elif test_target_type == (
BalletTestTypes.FEATURE_PRUNING_EVALUATION):
prune_existing_features(project)
else:
raise NotImplementedError(
'Unsupported test target type: {test_target_type}'
.format(test_target_type=test_target_type)) | def validate(package, test_target_type=None) | Entrypoint for ./validate.py script in ballet projects | 3.136609 | 2.791062 | 1.123805 |
root, ext = os.path.splitext(safepath(filepath))
return root + s + ext | def spliceext(filepath, s) | Add s into filepath before the extension
Args:
filepath (str, path): file path
s (str): string to splice
Returns:
str | 4.923405 | 8.457674 | 0.582123 |
if new_ext and new_ext[0] != '.':
new_ext = '.' + new_ext
root, ext = os.path.splitext(safepath(filepath))
return root + new_ext | def replaceext(filepath, new_ext) | Replace any existing file extension with a new one
Example::
>>> replaceext('/foo/bar.txt', 'py')
'/foo/bar.py'
>>> replaceext('/foo/bar.txt', '.doc')
'/foo/bar.doc'
Args:
filepath (str, path): file path
new_ext (str): new file extension; if a leading dot is not included,
it will be added.
Returns:
Tuple[str] | 2.85725 | 4.33985 | 0.658375 |
root, filename = os.path.split(safepath(filepath))
filename, ext = os.path.splitext(safepath(filename))
return root, filename, ext | def splitext2(filepath) | Split filepath into root, filename, ext
Args:
filepath (str, path): file path
Returns:
str | 3.225714 | 4.694744 | 0.687091 |
exists = os.path.exists(safepath(filepath))
if exists:
filesize = os.path.getsize(safepath(filepath))
return filesize == 0
else:
return False | def isemptyfile(filepath) | Determine if the file both exists and isempty
Args:
filepath (str, path): file path
Returns:
bool | 2.664376 | 3.776059 | 0.705597 |
src = pathlib.Path(src).resolve()
dst = pathlib.Path(dst).resolve()
if not src.is_dir():
raise ValueError
if dst.exists() and not dst.is_dir():
raise ValueError
if onexist is None:
def onexist(): pass
_synctree(src, dst, onexist) | def synctree(src, dst, onexist=None) | Recursively sync files at directory src to dst
This is more or less equivalent to::
cp -n -R ${src}/ ${dst}/
If a file at the same path exists in src and dst, it is NOT overwritten
in dst. Pass ``onexist`` in order to raise an error on such conditions.
Args:
src (path-like): source directory
dst (path-like): destination directory, does not need to exist
onexist (callable): function to call if file exists at destination,
takes the full path to destination file as only argument | 2.452047 | 2.997488 | 0.818034 |
r
X = asarray2d(X)
n_samples, _ = X.shape
_, counts = np.unique(X, axis=0, return_counts=True)
empirical_p = counts * 1.0 / n_samples
log_p = np.log(empirical_p)
entropy = -np.sum(np.multiply(empirical_p, log_p))
return entropy | def calculate_disc_entropy(X) | r"""Calculates the exact Shannon entropy of a discrete dataset,
using empirical probabilities according to the equation:
$ H(X) = -\sum(c \in X) p(c) \times \log(p(c)) $
Where $ p(c) $ is calculated as the frequency of c in X.
If X's columns logically represent continuous features,
it is better to use the estimate_cont_entropy function.
If you are unsure of which to use, estimate_entropy can
take datasets of mixed discrete and continuous functions.
Args:
X (array-like): An array-like (np arr, pandas df, etc.) with shape
(n_samples, n_features) or (n_samples)
Returns:
float: A floating-point number representing the dataset entropy. | 2.84063 | 3.434084 | 0.827187 |
X = asarray2d(X)
n_samples, n_features = X.shape
if n_samples <= 1:
return 0
nn = NearestNeighbors(
metric='chebyshev',
n_neighbors=NUM_NEIGHBORS,
algorithm='kd_tree')
nn.fit(X)
if epsilon is None:
# If epsilon is not provided, revert to the Kozachenko Estimator
n_neighbors = NUM_NEIGHBORS
radius = 0
# While we have non-zero radii, calculate for a larger k
# Potentially expensive
while not np.all(radius) and n_neighbors < n_samples:
distances, _ = nn.kneighbors(
n_neighbors=n_neighbors, return_distance=True)
radius = distances[:, -1]
n_neighbors += 1
if n_neighbors == n_samples:
# This case only happens if all samples are the same
# e.g. this isn't a continuous sample...
raise ValueError('Should not have discrete column to estimate')
return -digamma(n_neighbors) + digamma(n_samples) + \
n_features * np.mean(np.log(2 * radius))
else:
ind = nn.radius_neighbors(
radius=epsilon.ravel(),
return_distance=False)
nx = np.array([i.size for i in ind])
return - np.mean(digamma(nx + 1)) + digamma(n_samples) | def estimate_cont_entropy(X, epsilon=None) | Estimate the Shannon entropy of a discrete dataset.
Based off the Kraskov Estimator [1] and Kozachenko [2]
estimators for a dataset's Shannon entropy.
The function relies on nonparametric methods based on entropy
estimation from k-nearest neighbors distances as proposed
in [1] and augmented in [2] for mutual information estimation.
If X's columns logically represent discrete features,
it is better to use the calculate_disc_entropy function.
If you are unsure of which to use, estimate_entropy can
take datasets of mixed discrete and continuous functions.
Args:
X (array-like): An array-like (np arr, pandas df, etc.) with shape
(n_samples, n_features) or (n_samples)
epsilon (array-like): An array with shape (n_samples, 1) that is
the epsilon used in Kraskov Estimator. Represents the chebyshev
distance from an element to its k-th nearest neighbor in the full
dataset.
Returns:
float: A floating-point number. If epsilon is not provided,
this will be the Kozacheko Estimator of the dataset's entropy.
If epsilon is provided, this is a partial estimation of the Kraskov
entropy estimator. The bias is cancelled out when computing
mutual information.
References:
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [2] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
of a Random Vector:, Probl. Peredachi Inf., 23:2 (1987), 9-16 | 4.281231 | 4.103572 | 1.043294 |
r
X = asarray2d(X)
n_samples, n_features = X.shape
if n_features < 1:
return 0
disc_mask = _get_discrete_columns(X)
cont_mask = ~disc_mask
# If our dataset is fully discrete/continuous, do something easier
if np.all(disc_mask):
return calculate_disc_entropy(X)
elif np.all(cont_mask):
return estimate_cont_entropy(X, epsilon)
# Separate the dataset into discrete and continuous datasets d,c
disc_features = asarray2d(X[:, disc_mask])
cont_features = asarray2d(X[:, cont_mask])
entropy = 0
uniques, counts = np.unique(disc_features, axis=0, return_counts=True)
empirical_p = counts / n_samples
# $\sum_{x \in d} p(x) \times H(c(x))$
for i in range(counts.size):
unique_mask = np.all(disc_features == uniques[i], axis=1)
selected_cont_samples = cont_features[unique_mask, :]
if epsilon is None:
selected_epsilon = None
else:
selected_epsilon = epsilon[unique_mask, :]
conditional_cont_entropy = estimate_cont_entropy(
selected_cont_samples, selected_epsilon)
entropy += empirical_p[i] * conditional_cont_entropy
# H(d)
entropy += calculate_disc_entropy(disc_features)
if epsilon is None:
entropy = max(0, entropy)
return entropy | def estimate_entropy(X, epsilon=None) | r"""Estimate a dataset's Shannon entropy.
This function can take datasets of mixed discrete and continuous
features, and uses a set of heuristics to determine which functions
to apply to each.
Because this function is a subroutine in a mutual information estimator,
we employ the Kozachenko Estimator[1] for continuous features when this
function is _not_ used for mutual information and an adaptation of the
Kraskov Estimator[2] when it is.
Let X be made of continuous features c and discrete features d.
To deal with both continuous and discrete features, We use the
following reworking of entropy:
$ H(X) = H(c,d) = \sum_{x \in d} p(x) \times H(c(x)) + H(d) $
Where c(x) is a dataset that represents the rows of the continuous dataset
in the same row as a discrete column with value x in the original dataset.
Args:
X (array-like): An array-like (np arr, pandas df, etc.) with shape
(n_samples, n_features) or (n_samples)
epsilon (array-like): An array with shape (n_samples, 1) that is
the epsilon used in Kraskov Estimator. Represents the chebyshev
distance from an element to its k-th nearest neighbor in the full
dataset.
Returns:
float: A floating-point number representing the entropy in X.
If the dataset is fully discrete, an exact calculation is done.
If this is not the case and epsilon is not provided, this
will be the Kozacheko Estimator of the dataset's entropy.
If epsilon is provided, this is a partial estimation of the
Kraskov entropy estimator. The bias is cancelled out when
computing mutual information.
References:
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [2] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
of a Random Vector:, Probl. Peredachi Inf., 23:2 (1987), 9-16. | 3.268032 | 2.985978 | 1.094459 |
disc_mask = _get_discrete_columns(X)
if np.all(disc_mask):
# if all discrete columns, there's no point getting epsilon
return 0
cont_features = X[:, ~disc_mask]
nn = NearestNeighbors(metric='chebyshev', n_neighbors=NUM_NEIGHBORS)
nn.fit(cont_features)
distances, _ = nn.kneighbors()
epsilon = np.nextafter(distances[:, -1], 0)
return asarray2d(epsilon) | def _calculate_epsilon(X) | Calculates epsilon, a subroutine for the Kraskov Estimator [1]
Represents the chebyshev distance of each dataset element to its
K-th nearest neighbor.
Args:
X (array-like): An array with shape (n_samples, n_features)
Returns:
array-like: An array with shape (n_samples, 1) representing
epsilon as described above.
References:
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004. | 4.67712 | 5.099769 | 0.917124 |
xz = np.concatenate((x, z), axis=1)
yz = np.concatenate((y, z), axis=1)
xyz = np.concatenate((xz, y), axis=1)
epsilon = _calculate_epsilon(xyz)
h_xz = estimate_entropy(xz, epsilon)
h_yz = estimate_entropy(yz, epsilon)
h_xyz = estimate_entropy(xyz, epsilon)
h_z = estimate_entropy(z, epsilon)
return max(0, h_xz + h_yz - h_xyz - h_z) | def estimate_conditional_information(x, y, z) | Estimate the conditional mutual information of three datasets.
Conditional mutual information is the
mutual information of two datasets, given a third:
$ I(x;y|z) = H(x,z) + H(y,z) - H(x,y,z) - H(z) $
Where H(x) is the Shannon entropy of x. For continuous datasets,
adapts the Kraskov Estimator [1] for mutual information.
Equation 8 still holds because the epsilon terms cancel out:
Let d_x, represent the dimensionality of the continuous portion of x.
Then, we see that:
d_xz + d_yz - d_xyz - d_z =
(d_x + d_z) + (d_y + d_z) - (d_x + d_y + d_z) - d_z = 0
Args:
x (array-like): An array with shape (n_samples, n_features_x)
y (array-like): An array with shape (n_samples, n_features_y)
z (array-like): An array with shape (n_samples, n_features_z).
This is the dataset being conditioned on.
Returns:
float: A floating point number representing the conditional
mutual information of x and y given z. This calculation is
*exact* for entirely discrete datasets and *approximate*
if there are continuous columns present.
References:
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004. | 2.312034 | 2.236569 | 1.033741 |
xy = np.concatenate((x, y), axis=1)
epsilon = _calculate_epsilon(xy)
h_x = estimate_entropy(x, epsilon)
h_y = estimate_entropy(y, epsilon)
h_xy = estimate_entropy(xy, epsilon)
return max(0, h_x + h_y - h_xy) | def estimate_mutual_information(x, y) | Estimate the mutual information of two datasets.
Mutual information is a measure of dependence between
two datasets and is calculated as:
$I(x;y) = H(x) + H(y) - H(x,y)$
Where H(x) is the Shannon entropy of x. For continuous datasets,
adapts the Kraskov Estimator [1] for mutual information.
Args:
x (array-like): An array with shape (n_samples, n_features_x)
y (array-like): An array with shape (n_samples, n_features_y)
Returns:
float: A floating point number representing the mutual
information of x and y. This calculation is *exact*
for entirely discrete datasets and *approximate* if
there are continuous columns present.
References:
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004. | 2.691654 | 2.865669 | 0.939276 |
if not commit_range:
raise ValueError('commit_range cannot be empty')
result = re_find(COMMIT_RANGE_REGEX, commit_range)
if not result:
raise ValueError(
'Expected diff str of the form \'a..b\' or \'a...b\' (got {})'
.format(commit_range))
a, b = result['a'], result['b']
a, b = repo.rev_parse(a), repo.rev_parse(b)
if result['thirddot']:
a = one_or_raise(repo.merge_base(a, b))
return a, b | def get_diff_endpoints_from_commit_range(repo, commit_range) | Get endpoints of a diff given a commit range
The resulting endpoints can be diffed directly::
a, b = get_diff_endpoints_from_commit_range(repo, commit_range)
a.diff(b)
For details on specifying git diffs, see ``git diff --help``.
For details on specifying revisions, see ``git help revisions``.
Args:
repo (git.Repo): Repo object initialized with project root
commit_range (str): commit range as would be interpreted by ``git
diff`` command. Unfortunately only patterns of the form ``a..b``
and ``a...b`` are accepted. Note that the latter pattern finds the
merge-base of a and b and uses it as the starting point for the
diff.
Returns:
Tuple[git.Commit, git.Commit]: starting commit, ending commit (
inclusive)
Raises:
ValueError: commit_range is empty or ill-formed
See also:
<https://stackoverflow.com/q/7251477> | 3.835856 | 3.729162 | 1.028611 |
with repo.config_writer() as writer:
for k, value in variables.items():
section, option = k.split('.')
writer.set_value(section, option, value)
writer.release() | def set_config_variables(repo, variables) | Set config variables
Args:
repo (git.Repo): repo
variables (dict): entries of the form 'user.email': 'you@example.com' | 2.914811 | 3.406881 | 0.855566 |
changes = self.change_collector.collect_changes()
features = []
imported_okay = True
for importer, modname, modpath in changes.new_feature_info:
try:
mod = importer()
features.extend(_get_contrib_features(mod))
except (ImportError, SyntaxError):
logger.info(
'Failed to import module at {}'
.format(modpath))
logger.exception('Exception details: ')
imported_okay = False
if not imported_okay:
return False
# if no features were added at all, reject
if not features:
logger.info('Failed to collect any new features.')
return False
return all(
validate_feature_api(feature, self.X, self.y, subsample=False)
for feature in features
) | def validate(self) | Collect and validate all new features | 5.891125 | 5.319586 | 1.107441 |
if path.exists() and path.is_file():
with path.open('r') as f:
return yaml.load(f, Loader=yaml.SafeLoader)
else:
raise ConfigurationError("Couldn't find ballet.yml config file.") | def load_config_at_path(path) | Load config at exact path
Args:
path (path-like): path to config file
Returns:
dict: config dict | 3.82996 | 4.616068 | 0.829702 |
o = object()
result = get_in(config, path, default=o)
if result is not o:
return result
else:
return default | def config_get(config, *path, default=None) | Get a configuration option following a path through the config
Example usage:
>>> config_get(config,
'problem', 'problem_type_details', 'scorer',
default='accuracy')
Args:
config (dict): config dict
*path (list[str]): List of config sections and options to follow.
default (default=None): A default value to return in the case that
the option does not exist. | 4.921813 | 7.950068 | 0.619091 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.