| _id (string, lengths 2–7) | title (string, lengths 1–88) | partition (string, 3 classes) | text (string, lengths 75–19.8k) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q12400
|
PointCloudImage.open
|
train
|
def open(filename, frame='unspecified'):
"""Creates a PointCloudImage from a file.
Parameters
----------
filename : :obj:`str`
The file to load the data from. Must be one of .png, .jpg,
.npy, or .npz.
frame : :obj:`str`
A string representing the frame of reference in which the new image
lies.
Returns
-------
:obj:`PointCloudImage`
The new PointCloudImage.
"""
data = Image.load_data(filename)
return PointCloudImage(data, frame)
|
python
|
{
"resource": ""
}
|
q12401
|
NormalCloudImage.to_normal_cloud
|
train
|
def to_normal_cloud(self):
"""Convert the image to a NormalCloud object.
Returns
-------
:obj:`autolab_core.NormalCloud`
The corresponding NormalCloud.
"""
return NormalCloud(
data=self._data.reshape(
self.height *
self.width,
3).T,
frame=self._frame)
|
python
|
{
"resource": ""
}
|
q12402
|
NormalCloudImage.open
|
train
|
def open(filename, frame='unspecified'):
"""Creates a NormalCloudImage from a file.
Parameters
----------
filename : :obj:`str`
The file to load the data from. Must be one of .png, .jpg,
.npy, or .npz.
frame : :obj:`str`
A string representing the frame of reference in which the new image
lies.
Returns
-------
:obj:`NormalCloudImage`
The new NormalCloudImage.
"""
data = Image.load_data(filename)
return NormalCloudImage(data, frame)
|
python
|
{
"resource": ""
}
|
q12403
|
OrthographicIntrinsics.deproject
|
train
|
def deproject(self, depth_image):
"""Deprojects a DepthImage into a PointCloud.
Parameters
----------
depth_image : :obj:`DepthImage`
The 2D depth image to project into a point cloud.
Returns
-------
:obj:`autolab_core.PointCloud`
A 3D point cloud created from the depth image.
Raises
------
ValueError
If depth_image is not a valid DepthImage in the same reference frame
as the camera.
"""
# check valid input
if not isinstance(depth_image, DepthImage):
raise ValueError('Must provide DepthImage object for projection')
if depth_image.frame != self._frame:
raise ValueError('Cannot deproject points in frame %s from camera with frame %s' %(depth_image.frame, self._frame))
# create homogeneous pixels
row_indices = np.arange(depth_image.height)
col_indices = np.arange(depth_image.width)
pixel_grid = np.meshgrid(col_indices, row_indices)
pixels = np.c_[pixel_grid[0].flatten(), pixel_grid[1].flatten()].T
depth_data = depth_image.data.flatten()
pixels_homog = np.r_[pixels, depth_data.reshape(1, depth_data.shape[0])]
# deproject
points_3d = np.linalg.inv(self.S).dot(pixels_homog - np.tile(self.t.reshape(3,1), [1, pixels_homog.shape[1]]))
return PointCloud(data=points_3d, frame=self._frame)
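# Hedged usage sketch (not part of the original source; names are assumptions):
# deprojection inverts the affine pixel model pixels_homog = S.dot(points_3d) + t,
# i.e. points_3d = inv(S).dot(pixels_homog - t).
#
#     point_cloud = ortho_intr.deproject(depth_im)   # ortho_intr: OrthographicIntrinsics
#     # point_cloud.data is a 3 x (height * width) array in the camera frame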
|
python
|
{
"resource": ""
}
|
q12404
|
OrthographicIntrinsics.deproject_pixel
|
train
|
def deproject_pixel(self, depth, pixel):
"""Deprojects a single pixel with a given depth into a 3D point.
Parameters
----------
depth : float
The depth value at the given pixel location.
pixel : :obj:`autolab_core.Point`
A 2D point representing the pixel's location in the camera image.
Returns
-------
:obj:`autolab_core.Point`
The projected 3D point.
Raises
------
ValueError
If pixel is not a valid 2D Point in the same reference frame
as the camera.
"""
if not isinstance(pixel, Point) or pixel.dim != 2:
raise ValueError('Must provide 2D Point object for pixel projection')
if pixel.frame != self._frame:
raise ValueError('Cannot deproject pixel in frame %s from camera with frame %s' %(pixel.frame, self._frame))
point = np.r_[pixel.data, depth]
point_3d = np.linalg.inv(self.S).dot(point - self.t)
return Point(data=point_3d, frame=self._frame)
|
python
|
{
"resource": ""
}
|
q12405
|
OrthographicIntrinsics.save
|
train
|
def save(self, filename):
"""Save the CameraIntrinsics object to a .intr file.
Parameters
----------
filename : :obj:`str`
The .intr file to save the object to.
Raises
------
ValueError
If filename does not have the .intr extension.
"""
file_root, file_ext = os.path.splitext(filename)
if file_ext.lower() != INTR_EXTENSION:
raise ValueError('Extension %s not supported for OrthographicIntrinsics. Must be stored with extension %s' %(file_ext, INTR_EXTENSION))
camera_intr_dict = copy.deepcopy(self.__dict__)
f = open(filename, 'w')
json.dump(camera_intr_dict, f)
f.close()
|
python
|
{
"resource": ""
}
|
q12406
|
PhoXiSensor._connect_to_sensor
|
train
|
def _connect_to_sensor(self):
"""Connect to the sensor.
"""
name = self._device_name
try:
# Check if device is actively in list
rospy.wait_for_service('phoxi_camera/get_device_list')
device_list = rospy.ServiceProxy('phoxi_camera/get_device_list', GetDeviceList)().out
if not str(name) in device_list:
logging.error('PhoXi sensor {} not in list of active devices'.format(name))
return False
success = rospy.ServiceProxy('phoxi_camera/connect_camera', ConnectCamera)(name).success
if not success:
logging.error('Could not connect to PhoXi sensor {}'.format(name))
return False
logging.debug('Connected to PhoXi Sensor {}'.format(name))
return True
except rospy.ServiceException as e:
logging.error('Service call failed: {}'.format(e))
return False
|
python
|
{
"resource": ""
}
|
q12407
|
PhoXiSensor._depth_im_callback
|
train
|
def _depth_im_callback(self, msg):
"""Callback for handling depth images.
"""
try:
self._cur_depth_im = DepthImage(self._bridge.imgmsg_to_cv2(msg) / 1000.0, frame=self._frame)
except Exception:
self._cur_depth_im = None
|
python
|
{
"resource": ""
}
|
q12408
|
PhoXiSensor._normal_map_callback
|
train
|
def _normal_map_callback(self, msg):
"""Callback for handling normal maps.
"""
try:
self._cur_normal_map = self._bridge.imgmsg_to_cv2(msg)
except Exception:
self._cur_normal_map = None
|
python
|
{
"resource": ""
}
|
q12409
|
RgbdDetection.image
|
train
|
def image(self, render_mode):
""" Get the image associated with a particular render mode """
if render_mode == RenderMode.SEGMASK:
return self.query_im
elif render_mode == RenderMode.COLOR:
return self.color_im
elif render_mode == RenderMode.DEPTH:
return self.depth_im
else:
raise ValueError('Render mode %s not supported' %(render_mode))
|
python
|
{
"resource": ""
}
|
q12410
|
RgbdDetectorFactory.detector
|
train
|
def detector(detector_type):
""" Returns a detector of the specified type. """
if detector_type == 'point_cloud_box':
return PointCloudBoxDetector()
elif detector_type == 'rgbd_foreground_mask_query':
return RgbdForegroundMaskQueryImageDetector()
elif detector_type == 'rgbd_foreground_mask':
return RgbdForegroundMaskDetector()
raise ValueError('Detector type %s not understood' %(detector_type))
|
python
|
{
"resource": ""
}
|
q12411
|
AlexNet._parse_config
|
train
|
def _parse_config(self, config):
""" Parses a tensorflow configuration """
self._batch_size = config['batch_size']
self._im_height = config['im_height']
self._im_width = config['im_width']
self._num_channels = config['channels']
self._output_layer = config['out_layer']
self._feature_layer = config['feature_layer']
self._out_size = None
if 'out_size' in config.keys():
self._out_size = config['out_size']
self._input_arr = np.zeros([self._batch_size, self._im_height,
self._im_width, self._num_channels])
if self._model_dir is None:
self._net_data = np.load(config['caffe_weights']).item()
self._mean = np.load(config['mean_file'])
self._model_filename = None
else:
self._net_data = None
self._mean = np.load(os.path.join(self._model_dir, 'mean.npy'))
self._model_filename = os.path.join(self._model_dir, 'model.ckpt')
|
python
|
{
"resource": ""
}
|
q12412
|
AlexNet._load
|
train
|
def _load(self):
""" Loads a model into weights """
if self._model_filename is None:
raise ValueError('Model filename not specified')
# read the input image
self._graph = tf.Graph()
with self._graph.as_default():
# read in filenames
reader = tf.train.NewCheckpointReader(self._model_filename)
# load AlexNet weights
weights = AlexNetWeights()
weights.conv1W = tf.Variable(reader.get_tensor("Variable"))
weights.conv1b = tf.Variable(reader.get_tensor("Variable_1"))
weights.conv2W = tf.Variable(reader.get_tensor("Variable_2"))
weights.conv2b = tf.Variable(reader.get_tensor("Variable_3"))
weights.conv3W = tf.Variable(reader.get_tensor("Variable_4"))
weights.conv3b = tf.Variable(reader.get_tensor("Variable_5"))
weights.conv4W = tf.Variable(reader.get_tensor("Variable_6"))
weights.conv4b = tf.Variable(reader.get_tensor("Variable_7"))
weights.conv5W = tf.Variable(reader.get_tensor("Variable_8"))
weights.conv5b = tf.Variable(reader.get_tensor("Variable_9"))
weights.fc6W = tf.Variable(reader.get_tensor("Variable_10"))
weights.fc6b = tf.Variable(reader.get_tensor("Variable_11"))
weights.fc7W = tf.Variable(reader.get_tensor("Variable_12"))
weights.fc7b = tf.Variable(reader.get_tensor("Variable_13"))
weights.fc8W = tf.Variable(reader.get_tensor("Variable_14"))
weights.fc8b = tf.Variable(reader.get_tensor("Variable_15"))
# form network
self._input_node = tf.placeholder(tf.float32, (self._batch_size, self._im_height, self._im_width, self._num_channels))
self._output_tensor = self.build_alexnet(weights)
self._feature_tensor = self.build_alexnet(weights, output_layer=self._feature_layer)
self._initialized = True
|
python
|
{
"resource": ""
}
|
q12413
|
AlexNet._initialize
|
train
|
def _initialize(self):
""" Open from caffe weights """
self._graph = tf.Graph()
with self._graph.as_default():
self._input_node = tf.placeholder(tf.float32, (self._batch_size, self._im_height, self._im_width, self._num_channels))
weights = self.build_alexnet_weights()
self._output_tensor = self.build_alexnet(weights)
self._feature_tensor = self.build_alexnet(weights, output_layer=self._feature_layer)
self._initialized = True
|
python
|
{
"resource": ""
}
|
q12414
|
AlexNet.open_session
|
train
|
def open_session(self):
""" Open tensorflow session. Exposed for memory management. """
with self._graph.as_default():
init = tf.initialize_all_variables()
self._sess = tf.Session()
self._sess.run(init)
|
python
|
{
"resource": ""
}
|
q12415
|
AlexNet.close_session
|
train
|
def close_session(self):
""" Close tensorflow session. Exposes for memory management. """
with self._graph.as_default():
self._sess.close()
self._sess = None
|
python
|
{
"resource": ""
}
|
q12416
|
AlexNet.predict
|
train
|
def predict(self, image_arr, featurize=False):
""" Predict a set of images in batches.
Parameters
----------
image_arr : NxHxWxC :obj:`numpy.ndarray`
input set of images in a num_images x image height x image width x image channels array (must match parameters of network)
featurize : bool
whether or not to use the featurization layer or classification output layer
Returns
-------
:obj:`numpy.ndarray`
num_images x feature_dim containing the output values for each input image
"""
# setup prediction
num_images = image_arr.shape[0]
output_arr = None
# predict by filling in image array in batches
close_sess = False
if not self._initialized and self._dynamic_load:
self._load()
with self._graph.as_default():
if self._sess is None:
close_sess = True
self.open_session()
i = 0
while i < num_images:
dim = min(self._batch_size, num_images-i)
cur_ind = i
end_ind = cur_ind + dim
self._input_arr[:dim,:,:,:] = image_arr[cur_ind:end_ind,:,:,:] - self._mean
if featurize:
output = self._sess.run(self._feature_tensor,
feed_dict={self._input_node: self._input_arr})
else:
output = self._sess.run(self._output_tensor,
feed_dict={self._input_node: self._input_arr})
if output_arr is None:
output_arr = output
else:
output_arr = np.r_[output_arr, output]
i = end_ind
if close_sess:
self.close_session()
return output_arr[:num_images,...]
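# Hedged usage sketch (illustrative only; `net`, `config` and `images` are assumptions):
#
#     net = AlexNet(config)                           # batch_size, im_height, ... from config
#     feats = net.predict(images, featurize=True)     # images: N x H x W x C array
#     # feats has one row per input image; the trailing slice output_arr[:num_images, ...]
#     # trims padding rows from the final partial batch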
|
python
|
{
"resource": ""
}
|
q12417
|
AlexNet.build_alexnet_weights
|
train
|
def build_alexnet_weights(self):
""" Build a set of convnet weights for AlexNet """
net_data = self._net_data
#conv1
#conv(11, 11, 96, 4, 4, padding='VALID', name='conv1')
k_h = 11; k_w = 11; c_o = 96; s_h = 4; s_w = 4
conv1W = tf.Variable(net_data["conv1"][0])
conv1b = tf.Variable(net_data["conv1"][1])
#conv2
#conv(5, 5, 256, 1, 1, group=2, name='conv2')
k_h = 5; k_w = 5; c_o = 256; s_h = 1; s_w = 1; group = 2
conv2W = tf.Variable(net_data["conv2"][0])
conv2b = tf.Variable(net_data["conv2"][1])
#conv3
#conv(3, 3, 384, 1, 1, name='conv3')
k_h = 3; k_w = 3; c_o = 384; s_h = 1; s_w = 1; group = 1
conv3W = tf.Variable(net_data["conv3"][0])
conv3b = tf.Variable(net_data["conv3"][1])
#conv4
#conv(3, 3, 384, 1, 1, group=2, name='conv4')
k_h = 3; k_w = 3; c_o = 384; s_h = 1; s_w = 1; group = 2
conv4W = tf.Variable(net_data["conv4"][0])
conv4b = tf.Variable(net_data["conv4"][1])
#conv5
#conv(3, 3, 256, 1, 1, group=2, name='conv5')
k_h = 3; k_w = 3; c_o = 256; s_h = 1; s_w = 1; group = 2
conv5W = tf.Variable(net_data["conv5"][0])
conv5b = tf.Variable(net_data["conv5"][1])
#fc6
#fc(4096, name='fc6')
fc6_in_size = net_data["fc6"][0].shape[0]
fc6_out_size = net_data["fc6"][0].shape[1]
fc6W = tf.Variable(net_data["fc6"][0])
fc6b = tf.Variable(net_data["fc6"][1])
#fc7
#fc(4096, name='fc7')
fc7_in_size = fc6_out_size
fc7_out_size = net_data["fc7"][0].shape[1]
fc7W = tf.Variable(net_data["fc7"][0])
fc7b = tf.Variable(net_data["fc7"][1])
#fc8
#fc(num_cats, relu=False, name='fc8')
fc8_in_size = fc7_out_size
fc8_out_size = self._out_size
fc8W = tf.Variable(tf.truncated_normal([fc8_in_size, fc8_out_size],
stddev=0.01,
seed=None))
fc8b = tf.Variable(tf.constant(0.0, shape=[fc8_out_size]))
# make return object
weights = AlexNetWeights()
weights.conv1W = conv1W
weights.conv1b = conv1b
weights.conv2W = conv2W
weights.conv2b = conv2b
weights.conv3W = conv3W
weights.conv3b = conv3b
weights.conv4W = conv4W
weights.conv4b = conv4b
weights.conv5W = conv5W
weights.conv5b = conv5b
weights.fc6W = fc6W
weights.fc6b = fc6b
weights.fc7W = fc7W
weights.fc7b = fc7b
weights.fc8W = fc8W
weights.fc8b = fc8b
return weights
|
python
|
{
"resource": ""
}
|
q12418
|
CNNBatchFeatureExtractor._forward_pass
|
train
|
def _forward_pass(self, images):
""" Forward pass a list of images through the CNN """
# form image array
num_images = len(images)
if num_images == 0:
return None
for image in images:
if not isinstance(image, Image):
new_images = []
for image in images:
if len(image.shape) > 2:
new_images.append(ColorImage(image, frame='unspecified'))
elif image.dtype == np.float32 or image.dtype == np.float64:
new_images.append(DepthImage(image, frame='unspecified'))
else:
raise ValueError('Image type not understood')
images = new_images
break
im_height = images[0].height
im_width = images[0].width
channels = images[0].channels
tensor_channels = 3
image_arr = np.zeros([num_images, im_height, im_width, tensor_channels])
for j, image in enumerate(images):
if channels == 3:
image_arr[j,:,:,:] = image.raw_data
else:
image_arr[j,:,:,:] = np.tile(image.raw_data, [1,1,1,3])
# predict
fp_start = time.time()
final_blobs = self.cnn_.featurize(image_arr)
fp_stop = time.time()
logging.debug('Featurization took %f sec per image' %((fp_stop - fp_start) / len(images)))
return final_blobs.reshape(final_blobs.shape[0], -1)
|
python
|
{
"resource": ""
}
|
q12419
|
Engine.repositories
|
train
|
def repositories(self):
"""
Returns a DataFrame with the data about the repositories found at
the specified repositories path in the form of siva files.
>>> repos_df = engine.repositories
:rtype: RepositoriesDataFrame
"""
return RepositoriesDataFrame(self.__engine.getRepositories(),
self.session, self.__implicits)
|
python
|
{
"resource": ""
}
|
q12420
|
Engine.blobs
|
train
|
def blobs(self, repository_ids=[], reference_names=[], commit_hashes=[]):
"""
Retrieves the blobs of a list of repositories, reference names and commit hashes.
So the result will be a DataFrame of all the blobs in the given commits that are
in the given references that belong to the given repositories.
>>> blobs_df = engine.blobs(repo_ids, ref_names, hashes)
Calling this function with no arguments is the same as:
>>> engine.repositories.references.commits.tree_entries.blobs
:param repository_ids: list of repository ids to filter by (optional)
:type repository_ids: list of strings
:param reference_names: list of reference names to filter by (optional)
:type reference_names: list of strings
:param commit_hashes: list of hashes to filter by (optional)
:type commit_hashes: list of strings
:rtype: BlobsDataFrame
"""
if not isinstance(repository_ids, list):
raise Exception("repository_ids must be a list")
if not isinstance(reference_names, list):
raise Exception("reference_names must be a list")
if not isinstance(commit_hashes, list):
raise Exception("commit_hashes must be a list")
return BlobsDataFrame(self.__engine.getBlobs(repository_ids,
reference_names,
commit_hashes),
self.session,
self.__implicits)
|
python
|
{
"resource": ""
}
|
q12421
|
Engine.from_metadata
|
train
|
def from_metadata(self, db_path, db_name='engine_metadata.db'):
"""
Registers in the current session the views of the MetadataSource so the
data is obtained from the metadata database instead of reading the
repositories with the DefaultSource.
:param db_path: path to the folder that contains the database.
:type db_path: str
:param db_name: name of the database file (engine_metadata.db) by default.
:type db_name: str
:returns: the same instance of the engine
:rtype: Engine
"""
self.__engine.fromMetadata(db_path, db_name)
return self
|
python
|
{
"resource": ""
}
|
q12422
|
SourcedDataFrame.__generate_method
|
train
|
def __generate_method(name):
"""
Wraps the DataFrame's original method by name to return the derived class instance.
"""
try:
func = getattr(DataFrame, name)
except AttributeError as e:
# PySpark version is too old
def func(self, *args, **kwargs):
raise e
return func
wraps = getattr(functools, "wraps", lambda _: lambda f: f) # py3.4+
@wraps(func)
def _wrapper(self, *args, **kwargs):
dataframe = func(self, *args, **kwargs)
if self.__class__ != SourcedDataFrame \
and isinstance(self, SourcedDataFrame) \
and isinstance(dataframe, DataFrame):
return self.__class__(dataframe._jdf, self._session, self._implicits)
return dataframe
return _wrapper
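# Hedged usage sketch (an assumption about how the wrapper is attached, not shown here):
# binding the generated wrapper onto the class keeps inherited DataFrame methods such as
# select/filter returning the derived class, e.g.
#
#     SourcedDataFrame.select = SourcedDataFrame._SourcedDataFrame__generate_method('select')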
|
python
|
{
"resource": ""
}
|
q12423
|
RepositoriesDataFrame.references
|
train
|
def references(self):
"""
Returns the joined DataFrame of references and repositories.
>>> refs_df = repos_df.references
:rtype: ReferencesDataFrame
"""
return ReferencesDataFrame(self._engine_dataframe.getReferences(),
self._session, self._implicits)
|
python
|
{
"resource": ""
}
|
q12424
|
RepositoriesDataFrame.remote_references
|
train
|
def remote_references(self):
"""
Returns a new DataFrame with only the remote references of the
current repositories.
>>> remote_refs_df = repos_df.remote_references
:rtype: ReferencesDataFrame
"""
return ReferencesDataFrame(self._engine_dataframe.getRemoteReferences(),
self._session, self._implicits)
|
python
|
{
"resource": ""
}
|
q12425
|
RepositoriesDataFrame.master_ref
|
train
|
def master_ref(self):
"""
Filters the current DataFrame references to only contain those rows whose reference is master.
>>> master_df = repos_df.master_ref
:rtype: ReferencesDataFrame
"""
return ReferencesDataFrame(self._engine_dataframe.getReferences().getHEAD(),
self._session, self._implicits)
|
python
|
{
"resource": ""
}
|
q12426
|
ReferencesDataFrame.head_ref
|
train
|
def head_ref(self):
"""
Filters the current DataFrame to only contain those rows whose reference is HEAD.
>>> heads_df = refs_df.head_ref
:rtype: ReferencesDataFrame
"""
return ReferencesDataFrame(self._engine_dataframe.getHEAD(),
self._session, self._implicits)
|
python
|
{
"resource": ""
}
|
q12427
|
ReferencesDataFrame.master_ref
|
train
|
def master_ref(self):
"""
Filters the current DataFrame to only contain those rows whose reference is master.
>>> master_df = refs_df.master_ref
:rtype: ReferencesDataFrame
"""
return ReferencesDataFrame(self._engine_dataframe.getMaster(),
self._session, self._implicits)
|
python
|
{
"resource": ""
}
|
q12428
|
ReferencesDataFrame.ref
|
train
|
def ref(self, ref):
"""
Filters the current DataFrame to only contain those rows whose reference is the given
reference name.
>>> heads_df = refs_df.ref('refs/heads/HEAD')
:param ref: Reference to get
:type ref: str
:rtype: ReferencesDataFrame
"""
return ReferencesDataFrame(self.filter(self.name == ref)._jdf,
self._session, self._implicits)
|
python
|
{
"resource": ""
}
|
q12429
|
ReferencesDataFrame.all_reference_commits
|
train
|
def all_reference_commits(self):
"""
Returns the current DataFrame joined with the commits DataFrame, with all of the commits
in all references.
>>> commits_df = refs_df.all_reference_commits
Take into account that getting all the commits will lead to a lot of repeated tree
entries and blobs, thus making your query very slow.
Most of the time, you just want the HEAD commit of each reference:
>>> commits_df = refs_df.commits
:rtype: CommitsDataFrame
"""
return CommitsDataFrame(self._engine_dataframe.getAllReferenceCommits(), self._session, self._implicits)
|
python
|
{
"resource": ""
}
|
q12430
|
ReferencesDataFrame.blobs
|
train
|
def blobs(self):
"""
Returns this DataFrame joined with the blobs DataSource.
>>> blobs_df = refs_df.blobs
:rtype: BlobsDataFrame
"""
return BlobsDataFrame(self._engine_dataframe.getBlobs(), self._session, self._implicits)
|
python
|
{
"resource": ""
}
|
q12431
|
CommitsDataFrame.tree_entries
|
train
|
def tree_entries(self):
"""
Returns this DataFrame joined with the tree entries DataSource.
>>> entries_df = commits_df.tree_entries
:rtype: TreeEntriesDataFrame
"""
return TreeEntriesDataFrame(self._engine_dataframe.getTreeEntries(), self._session, self._implicits)
|
python
|
{
"resource": ""
}
|
q12432
|
BlobsDataFrame.classify_languages
|
train
|
def classify_languages(self):
"""
Returns a new DataFrame with the language data of any blob added to
its row.
>>> blobs_lang_df = blobs_df.classify_languages
:rtype: BlobsWithLanguageDataFrame
"""
return BlobsWithLanguageDataFrame(self._engine_dataframe.classifyLanguages(),
self._session, self._implicits)
|
python
|
{
"resource": ""
}
|
q12433
|
BlobsDataFrame.extract_uasts
|
train
|
def extract_uasts(self):
"""
Returns a new DataFrame with the parsed UAST data of any blob added to
its row.
>>> blobs_df.extract_uasts
:rtype: UASTsDataFrame
"""
return UASTsDataFrame(self._engine_dataframe.extractUASTs(),
self._session, self._implicits)
|
python
|
{
"resource": ""
}
|
q12434
|
UASTsDataFrame.query_uast
|
train
|
def query_uast(self, query, query_col='uast', output_col='result'):
"""
Queries the UAST of a file with the given query to get specific nodes.
>>> rows = uasts_df.query_uast('//*[@roleIdentifier]').collect()
>>> rows = uasts_df.query_uast('//*[@roleIdentifier]', 'foo', 'bar')
:param query: xpath query
:type query: str
:param query_col: column containing the list of nodes to query
:type query_col: str
:param output_col: column to place the result of the query
:type output_col: str
:rtype: UASTsDataFrame
"""
return UASTsDataFrame(self._engine_dataframe.queryUAST(query,
query_col,
output_col),
self._session, self._implicits)
|
python
|
{
"resource": ""
}
|
q12435
|
UASTsDataFrame.extract_tokens
|
train
|
def extract_tokens(self, input_col='result', output_col='tokens'):
"""
Extracts the tokens from UAST nodes.
>>> rows = uasts_df.query_uast('//*[@roleIdentifier]').extract_tokens().collect()
>>> rows = uasts_df.query_uast('//*[@roleIdentifier]', output_col='foo').extract_tokens('foo', 'bar')
:param input_col: column containing the list of nodes to extract tokens from
:type input_col: str
:param output_col: column to place the resultant tokens
:type output_col: str
:rtype: UASTsDataFrame
"""
return UASTsDataFrame(self._engine_dataframe.extractTokens(input_col, output_col),
self._session, self._implicits)
|
python
|
{
"resource": ""
}
|
q12436
|
GSBlobStore.delete
|
train
|
def delete(self, bucket: str, key: str):
"""
Deletes an object in a bucket. If the operation definitely did not delete anything, return False. Any other
return value is treated as something was possibly deleted.
"""
bucket_obj = self._ensure_bucket_loaded(bucket)
try:
bucket_obj.delete_blob(key)
except NotFound:
return False
|
python
|
{
"resource": ""
}
|
q12437
|
API_WRAPPER.request
|
train
|
def request(self, shards, full_response, return_status_tuple=False):
"""Request the API
This method is wrapped by similar functions
"""
try:
resp = self._request(shards)
if return_status_tuple:
return (self._parser(resp, full_response), True)
else:
return self._parser(resp, full_response)
except (ConflictError, CloudflareServerError, InternalServerError) as exc:
# The Retry system
if return_status_tuple:
return (None, False)
elif self.api_mother.do_retry:
# TODO
# request_limit = 0
sleep(self.api_mother.retry_sleep)
resp = self.request(shards, full_response, True)
while not resp[1]:
sleep(self.api_mother.retry_sleep)
resp = self.request(shards, full_response, True)
return resp[0]
else:
raise exc
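# Hedged usage sketch (names are assumptions): with do_retry enabled the wrapper
# keeps re-issuing the request until the (result, success) tuple reports success:
#
#     result = wrapper.request(shards, full_response=False)
#     # ConflictError / CloudflareServerError / InternalServerError trigger a sleep of
#     # retry_sleep seconds followed by request(..., return_status_tuple=True)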
|
python
|
{
"resource": ""
}
|
q12438
|
API_WRAPPER.command
|
train
|
def command(self, command, full_response=False, **kwargs): # pragma: no cover
"""Method Interface to the command API for Nationstates"""
command = Shard(c=command)
return self.get_shards(*(command, Shard(**kwargs)), full_response=full_response)
|
python
|
{
"resource": ""
}
|
q12439
|
Nation.send_telegram
|
train
|
def send_telegram(self, telegram=None, client_key=None, tgid=None, key=None): # pragma: no cover
"""Sends Telegram. Can either provide a telegram directly, or provide the api details and created internally
"""
if telegram:
pass
else:
telegram = self.api_mother.telegram(client_key, tgid, key)
telegram.send_telegram(self.nation_name)
|
python
|
{
"resource": ""
}
|
q12440
|
Nation.verify
|
train
|
def verify(self, checksum=None, token=None, full_response=False):
"""Wraps around the verify API"""
payload = {"checksum":checksum, "a":"verify"}
if token:
payload.update({"token":token})
return self.get_shards(Shard(**payload), full_response=True)
|
python
|
{
"resource": ""
}
|
q12441
|
BlobStore.upload_file_handle
|
train
|
def upload_file_handle(
self,
bucket: str,
key: str,
src_file_handle: typing.BinaryIO,
content_type: str=None,
metadata: dict=None):
"""
Saves the contents of a file handle as the contents of an object in a bucket.
"""
raise NotImplementedError()
|
python
|
{
"resource": ""
}
|
q12442
|
S3BlobStore.find_next_missing_parts
|
train
|
def find_next_missing_parts(
self,
bucket: str,
key: str,
upload_id: str,
part_count: int,
search_start: int=1,
return_count: int=1) -> typing.Sequence[int]:
"""
Given a `bucket`, `key`, and `upload_id`, find the next N missing parts of a multipart upload, where
N=`return_count`. If `search_start` is provided, start the search at part M, where M=`search_start`.
`part_count` is the number of parts expected for the upload.
Note that the return value may contain fewer than N parts.
"""
if part_count < search_start:
raise ValueError("part_count must be at least search_start")
result = list()
while True:
kwargs = dict(Bucket=bucket, Key=key, UploadId=upload_id) # type: dict
if search_start > 1:
kwargs['PartNumberMarker'] = search_start - 1
# retrieve all the parts after the one we *think* we need to start from.
parts_resp = self.s3_client.list_parts(**kwargs)
# build a set of all the parts known to be uploaded, detailed in this request.
parts_map = set() # type: typing.Set[int]
for part_detail in parts_resp.get('Parts', []):
parts_map.add(part_detail['PartNumber'])
while True:
if search_start not in parts_map:
# not found, add it to the list of parts we still need.
result.append(search_start)
# have we met our requirements?
if len(result) == return_count or search_start == part_count:
return result
search_start += 1
if parts_resp['IsTruncated'] and search_start == parts_resp['NextPartNumberMarker']:
# finished examining the results of this batch, move onto the next one
break
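# Hedged usage sketch (bucket, key and upload_id are placeholders):
#
#     missing = blobstore.find_next_missing_parts(
#         'my-bucket', 'my-key', upload_id, part_count=10000,
#         search_start=1, return_count=4)
#     # e.g. [2, 5, 6, 9] -- up to four part numbers not yet uploaded, in ascending order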
|
python
|
{
"resource": ""
}
|
q12443
|
scanf_compile
|
train
|
def scanf_compile(format, collapseWhitespace=True):
"""
Translate the format into a regular expression
For example:
>>> format_re, casts = scanf_compile('%s - %d errors, %d warnings')
>>> print(format_re.pattern)
(\S+) \- ([+-]?\d+) errors, ([+-]?\d+) warnings
Translated formats are cached for faster reuse
"""
format_pat = ""
cast_list = []
i = 0
length = len(format)
while i < length:
found = None
for token, pattern, cast in scanf_translate:
found = token.match(format, i)
if found:
if cast: # cast != None
cast_list.append(cast)
groups = found.groupdict() or found.groups()
if groups:
pattern = pattern % groups
format_pat += pattern
i = found.end()
break
if not found:
char = format[i]
# escape special characters
if char in "|^$()[]-.+*?{}<>\\":
format_pat += "\\"
format_pat += char
i += 1
if DEBUG:
print("DEBUG: %r -> %s" % (format, format_pat))
if collapseWhitespace:
format_pat = re.sub(r'\s+', r'\\s+', format_pat)
format_re = re.compile(format_pat)
return format_re, cast_list
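# Hedged usage sketch (assumes the module-level scanf_translate table maps %s/%d to the
# patterns shown in the docstring):
#
#     format_re, casts = scanf_compile('%s - %d errors, %d warnings')
#     m = format_re.match('foo.c - 5 errors, 2 warnings')
#     values = tuple(cast(g) for cast, g in zip(casts, m.groups()))
#     # values == ('foo.c', 5, 2)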
|
python
|
{
"resource": ""
}
|
q12444
|
extractdata
|
train
|
def extractdata(pattern, text=None, filepath=None):
"""
Read through an entire file or body of text one line at a time. Parse each line that matches the supplied
pattern string and ignore the rest.
If *text* is supplied, it will be parsed according to the *pattern* string.
If *text* is not supplied, the file at *filepath* will be opened and parsed.
"""
y = []
if text is None:
textsource = open(filepath, 'r')
else:
textsource = text.splitlines()
for line in textsource:
match = scanf(pattern, line)
if match:
if len(y) == 0:
y = [[s] for s in match]
else:
for i, ydata in enumerate(y):
ydata.append(match[i])
if text is None:
textsource.close()
return y
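# Hedged usage sketch (assumes scanf() from this module returns one tuple of cast values
# per matching line):
#
#     cols = extractdata('x %d %f', text='x 1 2.0\nx 3 4.0')
#     # cols == [[1, 3], [2.0, 4.0]] -- one list per captured field, non-matching lines skipped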
|
python
|
{
"resource": ""
}
|
q12445
|
Nationstates.nation
|
train
|
def nation(self, nation_name, password=None, autologin=None):
"""Setup access to the Nation API with the Nation object
:param nation_name: Name of the nation
:param password: (Optional) password for this nation
:param autologin: (Optional) autologin for this nation
:type nation_name: str
:type password: str
:type autologin: str
:returns: Nation Object based off nation_name
:rtype: Nation
"""
return Nation(nation_name, self, password=password, autologin=autologin)
|
python
|
{
"resource": ""
}
|
q12446
|
Nationstates.wa
|
train
|
def wa(self, chamber):
"""Setup access to the World Assembly API with the WorldAssembly object
:param chamber: Chamber of the WA
:type chamber: str, int
:returns: WorldAssembly Object based off chamber
:rtype: WorldAssembly
"""
if isinstance(chamber, int):
chamber = str(chamber)
return WorldAssembly(chamber, self)
|
python
|
{
"resource": ""
}
|
q12447
|
apply_patch
|
train
|
def apply_patch(diffs):
""" Not ready for use yet """
pass
if isinstance(diffs, patch.diff):
diffs = [diffs]
for diff in diffs:
if diff.header.old_path == '/dev/null':
text = []
else:
with open(diff.header.old_path) as f:
text = f.read()
new_text = apply_diff(diff, text)
with open(diff.header.new_path, 'w') as f:
f.write(new_text)
|
python
|
{
"resource": ""
}
|
q12448
|
b64decode_url
|
train
|
def b64decode_url(istr):
""" JWT Tokens may be truncated without the usual trailing padding '='
symbols. Compensate by padding to the nearest 4 bytes.
"""
istr = encode_safe(istr)
try:
return urlsafe_b64decode(istr + '=' * (4 - (len(istr) % 4)))
except TypeError as e:
raise Error('Unable to decode base64: %s' % (e))
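# Hedged note (illustration, not part of the original source): for a 10-character token
# segment, 4 - (10 % 4) == 2, so two '=' characters are appended before decoding; when the
# length is already a multiple of 4 this expression appends four '=', whereas the form
# '=' * (-len(istr) % 4) would append none in that case.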
|
python
|
{
"resource": ""
}
|
q12449
|
_validate
|
train
|
def _validate(claims, validate_claims, expiry_seconds):
""" Validate expiry related claims.
If validate_claims is False, do nothing.
Otherwise, validate the exp and nbf claims if they are present, and
validate the iat claim if expiry_seconds is provided.
"""
if not validate_claims:
return
now = time()
# TODO: implement support for clock skew
# The exp (expiration time) claim identifies the expiration time on or
# after which the JWT MUST NOT be accepted for processing. The
# processing of the exp claim requires that the current date/time MUST
# be before the expiration date/time listed in the exp claim.
try:
expiration_time = claims[CLAIM_EXPIRATION_TIME]
except KeyError:
pass
else:
_check_expiration_time(now, expiration_time)
# The iat (issued at) claim identifies the time at which the JWT was
# issued. This claim can be used to determine the age of the JWT.
# If expiry_seconds is provided, and the iat claims is present,
# determine the age of the token and check if it has expired.
try:
issued_at = claims[CLAIM_ISSUED_AT]
except KeyError:
pass
else:
if expiry_seconds is not None:
_check_expiration_time(now, issued_at + expiry_seconds)
# The nbf (not before) claim identifies the time before which the JWT
# MUST NOT be accepted for processing. The processing of the nbf claim
# requires that the current date/time MUST be after or equal to the
# not-before date/time listed in the nbf claim.
try:
not_before = claims[CLAIM_NOT_BEFORE]
except KeyError:
pass
else:
_check_not_before(now, not_before)
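# Hedged usage sketch (assumes the CLAIM_* constants are the standard JWT keys
# 'exp', 'iat' and 'nbf'):
#
#     claims = {'exp': time() + 60, 'iat': time(), 'nbf': time() - 5}
#     _validate(claims, validate_claims=True, expiry_seconds=3600)
#     # passes: exp is in the future, iat + 3600 has not elapsed, nbf is in the past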
|
python
|
{
"resource": ""
}
|
q12450
|
gauge
|
train
|
def gauge(key, gauge=None, default=float("nan"), **dims):
"""Adds gauge with dimensions to the global pyformance registry"""
return global_registry().gauge(key, gauge=gauge, default=default, **dims)
|
python
|
{
"resource": ""
}
|
q12451
|
count_calls_with_dims
|
train
|
def count_calls_with_dims(**dims):
"""Decorator to track the number of times a function is called
with with dimensions.
"""
def counter_wrapper(fn):
@functools.wraps(fn)
def fn_wrapper(*args, **kwargs):
counter("%s_calls" %
pyformance.registry.get_qualname(fn), **dims).inc()
return fn(*args, **kwargs)
return fn_wrapper
return counter_wrapper
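# Hedged usage sketch (dimension names are illustrative):
#
#     @count_calls_with_dims(service='billing', endpoint='charge')
#     def charge(amount):
#         return amount
#
#     charge(10)   # increments the '<qualname>_calls' counter tagged with those dimensions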
|
python
|
{
"resource": ""
}
|
q12452
|
meter_calls_with_dims
|
train
|
def meter_calls_with_dims(**dims):
"""Decorator to track the rate at which a function is called
with dimensions.
"""
def meter_wrapper(fn):
@functools.wraps(fn)
def fn_wrapper(*args, **kwargs):
meter("%s_calls" %
pyformance.registry.get_qualname(fn), **dims).mark()
return fn(*args, **kwargs)
return fn_wrapper
return meter_wrapper
|
python
|
{
"resource": ""
}
|
q12453
|
hist_calls
|
train
|
def hist_calls(fn):
"""
Decorator to check the distribution of return values of a function.
"""
@functools.wraps(fn)
def wrapper(*args, **kwargs):
_histogram = histogram(
"%s_calls" % pyformance.registry.get_qualname(fn))
rtn = fn(*args, **kwargs)
if type(rtn) in (int, float):
_histogram.add(rtn)
return rtn
return wrapper
|
python
|
{
"resource": ""
}
|
q12454
|
hist_calls_with_dims
|
train
|
def hist_calls_with_dims(**dims):
"""Decorator to check the distribution of return values of a
function with dimensions.
"""
def hist_wrapper(fn):
@functools.wraps(fn)
def fn_wrapper(*args, **kwargs):
_histogram = histogram(
"%s_calls" % pyformance.registry.get_qualname(fn), **dims)
rtn = fn(*args, **kwargs)
if type(rtn) in (int, float):
_histogram.add(rtn)
return rtn
return fn_wrapper
return hist_wrapper
|
python
|
{
"resource": ""
}
|
q12455
|
time_calls_with_dims
|
train
|
def time_calls_with_dims(**dims):
"""Decorator to time the execution of the function with dimensions."""
def time_wrapper(fn):
@functools.wraps(fn)
def fn_wrapper(*args, **kwargs):
_timer = timer("%s_calls" %
pyformance.registry.get_qualname(fn), **dims)
with _timer.time(fn=pyformance.registry.get_qualname(fn)):
return fn(*args, **kwargs)
return fn_wrapper
return time_wrapper
|
python
|
{
"resource": ""
}
|
q12456
|
MetricsRegistry.add
|
train
|
def add(self, key, metric, **dims):
"""Adds custom metric instances to the registry with dimensions
which are not created with their constructors default arguments
"""
return super(MetricsRegistry, self).add(
self.metadata.register(key, **dims), metric)
|
python
|
{
"resource": ""
}
|
q12457
|
_BaseSignalFxIngestClient.send
|
train
|
def send(self, cumulative_counters=None, gauges=None, counters=None):
"""Send the given metrics to SignalFx.
Args:
cumulative_counters (list): a list of dictionaries representing the
cumulative counters to report.
gauges (list): a list of dictionaries representing the gauges to
report.
counters (list): a list of dictionaries representing the counters
to report.
"""
if not gauges and not cumulative_counters and not counters:
return
data = {
'cumulative_counter': cumulative_counters,
'gauge': gauges,
'counter': counters,
}
_logger.debug('Sending datapoints to SignalFx: %s', data)
for metric_type, datapoints in data.items():
if not datapoints:
continue
if not isinstance(datapoints, list):
raise TypeError('Datapoints not of type list: %s' % datapoints)
for datapoint in datapoints:
self._add_extra_dimensions(datapoint)
self._add_to_queue(metric_type, datapoint)
# Ensure the sending thread is running.
self._start_thread()
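# Hedged usage sketch (datapoint field names follow the SignalFx convention; `client`
# is an assumption):
#
#     client.send(gauges=[{'metric': 'cpu.utilization',
#                          'value': 42.7,
#                          'dimensions': {'host': 'web-1'}}])
#     # each datapoint gets the extra dimensions added, is queued by metric type, and the
#     # background sending thread is started if it is not already running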
|
python
|
{
"resource": ""
}
|
q12458
|
_BaseSignalFxIngestClient.send_event
|
train
|
def send_event(self, event_type, category=None, dimensions=None,
properties=None, timestamp=None):
"""Send an event to SignalFx.
Args:
event_type (string): the event type (name of the event time
series).
category (string): the category of the event.
dimensions (dict): a map of event dimensions.
properties (dict): a map of extra properties on that event.
timestamp (float): timestamp when the event has occurred
"""
if category and category not in SUPPORTED_EVENT_CATEGORIES:
raise ValueError('Event category is not one of the supported ' +
'types: {' +
', '.join(SUPPORTED_EVENT_CATEGORIES) + '}')
data = {
'eventType': event_type,
'category': category,
'dimensions': dimensions or {},
'properties': properties or {},
'timestamp': int(timestamp) if timestamp else None,
}
_logger.debug('Sending event to SignalFx: %s', data)
self._add_extra_dimensions(data)
return self._send_event(event_data=data, url='{0}/{1}'.format(
self._endpoint, self._INGEST_ENDPOINT_EVENT_SUFFIX),
session=self._session)
|
python
|
{
"resource": ""
}
|
q12459
|
_BaseSignalFxIngestClient.stop
|
train
|
def stop(self, msg='Thread stopped'):
"""Stop send thread and flush points for a safe exit."""
with self._lock:
if not self._thread_running:
return
self._thread_running = False
self._queue.put(_BaseSignalFxIngestClient._QUEUE_STOP)
self._send_thread.join()
_logger.debug(msg)
|
python
|
{
"resource": ""
}
|
q12460
|
ProtoBufSignalFxIngestClient._assign_value_by_type
|
train
|
def _assign_value_by_type(self, pbuf_obj, value, _bool=True, _float=True,
_integer=True, _string=True, error_prefix=''):
"""Assigns the supplied value to the appropriate protobuf value type"""
# bool inherits int, so bool instance check must be executed prior to
# checking for integer types
if isinstance(value, bool) and _bool is True:
pbuf_obj.value.boolValue = value
elif isinstance(value, six.integer_types) and \
not isinstance(value, bool) and _integer is True:
if value < INTEGER_MIN or value > INTEGER_MAX:
raise ValueError(
('{}: {} exceeds signed 64 bit integer range '
'as defined by ProtocolBuffers ({} to {})')
.format(error_prefix, str(value),
str(INTEGER_MIN), str(INTEGER_MAX)))
pbuf_obj.value.intValue = value
elif isinstance(value, float) and _float is True:
pbuf_obj.value.doubleValue = value
elif isinstance(value, six.string_types) and _string is True:
pbuf_obj.value.strValue = value
else:
raise ValueError(
'{}: {} is of invalid type {}'
.format(error_prefix, str(value), str(type(value))))
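# Hedged illustration (not part of the original source) of why the bool branch runs first:
# bool is a subclass of int in Python, so isinstance(True, six.integer_types) is True;
# checking _bool before the integer branch keeps True/False out of intValue.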
|
python
|
{
"resource": ""
}
|
q12461
|
ProtoBufSignalFxIngestClient._assign_value
|
train
|
def _assign_value(self, pbuf_dp, value):
"""Assigns a value to the protobuf obj"""
self._assign_value_by_type(pbuf_dp, value, _bool=False,
error_prefix='Invalid value')
|
python
|
{
"resource": ""
}
|
q12462
|
Computation.stream
|
train
|
def stream(self):
"""Iterate over the messages from the computation's output.
Control and metadata messages are intercepted and interpreted to
enhance this Computation's object knowledge of the computation's
context. Data and event messages are yielded back to the caller as a
generator.
"""
iterator = iter(self._stream)
while self._state < Computation.STATE_COMPLETED:
try:
message = next(iterator)
except StopIteration:
if self._state < Computation.STATE_COMPLETED:
self._stream = self._execute()
iterator = iter(self._stream)
continue
if isinstance(message, messages.StreamStartMessage):
self._state = Computation.STATE_STREAM_STARTED
continue
if isinstance(message, messages.JobStartMessage):
self._state = Computation.STATE_COMPUTATION_STARTED
self._id = message.handle
yield message
continue
if isinstance(message, messages.JobProgressMessage):
yield message
continue
if isinstance(message, messages.ChannelAbortMessage):
self._state = Computation.STATE_ABORTED
raise errors.ComputationAborted(message.abort_info)
if isinstance(message, messages.EndOfChannelMessage):
self._state = Computation.STATE_COMPLETED
continue
# Intercept metadata messages to accumulate received metadata...
if isinstance(message, messages.MetadataMessage):
self._metadata[message.tsid] = message.properties
yield message
continue
# ...as well as expired-tsid messages to clean it up.
if isinstance(message, messages.ExpiredTsIdMessage):
if message.tsid in self._metadata:
del self._metadata[message.tsid]
yield message
continue
if isinstance(message, messages.InfoMessage):
self._process_info_message(message.message)
self._batch_count_detected = True
if self._current_batch_message:
yield self._get_batch_to_yield()
continue
# Accumulate data messages and release them when we have received
# all batches for the same logical timestamp.
if isinstance(message, messages.DataMessage):
self._state = Computation.STATE_DATA_RECEIVED
if not self._batch_count_detected:
self._expected_batches += 1
if not self._current_batch_message:
self._current_batch_message = message
self._current_batch_count = 1
elif (message.logical_timestamp_ms ==
self._current_batch_message.logical_timestamp_ms):
self._current_batch_message.add_data(message.data)
self._current_batch_count += 1
else:
self._batch_count_detected = True
if (self._batch_count_detected and
self._current_batch_count == self._expected_batches):
yield self._get_batch_to_yield()
continue
if isinstance(message, messages.EventMessage):
yield message
continue
if isinstance(message, messages.ErrorMessage):
raise errors.ComputationFailed(message.errors)
# Yield last batch, even if potentially incomplete.
if self._current_batch_message:
yield self._get_batch_to_yield()
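# Hedged usage sketch (the computation object and handlers are assumptions; message
# classes come from the surrounding module):
#
#     for msg in computation.stream():
#         if isinstance(msg, messages.DataMessage):
#             handle_batch(msg.data)      # one accumulated batch per logical timestamp
#         elif isinstance(msg, messages.EventMessage):
#             handle_event(msg)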
|
python
|
{
"resource": ""
}
|
q12463
|
Computation._process_info_message
|
train
|
def _process_info_message(self, message):
"""Process an information message received from the computation."""
# Extract the output resolution from the appropriate message, if
# it's present.
if message['messageCode'] == 'JOB_RUNNING_RESOLUTION':
self._resolution = message['contents']['resolutionMs']
elif message['messageCode'] == 'FETCH_NUM_TIMESERIES':
self._num_input_timeseries += int(message['numInputTimeSeries'])
|
python
|
{
"resource": ""
}
|
q12464
|
SignalFlowClient.execute
|
train
|
def execute(self, program, start=None, stop=None, resolution=None,
max_delay=None, persistent=False, immediate=False,
disable_all_metric_publishes=None):
"""Execute the given SignalFlow program and stream the output back."""
params = self._get_params(start=start, stop=stop,
resolution=resolution,
maxDelay=max_delay,
persistent=persistent,
immediate=immediate,
disableAllMetricPublishes=disable_all_metric_publishes)
def exec_fn(since=None):
if since:
params['start'] = since
return self._transport.execute(program, params)
c = computation.Computation(exec_fn)
self._computations.add(c)
return c
|
python
|
{
"resource": ""
}
|
q12465
|
SignalFlowClient.preflight
|
train
|
def preflight(self, program, start, stop, resolution=None,
max_delay=None):
"""Preflight the given SignalFlow program and stream the output
back."""
params = self._get_params(start=start, stop=stop,
resolution=resolution,
maxDelay=max_delay)
def exec_fn(since=None):
if since:
params['start'] = since
return self._transport.preflight(program, params)
c = computation.Computation(exec_fn)
self._computations.add(c)
return c
|
python
|
{
"resource": ""
}
|
q12466
|
SignalFlowClient.start
|
train
|
def start(self, program, start=None, stop=None, resolution=None,
max_delay=None):
"""Start executing the given SignalFlow program without being attached
to the output of the computation."""
params = self._get_params(start=start, stop=stop,
resolution=resolution,
maxDelay=max_delay)
self._transport.start(program, params)
|
python
|
{
"resource": ""
}
|
q12467
|
SignalFlowClient.attach
|
train
|
def attach(self, handle, filters=None, resolution=None):
"""Attach to an existing SignalFlow computation."""
params = self._get_params(filters=filters, resolution=resolution)
c = computation.Computation(
lambda since: self._transport.attach(handle, params))
self._computations.add(c)
return c
|
python
|
{
"resource": ""
}
|
q12468
|
SignalFlowClient.stop
|
train
|
def stop(self, handle, reason=None):
"""Stop a SignalFlow computation."""
params = self._get_params(reason=reason)
self._transport.stop(handle, params)
|
python
|
{
"resource": ""
}
|
q12469
|
SignalFxRestClient.get_metric_by_name
|
train
|
def get_metric_by_name(self, metric_name, **kwargs):
"""
get a metric by name
Args:
metric_name (string): name of metric
Returns:
dictionary of response
"""
return self._get_object_by_name(self._METRIC_ENDPOINT_SUFFIX,
metric_name,
**kwargs)
|
python
|
{
"resource": ""
}
|
q12470
|
SignalFxRestClient.update_metric_by_name
|
train
|
def update_metric_by_name(self, metric_name, metric_type, description=None,
custom_properties=None, tags=None, **kwargs):
"""
Create or update a metric object
Args:
metric_name (string): name of metric
metric_type (string): metric type, must be one of 'gauge', 'counter',
'cumulative_counter'
description (optional[string]): a description
custom_properties (optional[dict]): dictionary of custom properties
tags (optional[list of strings]): list of tags associated with
metric
"""
data = {'type': metric_type.upper(),
'description': description or '',
'customProperties': custom_properties or {},
'tags': tags or []}
resp = self._put(self._u(self._METRIC_ENDPOINT_SUFFIX,
str(metric_name)),
data=data, **kwargs)
resp.raise_for_status()
return resp.json()
|
python
|
{
"resource": ""
}
|
q12471
|
SignalFxRestClient.get_dimension
|
train
|
def get_dimension(self, key, value, **kwargs):
"""
get a dimension by key and value
Args:
key (string): key of the dimension
value (string): value of the dimension
Returns:
dictionary of response
"""
return self._get_object_by_name(self._DIMENSION_ENDPOINT_SUFFIX,
'{0}/{1}'.format(key, value),
**kwargs)
|
python
|
{
"resource": ""
}
|
q12472
|
SignalFxRestClient.get_metric_time_series
|
train
|
def get_metric_time_series(self, mts_id, **kwargs):
"""get a metric time series by id"""
return self._get_object_by_name(self._MTS_ENDPOINT_SUFFIX,
mts_id,
**kwargs)
|
python
|
{
"resource": ""
}
|
q12473
|
SignalFxRestClient.get_tag
|
train
|
def get_tag(self, tag_name, **kwargs):
"""get a tag by name
Args:
tag_name (string): name of tag to get
Returns:
dictionary of the response
"""
return self._get_object_by_name(self._TAG_ENDPOINT_SUFFIX,
tag_name,
**kwargs)
|
python
|
{
"resource": ""
}
|
q12474
|
SignalFxRestClient.update_tag
|
train
|
def update_tag(self, tag_name, description=None,
custom_properties=None, **kwargs):
"""update a tag by name
Args:
tag_name (string): name of tag to update
description (optional[string]): a description
custom_properties (optional[dict]): dictionary of custom properties
"""
data = {'description': description or '',
'customProperties': custom_properties or {}}
resp = self._put(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name),
data=data, **kwargs)
resp.raise_for_status()
return resp.json()
|
python
|
{
"resource": ""
}
|
q12475
|
SignalFxRestClient.delete_tag
|
train
|
def delete_tag(self, tag_name, **kwargs):
"""delete a tag by name
Args:
tag_name (string): name of tag to delete
"""
resp = self._delete(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name),
**kwargs)
resp.raise_for_status()
# successful delete returns 204, which has no associated json
return resp
|
python
|
{
"resource": ""
}
|
q12476
|
SignalFxRestClient.get_organization
|
train
|
def get_organization(self, **kwargs):
"""Get the organization to which the user belongs
Returns:
dictionary of the response
"""
resp = self._get(self._u(self._ORGANIZATION_ENDPOINT_SUFFIX),
**kwargs)
resp.raise_for_status()
return resp.json()
|
python
|
{
"resource": ""
}
|
q12477
|
SignalFxRestClient.validate_detector
|
train
|
def validate_detector(self, detector):
"""Validate a detector.
Validates the given detector; throws a 400 Bad Request HTTP error if
the detector is invalid; otherwise doesn't return or throw anything.
Args:
detector (object): the detector model object. Will be serialized as
JSON.
"""
resp = self._post(self._u(self._DETECTOR_ENDPOINT_SUFFIX, 'validate'),
data=detector)
resp.raise_for_status()
|
python
|
{
"resource": ""
}
|
q12478
|
SignalFxRestClient.create_detector
|
train
|
def create_detector(self, detector):
"""Creates a new detector.
Args:
detector (object): the detector model object. Will be serialized as
JSON.
Returns:
dictionary of the response (created detector model).
"""
resp = self._post(self._u(self._DETECTOR_ENDPOINT_SUFFIX),
data=detector)
resp.raise_for_status()
return resp.json()
|
python
|
{
"resource": ""
}
|
q12479
|
SignalFxRestClient.update_detector
|
train
|
def update_detector(self, detector_id, detector):
"""Update an existing detector.
Args:
detector_id (string): the ID of the detector.
detector (object): the detector model object. Will be serialized as
JSON.
Returns:
dictionary of the response (updated detector model).
"""
resp = self._put(self._u(self._DETECTOR_ENDPOINT_SUFFIX, detector_id),
data=detector)
resp.raise_for_status()
return resp.json()
|
python
|
{
"resource": ""
}
|
q12480
|
SignalFxRestClient.delete_detector
|
train
|
def delete_detector(self, detector_id, **kwargs):
"""Remove a detector.
Args:
detector_id (string): the ID of the detector.
"""
resp = self._delete(self._u(self._DETECTOR_ENDPOINT_SUFFIX,
detector_id),
**kwargs)
resp.raise_for_status()
# successful delete returns 204, which has no response json
return resp
|
python
|
{
"resource": ""
}
|
q12481
|
SignalFxRestClient.get_detector_incidents
|
train
|
def get_detector_incidents(self, id, **kwargs):
"""Gets all incidents for a detector
"""
resp = self._get(
self._u(self._DETECTOR_ENDPOINT_SUFFIX, id, 'incidents'),
None,
**kwargs
)
resp.raise_for_status()
return resp.json()
|
python
|
{
"resource": ""
}
|
q12482
|
SignalFxRestClient.clear_incident
|
train
|
def clear_incident(self, id, **kwargs):
"""Clear an incident.
"""
resp = self._put(
self._u(self._INCIDENT_ENDPOINT_SUFFIX, id, 'clear'),
None,
**kwargs
)
resp.raise_for_status()
return resp
|
python
|
{
"resource": ""
}
|
q12483
|
SignalFx.login
|
train
|
def login(self, email, password):
"""Authenticate a user with SignalFx to acquire a session token.
Note that data ingest can only be done with an organization or team API
access token, not with a user token obtained via this method.
Args:
email (string): the email login
password (string): the password
Returns a new, immediately-usable session token for the logged in user.
"""
r = requests.post('{0}/v2/session'.format(self._api_endpoint),
json={'email': email, 'password': password})
r.raise_for_status()
return r.json()['accessToken']
|
python
|
{
"resource": ""
}
|
q12484
|
SignalFx.rest
|
train
|
def rest(self, token, endpoint=None, timeout=None):
"""Obtain a metadata REST API client."""
from . import rest
return rest.SignalFxRestClient(
token=token,
endpoint=endpoint or self._api_endpoint,
timeout=timeout or self._timeout)
|
python
|
{
"resource": ""
}
|
q12485
|
SignalFx.ingest
|
train
|
def ingest(self, token, endpoint=None, timeout=None, compress=None):
"""Obtain a datapoint and event ingest client."""
from . import ingest
if ingest.sf_pbuf:
client = ingest.ProtoBufSignalFxIngestClient
else:
_logger.warning('Protocol Buffers not installed properly; '
'falling back to JSON.')
client = ingest.JsonSignalFxIngestClient
compress = compress if compress is not None else self._compress
return client(
token=token,
endpoint=endpoint or self._ingest_endpoint,
timeout=timeout or self._timeout,
compress=compress)
|
python
|
{
"resource": ""
}
|
q12486
|
SignalFx.signalflow
|
train
|
def signalflow(self, token, endpoint=None, timeout=None, compress=None):
"""Obtain a SignalFlow API client."""
from . import signalflow
compress = compress if compress is not None else self._compress
return signalflow.SignalFlowClient(
token=token,
endpoint=endpoint or self._stream_endpoint,
timeout=timeout or self._timeout,
compress=compress)
|
python
|
{
"resource": ""
}
|
q12487
|
MetricMetadata.register
|
train
|
def register(self, key, **kwargs):
"""Registers metadata for a metric and returns a composite key"""
dimensions = dict((k, str(v)) for k, v in kwargs.items())
composite_key = self._composite_name(key, dimensions)
self._metadata[composite_key] = {
'metric': key,
'dimensions': dimensions
}
return composite_key
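# Hedged illustration (the composite-key format comes from self._composite_name and is
# not shown here):
#
#     key = metadata.register('requests', host='web-1', code=200)
#     # dimension values are stringified ({'host': 'web-1', 'code': '200'}) and the
#     # returned composite key maps back to the metric name plus dimensions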
|
python
|
{
"resource": ""
}
|
q12488
|
WebSocketTransport.opened
|
train
|
def opened(self):
"""Handler called when the WebSocket connection is opened. The first
thing to do then is to authenticate ourselves."""
request = {
'type': 'authenticate',
'token': self._token,
'userAgent': '{} ws4py/{}'.format(version.user_agent,
ws4py.__version__),
}
self.send(json.dumps(request))
|
python
|
{
"resource": ""
}
|
q12489
|
WebSocketTransport.closed
|
train
|
def closed(self, code, reason=None):
"""Handler called when the WebSocket is closed. Status code 1000
denotes a normal close; all others are errors."""
if code != 1000:
self._error = errors.SignalFlowException(code, reason)
_logger.info('Lost WebSocket connection with %s (%s: %s).',
self, code, reason)
for c in self._channels.values():
c.offer(WebSocketComputationChannel.END_SENTINEL)
self._channels.clear()
with self._connection_cv:
self._connected = False
self._connection_cv.notify()
|
python
|
{
"resource": ""
}
|
q12490
|
get_aws_unique_id
|
train
|
def get_aws_unique_id(timeout=DEFAULT_AWS_TIMEOUT):
"""Determine the current AWS unique ID
Args:
timeout (int): How long to wait for a response from AWS metadata IP
"""
try:
resp = requests.get(AWS_ID_URL, timeout=timeout).json()
except requests.exceptions.ConnectTimeout:
_logger.warning('Connection timeout when determining AWS unique '
'ID. Not using AWS unique ID.')
return None
else:
aws_id = "{0}_{1}_{2}".format(resp['instanceId'], resp['region'],
resp['accountId'])
_logger.debug('Using AWS unique ID %s.', aws_id)
return aws_id
|
python
|
{
"resource": ""
}
|
q12491
|
fft
|
train
|
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
output = mkl_fft.fft(a, n, axis)
if _unitary(norm):
output *= 1 / sqrt(output.shape[axis])
return output
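# Hedged usage sketch: this wrapper mirrors numpy.fft.fft, so "ortho"
# normalization should satisfy fft(a, norm="ortho") == fft(a) / sqrt(n).
# numpy.fft is used below as the reference implementation, since the exact
# import path of the MKL-backed wrapper depends on the installed mkl_fft.
import numpy as np
a = np.random.rand(16) + 1j * np.random.rand(16)
ref = np.fft.fft(a)
assert np.allclose(np.fft.fft(a, norm="ortho"), ref / np.sqrt(a.size))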
|
python
|
{
"resource": ""
}
|
q12492
|
ifft
|
train
|
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e.,
* ``a[0]`` should contain the zero frequency term,
* ``a[1:n//2]`` should contain the positive-frequency terms,
* ``a[n//2 + 1:]`` should contain the negative-frequency terms, in
increasing order starting from the most negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
        If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
...
>>> plt.legend(('real', 'imaginary'))
...
>>> plt.show()
"""
unitary = _unitary(norm)
output = mkl_fft.ifft(a, n, axis)
if unitary:
output *= sqrt(output.shape[axis])
return output
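# Hedged round-trip check: as stated above, ifft(fft(a)) should recover a to
# within numerical accuracy. numpy.fft stands in for the wrapper here.
import numpy as np
a = np.random.rand(32) + 1j * np.random.rand(32)
assert np.allclose(np.fft.ifft(np.fft.fft(a)), a)
# Passing n larger than len(a) zero-pads the input before the inverse transform.
padded = np.fft.ifft(np.fft.fft(a), n=64)
assert padded.shape == (64,)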
|
python
|
{
"resource": ""
}
|
q12493
|
rfft
|
train
|
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
unitary = _unitary(norm)
if unitary and n is None:
a = asarray(a)
n = a.shape[axis]
output = mkl_fft.rfft_numpy(a, n=n, axis=axis)
if unitary:
output *= 1 / sqrt(n)
return output
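# Hedged illustration of the output-length rule described above: for real
# input of length n, rfft keeps only the non-negative frequencies, so the
# transformed axis has length n//2 + 1. numpy.fft is used as the reference.
import numpy as np
x = np.random.rand(10)   # even length
assert np.fft.rfft(x).shape == (10 // 2 + 1,)
y = np.random.rand(9)    # odd length
assert np.fft.rfft(y).shape == (9 // 2 + 1,)
# The retained terms agree with the first half of the full complex FFT.
assert np.allclose(np.fft.rfft(x), np.fft.fft(x)[:6])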
|
python
|
{
"resource": ""
}
|
q12494
|
irfft
|
train
|
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
output = mkl_fft.irfft_numpy(a, n=n, axis=axis)
if _unitary(norm):
output *= sqrt(output.shape[axis])
return output
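# Hedged sketch of the Fourier-interpolation idiom from the notes, using
# numpy.fft as the stand-in implementation. Note that irfft normalizes by its
# own output length, so for this band-limited signal the resampled amplitudes
# scale by len(a) / m relative to the original samples.
import numpy as np
a = np.sin(2 * np.pi * np.arange(16) / 16)
# Exact round trip needs the original length, since len(a) cannot be
# recovered from the rfft output alone.
assert np.allclose(np.fft.irfft(np.fft.rfft(a), len(a)), a)
a_resamp = np.fft.irfft(np.fft.rfft(a), 32)
assert a_resamp.shape == (32,)
assert np.allclose(2 * a_resamp[::2], a)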
|
python
|
{
"resource": ""
}
|
q12495
|
ihfft
|
train
|
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
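# Hedged check of the pairing described above: with default normalization the
# wrapper computes conjugate(rfft(a, n)) / n, and hfft/ihfft invert each other
# when the length is supplied. numpy.fft is used here as the reference.
import numpy as np
spectrum = np.array([15.0, -4.0, 0.0, -1.0, 0.0, -4.0])
n = spectrum.size
assert np.allclose(np.fft.ihfft(spectrum),
                   np.conjugate(np.fft.rfft(spectrum)) / n)
assert np.allclose(np.fft.hfft(np.fft.ihfft(spectrum), n), spectrum)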
|
python
|
{
"resource": ""
}
|
q12496
|
fftn
|
train
|
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
output = mkl_fft.fftn(a, s, axes)
if _unitary(norm):
output *= 1 / sqrt(_tot_size(output, axes))
return output
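# Hedged illustration of separability: the n-dimensional FFT over all axes is
# the 1-D FFT applied along each axis in turn, and "ortho" scales by
# 1/sqrt(total number of transformed points). numpy.fft stands in for the
# MKL-backed wrapper, which follows the same interface.
import numpy as np
a = np.random.rand(4, 6, 8)
step = np.fft.fft(np.fft.fft(np.fft.fft(a, axis=0), axis=1), axis=2)
assert np.allclose(np.fft.fftn(a), step)
assert np.allclose(np.fft.fftn(a, norm="ortho"),
                   np.fft.fftn(a) / np.sqrt(a.size))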
|
python
|
{
"resource": ""
}
|
q12497
|
ifftn
|
train
|
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
unitary = _unitary(norm)
output = mkl_fft.ifftn(a, s, axes)
if unitary:
output *= sqrt(_tot_size(output, axes))
return output
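# Hedged round-trip check for the statement ifftn(fftn(a)) == a above, using
# numpy.fft as the reference implementation.
import numpy as np
a = np.random.rand(3, 5, 7) + 1j * np.random.rand(3, 5, 7)
assert np.allclose(np.fft.ifftn(np.fft.fftn(a)), a)
# Transforming only a subset of axes inverts only those axes.
assert np.allclose(np.fft.ifftn(np.fft.fftn(a, axes=(0, 2)), axes=(0, 2)), a)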
|
python
|
{
"resource": ""
}
|
q12498
|
fft2
|
train
|
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return fftn(a, s=s, axes=axes, norm=norm)
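# Hedged check that fft2 is fftn with a default of axes=(-2, -1), as stated in
# the notes; numpy.fft stands in for the wrapper.
import numpy as np
a = np.random.rand(4, 5, 6)
assert np.allclose(np.fft.fft2(a), np.fft.fftn(a, axes=(-2, -1)))
# On a 2-D array this is simply the full two-dimensional transform.
b = np.random.rand(5, 5)
assert np.allclose(np.fft.fft2(b), np.fft.fftn(b))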
|
python
|
{
"resource": ""
}
|
q12499
|
ifft2
|
train
|
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return ifftn(a, s=s, axes=axes, norm=norm)
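# Hedged round-trip check for ifft2(fft2(a)) == a over the last two axes,
# again using numpy.fft as the reference implementation.
import numpy as np
a = np.random.rand(3, 4, 4) + 1j * np.random.rand(3, 4, 4)
assert np.allclose(np.fft.ifft2(np.fft.fft2(a)), a)
assert np.allclose(np.fft.ifft2(a), np.fft.ifftn(a, axes=(-2, -1)))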
|
python
|
{
"resource": ""
}
|