Dataset schema:
_id: string (2-7 chars)
title: string (1-88 chars)
partition: string (3 classes)
text: string (75-19.8k chars)
language: string (1 class)
meta_information: dict
q20600
PyCurlMixin.process_queue
train
def process_queue(self):
    """
    Processes all API calls since last invocation, returning a list of
    data in the order the API calls were created
    """
    m = pycurl.CurlMulti()
    m.handles = []

    # Loop the queue and create Curl objects for processing
    for item in self._queue:
        c = self._create_curl(*item)
        m.add_handle(c)
        m.handles.append(c)

    # Process the collected Curl handles
    num_handles = len(m.handles)
    while num_handles:
        while 1:
            # Perform the calls
            ret, num_handles = m.perform()
            if ret != pycurl.E_CALL_MULTI_PERFORM:
                break
        m.select(1.0)

    # Collect data
    results = []
    for c in m.handles:
        c.status = c.getinfo(c.HTTP_CODE)
        if 'Content-Encoding: gzip' in c.response_headers.getvalue():
            c.body = cStringIO.StringIO(self._gunzip_body(c.body.getvalue()))
        result = {"data": self._digest_result(c.body.getvalue()),
                  "code": c.status}
        if not c.status or c.status >= 400:
            # Don't throw the exception because some might have succeeded
            result['exception'] = HapiThreadedError(c)
        results.append(result)

    # cleanup
    for c in m.handles:
        if hasattr(c, "data_out"):
            c.data_out.close()
        c.body.close()
        c.response_headers.close()
        c.close()
        m.remove_handle(c)
    m.close()
    del m.handles
    self._queue = []

    return results
python
{ "resource": "" }
q20601
LeadsClient.camelcase_search_options
train
def camelcase_search_options(self, options):
    """change all underscored variants back to what the API is expecting"""
    new_options = {}
    for key in options:
        value = options[key]
        new_key = SEARCH_OPTIONS_DICT.get(key, key)
        if new_key == 'sort':
            value = SORT_OPTIONS_DICT.get(value, value)
        elif new_key == 'timePivot':
            value = TIME_PIVOT_OPTIONS_DICT.get(value, value)
        elif new_key in BOOLEAN_SEARCH_OPTIONS:
            value = str(value).lower()
        new_options[new_key] = value
    return new_options
python
{ "resource": "" }
q20602
LeadsClient.get_leads
train
def get_leads(self, *guids, **options):
    """Supports all the search parameters in the API as well as python
    underscored variants"""
    original_options = options
    options = self.camelcase_search_options(options.copy())
    params = {}
    for i in xrange(len(guids)):
        params['guids[%s]' % i] = guids[i]
    for k in options.keys():
        if k in SEARCH_OPTIONS:
            params[k] = options[k]
            del options[k]
    leads = self._call('list/', params, **options)
    self.log.info("retrieved %s leads through API ( %soptions=%s )" % (
        len(leads), guids and 'guids=%s, ' % guids or '', original_options))
    return leads
python
{ "resource": "" }
q20603
LeadsClient.retrieve_lead
train
def retrieve_lead(self, *guid, **options):
    cur_guid = guid or ''
    params = {}
    for key in options:
        params[key] = options[key]
    # Set guid to -1 as default for not finding a user
    lead = {'guid': '-1'}
    # Wrap the lead call so that it doesn't error out when no lead is found
    try:
        lead = self._call('lead/%s' % cur_guid, params, **options)
    except Exception:
        # no lead here
        pass
    return lead
python
{ "resource": "" }
q20604
BroadcastClient.get_broadcast
train
def get_broadcast(self, broadcast_guid, **kwargs):
    '''Get a specific broadcast by guid'''
    params = kwargs
    broadcast = self._call('broadcasts/%s' % broadcast_guid, params=params,
                           content_type='application/json')
    return Broadcast(broadcast)
python
{ "resource": "" }
q20605
BroadcastClient.get_broadcasts
train
def get_broadcasts(self, type="", page=None, remote_content_id=None,
                   limit=None, **kwargs):
    '''Get all broadcasts, with optional paging and limits. Type filter
    can be 'scheduled', 'published' or 'failed' '''
    if remote_content_id:
        return self.get_broadcasts_by_remote(remote_content_id)
    params = {'type': type}
    if page:
        params['page'] = page
    params.update(kwargs)
    result = self._call('broadcasts', params=params,
                        content_type='application/json')
    broadcasts = [Broadcast(b) for b in result]
    if limit:
        return broadcasts[:limit]
    return broadcasts
python
{ "resource": "" }
q20606
BroadcastClient.cancel_broadcast
train
def cancel_broadcast(self, broadcast_guid):
    '''Cancel a broadcast specified by guid'''
    subpath = 'broadcasts/%s/update' % broadcast_guid
    broadcast = {'status': 'CANCELED'}
    bcast_dict = self._call(subpath, method='POST', data=broadcast,
                            content_type='application/json')
    return bcast_dict
python
{ "resource": "" }
q20607
BroadcastClient.get_channels
train
def get_channels(self, current=True, publish_only=False, settings=False):
    """
    If "current" is false it will return all channels that a user has
    published to in the past.

    If publish_only is set to true, then return only the channels that
    are publishable.

    If settings is true, the API will make extra queries to return the
    settings for each channel.
    """
    if publish_only:
        if current:
            endpoint = 'channels/setting/publish/current'
        else:
            endpoint = 'channels/setting/publish'
    else:
        if current:
            endpoint = 'channels/current'
        else:
            endpoint = 'channels'
    result = self._call(endpoint, content_type='application/json',
                        params=dict(settings=settings))
    return [Channel(c) for c in result]
python
{ "resource": "" }
q20608
ProspectsClient.get_prospects
train
def get_prospects(self, offset=None, orgoffset=None, limit=None):
    """
    Return the prospects for the current API key. Optionally start the
    result list at the given offset. Each member of the return list is
    a prospect element containing organizational information such as
    name and location.
    """
    params = {}
    if limit:
        params['count'] = limit
    if offset:
        params['timeOffset'] = offset
        params['orgOffset'] = orgoffset
    return self._call('timeline', params)
python
{ "resource": "" }
q20609
ProspectsClient.search_prospects
train
def search_prospects(self, search_type, query, offset=None, orgoffset=None):
    """
    Supports doing a search for prospects by city, region, or country.
    search_type should be one of 'city', 'region' or 'country'. This
    method is intended to be called with one of the outputs from the
    get_options_for_query method above.
    """
    params = {'q': query}
    if offset and orgoffset:
        params['orgOffset'] = orgoffset
        params['timeOffset'] = offset
    return self._call('search/%s' % search_type, params)
python
{ "resource": "" }
q20610
track_field
train
def track_field(field):
    """
    Returns whether the given field should be tracked by Auditlog.

    Untracked fields are many-to-many relations and relations to the
    Auditlog LogEntry model.

    :param field: The field to check.
    :type field: Field
    :return: Whether the given field should be tracked.
    :rtype: bool
    """
    from auditlog.models import LogEntry

    # Do not track many to many relations
    if field.many_to_many:
        return False

    # Do not track relations to LogEntry
    if getattr(field, 'remote_field', None) is not None and field.remote_field.model == LogEntry:
        return False
    # 1.8 check
    elif getattr(field, 'rel', None) is not None and field.rel.to == LogEntry:
        return False

    return True
python
{ "resource": "" }
q20611
get_fields_in_model
train
def get_fields_in_model(instance):
    """
    Returns the list of fields in the given model instance. Checks whether
    to use the official _meta API or use the raw data. This method excludes
    many to many fields.

    :param instance: The model instance to get the fields for
    :type instance: Model
    :return: The list of fields for the given model (instance)
    :rtype: list
    """
    assert isinstance(instance, Model)

    # Check if the Django 1.8 _meta API is available
    use_api = hasattr(instance._meta, 'get_fields') and callable(instance._meta.get_fields)

    if use_api:
        return [f for f in instance._meta.get_fields() if track_field(f)]
    return instance._meta.fields
python
{ "resource": "" }
q20612
is_authenticated
train
def is_authenticated(user):
    """Return whether or not a User is authenticated.

    Function provides compatibility following deprecation of method call
    to `is_authenticated()` in Django 2.0. This is *only* required to
    support Django < v1.10 (i.e. v1.9 and earlier), as `is_authenticated`
    was introduced as a property in v1.10.
    """
    if not hasattr(user, 'is_authenticated'):
        return False
    if callable(user.is_authenticated):
        # Will be callable if django.version < 2.0, but is only necessary in
        # v1.9 and earlier due to change introduced in v1.10 making
        # `is_authenticated` a property instead of a callable.
        return user.is_authenticated()
    else:
        return user.is_authenticated
python
{ "resource": "" }
q20613
AuditlogModelRegistry.register
train
def register(self, model=None, include_fields=[], exclude_fields=[], mapping_fields={}):
    """
    Register a model with auditlog. Auditlog will then track mutations on
    this model's instances.

    :param model: The model to register.
    :type model: Model
    :param include_fields: The fields to include. Implicitly excludes all
        other fields.
    :type include_fields: list
    :param exclude_fields: The fields to exclude. Overrides the fields to
        include.
    :type exclude_fields: list
    """
    def registrar(cls):
        """Register models for a given class."""
        if not issubclass(cls, Model):
            raise TypeError("Supplied model is not a valid model.")

        self._registry[cls] = {
            'include_fields': include_fields,
            'exclude_fields': exclude_fields,
            'mapping_fields': mapping_fields,
        }
        self._connect_signals(cls)

        # We need to return the class, as the decorator is basically
        # syntactic sugar for:
        # MyClass = auditlog.register(MyClass)
        return cls

    if model is None:
        # If we're being used as a decorator, return a callable with the
        # wrapper.
        return lambda cls: registrar(cls)
    else:
        # Otherwise, just register the model.
        registrar(model)
python
{ "resource": "" }
q20614
AuditlogModelRegistry._connect_signals
train
def _connect_signals(self, model):
    """
    Connect signals for the model.
    """
    for signal in self._signals:
        receiver = self._signals[signal]
        signal.connect(receiver, sender=model,
                       dispatch_uid=self._dispatch_uid(signal, model))
python
{ "resource": "" }
q20615
AuditlogModelRegistry._disconnect_signals
train
def _disconnect_signals(self, model):
    """
    Disconnect signals for the model.
    """
    for signal, receiver in self._signals.items():
        signal.disconnect(sender=model,
                          dispatch_uid=self._dispatch_uid(signal, model))
python
{ "resource": "" }
q20616
LogEntryManager.log_create
train
def log_create(self, instance, **kwargs):
    """
    Helper method to create a new log entry. This method automatically
    populates some fields when no explicit value is given.

    :param instance: The model instance to log a change for.
    :type instance: Model
    :param kwargs: Field overrides for the :py:class:`LogEntry` object.
    :return: The new log entry or `None` if there were no changes.
    :rtype: LogEntry
    """
    changes = kwargs.get('changes', None)
    pk = self._get_pk_value(instance)

    if changes is not None:
        kwargs.setdefault('content_type', ContentType.objects.get_for_model(instance))
        kwargs.setdefault('object_pk', pk)
        kwargs.setdefault('object_repr', smart_text(instance))

        if isinstance(pk, integer_types):
            kwargs.setdefault('object_id', pk)

        get_additional_data = getattr(instance, 'get_additional_data', None)
        if callable(get_additional_data):
            kwargs.setdefault('additional_data', get_additional_data())

        # Delete log entries with the same pk as a newly created model.
        # This should only be necessary when a pk is used twice.
        if kwargs.get('action', None) is LogEntry.Action.CREATE:
            if kwargs.get('object_id', None) is not None and \
                    self.filter(content_type=kwargs.get('content_type'),
                                object_id=kwargs.get('object_id')).exists():
                self.filter(content_type=kwargs.get('content_type'),
                            object_id=kwargs.get('object_id')).delete()
            else:
                self.filter(content_type=kwargs.get('content_type'),
                            object_pk=kwargs.get('object_pk', '')).delete()

        # save LogEntry to the same database the instance is using
        db = instance._state.db
        return self.create(**kwargs) if db is None or db == '' else self.using(db).create(**kwargs)
    return None
python
{ "resource": "" }
q20617
LogEntryManager.get_for_object
train
def get_for_object(self, instance):
    """
    Get log entries for the specified model instance.

    :param instance: The model instance to get log entries for.
    :type instance: Model
    :return: QuerySet of log entries for the given model instance.
    :rtype: QuerySet
    """
    # Return empty queryset if the given model instance is not a model instance.
    if not isinstance(instance, models.Model):
        return self.none()

    content_type = ContentType.objects.get_for_model(instance.__class__)
    pk = self._get_pk_value(instance)

    if isinstance(pk, integer_types):
        return self.filter(content_type=content_type, object_id=pk)
    else:
        return self.filter(content_type=content_type, object_pk=smart_text(pk))
python
{ "resource": "" }
q20618
LogEntryManager.get_for_objects
train
def get_for_objects(self, queryset):
    """
    Get log entries for the objects in the specified queryset.

    :param queryset: The queryset to get the log entries for.
    :type queryset: QuerySet
    :return: The LogEntry objects for the objects in the given queryset.
    :rtype: QuerySet
    """
    if not isinstance(queryset, QuerySet) or queryset.count() == 0:
        return self.none()

    content_type = ContentType.objects.get_for_model(queryset.model)
    primary_keys = list(queryset.values_list(queryset.model._meta.pk.name, flat=True))

    if isinstance(primary_keys[0], integer_types):
        return self.filter(content_type=content_type).filter(Q(object_id__in=primary_keys)).distinct()
    elif isinstance(queryset.model._meta.pk, models.UUIDField):
        primary_keys = [smart_text(pk) for pk in primary_keys]
        return self.filter(content_type=content_type).filter(Q(object_pk__in=primary_keys)).distinct()
    else:
        return self.filter(content_type=content_type).filter(Q(object_pk__in=primary_keys)).distinct()
python
{ "resource": "" }
q20619
LogEntryManager.get_for_model
train
def get_for_model(self, model):
    """
    Get log entries for all objects of a specified type.

    :param model: The model to get log entries for.
    :type model: class
    :return: QuerySet of log entries for the given model.
    :rtype: QuerySet
    """
    # Return empty queryset if the given object is not valid.
    if not issubclass(model, models.Model):
        return self.none()

    content_type = ContentType.objects.get_for_model(model)
    return self.filter(content_type=content_type)
python
{ "resource": "" }
q20620
LogEntryManager._get_pk_value
train
def _get_pk_value(self, instance):
    """
    Get the primary key field value for a model instance.

    :param instance: The model instance to get the primary key for.
    :type instance: Model
    :return: The primary key value of the given model instance.
    """
    pk_field = instance._meta.pk.name
    pk = getattr(instance, pk_field, None)

    # Check to make sure that we got a pk, not a model object.
    if isinstance(pk, models.Model):
        pk = self._get_pk_value(pk)
    return pk
python
{ "resource": "" }
q20621
AuditlogHistoryField.bulk_related_objects
train
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
    """
    Return all objects related to ``objs`` via this ``GenericRelation``.
    """
    if self.delete_related:
        return super(AuditlogHistoryField, self).bulk_related_objects(objs, using)

    # When deleting, Collector.collect() finds related objects using this
    # method. However, because we don't want to delete these related
    # objects, we simply return an empty list.
    return []
python
{ "resource": "" }
q20622
get_stockprices
train
def get_stockprices(chart_range='1y'):
    '''
    This is a proxy to the main fetch function to cache the result
    based on the chart range parameter.
    '''
    all_symbols = list_symbols()

    @daily_cache(filename='iex_chart_{}'.format(chart_range))
    def get_stockprices_cached(all_symbols):
        return _get_stockprices(all_symbols, chart_range)

    return get_stockprices_cached(all_symbols)
python
{ "resource": "" }
q20623
LivePipelineEngine._inputs_for_term
train
def _inputs_for_term(term, workspace, graph):
    """
    Compute inputs for the given term.

    This is mostly complicated by the fact that for each input we store
    as many rows as will be necessary to serve **any** computation
    requiring that input.
    """
    offsets = graph.offset
    out = []
    if term.windowed:
        # If term is windowed, then all input data should be instances of
        # AdjustedArray.
        for input_ in term.inputs:
            adjusted_array = ensure_adjusted_array(
                workspace[input_], input_.missing_value,
            )
            out.append(
                adjusted_array.traverse(
                    window_length=term.window_length,
                    offset=offsets[term, input_],
                )
            )
    else:
        # If term is not windowed, input_data may be an AdjustedArray or
        # np.ndarray. Coerce the former to the latter.
        for input_ in term.inputs:
            input_data = ensure_ndarray(workspace[input_])
            offset = offsets[term, input_]
            # OPTIMIZATION: Don't make a copy by doing input_data[0:] if
            # offset is zero.
            if offset:
                input_data = input_data[offset:]
            out.append(input_data)
    return out
python
{ "resource": "" }
q20624
preemphasis
train
def preemphasis(signal, shift=1, cof=0.98):
    """Apply pre-emphasis to the signal.

    Args:
        signal (array): The input signal.
        shift (int): The shift step.
        cof (float): The pre-emphasis coefficient. 0 equals no filtering.

    Returns:
        array: The pre-emphasized signal.
    """
    rolled_signal = np.roll(signal, shift)
    return signal - cof * rolled_signal
python
{ "resource": "" }
q20625
log_power_spectrum
train
def log_power_spectrum(frames, fft_points=512, normalize=True):
    """Log power spectrum of each frame in frames.

    Args:
        frames (array): The frame array in which each row is a frame.
        fft_points (int): The length of FFT. If fft_length is greater than
            frame_len, the frames will be zero-padded.
        normalize (bool): If normalize=True, the log power spectrum
            will be normalized.

    Returns:
        array: The power spectrum - If frames is an
        num_frames x sample_per_frame matrix, output will be
        num_frames x fft_length.
    """
    power_spec = power_spectrum(frames, fft_points)
    power_spec[power_spec <= 1e-20] = 1e-20
    log_power_spec = 10 * np.log10(power_spec)
    if normalize:
        return log_power_spec - np.max(log_power_spec)
    else:
        return log_power_spec
python
{ "resource": "" }
q20626
derivative_extraction
train
def derivative_extraction(feat, DeltaWindows):
    """This function extracts the derivative features.

    Args:
        feat (array): The main feature vector (for returning the second
            order derivative it can be the first-order derivative).
        DeltaWindows (int): The value of DeltaWindows is set using
            the configuration parameter DELTAWINDOW.

    Returns:
        array: Derivative feature vector - A NUMFRAMESxNUMFEATURES numpy
        array which is the derivative features along the features.
    """
    # Getting the shape of the vector.
    rows, cols = feat.shape

    # Defining the vector of differences.
    DIF = np.zeros(feat.shape, dtype=feat.dtype)
    Scale = 0

    # Pad only along features in the vector.
    FEAT = np.lib.pad(feat, ((0, 0), (DeltaWindows, DeltaWindows)), 'edge')
    for i in range(DeltaWindows):
        # Start index
        offset = DeltaWindows

        # The dynamic range
        Range = i + 1

        dif = Range * FEAT[:, offset + Range:offset + Range + cols] \
            - FEAT[:, offset - Range:offset - Range + cols]

        Scale += 2 * np.power(Range, 2)
        DIF += dif

    return DIF / Scale
python
{ "resource": "" }
q20627
cmvnw
train
def cmvnw(vec, win_size=301, variance_normalization=False):
    """ This function is aimed to perform local cepstral mean and
    variance normalization on a sliding window. The code assumes that
    there is one observation per row.

    Args:
        vec (array): input feature matrix
            (size:(num_observation,num_features))
        win_size (int): The size of sliding window for local
            normalization. Default=301 which is around 3s if 100 Hz
            rate is considered (== 10ms frame stride)
        variance_normalization (bool): If the variance normalization
            should be performed or not.

    Return:
        array: The mean (or mean+variance) normalized feature vector.
    """
    # Get the shapes
    eps = 2**-30
    rows, cols = vec.shape

    # Windows size must be odd.
    assert isinstance(win_size, int), "Size must be of type 'int'!"
    assert win_size % 2 == 1, "Windows size must be odd!"

    # Padding and initial definitions
    pad_size = int((win_size - 1) / 2)
    vec_pad = np.lib.pad(vec, ((pad_size, pad_size), (0, 0)), 'symmetric')
    mean_subtracted = np.zeros(np.shape(vec), dtype=np.float32)

    for i in range(rows):
        window = vec_pad[i:i + win_size, :]
        window_mean = np.mean(window, axis=0)
        mean_subtracted[i, :] = vec[i, :] - window_mean

    # Variance normalization
    if variance_normalization:
        # Initial definitions.
        variance_normalized = np.zeros(np.shape(vec), dtype=np.float32)
        vec_pad_variance = np.lib.pad(
            mean_subtracted, ((pad_size, pad_size), (0, 0)), 'symmetric')

        # Looping over all observations.
        for i in range(rows):
            window = vec_pad_variance[i:i + win_size, :]
            window_variance = np.std(window, axis=0)
            variance_normalized[i, :] \
                = mean_subtracted[i, :] / (window_variance + eps)
        output = variance_normalized
    else:
        output = mean_subtracted

    return output
python
{ "resource": "" }
q20628
filterbanks
train
def filterbanks(
        num_filter,
        coefficients,
        sampling_freq,
        low_freq=None,
        high_freq=None):
    """Compute the Mel-filterbanks. Each filter will be stored in one row.
    The columns correspond to fft bins.

    Args:
        num_filter (int): the number of filters in the filterbank,
            default 20.
        coefficients (int): (fftpoints//2 + 1). Default is 257.
        sampling_freq (float): the samplerate of the signal we are
            working with. It affects mel spacing.
        low_freq (float): lowest band edge of mel filters, default 300 Hz
        high_freq (float): highest band edge of mel filters,
            default samplerate/2

    Returns:
        array: A numpy array of size num_filter x (fftpoints//2 + 1)
        which are filterbank
    """
    high_freq = high_freq or sampling_freq / 2
    low_freq = low_freq or 300
    s = "High frequency cannot be greater than half of the sampling frequency!"
    assert high_freq <= sampling_freq / 2, s
    assert low_freq >= 0, "low frequency cannot be less than zero!"

    # Computing the Mel filterbank
    # converting the upper and lower frequencies to Mels.
    # num_filter + 2 is because for num_filter filterbanks we need
    # num_filter + 2 points.
    mels = np.linspace(
        functions.frequency_to_mel(low_freq),
        functions.frequency_to_mel(high_freq),
        num_filter + 2)

    # we should convert Mels back to Hertz because the start and end-points
    # should be at the desired frequencies.
    hertz = functions.mel_to_frequency(mels)

    # The frequency resolution required to put filters at the
    # exact points calculated above should be extracted.
    # So we should round those frequencies to the closest FFT bin.
    freq_index = (
        np.floor(
            (coefficients + 1) * hertz / sampling_freq)).astype(int)

    # Initial definition
    filterbank = np.zeros([num_filter, coefficients])

    # The triangular function for each filter
    for i in range(0, num_filter):
        left = int(freq_index[i])
        middle = int(freq_index[i + 1])
        right = int(freq_index[i + 2])
        z = np.linspace(left, right, num=right - left + 1)
        filterbank[i, left:right + 1] = functions.triangle(
            z, left=left, middle=middle, right=right)

    return filterbank
python
{ "resource": "" }
q20629
extract_derivative_feature
train
def extract_derivative_feature(feature):
    """
    This function extracts temporal derivative features which are
    first and second derivatives.

    Args:
        feature (array): The feature vector which its size is: N x M

    Return:
        array: The feature cube vector which contains the static, first
        and second derivative features of size: N x M x 3
    """
    first_derivative_feature = processing.derivative_extraction(
        feature, DeltaWindows=2)
    second_derivative_feature = processing.derivative_extraction(
        first_derivative_feature, DeltaWindows=2)

    # Creating the feature cube for each file
    feature_cube = np.concatenate(
        (feature[:, :, None],
         first_derivative_feature[:, :, None],
         second_derivative_feature[:, :, None]),
        axis=2)
    return feature_cube
python
{ "resource": "" }
q20630
remove_umis
train
def remove_umis(adj_list, cluster, nodes):
    '''removes the specified nodes from the cluster and returns
    the remaining nodes'''
    # Collect the nodes to remove along with all of their neighbours
    # in the adjacency list.
    nodes_to_remove = set([node
                           for x in nodes
                           for node in adj_list[x]] + nodes)

    return cluster - nodes_to_remove
python
{ "resource": "" }
q20631
get_substr_slices
train
def get_substr_slices(umi_length, idx_size):
    '''
    Create slices to split a UMI into approximately equal size substrings
    Returns a list of tuples that can be passed to slice function
    '''
    cs, r = divmod(umi_length, idx_size)
    sub_sizes = [cs + 1] * r + [cs] * (idx_size - r)
    offset = 0
    slices = []
    for s in sub_sizes:
        slices.append((offset, offset + s))
        offset += s
    return slices
python
{ "resource": "" }
q20632
build_substr_idx
train
def build_substr_idx(umis, umi_length, min_edit):
    '''
    Build a dictionary of nearest neighbours using substrings, can be
    used to reduce the number of pairwise comparisons.
    '''
    substr_idx = collections.defaultdict(
        lambda: collections.defaultdict(set))
    slices = get_substr_slices(umi_length, min_edit + 1)
    for idx in slices:
        for u in umis:
            u_sub = u[slice(*idx)]
            substr_idx[idx][u_sub].add(u)
    return substr_idx
python
{ "resource": "" }
q20633
UMIClusterer._get_best_percentile
train
def _get_best_percentile(self, cluster, counts):
    ''' return all UMIs with counts >1% of the median counts in the cluster '''
    if len(cluster) == 1:
        return list(cluster)
    else:
        threshold = np.median(list(counts.values())) / 100
        return [read for read in cluster if counts[read] > threshold]
python
{ "resource": "" }
q20634
UMIClusterer._get_adj_list_adjacency
train
def _get_adj_list_adjacency(self, umis, counts, threshold):
    ''' identify all umis within hamming distance threshold '''
    adj_list = {umi: [] for umi in umis}
    if len(umis) > 25:
        umi_length = len(umis[0])
        substr_idx = build_substr_idx(umis, umi_length, threshold)
        iter_umi_pairs = iter_nearest_neighbours(umis, substr_idx)
    else:
        iter_umi_pairs = itertools.combinations(umis, 2)
    for umi1, umi2 in iter_umi_pairs:
        if edit_distance(umi1, umi2) <= threshold:
            adj_list[umi1].append(umi2)
            adj_list[umi2].append(umi1)
    return adj_list
python
{ "resource": "" }
q20635
UMIClusterer._group_unique
train
def _group_unique(self, clusters, adj_list, counts):
    ''' return groups for unique method '''
    if len(clusters) == 1:
        groups = [clusters]
    else:
        groups = [[x] for x in clusters]
    return groups
python
{ "resource": "" }
q20636
UMIClusterer._group_directional
train
def _group_directional(self, clusters, adj_list, counts):
    ''' return groups for directional method '''
    observed = set()
    groups = []
    for cluster in clusters:
        if len(cluster) == 1:
            groups.append(list(cluster))
            observed.update(cluster)
        else:
            cluster = sorted(cluster, key=lambda x: counts[x],
                             reverse=True)
            # need to remove any node which has already been observed
            temp_cluster = []
            for node in cluster:
                if node not in observed:
                    temp_cluster.append(node)
                    observed.add(node)
            groups.append(temp_cluster)
    return groups
python
{ "resource": "" }
q20637
UMIClusterer._group_adjacency
train
def _group_adjacency(self, clusters, adj_list, counts):
    ''' return groups for adjacency method '''
    groups = []
    for cluster in clusters:
        if len(cluster) == 1:
            groups.append(list(cluster))
        else:
            observed = set()
            lead_umis = self._get_best_min_account(cluster,
                                                   adj_list, counts)
            observed.update(lead_umis)
            for lead_umi in lead_umis:
                connected_nodes = set(adj_list[lead_umi])
                groups.append([lead_umi] +
                              list(connected_nodes - observed))
                observed.update(connected_nodes)
    return groups
python
{ "resource": "" }
q20638
UMIClusterer._group_cluster
train
def _group_cluster(self, clusters, adj_list, counts):
    ''' return groups for cluster or directional methods '''
    groups = []
    for cluster in clusters:
        groups.append(sorted(cluster, key=lambda x: counts[x],
                             reverse=True))
    return groups
python
{ "resource": "" }
q20639
UMIClusterer._group_percentile
train
def _group_percentile(self, clusters, adj_list, counts):
    ''' Return "groups" for the percentile method. Note that grouping
    isn't really compatible with the percentile method. This just returns
    the retained UMIs in a structure similar to other methods '''
    retained_umis = self._get_best_percentile(clusters, counts)
    groups = [[x] for x in retained_umis]
    return groups
python
{ "resource": "" }
q20640
CellClusterer._get_connected_components_adjacency
train
def _get_connected_components_adjacency(self, graph, counts):
    ''' find the connected UMIs within an adjacency dictionary '''
    found = set()
    components = list()

    for node in sorted(graph, key=lambda x: counts[x], reverse=True):
        if node not in found:
            # component = self.search(node, graph)
            component = breadth_first_search(node, graph)
            found.update(component)
            components.append(component)
    return components
python
{ "resource": "" }
q20641
getHeader
train
def getHeader():
    """return a header string with command line options and timestamp
    """
    system, host, release, version, machine = os.uname()
    return "# UMI-tools version: %s\n# output generated by %s\n# job started at %s on %s -- %s\n# pid: %i, system: %s %s %s %s" %\
           (__version__,
            " ".join(sys.argv),
            time.asctime(time.localtime(time.time())),
            host,
            global_id,
            os.getpid(),
            system, release,
            version, machine)
python
{ "resource": "" }
q20642
getParams
train
def getParams(options=None):
    """return a string containing script parameters.

    Parameters are all variables that start with ``param_``.
    """
    result = []
    if options:
        members = options.__dict__
        for k, v in sorted(members.items()):
            result.append("# %-40s: %s" % (k, str(v)))
    else:
        vars = inspect.currentframe().f_back.f_locals
        for var in filter(lambda x: re.match("param_", x), vars.keys()):
            result.append("# %-40s: %s" % (var, str(vars[var])))

    if result:
        return "\n".join(result)
    else:
        return "# no parameters."
python
{ "resource": "" }
q20643
getFooter
train
def getFooter():
    """return a footer string with timing information and job id.
    """
    return "# job finished in %i seconds at %s -- %s -- %s" %\
           (time.time() - global_starting_time,
            time.asctime(time.localtime(time.time())),
            " ".join(map(lambda x: "%5.2f" % x, os.times()[:4])),
            global_id)
python
{ "resource": "" }
q20644
validateExtractOptions
train
def validateExtractOptions(options):
    ''' Check the validity of the option combinations for barcode extraction '''

    if not options.pattern and not options.pattern2:
        if not options.read2_in:
            U.error("Must supply --bc-pattern for single-end")
        else:
            U.error("Must supply --bc-pattern and/or --bc-pattern2 "
                    "if paired-end ")

    if options.pattern2:
        if not options.read2_in:
            U.error("must specify a paired fastq ``--read2-in``")

    if not options.pattern2:
        options.pattern2 = options.pattern

    extract_cell = False
    extract_umi = False

    # If the pattern is a regex we can compile the regex(es) prior to
    # ExtractFilterAndUpdate instantiation
    if options.extract_method == "regex":
        if options.pattern:
            try:
                options.pattern = regex.compile(options.pattern)
            except regex.error:
                U.error("--bc-pattern '%s' is not a "
                        "valid regex" % options.pattern)

        if options.pattern2:
            try:
                options.pattern2 = regex.compile(options.pattern2)
            except regex.error:
                U.error("--bc-pattern2 '%s' is not a "
                        "valid regex" % options.pattern2)

    # check whether the regex contains umi group(s) and cell group(s)
    if options.extract_method == "regex":
        if options.pattern:
            for group in options.pattern.groupindex:
                if group.startswith("cell_"):
                    extract_cell = True
                elif group.startswith("umi_"):
                    extract_umi = True
        if options.pattern2:
            for group in options.pattern2.groupindex:
                if group.startswith("cell_"):
                    extract_cell = True
                elif group.startswith("umi_"):
                    extract_umi = True

    # check whether the pattern string contains umi/cell bases
    elif options.extract_method == "string":
        if options.pattern:
            if "C" in options.pattern:
                extract_cell = True
            if "N" in options.pattern:
                extract_umi = True
        if options.pattern2:
            if "C" in options.pattern2:
                extract_cell = True
            if "N" in options.pattern2:
                extract_umi = True

    if not extract_umi:
        if options.extract_method == "string":
            U.error("barcode pattern(s) do not include any umi bases "
                    "(marked with 'Ns') %s, %s" % (
                        options.pattern, options.pattern2))
        elif options.extract_method == "regex":
            U.error("barcode regex(es) do not include any umi groups "
                    "(starting with 'umi_') %s, %s" % (
                        options.pattern, options.pattern2))

    return extract_cell, extract_umi
python
{ "resource": "" }
q20645
Stop
train
def Stop():
    """stop the experiment.

    This method performs final book-keeping, closes the output streams
    and writes the final log messages indicating script completion.
    """
    if global_options.loglevel >= 1 and global_benchmark:
        t = time.time() - global_starting_time
        global_options.stdlog.write(
            "######### Time spent in benchmarked functions #########\n")
        global_options.stdlog.write("# function\tseconds\tpercent\n")
        for key, value in global_benchmark.items():
            global_options.stdlog.write(
                "# %s\t%6i\t%5.2f%%\n" %
                (key, value, (100.0 * float(value) / t)))
        global_options.stdlog.write(
            "#######################################################\n")

    if global_options.loglevel >= 1:
        global_options.stdlog.write(getFooter() + "\n")

    # close files
    if global_options.stdout != sys.stdout:
        global_options.stdout.close()
    # do not close log, otherwise an error occurs at exit
    # if global_options.stdlog != sys.stdout:
    #     global_options.stdlog.close()
    if global_options.stderr != sys.stderr:
        global_options.stderr.close()

    if global_options.timeit_file:
        outfile = open(global_options.timeit_file, "a")

        if global_options.timeit_header:
            outfile.write("\t".join(
                ("name", "wall", "user", "sys", "cuser", "csys",
                 "host", "system", "release", "machine",
                 "start", "end", "path", "cmd")) + "\n")

        csystem, host, release, version, machine = map(str, os.uname())
        uusr, usys, c_usr, c_sys = map(lambda x: "%5.2f" % x,
                                       os.times()[:4])
        t_end = time.time()
        c_wall = "%5.2f" % (t_end - global_starting_time)

        if sys.argv[0] == "run.py":
            cmd = global_args[0]
            if len(global_args) > 1:
                cmd += " '" + "' '".join(global_args[1:]) + "'"
        else:
            cmd = sys.argv[0]

        result = "\t".join((global_options.timeit_name,
                            c_wall, uusr, usys, c_usr, c_sys,
                            host, csystem, release, machine,
                            time.asctime(time.localtime(global_starting_time)),
                            time.asctime(time.localtime(t_end)),
                            os.path.abspath(os.getcwd()),
                            cmd)) + "\n"

        outfile.write(result)
        outfile.close()
python
{ "resource": "" }
q20646
getTempFile
train
def getTempFile(dir=None, shared=False, suffix=""):
    '''get a temporary file.

    The file is created and the caller needs to close and delete the
    temporary file once it is not used any more.

    Arguments
    ---------
    dir : string
        Directory of the temporary file and if not given is set to the
        default temporary location in the global configuration dictionary.
    shared : bool
        If set, the temporary file will be in a shared temporary
        location (given by the global configuration directory).
    suffix : string
        Filename suffix

    Returns
    -------
    file : File
        A file object of the temporary file.
    '''
    return tempfile.NamedTemporaryFile(dir=dir, delete=False,
                                       prefix="ctmp", suffix=suffix)
python
{ "resource": "" }
q20647
getTempFilename
train
def getTempFilename(dir=None, shared=False, suffix=""):
    '''return a temporary filename.

    The file is created and the caller needs to delete the temporary
    file once it is not used any more.

    Arguments
    ---------
    dir : string
        Directory of the temporary file and if not given is set to the
        default temporary location in the global configuration dictionary.
    shared : bool
        If set, the temporary file will be in a shared temporary
        location.
    suffix : string
        Filename suffix

    Returns
    -------
    filename : string
        Absolute pathname of temporary file.
    '''
    tmpfile = getTempFile(dir=dir, shared=shared, suffix=suffix)
    tmpfile.close()
    return tmpfile.name
python
{ "resource": "" }
q20648
get_gene_count_tab
train
def get_gene_count_tab(infile, bc_getter=None):
    ''' Yields the counts per umi for each gene

    bc_getter: method to get umi (plus optionally, cell barcode) from
    read, e.g get_umi_read_id or get_umi_tag

    TODO: ADD FOLLOWING OPTION
    skip_regex: skip genes matching this regex. Useful to ignore
    unassigned reads (as per get_bundles class above)
    '''
    gene = None
    counts = collections.Counter()

    for line in infile:
        values = line.strip().split("\t")

        assert len(values) == 2, "line: %s does not contain 2 columns" % line

        read_id, assigned_gene = values

        if assigned_gene != gene:
            if gene:
                yield gene, counts

            gene = assigned_gene
            counts = collections.defaultdict(collections.Counter)

        cell, umi = bc_getter(read_id)
        counts[cell][umi] += 1

    # yield final values
    yield gene, counts
python
{ "resource": "" }
q20649
metafetcher
train
def metafetcher(bamfile, metacontig2contig, metatag):
    ''' return reads in order of metacontigs '''
    for metacontig in metacontig2contig:
        for contig in metacontig2contig[metacontig]:
            for read in bamfile.fetch(contig):
                read.set_tag(metatag, metacontig)
                yield read
python
{ "resource": "" }
q20650
TwoPassPairWriter.write
train
def write(self, read, unique_id=None, umi=None, unmapped=False):
    '''Check if chromosome has changed since last time. If it has, scan
    for mates. Write the read to outfile and save the identity for paired
    end retrieval'''
    if unmapped or read.mate_is_unmapped:
        self.outfile.write(read)
        return

    if not self.chrom == read.reference_name:
        self.write_mates()
        self.chrom = read.reference_name

    key = read.query_name, read.next_reference_name, read.next_reference_start
    self.read1s.add(key)

    self.outfile.write(read)
python
{ "resource": "" }
q20651
TwoPassPairWriter.write_mates
train
def write_mates(self):
    '''Scan the current chromosome for matches to any of the reads stored
    in the read1s buffer'''
    if self.chrom is not None:
        U.debug("Dumping %i mates for contig %s" % (
            len(self.read1s), self.chrom))

    for read in self.infile.fetch(reference=self.chrom,
                                  multiple_iterators=True):
        if any((read.is_unmapped, read.mate_is_unmapped, read.is_read1)):
            continue

        key = read.query_name, read.reference_name, read.reference_start
        if key in self.read1s:
            self.outfile.write(read)
            self.read1s.remove(key)

    U.debug("%i mates remaining" % len(self.read1s))
python
{ "resource": "" }
q20652
TwoPassPairWriter.close
train
def close(self):
    '''Write mates for the remaining chromosome. Search for matches to
    any unmatched reads'''
    self.write_mates()
    U.info("Searching for mates for %i unmatched alignments" %
           len(self.read1s))

    found = 0
    for read in self.infile.fetch(until_eof=True, multiple_iterators=True):
        if read.is_unmapped:
            continue

        key = read.query_name, read.reference_name, read.reference_start
        if key in self.read1s:
            self.outfile.write(read)
            self.read1s.remove(key)
            found += 1
            continue

    U.info("%i mates never found" % len(self.read1s))
    self.outfile.close()
python
{ "resource": "" }
q20653
getErrorCorrectMapping
train
def getErrorCorrectMapping(cell_barcodes, whitelist, threshold=1):
    ''' Find the mappings between true and false cell barcodes based
    on an edit distance threshold. Any cell barcode within the threshold
    to more than one whitelist barcode will be excluded '''
    true_to_false = collections.defaultdict(set)

    whitelist = set([str(x).encode("utf-8") for x in whitelist])

    for cell_barcode in cell_barcodes:
        match = None
        barcode_in_bytes = str(cell_barcode).encode("utf-8")
        for white_cell in whitelist:
            if barcode_in_bytes in whitelist:  # don't check if whitelisted
                continue
            if edit_distance(barcode_in_bytes, white_cell) <= threshold:
                if match is not None:  # already matched one barcode
                    match = None  # set match back to None
                    break  # break and don't add to maps
                else:
                    match = white_cell.decode("utf-8")

        if match is not None:
            true_to_false[match].add(cell_barcode)

    return true_to_false
python
{ "resource": "" }
q20654
fastqIterate
train
def fastqIterate(infile):
    '''iterate over contents of fastq file.'''

    def convert2string(b):
        if type(b) == str:
            return b
        else:
            return b.decode("utf-8")

    while 1:
        line1 = convert2string(infile.readline())
        if not line1:
            break
        if not line1.startswith('@'):
            U.error("parsing error: expected '@' in line %s" % line1)
        line2 = convert2string(infile.readline())
        line3 = convert2string(infile.readline())
        if not line3.startswith('+'):
            U.error("parsing error: expected '+' in line %s" % line3)
        line4 = convert2string(infile.readline())
        # incomplete entry
        if not line4:
            U.error("incomplete entry for %s" % line1)

        yield Record(line1[1:-1], line2[:-1], line4[:-1])
python
{ "resource": "" }
q20655
Record.guessFormat
train
def guessFormat(self):
    '''return quality score format -
    might return several if ambiguous.'''
    c = [ord(x) for x in self.quals]
    mi, ma = min(c), max(c)
    r = []
    for entry_format, v in iteritems(RANGES):
        m1, m2 = v
        if mi >= m1 and ma < m2:
            r.append(entry_format)
    return r
python
{ "resource": "" }
q20656
random_read_generator.refill_random
train
def refill_random(self):
    ''' refill the list of random_umis '''
    self.random_umis = np.random.choice(
        list(self.umis.keys()), self.random_fill_size, p=self.prob)
    self.random_ix = 0
python
{ "resource": "" }
q20657
random_read_generator.fill
train
def fill(self):
    ''' parse the BAM to obtain the frequency for each UMI '''
    self.frequency2umis = collections.defaultdict(list)

    for read in self.inbam:
        if read.is_unmapped:
            continue
        if read.is_read2:
            continue
        self.umis[self.barcode_getter(read)[0]] += 1

    self.umis_counter = collections.Counter(self.umis)
    total_umis = sum(self.umis_counter.values())
    U.info("total_umis %i" % total_umis)
    U.info("#umis %i" % len(self.umis_counter))

    self.prob = self.umis_counter.values()
    sum_prob = sum(self.prob)
    self.prob = [float(x) / sum_prob for x in self.prob]
    self.refill_random()
python
{ "resource": "" }
q20658
random_read_generator.getUmis
train
def getUmis(self, n):
    ''' return n umis from the random_umis attr. '''
    if n < (self.random_fill_size - self.random_ix):
        barcodes = self.random_umis[self.random_ix: self.random_ix + n]
    else:
        # could use the end of the random_umis but
        # let's just make a new random_umis
        if n > self.random_fill_size:
            # ensure random_umis is long enough
            self.random_fill_size = n * 2
        self.refill_random()
        barcodes = self.random_umis[self.random_ix: self.random_ix + n]

    self.random_ix += n
    return barcodes
python
{ "resource": "" }
q20659
addBarcodesToIdentifier
train
def addBarcodesToIdentifier(read, UMI, cell):
    '''extract the identifier from a read and append the UMI and
    cell barcode before the first space'''
    read_id = read.identifier.split(" ")

    if cell == "":
        read_id[0] = read_id[0] + "_" + UMI
    else:
        read_id[0] = read_id[0] + "_" + cell + "_" + UMI

    identifier = " ".join(read_id)

    return identifier
python
{ "resource": "" }
q20660
extractSeqAndQuals
train
def extractSeqAndQuals(seq, quals, umi_bases, cell_bases, discard_bases,
                       retain_umi=False):
    '''Remove selected bases from seq and quals'''
    new_seq = ""
    new_quals = ""
    umi_quals = ""
    cell_quals = ""

    ix = 0
    for base, qual in zip(seq, quals):
        if (ix not in discard_bases) and (ix not in cell_bases):
            # if we are retaining the umi, this base is both seq and umi
            if retain_umi:
                new_quals += qual
                new_seq += base
                umi_quals += qual
            else:
                # base is either seq or umi
                if ix not in umi_bases:
                    new_quals += qual
                    new_seq += base
                else:
                    umi_quals += qual
        elif ix in cell_bases:
            cell_quals += qual

        ix += 1

    return new_seq, new_quals, umi_quals, cell_quals
python
{ "resource": "" }
q20661
get_below_threshold
train
def get_below_threshold(umi_quals, quality_encoding, quality_filter_threshold):
    ''' test whether the umi_quals are below the threshold '''
    umi_quals = [x - RANGES[quality_encoding][0] for x in map(ord, umi_quals)]
    below_threshold = [x < quality_filter_threshold for x in umi_quals]
    return below_threshold
python
{ "resource": "" }
q20662
umi_below_threshold
train
def umi_below_threshold(umi_quals, quality_encoding, quality_filter_threshold):
    ''' return true if any of the umi quals is below the threshold '''
    below_threshold = get_below_threshold(
        umi_quals, quality_encoding, quality_filter_threshold)
    return any(below_threshold)
python
{ "resource": "" }
q20663
mask_umi
train
def mask_umi(umi, umi_quals, quality_encoding, quality_filter_threshold):
    ''' Mask all positions where quals < threshold with "N" '''
    below_threshold = get_below_threshold(
        umi_quals, quality_encoding, quality_filter_threshold)
    new_umi = ""

    for base, test in zip(umi, below_threshold):
        if test:
            new_umi += "N"
        else:
            new_umi += base

    return new_umi
python
{ "resource": "" }
q20664
ExtractBarcodes
train
def ExtractBarcodes(read, match, extract_umi=False, extract_cell=False,
                    discard=False, retain_umi=False):
    '''Extract the cell and umi barcodes using a regex.match object

    inputs:

    - read = Record object
    - match = regex.match object
    - extract_umi and extract_cell = switches to determine whether these
      barcodes should be extracted
    - discard = is there a region(s) of the sequence which should be
      discarded entirely?
    - retain_umi = Should UMI sequence be retained on the read sequence

    returns:

    - cell_barcode = Cell barcode string
    - cell_barcode_quals = Cell barcode quality scores
    - umi = UMI barcode string.
    - umi_quals = UMI barcode quality scores
    - new_seq = Read1 sequence after extraction
    - new_quals = Read1 qualities after extraction

    Barcodes and qualities default to empty strings where extract_cell
    or extract_umi are false.
    '''
    cell_barcode, umi, cell_barcode_quals, umi_quals, new_seq, new_quals = ("",) * 6

    if not extract_cell and not extract_umi:
        U.error("must set either extract_cell and/or extract_umi to true")

    groupdict = match.groupdict()
    cell_bases = set()
    umi_bases = set()
    discard_bases = set()
    for k in sorted(list(groupdict)):
        span = match.span(k)
        if extract_cell and k.startswith("cell_"):
            cell_barcode += groupdict[k]
            cell_bases.update(range(span[0], span[1]))
        elif extract_umi and k.startswith("umi_"):
            umi += groupdict[k]
            umi_bases.update(range(span[0], span[1]))
        elif discard and k.startswith("discard_"):
            discard_bases.update(range(span[0], span[1]))

    new_seq, new_quals, umi_quals, cell_quals = extractSeqAndQuals(
        read.seq, read.quals, umi_bases, cell_bases,
        discard_bases, retain_umi)

    return (cell_barcode, cell_barcode_quals,
            umi, umi_quals,
            new_seq, new_quals)
python
{ "resource": "" }
q20665
ExtractFilterAndUpdate.maskQuality
train
def maskQuality(self, umi, umi_quals):
    '''mask low quality bases and return masked umi'''
    masked_umi = mask_umi(umi, umi_quals,
                          self.quality_encoding,
                          self.quality_filter_mask)
    if masked_umi != umi:
        self.read_counts['UMI masked'] += 1
        return masked_umi
    else:
        return umi
python
{ "resource": "" }
q20666
ExtractFilterAndUpdate.filterCellBarcode
train
def filterCellBarcode(self, cell):
    '''Filter out cell barcodes not in the whitelist, with
    optional cell barcode error correction'''
    if self.cell_blacklist and cell in self.cell_blacklist:
        self.read_counts['Cell barcode in blacklist'] += 1
        return None

    if cell not in self.cell_whitelist:
        if self.false_to_true_map:
            if cell in self.false_to_true_map:
                cell = self.false_to_true_map[cell]
                self.read_counts['False cell barcode. Error-corrected'] += 1
            else:
                self.read_counts['Filtered cell barcode. Not correctable'] += 1
                return None
        else:
            self.read_counts['Filtered cell barcode'] += 1
            return None

    if self.cell_blacklist and cell in self.cell_blacklist:
        self.read_counts['Cell barcode corrected to barcode blacklist'] += 1
        return None

    return cell
python
{ "resource": "" }
q20667
detect_bam_features
train
def detect_bam_features(bamfile, n_entries=1000):
    ''' read the first n entries in the bam file and identify the tags
    available for detecting multimapping '''
    inbam = pysam.Samfile(bamfile)
    inbam = inbam.fetch(until_eof=True)

    tags = ["NH", "X0", "XT"]
    available_tags = {x: 1 for x in tags}

    for n, read in enumerate(inbam):
        if n > n_entries:
            break
        if read.is_unmapped:
            continue
        else:
            for tag in tags:
                if not read.has_tag(tag):
                    available_tags[tag] = 0

    return available_tags
python
{ "resource": "" }
q20668
aggregateStatsDF
train
def aggregateStatsDF(stats_df):
    ''' return a dataframe with aggregated counts per UMI '''
    grouped = stats_df.groupby("UMI")
    agg_dict = {'counts': [np.median, len, np.sum]}

    agg_df = grouped.agg(agg_dict)
    agg_df.columns = ['median_counts', 'times_observed', 'total_counts']
    return agg_df
python
{ "resource": "" }
q20669
mason_morrow
train
def mason_morrow(target, throat_perimeter='throat.perimeter',
                 throat_area='throat.area'):
    r"""
    Mason and Morrow relate the capillary pressure to the shape factor
    in a similar way to Mortensen but for triangles.

    References
    ----------
    Mason, G. and Morrow, N.R.. Capillary behavior of a perfectly wetting
    liquid in irregular triangular tubes. Journal of Colloid and Interface
    Science, 141(1), pp.262-274 (1991).
    """
    # Find throats with zero (or negative) area; these receive the
    # perfect-circle value below.
    ts = target.throats()[target[throat_area] <= 0]
    P = target[throat_perimeter]
    A = target[throat_area]
    value = A / (P**2)
    value[ts] = 1 / (4 * _sp.pi)
    return value
python
{ "resource": "" }
q20670
jenkins_rao
train
def jenkins_rao(target, throat_perimeter='throat.perimeter',
                throat_area='throat.area',
                throat_diameter='throat.indiameter'):
    r"""
    Jenkins and Rao relate the capillary pressure in an elliptical throat
    to the aspect ratio.

    References
    ----------
    Jenkins, R.G. and Rao, M.B., The effect of elliptical pores on
    mercury porosimetry results. Powder technology, 38(2), pp.177-180.
    (1984)
    """
    P = target[throat_perimeter]
    A = target[throat_area]
    r = target[throat_diameter] / 2
    # Normalized by the value for a perfect circle
    value = (P / A) / (2 / r)
    return value
python
{ "resource": "" }
q20671
AdvectionDiffusion.set_outflow_BC
train
def set_outflow_BC(self, pores, mode='merge'):
    r"""
    Adds outflow boundary condition to the selected pores.

    Outflow condition simply means that the gradient of the solved
    quantity does not change, i.e. is 0.
    """
    # Hijack the parse_mode function to verify mode/pores argument
    mode = self._parse_mode(mode, allowed=['merge', 'overwrite', 'remove'],
                            single=True)
    pores = self._parse_indices(pores)

    # Calculating A[i,i] values to ensure the outflow condition
    network = self.project.network
    phase = self.project.phases()[self.settings['phase']]
    throats = network.find_neighbor_throats(pores=pores)
    C12 = network['throat.conns'][throats]
    P12 = phase[self.settings['pressure']][C12]
    gh = phase[self.settings['hydraulic_conductance']][throats]
    Q12 = -gh * np.diff(P12, axis=1).squeeze()
    Qp = np.zeros(self.Np)
    np.add.at(Qp, C12[:, 0], -Q12)
    np.add.at(Qp, C12[:, 1], Q12)

    # Store boundary values
    if ('pore.bc_outflow' not in self.keys()) or (mode == 'overwrite'):
        self['pore.bc_outflow'] = np.nan
    self['pore.bc_outflow'][pores] = Qp[pores]
python
{ "resource": "" }
q20672
PETScSparseLinearSolver._create_solver
train
def _create_solver(self):
    r"""
    This method creates the petsc sparse linear solver.
    """
    # http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/KSP/KSPType.html#KSPType
    iterative_solvers = [
        'richardson', 'chebyshev', 'cg', 'groppcg', 'pipecg', 'pipecgrr',
        'cgne', 'nash', 'stcg', 'gltr', 'fcg', 'pipefcg', 'gmres',
        'pipefgmres', 'fgmres', 'lgmres', 'dgmres', 'pgmres', 'tcqmr',
        'bcgs', 'ibcgs', 'fbcgs', 'fbcgsr', 'bcgsl', 'pipebcgs', 'cgs',
        'tfqmr', 'cr', 'pipecr', 'lsqr', 'preonly', 'qcg', 'bicg',
        'minres', 'symmlq', 'lcd', 'python', 'gcr', 'pipegcr', 'tsirm',
        'cgls', 'fetidp']

    # http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/PC/PCType.html#PCType
    preconditioners = [
        'none', 'jacobi', 'sor', 'lu', 'shell', 'bjacobi', 'mg',
        'eisenstat', 'ilu', 'icc', 'asm', 'gasm', 'ksp', 'composite',
        'redundant', 'spai', 'nn', 'cholesky', 'pbjacobi', 'mat', 'hypre',
        'parms', 'fieldsplit', 'tfs', 'ml', 'galerkin', 'exotic', 'cp',
        'bfbt', 'lsc', 'python', 'pfmg', 'syspfmg', 'redistribute', 'svd',
        'gamg', 'sacusp', 'sacusppoly', 'bicgstabcusp', 'ainvcusp',
        'chowiluviennacl', 'rowscalingviennacl', 'saviennacl', 'bddc',
        'kaczmarz', 'telescope']

    lu_direct_solvers = ['mumps', 'superlu_dist', 'umfpack', 'klu']

    cholesky_direct_solvers = ['mumps', 'cholmod']

    solver = self.settings['type']
    preconditioner = self.settings['preconditioner']

    if solver not in (iterative_solvers + lu_direct_solvers +
                      cholesky_direct_solvers):
        solver = 'cg'
        print('Warning: ' + self.settings['type'] + ' not available, ' +
              solver + ' used instead.')

    if preconditioner not in preconditioners:
        preconditioner = 'jacobi'
        print('Warning: ' + self.settings['preconditioner'] +
              ' not available, ' + preconditioner + ' used instead.')

    if solver in lu_direct_solvers:
        self.ksp = PETSc.KSP()
        self.ksp.create(PETSc.COMM_WORLD)
        self.ksp.getPC().setType('lu')
        self.ksp.getPC().setFactorSolverPackage(solver)
        self.ksp.setType('preonly')

    elif solver in cholesky_direct_solvers:
        self.ksp = PETSc.KSP()
        self.ksp.create(PETSc.COMM_WORLD)
        self.ksp.getPC().setType('cholesky')
        self.ksp.getPC().setFactorSolverPackage(solver)
        self.ksp.setType('preonly')

    elif solver in iterative_solvers:
        self.ksp = PETSc.KSP()
        self.ksp.create(PETSc.COMM_WORLD)
        self.ksp.getPC().setType(preconditioner)
        self.ksp.setType(solver)
        self.ksp.setTolerances(self.settings['atol'],
                               self.settings['rtol'],
                               self.settings['maxiter'])
python
{ "resource": "" }
q20673
PETScSparseLinearSolver.solve
train
def solve(self):
    r"""
    This method solves the sparse linear system, converts the solution
    vector from a PETSc.Vec instance to a numpy array, and finally
    destroys all the petsc objects to free memory.

    Parameters
    ----------
    solver_type : string, optional
        Default is the iterative solver 'cg' based on the Conjugate
        Gradient method.

    preconditioner_type : string, optional
        Default is the 'jacobi' preconditioner, i.e., diagonal scaling
        preconditioning. The preconditioner is used with iterative
        solvers. When a direct solver is used, this parameter is ignored.

    factorization_type : string, optional
        The factorization type used with the direct solver. Default is
        'lu'. This parameter is ignored when an iterative solver is used.

    Returns
    -------
    Returns a numpy array corresponding to the solution of the linear
    sparse system Ax = b.

    Notes
    -----
    Certain combinations of iterative solvers and preconditioners or
    direct solvers and factorization types are not supported. The
    summary table of the different possibilities can be found here:
    https://www.mcs.anl.gov/petsc/documentation/linearsolvertable.html
    """
    self._initialize_A()
    self._create_solver()
    self._initialize_b_x()

    # PETSc
    self.ksp.setOperators(self.petsc_A)
    self.ksp.setFromOptions()
    self.ksp.solve(self.petsc_b, self.petsc_x)

    # Convert solution vector from PETSc.Vec instance to a numpy array
    self.solution = PETSc.Vec.getArray(self.petsc_x)

    # Destroy petsc solver, coefficients matrix, rhs, and solution vectors
    PETSc.KSP.destroy(self.ksp)
    PETSc.Mat.destroy(self.petsc_A)
    PETSc.Vec.destroy(self.petsc_b)
    PETSc.Vec.destroy(self.petsc_x)

    return self.solution
python
{ "resource": "" }
q20674
StokesFlow.calc_effective_permeability
train
def calc_effective_permeability(self, inlets=None, outlets=None,
                                domain_area=None, domain_length=None):
    r"""
    This calculates the effective permeability in this linear transport
    algorithm.

    Parameters
    ----------
    inlets : array_like
        The pores where the inlet pressure boundary conditions were
        applied. If not given an attempt is made to infer them from the
        algorithm.

    outlets : array_like
        The pores where the outlet pressure boundary conditions were
        applied. If not given an attempt is made to infer them from the
        algorithm.

    domain_area : scalar, optional
        The area of the inlet (and outlet) boundary faces. If not given
        then an attempt is made to estimate it, but it is usually
        underestimated.

    domain_length : scalar, optional
        The length of the domain between the inlet and outlet boundary
        faces. If not given then an attempt is made to estimate it, but
        it is usually underestimated.

    Notes
    -----
    The area and length of the domain are found using the bounding box
    around the inlet and outlet pores which do not necessarily lie on
    the edge of the domain, resulting in underestimation of sizes.
    """
    phase = self.project.phases()[self.settings['phase']]
    d_normal = self._calc_eff_prop(inlets=inlets, outlets=outlets,
                                   domain_area=domain_area,
                                   domain_length=domain_length)
    K = d_normal * sp.mean(phase['pore.viscosity'])
    return K
python
{ "resource": "" }
q20675
InvasionPercolation.setup
train
def setup(self, phase, entry_pressure='', pore_volume='', throat_volume=''):
    r"""
    Set up the required parameters for the algorithm

    Parameters
    ----------
    phase : OpenPNM Phase object
        The phase to be injected into the Network. The Phase must have
        the capillary entry pressure values for the system.

    entry_pressure : string
        The dictionary key to the capillary entry pressure. If none is
        supplied then the current value is retained. The default is
        'throat.capillary_pressure'.

    pore_volume : string
        The dictionary key to the pore volume. If none is supplied then
        the current value is retained. The default is 'pore.volume'.

    throat_volume : string
        The dictionary key to the throat volume. If none is supplied
        then the current value is retained. The default is
        'throat.volume'.
    """
    self.settings['phase'] = phase.name
    if pore_volume:
        self.settings['pore_volume'] = pore_volume
    if throat_volume:
        self.settings['throat_volume'] = throat_volume
    if entry_pressure:
        self.settings['entry_pressure'] = entry_pressure

    # Setup arrays and info
    self['throat.entry_pressure'] = phase[self.settings['entry_pressure']]
    # Indices into t_entry giving a sorted list
    self['throat.sorted'] = sp.argsort(self['throat.entry_pressure'], axis=0)
    self['throat.order'] = 0
    self['throat.order'][self['throat.sorted']] = sp.arange(0, self.Nt)
    self['throat.invasion_sequence'] = -1
    self['pore.invasion_sequence'] = -1
    self._tcount = 0
python
{ "resource": "" }
q20676
Project.extend
train
def extend(self, obj):
    r"""
    This function is used to add objects to the project. Arguments can
    be single OpenPNM objects, an OpenPNM project list, or a plain list
    of OpenPNM objects.
    """
    if type(obj) is not list:
        obj = [obj]
    for item in obj:
        if hasattr(item, '_mro'):
            if 'GenericNetwork' in item._mro():
                if self.network:
                    raise Exception('Project already has a network')
            # Must use append since extend breaks the dicts up into
            # separate objects, while append keeps it as a single object.
            super().append(item)
        else:
            raise Exception('Only OpenPNM objects can be added')
python
{ "resource": "" }
q20677
Project.pop
train
def pop(self, index):
    r"""
    The object at the given index is removed from the list and returned.

    Notes
    -----
    This method uses ``purge_object`` to perform the actual removal of
    the object. It is recommended to just use that directly instead.

    See Also
    --------
    purge_object
    """
    obj = self[index]
    self.purge_object(obj, deep=False)
    return obj
python
{ "resource": "" }
q20678
Project.clear
train
def clear(self, objtype=[]):
    r"""
    Clears objects from the project entirely or selectively, depending
    on the received arguments.

    Parameters
    ----------
    objtype : list of strings
        A list containing the object type(s) to be removed. If no types
        are specified, then all objects are removed. To clear only
        objects of a specific type, use *'network'*, *'geometry'*,
        *'phase'*, *'physics'*, or *'algorithm'*. It's also possible to
        use abbreviations, like *'geom'*.
    """
    if len(objtype) == 0:
        super().clear()
    else:
        names = [obj.name for obj in self]
        for name in names:
            try:
                obj = self[name]
                for t in objtype:
                    if obj._isa(t):
                        self.purge_object(obj)
            except KeyError:
                pass
python
{ "resource": "" }
q20679
Project.copy
train
def copy(self, name=None):
    r"""
    Creates a deep copy of the current project

    A deep copy means that new, unique versions of all the objects are
    created but with identical data and properties.

    Parameters
    ----------
    name : string
        The name to give to the new project. If not supplied, a name is
        automatically generated.

    Returns
    -------
    A new Project object containing copies of all objects
    """
    if name is None:
        name = ws._gen_name()
    proj = deepcopy(self)
    ws[name] = proj
    return proj
python
{ "resource": "" }
q20680
Project.find_phase
train
def find_phase(self, obj):
    r"""
    Find the Phase associated with a given object.

    Parameters
    ----------
    obj : OpenPNM Object
        Can either be a Physics or Algorithm object

    Returns
    -------
    An OpenPNM Phase object.

    Raises
    ------
    If no Phase object can be found, then an Exception is raised.
    """
    # If received phase, just return self
    if obj._isa('phase'):
        return obj
    # If phase happens to be in settings (i.e. algorithm), look it up
    if 'phase' in obj.settings.keys():
        phase = self.phases()[obj.settings['phase']]
        return phase
    # Otherwise find it using bottom-up approach (i.e. look in phase keys)
    for phase in self.phases().values():
        if ('pore.' + obj.name in phase) or ('throat.' + obj.name in phase):
            return phase
    # If all else fails, throw an exception
    raise Exception('Cannot find a phase associated with ' + obj.name)
python
{ "resource": "" }
q20681
Project.find_geometry
train
def find_geometry(self, physics):
    r"""
    Find the Geometry associated with a given Physics

    Parameters
    ----------
    physics : OpenPNM Physics Object
        Must be a Physics object

    Returns
    -------
    An OpenPNM Geometry object

    Raises
    ------
    If no Geometry object can be found, then an Exception is raised.
    """
    # If geometry happens to be in settings, look it up directly
    if 'geometry' in physics.settings.keys():
        geom = self.geometries()[physics.settings['geometry']]
        return geom
    # Otherwise, use the bottom-up approach
    for geo in self.geometries().values():
        if physics in self.find_physics(geometry=geo):
            return geo
    # If all else fails, throw an exception
    raise Exception('Cannot find a geometry associated with ' + physics.name)
python
{ "resource": "" }
q20682
Project.find_full_domain
train
def find_full_domain(self, obj):
    r"""
    Find the full-domain object associated with a given object.

    For a Geometry the Network is returned, for a Physics the Phase is
    returned, and any other object, which is already defined over the
    full domain, is returned as itself.

    Parameters
    ----------
    obj : OpenPNM Object
        Can be any object

    Returns
    -------
    An OpenPNM object
    """
    if 'Subdomain' not in obj._mro():
        # Network, Phase, Alg
        return obj
    else:
        if obj._isa() == 'geometry':
            # Geom
            return self.network
        else:
            # Phys
            return self.find_phase(obj)
python
{ "resource": "" }
q20683
Project.save_object
train
def save_object(self, obj):
    r"""
    Saves the given object to a file

    Parameters
    ----------
    obj : OpenPNM object
        The object to be saved. Depending on the object type, the file
        extension will be one of 'net', 'geo', 'phase', 'phys' or 'alg'.
    """
    if not isinstance(obj, list):
        obj = [obj]
    for item in obj:
        filename = item.name + '.' + item.settings['prefix']
        with open(filename, 'wb') as f:
            pickle.dump({item.name: item}, f)
python
{ "resource": "" }
q20684
Project.load_object
train
def load_object(self, filename):
    r"""
    Loads a single object from a file

    Parameters
    ----------
    filename : string or path object
        The name of the file containing the saved object. The file
        extension is used to determine the type of object to create.
    """
    p = Path(filename)
    with open(p, 'rb') as f:
        d = pickle.load(f)
    obj = self._new_object(objtype=p.suffix.strip('.'),
                           name=p.name.split('.')[0])
    obj.update(d)
python
{ "resource": "" }
q20685
Project._dump_data
train
def _dump_data(self, mode=['props']):
    r"""
    Dump data from all objects in project to an HDF5 file. Note that
    'pore.coords', 'throat.conns', 'pore.all', 'throat.all', and all
    labels pertaining to the linking of objects are kept.

    Parameters
    ----------
    mode : string or list of strings
        The type of data to be dumped to the HDF5 file. The default is
        'props' only. Options are:

        **'props'** : Numerical data such as 'pore.diameter'.

        **'labels'** : Boolean data that are used as labels. Since this
        is boolean data it does not consume large amounts of memory and
        probably does not need to be dumped.

    See Also
    --------
    _fetch_data

    Notes
    -----
    In principle, after data is dumped to an HDF5 file, it should
    physically stay there until it's called upon. This lets users manage
    the data as if it's in memory, even though it isn't. This behavior
    has not been confirmed yet, which is why these functions are hidden.
    """
    with h5py.File(self.name + '.hdf5') as f:
        for obj in self:
            for key in list(obj.keys()):
                tempname = obj.name + '|' + '_'.join(key.split('.'))
                arr = obj[key]
                if 'U' in str(obj[key][0].dtype):
                    pass
                elif 'all' in key.split('.'):
                    pass
                else:
                    f.create_dataset(name='/' + tempname, shape=arr.shape,
                                     dtype=arr.dtype, data=arr)
    for obj in self:
        obj.clear(mode=mode)
python
{ "resource": "" }
q20686
Project._fetch_data
train
def _fetch_data(self):
    r"""
    Retrieve data from an HDF5 file and place onto correct objects in
    the project

    See Also
    --------
    _dump_data

    Notes
    -----
    In principle, after data is fetched from an HDF5 file, it should
    physically stay there until it's called upon. This lets users manage
    the data as if it's in memory, even though it isn't. This behavior
    has not been confirmed yet, which is why these functions are hidden.
    """
    with h5py.File(self.name + '.hdf5') as f:
        # Reload data into project
        for item in f.keys():
            obj_name, propname = item.split('|')
            propname = propname.split('_')
            propname = propname[0] + '.' + '_'.join(propname[1:])
            self[obj_name][propname] = f[item]
python
{ "resource": "" }
q20687
Project.check_geometry_health
train
def check_geometry_health(self):
    r"""
    Perform a check to find pores with overlapping or undefined
    Geometries

    Returns
    -------
    A HealthDict
    """
    health = HealthDict()
    health['overlapping_pores'] = []
    health['undefined_pores'] = []
    health['overlapping_throats'] = []
    health['undefined_throats'] = []
    geoms = self.geometries().keys()
    if len(geoms):
        net = self.network
        Ptemp = np.zeros((net.Np,))
        Ttemp = np.zeros((net.Nt,))
        for item in geoms:
            Pind = net['pore.' + item]
            Tind = net['throat.' + item]
            Ptemp[Pind] = Ptemp[Pind] + 1
            Ttemp[Tind] = Ttemp[Tind] + 1
        health['overlapping_pores'] = np.where(Ptemp > 1)[0].tolist()
        health['undefined_pores'] = np.where(Ptemp == 0)[0].tolist()
        health['overlapping_throats'] = np.where(Ttemp > 1)[0].tolist()
        health['undefined_throats'] = np.where(Ttemp == 0)[0].tolist()
    return health
python
{ "resource": "" }
q20688
Project.check_physics_health
train
def check_physics_health(self, phase):
    r"""
    Perform a check to find pores which have overlapping or missing
    Physics

    Parameters
    ----------
    phase : OpenPNM Phase object
        The Phase whose Physics should be checked

    Returns
    -------
    A HealthDict
    """
    health = HealthDict()
    health['overlapping_pores'] = []
    health['undefined_pores'] = []
    health['overlapping_throats'] = []
    health['undefined_throats'] = []
    geoms = self.geometries().keys()
    if len(geoms):
        phys = self.find_physics(phase=phase)
        if len(phys) == 0:
            raise Exception(str(len(geoms)) + ' geometries were found, but'
                            + ' no physics')
        if None in phys:
            raise Exception('Undefined physics found, check the grid')
        Ptemp = np.zeros((phase.Np,))
        Ttemp = np.zeros((phase.Nt,))
        for item in phys:
            Pind = phase['pore.' + item.name]
            Tind = phase['throat.' + item.name]
            Ptemp[Pind] = Ptemp[Pind] + 1
            Ttemp[Tind] = Ttemp[Tind] + 1
        health['overlapping_pores'] = np.where(Ptemp > 1)[0].tolist()
        health['undefined_pores'] = np.where(Ptemp == 0)[0].tolist()
        health['overlapping_throats'] = np.where(Ttemp > 1)[0].tolist()
        health['undefined_throats'] = np.where(Ttemp == 0)[0].tolist()
    return health
python
{ "resource": "" }
q20689
Project._regenerate_models
train
def _regenerate_models(self, objs=[], propnames=[]):
    r"""
    Can be used to regenerate models across all objects in the project.

    Parameters
    ----------
    objs : list of OpenPNM objects
        Can be used to specify which specific objects to regenerate. The
        default is to regenerate all objects. If a subset of objects is
        given, this function ensures they are regenerated in a sensible
        order, such that phases are done before any physics objects.

    propnames : list of strings, or string
        The specific model to regenerate. If none are given then ALL
        models on all objects are regenerated. If a subset is given,
        then only objects that have a corresponding model are
        regenerated, to avoid any problems. This means that a single
        model can be given, without specifying the objects.
    """
    objs = list(objs)
    if objs == []:
        objs = self
    if type(propnames) is str:
        propnames = [propnames]
    # Sort objs in the correct order (geom, phase, phys)
    net = [i for i in objs if i is self.network]
    geoms = [i for i in objs if i in self.geometries().values()]
    phases = [i for i in objs if i in self.phases().values()]
    phys = [i for i in objs if i in self.physics().values()]
    objs = net + geoms + phases + phys
    for obj in objs:
        if len(propnames):
            for model in propnames:
                if model in obj.models.keys():
                    obj.regenerate_models(propnames=model)
        else:
            obj.regenerate_models()
python
{ "resource": "" }
q20690
Porosimetry.set_partial_filling
train
def set_partial_filling(self, propname):
    r"""
    Define which pore filling model to apply.

    Parameters
    ----------
    propname : string
        Dictionary key on the physics object(s) containing the pore
        filling model(s) to apply.

    Notes
    -----
    It is assumed that these models are functions of the `quantity`
    specified in the algorithm's settings. This value is applied to the
    corresponding phase just prior to regenerating the given pore-scale
    model(s).
    """
    if propname.startswith('pore'):
        self.settings['pore_partial_filling'] = propname
    if propname.startswith('throat'):
        self.settings['throat_partial_filling'] = propname
python
{ "resource": "" }
q20691
generic_function
train
def generic_function(target, prop, func, **kwargs):
    r"""
    Runs an arbitrary function on the given data

    This allows users to place a customized calculation into the
    automated model regeneration pipeline.

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls
        the length of the calculated array, and also provides access to
        other necessary properties.

    prop : string
        The dictionary key containing the array to be operated on

    func : Numpy function
        A handle to the function to apply

    kwargs : keyword arguments
        All arguments required by the specific Numpy function

    Examples
    --------
    The following example shows how to use a Numpy function, but any
    function can be used, as long as it returns an array object:

    >>> import openpnm as op
    >>> import numpy as np
    >>> pn = op.network.Cubic(shape=[5, 5, 5])
    >>> geo = op.geometry.GenericGeometry(network=pn, pores=pn.Ps,
    ...                                   throats=pn.Ts)
    >>> geo['pore.rand'] = np.random.rand(geo.Np)
    >>> geo.add_model(propname='pore.cos',
    ...               model=op.models.misc.generic_function,
    ...               func=np.cos,
    ...               prop='pore.rand')
    """
    values = target[prop]
    result = func(values, **kwargs)
    if not isinstance(result, np.ndarray):
        logger.warning('Given function must return a Numpy array')
    return result
python
{ "resource": "" }
q20692
product
train
def product(target, prop1, prop2, **kwargs):
    r"""
    Calculates the product of multiple property values

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls
        the length of the calculated array, and also provides access to
        other necessary properties.

    prop1 : string
        The name of the first argument

    prop2 : string
        The name of the second argument

    Notes
    -----
    Additional properties can be specified beyond just ``prop1`` and
    ``prop2`` by including additional arguments in the function call
    (i.e. ``prop3 = 'pore.foo'``).
    """
    value = target[prop1] * target[prop2]
    for item in kwargs.values():
        value *= target[item]
    return value
python
{ "resource": "" }
q20693
random
train
def random(target, element, seed=None, num_range=[0, 1]):
    r"""
    Create an array of random numbers of a specified size.

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls
        the length of the calculated array, and also provides access to
        other necessary properties.

    element : string
        Either 'pore' or 'throat', indicating whether the values should
        be generated for pores or for throats.

    seed : int
        The starting seed value to send to Scipy's random number
        generator. The default is None, which means a different
        distribution is returned each time the model is run.

    num_range : list
        A two element list indicating the low and high end of the
        returned numbers.
    """
    range_size = num_range[1] - num_range[0]
    range_min = num_range[0]
    if seed is not None:
        np.random.seed(seed)
    value = np.random.rand(target._count(element),)
    value = value * range_size + range_min
    return value
python
{ "resource": "" }
q20694
linear
train
def linear(target, m, b, prop):
    r"""
    Calculates a property as a linear function of a given property

    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated. This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.

    m, b : floats
        Slope and intercept of the linear correlation

    prop : string
        The dictionary key containing the independent variable or phase
        property to be used in the correlation.
    """
    x = target[prop]
    value = m * x + b
    return value
python
{ "resource": "" }
q20695
polynomial
train
def polynomial(target, a, prop, **kwargs):
    r"""
    Calculates a property as a polynomial function of a given property

    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated. This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.

    a : array_like
        A list containing the polynomial coefficients, where element 0
        in the list corresponds to a0 and so on. Note that no entries
        can be skipped so 0 coefficients must be sent as 0.

    prop : string
        The dictionary key containing the independent variable or phase
        property to be used in the polynomial.
    """
    x = target[prop]
    value = 0.0
    for i in range(0, len(a)):
        value += a[i] * x**i
    return value
python
{ "resource": "" }
q20696
generic_distribution
train
def generic_distribution(target, seeds, func):
    r"""
    Accepts an 'rv_frozen' object from the Scipy.stats submodule and
    returns values from the distribution for the given seeds

    This uses the ``ppf`` method of the stats object

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls
        the length of the calculated array, and also provides access to
        other necessary properties.

    seeds : string, optional
        The dictionary key on the Geometry object containing random seed
        values (between 0 and 1) to use in the statistical distribution.

    func : object
        An 'rv_frozen' object from the Scipy.stats library with all of
        the parameters pre-specified.

    Examples
    --------
    The following code illustrates the process of obtaining a 'frozen'
    Scipy stats object and adding it as a model:

    >>> import scipy
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[3, 3, 3])
    >>> geo = op.geometry.GenericGeometry(network=pn, pores=pn.Ps,
    ...                                   throats=pn.Ts)
    >>> geo.add_model(propname='pore.seed',
    ...               model=op.models.geometry.pore_seed.random)

    Now retrieve the stats distribution and add to ``geo`` as a model:

    >>> stats_obj = scipy.stats.weibull_min(c=2, scale=.0001, loc=0)
    >>> geo.add_model(propname='pore.size',
    ...               model=op.models.geometry.pore_size.generic_distribution,
    ...               seeds='pore.seed',
    ...               func=stats_obj)

    >>> import matplotlib.pyplot as plt
    >>> fig = plt.hist(stats_obj.ppf(q=scipy.rand(1000)), bins=50)
    """
    seeds = target[seeds]
    value = func.ppf(seeds)
    return value
python
{ "resource": "" }
q20697
from_neighbor_throats
train
def from_neighbor_throats(target, throat_prop='throat.seed', mode='min'):
    r"""
    Adopt a value from the values found in neighboring throats

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls
        the length of the calculated array, and also provides access to
        other necessary properties.

    throat_prop : string
        The dictionary key of the array containing the throat property
        to be used in the calculation. The default is 'throat.seed'.

    mode : string
        Controls how the pore property is calculated. Options are 'min',
        'max' and 'mean'.
    """
    prj = target.project
    network = prj.network
    lookup = prj.find_full_domain(target)
    Ps = lookup.map_pores(target.pores(), target)
    data = lookup[throat_prop]
    neighborTs = network.find_neighbor_throats(pores=Ps,
                                               flatten=False,
                                               mode='or')
    values = np.ones((np.shape(Ps)[0],)) * np.nan
    if mode == 'min':
        for pore in range(len(Ps)):
            values[pore] = np.amin(data[neighborTs[pore]])
    if mode == 'max':
        for pore in range(len(Ps)):
            values[pore] = np.amax(data[neighborTs[pore]])
    if mode == 'mean':
        for pore in range(len(Ps)):
            values[pore] = np.mean(data[neighborTs[pore]])
    return values
python
{ "resource": "" }
q20698
from_neighbor_pores
train
def from_neighbor_pores(target, pore_prop='pore.seed', mode='min'):
    r"""
    Adopt a value based on the values in neighboring pores

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls
        the length of the calculated array, and also provides access to
        other necessary properties.

    pore_prop : string
        The dictionary key to the array containing the pore property to
        be used in the calculation. Default is 'pore.seed'.

    mode : string
        Controls how the throat property is calculated. Options are
        'min', 'max' and 'mean'.
    """
    prj = target.project
    network = prj.network
    throats = network.map_throats(target.throats(), target)
    P12 = network.find_connected_pores(throats)
    lookup = prj.find_full_domain(target)
    pvalues = lookup[pore_prop][P12]
    if mode == 'min':
        value = np.amin(pvalues, axis=1)
    if mode == 'max':
        value = np.amax(pvalues, axis=1)
    if mode == 'mean':
        value = np.mean(pvalues, axis=1)
    return value
python
{ "resource": "" }
q20699
spatially_correlated
train
def spatially_correlated(target, weights=None, strel=None):
    r"""
    Generates pore seeds that are spatially correlated with their
    neighbors.

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls
        the length of the calculated array, and also provides access to
        other necessary properties.

    weights : list of ints, optional
        The [Nx, Ny, Nz] distances (in number of pores) in each
        direction that should be correlated.

    strel : array_like, optional (in place of weights)
        This option allows full control over the spatial correlation
        pattern by specifying the structuring element to be used in the
        convolution. The array should be a 3D array containing the
        strength of correlations in each direction. Nonzero values
        indicate the strength, direction and extent of correlations.
        The following would achieve a basic correlation in the
        z-direction:

        strel = sp.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
                          [[0, 0, 0], [1, 1, 1], [0, 0, 0]],
                          [[0, 0, 0], [0, 0, 0], [0, 0, 0]]])

    Notes
    -----
    This approach uses image convolution to replace each pore seed in
    the geometry with a weighted average of those around it. It then
    converts the new seeds back to a random distribution by assuming
    the new seeds are normally distributed.

    Because it uses image analysis tools, it only works on Cubic
    networks.

    This is the approach used by Gostick et al [2]_ to create an
    anisotropic gas diffusion layer for fuel cell electrodes.

    References
    ----------
    .. [2] J. Gostick et al, Pore network modeling of fibrous gas
           diffusion layers for polymer electrolyte membrane fuel cells.
           J Power Sources v173, pp277-290 (2007)

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[10, 10, 10])
    >>> Ps, Ts = pn.Ps, pn.Ts
    >>> geom = op.geometry.GenericGeometry(network=pn, pores=Ps, throats=Ts)
    >>> mod = op.models.geometry.pore_seed.spatially_correlated
    >>> geom.add_model(propname='pore.seed', model=mod, weights=[2, 2, 2])
    """
    import scipy.ndimage as spim
    network = target.project.network
    # The following will only work on Cubic networks
    x = network._shape[0]
    y = network._shape[1]
    z = network._shape[2]
    im = _sp.rand(x, y, z)
    if strel is None:
        # Then generate a strel
        if sum(weights) == 0:
            # If weights of 0 are sent, then skip everything and
            # return rands.
            return im.flatten()
        w = _sp.array(weights)
        strel = _sp.zeros(w*2 + 1)
        strel[:, w[1], w[2]] = 1
        strel[w[0], :, w[2]] = 1
        strel[w[0], w[1], :] = 1
    im = spim.convolve(im, strel)
    # Convolution is no longer randomly distributed, so fit a gaussian
    # and find its seeds
    im = (im - _sp.mean(im))/_sp.std(im)
    im = 1/2*_sp.special.erfc(-im/_sp.sqrt(2))
    values = im.flatten()
    values = values[network.pores(target.name)]
    return values
python
{ "resource": "" }