_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q30600
Encoder._get_codepage_char_map
train
def _get_codepage_char_map(self, encoding): """ Get codepage character map Process an encoding and return a map of UTF-characters to code points in this encoding. This is generated once only, and returned from a cache. :param encoding: The name of the encoding. """ # Skip things that were loaded previously if encoding in self.available_characters: return self.available_characters[encoding] codepage_char_list = self._get_codepage_char_list(encoding) codepage_char_map = dict((utf8, i + 128) for (i, utf8) in enumerate(codepage_char_list)) self.available_characters[encoding] = codepage_char_map return codepage_char_map
python
{ "resource": "" }
q30601
Encoder.can_encode
train
def can_encode(self, encoding, char):
    """Determine if a character is encodeable in the given code page.

    :param encoding: The name of the encoding.
    :param char: The character to attempt to encode.
    """
    try:
        available_map = self._get_codepage_char_map(encoding)
    except LookupError:
        # Unknown encoding: nothing can be encoded in it
        return False
    # ASCII always encodes; otherwise the code page must define the char
    return ord(char) < 128 or char in available_map
python
{ "resource": "" }
q30602
Encoder._encode_char
train
def _encode_char(char, charmap, defaultchar): """ Encode a single character with the given encoding map :param char: char to encode :param charmap: dictionary for mapping characters in this code page """ if ord(char) < 128: return ord(char) if char in charmap: return charmap[char] return ord(defaultchar)
python
{ "resource": "" }
q30603
Encoder.encode
train
def encode(self, text, encoding, defaultchar='?'):
    """Encode *text* into bytes under the given encoding.

    :param text: Text to encode
    :param encoding: Encoding name to use (must be defined in capabilities)
    :param defaultchar: Fallback for non-encodable characters
    """
    charmap = self._get_codepage_char_map(encoding)
    encoded = bytearray()
    for char in text:
        encoded.append(self._encode_char(char, charmap, defaultchar))
    return bytes(encoded)
python
{ "resource": "" }
q30604
MagicEncode.force_encoding
train
def force_encoding(self, encoding):
    """Set a fixed encoding (or clear it). The change is emitted right away.

    While a fixed encoding is set, this buffer will not switch code pages
    automatically anymore, though it still keeps track of the current one.

    :param encoding: encoding name to pin; a falsy value re-enables
        automatic code-page switching.
    """
    if not encoding:
        self.disabled = False
    else:
        # Emit the code-page switch immediately, then lock it in
        self.write_with_encoding(encoding, None)
        self.disabled = True
python
{ "resource": "" }
q30605
MagicEncode.write
train
def write(self, text):
    """Write the text, automatically switching encodings.

    If a fixed encoding is active (``self.disabled``), everything goes out
    under that encoding. Otherwise the text is written greedily: as much as
    possible under the current encoding, then the encoder is asked for a
    suitable code page for each remaining character.
    """
    if self.disabled:
        self.write_with_encoding(self.encoding, text)
        return

    # See how far we can go into the text with the current encoding
    to_write, text = split_writable_text(self.encoder, text, self.encoding)
    if to_write:
        self.write_with_encoding(self.encoding, to_write)

    while text:
        # See if any of the code pages that the printer profile
        # supports can encode this character.
        encoding = self.encoder.find_suitable_encoding(text[0])
        if not encoding:
            # No code page handles it: delegate to the failure handler
            # and skip the character.
            self._handle_character_failed(text[0])
            text = text[1:]
            continue

        # Write as much text as possible with the encoding found.
        to_write, text = split_writable_text(self.encoder, text, encoding)
        if to_write:
            self.write_with_encoding(encoding, to_write)
python
{ "resource": "" }
q30606
DocxTemplate.replace_media
train
def replace_media(self, src_file, dst_file):
    """Replace one media file by another one inside the docx.

    Mainly useful because images cannot be added directly into docx
    headers/footers: put a dummy picture in the header/footer, then pass
    the dummy and its replacement to this function.

    Syntax: tpl.replace_media('dummy_media_to_replace.png','media_to_paste.jpg')

    Note: for images, the aspect ratio will be the same as the replaced image
    Note2: the source media file must be available, as its CRC is used to
    locate it inside the docx.
    """
    crc = self.get_file_crc(src_file)
    with open(dst_file, 'rb') as fh:
        self.crc_to_new_media[crc] = fh.read()
python
{ "resource": "" }
q30607
DocxTemplate.replace_embedded
train
def replace_embedded(self, src_file, dst_file):
    """Replace one embedded object by another one inside the docx.

    Mainly useful because objects cannot be added directly into docx
    headers/footers: put a dummy object there, then pass the dummy and its
    replacement to this function.

    Syntax: tpl.replace_embedded('dummy_doc.docx','doc_to_paste.docx')

    Note2: the source file must be available, as its CRC is used to locate
    it inside the docx.
    """
    crc = self.get_file_crc(src_file)
    with open(dst_file, 'rb') as fh:
        self.crc_to_new_embedded[crc] = fh.read()
python
{ "resource": "" }
q30608
DocxTemplate.build_pic_map
train
def build_pic_map(self):
    """Searches in docx template all the xml pictures tag and store them in pic_map dict"""
    # Nothing to do unless replacements were registered
    if self.pic_to_replace:
        # Main document
        part=self.docx.part
        self.pic_map.update(self._img_filename_to_part(part))
        # Header/Footer parts are separate relationships of the main part
        for relid, rel in six.iteritems(self.docx.part.rels):
            if rel.reltype in (REL_TYPE.HEADER,REL_TYPE.FOOTER):
                self.pic_map.update(self._img_filename_to_part(rel.target_part))
python
{ "resource": "" }
q30609
ConstanceConfig.create_perm
train
def create_perm(self, using=None, *args, **kwargs):
    """
    Creates a fake content type and permission
    to be able to check for permissions

    :param using: database alias the permission should be created in; when
        CONSTANCE_DBS is configured, aliases outside it are skipped.
    """
    # Imports are deferred so this runs only after Django app setup
    from django.conf import settings
    from django.contrib.auth.models import Permission
    from django.contrib.contenttypes.models import ContentType

    constance_dbs = getattr(settings, 'CONSTANCE_DBS', None)
    if constance_dbs is not None and using not in constance_dbs:
        return

    # Only create the rows when both apps are actually installed
    if ContentType._meta.installed and Permission._meta.installed:
        content_type, created = ContentType.objects.using(using).get_or_create(
            app_label='constance',
            model='config',
        )
        permission, created = Permission.objects.using(using).get_or_create(
            content_type=content_type,
            codename='change_config',
            defaults={'name': 'Can change config'})
python
{ "resource": "" }
q30610
check_fieldsets
train
def check_fieldsets(*args, **kwargs):
    """
    A Django system check to make sure that, if defined,
    CONFIG_FIELDSETS accounts for every entry in settings.CONFIG.

    Returns a list with one Warning naming the missing fields, or an
    empty list when the settings are consistent.
    """
    if hasattr(settings, "CONFIG_FIELDSETS") and settings.CONFIG_FIELDSETS:
        inconsistent_fieldnames = get_inconsistent_fieldnames()
        if inconsistent_fieldnames:
            return [
                checks.Warning(
                    _(
                        "CONSTANCE_CONFIG_FIELDSETS is missing "
                        "field(s) that exists in CONSTANCE_CONFIG."
                    ),
                    hint=", ".join(sorted(inconsistent_fieldnames)),
                    obj="settings.CONSTANCE_CONFIG",
                    id="constance.E001",
                )
            ]
    return []
python
{ "resource": "" }
q30611
get_inconsistent_fieldnames
train
def get_inconsistent_fieldnames():
    """
    Returns a set of keys from settings.CONFIG that are not accounted for in
    settings.CONFIG_FIELDSETS.
    If there are no fieldnames in settings.CONFIG_FIELDSETS, returns an empty set.
    """
    field_names = set()
    for fieldset_title, fields_list in settings.CONFIG_FIELDSETS.items():
        field_names.update(fields_list)
    if not field_names:
        # BUG FIX: previously returned {} (an empty *dict*), not a set as
        # the documented contract states
        return set()
    return set(settings.CONFIG.keys()) - field_names
python
{ "resource": "" }
q30612
FCMDeviceQuerySet.send_message
train
def send_message(
        self, title=None, body=None, icon=None, data=None, sound=None,
        badge=None, api_key=None, **kwargs):
    """
    Send notification for all active devices in queryset
    and deactivate if DELETE_INACTIVE_DEVICES setting is set to True.

    Returns the bulk-send result dict, or a one-element failure list when
    the queryset contains no active devices.
    """
    if self:
        # Deferred import to avoid circulars at module load
        from .fcm import fcm_send_bulk_message

        registration_ids = list(self.filter(active=True).values_list(
            'registration_id', flat=True
        ))
        if len(registration_ids) == 0:
            # No active device: report everything in the queryset as a failure
            return [{'failure': len(self), 'success': 0}]

        result = fcm_send_bulk_message(
            registration_ids=registration_ids,
            title=title,
            body=body,
            icon=icon,
            data=data,
            sound=sound,
            badge=badge,
            api_key=api_key,
            **kwargs
        )

        # Deactivate/delete devices FCM reported errors for
        self._deactivate_devices_with_error_results(
            registration_ids, result['results']
        )

        return result
python
{ "resource": "" }
q30613
FCMDeviceQuerySet.send_data_message
train
def send_data_message(
        self, api_key=None, condition=None, collapse_key=None,
        delay_while_idle=False, time_to_live=None,
        restricted_package_name=None, low_priority=False, dry_run=False,
        data_message=None, content_available=None, timeout=5,
        json_encoder=None):
    """
    Send data messages for all active devices in queryset
    and deactivate if DELETE_INACTIVE_DEVICES setting is set to True.

    Returns the bulk-send result dict, or a one-element failure list when
    the queryset contains no active devices.
    """
    if self:
        # Deferred import to avoid circulars at module load
        from .fcm import fcm_send_bulk_data_messages

        registration_ids = list(self.filter(active=True).values_list(
            'registration_id', flat=True
        ))
        if len(registration_ids) == 0:
            # No active device: report everything in the queryset as a failure
            return [{'failure': len(self), 'success': 0}]

        result = fcm_send_bulk_data_messages(
            api_key=api_key,
            registration_ids=registration_ids,
            condition=condition,
            collapse_key=collapse_key,
            delay_while_idle=delay_while_idle,
            time_to_live=time_to_live,
            restricted_package_name=restricted_package_name,
            low_priority=low_priority,
            dry_run=dry_run,
            data_message=data_message,
            content_available=content_available,
            timeout=timeout,
            json_encoder=json_encoder,
        )

        # Deactivate/delete devices FCM reported errors for
        self._deactivate_devices_with_error_results(
            registration_ids, result['results']
        )

        return result
python
{ "resource": "" }
q30614
AbstractFCMDevice.send_message
train
def send_message(
        self, title=None, body=None, icon=None, data=None, sound=None,
        badge=None, api_key=None, **kwargs):
    """
    Send single notification message to this device and deactivate the
    device if FCM reports an error for it.

    Returns the FCM result dict.
    """
    # Deferred import to avoid circulars at module load
    from .fcm import fcm_send_message
    result = fcm_send_message(
        registration_id=str(self.registration_id),
        title=title,
        body=body,
        icon=icon,
        data=data,
        sound=sound,
        badge=badge,
        api_key=api_key,
        **kwargs
    )

    self._deactivate_device_on_error_result(result)

    return result
python
{ "resource": "" }
q30615
AbstractFCMDevice.send_data_message
train
def send_data_message(
        self, condition=None, collapse_key=None, delay_while_idle=False,
        time_to_live=None, restricted_package_name=None, low_priority=False,
        dry_run=False, data_message=None, content_available=None,
        api_key=None, timeout=5, json_encoder=None):
    """
    Send single data message to this device and deactivate the device if
    FCM reports an error for it.

    Returns the FCM result dict.
    """
    # Deferred import to avoid circulars at module load
    from .fcm import fcm_send_single_device_data_message
    result = fcm_send_single_device_data_message(
        registration_id=str(self.registration_id),
        condition=condition,
        collapse_key=collapse_key,
        delay_while_idle=delay_while_idle,
        time_to_live=time_to_live,
        restricted_package_name=restricted_package_name,
        low_priority=low_priority,
        dry_run=dry_run,
        data_message=data_message,
        content_available=content_available,
        api_key=api_key,
        timeout=timeout,
        json_encoder=json_encoder,
    )

    self._deactivate_device_on_error_result(result)

    return result
python
{ "resource": "" }
q30616
sma
train
def sma(arg, n):
    """Simple moving average.

    If ``n`` is 0 the life-to-date (expanding) mean is returned; otherwise
    the ``n``-period rolling mean requiring ``n`` observations per window.
    """
    if n:
        return pd.rolling_mean(arg, n, min_periods=n)
    return pd.expanding_mean(arg)
python
{ "resource": "" }
q30617
rsi
train
def rsi(arg, n):
    """Compute the RSI (Relative Strength Index) for the given arg.

    :param arg: Series or DataFrame (a DataFrame is processed column by
        column via recursion)
    :param n: lookback period used for the Wilder moving averages
    """
    if isinstance(arg, pd.DataFrame):
        # Apply per-column and reassemble in original column order
        cols = [(name, rsi(arg[name], n)) for name in arg.columns]
        return pd.DataFrame.from_items(cols)
    else:
        assert isinstance(arg, pd.Series)
        n = int(n)
        converted = arg.dropna()
        change = converted.diff()
        # Split the change series into gains and (absolute) losses
        gain = change.apply(lambda c: c > 0 and c or 0)
        avgGain = wilderma(gain, n)
        loss = change.apply(lambda c: c < 0 and abs(c) or 0)
        avgLoss = wilderma(loss, n)

        result = avgGain / avgLoss
        result[result == np.inf] = 100.  # divide by zero
        result = 100. - (100. / (1. + result))
        # Restore any NaN positions dropped at the start
        return pd.Series(result, index=converted.index).reindex(arg.index)
python
{ "resource": "" }
q30618
send_outlook_email
train
def send_outlook_email(to, subject, body, attachments=None, cc=None, bcc=None, is_html=0):
    """Send an email using your local outlook client.

    :param to: recipient address or list of addresses
    :param subject: message subject
    :param body: message body (plain text, or HTML when is_html is truthy)
    :param attachments: optional list of file paths to attach
    :param cc: optional CC address or list of addresses
    :param bcc: optional BCC address or list of addresses
    :param is_html: if truthy, send the body as HTML
    """
    import win32com.client

    # Normalize a single address to a one-element list (py2 basestring check)
    asarr = lambda v: None if not v else isinstance(v, basestring) and [v] or v

    def update_recipients(robj, users, type):
        # BUG FIX: previously normalized `to` instead of the `users`
        # argument, so cc/bcc lists were silently replaced by `to`
        users = asarr(users)
        if users:
            for u in users:
                r = robj.Add(u)
                r.Type = type

    outlook = win32com.client.gencache.EnsureDispatch("Outlook.Application")
    mapi = outlook.GetNamespace("MAPI")
    constants = win32com.client.constants
    msg = outlook.CreateItem(0)

    # setup the recipients
    recipients = msg.Recipients
    to and update_recipients(recipients, to, constants.olTo)
    cc and update_recipients(recipients, cc, constants.olCC)
    bcc and update_recipients(recipients, bcc, constants.olBCC)
    recipients.ResolveAll()

    msg.Subject = subject
    if is_html:
        msg.BodyFormat = constants.olFormatHTML
        msg.HTMLBody = body
    else:
        msg.Body = body

    # Explicit loop instead of map() so attachments are added under py3 too
    for fpath in (attachments or []):
        msg.Attachments.Add(fpath)
    msg.Send()
python
{ "resource": "" }
q30619
WinSCPBatch.add_uploads
train
def add_uploads(self, filemap):
    """Add the dict of uploads.

    Parameters
    ----------
    filemap: dict, (remote_filename -> local_filename)
    """
    for remote_name, local_name in filemap.iteritems():
        self.add_upload(remote_name, local_name)
python
{ "resource": "" }
q30620
return_on_initial_capital
train
def return_on_initial_capital(capital, period_pl, leverage=None):
    """Return the per-period return series implied by a fixed starting capital.

    :param capital: initial capital; must be a positive number
    :param period_pl: Series of per-period P&L amounts
    :param leverage: optional multiplier applied to the P&L (defaults to 1)
    :return: Series of per-period returns
    :raises ValueError: if capital is not positive
    """
    if capital <= 0:
        # BUG FIX: message previously said 'cost' though the parameter is 'capital'
        raise ValueError('capital must be a positive number not %s' % capital)
    leverage = leverage or 1.
    # End-of-period equity from the cumulative (levered) P&L
    eod = capital + (leverage * period_pl.cumsum())
    # Life-to-date returns, then difference into per-period returns
    ltd_rets = (eod / capital) - 1.
    dly_rets = ltd_rets
    dly_rets.iloc[1:] = (1. + ltd_rets).pct_change().iloc[1:]
    return dly_rets
python
{ "resource": "" }
q30621
_listWrapOn
train
def _listWrapOn(F, availWidth, canv, mergeSpace=1, obj=None, dims=None):
    '''return max width, required height for a list of flowables F

    Space-before/space-after between consecutive flowables is merged when
    mergeSpace is true. If obj is given, its _spaceBefore/_spaceAfter are
    set from the first/last flowable. If dims is a list, each flowable's
    (w, h) is appended to it.
    '''
    doct = getattr(canv, '_doctemplate', None)
    cframe = getattr(doct, 'frame', None)
    if cframe:
        from reportlab.platypus.doctemplate import _addGeneratedContent, Indenter
        # Work on a deep copy of the frame so generated content during
        # wrapping does not pollute the live document frame.
        doct_frame = cframe
        from copy import deepcopy
        cframe = doct.frame = deepcopy(doct_frame)
        cframe._generated_content = None
        del cframe._generated_content
    try:
        W = 0
        H = 0
        pS = 0  # pending space-after from the previous flowable
        atTop = 1
        F = F[:]  # copy: the loop consumes the list
        while F:
            f = F.pop(0)
            if hasattr(f, 'frameAction'):
                from reportlab.platypus.doctemplate import Indenter
                if isinstance(f, Indenter):
                    availWidth -= f.left + f.right
                continue
            w, h = f.wrapOn(canv, availWidth, 0xfffffff)
            if dims is not None:
                dims.append((w, h))
            if cframe:
                _addGeneratedContent(F, cframe)
            if w <= fl._FUZZ or h <= fl._FUZZ:
                continue
            #
            # THE HACK: track natural width, not clipped to availWidth
            #
            W = max(W, w)
            H += h
            if not atTop:
                h = f.getSpaceBefore()
                if mergeSpace:
                    if getattr(f, '_SPACETRANSFER', False):
                        h = pS
                    h = max(h - pS, 0)
                H += h
            else:
                if obj is not None:
                    obj._spaceBefore = f.getSpaceBefore()
                atTop = 0
            s = f.getSpaceAfter()
            if getattr(f, '_SPACETRANSFER', False):
                s = pS
            pS = s
            H += pS
        if obj is not None:
            obj._spaceAfter = pS
        # The last space-after is not part of the required height
        return W, H - pS
    finally:
        if cframe:
            # Always restore the original live frame
            doct.frame = doct_frame
python
{ "resource": "" }
q30622
Positions.plot_rets
train
def plot_rets(self, ls=1, ax=None):
    """Plot each of the position returns as a scatter of pid vs return.

    :param ls: if truthy, positions are broken into long/short series
    :param ax: Axes to draw on (defaults to the current axes)
    :return: the Axes drawn on
    """
    import matplotlib.pyplot as plt
    from tia.util.mplot import AxesFormat
    if ax is None:
        ax = plt.gca()
    frame = self.frame
    if not ls:
        ax.scatter(frame.index, frame.ret, c='k', marker='o', label='All')
    else:
        # Long positions in black, short positions in red
        if len(self.long_pids) > 0:
            lframe = frame.ix[frame.index.isin(self.long_pids)]
            ax.scatter(lframe.index, lframe.ret, c='k', marker='o', label='Long')
        if len(self.short_pids) > 0:
            sframe = frame.ix[frame.index.isin(self.short_pids)]
            ax.scatter(sframe.index, sframe.ret, c='r', marker='o', label='Short')
    # set some boundaries
    AxesFormat().Y.percent().apply()
    ax.set_xlim(0, frame.index.max() + 3)
    ax.set_xlabel('pid')
    ax.set_ylabel('return')
    ax.legend(loc='upper left')
    return ax
python
{ "resource": "" }
q30623
Positions.plot_ret_range
train
def plot_ret_range(self, ax=None, ls=0, dur=0):
    """Plot each position's return with a vertical line marking the min/max
    of its life-to-date transaction-level series.

    :param ax: Axes to draw on (defaults to the current axes)
    :param ls: if truthy, split markers into long/short positions
    :param dur: if truthy, scale marker size by position duration
    :return: the Axes drawn on
    """
    import matplotlib.pyplot as plt
    from tia.util.mplot import AxesFormat
    if ax is None:
        ax = plt.gca()
    frame = self.frame
    pids = frame.index
    min_rets = pd.Series([self[pid].performance.ltd_txn.min() for pid in pids], index=pids)
    max_rets = pd.Series([self[pid].performance.ltd_txn.max() for pid in pids], index=pids)
    if not ls:
        s = frame.duration + 20 if dur else 20
        ax.scatter(frame.index, frame.ret, s=s, c='k', marker='o', label='All')
        ax.vlines(pids, min_rets, max_rets)
    else:
        if len(self.long_pids) > 0:
            lframe = frame.ix[frame.index.isin(self.long_pids)]
            s = lframe.duration + 20 if dur else 20
            ax.scatter(lframe.index, lframe.ret, s=s, c='k', marker='o', label='Long')
            # BUG FIX: was max_rets[frame.index] (all pids), which mismatched
            # the long-only min_rets selection
            ax.vlines(lframe.index, min_rets[lframe.index], max_rets[lframe.index])
        if len(self.short_pids) > 0:
            sframe = frame.ix[frame.index.isin(self.short_pids)]
            s = sframe.duration + 20 if dur else 20
            ax.scatter(sframe.index, sframe.ret, s=s, c='r', marker='o', label='Short')
            ax.vlines(sframe.index, min_rets[sframe.index], max_rets[sframe.index])
    AxesFormat().Y.percent().apply()
    ax.axhline(color='k', linestyle='--')
    ax.set_xlim(0, frame.index.max() + 3)
    ax.set_xlabel('pid')
    ax.set_ylabel('return')
    ax.legend(loc='upper left')
    return ax
python
{ "resource": "" }
q30624
PositionsStats.consecutive_frame
train
def consecutive_frame(self):
    """Return a DataFrame with columns cnt, pids, pl, is_win — one row per
    run of consecutive winners/losers. cnt is the number of pids in the
    sequence, pl is the pl sum, is_win flags winning runs."""
    if self._frame.empty:
        return pd.DataFrame(columns=['pids', 'pl', 'cnt', 'is_win'])
    else:
        # 1 where the position is a winner (ret >= 0), 0 otherwise
        vals = (self._frame[PC.RET] >= 0).astype(int)
        # Increment the group id each time the win/lose state flips,
        # giving one group per consecutive run
        seq = (vals.shift(1) != vals).astype(int).cumsum()

        def _do_apply(sub):
            return pd.Series({
                'pids': sub.index.values,
                'pl': sub[PC.PL].sum(),
                'cnt': len(sub.index),
                'is_win': sub[PC.RET].iloc[0] >= 0,
            })

        return self._frame.groupby(seq).apply(_do_apply)
python
{ "resource": "" }
q30625
ProfitAndLossDetails.asfreq
train
def asfreq(self, freq):
    """Resample the p&l at the specified frequency.

    :param freq: pandas frequency string; 'B' is handled specially by
        grouping on calendar date so intraday rows collapse to one
        business-day row
    :return: a new ProfitAndLossDetails at the requested frequency
    """
    frame = self.frame
    if freq == 'B':
        resampled = frame.groupby(frame.index.date).apply(lambda f: f.sum())
        # groupby(date) yields date objects; convert back to a DatetimeIndex
        resampled.index = pd.DatetimeIndex([i for i in resampled.index])
        return ProfitAndLossDetails(resampled)
    else:
        resampled = frame.resample(freq, how='sum')
        return ProfitAndLossDetails(resampled)
python
{ "resource": "" }
q30626
GridHelper.get_axes
train
def get_axes(self, idx):
    """Return the axes at flat position *idx*, treating the 2-D axes array
    as row-major.

    :param idx: non-negative flat index into the grid
    """
    # BUG FIX: use floor division (divmod) so the row index stays an int
    # under Python 3 — true division produced a float and broke indexing
    ridx, cidx = divmod(idx, self.ncols)
    return self.axarr[ridx][cidx]
python
{ "resource": "" }
q30627
periodicity
train
def periodicity(freq_or_frame):
    """ resolve the number of periods per year

    Accepts a pandas offset (anything with a rule_code), a frequency
    string, or a Series/DataFrame whose index frequency is resolved (or
    inferred/guessed) and then looked up recursively.
    """
    if hasattr(freq_or_frame, 'rule_code'):
        # pandas offset object: strip any anchor suffix (e.g. 'W-MON' -> 'W')
        rc = freq_or_frame.rule_code
        rc = rc.split('-')[0]
        factor = PER_YEAR_MAP.get(rc, None)
        if factor is not None:
            # scale by the offset multiple, e.g. '2W' is half as frequent
            return factor / abs(freq_or_frame.n)
        else:
            raise Exception('Failed to determine periodicity. No factor mapping for %s' % freq_or_frame)
    elif isinstance(freq_or_frame, basestring):
        factor = PER_YEAR_MAP.get(freq_or_frame, None)
        if factor is not None:
            return factor
        else:
            raise Exception('Failed to determine periodicity. No factor mapping for %s' % freq_or_frame)
    elif isinstance(freq_or_frame, (pd.Series, pd.DataFrame, pd.TimeSeries)):
        freq = freq_or_frame.index.freq
        if not freq:
            freq = pd.infer_freq(freq_or_frame.index)
            if freq:
                return periodicity(freq)
            else:
                # Attempt to resolve it
                import warnings
                freq = guess_freq(freq_or_frame.index)
                warnings.warn('frequency not set. guessed it to be %s' % freq)
                return periodicity(freq)
        else:
            return periodicity(freq)
    else:
        raise ValueError("periodicity expects DataFrame, Series, or rule_code property")
python
{ "resource": "" }
q30628
_resolve_periods_in_year
train
def _resolve_periods_in_year(scale, frame):
    """ Convert the scale to an annualization factor. If scale is None then
    attempt to resolve from frame. If scale is a scalar then use it. If scale
    is a string then use it to lookup the annual factor

    :raises ValueError: when scale is none of the above
    """
    if scale is None:
        return periodicity(frame)
    elif isinstance(scale, basestring):
        return periodicity(scale)
    elif np.isscalar(scale):
        return scale
    else:
        raise ValueError("scale must be None, scalar, or string, not %s" % type(scale))
python
{ "resource": "" }
q30629
returns_cumulative
train
def returns_cumulative(returns, geometric=True, expanding=False):
    """Cumulative return of a return series.

    Parameters
    ----------
    returns : DataFrame or Series
    geometric : bool, default True
        If True, link the period returns geometrically; otherwise sum them.
    expanding : bool, default False
        If True, return the expanding cumulative series/frame.
        If False, return only the final value(s).
    """
    if geometric:
        grown = 1. + returns
        return (grown.cumprod() - 1.) if expanding else (grown.prod() - 1.)
    return returns.cumsum() if expanding else returns.sum()
python
{ "resource": "" }
q30630
rolling_returns_cumulative
train
def rolling_returns_cumulative(returns, window, min_periods=1, geometric=True):
    """ return the rolling cumulative returns

    Parameters
    ----------
    returns : DataFrame or Series
    window : number of observations
    min_periods : minimum number of observations in a window
    geometric : link the returns geometrically
    """
    # Non-finite values (NaN/inf) are excluded from each window's computation
    if geometric:
        rc = lambda x: (1. + x[np.isfinite(x)]).prod() - 1.
    else:
        rc = lambda x: (x[np.isfinite(x)]).sum()

    return pd.rolling_apply(returns, window, rc, min_periods=min_periods)
python
{ "resource": "" }
q30631
returns_annualized
train
def returns_annualized(returns, geometric=True, scale=None, expanding=False):
    """Annualized cumulative returns.

    Parameters
    ----------
    returns : DataFrame or Series
    geometric : link the returns geometrically when True, else use the
        arithmetic mean
    scale : None, scalar, or string (e.g. 12 for months in year).
        If None, resolved from the returns; if a string, passed to the
        periodicity lookup.
    expanding : bool, default False
        If True, return expanding series/frames; if False, the final result.
    """
    ann_factor = _resolve_periods_in_year(scale, returns)
    if geometric:
        if expanding:
            n = pd.expanding_count(returns)
            return ((1. + returns).cumprod() ** (ann_factor / n)) - 1.
        n = returns.count()
        return ((1. + returns).prod() ** (ann_factor / n)) - 1.
    if expanding:
        return pd.expanding_mean(returns) * ann_factor
    return returns.mean() * ann_factor
python
{ "resource": "" }
q30632
information_ratio
train
def information_ratio(rets, bm_rets, scale=None, expanding=False):
    """Information ratio, a common measure of manager efficiency, evaluates
    excess returns over a benchmark versus tracking error.

    :param rets: period returns
    :param bm_rets: periodic benchmark returns (not annualized)
    :param scale: None or the scale to be used for annualization
    :param expanding: if True, compute an expanding series
    :return: annualized excess return divided by annualized tracking error
    """
    scale = _resolve_periods_in_year(scale, rets)
    rets_ann = returns_annualized(rets, scale=scale, expanding=expanding)
    # BUG FIX: previously annualized `rets` again here, so the excess
    # return was always zero
    bm_rets_ann = returns_annualized(bm_rets, scale=scale, expanding=expanding)
    tracking_error_ann = std_annualized((rets - bm_rets), scale=scale, expanding=expanding)
    return (rets_ann - bm_rets_ann) / tracking_error_ann
python
{ "resource": "" }
q30633
rolling_percentileofscore
train
def rolling_percentileofscore(series, window, min_periods=None):
    """Compute the score percentile for the specified window.

    Each output value is the percentile rank of the window's last
    observation relative to the rest of the window. NaNs are dropped before
    rolling and the result is reindexed back to the original index.
    """
    import scipy.stats as stats

    def _percentile(arr):
        score = arr[-1]
        vals = arr[:-1]
        return stats.percentileofscore(vals, score)

    notnull = series.dropna()
    min_periods = min_periods or window
    if notnull.empty:
        return pd.Series(np.nan, index=series.index)
    else:
        return pd.rolling_apply(notnull, window, _percentile, min_periods=min_periods).reindex(series.index)
python
{ "resource": "" }
q30634
PdfBuilder.new_title_bar
train
def new_title_bar(self, title, color=None): """Return an array of Pdf Objects which constitute a Header""" # Build a title bar for top of page w, t, c = '100%', 2, color or HexColor('#404040') title = '<b>{0}</b>'.format(title) if 'TitleBar' not in self.stylesheet: tb = ParagraphStyle('TitleBar', parent=self.stylesheet['Normal'], fontName='Helvetica-Bold', fontSize=10, leading=10, alignment=TA_CENTER) self.stylesheet.add(tb) return [HRFlowable(width=w, thickness=t, color=c, spaceAfter=2, vAlign='MIDDLE', lineCap='square'), self.new_paragraph(title, 'TitleBar'), HRFlowable(width=w, thickness=t, color=c, spaceBefore=2, vAlign='MIDDLE', lineCap='square')]
python
{ "resource": "" }
q30635
PdfBuilder.build_page
train
def build_page(self, template_id, flowable_map):
    """Build a pdf page by looking up the specified template and then
    mapping the flowable_map items to the appropriate named Frame.

    :param template_id: id of a registered page template
    :param flowable_map: dict of frame id -> Flowable or iterable of Flowables
    :return: self (fluent interface)
    """
    pt = self.get_page_template(template_id)
    # If this is the first page then ensure the page template is ordered first and no breaks or changes
    # are requested otherwise blank page shows up
    if self.active_template_id is None:
        self.make_template_first(template_id)
        self.story.append(NextPageTemplate(template_id))
        self.inc_cover and self.story.append(PageBreak())
        self.active_template_id = template_id
    elif self.active_template_id == template_id:
        # TODO - understand why this is necessary to not get a blank page between pages
        self.story.append(PageBreak())
    else:
        self.story.append(NextPageTemplate(template_id))
        self.story.append(PageBreak())
        self.active_template_id = template_id

    for idx, frame in enumerate(pt.frames):
        if frame.id not in flowable_map:
            # Add a note to the template to show that nothing was defined for this area
            self.story.append(Paragraph('NOT DEFINED: %s' % frame.id, getSampleStyleSheet()['Normal']))
        else:
            flowables = flowable_map[frame.id]
            # Accept either a single Flowable or an iterable of them
            if not isinstance(flowables, Flowable) and hasattr(flowables, '__iter__'):
                [self.story.append(f) for f in flowables]
            else:
                self.story.append(flowables)
        # Move to the next named frame except after the last one
        if idx < (len(pt.frames) - 1):
            self.story.append(FrameBreak())
    return self
python
{ "resource": "" }
q30636
PdfBuilder.table_formatter
train
def table_formatter(self, dataframe, inc_header=1, inc_index=1):
    """Create a TableFormatter for *dataframe*, saving callers the need to
    import the class themselves.

    :param dataframe: DataFrame to format
    :param inc_header: include the column header row
    :param inc_index: include the index column
    """
    formatter = TableFormatter(dataframe, inc_header=inc_header, inc_index=inc_index)
    return formatter
python
{ "resource": "" }
q30637
PortfolioPricer.get_mkt_val
train
def get_mkt_val(self, pxs=None):
    """Return the market value series for the specified Series of pxs
    (defaults to the stored closing prices)."""
    if pxs is None:
        pxs = self._closing_pxs
    return pxs * self.multiplier
python
{ "resource": "" }
q30638
InstrumentPrices.volatility
train
def volatility(self, n, freq=None, which='close', ann=True, model='ln', min_periods=1, rolling='simple'):
    """Return the annualized volatility series. N is the number of lookback periods.

    :param n: int, number of lookback periods
    :param freq: resample frequency or None
    :param which: price series to use
    :param ann: If True then annualize
    :param model: {'ln', 'pct', 'bbg'}
            ln - use logarithmic price changes
            pct - use pct price changes
            bbg - use logarithmic price changes but Bloomberg uses actual business days
    :param min_periods: minimum observations required per window
    :param rolling: {'simple', 'exp'}, if exp, use ewmstd. if simple, use rolling_std
    :return: volatility Series
    """
    if model not in ('bbg', 'ln', 'pct'):
        raise ValueError('model must be one of (bbg, ln, pct), not %s' % model)
    if rolling not in ('simple', 'exp'):
        raise ValueError('rolling must be one of (simple, exp), not %s' % rolling)

    px = self.frame[which]
    px = px if not freq else px.resample(freq, how='last')
    if model == 'bbg' and periods_in_year(px) == 252:
        # Bloomberg uses business days, so need to convert and reindex
        orig = px.index
        px = px.resample('B').ffill()
        chg = np.log(px / px.shift(1))
        # Mark the dates added by the business-day fill as missing
        chg[chg.index - orig] = np.nan
        if rolling == 'simple':
            vol = pd.rolling_std(chg, n, min_periods=min_periods).reindex(orig)
        else:
            vol = pd.ewmstd(chg, span=n, min_periods=n)
        # Bloomberg convention annualizes with 260 days
        return vol if not ann else vol * np.sqrt(260)
    else:
        chg = px.pct_change() if model == 'pct' else np.log(px / px.shift(1))
        if rolling == 'simple':
            vol = pd.rolling_std(chg, n, min_periods=min_periods)
        else:
            vol = pd.ewmstd(chg, span=n, min_periods=n)
        return vol if not ann else vol * np.sqrt(periods_in_year(vol))
python
{ "resource": "" }
q30639
Instrument.get_mkt_val
train
def get_mkt_val(self, pxs=None):
    """Return the market value series for the series of pxs (defaults to
    the instrument's close prices)."""
    if pxs is None:
        pxs = self.pxs.close
    return pxs * self.multiplier
python
{ "resource": "" }
q30640
Instrument.get_eod_frame
train
def get_eod_frame(self):
    """Return the end-of-day market data frame used for pricing, with
    'close', 'mkt_val' and 'dvds' columns indexed by 'date'."""
    close = self.pxs.close
    df = pd.DataFrame({'close': close,
                       'mkt_val': self.get_mkt_val(close),
                       'dvds': self.pxs.dvds})
    df.index.name = 'date'
    return df
python
{ "resource": "" }
q30641
Instrument.truncate
train
def truncate(self, before=None, after=None):
    """Return an Instrument whose price history is limited to the window
    [before, after]; returns self unchanged when no trimming is needed."""
    pxframe = self.pxs.frame
    same_start = before is None or before == pxframe.index[0]
    same_end = after is None or after == pxframe.index[-1]
    if same_start and same_end:
        return self
    trimmed = pxframe.truncate(before, after)
    return Instrument(self.sid, InstrumentPrices(trimmed), multiplier=self.multiplier)
python
{ "resource": "" }
q30642
insert_level
train
def insert_level(df, label, level=0, copy=0, axis=0, level_name=None):
    """Insert *label* as a new index level, producing a MultiIndex.

    :param df: DataFrame
    :param label: label repeated at every position of the new level
    :param level: position at which to insert the new level
    :param copy: If True, copy the DataFrame before assigning the new index
    :param axis: If 0, then columns. If 1, then index
    :param level_name: optional name assigned to the inserted level
    :return: the (possibly copied) DataFrame with its new MultiIndex
    """
    if copy:
        df = df.copy()
    src = df.columns if axis == 0 else df.index
    arrays = [src.get_level_values(i) for i in range(src.nlevels)]
    arrays.insert(level, [label] * len(src))
    idx = pd.MultiIndex.from_arrays(arrays)
    if level_name:
        idx.set_names(level_name, level, inplace=1)
    if axis == 0:
        df.columns = idx
    else:
        df.index = idx
    return df
python
{ "resource": "" }
q30643
APO
train
def APO(series, fast=12, slow=26, matype=0):
    """Absolute Price Oscillator: the difference between a fast and a slow
    moving average (delegates to talib.APO). The original docstring's
    "double exponential moving average" described DEMA, not APO.

    :param series: price Series
    :param fast: fast moving-average period
    :param slow: slow moving-average period
    :param matype: talib moving-average type code
    """
    return _series_to_series(series, talib.APO, fast, slow, matype)
python
{ "resource": "" }
q30644
MAMA
train
def MAMA(series, fast=.5, slow=.05):
    """MESA Adaptive Moving Average; returns a frame with 'MAMA' and
    'FAMA' columns (delegates to talib.MAMA)."""
    out_columns = ['MAMA', 'FAMA']
    return _series_to_frame(series, out_columns, talib.MAMA, fast, slow)
python
{ "resource": "" }
q30645
MFI
train
def MFI(frame, n=14, high_col='high', low_col='low', close_col='close', vol_col='Volume'):
    """Money Flow Index over *n* periods (delegates to talib.MFI using the
    named high/low/close/volume columns of *frame*)."""
    return _frame_to_series(frame, [high_col, low_col, close_col, vol_col], talib.MFI, n)
python
{ "resource": "" }
q30646
Trade.split
train
def split(self, amt):
    """Return 2 trades: one with quantity *amt* and one with the remaining
    ``self.qty - amt``. Fees are apportioned pro rata by quantity.

    :param amt: quantity for the first resulting trade
    :return: list of two Trade objects
    """
    # BUG FIX: force float division so integer amt/qty inputs do not
    # truncate the fee ratio to 0 or 1 under Python 2
    ratio = abs(float(amt) / self.qty)
    t1 = Trade(self.tid, self.ts, amt, self.px, fees=ratio * self.fees, **self.kwargs)
    t2 = Trade(self.tid, self.ts, self.qty - amt, self.px, fees=(1. - ratio) * self.fees, **self.kwargs)
    return [t1, t2]
python
{ "resource": "" }
q30647
pad_positive_wrapper
train
def pad_positive_wrapper(fmtfct):
    """Wrap *fmtfct* so positive values gain a trailing space when the
    formatter denotes negatives with parentheses, keeping numbers aligned
    in tables."""
    def aligned(*args, **kwargs):
        text = fmtfct(*args, **kwargs)
        if fmtfct.parens and not text.endswith(')'):
            text += ' '
        return text
    return aligned
python
{ "resource": "" }
q30648
RegionFormatter.iter_rows
train
def iter_rows(self, start=None, end=None):
    """Yield each Region row in [start, end); defaults cover all rows."""
    lo = start or 0
    hi = end or self.nrows
    for row in range(lo, hi):
        yield self.iloc[row, :]
python
{ "resource": "" }
q30649
RegionFormatter.iter_cols
train
def iter_cols(self, start=None, end=None):
    """Yield each Region column in [start, end); defaults cover all cols."""
    lo = start or 0
    hi = end or self.ncols
    for col in range(lo, hi):
        yield self.iloc[:, col]
python
{ "resource": "" }
q30650
RegionFormatter.guess_number_format
train
def guess_number_format(self, rb=1, align=1, **fmt_args):
    """Inspect all the region's values and apply the most appropriate
    number formatter."""
    formatter = fmt.guess_formatter(self.actual_values, **fmt_args)
    return self.apply_number_format(formatter, rb=rb, align=align)
python
{ "resource": "" }
q30651
RegionFormatter.dynamic_number_format
train
def dynamic_number_format(self, rb=1, align=1, **fmt_args):
    """Apply a formatter whose rendering changes based on each cell value."""
    dynamic_fct = fmt.DynamicNumberFormatter(**fmt_args)
    return self.apply_number_format(dynamic_fct, rb=rb, align=align)
python
{ "resource": "" }
q30652
ShortTermReport.add_summary_page
train
def add_summary_page(self):
    """Build a table which is shown on the first page which gives an
    overview of the portfolios: one row group per result, keyed by
    (sid, desc), with per-column number formatting applied."""
    s = PortfolioSummary()
    s.include_long_short()
    pieces = []
    for r in self.results:
        # Summarize this result's portfolio returns and tag the summary
        # rows with the result's identifiers so they can be stacked.
        tmp = s(r.port, PortfolioSummary.analyze_returns)
        tmp['desc'] = r.desc
        tmp['sid'] = r.sid
        # Move (sid, desc) into the index, outermost first.
        tmp = tmp.set_index(['sid', 'desc'], append=1).reorder_levels([2, 1, 0])
        pieces.append(tmp)
    frame = pd.concat(pieces)
    tf = self.pdf.table_formatter(frame)
    tf.apply_basic_style(cmap=self.table_style)
    # [col.guess_format(pcts=1, trunc_dot_zeros=1) for col in tf.cells.iter_cols()]
    # Explicit per-column formats (counts as ints, ratios as floats,
    # drawdown date as a date, return/percentage stats as percents).
    tf.cells.match_column_labels(['nmonths', 'cnt', 'win cnt', 'lose cnt',
                                  'dur max']).int_format()
    tf.cells.match_column_labels(['sharpe ann', 'sortino',
                                  'dur avg']).float_format(precision=1)
    tf.cells.match_column_labels(['maxdd dt']).apply_format(
        new_datetime_formatter('%d-%b-%y'))
    tf.cells.match_column_labels(['cagr', 'mret avg', 'mret std ann',
                                  'ret std', 'mret avg ann', 'maxdd',
                                  'avg dd', 'winpct', 'ret avg', 'ret min',
                                  'ret max']).percent_format()
    self.pdf.build_page('summary', {'F1': tf.build()})
python
{ "resource": "" }
q30653
Request.set_flag
train
def set_flag(self, request, val, fld):
    """Set field ``fld`` on ``request`` to ``bool(val)``; a ``val`` of
    None leaves the request untouched."""
    if val is None:
        return
    request.set(fld, bool(val))
python
{ "resource": "" }
q30654
XmlHelper.as_security_error
train
def as_security_error(node, secid):
    """Convert a 'securityError' response element into a SecurityError."""
    assert node.Name == 'securityError'
    fields = {
        'source': XmlHelper.get_child_value(node, 'source'),
        'code': XmlHelper.get_child_value(node, 'code'),
        'category': XmlHelper.get_child_value(node, 'category'),
        'message': XmlHelper.get_child_value(node, 'message'),
        'subcategory': XmlHelper.get_child_value(node, 'subcategory'),
    }
    return SecurityError(security=secid, **fields)
python
{ "resource": "" }
q30655
ResponseHandler.do_init
train
def do_init(self, handler):
    """Called just before waiting for the message: record the handler and
    reset the waiting/exception state."""
    self.handler = handler
    self.exc_info = None
    self.waiting = True
python
{ "resource": "" }
q30656
ReferenceDataRequest.response_as_series
train
def response_as_series(self):
    """Return the response for a single-security request as a Series.

    Raises an AssertionError if more than one symbol was requested.
    """
    assert len(self.symbols) == 1, 'expected single request'
    if self.response_type == 'frame':
        # .loc replaces the DataFrame.ix accessor, which was deprecated and
        # then removed from pandas; the symbol is a row label, so
        # label-based .loc indexing is the correct equivalent.
        return self.response.loc[self.symbols[0]]
    else:
        return pandas.Series(self.response[self.symbols])
python
{ "resource": "" }
q30657
HistoricalDataRequest.response_as_single
train
def response_as_single(self, copy=0):
    """Collapse the per-security response map into a single DataFrame with
    MultiIndex columns of (field, security).

    Parameters
    ----------
    copy : int/bool, default 0
        When truthy, work on copies so the stored per-security frames are
        not mutated by the inserted 'security' column.
    """
    arr = []
    # .items() works on both Python 2 and 3; .iteritems() was a
    # Python-2-only dict method and fails under Python 3.
    for sid, frame in self.response.items():
        if copy:
            frame = frame.copy()
        # Explicit if-statement instead of the obscure ``x and y`` idiom.
        # NOTE: without copy=1 this mutates the stored frame in place.
        if 'security' not in frame:
            frame.insert(0, 'security', sid)
        arr.append(frame.reset_index().set_index(['date', 'security']))
    return concat(arr).unstack()
python
{ "resource": "" }
q30658
LSML_Supervised.fit
train
def fit(self, X, y, random_state=np.random):
    """Create constraints from labels and learn the LSML model.

    Parameters
    ----------
    X : (n x d) matrix
        Input data, where each row corresponds to a single instance.
    y : (n) array-like
        Data labels.
    random_state : numpy.random.RandomState, optional
        If provided, controls random number generation.
    """
    if self.num_labeled != 'deprecated':
        # Fixed the warning text: the adjacent-literal concatenation was
        # missing a space and rendered "will beremoved".
        warnings.warn('"num_labeled" parameter is not used.'
                      ' It has been deprecated in version 0.5.0 and will'
                      ' be removed in 0.6.0', DeprecationWarning)
    X, y = self._prepare_inputs(X, y, ensure_min_samples=2)
    num_constraints = self.num_constraints
    if num_constraints is None:
        # Heuristic default: scale the number of constraints with the
        # square of the number of classes.
        num_classes = len(np.unique(y))
        num_constraints = 20 * num_classes**2

    c = Constraints(y)
    pos_neg = c.positive_negative_pairs(num_constraints, same_length=True,
                                        random_state=random_state)
    return _BaseLSML._fit(self, X[np.column_stack(pos_neg)],
                          weights=self.weights)
python
{ "resource": "" }
q30659
MLKR.fit
train
def fit(self, X, y):
    """
    Fit MLKR model

    Parameters
    ----------
    X : (n x d) array of samples
    y : (n) data labels (numeric targets)
    """
    X, y = self._prepare_inputs(X, y, y_numeric=True,
                                ensure_min_samples=2)
    n, d = X.shape
    if y.shape[0] != n:
        raise ValueError('Data and label lengths mismatch: %d != %d'
                         % (n, y.shape[0]))

    A = self.A0
    m = self.num_dims
    if m is None:
        # default: keep the full input dimensionality
        m = d
    if A is None:
        # initialize to PCA transformation matrix
        # note: not the same as n_components=m !
        A = PCA().fit(X).components_.T[:m]
    elif A.shape != (m, d):
        raise ValueError('A0 needs shape (%d,%d) but got %s' % (
            m, d, A.shape))

    # Measure the total training time
    train_time = time.time()

    self.n_iter_ = 0
    # L-BFGS-B over the flattened transformation matrix; self._loss
    # returns both the objective and its gradient (jac=True).
    res = minimize(self._loss, A.ravel(), (X, y), method='L-BFGS-B',
                   jac=True, tol=self.tol,
                   options=dict(maxiter=self.max_iter))
    self.transformer_ = res.x.reshape(A.shape)

    # Stop timer
    train_time = time.time() - train_time
    if self.verbose:
        cls_name = self.__class__.__name__

        # Warn the user if the algorithm did not converge
        if not res.success:
            warnings.warn('[{}] MLKR did not converge: {}'
                          .format(cls_name, res.message), ConvergenceWarning)
        print('[{}] Training took {:8.2f}s.'.format(cls_name, train_time))

    return self
python
{ "resource": "" }
q30660
Constraints.chunks
train
def chunks(self, num_chunks=100, chunk_size=2, random_state=np.random):
    """Sample non-overlapping chunklets of same-label points.

    Parameters
    ----------
    num_chunks : int
        Number of chunklets to build.
    chunk_size : int
        Number of points per chunklet (all drawn from one label).
    random_state : numpy random state
        The random state object to be passed must be a numpy random seed.

    Returns
    -------
    chunks : array of int
        ``chunks[i]`` is the chunklet id of point i, or -1 if unassigned.

    Raises
    ------
    ValueError
        If there are not enough points to form the requested chunklets.
    """
    chunks = -np.ones_like(self.known_label_idx, dtype=int)
    uniq, lookup = np.unique(self.known_labels, return_inverse=True)
    all_inds = [set(np.where(lookup == c)[0]) for c in range(len(uniq))]
    idx = 0
    while idx < num_chunks and all_inds:
        if len(all_inds) == 1:
            c = 0
        else:
            # numpy randint's ``high`` bound is EXCLUSIVE, so high must be
            # len(all_inds) for every remaining class to be selectable;
            # high=len(all_inds)-1 could never pick the last class.
            c = random_state.randint(0, high=len(all_inds))
        inds = all_inds[c]
        if len(inds) < chunk_size:
            # not enough points left in this class for another chunklet
            del all_inds[c]
            continue
        ii = random_state.choice(list(inds), chunk_size, replace=False)
        inds.difference_update(ii)
        chunks[ii] = idx
        idx += 1
    if idx < num_chunks:
        raise ValueError('Unable to make %d chunks of %d examples each' %
                         (num_chunks, chunk_size))
    return chunks
python
{ "resource": "" }
q30661
_BaseMMC._fD
train
def _fD(self, neg_pairs, A): """The value of the dissimilarity constraint function. f = f(\sum_{ij \in D} distance(x_i, x_j)) i.e. distance can be L1: \sqrt{(x_i-x_j)A(x_i-x_j)'} """ diff = neg_pairs[:, 0, :] - neg_pairs[:, 1, :] return np.log(np.sum(np.sqrt(np.sum(np.dot(diff, A) * diff, axis=1))) + 1e-6)
python
{ "resource": "" }
q30662
_BaseMMC._fD1
train
def _fD1(self, neg_pairs, A): """The gradient of the dissimilarity constraint function w.r.t. A. For example, let distance by L1 norm: f = f(\sum_{ij \in D} \sqrt{(x_i-x_j)A(x_i-x_j)'}) df/dA_{kl} = f'* d(\sum_{ij \in D} \sqrt{(x_i-x_j)^k*(x_i-x_j)^l})/dA_{kl} Note that d_ij*A*d_ij' = tr(d_ij*A*d_ij') = tr(d_ij'*d_ij*A) so, d(d_ij*A*d_ij')/dA = d_ij'*d_ij df/dA = f'(\sum_{ij \in D} \sqrt{tr(d_ij'*d_ij*A)}) * 0.5*(\sum_{ij \in D} (1/sqrt{tr(d_ij'*d_ij*A)})*(d_ij'*d_ij)) """ dim = neg_pairs.shape[2] diff = neg_pairs[:, 0, :] - neg_pairs[:, 1, :] # outer products of all rows in `diff` M = np.einsum('ij,ik->ijk', diff, diff) # faster version of: dist = np.sqrt(np.sum(M * A[None,:,:], axis=(1,2))) dist = np.sqrt(np.einsum('ijk,jk', M, A)) # faster version of: sum_deri = np.sum(M / (2 * (dist[:,None,None] + 1e-6)), axis=0) sum_deri = np.einsum('ijk,i->jk', M, 0.5 / (dist + 1e-6)) sum_dist = dist.sum() return sum_deri / (sum_dist + 1e-6)
python
{ "resource": "" }
q30663
_BaseMMC._fS1
train
def _fS1(self, pos_pairs, A): """The gradient of the similarity constraint function w.r.t. A. f = \sum_{ij}(x_i-x_j)A(x_i-x_j)' = \sum_{ij}d_ij*A*d_ij' df/dA = d(d_ij*A*d_ij')/dA Note that d_ij*A*d_ij' = tr(d_ij*A*d_ij') = tr(d_ij'*d_ij*A) so, d(d_ij*A*d_ij')/dA = d_ij'*d_ij """ dim = pos_pairs.shape[2] diff = pos_pairs[:, 0, :] - pos_pairs[:, 1, :] return np.einsum('ij,ik->jk', diff, diff)
python
{ "resource": "" }
q30664
MMC.fit
train
def fit(self, pairs, y, calibration_params=None):
    """Learn the MMC model, then calibrate the decision threshold on the
    training pairs.

    Parameters
    ----------
    pairs : array-like, shape=(n_constraints, 2, n_features) or (n_constraints, 2)
        3D array of point pairs, or 2D array of pair indices when a
        preprocessor is used.
    y : array-like, shape=(n_constraints,)
        Labels of constraints: +1 for similar pairs, -1 for dissimilar.
    calibration_params : dict or None
        Parameters forwarded to `calibrate_threshold`; None means use the
        calibration defaults.

    Returns
    -------
    self : object
        Returns the instance.
    """
    if calibration_params is None:
        calibration_params = dict()
    self._validate_calibration_params(**calibration_params)
    self._fit(pairs, y)
    self.calibrate_threshold(pairs, y, **calibration_params)
    return self
python
{ "resource": "" }
q30665
MMC_Supervised.fit
train
def fit(self, X, y, random_state=np.random):
    """Create constraints from labels and learn the MMC model.

    Parameters
    ----------
    X : (n x d) matrix
        Input data, where each row corresponds to a single instance.
    y : (n) array-like
        Data labels.
    random_state : numpy.random.RandomState, optional
        If provided, controls random number generation.
    """
    if self.num_labeled != 'deprecated':
        # Fixed the warning text: the adjacent-literal concatenation was
        # missing a space and rendered "will beremoved".
        warnings.warn('"num_labeled" parameter is not used.'
                      ' It has been deprecated in version 0.5.0 and will'
                      ' be removed in 0.6.0', DeprecationWarning)
    X, y = self._prepare_inputs(X, y, ensure_min_samples=2)
    num_constraints = self.num_constraints
    if num_constraints is None:
        # Heuristic default: scale the number of constraints with the
        # square of the number of classes.
        num_classes = len(np.unique(y))
        num_constraints = 20 * num_classes**2

    c = Constraints(y)
    pos_neg = c.positive_negative_pairs(num_constraints,
                                        random_state=random_state)
    pairs, y = wrap_pairs(X, pos_neg)
    return _BaseMMC._fit(self, pairs, y)
python
{ "resource": "" }
q30666
LFDA.fit
train
def fit(self, X, y):
    '''Fit the LFDA model.

    Parameters
    ----------
    X : (n, d) array-like
        Input data.
    y : (n,) array-like
        Class labels, one per point of data.
    '''
    X, y = self._prepare_inputs(X, y, ensure_min_samples=2)
    unique_classes, y = np.unique(y, return_inverse=True)
    n, d = X.shape
    num_classes = len(unique_classes)

    if self.num_dims is None:
        dim = d
    else:
        if not 0 < self.num_dims <= d:
            raise ValueError('Invalid num_dims, must be in [1,%d]' % d)
        dim = self.num_dims

    if self.k is None:
        # default neighborhood size for the local scaling
        k = min(7, d - 1)
    elif self.k >= d:
        warnings.warn('Chosen k (%d) too large, using %d instead.'
                      % (self.k, d-1))
        k = d - 1
    else:
        k = int(self.k)

    # accumulated (local) between-class and within-class scatter
    tSb = np.zeros((d, d))
    tSw = np.zeros((d, d))

    for c in xrange(num_classes):
        Xc = X[y == c]
        nc = Xc.shape[0]

        # classwise affinity matrix
        dist = pairwise_distances(Xc, metric='l2', squared=True)
        # distances to k-th nearest neighbor
        k = min(k, nc - 1)
        sigma = np.sqrt(np.partition(dist, k, axis=0)[:, k])

        # local scaling (Zelnik-Manor & Perona style) for the affinities
        local_scale = np.outer(sigma, sigma)
        with np.errstate(divide='ignore', invalid='ignore'):
            A = np.exp(-dist / local_scale)
            # zero scale means degenerate neighborhood: no affinity
            A[local_scale == 0] = 0

        G = Xc.T.dot(A.sum(axis=0)[:, None] * Xc) - Xc.T.dot(A).dot(Xc)
        tSb += G / n + (1 - nc / n) * Xc.T.dot(Xc) + _sum_outer(Xc) / n
        tSw += G / nc

    tSb -= _sum_outer(X) / n - tSw

    # symmetrize
    tSb = (tSb + tSb.T) / 2
    tSw = (tSw + tSw.T) / 2

    # generalized eigenproblem tSb v = lambda tSw v; keep the dim largest
    vals, vecs = _eigh(tSb, tSw, dim)
    order = np.argsort(-vals)[:dim]
    vals = vals[order].real
    vecs = vecs[:, order]

    if self.embedding_type == 'weighted':
        # scale each direction by the sqrt of its eigenvalue
        vecs *= np.sqrt(vals)
    elif self.embedding_type == 'orthonormalized':
        vecs, _ = np.linalg.qr(vecs)

    self.transformer_ = vecs.T
    return self
python
{ "resource": "" }
q30667
BaseMetricLearner._prepare_inputs
train
def _prepare_inputs(self, X, y=None, type_of_inputs='classic', **kwargs):
    """Initialize the preprocessor and validate/convert the inputs.

    See `check_input` for details. ``type_of_inputs`` is 'classic' for
    arrays of points (or point indicators) and 'tuples' for arrays of
    tuples (or tuple indicators). Extra keyword arguments are forwarded
    to `check_input`. Returns the checked X (and y, when given).
    """
    self.check_preprocessor()
    tuple_size = getattr(self, '_tuple_size', None)
    return check_input(X, y,
                       type_of_inputs=type_of_inputs,
                       preprocessor=self.preprocessor_,
                       estimator=self,
                       tuple_size=tuple_size,
                       **kwargs)
python
{ "resource": "" }
q30668
MahalanobisMixin.score_pairs
train
def score_pairs(self, pairs):
    r"""Return the learned Mahalanobis distance between pairs.

    Computes :math:`d_M(x, x') = \sqrt{(x-x')^T M (x-x')}` for every pair,
    which equals the euclidean distance between the linear embeddings
    :math:`x_e = L x` of the two points.

    Parameters
    ----------
    pairs : array-like, shape=(n_pairs, 2, n_features) or (n_pairs, 2)
        3D array of pairs, or 2D array of pair indices when the learner
        uses a preprocessor.

    Returns
    -------
    scores : `numpy.ndarray` of shape=(n_pairs,)
        The learned Mahalanobis distance for every pair.

    See Also
    --------
    get_metric : returns a metric function over two 1D arrays (independent
        of this learner and without preprocessor support).
    :ref:`mahalanobis_distances` : project documentation on Mahalanobis
        distances.
    """
    pairs = check_input(pairs, type_of_inputs='tuples',
                        preprocessor=self.preprocessor_,
                        estimator=self, tuple_size=2)
    # the embedding is linear, so embedding the difference equals the
    # difference of the embeddings
    embedded_diffs = self.transform(pairs[:, 1, :] - pairs[:, 0, :])
    squared = np.square(embedded_diffs).sum(axis=-1)
    return np.sqrt(squared)
python
{ "resource": "" }
q30669
MahalanobisMixin.transform
train
def transform(self, X):
    """Embed data points via the learned linear map.

    Returns ``X.dot(L.T)`` where ``L`` is the learned transformation.

    Parameters
    ----------
    X : `numpy.ndarray`, shape=(n_samples, n_features)
        The data points to embed.

    Returns
    -------
    X_embedded : `numpy.ndarray`, shape=(n_samples, num_dims)
        The embedded data points.
    """
    checked = check_input(X, type_of_inputs='classic', estimator=self,
                          preprocessor=self.preprocessor_,
                          accept_sparse=True)
    return checked.dot(self.transformer_.T)
python
{ "resource": "" }
q30670
_PairsClassifierMixin.decision_function
train
def decision_function(self, pairs):
    """Return the decision function used to classify the pairs.

    Returns the opposite of the learned metric value between the samples
    in each pair (scikit-learn convention: high for similar, low for
    dissimilar); this value is thresholded to classify pairs as similar
    (+1) or dissimilar (-1).

    Parameters
    ----------
    pairs : array-like, shape=(n_pairs, 2, n_features) or (n_pairs, 2)
        3D array of pairs, or 2D array of pair indices when the learner
        uses a preprocessor.

    Returns
    -------
    y_predicted : `numpy.ndarray` of floats, shape=(n_constraints,)
        The predicted decision function value for each pair.
    """
    checked_pairs = check_input(pairs, type_of_inputs='tuples',
                                preprocessor=self.preprocessor_,
                                estimator=self,
                                tuple_size=self._tuple_size)
    return -self.score_pairs(checked_pairs)
python
{ "resource": "" }
q30671
_PairsClassifierMixin.calibrate_threshold
train
def calibrate_threshold(self, pairs_valid, y_valid, strategy='accuracy',
                        min_rate=None, beta=1.):
    """Decision threshold calibration for pairwise binary classification

    Method that calibrates the decision threshold (cutoff point) of the
    metric learner. This threshold will then be used when calling the
    method `predict`. The methods for picking cutoff points make use of
    traditional binary classification evaluation statistics such as the
    true positive and true negative rates and F-scores. The threshold will
    be found to maximize the chosen score on the validation set
    ``(pairs_valid, y_valid)``.

    See more in the :ref:`User Guide <calibration>`.

    Parameters
    ----------
    strategy : str, optional (default='accuracy')
        The strategy to use for choosing the cutoff threshold.

        'accuracy'
            Selects a decision threshold that maximizes the accuracy.
        'f_beta'
            Selects a decision threshold that maximizes the f_beta score,
            with beta given by the parameter `beta`.
        'max_tpr'
            Selects a decision threshold that yields the highest true
            positive rate with true negative rate at least equal to the
            value of the parameter `min_rate`.
        'max_tnr'
            Selects a decision threshold that yields the highest true
            negative rate with true positive rate at least equal to the
            value of the parameter `min_rate`.

    beta : float in [0, 1], optional (default=None)
        Beta value to be used in case strategy == 'f_beta'.

    min_rate : float in [0, 1] or None, (default=None)
        In case strategy is 'max_tpr' or 'max_tnr' this parameter must be
        set to specify the minimal value for the true negative rate or
        true positive rate respectively that needs to be achieved.

    pairs_valid : array-like, shape=(n_pairs_valid, 2, n_features)
        The validation set of pairs to use to set the threshold.

    y_valid : array-like, shape=(n_pairs_valid,)
        The labels of the pairs of the validation set to use to set the
        threshold. They must be +1 for positive pairs and -1 for negative
        pairs.

    References
    ----------
    .. [1] Receiver-operating characteristic (ROC) plots: a fundamental
           evaluation tool in clinical medicine, MH Zweig, G Campbell -
           Clinical chemistry, 1993

    .. [2] most of the code of this function is from scikit-learn's PR #10117

    See Also
    --------
    sklearn.calibration : scikit-learn's module for calibrating classifiers
    """
    self._validate_calibration_params(strategy, min_rate, beta)

    pairs_valid, y_valid = self._prepare_inputs(pairs_valid, y_valid,
                                                type_of_inputs='tuples')

    n_samples = pairs_valid.shape[0]
    if strategy == 'accuracy':
        scores = self.decision_function(pairs_valid)
        scores_sorted_idces = np.argsort(scores)[::-1]
        scores_sorted = scores[scores_sorted_idces]
        # true labels ordered by decision_function value: (higher first)
        y_ordered = y_valid[scores_sorted_idces]
        # we need to add a threshold that will reject all points
        scores_sorted = np.concatenate([[scores_sorted[0] + 1],
                                        scores_sorted])
        # finds the threshold that maximizes the accuracy:
        cum_tp = stable_cumsum(y_ordered == 1)  # cumulative number of true
        # positives
        # we need to add the point where all samples are rejected:
        cum_tp = np.concatenate([[0.], cum_tp])
        cum_tn_inverted = stable_cumsum(y_ordered[::-1] == -1)
        cum_tn = np.concatenate([[0.], cum_tn_inverted])[::-1]
        cum_accuracy = (cum_tp + cum_tn) / n_samples
        imax = np.argmax(cum_accuracy)
        # we set the threshold to the lowest accepted score
        # note: we are working with negative distances but we want the
        # threshold to be with respect to the actual distances so we take
        # minus sign
        self.threshold_ = - scores_sorted[imax]
        # note: if the best is to reject all points it's already one of the
        # thresholds (scores_sorted[0])
        return self

    if strategy == 'f_beta':
        precision, recall, thresholds = precision_recall_curve(
            y_valid, self.decision_function(pairs_valid), pos_label=1)
        # here the thresholds are decreasing
        # We ignore the warnings here, in the same taste as
        # https://github.com/scikit-learn/scikit-learn/blob/62d205980446a1abc1065
        # f4332fd74eee57fcf73/sklearn/metrics/classification.py#L1284
        with np.errstate(divide='ignore', invalid='ignore'):
            f_beta = ((1 + beta**2) * (precision * recall) /
                      (beta**2 * precision + recall))
        # We need to set nans to zero otherwise they will be considered higher
        # than the others (also discussed in https://github.com/scikit-learn/
        # scikit-learn/pull/10117/files#r262115773)
        f_beta[np.isnan(f_beta)] = 0.
        imax = np.argmax(f_beta)
        # we set the threshold to the lowest accepted score
        # note: we are working with negative distances but we want the
        # threshold to be with respect to the actual distances so we take
        # minus sign
        self.threshold_ = - thresholds[imax]
        # Note: we don't need to deal with rejecting all points (i.e.
        # threshold = max_scores + 1), since this can never happen to be
        # optimal (see a more detailed discussion in
        # test_calibrate_threshold_extreme)
        return self

    fpr, tpr, thresholds = roc_curve(y_valid,
                                     self.decision_function(pairs_valid),
                                     pos_label=1)
    # here the thresholds are decreasing
    fpr, tpr, thresholds = fpr, tpr, thresholds

    if strategy in ['max_tpr', 'max_tnr']:
        if strategy == 'max_tpr':
            indices = np.where(1 - fpr >= min_rate)[0]
            imax = np.argmax(tpr[indices])

        if strategy == 'max_tnr':
            indices = np.where(tpr >= min_rate)[0]
            imax = np.argmax(1 - fpr[indices])

        imax_valid = indices[imax]
        # note: we are working with negative distances but we want the
        # threshold to be with respect to the actual distances so we take
        # minus sign
        if indices[imax] == len(thresholds):  # we want to accept everything
            self.threshold_ = - (thresholds[imax_valid] - 1)
        else:
            # thanks to roc_curve, the first point will always be max_scores
            # + 1, see: https://github.com/scikit-learn/scikit-learn/pull/13523
            self.threshold_ = - thresholds[imax_valid]
        return self
python
{ "resource": "" }
q30672
_PairsClassifierMixin._validate_calibration_params
train
def _validate_calibration_params(strategy='accuracy', min_rate=None, beta=1.): """Ensure that calibration parameters have allowed values""" if strategy not in ('accuracy', 'f_beta', 'max_tpr', 'max_tnr'): raise ValueError('Strategy can either be "accuracy", "f_beta" or ' '"max_tpr" or "max_tnr". Got "{}" instead.' .format(strategy)) if strategy == 'max_tpr' or strategy == 'max_tnr': if (min_rate is None or not isinstance(min_rate, (int, float)) or not min_rate >= 0 or not min_rate <= 1): raise ValueError('Parameter min_rate must be a number in' '[0, 1]. ' 'Got {} instead.'.format(min_rate)) if strategy == 'f_beta': if beta is None or not isinstance(beta, (int, float)): raise ValueError('Parameter beta must be a real number. ' 'Got {} instead.'.format(type(beta)))
python
{ "resource": "" }
q30673
_QuadrupletsClassifierMixin.predict
train
def predict(self, quadruplets):
    """Predict the ordering of sample distances in input quadruplets.

    For each quadruplet, returns 1 if it is in the right order (the first
    pair is more similar than the second pair), and -1 otherwise.

    Parameters
    ----------
    quadruplets : array-like, shape=(n_quadruplets, 4, n_features) or (n_quadruplets, 4)
        3D array of quadruplets, or 2D array of quadruplet indices when
        the learner uses a preprocessor.

    Returns
    -------
    prediction : `numpy.ndarray` of floats, shape=(n_constraints,)
        Predictions of the ordering of pairs, for each quadruplet.
    """
    check_is_fitted(self, 'transformer_')
    checked = check_input(quadruplets, type_of_inputs='tuples',
                          preprocessor=self.preprocessor_,
                          estimator=self, tuple_size=self._tuple_size)
    scores = self.decision_function(checked)
    return np.sign(scores)
python
{ "resource": "" }
q30674
RCA.fit
train
def fit(self, X, chunks):
    """Learn the RCA model.

    Parameters
    ----------
    data : (n x d) data matrix
        Each row corresponds to a single instance
    chunks : (n,) array of ints
        When ``chunks[i] == -1``, point i doesn't belong to any chunklet.
        When ``chunks[i] == j``, point i belongs to chunklet j.
    """
    X = self._prepare_inputs(X, ensure_min_samples=2)

    # PCA projection to remove noise and redundant information.
    if self.pca_comps is not None:
        pca = decomposition.PCA(n_components=self.pca_comps)
        X_t = pca.fit_transform(X)
        M_pca = pca.components_
    else:
        # no PCA requested: just center the data
        X_t = X - X.mean(axis=0)
        M_pca = None

    chunks = np.asanyarray(chunks, dtype=int)
    chunk_mask, chunked_data = _chunk_mean_centering(X_t, chunks)

    # within-chunklet ("inner") covariance of the chunk-centered data
    inner_cov = np.atleast_2d(np.cov(chunked_data, rowvar=0, bias=1))
    dim = self._check_dimension(np.linalg.matrix_rank(inner_cov), X_t)

    # Fisher Linear Discriminant projection
    if dim < X_t.shape[1]:
        total_cov = np.cov(X_t[chunk_mask], rowvar=0)
        # directions minimizing inner vs total covariance ratio
        tmp = np.linalg.lstsq(total_cov, inner_cov)[0]
        vals, vecs = np.linalg.eig(tmp)
        inds = np.argsort(vals)[:dim]
        A = vecs[:, inds]
        inner_cov = np.atleast_2d(A.T.dot(inner_cov).dot(A))
        # whiten with the inverse square root of the reduced inner covariance
        self.transformer_ = _inv_sqrtm(inner_cov).dot(A.T)
    else:
        self.transformer_ = _inv_sqrtm(inner_cov).T

    if M_pca is not None:
        # compose the learned transform with the initial PCA projection
        self.transformer_ = np.atleast_2d(self.transformer_.dot(M_pca))

    return self
python
{ "resource": "" }
q30675
RCA_Supervised.fit
train
def fit(self, X, y, random_state=np.random):
    """Create constraints from labels and learn the RCA model.

    Needs num_chunks/chunk_size specified in the constructor.

    Parameters
    ----------
    X : (n x d) data matrix
        each row corresponds to a single instance
    y : (n) data labels
    random_state : a random.seed object to fix the random_state if needed.
    """
    X, y = self._prepare_inputs(X, y, ensure_min_samples=2)
    constraints = Constraints(y)
    chunks = constraints.chunks(num_chunks=self.num_chunks,
                                chunk_size=self.chunk_size,
                                random_state=random_state)
    return RCA.fit(self, X, chunks)
python
{ "resource": "" }
q30676
make_name
train
def make_name(estimator):
    """Return the estimator's name: the string itself if a string was
    given, the class name for an estimator object, or None for None."""
    if estimator is None:
        return None
    if isinstance(estimator, six.string_types):
        return estimator
    return estimator.__class__.__name__
python
{ "resource": "" }
q30677
UploadToDeprecatedPyPIDetected.from_args
train
def from_args(cls, target_url, default_url, test_url):
    """Return an UploadToDeprecatedPyPIDetected instance, with a message
    pointing users at the new pypi.org/test.pypi.org upload endpoints."""
    message = ("You're trying to upload to the legacy PyPI site '{}'. "
               "Uploading to those sites is deprecated. \n "
               "The new sites are pypi.org and test.pypi.org. Try using "
               "{} (or {}) to upload your packages instead. "
               "These are the default URLs for Twine now. \n More at "
               "https://packaging.python.org/guides/migrating-to-pypi-org/"
               " .").format(target_url, default_url, test_url)
    return cls(message)
python
{ "resource": "" }
q30678
check_status_code
train
def check_status_code(response, verbose):
    """Explain legacy-PyPI 410 responses and re-raise HTTP errors.

    Shouldn't normally trigger thanks to the UploadToDeprecatedPyPIDetected
    exception, but kept as a backstop in case that check breaks.
    """
    legacy_hosts = ("https://pypi.python.org", "https://testpypi.python.org")
    if response.status_code == 410 and response.url.startswith(legacy_hosts):
        print("It appears you're uploading to pypi.python.org (or "
              "testpypi.python.org). You've received a 410 error response. "
              "Uploading to those sites is deprecated. The new sites are "
              "pypi.org and test.pypi.org. Try using "
              "https://upload.pypi.org/legacy/ "
              "(or https://test.pypi.org/legacy/) to upload your packages "
              "instead. These are the default URLs for Twine now. More at "
              "https://packaging.python.org/guides/migrating-to-pypi-org/ ")
    try:
        response.raise_for_status()
    except HTTPError as err:
        if response.text:
            if verbose:
                print('Content received from server:\n{}'.format(
                    response.text))
            else:
                print('NOTE: Try --verbose to see response content.')
        raise err
python
{ "resource": "" }
q30679
no_positional
train
def no_positional(allow_self=False):
    """A decorator that doesn't allow for positional arguments.

    :param bool allow_self: Whether to allow ``self`` as a positional
        argument.
    """
    max_positional = 1 if allow_self else 0

    def reject_positional_args(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            n_given = len(args)
            if n_given > max_positional:
                verb = 'were' if n_given > 1 else 'was'
                raise TypeError(
                    '{}() takes {} positional arguments but {} {} given'
                    .format(function.__name__, max_positional, n_given,
                            verb))
            return function(*args, **kwargs)
        return wrapper
    return reject_positional_args
python
{ "resource": "" }
q30680
Wheel.find_candidate_metadata_files
train
def find_candidate_metadata_files(names):
    """Filter files that may be METADATA files, returning their split path
    components sorted shallowest-first."""
    candidates = [path.split('/') for path in map(try_decode, names)
                  if 'METADATA' in path]
    by_depth = sorted((len(parts), parts) for parts in candidates)
    return [parts for _, parts in by_depth]
python
{ "resource": "" }
q30681
HashManager.hash
train
def hash(self):
    """Read the file in buffered chunks and feed each chunk to the md5,
    sha2 and blake accumulators."""
    with open(self.filename, "rb") as fp:
        while True:
            chunk = fp.read(io.DEFAULT_BUFFER_SIZE)
            if not chunk:
                break
            self._md5_update(chunk)
            self._sha2_update(chunk)
            self._blake_update(chunk)
python
{ "resource": "" }
q30682
Settings.register_argparse_arguments
train
def register_argparse_arguments(parser):
    """Register the arguments for argparse."""
    # --- target repository -------------------------------------------
    parser.add_argument(
        "-r", "--repository",
        action=utils.EnvironmentDefault,
        env="TWINE_REPOSITORY",
        default="pypi",
        help="The repository (package index) to upload the package to. "
             "Should be a section in the config file (default: "
             "%(default)s). (Can also be set via %(env)s environment "
             "variable.)",
    )
    parser.add_argument(
        "--repository-url",
        action=utils.EnvironmentDefault,
        env="TWINE_REPOSITORY_URL",
        default=None,
        required=False,
        help="The repository (package index) URL to upload the package to."
             " This overrides --repository. "
             "(Can also be set via %(env)s environment variable.)"
    )
    # --- GPG signing --------------------------------------------------
    parser.add_argument(
        "-s", "--sign",
        action="store_true",
        default=False,
        help="Sign files to upload using GPG.",
    )
    parser.add_argument(
        "--sign-with",
        default="gpg",
        help="GPG program used to sign uploads (default: %(default)s).",
    )
    parser.add_argument(
        "-i", "--identity",
        help="GPG identity used to sign files.",
    )
    # --- credentials --------------------------------------------------
    parser.add_argument(
        "-u", "--username",
        action=utils.EnvironmentDefault,
        env="TWINE_USERNAME",
        required=False,
        help="The username to authenticate to the repository "
             "(package index) as. (Can also be set via "
             "%(env)s environment variable.)",
    )
    parser.add_argument(
        "-p", "--password",
        action=utils.EnvironmentDefault,
        env="TWINE_PASSWORD",
        required=False,
        help="The password to authenticate to the repository "
             "(package index) with. (Can also be set via "
             "%(env)s environment variable.)",
    )
    # --- misc upload options ------------------------------------------
    parser.add_argument(
        "-c", "--comment",
        help="The comment to include with the distribution file.",
    )
    parser.add_argument(
        "--config-file",
        default="~/.pypirc",
        help="The .pypirc config file to use.",
    )
    parser.add_argument(
        "--skip-existing",
        default=False,
        action="store_true",
        help="Continue uploading files if one already exists. (Only valid "
             "when uploading to PyPI. Other implementations may not "
             "support this.)",
    )
    # --- TLS configuration --------------------------------------------
    parser.add_argument(
        "--cert",
        action=utils.EnvironmentDefault,
        env="TWINE_CERT",
        default=None,
        required=False,
        metavar="path",
        help="Path to alternate CA bundle (can also be set via %(env)s "
             "environment variable).",
    )
    parser.add_argument(
        "--client-cert",
        metavar="path",
        help="Path to SSL client certificate, a single file containing the"
             " private key and the certificate in PEM format.",
    )
    # --- output control -----------------------------------------------
    parser.add_argument(
        "--verbose",
        default=False,
        required=False,
        action="store_true",
        help="Show verbose output."
    )
    parser.add_argument(
        "--disable-progress-bar",
        default=False,
        required=False,
        action="store_true",
        help="Disable the progress bar."
    )
python
{ "resource": "" }
q30683
Settings.from_argparse
train
def from_argparse(cls, args):
    """Generate the Settings from parsed arguments.

    Note: ``vars(args)`` aliases the namespace's ``__dict__``, so the
    renames below also mutate ``args`` itself.
    """
    kwargs = vars(args)
    # rename the CLI option names to the constructor's keyword names
    kwargs['repository_name'] = kwargs.pop('repository')
    kwargs['cacert'] = kwargs.pop('cert')
    return cls(**kwargs)
python
{ "resource": "" }
q30684
Settings.check_repository_url
train
def check_repository_url(self):
    """Verify we are not using legacy PyPI.

    :raises: :class:`~twine.exceptions.UploadToDeprecatedPyPIDetected`
    """
    url = self.repository_config['repository']
    legacy_prefixes = (repository.LEGACY_PYPI, repository.LEGACY_TEST_PYPI)
    if not url.startswith(legacy_prefixes):
        return
    # Point the user at the current upload endpoints instead.
    raise exceptions.UploadToDeprecatedPyPIDetected.from_args(
        url, utils.DEFAULT_REPOSITORY, utils.TEST_REPOSITORY)
python
{ "resource": "" }
q30685
Settings.create_repository
train
def create_repository(self):
    """Create and configure a new repository for uploading.

    :return: a :class:`repository.Repository` ready for use.
    """
    url = self.repository_config['repository']
    repo = repository.Repository(url, self.username, self.password,
                                 self.disable_progress_bar)
    # Apply TLS settings supplied on the command line / environment.
    repo.set_certificate_authority(self.cacert)
    repo.set_client_certificate(self.client_cert)
    return repo
python
{ "resource": "" }
q30686
get_var_dict_from_ctx
train
def get_var_dict_from_ctx(ctx: commands.Context, prefix: str = '_'):
    """
    Returns the dict to be used in REPL for a given Context.

    Every convenience name is prefixed (default ``_``) to avoid clobbering
    user variables in the REPL scope.
    """
    env = {}
    for name, value in (
            ('author', ctx.author),
            ('bot', ctx.bot),
            ('channel', ctx.channel),
            ('ctx', ctx),
            ('find', discord.utils.find),
            ('get', discord.utils.get),
            ('guild', ctx.guild),
            ('message', ctx.message),
            ('msg', ctx.message),
    ):
        env[prefix + name] = value
    return env
python
{ "resource": "" }
q30687
background_reader
train
def background_reader(stream, loop: asyncio.AbstractEventLoop, callback):
    """
    Reads a stream and forwards each line to an async callback.

    Intended to run in a worker thread: each line is handed back to the
    event loop thread-safely, where it is wrapped in a task.
    """
    while True:
        line = stream.readline()
        if line == b'':
            # EOF: readline on a pipe returns b'' only when the stream closes.
            break
        loop.call_soon_threadsafe(loop.create_task, callback(line))
python
{ "resource": "" }
q30688
ShellReader.make_reader_task
train
def make_reader_task(self, stream, callback):
    """
    Create a reader executor task for a stream.

    Wraps ``background_reader`` so the blocking readline loop runs in an
    executor while results flow back through this loop.
    """
    wrapped = self.executor_wrapper(background_reader, stream, self.loop, callback)
    return self.loop.create_task(wrapped)
python
{ "resource": "" }
q30689
ShellReader.clean_bytes
train
def clean_bytes(line):
    """
    Cleans a byte sequence of shell directives and decodes it.

    Strips carriage returns and ANSI escape sequences, and zero-width-joins
    double backticks so the text is safe inside a Discord code block.
    """
    decoded = line.decode('utf-8')
    stripped = decoded.replace('\r', '').strip('\n')
    # Drop ANSI SGR/control sequences (ESC ... m).
    no_ansi = re.sub(r'\x1b[^m]*m', '', stripped)
    return no_ansi.replace("``", "`\u200b`").strip('\n')
python
{ "resource": "" }
q30690
ShellReader.stderr_handler
train
async def stderr_handler(self, line):
    """
    Handler for this class for stderr.

    Tags the line so consumers can tell it apart from stdout, cleans it,
    and enqueues it.
    """
    tagged = b'[stderr] ' + line
    await self.queue.put(self.clean_bytes(tagged))
python
{ "resource": "" }
q30691
wrap_code
train
def wrap_code(code: str, args: str = '') -> ast.Module:
    """
    Compiles Python code into an async function or generator,
    and automatically adds return if the function body is a single evaluation.
    Also adds inline import expression support.

    :param code: The user-supplied source to wrap.
    :param args: Argument list text substituted into the generated wrapper header.
    :return: An :class:`ast.Module` whose last statement is the async wrapper function.
    """

    if sys.version_info >= (3, 7):
        # 3.7+: parse the user code separately and splice its AST in below.
        user_code = import_expression.parse(code, mode='exec')
        injected = ''
    else:
        # <3.7: the user code is injected textually into the template instead.
        injected = code

    mod = import_expression.parse(CORO_CODE.format(args, textwrap.indent(injected, ' ' * 8)), mode='exec')

    definition = mod.body[-1]  # async def ...:
    assert isinstance(definition, ast.AsyncFunctionDef)

    try_block = definition.body[-1]  # try:
    assert isinstance(try_block, ast.Try)

    if sys.version_info >= (3, 7):
        # Append the user's statements inside the template's try block.
        try_block.body.extend(user_code.body)
    else:
        ast.increment_lineno(mod, -16)  # bring line numbers back in sync with repl

    # Newly spliced nodes need location info before compilation.
    ast.fix_missing_locations(mod)

    # Any yield anywhere in the try block makes the wrapper an async generator.
    is_asyncgen = any(isinstance(node, ast.Yield) for node in ast.walk(try_block))
    last_expr = try_block.body[-1]

    # if the last part isn't an expression, ignore it
    if not isinstance(last_expr, ast.Expr):
        return mod

    # if the last expression is not a yield
    if not isinstance(last_expr.value, ast.Yield):
        # copy the expression into a return/yield
        if is_asyncgen:
            # copy the value of the expression into a yield
            yield_stmt = ast.Yield(last_expr.value)
            ast.copy_location(yield_stmt, last_expr)
            # place the yield into its own expression
            yield_expr = ast.Expr(yield_stmt)
            ast.copy_location(yield_expr, last_expr)
            # place the yield where the original expression was
            try_block.body[-1] = yield_expr
        else:
            # copy the expression into a return
            return_stmt = ast.Return(last_expr.value)
            ast.copy_location(return_stmt, last_expr)
            # place the return where the original expression was
            try_block.body[-1] = return_stmt

    return mod
python
{ "resource": "" }
q30692
AsyncCodeExecutor.traverse
train
async def traverse(self, func): """ Traverses an async function or generator, yielding each result. This function is private. The class should be used as an iterator instead of using this method. """ # this allows the reference to be stolen async_executor = self if inspect.isasyncgenfunction(func): async for result in func(*async_executor.args): yield result else: yield await func(*async_executor.args)
python
{ "resource": "" }
q30693
get_parent_scope_from_var
train
def get_parent_scope_from_var(name, global_ok=False, skip_frames=0) -> typing.Optional[Scope]:
    """
    Walks up the frame stack looking for a frame-scope containing the given
    variable name.

    Returns
    --------
    Optional[Scope]
        The relevant :class:`Scope` or None
    """
    stack = inspect.stack()
    try:
        # Skip our own frame plus any frames the caller asked us to ignore.
        for frame_info in stack[skip_frames + 1:]:
            frame = frame_info.frame
            try:
                if name in frame.f_locals or (global_ok and name in frame.f_globals):
                    return Scope(globals_=frame.f_globals, locals_=frame.f_locals)
            finally:
                # Break reference cycles involving the frame object.
                del frame
    finally:
        del stack

    return None
python
{ "resource": "" }
q30694
get_parent_var
train
def get_parent_var(name, global_ok=False, default=None, skip_frames=0):
    """
    Directly gets a variable from a parent frame-scope.

    Returns
    --------
    Any
        The content of the variable found by the given name, or None.
    """
    # +1 skips this function's own frame when scanning the stack.
    scope = get_parent_scope_from_var(name, global_ok=global_ok,
                                      skip_frames=skip_frames + 1)
    if not scope:
        return default
    # Locals shadow globals, mirroring normal name resolution.
    source = scope.locals if name in scope.locals else scope.globals
    return source.get(name, default)
python
{ "resource": "" }
q30695
Scope.clear_intersection
train
def clear_intersection(self, other_dict):
    """
    Clears out locals and globals from this scope where the key-value pair
    matches with other_dict.

    This allows cleanup of temporary variables that may have washed up into
    this Scope.

    Arguments
    ---------
    other_dict: a :class:`dict` to be used to determine scope clearance.

    Returns
    -------
    Scope
        The updated scope (self).
    """
    for mapping in (self.globals, self.locals):
        # Only evict entries that are the *same object*, not merely equal.
        doomed = [key for key, value in other_dict.items()
                  if key in mapping and mapping[key] is value]
        for key in doomed:
            del mapping[key]

    return self
python
{ "resource": "" }
q30696
Scope.update
train
def update(self, other):
    """
    Updates this scope with the content of another scope.

    Arguments
    ---------
    other: a :class:`Scope` instance.

    Returns
    -------
    Scope
        The updated scope (self).
    """
    # Merge both namespaces; entries from `other` win on key collisions.
    for attr in ('globals', 'locals'):
        getattr(self, attr).update(getattr(other, attr))
    return self
python
{ "resource": "" }
q30697
send_traceback
train
async def send_traceback(destination: discord.abc.Messageable, verbosity: int, *exc_info):
    """
    Sends a traceback of an exception to a destination.
    Used when REPL fails for any reason.

    :param destination: Where to send this information to
    :param verbosity: How far back this traceback should go. 0 shows just the last stack.
    :param exc_info: Information about this exception, from sys.exc_info or similar.
    :return: The last message sent
    """
    etype, value, trace = exc_info

    # Zero-width-join double backticks so the code fence isn't broken.
    formatted = traceback.format_exception(etype, value, trace, verbosity)
    content = "".join(formatted).replace("``", "`\u200b`")

    paginator = commands.Paginator(prefix='```py')
    for line in content.split('\n'):
        paginator.add_line(line)

    last_message = None
    for page in paginator.pages:
        last_message = await destination.send(page)

    return last_message
python
{ "resource": "" }
q30698
do_after_sleep
train
async def do_after_sleep(delay: float, coro, *args, **kwargs):
    """
    Performs an action after a set amount of time.

    This function only calls the coroutine after the delay,
    preventing asyncio complaints about destroyed coros.

    :param delay: Time in seconds
    :param coro: Coroutine to run
    :param args: Arguments to pass to coroutine
    :param kwargs: Keyword arguments to pass to coroutine
    :return: Whatever the coroutine returned.
    """
    await asyncio.sleep(delay)
    # The coroutine object is only created here, after the wait.
    result = await coro(*args, **kwargs)
    return result
python
{ "resource": "" }
q30699
attempt_add_reaction
train
async def attempt_add_reaction(msg: discord.Message, reaction: typing.Union[str, discord.Emoji])\
        -> typing.Optional[discord.Reaction]:
    """
    Try to add a reaction to a message, ignoring it if it fails for any reason.

    :param msg: The message to add the reaction to.
    :param reaction: The reaction emoji, could be a string or `discord.Emoji`
    :return: A `discord.Reaction` or None, depending on if it failed or not.
    """
    try:
        return await msg.add_reaction(reaction)
    except discord.HTTPException:
        # Missing permissions, deleted message, etc. — silently give up.
        return None
python
{ "resource": "" }