Dataset schema (one record per function, fields listed in record order):

    partition          string, 3 classes
    func_name          string, 1 to 134 chars
    docstring          string, 1 to 46.9k chars
    path               string, 4 to 223 chars
    code               string, 75 to 104k chars
    docstring_tokens   list, 1 to 1.97k items
    repo               string, 7 to 55 chars
    language           string, 1 class
    url                string, 87 to 315 chars
    code_tokens        list, 19 to 28.4k items
    sha                string, 40 chars
train
OpenAIGPTConfig.from_dict
Constructs an `OpenAIGPTConfig` from a Python dictionary of parameters.
pytorch_pretrained_bert/modeling_openai.py
def from_dict(cls, json_object):
    """Constructs an `OpenAIGPTConfig` from a Python dictionary of parameters."""
    config = OpenAIGPTConfig(vocab_size_or_config_json_file=-1)
    for key, value in json_object.items():
        config.__dict__[key] = value
    return config
[ "Constructs", "a", "OpenAIGPTConfig", "from", "a", "Python", "dictionary", "of", "parameters", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_openai.py#L200-L205
[ "def", "from_dict", "(", "cls", ",", "json_object", ")", ":", "config", "=", "OpenAIGPTConfig", "(", "vocab_size_or_config_json_file", "=", "-", "1", ")", "for", "key", ",", "value", "in", "json_object", ".", "items", "(", ")", ":", "config", ".", "__dict__", "[", "key", "]", "=", "value", "return", "config" ]
b832d5bb8a6dfc5965015b828e577677eace601e
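A minimal usage sketch for from_dict as defined above, assuming the method is exposed as a classmethod (the cls argument suggests it); the field values are illustrative:

from pytorch_pretrained_bert.modeling_openai import OpenAIGPTConfig

config = OpenAIGPTConfig.from_dict({"n_embd": 768, "n_layer": 12, "n_head": 12})
print(config.n_embd)  # 768 -- each dict key simply becomes an attribute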
train
OpenAIGPTModel.set_num_special_tokens
Update input embeddings with a new embedding matrix if needed
pytorch_pretrained_bert/modeling_openai.py
def set_num_special_tokens(self, num_special_tokens):
    """Update input embeddings with a new embedding matrix if needed."""
    if self.config.n_special == num_special_tokens:
        return
    # Update config
    self.config.n_special = num_special_tokens
    # Build new embeddings and initialize all new embeddings (in particular the special tokens)
    old_embed = self.tokens_embed
    self.tokens_embed = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
    self.tokens_embed.to(old_embed.weight.device)
    self.init_weights(self.tokens_embed)
    # Copy word embeddings from the previous weights
    self.tokens_embed.weight.data[:self.config.vocab_size, :] = old_embed.weight.data[:self.config.vocab_size, :]
[ "Update", "input", "embeddings", "with", "new", "embedding", "matrice", "if", "needed" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_openai.py#L605-L617
[ "def", "set_num_special_tokens", "(", "self", ",", "num_special_tokens", ")", ":", "if", "self", ".", "config", ".", "n_special", "==", "num_special_tokens", ":", "return", "# Update config", "self", ".", "config", ".", "n_special", "=", "num_special_tokens", "# Build new embeddings and initialize all new embeddings (in particular the special tokens)", "old_embed", "=", "self", ".", "tokens_embed", "self", ".", "tokens_embed", "=", "nn", ".", "Embedding", "(", "self", ".", "config", ".", "total_tokens_embeddings", ",", "self", ".", "config", ".", "n_embd", ")", "self", ".", "tokens_embed", ".", "to", "(", "old_embed", ".", "weight", ".", "device", ")", "self", ".", "init_weights", "(", "self", ".", "tokens_embed", ")", "# Copy word embeddings from the previous weights", "self", ".", "tokens_embed", ".", "weight", ".", "data", "[", ":", "self", ".", "config", ".", "vocab_size", ",", ":", "]", "=", "old_embed", ".", "weight", ".", "data", "[", ":", "self", ".", "config", ".", "vocab_size", ",", ":", "]" ]
b832d5bb8a6dfc5965015b828e577677eace601e
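A hedged usage sketch for the resizing above; the shortcut name and token count mirror the OpenAI GPT fine-tuning example but are illustrative here:

from pytorch_pretrained_bert.modeling_openai import OpenAIGPTModel

model = OpenAIGPTModel.from_pretrained('openai-gpt')
model.set_num_special_tokens(3)  # e.g. start, delimiter and classify tokens
# tokens_embed now holds total_tokens_embeddings = vocab_size + 3 rows, with the
# original vocab_size rows copied over and the 3 new rows freshly initialized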
train
OpenAIGPTLMHeadModel.set_num_special_tokens
Update input and output embeddings with a new embedding matrix. Make sure we are sharing the embeddings.
pytorch_pretrained_bert/modeling_openai.py
def set_num_special_tokens(self, num_special_tokens):
    """Update input and output embeddings with a new embedding matrix.
    Make sure we are sharing the embeddings.
    """
    self.transformer.set_num_special_tokens(num_special_tokens)
    self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)
[ "Update", "input", "and", "output", "embeddings", "with", "new", "embedding", "matrice", "Make", "sure", "we", "are", "sharing", "the", "embeddings" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_openai.py#L710-L715
[ "def", "set_num_special_tokens", "(", "self", ",", "num_special_tokens", ")", ":", "self", ".", "transformer", ".", "set_num_special_tokens", "(", "num_special_tokens", ")", "self", ".", "lm_head", ".", "set_embeddings_weights", "(", "self", ".", "transformer", ".", "tokens_embed", ".", "weight", ")" ]
b832d5bb8a6dfc5965015b828e577677eace601e
train
OpenAIAdam.step
Performs a single optimization step.

Arguments:
    closure (callable, optional): A closure that reevaluates the model and returns the loss.
pytorch_pretrained_bert/optimization_openai.py
def step(self, closure=None):
    """Performs a single optimization step.

    Arguments:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
    """
    loss = None
    if closure is not None:
        loss = closure()

    for group in self.param_groups:
        for p in group['params']:
            if p.grad is None:
                continue
            grad = p.grad.data
            if grad.is_sparse:
                raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')

            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state['step'] = 0
                # Exponential moving average of gradient values
                state['exp_avg'] = torch.zeros_like(p.data)
                # Exponential moving average of squared gradient values
                state['exp_avg_sq'] = torch.zeros_like(p.data)

            exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
            beta1, beta2 = group['b1'], group['b2']

            state['step'] += 1

            # Add grad clipping
            if group['max_grad_norm'] > 0:
                clip_grad_norm_(p, group['max_grad_norm'])

            # Decay the first and second moment running average coefficient
            exp_avg.mul_(beta1).add_(1 - beta1, grad)
            exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
            denom = exp_avg_sq.sqrt().add_(group['e'])

            bias_correction1 = 1 - beta1 ** state['step']
            bias_correction2 = 1 - beta2 ** state['step']

            lr_scheduled = group['lr']
            lr_scheduled *= group['schedule'].get_lr(state['step'])

            step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1

            p.data.addcdiv_(-step_size, exp_avg, denom)

            # Add weight decay at the end (fixed version)
            if (len(p.size()) > 1 or group['vector_l2']) and group['weight_decay'] > 0:
                p.data.add_(-lr_scheduled * group['weight_decay'], p.data)

    return loss
[ "Performs", "a", "single", "optimization", "step", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/optimization_openai.py#L70-L127
[ "def", "step", "(", "self", ",", "closure", "=", "None", ")", ":", "loss", "=", "None", "if", "closure", "is", "not", "None", ":", "loss", "=", "closure", "(", ")", "for", "group", "in", "self", ".", "param_groups", ":", "for", "p", "in", "group", "[", "'params'", "]", ":", "if", "p", ".", "grad", "is", "None", ":", "continue", "grad", "=", "p", ".", "grad", ".", "data", "if", "grad", ".", "is_sparse", ":", "raise", "RuntimeError", "(", "'Adam does not support sparse gradients, please consider SparseAdam instead'", ")", "state", "=", "self", ".", "state", "[", "p", "]", "# State initialization", "if", "len", "(", "state", ")", "==", "0", ":", "state", "[", "'step'", "]", "=", "0", "# Exponential moving average of gradient values", "state", "[", "'exp_avg'", "]", "=", "torch", ".", "zeros_like", "(", "p", ".", "data", ")", "# Exponential moving average of squared gradient values", "state", "[", "'exp_avg_sq'", "]", "=", "torch", ".", "zeros_like", "(", "p", ".", "data", ")", "exp_avg", ",", "exp_avg_sq", "=", "state", "[", "'exp_avg'", "]", ",", "state", "[", "'exp_avg_sq'", "]", "beta1", ",", "beta2", "=", "group", "[", "'b1'", "]", ",", "group", "[", "'b2'", "]", "state", "[", "'step'", "]", "+=", "1", "# Add grad clipping", "if", "group", "[", "'max_grad_norm'", "]", ">", "0", ":", "clip_grad_norm_", "(", "p", ",", "group", "[", "'max_grad_norm'", "]", ")", "# Decay the first and second moment running average coefficient", "exp_avg", ".", "mul_", "(", "beta1", ")", ".", "add_", "(", "1", "-", "beta1", ",", "grad", ")", "exp_avg_sq", ".", "mul_", "(", "beta2", ")", ".", "addcmul_", "(", "1", "-", "beta2", ",", "grad", ",", "grad", ")", "denom", "=", "exp_avg_sq", ".", "sqrt", "(", ")", ".", "add_", "(", "group", "[", "'e'", "]", ")", "bias_correction1", "=", "1", "-", "beta1", "**", "state", "[", "'step'", "]", "bias_correction2", "=", "1", "-", "beta2", "**", "state", "[", "'step'", "]", "lr_scheduled", "=", "group", "[", "'lr'", "]", "lr_scheduled", "*=", "group", "[", "'schedule'", "]", ".", "get_lr", "(", "state", "[", "'step'", "]", ")", "step_size", "=", "lr_scheduled", "*", "math", ".", "sqrt", "(", "bias_correction2", ")", "/", "bias_correction1", "p", ".", "data", ".", "addcdiv_", "(", "-", "step_size", ",", "exp_avg", ",", "denom", ")", "# Add weight decay at the end (fixed version)", "if", "(", "len", "(", "p", ".", "size", "(", ")", ")", ">", "1", "or", "group", "[", "'vector_l2'", "]", ")", "and", "group", "[", "'weight_decay'", "]", ">", "0", ":", "p", ".", "data", ".", "add_", "(", "-", "lr_scheduled", "*", "group", "[", "'weight_decay'", "]", ",", "p", ".", "data", ")", "return", "loss" ]
b832d5bb8a6dfc5965015b828e577677eace601e
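For reference, a scalar transcription of the update performed above, omitting gradient clipping, the LR schedule and weight decay; the default hyperparameters are illustrative:

import math

def openai_adam_update(theta, grad, m, v, t, lr, b1=0.9, b2=0.999, e=1e-8):
    """One bias-corrected Adam step on plain floats, mirroring the tensor ops above."""
    m = b1 * m + (1 - b1) * grad              # exp_avg
    v = b2 * v + (1 - b2) * grad * grad       # exp_avg_sq
    step_size = lr * math.sqrt(1 - b2 ** t) / (1 - b1 ** t)
    theta = theta - step_size * m / (math.sqrt(v) + e)
    return theta, m, v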
train
_LRSchedule.get_lr
:param step: which of t_total steps we're on
:param nowarn: set to True to suppress warning regarding training beyond specified 't_total' steps
:return: learning rate multiplier for current update
pytorch_pretrained_bert/optimization.py
def get_lr(self, step, nowarn=False):
    """
    :param step: which of t_total steps we're on
    :param nowarn: set to True to suppress warning regarding training beyond specified 't_total' steps
    :return: learning rate multiplier for current update
    """
    if self.t_total < 0:
        return 1.
    progress = float(step) / self.t_total
    ret = self.get_lr_(progress)
    # warning for exceeding t_total (only active with warmup_linear)
    if not nowarn and self.warn_t_total and progress > 1. and progress > self.warned_for_t_total_at_progress:
        logger.warning(
            "Training beyond specified 't_total'. Learning rate multiplier set to {}. Please set 't_total' of {} correctly."
            .format(ret, self.__class__.__name__))
        self.warned_for_t_total_at_progress = progress
    # end warning
    return ret
[ ":", "param", "step", ":", "which", "of", "t_total", "steps", "we", "re", "on", ":", "param", "nowarn", ":", "set", "to", "True", "to", "suppress", "warning", "regarding", "training", "beyond", "specified", "t_total", "steps", ":", "return", ":", "learning", "rate", "multiplier", "for", "current", "update" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/optimization.py#L53-L70
[ "def", "get_lr", "(", "self", ",", "step", ",", "nowarn", "=", "False", ")", ":", "if", "self", ".", "t_total", "<", "0", ":", "return", "1.", "progress", "=", "float", "(", "step", ")", "/", "self", ".", "t_total", "ret", "=", "self", ".", "get_lr_", "(", "progress", ")", "# warning for exceeding t_total (only active with warmup_linear", "if", "not", "nowarn", "and", "self", ".", "warn_t_total", "and", "progress", ">", "1.", "and", "progress", ">", "self", ".", "warned_for_t_total_at_progress", ":", "logger", ".", "warning", "(", "\"Training beyond specified 't_total'. Learning rate multiplier set to {}. Please set 't_total' of {} correctly.\"", ".", "format", "(", "ret", ",", "self", ".", "__class__", ".", "__name__", ")", ")", "self", ".", "warned_for_t_total_at_progress", "=", "progress", "# end warning", "return", "ret" ]
b832d5bb8a6dfc5965015b828e577677eace601e
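get_lr_ is implemented by subclasses; below is a minimal sketch of the kind of warmup-then-linear-decay multiplier a warmup_linear schedule provides. This reimplementation is an assumption for illustration, not the library's exact code:

def warmup_linear_multiplier(progress, warmup=0.1):
    """progress = step / t_total; ramp up during warmup, then decay linearly to 0."""
    if progress < warmup:
        return progress / warmup
    return max((progress - 1.) / (warmup - 1.), 0.)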
train
BertAdam.step
Performs a single optimization step.

Arguments:
    closure (callable, optional): A closure that reevaluates the model and returns the loss.
pytorch_pretrained_bert/optimization.py
def step(self, closure=None):
    """Performs a single optimization step.

    Arguments:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
    """
    loss = None
    if closure is not None:
        loss = closure()

    for group in self.param_groups:
        for p in group['params']:
            if p.grad is None:
                continue
            grad = p.grad.data
            if grad.is_sparse:
                raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')

            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state['step'] = 0
                # Exponential moving average of gradient values
                state['next_m'] = torch.zeros_like(p.data)
                # Exponential moving average of squared gradient values
                state['next_v'] = torch.zeros_like(p.data)

            next_m, next_v = state['next_m'], state['next_v']
            beta1, beta2 = group['b1'], group['b2']

            # Add grad clipping
            if group['max_grad_norm'] > 0:
                clip_grad_norm_(p, group['max_grad_norm'])

            # Decay the first and second moment running average coefficient
            # In-place operations to update the averages at the same time
            next_m.mul_(beta1).add_(1 - beta1, grad)
            next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
            update = next_m / (next_v.sqrt() + group['e'])

            # Just adding the square of the weights to the loss function is *not*
            # the correct way of using L2 regularization/weight decay with Adam,
            # since that will interact with the m and v parameters in strange ways.
            #
            # Instead we want to decay the weights in a manner that doesn't interact
            # with the m/v parameters. This is equivalent to adding the square
            # of the weights to the loss with plain (non-momentum) SGD.
            if group['weight_decay'] > 0.0:
                update += group['weight_decay'] * p.data

            lr_scheduled = group['lr']
            lr_scheduled *= group['schedule'].get_lr(state['step'])

            update_with_lr = lr_scheduled * update
            p.data.add_(-update_with_lr)

            state['step'] += 1

            # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
            # No bias correction
            # bias_correction1 = 1 - beta1 ** state['step']
            # bias_correction2 = 1 - beta2 ** state['step']

    return loss
[ "Performs", "a", "single", "optimization", "step", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/optimization.py#L237-L302
[ "def", "step", "(", "self", ",", "closure", "=", "None", ")", ":", "loss", "=", "None", "if", "closure", "is", "not", "None", ":", "loss", "=", "closure", "(", ")", "for", "group", "in", "self", ".", "param_groups", ":", "for", "p", "in", "group", "[", "'params'", "]", ":", "if", "p", ".", "grad", "is", "None", ":", "continue", "grad", "=", "p", ".", "grad", ".", "data", "if", "grad", ".", "is_sparse", ":", "raise", "RuntimeError", "(", "'Adam does not support sparse gradients, please consider SparseAdam instead'", ")", "state", "=", "self", ".", "state", "[", "p", "]", "# State initialization", "if", "len", "(", "state", ")", "==", "0", ":", "state", "[", "'step'", "]", "=", "0", "# Exponential moving average of gradient values", "state", "[", "'next_m'", "]", "=", "torch", ".", "zeros_like", "(", "p", ".", "data", ")", "# Exponential moving average of squared gradient values", "state", "[", "'next_v'", "]", "=", "torch", ".", "zeros_like", "(", "p", ".", "data", ")", "next_m", ",", "next_v", "=", "state", "[", "'next_m'", "]", ",", "state", "[", "'next_v'", "]", "beta1", ",", "beta2", "=", "group", "[", "'b1'", "]", ",", "group", "[", "'b2'", "]", "# Add grad clipping", "if", "group", "[", "'max_grad_norm'", "]", ">", "0", ":", "clip_grad_norm_", "(", "p", ",", "group", "[", "'max_grad_norm'", "]", ")", "# Decay the first and second moment running average coefficient", "# In-place operations to update the averages at the same time", "next_m", ".", "mul_", "(", "beta1", ")", ".", "add_", "(", "1", "-", "beta1", ",", "grad", ")", "next_v", ".", "mul_", "(", "beta2", ")", ".", "addcmul_", "(", "1", "-", "beta2", ",", "grad", ",", "grad", ")", "update", "=", "next_m", "/", "(", "next_v", ".", "sqrt", "(", ")", "+", "group", "[", "'e'", "]", ")", "# Just adding the square of the weights to the loss function is *not*", "# the correct way of using L2 regularization/weight decay with Adam,", "# since that will interact with the m and v parameters in strange ways.", "#", "# Instead we want to decay the weights in a manner that doesn't interact", "# with the m/v parameters. This is equivalent to adding the square", "# of the weights to the loss with plain (non-momentum) SGD.", "if", "group", "[", "'weight_decay'", "]", ">", "0.0", ":", "update", "+=", "group", "[", "'weight_decay'", "]", "*", "p", ".", "data", "lr_scheduled", "=", "group", "[", "'lr'", "]", "lr_scheduled", "*=", "group", "[", "'schedule'", "]", ".", "get_lr", "(", "state", "[", "'step'", "]", ")", "update_with_lr", "=", "lr_scheduled", "*", "update", "p", ".", "data", ".", "add_", "(", "-", "update_with_lr", ")", "state", "[", "'step'", "]", "+=", "1", "# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1", "# No bias correction", "# bias_correction1 = 1 - beta1 ** state['step']", "# bias_correction2 = 1 - beta2 ** state['step']", "return", "loss" ]
b832d5bb8a6dfc5965015b828e577677eace601e
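Compared with OpenAIAdam.step above, BertAdam.step skips bias correction and folds the weight decay into the update itself (decoupled, AdamW-style) rather than into the loss. A scalar sketch with illustrative defaults:

def bert_adam_update(theta, grad, m, v, lr, b1=0.9, b2=0.999, e=1e-6, wd=0.01):
    """One BertAdam-style step on plain floats: no bias correction, decoupled decay."""
    m = b1 * m + (1 - b1) * grad
    v = b2 * v + (1 - b2) * grad * grad
    update = m / (v ** 0.5 + e) + wd * theta  # decay added to the update, not the loss
    theta = theta - lr * update
    return theta, m, v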
train
whitespace_tokenize
Runs basic whitespace cleaning and splitting on a piece of text.
pytorch_pretrained_bert/tokenization.py
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    if not text:
        return []
    tokens = text.split()
    return tokens
[ "Runs", "basic", "whitespace", "cleaning", "and", "splitting", "on", "a", "piece", "of", "text", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L65-L71
[ "def", "whitespace_tokenize", "(", "text", ")", ":", "text", "=", "text", ".", "strip", "(", ")", "if", "not", "text", ":", "return", "[", "]", "tokens", "=", "text", ".", "split", "(", ")", "return", "tokens" ]
b832d5bb8a6dfc5965015b828e577677eace601e
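The behavior is exactly str.strip() followed by str.split():

>>> whitespace_tokenize("  Hello,   world!\n")
['Hello,', 'world!']
>>> whitespace_tokenize("   ")
[]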
train
_is_punctuation
Checks whether `char` is a punctuation character.
pytorch_pretrained_bert/tokenization.py
def _is_punctuation(char):
    """Checks whether `char` is a punctuation character."""
    cp = ord(char)
    # We treat all non-letter/number ASCII as punctuation.
    # Characters such as "^", "$", and "`" are not in the Unicode
    # Punctuation class but we treat them as punctuation anyway, for
    # consistency.
    if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
            (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
[ "Checks", "whether", "chars", "is", "a", "punctuation", "character", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L402-L415
[ "def", "_is_punctuation", "(", "char", ")", ":", "cp", "=", "ord", "(", "char", ")", "# We treat all non-letter/number ASCII as punctuation.", "# Characters such as \"^\", \"$\", and \"`\" are not in the Unicode", "# Punctuation class but we treat them as punctuation anyways, for", "# consistency.", "if", "(", "(", "cp", ">=", "33", "and", "cp", "<=", "47", ")", "or", "(", "cp", ">=", "58", "and", "cp", "<=", "64", ")", "or", "(", "cp", ">=", "91", "and", "cp", "<=", "96", ")", "or", "(", "cp", ">=", "123", "and", "cp", "<=", "126", ")", ")", ":", "return", "True", "cat", "=", "unicodedata", ".", "category", "(", "char", ")", "if", "cat", ".", "startswith", "(", "\"P\"", ")", ":", "return", "True", "return", "False" ]
b832d5bb8a6dfc5965015b828e577677eace601e
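Quick checks against the ranges above:

>>> [_is_punctuation(c) for c in "$^a3"]
[True, True, False, False]  # '$' (36) and '^' (94) fall in the ASCII ranges
>>> _is_punctuation(u"\u00bf")  # inverted question mark, Unicode category Po
True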
train
BertTokenizer.convert_tokens_to_ids
Converts a sequence of tokens into ids using the vocab.
pytorch_pretrained_bert/tokenization.py
def convert_tokens_to_ids(self, tokens):
    """Converts a sequence of tokens into ids using the vocab."""
    ids = []
    for token in tokens:
        ids.append(self.vocab[token])
    if len(ids) > self.max_len:
        logger.warning(
            "Token indices sequence length is longer than the specified maximum "
            " sequence length for this BERT model ({} > {}). Running this"
            " sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
        )
    return ids
[ "Converts", "a", "sequence", "of", "tokens", "into", "ids", "using", "the", "vocab", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L117-L128
[ "def", "convert_tokens_to_ids", "(", "self", ",", "tokens", ")", ":", "ids", "=", "[", "]", "for", "token", "in", "tokens", ":", "ids", ".", "append", "(", "self", ".", "vocab", "[", "token", "]", ")", "if", "len", "(", "ids", ")", ">", "self", ".", "max_len", ":", "logger", ".", "warning", "(", "\"Token indices sequence length is longer than the specified maximum \"", "\" sequence length for this BERT model ({} > {}). Running this\"", "\" sequence through BERT will result in indexing errors\"", ".", "format", "(", "len", "(", "ids", ")", ",", "self", ".", "max_len", ")", ")", "return", "ids" ]
b832d5bb8a6dfc5965015b828e577677eace601e
train
BertTokenizer.convert_ids_to_tokens
Converts a sequence of ids into wordpiece tokens using the vocab.
pytorch_pretrained_bert/tokenization.py
def convert_ids_to_tokens(self, ids):
    """Converts a sequence of ids into wordpiece tokens using the vocab."""
    tokens = []
    for i in ids:
        tokens.append(self.ids_to_tokens[i])
    return tokens
[ "Converts", "a", "sequence", "of", "ids", "in", "wordpiece", "tokens", "using", "the", "vocab", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L130-L135
[ "def", "convert_ids_to_tokens", "(", "self", ",", "ids", ")", ":", "tokens", "=", "[", "]", "for", "i", "in", "ids", ":", "tokens", ".", "append", "(", "self", ".", "ids_to_tokens", "[", "i", "]", ")", "return", "tokens" ]
b832d5bb8a6dfc5965015b828e577677eace601e
train
BertTokenizer.save_vocabulary
Save the tokenizer vocabulary to a directory or file.
pytorch_pretrained_bert/tokenization.py
def save_vocabulary(self, vocab_path):
    """Save the tokenizer vocabulary to a directory or file."""
    index = 0
    if os.path.isdir(vocab_path):
        vocab_file = os.path.join(vocab_path, VOCAB_NAME)
    with open(vocab_file, "w", encoding="utf-8") as writer:
        for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
            if index != token_index:
                logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
                               " Please check that the vocabulary is not corrupted!".format(vocab_file))
                index = token_index
            writer.write(token + u'\n')
            index += 1
    return vocab_file
[ "Save", "the", "tokenizer", "vocabulary", "to", "a", "directory", "or", "file", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L137-L150
[ "def", "save_vocabulary", "(", "self", ",", "vocab_path", ")", ":", "index", "=", "0", "if", "os", ".", "path", ".", "isdir", "(", "vocab_path", ")", ":", "vocab_file", "=", "os", ".", "path", ".", "join", "(", "vocab_path", ",", "VOCAB_NAME", ")", "with", "open", "(", "vocab_file", ",", "\"w\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "writer", ":", "for", "token", ",", "token_index", "in", "sorted", "(", "self", ".", "vocab", ".", "items", "(", ")", ",", "key", "=", "lambda", "kv", ":", "kv", "[", "1", "]", ")", ":", "if", "index", "!=", "token_index", ":", "logger", ".", "warning", "(", "\"Saving vocabulary to {}: vocabulary indices are not consecutive.\"", "\" Please check that the vocabulary is not corrupted!\"", ".", "format", "(", "vocab_file", ")", ")", "index", "=", "token_index", "writer", ".", "write", "(", "token", "+", "u'\\n'", ")", "index", "+=", "1", "return", "vocab_file" ]
b832d5bb8a6dfc5965015b828e577677eace601e
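A usage sketch, assuming VOCAB_NAME resolves to 'vocab.txt' in this module; the directory name is illustrative:

import os
from pytorch_pretrained_bert.tokenization import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
os.makedirs('./bert_vocab', exist_ok=True)
print(tokenizer.save_vocabulary('./bert_vocab'))  # ./bert_vocab/vocab.txt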
train
BertTokenizer.from_pretrained
Instantiate a BertTokenizer from a pre-trained model file. Download and cache the pre-trained model file if needed.
pytorch_pretrained_bert/tokenization.py
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
    """
    Instantiate a BertTokenizer from a pre-trained model file.
    Download and cache the pre-trained model file if needed.
    """
    if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
        vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
        if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
            logger.warning("The pre-trained model you are loading is a cased model but you have not set "
                           "`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
                           "you may want to check this behavior.")
            kwargs['do_lower_case'] = False
        elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
            logger.warning("The pre-trained model you are loading is an uncased model but you have set "
                           "`do_lower_case` to False. We are setting `do_lower_case=True` for you "
                           "but you may want to check this behavior.")
            kwargs['do_lower_case'] = True
    else:
        vocab_file = pretrained_model_name_or_path
    if os.path.isdir(vocab_file):
        vocab_file = os.path.join(vocab_file, VOCAB_NAME)
    # redirect to the cache, if necessary
    try:
        resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
    except EnvironmentError:
        logger.error(
            "Model name '{}' was not found in model name list ({}). "
            "We assumed '{}' was a path or url but couldn't find any file "
            "associated to this path or url.".format(
                pretrained_model_name_or_path,
                ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
                vocab_file))
        return None
    if resolved_vocab_file == vocab_file:
        logger.info("loading vocabulary file {}".format(vocab_file))
    else:
        logger.info("loading vocabulary file {} from cache at {}".format(
            vocab_file, resolved_vocab_file))
    if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
        # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
        # than the number of positional embeddings
        max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
        kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
    # Instantiate tokenizer.
    tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
    return tokenizer
[ "Instantiate", "a", "PreTrainedBertModel", "from", "a", "pre", "-", "trained", "model", "file", ".", "Download", "and", "cache", "the", "pre", "-", "trained", "model", "file", "if", "needed", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L153-L198
[ "def", "from_pretrained", "(", "cls", ",", "pretrained_model_name_or_path", ",", "cache_dir", "=", "None", ",", "*", "inputs", ",", "*", "*", "kwargs", ")", ":", "if", "pretrained_model_name_or_path", "in", "PRETRAINED_VOCAB_ARCHIVE_MAP", ":", "vocab_file", "=", "PRETRAINED_VOCAB_ARCHIVE_MAP", "[", "pretrained_model_name_or_path", "]", "if", "'-cased'", "in", "pretrained_model_name_or_path", "and", "kwargs", ".", "get", "(", "'do_lower_case'", ",", "True", ")", ":", "logger", ".", "warning", "(", "\"The pre-trained model you are loading is a cased model but you have not set \"", "\"`do_lower_case` to False. We are setting `do_lower_case=False` for you but \"", "\"you may want to check this behavior.\"", ")", "kwargs", "[", "'do_lower_case'", "]", "=", "False", "elif", "'-cased'", "not", "in", "pretrained_model_name_or_path", "and", "not", "kwargs", ".", "get", "(", "'do_lower_case'", ",", "True", ")", ":", "logger", ".", "warning", "(", "\"The pre-trained model you are loading is an uncased model but you have set \"", "\"`do_lower_case` to False. We are setting `do_lower_case=True` for you \"", "\"but you may want to check this behavior.\"", ")", "kwargs", "[", "'do_lower_case'", "]", "=", "True", "else", ":", "vocab_file", "=", "pretrained_model_name_or_path", "if", "os", ".", "path", ".", "isdir", "(", "vocab_file", ")", ":", "vocab_file", "=", "os", ".", "path", ".", "join", "(", "vocab_file", ",", "VOCAB_NAME", ")", "# redirect to the cache, if necessary", "try", ":", "resolved_vocab_file", "=", "cached_path", "(", "vocab_file", ",", "cache_dir", "=", "cache_dir", ")", "except", "EnvironmentError", ":", "logger", ".", "error", "(", "\"Model name '{}' was not found in model name list ({}). \"", "\"We assumed '{}' was a path or url but couldn't find any file \"", "\"associated to this path or url.\"", ".", "format", "(", "pretrained_model_name_or_path", ",", "', '", ".", "join", "(", "PRETRAINED_VOCAB_ARCHIVE_MAP", ".", "keys", "(", ")", ")", ",", "vocab_file", ")", ")", "return", "None", "if", "resolved_vocab_file", "==", "vocab_file", ":", "logger", ".", "info", "(", "\"loading vocabulary file {}\"", ".", "format", "(", "vocab_file", ")", ")", "else", ":", "logger", ".", "info", "(", "\"loading vocabulary file {} from cache at {}\"", ".", "format", "(", "vocab_file", ",", "resolved_vocab_file", ")", ")", "if", "pretrained_model_name_or_path", "in", "PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP", ":", "# if we're using a pretrained model, ensure the tokenizer wont index sequences longer", "# than the number of positional embeddings", "max_len", "=", "PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP", "[", "pretrained_model_name_or_path", "]", "kwargs", "[", "'max_len'", "]", "=", "min", "(", "kwargs", ".", "get", "(", "'max_len'", ",", "int", "(", "1e12", ")", ")", ",", "max_len", ")", "# Instantiate tokenizer.", "tokenizer", "=", "cls", "(", "resolved_vocab_file", ",", "*", "inputs", ",", "*", "*", "kwargs", ")", "return", "tokenizer" ]
b832d5bb8a6dfc5965015b828e577677eace601e
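Putting the tokenizer methods above together; 'bert-base-uncased' is one of the shortcut names in PRETRAINED_VOCAB_ARCHIVE_MAP, and the sentence is illustrative:

from pytorch_pretrained_bert.tokenization import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokens = tokenizer.tokenize("Hello, how are you?")     # basic + wordpiece tokenization
ids = tokenizer.convert_tokens_to_ids(tokens)          # vocab lookup, warns past max_len
assert tokenizer.convert_ids_to_tokens(ids) == tokens  # round trip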
train
BasicTokenizer.tokenize
Tokenizes a piece of text.
pytorch_pretrained_bert/tokenization.py
def tokenize(self, text):
    """Tokenizes a piece of text."""
    text = self._clean_text(text)
    # This was added on November 1st, 2018 for the multilingual and Chinese
    # models. This is also applied to the English models now, but it doesn't
    # matter since the English models were not trained on any Chinese data
    # and generally don't have any Chinese data in them (there are Chinese
    # characters in the vocabulary because Wikipedia does have some Chinese
    # words in the English Wikipedia.).
    text = self._tokenize_chinese_chars(text)
    orig_tokens = whitespace_tokenize(text)
    split_tokens = []
    for token in orig_tokens:
        if self.do_lower_case and token not in self.never_split:
            token = token.lower()
            token = self._run_strip_accents(token)
        split_tokens.extend(self._run_split_on_punc(token))
    output_tokens = whitespace_tokenize(" ".join(split_tokens))
    return output_tokens
[ "Tokenizes", "a", "piece", "of", "text", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L215-L234
[ "def", "tokenize", "(", "self", ",", "text", ")", ":", "text", "=", "self", ".", "_clean_text", "(", "text", ")", "# This was added on November 1st, 2018 for the multilingual and Chinese", "# models. This is also applied to the English models now, but it doesn't", "# matter since the English models were not trained on any Chinese data", "# and generally don't have any Chinese data in them (there are Chinese", "# characters in the vocabulary because Wikipedia does have some Chinese", "# words in the English Wikipedia.).", "text", "=", "self", ".", "_tokenize_chinese_chars", "(", "text", ")", "orig_tokens", "=", "whitespace_tokenize", "(", "text", ")", "split_tokens", "=", "[", "]", "for", "token", "in", "orig_tokens", ":", "if", "self", ".", "do_lower_case", "and", "token", "not", "in", "self", ".", "never_split", ":", "token", "=", "token", ".", "lower", "(", ")", "token", "=", "self", ".", "_run_strip_accents", "(", "token", ")", "split_tokens", ".", "extend", "(", "self", ".", "_run_split_on_punc", "(", "token", ")", ")", "output_tokens", "=", "whitespace_tokenize", "(", "\" \"", ".", "join", "(", "split_tokens", ")", ")", "return", "output_tokens" ]
b832d5bb8a6dfc5965015b828e577677eace601e
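A usage sketch; lowercasing, accent stripping and punctuation splitting all happen here:

>>> from pytorch_pretrained_bert.tokenization import BasicTokenizer
>>> BasicTokenizer(do_lower_case=True).tokenize(u"Hello, WORLD!")
['hello', ',', 'world', '!']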
train
BasicTokenizer._run_strip_accents
Strips accents from a piece of text.
pytorch_pretrained_bert/tokenization.py
def _run_strip_accents(self, text):
    """Strips accents from a piece of text."""
    text = unicodedata.normalize("NFD", text)
    output = []
    for char in text:
        cat = unicodedata.category(char)
        if cat == "Mn":
            continue
        output.append(char)
    return "".join(output)
[ "Strips", "accents", "from", "a", "piece", "of", "text", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L236-L245
[ "def", "_run_strip_accents", "(", "self", ",", "text", ")", ":", "text", "=", "unicodedata", ".", "normalize", "(", "\"NFD\"", ",", "text", ")", "output", "=", "[", "]", "for", "char", "in", "text", ":", "cat", "=", "unicodedata", ".", "category", "(", "char", ")", "if", "cat", "==", "\"Mn\"", ":", "continue", "output", ".", "append", "(", "char", ")", "return", "\"\"", ".", "join", "(", "output", ")" ]
b832d5bb8a6dfc5965015b828e577677eace601e
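NFD normalization splits accented characters into a base character plus combining marks (category Mn), which are then dropped:

>>> BasicTokenizer()._run_strip_accents(u"café naïve")
'cafe naive'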
train
BasicTokenizer._tokenize_chinese_chars
Adds whitespace around any CJK character.
pytorch_pretrained_bert/tokenization.py
def _tokenize_chinese_chars(self, text):
    """Adds whitespace around any CJK character."""
    output = []
    for char in text:
        cp = ord(char)
        if self._is_chinese_char(cp):
            output.append(" ")
            output.append(char)
            output.append(" ")
        else:
            output.append(char)
    return "".join(output)
[ "Adds", "whitespace", "around", "any", "CJK", "character", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L269-L280
[ "def", "_tokenize_chinese_chars", "(", "self", ",", "text", ")", ":", "output", "=", "[", "]", "for", "char", "in", "text", ":", "cp", "=", "ord", "(", "char", ")", "if", "self", ".", "_is_chinese_char", "(", "cp", ")", ":", "output", ".", "append", "(", "\" \"", ")", "output", ".", "append", "(", "char", ")", "output", ".", "append", "(", "\" \"", ")", "else", ":", "output", ".", "append", "(", "char", ")", "return", "\"\"", ".", "join", "(", "output", ")" ]
b832d5bb8a6dfc5965015b828e577677eace601e
train
BasicTokenizer._is_chinese_char
Checks whether CP is the codepoint of a CJK character.
pytorch_pretrained_bert/tokenization.py
def _is_chinese_char(self, cp):
    """Checks whether CP is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if ((cp >= 0x4E00 and cp <= 0x9FFF) or
            (cp >= 0x3400 and cp <= 0x4DBF) or
            (cp >= 0x20000 and cp <= 0x2A6DF) or
            (cp >= 0x2A700 and cp <= 0x2B73F) or
            (cp >= 0x2B740 and cp <= 0x2B81F) or
            (cp >= 0x2B820 and cp <= 0x2CEAF) or
            (cp >= 0xF900 and cp <= 0xFAFF) or
            (cp >= 0x2F800 and cp <= 0x2FA1F)):
        return True
    return False
[ "Checks", "whether", "CP", "is", "the", "codepoint", "of", "a", "CJK", "character", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L282-L302
[ "def", "_is_chinese_char", "(", "self", ",", "cp", ")", ":", "# This defines a \"chinese character\" as anything in the CJK Unicode block:", "# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)", "#", "# Note that the CJK Unicode block is NOT all Japanese and Korean characters,", "# despite its name. The modern Korean Hangul alphabet is a different block,", "# as is Japanese Hiragana and Katakana. Those alphabets are used to write", "# space-separated words, so they are not treated specially and handled", "# like the all of the other languages.", "if", "(", "(", "cp", ">=", "0x4E00", "and", "cp", "<=", "0x9FFF", ")", "or", "#", "(", "cp", ">=", "0x3400", "and", "cp", "<=", "0x4DBF", ")", "or", "#", "(", "cp", ">=", "0x20000", "and", "cp", "<=", "0x2A6DF", ")", "or", "#", "(", "cp", ">=", "0x2A700", "and", "cp", "<=", "0x2B73F", ")", "or", "#", "(", "cp", ">=", "0x2B740", "and", "cp", "<=", "0x2B81F", ")", "or", "#", "(", "cp", ">=", "0x2B820", "and", "cp", "<=", "0x2CEAF", ")", "or", "(", "cp", ">=", "0xF900", "and", "cp", "<=", "0xFAFF", ")", "or", "#", "(", "cp", ">=", "0x2F800", "and", "cp", "<=", "0x2FA1F", ")", ")", ":", "#", "return", "True", "return", "False" ]
b832d5bb8a6dfc5965015b828e577677eace601e
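Quick checks of the two methods above:

>>> tok = BasicTokenizer()
>>> tok._is_chinese_char(ord(u"中"))  # U+4E2D, in the CJK Unified Ideographs block
True
>>> tok._is_chinese_char(ord(u"ア"))  # U+30A2, Katakana: a different block
False
>>> tok._tokenize_chinese_chars(u"BERT中文model")
'BERT 中  文 model'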
train
WordpieceTokenizer.tokenize
Tokenizes a piece of text into its word pieces.

This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary.

For example:
    input = "unaffable"
    output = ["un", "##aff", "##able"]

Args:
    text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`.

Returns:
    A list of wordpiece tokens.
pytorch_pretrained_bert/tokenization.py
def tokenize(self, text):
    """Tokenizes a piece of text into its word pieces.

    This uses a greedy longest-match-first algorithm to perform tokenization
    using the given vocabulary.

    For example:
        input = "unaffable"
        output = ["un", "##aff", "##able"]

    Args:
        text: A single token or whitespace separated tokens. This should have
            already been passed through `BasicTokenizer`.

    Returns:
        A list of wordpiece tokens.
    """
    output_tokens = []
    for token in whitespace_tokenize(text):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            output_tokens.append(self.unk_token)
            continue

        is_bad = False
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if start > 0:
                    substr = "##" + substr
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                is_bad = True
                break
            sub_tokens.append(cur_substr)
            start = end

        if is_bad:
            output_tokens.append(self.unk_token)
        else:
            output_tokens.extend(sub_tokens)
    return output_tokens
[ "Tokenizes", "a", "piece", "of", "text", "into", "its", "word", "pieces", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L326-L375
[ "def", "tokenize", "(", "self", ",", "text", ")", ":", "output_tokens", "=", "[", "]", "for", "token", "in", "whitespace_tokenize", "(", "text", ")", ":", "chars", "=", "list", "(", "token", ")", "if", "len", "(", "chars", ")", ">", "self", ".", "max_input_chars_per_word", ":", "output_tokens", ".", "append", "(", "self", ".", "unk_token", ")", "continue", "is_bad", "=", "False", "start", "=", "0", "sub_tokens", "=", "[", "]", "while", "start", "<", "len", "(", "chars", ")", ":", "end", "=", "len", "(", "chars", ")", "cur_substr", "=", "None", "while", "start", "<", "end", ":", "substr", "=", "\"\"", ".", "join", "(", "chars", "[", "start", ":", "end", "]", ")", "if", "start", ">", "0", ":", "substr", "=", "\"##\"", "+", "substr", "if", "substr", "in", "self", ".", "vocab", ":", "cur_substr", "=", "substr", "break", "end", "-=", "1", "if", "cur_substr", "is", "None", ":", "is_bad", "=", "True", "break", "sub_tokens", ".", "append", "(", "cur_substr", ")", "start", "=", "end", "if", "is_bad", ":", "output_tokens", ".", "append", "(", "self", ".", "unk_token", ")", "else", ":", "output_tokens", ".", "extend", "(", "sub_tokens", ")", "return", "output_tokens" ]
b832d5bb8a6dfc5965015b828e577677eace601e
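The greedy longest-match-first behavior is easy to see with a toy vocabulary (the vocabulary contents are illustrative):

from pytorch_pretrained_bert.tokenization import WordpieceTokenizer

wp = WordpieceTokenizer(vocab={"un", "##aff", "##able", "[UNK]"}, unk_token="[UNK]")
print(wp.tokenize("unaffable"))  # ['un', '##aff', '##able']
print(wp.tokenize("xyz"))        # ['[UNK]'] -- no prefix of 'xyz' is in the vocab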
train
load_rocstories_dataset
Output a list of tuples (story, 1st continuation, 2nd continuation, label)
examples/run_openai_gpt.py
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding='utf_8') as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
[ "Output", "a", "list", "of", "tuples", "(", "story", "1st", "continuation", "2nd", "continuation", "label", ")" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_openai_gpt.py#L56-L64
[ "def", "load_rocstories_dataset", "(", "dataset_path", ")", ":", "with", "open", "(", "dataset_path", ",", "encoding", "=", "'utf_8'", ")", "as", "f", ":", "f", "=", "csv", ".", "reader", "(", "f", ")", "output", "=", "[", "]", "next", "(", "f", ")", "# skip the first line", "for", "line", "in", "tqdm", "(", "f", ")", ":", "output", ".", "append", "(", "(", "' '", ".", "join", "(", "line", "[", "1", ":", "5", "]", ")", ",", "line", "[", "5", "]", ",", "line", "[", "6", "]", ",", "int", "(", "line", "[", "-", "1", "]", ")", "-", "1", ")", ")", "return", "output" ]
b832d5bb8a6dfc5965015b828e577677eace601e
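The indexing implies a CSV layout of (id, four story sentences, two candidate endings, 1-based answer); the toy row below is invented to illustrate the returned tuple:

import csv, io

toy = io.StringIO(
    "id,s1,s2,s3,s4,ending1,ending2,answer\n"
    "0,Tom was hungry.,He made a sandwich.,It fell.,He sighed.,"
    "He made another.,He flew away.,1\n")
reader = csv.reader(toy)
next(reader)  # skip the header line, as load_rocstories_dataset does
line = next(reader)
print((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
# ('Tom was hungry. He made a sandwich. It fell. He sighed.', 'He made another.', 'He flew away.', 0)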
train
pre_process_datasets
Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label)
into Transformer inputs of shape (n_batch, n_alternative, length) comprising, for each batch and continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
examples/run_openai_gpt.py
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token): """ Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label) To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation: input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token] """ tensor_datasets = [] for dataset in encoded_datasets: n_batch = len(dataset) input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64) mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64) lm_labels = np.full((n_batch, 2, input_len), fill_value=-1, dtype=np.int64) mc_labels = np.zeros((n_batch,), dtype=np.int64) for i, (story, cont1, cont2, mc_label), in enumerate(dataset): with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token] with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token] input_ids[i, 0, :len(with_cont1)] = with_cont1 input_ids[i, 1, :len(with_cont2)] = with_cont2 mc_token_ids[i, 0] = len(with_cont1) - 1 mc_token_ids[i, 1] = len(with_cont2) - 1 lm_labels[i, 0, :len(with_cont1)-1] = with_cont1[1:] lm_labels[i, 1, :len(with_cont2)-1] = with_cont2[1:] mc_labels[i] = mc_label all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs)) return tensor_datasets
[ "Pre", "-", "process", "datasets", "containing", "lists", "of", "tuples", "(", "story", "1st", "continuation", "2nd", "continuation", "label", ")" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_openai_gpt.py#L66-L91
[ "def", "pre_process_datasets", "(", "encoded_datasets", ",", "input_len", ",", "cap_length", ",", "start_token", ",", "delimiter_token", ",", "clf_token", ")", ":", "tensor_datasets", "=", "[", "]", "for", "dataset", "in", "encoded_datasets", ":", "n_batch", "=", "len", "(", "dataset", ")", "input_ids", "=", "np", ".", "zeros", "(", "(", "n_batch", ",", "2", ",", "input_len", ")", ",", "dtype", "=", "np", ".", "int64", ")", "mc_token_ids", "=", "np", ".", "zeros", "(", "(", "n_batch", ",", "2", ")", ",", "dtype", "=", "np", ".", "int64", ")", "lm_labels", "=", "np", ".", "full", "(", "(", "n_batch", ",", "2", ",", "input_len", ")", ",", "fill_value", "=", "-", "1", ",", "dtype", "=", "np", ".", "int64", ")", "mc_labels", "=", "np", ".", "zeros", "(", "(", "n_batch", ",", ")", ",", "dtype", "=", "np", ".", "int64", ")", "for", "i", ",", "(", "story", ",", "cont1", ",", "cont2", ",", "mc_label", ")", ",", "in", "enumerate", "(", "dataset", ")", ":", "with_cont1", "=", "[", "start_token", "]", "+", "story", "[", ":", "cap_length", "]", "+", "[", "delimiter_token", "]", "+", "cont1", "[", ":", "cap_length", "]", "+", "[", "clf_token", "]", "with_cont2", "=", "[", "start_token", "]", "+", "story", "[", ":", "cap_length", "]", "+", "[", "delimiter_token", "]", "+", "cont2", "[", ":", "cap_length", "]", "+", "[", "clf_token", "]", "input_ids", "[", "i", ",", "0", ",", ":", "len", "(", "with_cont1", ")", "]", "=", "with_cont1", "input_ids", "[", "i", ",", "1", ",", ":", "len", "(", "with_cont2", ")", "]", "=", "with_cont2", "mc_token_ids", "[", "i", ",", "0", "]", "=", "len", "(", "with_cont1", ")", "-", "1", "mc_token_ids", "[", "i", ",", "1", "]", "=", "len", "(", "with_cont2", ")", "-", "1", "lm_labels", "[", "i", ",", "0", ",", ":", "len", "(", "with_cont1", ")", "-", "1", "]", "=", "with_cont1", "[", "1", ":", "]", "lm_labels", "[", "i", ",", "1", ",", ":", "len", "(", "with_cont2", ")", "-", "1", "]", "=", "with_cont2", "[", "1", ":", "]", "mc_labels", "[", "i", "]", "=", "mc_label", "all_inputs", "=", "(", "input_ids", ",", "mc_token_ids", ",", "lm_labels", ",", "mc_labels", ")", "tensor_datasets", ".", "append", "(", "tuple", "(", "torch", ".", "tensor", "(", "t", ")", "for", "t", "in", "all_inputs", ")", ")", "return", "tensor_datasets" ]
b832d5bb8a6dfc5965015b828e577677eace601e
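A toy run of the same packing with invented token ids, to make the (n_batch, 2, input_len) layout and the left-shifted LM labels concrete; the special-token ids 100-102 are arbitrary stand-ins.

import numpy as np
import torch

start, delim, clf = 100, 101, 102            # arbitrary stand-ins for the special tokens
story, cont1, cont2 = [1, 2, 3], [4, 5], [6, 7]

with_cont1 = [start] + story + [delim] + cont1 + [clf]  # [100, 1, 2, 3, 101, 4, 5, 102]
with_cont2 = [start] + story + [delim] + cont2 + [clf]

input_len = 10
input_ids = np.zeros((1, 2, input_len), dtype=np.int64)
lm_labels = np.full((1, 2, input_len), fill_value=-1, dtype=np.int64)
input_ids[0, 0, :len(with_cont1)] = with_cont1
input_ids[0, 1, :len(with_cont2)] = with_cont2
# LM labels are the inputs shifted left by one; the -1 padding is ignored by the loss
lm_labels[0, 0, :len(with_cont1) - 1] = with_cont1[1:]
lm_labels[0, 1, :len(with_cont2) - 1] = with_cont2[1:]
mc_token_ids = np.array([[len(with_cont1) - 1, len(with_cont2) - 1]])  # position of [clf]

print(torch.tensor(input_ids).shape)  # torch.Size([1, 2, 10])
print(input_ids[0, 0])                # [100   1   2   3 101   4   5 102   0   0]
print(lm_labels[0, 0])                # [  1   2   3 101   4   5 102  -1  -1  -1]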
train
random_word
Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
:param tokens: list of str, tokenized sentence.
:param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
:return: (list of str, list of int), masked tokens and related labels for LM prediction
examples/lm_finetuning/simple_lm_finetuning.py
def random_word(tokens, tokenizer):
    """
    Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
    :param tokens: list of str, tokenized sentence.
    :param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
    :return: (list of str, list of int), masked tokens and related labels for LM prediction
    """
    output_label = []

    for i, token in enumerate(tokens):
        prob = random.random()
        # mask token with 15% probability
        if prob < 0.15:
            prob /= 0.15

            # 80% randomly change token to mask token
            if prob < 0.8:
                tokens[i] = "[MASK]"

            # 10% randomly change token to random token
            elif prob < 0.9:
                tokens[i] = random.choice(list(tokenizer.vocab.items()))[0]

            # -> rest 10% randomly keep current token

            # append current token to output (we will predict these later)
            try:
                output_label.append(tokenizer.vocab[token])
            except KeyError:
                # For unknown words (should not occur with BPE vocab)
                output_label.append(tokenizer.vocab["[UNK]"])
                logger.warning("Cannot find token '{}' in vocab. Using [UNK] instead".format(token))
        else:
            # no masking token (will be ignored by loss function later)
            output_label.append(-1)

    return tokens, output_label

def random_word(tokens, tokenizer):
    """
    Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
    :param tokens: list of str, tokenized sentence.
    :param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
    :return: (list of str, list of int), masked tokens and related labels for LM prediction
    """
    output_label = []

    for i, token in enumerate(tokens):
        prob = random.random()
        # mask token with 15% probability
        if prob < 0.15:
            prob /= 0.15

            # 80% randomly change token to mask token
            if prob < 0.8:
                tokens[i] = "[MASK]"

            # 10% randomly change token to random token
            elif prob < 0.9:
                tokens[i] = random.choice(list(tokenizer.vocab.items()))[0]

            # -> rest 10% randomly keep current token

            # append current token to output (we will predict these later)
            try:
                output_label.append(tokenizer.vocab[token])
            except KeyError:
                # For unknown words (should not occur with BPE vocab)
                output_label.append(tokenizer.vocab["[UNK]"])
                logger.warning("Cannot find token '{}' in vocab. Using [UNK] instead".format(token))
        else:
            # no masking token (will be ignored by loss function later)
            output_label.append(-1)

    return tokens, output_label
[ "Masking", "some", "random", "tokens", "for", "Language", "Model", "task", "with", "probabilities", "as", "in", "the", "original", "BERT", "paper", ".", ":", "param", "tokens", ":", "list", "of", "str", "tokenized", "sentence", ".", ":", "param", "tokenizer", ":", "Tokenizer", "object", "used", "for", "tokenization", "(", "we", "need", "it", "s", "vocab", "here", ")", ":", "return", ":", "(", "list", "of", "str", "list", "of", "int", ")", "masked", "tokens", "and", "related", "labels", "for", "LM", "prediction" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/lm_finetuning/simple_lm_finetuning.py#L267-L303
[ "def", "random_word", "(", "tokens", ",", "tokenizer", ")", ":", "output_label", "=", "[", "]", "for", "i", ",", "token", "in", "enumerate", "(", "tokens", ")", ":", "prob", "=", "random", ".", "random", "(", ")", "# mask token with 15% probability", "if", "prob", "<", "0.15", ":", "prob", "/=", "0.15", "# 80% randomly change token to mask token", "if", "prob", "<", "0.8", ":", "tokens", "[", "i", "]", "=", "\"[MASK]\"", "# 10% randomly change token to random token", "elif", "prob", "<", "0.9", ":", "tokens", "[", "i", "]", "=", "random", ".", "choice", "(", "list", "(", "tokenizer", ".", "vocab", ".", "items", "(", ")", ")", ")", "[", "0", "]", "# -> rest 10% randomly keep current token", "# append current token to output (we will predict these later)", "try", ":", "output_label", ".", "append", "(", "tokenizer", ".", "vocab", "[", "token", "]", ")", "except", "KeyError", ":", "# For unknown words (should not occur with BPE vocab)", "output_label", ".", "append", "(", "tokenizer", ".", "vocab", "[", "\"[UNK]\"", "]", ")", "logger", ".", "warning", "(", "\"Cannot find token '{}' in vocab. Using [UNK] insetad\"", ".", "format", "(", "token", ")", ")", "else", ":", "# no masking token (will be ignored by loss function later)", "output_label", ".", "append", "(", "-", "1", ")", "return", "tokens", ",", "output_label" ]
b832d5bb8a6dfc5965015b828e577677eace601e
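The 15%/80%/10%/10% split comes from drawing once and rescaling: a self-contained sketch with an invented toy vocab standing in for the tokenizer.

import random

random.seed(0)
vocab = {"[UNK]": 0, "[MASK]": 1, "the": 2, "dog": 3, "barks": 4}  # invented toy vocab

def random_word_sketch(tokens):
    output_label = []
    for i, token in enumerate(tokens):
        prob = random.random()
        if prob < 0.15:              # this position is selected for masking
            prob /= 0.15             # rescale to [0, 1) and split it 80/10/10
            if prob < 0.8:
                tokens[i] = "[MASK]"
            elif prob < 0.9:
                tokens[i] = random.choice(list(vocab))
            # else: keep the original token in place
            output_label.append(vocab.get(token, vocab["[UNK]"]))  # predict the original id
        else:
            output_label.append(-1)  # unmasked positions are ignored by the loss
    return tokens, output_label

print(random_word_sketch(["the", "dog", "barks"]))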
train
convert_example_to_features
Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
IDs, LM labels, input_mask, CLS and SEP tokens etc.
:param example: InputExample, containing sentence input as strings and is_next label
:param max_seq_length: int, maximum length of sequence.
:param tokenizer: Tokenizer
:return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)
examples/lm_finetuning/simple_lm_finetuning.py
def convert_example_to_features(example, max_seq_length, tokenizer):
    """
    Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
    IDs, LM labels, input_mask, CLS and SEP tokens etc.
    :param example: InputExample, containing sentence input as strings and is_next label
    :param max_seq_length: int, maximum length of sequence.
    :param tokenizer: Tokenizer
    :return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)
    """
    tokens_a = example.tokens_a
    tokens_b = example.tokens_b
    # Modifies `tokens_a` and `tokens_b` in place so that the total
    # length is less than the specified length.
    # Account for [CLS], [SEP], [SEP] with "- 3"
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)

    tokens_a, t1_label = random_word(tokens_a, tokenizer)
    tokens_b, t2_label = random_word(tokens_b, tokenizer)
    # concatenate lm labels and account for CLS, SEP, SEP
    lm_label_ids = ([-1] + t1_label + [-1] + t2_label + [-1])

    # The convention in BERT is:
    # (a) For sequence pairs:
    #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
    #  type_ids: 0   0  0    0    0     0       0 0    1  1  1  1   1 1
    # (b) For single sequences:
    #  tokens:   [CLS] the dog is hairy . [SEP]
    #  type_ids: 0   0   0   0  0     0 0
    #
    # Where "type_ids" are used to indicate whether this is the first
    # sequence or the second sequence. The embedding vectors for `type=0` and
    # `type=1` were learned during pre-training and are added to the wordpiece
    # embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambiguously separates the sequences, but it makes
    # it easier for the model to learn the concept of sequences.
    #
    # For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
    # the entire model is fine-tuned.
    tokens = []
    segment_ids = []
    tokens.append("[CLS]")
    segment_ids.append(0)
    for token in tokens_a:
        tokens.append(token)
        segment_ids.append(0)
    tokens.append("[SEP]")
    segment_ids.append(0)

    assert len(tokens_b) > 0
    for token in tokens_b:
        tokens.append(token)
        segment_ids.append(1)
    tokens.append("[SEP]")
    segment_ids.append(1)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)

    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1] * len(input_ids)

    # Zero-pad up to the sequence length.
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
        lm_label_ids.append(-1)

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    assert len(lm_label_ids) == max_seq_length

    if example.guid < 5:
        logger.info("*** Example ***")
        logger.info("guid: %s" % (example.guid))
        logger.info("tokens: %s" % " ".join(
            [str(x) for x in tokens]))
        logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        logger.info(
            "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        logger.info("LM label: %s " % (lm_label_ids))
        logger.info("Is next sentence label: %s " % (example.is_next))

    features = InputFeatures(input_ids=input_ids,
                             input_mask=input_mask,
                             segment_ids=segment_ids,
                             lm_label_ids=lm_label_ids,
                             is_next=example.is_next)
    return features

def convert_example_to_features(example, max_seq_length, tokenizer):
    """
    Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
    IDs, LM labels, input_mask, CLS and SEP tokens etc.
    :param example: InputExample, containing sentence input as strings and is_next label
    :param max_seq_length: int, maximum length of sequence.
    :param tokenizer: Tokenizer
    :return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)
    """
    tokens_a = example.tokens_a
    tokens_b = example.tokens_b
    # Modifies `tokens_a` and `tokens_b` in place so that the total
    # length is less than the specified length.
    # Account for [CLS], [SEP], [SEP] with "- 3"
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)

    tokens_a, t1_label = random_word(tokens_a, tokenizer)
    tokens_b, t2_label = random_word(tokens_b, tokenizer)
    # concatenate lm labels and account for CLS, SEP, SEP
    lm_label_ids = ([-1] + t1_label + [-1] + t2_label + [-1])

    # The convention in BERT is:
    # (a) For sequence pairs:
    #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
    #  type_ids: 0   0  0    0    0     0       0 0    1  1  1  1   1 1
    # (b) For single sequences:
    #  tokens:   [CLS] the dog is hairy . [SEP]
    #  type_ids: 0   0   0   0  0     0 0
    #
    # Where "type_ids" are used to indicate whether this is the first
    # sequence or the second sequence. The embedding vectors for `type=0` and
    # `type=1` were learned during pre-training and are added to the wordpiece
    # embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambiguously separates the sequences, but it makes
    # it easier for the model to learn the concept of sequences.
    #
    # For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
    # the entire model is fine-tuned.
    tokens = []
    segment_ids = []
    tokens.append("[CLS]")
    segment_ids.append(0)
    for token in tokens_a:
        tokens.append(token)
        segment_ids.append(0)
    tokens.append("[SEP]")
    segment_ids.append(0)

    assert len(tokens_b) > 0
    for token in tokens_b:
        tokens.append(token)
        segment_ids.append(1)
    tokens.append("[SEP]")
    segment_ids.append(1)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)

    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1] * len(input_ids)

    # Zero-pad up to the sequence length.
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
        lm_label_ids.append(-1)

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    assert len(lm_label_ids) == max_seq_length

    if example.guid < 5:
        logger.info("*** Example ***")
        logger.info("guid: %s" % (example.guid))
        logger.info("tokens: %s" % " ".join(
            [str(x) for x in tokens]))
        logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        logger.info(
            "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        logger.info("LM label: %s " % (lm_label_ids))
        logger.info("Is next sentence label: %s " % (example.is_next))

    features = InputFeatures(input_ids=input_ids,
                             input_mask=input_mask,
                             segment_ids=segment_ids,
                             lm_label_ids=lm_label_ids,
                             is_next=example.is_next)
    return features
[ "Convert", "a", "raw", "sample", "(", "pair", "of", "sentences", "as", "tokenized", "strings", ")", "into", "a", "proper", "training", "sample", "with", "IDs", "LM", "labels", "input_mask", "CLS", "and", "SEP", "tokens", "etc", ".", ":", "param", "example", ":", "InputExample", "containing", "sentence", "input", "as", "strings", "and", "is_next", "label", ":", "param", "max_seq_length", ":", "int", "maximum", "length", "of", "sequence", ".", ":", "param", "tokenizer", ":", "Tokenizer", ":", "return", ":", "InputFeatures", "containing", "all", "inputs", "and", "labels", "of", "one", "sample", "as", "IDs", "(", "as", "used", "for", "model", "training", ")" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/lm_finetuning/simple_lm_finetuning.py#L306-L397
[ "def", "convert_example_to_features", "(", "example", ",", "max_seq_length", ",", "tokenizer", ")", ":", "tokens_a", "=", "example", ".", "tokens_a", "tokens_b", "=", "example", ".", "tokens_b", "# Modifies `tokens_a` and `tokens_b` in place so that the total", "# length is less than the specified length.", "# Account for [CLS], [SEP], [SEP] with \"- 3\"", "_truncate_seq_pair", "(", "tokens_a", ",", "tokens_b", ",", "max_seq_length", "-", "3", ")", "tokens_a", ",", "t1_label", "=", "random_word", "(", "tokens_a", ",", "tokenizer", ")", "tokens_b", ",", "t2_label", "=", "random_word", "(", "tokens_b", ",", "tokenizer", ")", "# concatenate lm labels and account for CLS, SEP, SEP", "lm_label_ids", "=", "(", "[", "-", "1", "]", "+", "t1_label", "+", "[", "-", "1", "]", "+", "t2_label", "+", "[", "-", "1", "]", ")", "# The convention in BERT is:", "# (a) For sequence pairs:", "# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]", "# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1", "# (b) For single sequences:", "# tokens: [CLS] the dog is hairy . [SEP]", "# type_ids: 0 0 0 0 0 0 0", "#", "# Where \"type_ids\" are used to indicate whether this is the first", "# sequence or the second sequence. The embedding vectors for `type=0` and", "# `type=1` were learned during pre-training and are added to the wordpiece", "# embedding vector (and position vector). This is not *strictly* necessary", "# since the [SEP] token unambigiously separates the sequences, but it makes", "# it easier for the model to learn the concept of sequences.", "#", "# For classification tasks, the first vector (corresponding to [CLS]) is", "# used as as the \"sentence vector\". Note that this only makes sense because", "# the entire model is fine-tuned.", "tokens", "=", "[", "]", "segment_ids", "=", "[", "]", "tokens", ".", "append", "(", "\"[CLS]\"", ")", "segment_ids", ".", "append", "(", "0", ")", "for", "token", "in", "tokens_a", ":", "tokens", ".", "append", "(", "token", ")", "segment_ids", ".", "append", "(", "0", ")", "tokens", ".", "append", "(", "\"[SEP]\"", ")", "segment_ids", ".", "append", "(", "0", ")", "assert", "len", "(", "tokens_b", ")", ">", "0", "for", "token", "in", "tokens_b", ":", "tokens", ".", "append", "(", "token", ")", "segment_ids", ".", "append", "(", "1", ")", "tokens", ".", "append", "(", "\"[SEP]\"", ")", "segment_ids", ".", "append", "(", "1", ")", "input_ids", "=", "tokenizer", ".", "convert_tokens_to_ids", "(", "tokens", ")", "# The mask has 1 for real tokens and 0 for padding tokens. 
Only real", "# tokens are attended to.", "input_mask", "=", "[", "1", "]", "*", "len", "(", "input_ids", ")", "# Zero-pad up to the sequence length.", "while", "len", "(", "input_ids", ")", "<", "max_seq_length", ":", "input_ids", ".", "append", "(", "0", ")", "input_mask", ".", "append", "(", "0", ")", "segment_ids", ".", "append", "(", "0", ")", "lm_label_ids", ".", "append", "(", "-", "1", ")", "assert", "len", "(", "input_ids", ")", "==", "max_seq_length", "assert", "len", "(", "input_mask", ")", "==", "max_seq_length", "assert", "len", "(", "segment_ids", ")", "==", "max_seq_length", "assert", "len", "(", "lm_label_ids", ")", "==", "max_seq_length", "if", "example", ".", "guid", "<", "5", ":", "logger", ".", "info", "(", "\"*** Example ***\"", ")", "logger", ".", "info", "(", "\"guid: %s\"", "%", "(", "example", ".", "guid", ")", ")", "logger", ".", "info", "(", "\"tokens: %s\"", "%", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "tokens", "]", ")", ")", "logger", ".", "info", "(", "\"input_ids: %s\"", "%", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "input_ids", "]", ")", ")", "logger", ".", "info", "(", "\"input_mask: %s\"", "%", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "input_mask", "]", ")", ")", "logger", ".", "info", "(", "\"segment_ids: %s\"", "%", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "segment_ids", "]", ")", ")", "logger", ".", "info", "(", "\"LM label: %s \"", "%", "(", "lm_label_ids", ")", ")", "logger", ".", "info", "(", "\"Is next sentence label: %s \"", "%", "(", "example", ".", "is_next", ")", ")", "features", "=", "InputFeatures", "(", "input_ids", "=", "input_ids", ",", "input_mask", "=", "input_mask", ",", "segment_ids", "=", "segment_ids", ",", "lm_label_ids", "=", "lm_label_ids", ",", "is_next", "=", "example", ".", "is_next", ")", "return", "features" ]
b832d5bb8a6dfc5965015b828e577677eace601e
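A toy walk-through of the packing layout only (masking and the real vocab omitted; the token-to-id map is invented), showing how tokens, segment ids, and the padding mask line up.

# Toy packing layout only (masking omitted; the token-to-id map is invented)
tokens_a = ["is", "this", "jack"]
tokens_b = ["no", "it", "is", "not"]
max_seq_length = 12

tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)

toy_vocab = {t: i + 1 for i, t in enumerate(dict.fromkeys(tokens))}  # invented ids; 0 kept for padding
input_ids = [toy_vocab[t] for t in tokens]
input_mask = [1] * len(input_ids)

while len(input_ids) < max_seq_length:  # zero-pad and mask out the padding
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)

print(tokens)       # ['[CLS]', 'is', 'this', 'jack', '[SEP]', 'no', 'it', 'is', 'not', '[SEP]']
print(segment_ids)  # [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]
print(input_mask)   # [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]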
train
BERTDataset.random_sent
Get one sample from corpus consisting of two sentences. With prob. 50% these are two subsequent sentences
from one doc. With 50% the second sentence will be a random one from another doc.
:param index: int, index of sample.
:return: (str, str, int), sentence 1, sentence 2, isNextSentence Label
examples/lm_finetuning/simple_lm_finetuning.py
def random_sent(self, index):
        """
        Get one sample from corpus consisting of two sentences. With prob. 50% these are two subsequent sentences
        from one doc. With 50% the second sentence will be a random one from another doc.
        :param index: int, index of sample.
        :return: (str, str, int), sentence 1, sentence 2, isNextSentence Label
        """
        t1, t2 = self.get_corpus_line(index)
        if random.random() > 0.5:
            label = 0
        else:
            t2 = self.get_random_line()
            label = 1

        assert len(t1) > 0
        assert len(t2) > 0
        return t1, t2, label

def random_sent(self, index):
        """
        Get one sample from corpus consisting of two sentences. With prob. 50% these are two subsequent sentences
        from one doc. With 50% the second sentence will be a random one from another doc.
        :param index: int, index of sample.
        :return: (str, str, int), sentence 1, sentence 2, isNextSentence Label
        """
        t1, t2 = self.get_corpus_line(index)
        if random.random() > 0.5:
            label = 0
        else:
            t2 = self.get_random_line()
            label = 1

        assert len(t1) > 0
        assert len(t2) > 0
        return t1, t2, label
[ "Get", "one", "sample", "from", "corpus", "consisting", "of", "two", "sentences", ".", "With", "prob", ".", "50%", "these", "are", "two", "subsequent", "sentences", "from", "one", "doc", ".", "With", "50%", "the", "second", "sentence", "will", "be", "a", "random", "one", "from", "another", "doc", ".", ":", "param", "index", ":", "int", "index", "of", "sample", ".", ":", "return", ":", "(", "str", "str", "int", ")", "sentence", "1", "sentence", "2", "isNextSentence", "Label" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/lm_finetuning/simple_lm_finetuning.py#L141-L157
[ "def", "random_sent", "(", "self", ",", "index", ")", ":", "t1", ",", "t2", "=", "self", ".", "get_corpus_line", "(", "index", ")", "if", "random", ".", "random", "(", ")", ">", "0.5", ":", "label", "=", "0", "else", ":", "t2", "=", "self", ".", "get_random_line", "(", ")", "label", "=", "1", "assert", "len", "(", "t1", ")", ">", "0", "assert", "len", "(", "t2", ")", ">", "0", "return", "t1", ",", "t2", ",", "label" ]
b832d5bb8a6dfc5965015b828e577677eace601e
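The label convention (0 = the true next sentence, 1 = a random one) in a stand-alone sketch with canned sentences.

import random

random.seed(1)
consecutive = ("He opened the door.", "The room was dark.")   # invented doc lines
random_pool = ["Bananas are yellow.", "The train was late."]  # lines from other docs

def random_sent_sketch():
    t1, t2 = consecutive
    if random.random() > 0.5:
        label = 0                        # 0 = t2 really follows t1
    else:
        t2 = random.choice(random_pool)  # 1 = t2 swapped for a random line
        label = 1
    return t1, t2, label

print(random_sent_sketch())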
train
BERTDataset.get_corpus_line
Get one sample from corpus consisting of a pair of two subsequent lines from the same doc.
:param item: int, index of sample.
:return: (str, str), two subsequent sentences from corpus
examples/lm_finetuning/simple_lm_finetuning.py
def get_corpus_line(self, item):
        """
        Get one sample from corpus consisting of a pair of two subsequent lines from the same doc.
        :param item: int, index of sample.
        :return: (str, str), two subsequent sentences from corpus
        """
        t1 = ""
        t2 = ""
        assert item < self.corpus_lines
        if self.on_memory:
            sample = self.sample_to_doc[item]
            t1 = self.all_docs[sample["doc_id"]][sample["line"]]
            t2 = self.all_docs[sample["doc_id"]][sample["line"]+1]
            # used later to avoid random nextSentence from same doc
            self.current_doc = sample["doc_id"]
            return t1, t2
        else:
            if self.line_buffer is None:
                # read first non-empty line of file
                while t1 == "" :
                    t1 = next(self.file).strip()
                t2 = next(self.file).strip()
            else:
                # use t2 from previous iteration as new t1
                t1 = self.line_buffer
                t2 = next(self.file).strip()
                # skip empty rows that are used for separating documents and keep track of current doc id
                while t2 == "" or t1 == "":
                    t1 = next(self.file).strip()
                    t2 = next(self.file).strip()
                    self.current_doc = self.current_doc+1
            self.line_buffer = t2

        assert t1 != ""
        assert t2 != ""
        return t1, t2

def get_corpus_line(self, item):
        """
        Get one sample from corpus consisting of a pair of two subsequent lines from the same doc.
        :param item: int, index of sample.
        :return: (str, str), two subsequent sentences from corpus
        """
        t1 = ""
        t2 = ""
        assert item < self.corpus_lines
        if self.on_memory:
            sample = self.sample_to_doc[item]
            t1 = self.all_docs[sample["doc_id"]][sample["line"]]
            t2 = self.all_docs[sample["doc_id"]][sample["line"]+1]
            # used later to avoid random nextSentence from same doc
            self.current_doc = sample["doc_id"]
            return t1, t2
        else:
            if self.line_buffer is None:
                # read first non-empty line of file
                while t1 == "" :
                    t1 = next(self.file).strip()
                t2 = next(self.file).strip()
            else:
                # use t2 from previous iteration as new t1
                t1 = self.line_buffer
                t2 = next(self.file).strip()
                # skip empty rows that are used for separating documents and keep track of current doc id
                while t2 == "" or t1 == "":
                    t1 = next(self.file).strip()
                    t2 = next(self.file).strip()
                    self.current_doc = self.current_doc+1
            self.line_buffer = t2

        assert t1 != ""
        assert t2 != ""
        return t1, t2
[ "Get", "one", "sample", "from", "corpus", "consisting", "of", "a", "pair", "of", "two", "subsequent", "lines", "from", "the", "same", "doc", ".", ":", "param", "item", ":", "int", "index", "of", "sample", ".", ":", "return", ":", "(", "str", "str", ")", "two", "subsequent", "sentences", "from", "corpus" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/lm_finetuning/simple_lm_finetuning.py#L159-L194
[ "def", "get_corpus_line", "(", "self", ",", "item", ")", ":", "t1", "=", "\"\"", "t2", "=", "\"\"", "assert", "item", "<", "self", ".", "corpus_lines", "if", "self", ".", "on_memory", ":", "sample", "=", "self", ".", "sample_to_doc", "[", "item", "]", "t1", "=", "self", ".", "all_docs", "[", "sample", "[", "\"doc_id\"", "]", "]", "[", "sample", "[", "\"line\"", "]", "]", "t2", "=", "self", ".", "all_docs", "[", "sample", "[", "\"doc_id\"", "]", "]", "[", "sample", "[", "\"line\"", "]", "+", "1", "]", "# used later to avoid random nextSentence from same doc", "self", ".", "current_doc", "=", "sample", "[", "\"doc_id\"", "]", "return", "t1", ",", "t2", "else", ":", "if", "self", ".", "line_buffer", "is", "None", ":", "# read first non-empty line of file", "while", "t1", "==", "\"\"", ":", "t1", "=", "next", "(", "self", ".", "file", ")", ".", "strip", "(", ")", "t2", "=", "next", "(", "self", ".", "file", ")", ".", "strip", "(", ")", "else", ":", "# use t2 from previous iteration as new t1", "t1", "=", "self", ".", "line_buffer", "t2", "=", "next", "(", "self", ".", "file", ")", ".", "strip", "(", ")", "# skip empty rows that are used for separating documents and keep track of current doc id", "while", "t2", "==", "\"\"", "or", "t1", "==", "\"\"", ":", "t1", "=", "next", "(", "self", ".", "file", ")", ".", "strip", "(", ")", "t2", "=", "next", "(", "self", ".", "file", ")", ".", "strip", "(", ")", "self", ".", "current_doc", "=", "self", ".", "current_doc", "+", "1", "self", ".", "line_buffer", "=", "t2", "assert", "t1", "!=", "\"\"", "assert", "t2", "!=", "\"\"", "return", "t1", ",", "t2" ]
b832d5bb8a6dfc5965015b828e577677eace601e
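How the on-memory branch resolves a sample index: a sketch of the all_docs / sample_to_doc layout it assumes (data invented; only lines with a successor are indexed).

# Invented in-memory layout: documents as lists of lines, plus a flat index
# mapping a sample id to (doc_id, line); only lines with a successor are indexed.
all_docs = [["a1", "a2", "a3"], ["b1", "b2"]]
sample_to_doc = [{"doc_id": 0, "line": 0},
                 {"doc_id": 0, "line": 1},
                 {"doc_id": 1, "line": 0}]

def get_corpus_line_sketch(item):
    sample = sample_to_doc[item]
    t1 = all_docs[sample["doc_id"]][sample["line"]]
    t2 = all_docs[sample["doc_id"]][sample["line"] + 1]  # the successor line
    return t1, t2

print(get_corpus_line_sketch(1))  # ('a2', 'a3')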
train
BERTDataset.get_random_line
Get random line from another document for nextSentence task.
:return: str, content of one line
examples/lm_finetuning/simple_lm_finetuning.py
def get_random_line(self):
        """
        Get random line from another document for nextSentence task.
        :return: str, content of one line
        """
        # Similar to original tf repo: This outer loop should rarely go for more than one iteration for large
        # corpora. However, just to be careful, we try to make sure that
        # the random document is not the same as the document we're processing.
        for _ in range(10):
            if self.on_memory:
                rand_doc_idx = random.randint(0, len(self.all_docs)-1)
                rand_doc = self.all_docs[rand_doc_idx]
                line = rand_doc[random.randrange(len(rand_doc))]
            else:
                rand_index = random.randint(1, self.corpus_lines if self.corpus_lines < 1000 else 1000)
                #pick random line
                for _ in range(rand_index):
                    line = self.get_next_line()
            #check if our picked random line is really from another doc like we want it to be
            if self.current_random_doc != self.current_doc:
                break
        return line

def get_random_line(self):
        """
        Get random line from another document for nextSentence task.
        :return: str, content of one line
        """
        # Similar to original tf repo: This outer loop should rarely go for more than one iteration for large
        # corpora. However, just to be careful, we try to make sure that
        # the random document is not the same as the document we're processing.
        for _ in range(10):
            if self.on_memory:
                rand_doc_idx = random.randint(0, len(self.all_docs)-1)
                rand_doc = self.all_docs[rand_doc_idx]
                line = rand_doc[random.randrange(len(rand_doc))]
            else:
                rand_index = random.randint(1, self.corpus_lines if self.corpus_lines < 1000 else 1000)
                #pick random line
                for _ in range(rand_index):
                    line = self.get_next_line()
            #check if our picked random line is really from another doc like we want it to be
            if self.current_random_doc != self.current_doc:
                break
        return line
[ "Get", "random", "line", "from", "another", "document", "for", "nextSentence", "task", ".", ":", "return", ":", "str", "content", "of", "one", "line" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/lm_finetuning/simple_lm_finetuning.py#L196-L217
[ "def", "get_random_line", "(", "self", ")", ":", "# Similar to original tf repo: This outer loop should rarely go for more than one iteration for large", "# corpora. However, just to be careful, we try to make sure that", "# the random document is not the same as the document we're processing.", "for", "_", "in", "range", "(", "10", ")", ":", "if", "self", ".", "on_memory", ":", "rand_doc_idx", "=", "random", ".", "randint", "(", "0", ",", "len", "(", "self", ".", "all_docs", ")", "-", "1", ")", "rand_doc", "=", "self", ".", "all_docs", "[", "rand_doc_idx", "]", "line", "=", "rand_doc", "[", "random", ".", "randrange", "(", "len", "(", "rand_doc", ")", ")", "]", "else", ":", "rand_index", "=", "random", ".", "randint", "(", "1", ",", "self", ".", "corpus_lines", "if", "self", ".", "corpus_lines", "<", "1000", "else", "1000", ")", "#pick random line", "for", "_", "in", "range", "(", "rand_index", ")", ":", "line", "=", "self", ".", "get_next_line", "(", ")", "#check if our picked random line is really from another doc like we want it to be", "if", "self", ".", "current_random_doc", "!=", "self", ".", "current_doc", ":", "break", "return", "line" ]
b832d5bb8a6dfc5965015b828e577677eace601e
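A simplified sketch of the bounded retry: redraw a line and stop once it comes from a different document. (Here the document indices are compared directly; the original tracks current_random_doc while streaming.)

import random

random.seed(0)
all_docs = [["a1", "a2"], ["b1"], ["c1", "c2", "c3"]]  # invented corpus
current_doc = 0

# Bounded retry: redraw until the line comes from another document (rarely loops)
for _ in range(10):
    rand_doc_idx = random.randint(0, len(all_docs) - 1)
    rand_doc = all_docs[rand_doc_idx]
    line = rand_doc[random.randrange(len(rand_doc))]
    if rand_doc_idx != current_doc:
        break
print(line)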
train
BERTDataset.get_next_line
Gets next line of random_file and starts over when reaching end of file
examples/lm_finetuning/simple_lm_finetuning.py
def get_next_line(self):
        """ Gets next line of random_file and starts over when reaching end of file"""
        try:
            line = next(self.random_file).strip()
            #keep track of which document we are currently looking at to later avoid having the same doc as t1
            if line == "":
                self.current_random_doc = self.current_random_doc + 1
                line = next(self.random_file).strip()
        except StopIteration:
            self.random_file.close()
            self.random_file = open(self.corpus_path, "r", encoding=self.encoding)
            line = next(self.random_file).strip()
        return line

def get_next_line(self):
        """ Gets next line of random_file and starts over when reaching end of file"""
        try:
            line = next(self.random_file).strip()
            #keep track of which document we are currently looking at to later avoid having the same doc as t1
            if line == "":
                self.current_random_doc = self.current_random_doc + 1
                line = next(self.random_file).strip()
        except StopIteration:
            self.random_file.close()
            self.random_file = open(self.corpus_path, "r", encoding=self.encoding)
            line = next(self.random_file).strip()
        return line
[ "Gets", "next", "line", "of", "random_file", "and", "starts", "over", "when", "reaching", "end", "of", "file" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/lm_finetuning/simple_lm_finetuning.py#L219-L231
[ "def", "get_next_line", "(", "self", ")", ":", "try", ":", "line", "=", "next", "(", "self", ".", "random_file", ")", ".", "strip", "(", ")", "#keep track of which document we are currently looking at to later avoid having the same doc as t1", "if", "line", "==", "\"\"", ":", "self", ".", "current_random_doc", "=", "self", ".", "current_random_doc", "+", "1", "line", "=", "next", "(", "self", ".", "random_file", ")", ".", "strip", "(", ")", "except", "StopIteration", ":", "self", ".", "random_file", ".", "close", "(", ")", "self", ".", "random_file", "=", "open", "(", "self", ".", "corpus_path", ",", "\"r\"", ",", "encoding", "=", "self", ".", "encoding", ")", "line", "=", "next", "(", "self", ".", "random_file", ")", ".", "strip", "(", ")", "return", "line" ]
b832d5bb8a6dfc5965015b828e577677eace601e
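The wrap-around behaviour in miniature, with io.StringIO standing in for the corpus file; seeking back to the start plays the role of reopening.

import io

corpus = io.StringIO("first\nsecond\n")  # stand-in for the corpus file

def get_next_line_sketch(f):
    try:
        return next(f).strip()
    except StopIteration:
        f.seek(0)  # the original reopens the file; seeking back is the in-memory analogue
        return next(f).strip()

for _ in range(3):
    print(get_next_line_sketch(corpus))  # first, second, first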
train
create_masked_lm_predictions
Creates the predictions for the masked LM objective. This is mostly copied from the Google BERT repo, but with several refactors to clean it up and remove a lot of unnecessary variables.
examples/lm_finetuning/pregenerate_training_data.py
def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_list):
    """Creates the predictions for the masked LM objective. This is mostly copied from the Google BERT
    repo, but with several refactors to clean it up and remove a lot of unnecessary variables."""
    cand_indices = []
    for (i, token) in enumerate(tokens):
        if token == "[CLS]" or token == "[SEP]":
            continue
        cand_indices.append(i)

    num_to_mask = min(max_predictions_per_seq,
                      max(1, int(round(len(tokens) * masked_lm_prob))))
    shuffle(cand_indices)
    mask_indices = sorted(sample(cand_indices, num_to_mask))
    masked_token_labels = []
    for index in mask_indices:
        # 80% of the time, replace with [MASK]
        if random() < 0.8:
            masked_token = "[MASK]"
        else:
            # 10% of the time, keep original
            if random() < 0.5:
                masked_token = tokens[index]
            # 10% of the time, replace with random word
            else:
                masked_token = choice(vocab_list)
        masked_token_labels.append(tokens[index])
        # Once we've saved the true label for that token, we can overwrite it with the masked version
        tokens[index] = masked_token

    return tokens, mask_indices, masked_token_labels

def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_list):
    """Creates the predictions for the masked LM objective. This is mostly copied from the Google BERT
    repo, but with several refactors to clean it up and remove a lot of unnecessary variables."""
    cand_indices = []
    for (i, token) in enumerate(tokens):
        if token == "[CLS]" or token == "[SEP]":
            continue
        cand_indices.append(i)

    num_to_mask = min(max_predictions_per_seq,
                      max(1, int(round(len(tokens) * masked_lm_prob))))
    shuffle(cand_indices)
    mask_indices = sorted(sample(cand_indices, num_to_mask))
    masked_token_labels = []
    for index in mask_indices:
        # 80% of the time, replace with [MASK]
        if random() < 0.8:
            masked_token = "[MASK]"
        else:
            # 10% of the time, keep original
            if random() < 0.5:
                masked_token = tokens[index]
            # 10% of the time, replace with random word
            else:
                masked_token = choice(vocab_list)
        masked_token_labels.append(tokens[index])
        # Once we've saved the true label for that token, we can overwrite it with the masked version
        tokens[index] = masked_token

    return tokens, mask_indices, masked_token_labels
[ "Creates", "the", "predictions", "for", "the", "masked", "LM", "objective", ".", "This", "is", "mostly", "copied", "from", "the", "Google", "BERT", "repo", "but", "with", "several", "refactors", "to", "clean", "it", "up", "and", "remove", "a", "lot", "of", "unnecessary", "variables", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/lm_finetuning/pregenerate_training_data.py#L102-L131
[ "def", "create_masked_lm_predictions", "(", "tokens", ",", "masked_lm_prob", ",", "max_predictions_per_seq", ",", "vocab_list", ")", ":", "cand_indices", "=", "[", "]", "for", "(", "i", ",", "token", ")", "in", "enumerate", "(", "tokens", ")", ":", "if", "token", "==", "\"[CLS]\"", "or", "token", "==", "\"[SEP]\"", ":", "continue", "cand_indices", ".", "append", "(", "i", ")", "num_to_mask", "=", "min", "(", "max_predictions_per_seq", ",", "max", "(", "1", ",", "int", "(", "round", "(", "len", "(", "tokens", ")", "*", "masked_lm_prob", ")", ")", ")", ")", "shuffle", "(", "cand_indices", ")", "mask_indices", "=", "sorted", "(", "sample", "(", "cand_indices", ",", "num_to_mask", ")", ")", "masked_token_labels", "=", "[", "]", "for", "index", "in", "mask_indices", ":", "# 80% of the time, replace with [MASK]", "if", "random", "(", ")", "<", "0.8", ":", "masked_token", "=", "\"[MASK]\"", "else", ":", "# 10% of the time, keep original", "if", "random", "(", ")", "<", "0.5", ":", "masked_token", "=", "tokens", "[", "index", "]", "# 10% of the time, replace with random word", "else", ":", "masked_token", "=", "choice", "(", "vocab_list", ")", "masked_token_labels", ".", "append", "(", "tokens", "[", "index", "]", ")", "# Once we've saved the true label for that token, we can overwrite it with the masked version", "tokens", "[", "index", "]", "=", "masked_token", "return", "tokens", ",", "mask_indices", ",", "masked_token_labels" ]
b832d5bb8a6dfc5965015b828e577677eace601e
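A worked example of the masking budget: with 7 tokens and masked_lm_prob 0.15, round(7 * 0.15) = 1, clamped to at least one prediction and at most max_predictions_per_seq.

from random import sample, seed, shuffle

seed(0)
tokens = ["[CLS]", "the", "quick", "brown", "fox", "jumps", "[SEP]"]
masked_lm_prob, max_predictions_per_seq = 0.15, 20

# [CLS] and [SEP] are never masking candidates
cand_indices = [i for i, t in enumerate(tokens) if t not in ("[CLS]", "[SEP]")]
# round(7 * 0.15) = 1, clamped to at least 1 and at most max_predictions_per_seq
num_to_mask = min(max_predictions_per_seq, max(1, int(round(len(tokens) * masked_lm_prob))))
shuffle(cand_indices)
mask_indices = sorted(sample(cand_indices, num_to_mask))
print(num_to_mask, mask_indices)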
train
create_instances_from_document
This code is mostly a duplicate of the equivalent function from Google BERT's repo. However, we make some changes and improvements. Sampling is improved and no longer requires a loop in this function. Also, documents are sampled proportionally to the number of sentences they contain, which means each sentence (rather than each document) has an equal chance of being sampled as a false example for the NextSentence task.
examples/lm_finetuning/pregenerate_training_data.py
def create_instances_from_document(
        doc_database, doc_idx, max_seq_length, short_seq_prob,
        masked_lm_prob, max_predictions_per_seq, vocab_list):
    """This code is mostly a duplicate of the equivalent function from Google BERT's repo.
    However, we make some changes and improvements. Sampling is improved and no longer requires a loop
    in this function. Also, documents are sampled proportionally to the number of sentences they contain,
    which means each sentence (rather than each document) has an equal chance of being sampled as a false
    example for the NextSentence task."""
    document = doc_database[doc_idx]
    # Account for [CLS], [SEP], [SEP]
    max_num_tokens = max_seq_length - 3

    # We *usually* want to fill up the entire sequence since we are padding
    # to `max_seq_length` anyways, so short sequences are generally wasted
    # computation. However, we *sometimes*
    # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
    # sequences to minimize the mismatch between pre-training and fine-tuning.
    # The `target_seq_length` is just a rough target however, whereas
    # `max_seq_length` is a hard limit.
    target_seq_length = max_num_tokens
    if random() < short_seq_prob:
        target_seq_length = randint(2, max_num_tokens)

    # We DON'T just concatenate all of the tokens from a document into a long
    # sequence and choose an arbitrary split point because this would make the
    # next sentence prediction task too easy. Instead, we split the input into
    # segments "A" and "B" based on the actual "sentences" provided by the user
    # input.
    instances = []
    current_chunk = []
    current_length = 0
    i = 0
    while i < len(document):
        segment = document[i]
        current_chunk.append(segment)
        current_length += len(segment)
        if i == len(document) - 1 or current_length >= target_seq_length:
            if current_chunk:
                # `a_end` is how many segments from `current_chunk` go into the `A`
                # (first) sentence.
                a_end = 1
                if len(current_chunk) >= 2:
                    a_end = randrange(1, len(current_chunk))

                tokens_a = []
                for j in range(a_end):
                    tokens_a.extend(current_chunk[j])

                tokens_b = []

                # Random next
                if len(current_chunk) == 1 or random() < 0.5:
                    is_random_next = True
                    target_b_length = target_seq_length - len(tokens_a)

                    # Sample a random document, with longer docs being sampled more frequently
                    random_document = doc_database.sample_doc(current_idx=doc_idx, sentence_weighted=True)

                    random_start = randrange(0, len(random_document))
                    for j in range(random_start, len(random_document)):
                        tokens_b.extend(random_document[j])
                        if len(tokens_b) >= target_b_length:
                            break
                    # We didn't actually use these segments so we "put them back" so
                    # they don't go to waste.
                    num_unused_segments = len(current_chunk) - a_end
                    i -= num_unused_segments
                # Actual next
                else:
                    is_random_next = False
                    for j in range(a_end, len(current_chunk)):
                        tokens_b.extend(current_chunk[j])
                truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)

                assert len(tokens_a) >= 1
                assert len(tokens_b) >= 1

                tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
                # The segment IDs are 0 for the [CLS] token, the A tokens and the first [SEP]
                # They are 1 for the B tokens and the final [SEP]
                segment_ids = [0 for _ in range(len(tokens_a) + 2)] + [1 for _ in range(len(tokens_b) + 1)]

                tokens, masked_lm_positions, masked_lm_labels = create_masked_lm_predictions(
                    tokens, masked_lm_prob, max_predictions_per_seq, vocab_list)

                instance = {
                    "tokens": tokens,
                    "segment_ids": segment_ids,
                    "is_random_next": is_random_next,
                    "masked_lm_positions": masked_lm_positions,
                    "masked_lm_labels": masked_lm_labels}
                instances.append(instance)
                current_chunk = []
                current_length = 0
        i += 1
    return instances

def create_instances_from_document(
        doc_database, doc_idx, max_seq_length, short_seq_prob,
        masked_lm_prob, max_predictions_per_seq, vocab_list):
    """This code is mostly a duplicate of the equivalent function from Google BERT's repo.
    However, we make some changes and improvements. Sampling is improved and no longer requires a loop
    in this function. Also, documents are sampled proportionally to the number of sentences they contain,
    which means each sentence (rather than each document) has an equal chance of being sampled as a false
    example for the NextSentence task."""
    document = doc_database[doc_idx]
    # Account for [CLS], [SEP], [SEP]
    max_num_tokens = max_seq_length - 3

    # We *usually* want to fill up the entire sequence since we are padding
    # to `max_seq_length` anyways, so short sequences are generally wasted
    # computation. However, we *sometimes*
    # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
    # sequences to minimize the mismatch between pre-training and fine-tuning.
    # The `target_seq_length` is just a rough target however, whereas
    # `max_seq_length` is a hard limit.
    target_seq_length = max_num_tokens
    if random() < short_seq_prob:
        target_seq_length = randint(2, max_num_tokens)

    # We DON'T just concatenate all of the tokens from a document into a long
    # sequence and choose an arbitrary split point because this would make the
    # next sentence prediction task too easy. Instead, we split the input into
    # segments "A" and "B" based on the actual "sentences" provided by the user
    # input.
    instances = []
    current_chunk = []
    current_length = 0
    i = 0
    while i < len(document):
        segment = document[i]
        current_chunk.append(segment)
        current_length += len(segment)
        if i == len(document) - 1 or current_length >= target_seq_length:
            if current_chunk:
                # `a_end` is how many segments from `current_chunk` go into the `A`
                # (first) sentence.
                a_end = 1
                if len(current_chunk) >= 2:
                    a_end = randrange(1, len(current_chunk))

                tokens_a = []
                for j in range(a_end):
                    tokens_a.extend(current_chunk[j])

                tokens_b = []

                # Random next
                if len(current_chunk) == 1 or random() < 0.5:
                    is_random_next = True
                    target_b_length = target_seq_length - len(tokens_a)

                    # Sample a random document, with longer docs being sampled more frequently
                    random_document = doc_database.sample_doc(current_idx=doc_idx, sentence_weighted=True)

                    random_start = randrange(0, len(random_document))
                    for j in range(random_start, len(random_document)):
                        tokens_b.extend(random_document[j])
                        if len(tokens_b) >= target_b_length:
                            break
                    # We didn't actually use these segments so we "put them back" so
                    # they don't go to waste.
                    num_unused_segments = len(current_chunk) - a_end
                    i -= num_unused_segments
                # Actual next
                else:
                    is_random_next = False
                    for j in range(a_end, len(current_chunk)):
                        tokens_b.extend(current_chunk[j])
                truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)

                assert len(tokens_a) >= 1
                assert len(tokens_b) >= 1

                tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
                # The segment IDs are 0 for the [CLS] token, the A tokens and the first [SEP]
                # They are 1 for the B tokens and the final [SEP]
                segment_ids = [0 for _ in range(len(tokens_a) + 2)] + [1 for _ in range(len(tokens_b) + 1)]

                tokens, masked_lm_positions, masked_lm_labels = create_masked_lm_predictions(
                    tokens, masked_lm_prob, max_predictions_per_seq, vocab_list)

                instance = {
                    "tokens": tokens,
                    "segment_ids": segment_ids,
                    "is_random_next": is_random_next,
                    "masked_lm_positions": masked_lm_positions,
                    "masked_lm_labels": masked_lm_labels}
                instances.append(instance)
                current_chunk = []
                current_length = 0
        i += 1
    return instances
[ "This", "code", "is", "mostly", "a", "duplicate", "of", "the", "equivalent", "function", "from", "Google", "BERT", "s", "repo", ".", "However", "we", "make", "some", "changes", "and", "improvements", ".", "Sampling", "is", "improved", "and", "no", "longer", "requires", "a", "loop", "in", "this", "function", ".", "Also", "documents", "are", "sampled", "proportionally", "to", "the", "number", "of", "sentences", "they", "contain", "which", "means", "each", "sentence", "(", "rather", "than", "each", "document", ")", "has", "an", "equal", "chance", "of", "being", "sampled", "as", "a", "false", "example", "for", "the", "NextSentence", "task", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/lm_finetuning/pregenerate_training_data.py#L134-L229
[ "def", "create_instances_from_document", "(", "doc_database", ",", "doc_idx", ",", "max_seq_length", ",", "short_seq_prob", ",", "masked_lm_prob", ",", "max_predictions_per_seq", ",", "vocab_list", ")", ":", "document", "=", "doc_database", "[", "doc_idx", "]", "# Account for [CLS], [SEP], [SEP]", "max_num_tokens", "=", "max_seq_length", "-", "3", "# We *usually* want to fill up the entire sequence since we are padding", "# to `max_seq_length` anyways, so short sequences are generally wasted", "# computation. However, we *sometimes*", "# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter", "# sequences to minimize the mismatch between pre-training and fine-tuning.", "# The `target_seq_length` is just a rough target however, whereas", "# `max_seq_length` is a hard limit.", "target_seq_length", "=", "max_num_tokens", "if", "random", "(", ")", "<", "short_seq_prob", ":", "target_seq_length", "=", "randint", "(", "2", ",", "max_num_tokens", ")", "# We DON'T just concatenate all of the tokens from a document into a long", "# sequence and choose an arbitrary split point because this would make the", "# next sentence prediction task too easy. Instead, we split the input into", "# segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user", "# input.", "instances", "=", "[", "]", "current_chunk", "=", "[", "]", "current_length", "=", "0", "i", "=", "0", "while", "i", "<", "len", "(", "document", ")", ":", "segment", "=", "document", "[", "i", "]", "current_chunk", ".", "append", "(", "segment", ")", "current_length", "+=", "len", "(", "segment", ")", "if", "i", "==", "len", "(", "document", ")", "-", "1", "or", "current_length", ">=", "target_seq_length", ":", "if", "current_chunk", ":", "# `a_end` is how many segments from `current_chunk` go into the `A`", "# (first) sentence.", "a_end", "=", "1", "if", "len", "(", "current_chunk", ")", ">=", "2", ":", "a_end", "=", "randrange", "(", "1", ",", "len", "(", "current_chunk", ")", ")", "tokens_a", "=", "[", "]", "for", "j", "in", "range", "(", "a_end", ")", ":", "tokens_a", ".", "extend", "(", "current_chunk", "[", "j", "]", ")", "tokens_b", "=", "[", "]", "# Random next", "if", "len", "(", "current_chunk", ")", "==", "1", "or", "random", "(", ")", "<", "0.5", ":", "is_random_next", "=", "True", "target_b_length", "=", "target_seq_length", "-", "len", "(", "tokens_a", ")", "# Sample a random document, with longer docs being sampled more frequently", "random_document", "=", "doc_database", ".", "sample_doc", "(", "current_idx", "=", "doc_idx", ",", "sentence_weighted", "=", "True", ")", "random_start", "=", "randrange", "(", "0", ",", "len", "(", "random_document", ")", ")", "for", "j", "in", "range", "(", "random_start", ",", "len", "(", "random_document", ")", ")", ":", "tokens_b", ".", "extend", "(", "random_document", "[", "j", "]", ")", "if", "len", "(", "tokens_b", ")", ">=", "target_b_length", ":", "break", "# We didn't actually use these segments so we \"put them back\" so", "# they don't go to waste.", "num_unused_segments", "=", "len", "(", "current_chunk", ")", "-", "a_end", "i", "-=", "num_unused_segments", "# Actual next", "else", ":", "is_random_next", "=", "False", "for", "j", "in", "range", "(", "a_end", ",", "len", "(", "current_chunk", ")", ")", ":", "tokens_b", ".", "extend", "(", "current_chunk", "[", "j", "]", ")", "truncate_seq_pair", "(", "tokens_a", ",", "tokens_b", ",", "max_num_tokens", ")", "assert", "len", "(", "tokens_a", ")", ">=", "1", "assert", "len", "(", "tokens_b", ")", ">=", 
"1", "tokens", "=", "[", "\"[CLS]\"", "]", "+", "tokens_a", "+", "[", "\"[SEP]\"", "]", "+", "tokens_b", "+", "[", "\"[SEP]\"", "]", "# The segment IDs are 0 for the [CLS] token, the A tokens and the first [SEP]", "# They are 1 for the B tokens and the final [SEP]", "segment_ids", "=", "[", "0", "for", "_", "in", "range", "(", "len", "(", "tokens_a", ")", "+", "2", ")", "]", "+", "[", "1", "for", "_", "in", "range", "(", "len", "(", "tokens_b", ")", "+", "1", ")", "]", "tokens", ",", "masked_lm_positions", ",", "masked_lm_labels", "=", "create_masked_lm_predictions", "(", "tokens", ",", "masked_lm_prob", ",", "max_predictions_per_seq", ",", "vocab_list", ")", "instance", "=", "{", "\"tokens\"", ":", "tokens", ",", "\"segment_ids\"", ":", "segment_ids", ",", "\"is_random_next\"", ":", "is_random_next", ",", "\"masked_lm_positions\"", ":", "masked_lm_positions", ",", "\"masked_lm_labels\"", ":", "masked_lm_labels", "}", "instances", ".", "append", "(", "instance", ")", "current_chunk", "=", "[", "]", "current_length", "=", "0", "i", "+=", "1", "return", "instances" ]
b832d5bb8a6dfc5965015b828e577677eace601e
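doc_database.sample_doc is not shown in this record; one plausible reading of its sentence-weighted sampling, sketched with numpy (all names and data here are invented).

import numpy as np

rng = np.random.default_rng(0)
docs = [["s1"], ["s1", "s2", "s3"], ["s1", "s2"]]  # invented: 1, 3 and 2 sentences

def sample_doc_sketch(current_idx):
    # Weight every *other* document by its sentence count, so each sentence in the
    # corpus is equally likely to supply the false "B" segment.
    weights = np.array([len(d) if i != current_idx else 0
                        for i, d in enumerate(docs)], dtype=float)
    return docs[rng.choice(len(docs), p=weights / weights.sum())]

print(sample_doc_sketch(current_idx=0))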
train
sample_logits
embedding: an nn.Embedding layer
bias: [n_vocab]
labels: [b1, b2]
inputs: [b1, b2, n_emb]
sampler: you may use a LogUniformSampler
Return
    logits: [b1, b2, 1 + n_sample]
pytorch_pretrained_bert/modeling_transfo_xl_utilities.py
def sample_logits(embedding, bias, labels, inputs, sampler):
    """
        embedding: an nn.Embedding layer
        bias: [n_vocab]
        labels: [b1, b2]
        inputs: [b1, b2, n_emb]
        sampler: you may use a LogUniformSampler
    Return
        logits: [b1, b2, 1 + n_sample]
    """
    true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
    n_sample = neg_samples.size(0)
    b1, b2 = labels.size(0), labels.size(1)
    all_ids = torch.cat([labels.view(-1), neg_samples])
    all_w = embedding(all_ids)
    true_w = all_w[: -n_sample].view(b1, b2, -1)
    sample_w = all_w[- n_sample:].view(n_sample, -1)

    all_b = bias[all_ids]
    true_b = all_b[: -n_sample].view(b1, b2)
    sample_b = all_b[- n_sample:]

    hit = (labels[:, :, None] == neg_samples).detach()

    true_logits = torch.einsum('ijk,ijk->ij', [true_w, inputs]) + true_b - true_log_probs
    sample_logits = torch.einsum('lk,ijk->ijl', [sample_w, inputs]) + sample_b - samp_log_probs
    sample_logits.masked_fill_(hit, -1e30)
    logits = torch.cat([true_logits[:, :, None], sample_logits], -1)

    return logits

def sample_logits(embedding, bias, labels, inputs, sampler):
    """
        embedding: an nn.Embedding layer
        bias: [n_vocab]
        labels: [b1, b2]
        inputs: [b1, b2, n_emb]
        sampler: you may use a LogUniformSampler
    Return
        logits: [b1, b2, 1 + n_sample]
    """
    true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
    n_sample = neg_samples.size(0)
    b1, b2 = labels.size(0), labels.size(1)
    all_ids = torch.cat([labels.view(-1), neg_samples])
    all_w = embedding(all_ids)
    true_w = all_w[: -n_sample].view(b1, b2, -1)
    sample_w = all_w[- n_sample:].view(n_sample, -1)

    all_b = bias[all_ids]
    true_b = all_b[: -n_sample].view(b1, b2)
    sample_b = all_b[- n_sample:]

    hit = (labels[:, :, None] == neg_samples).detach()

    true_logits = torch.einsum('ijk,ijk->ij', [true_w, inputs]) + true_b - true_log_probs
    sample_logits = torch.einsum('lk,ijk->ijl', [sample_w, inputs]) + sample_b - samp_log_probs
    sample_logits.masked_fill_(hit, -1e30)
    logits = torch.cat([true_logits[:, :, None], sample_logits], -1)

    return logits
[ "embedding", ":", "an", "nn", ".", "Embedding", "layer", "bias", ":", "[", "n_vocab", "]", "labels", ":", "[", "b1", "b2", "]", "inputs", ":", "[", "b1", "b2", "n_emb", "]", "sampler", ":", "you", "may", "use", "a", "LogUniformSampler", "Return", "logits", ":", "[", "b1", "b2", "1", "+", "n_sample", "]" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_transfo_xl_utilities.py#L302-L333
[ "def", "sample_logits", "(", "embedding", ",", "bias", ",", "labels", ",", "inputs", ",", "sampler", ")", ":", "true_log_probs", ",", "samp_log_probs", ",", "neg_samples", "=", "sampler", ".", "sample", "(", "labels", ")", "n_sample", "=", "neg_samples", ".", "size", "(", "0", ")", "b1", ",", "b2", "=", "labels", ".", "size", "(", "0", ")", ",", "labels", ".", "size", "(", "1", ")", "all_ids", "=", "torch", ".", "cat", "(", "[", "labels", ".", "view", "(", "-", "1", ")", ",", "neg_samples", "]", ")", "all_w", "=", "embedding", "(", "all_ids", ")", "true_w", "=", "all_w", "[", ":", "-", "n_sample", "]", ".", "view", "(", "b1", ",", "b2", ",", "-", "1", ")", "sample_w", "=", "all_w", "[", "-", "n_sample", ":", "]", ".", "view", "(", "n_sample", ",", "-", "1", ")", "all_b", "=", "bias", "[", "all_ids", "]", "true_b", "=", "all_b", "[", ":", "-", "n_sample", "]", ".", "view", "(", "b1", ",", "b2", ")", "sample_b", "=", "all_b", "[", "-", "n_sample", ":", "]", "hit", "=", "(", "labels", "[", ":", ",", ":", ",", "None", "]", "==", "neg_samples", ")", ".", "detach", "(", ")", "true_logits", "=", "torch", ".", "einsum", "(", "'ijk,ijk->ij'", ",", "[", "true_w", ",", "inputs", "]", ")", "+", "true_b", "-", "true_log_probs", "sample_logits", "=", "torch", ".", "einsum", "(", "'lk,ijk->ijl'", ",", "[", "sample_w", ",", "inputs", "]", ")", "+", "sample_b", "-", "samp_log_probs", "sample_logits", ".", "masked_fill_", "(", "hit", ",", "-", "1e30", ")", "logits", "=", "torch", ".", "cat", "(", "[", "true_logits", "[", ":", ",", ":", ",", "None", "]", ",", "sample_logits", "]", ",", "-", "1", ")", "return", "logits" ]
b832d5bb8a6dfc5965015b828e577677eace601e
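A shape check for sample_logits with a stub sampler in place of LogUniformSampler (uniform proposal, invented sizes); the import path matches this record's module.

import math
import torch
import torch.nn as nn
from pytorch_pretrained_bert.modeling_transfo_xl_utilities import sample_logits

n_vocab, n_emb, b1, b2, n_sample = 50, 8, 2, 3, 4
embedding = nn.Embedding(n_vocab, n_emb)
bias = torch.zeros(n_vocab)
labels = torch.randint(n_vocab, (b1, b2))
inputs = torch.randn(b1, b2, n_emb)

class StubSampler:
    """Uniform stand-in: every id has proposal probability 1/n_vocab."""
    def sample(self, labels):
        neg_samples = torch.randint(n_vocab, (n_sample,))
        log_p = -math.log(n_vocab)
        return (torch.full(labels.shape, log_p),   # log-prob of the true ids
                torch.full((n_sample,), log_p),    # log-prob of the negatives
                neg_samples)

logits = sample_logits(embedding, bias, labels, inputs, StubSampler())
print(logits.shape)  # torch.Size([2, 3, 5]): the true class plus n_sample negatives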
train
ProjectedAdaptiveLogSoftmax.forward
Params:
    hidden :: [len*bsz x d_proj]
    target :: [len*bsz]
Return:
    if target is None:
        out :: [len*bsz] Negative log likelihood
    else:
        out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary
We could replace this implementation by the native PyTorch one if theirs had an option
to set bias on all clusters in the native one.
here: https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138
pytorch_pretrained_bert/modeling_transfo_xl_utilities.py
def forward(self, hidden, target=None, keep_order=False):
    '''
        Params:
            hidden :: [len*bsz x d_proj]
            target :: [len*bsz]
        Return:
            if target is None:
                out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary
            else:
                out :: [len*bsz] Negative log likelihood
        We could replace this implementation by the native PyTorch one
        if theirs had an option to set bias on all clusters in the native one.
        here: https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138
    '''
    if target is not None:
        target = target.view(-1)
        if hidden.size(0) != target.size(0):
            raise RuntimeError('Input and target should have the same size '
                               'in the batch dimension.')

    if self.n_clusters == 0:
        logit = self._compute_logit(hidden, self.out_layers[0].weight,
                                    self.out_layers[0].bias, self.out_projs[0])
        if target is not None:
            out = -F.log_softmax(logit, dim=-1) \
                     .gather(1, target.unsqueeze(1)).squeeze(1)
        else:
            out = F.log_softmax(logit, dim=-1)
    else:
        # construct weights and biases
        weights, biases = [], []
        for i in range(len(self.cutoffs)):
            if self.div_val == 1:
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                weight_i = self.out_layers[0].weight[l_idx:r_idx]
                bias_i = self.out_layers[0].bias[l_idx:r_idx]
            else:
                weight_i = self.out_layers[i].weight
                bias_i = self.out_layers[i].bias

            if i == 0:
                weight_i = torch.cat(
                    [weight_i, self.cluster_weight], dim=0)
                bias_i = torch.cat(
                    [bias_i, self.cluster_bias], dim=0)

            weights.append(weight_i)
            biases.append(bias_i)

        head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

        head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
        head_logprob = F.log_softmax(head_logit, dim=1)

        if target is None:
            out = hidden.new_empty((head_logit.size(0), self.n_token))
        else:
            out = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)

        offset = 0
        cutoff_values = [0] + self.cutoffs
        for i in range(len(cutoff_values) - 1):
            l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

            if target is not None:
                mask_i = (target >= l_idx) & (target < r_idx)
                indices_i = mask_i.nonzero().squeeze()

                if indices_i.numel() == 0:
                    continue

                target_i = target.index_select(0, indices_i) - l_idx
                head_logprob_i = head_logprob.index_select(0, indices_i)
                hidden_i = hidden.index_select(0, indices_i)
            else:
                hidden_i = hidden

            if i == 0:
                if target is not None:
                    logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                else:
                    out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
            else:
                weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)

                cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                if target is not None:
                    logprob_i = head_logprob_i[:, cluster_prob_idx] \
                              + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                else:
                    logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                    out[:, l_idx:r_idx] = logprob_i

            if target is not None:
                if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
                    out.index_copy_(0, indices_i, -logprob_i)
                else:
                    out[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
                offset += logprob_i.size(0)

    return out
def forward(self, hidden, target=None, keep_order=False):
    '''
        Params:
            hidden :: [len*bsz x d_proj]
            target :: [len*bsz]
        Return:
            if target is None:
                out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary
            else:
                out :: [len*bsz] Negative log likelihood
        We could replace this implementation by the native PyTorch one
        if theirs had an option to set bias on all clusters in the native one.
        here: https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138
    '''
    if target is not None:
        target = target.view(-1)
        if hidden.size(0) != target.size(0):
            raise RuntimeError('Input and target should have the same size '
                               'in the batch dimension.')

    if self.n_clusters == 0:
        logit = self._compute_logit(hidden, self.out_layers[0].weight,
                                    self.out_layers[0].bias, self.out_projs[0])
        if target is not None:
            out = -F.log_softmax(logit, dim=-1) \
                     .gather(1, target.unsqueeze(1)).squeeze(1)
        else:
            out = F.log_softmax(logit, dim=-1)
    else:
        # construct weights and biases
        weights, biases = [], []
        for i in range(len(self.cutoffs)):
            if self.div_val == 1:
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                weight_i = self.out_layers[0].weight[l_idx:r_idx]
                bias_i = self.out_layers[0].bias[l_idx:r_idx]
            else:
                weight_i = self.out_layers[i].weight
                bias_i = self.out_layers[i].bias

            if i == 0:
                weight_i = torch.cat(
                    [weight_i, self.cluster_weight], dim=0)
                bias_i = torch.cat(
                    [bias_i, self.cluster_bias], dim=0)

            weights.append(weight_i)
            biases.append(bias_i)

        head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

        head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
        head_logprob = F.log_softmax(head_logit, dim=1)

        if target is None:
            out = hidden.new_empty((head_logit.size(0), self.n_token))
        else:
            out = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)

        offset = 0
        cutoff_values = [0] + self.cutoffs
        for i in range(len(cutoff_values) - 1):
            l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

            if target is not None:
                mask_i = (target >= l_idx) & (target < r_idx)
                indices_i = mask_i.nonzero().squeeze()

                if indices_i.numel() == 0:
                    continue

                target_i = target.index_select(0, indices_i) - l_idx
                head_logprob_i = head_logprob.index_select(0, indices_i)
                hidden_i = hidden.index_select(0, indices_i)
            else:
                hidden_i = hidden

            if i == 0:
                if target is not None:
                    logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                else:
                    out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
            else:
                weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)

                cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                if target is not None:
                    logprob_i = head_logprob_i[:, cluster_prob_idx] \
                              + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                else:
                    logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                    out[:, l_idx:r_idx] = logprob_i

            if target is not None:
                if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
                    out.index_copy_(0, indices_i, -logprob_i)
                else:
                    out[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
                offset += logprob_i.size(0)

    return out
[ "Params", ":", "hidden", "::", "[", "len", "*", "bsz", "x", "d_proj", "]", "target", "::", "[", "len", "*", "bsz", "]", "Return", ":", "if", "target", "is", "None", ":", "out", "::", "[", "len", "*", "bsz", "]", "Negative", "log", "likelihood", "else", ":", "out", "::", "[", "len", "*", "bsz", "x", "n_tokens", "]", "log", "probabilities", "of", "tokens", "over", "the", "vocabulary", "We", "could", "replace", "this", "implementation", "by", "the", "native", "PyTorch", "one", "if", "their", "s", "had", "an", "option", "to", "set", "bias", "on", "all", "clusters", "in", "the", "native", "one", ".", "here", ":", "https", ":", "//", "github", ".", "com", "/", "pytorch", "/", "pytorch", "/", "blob", "/", "dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da", "/", "torch", "/", "nn", "/", "modules", "/", "adaptive", ".", "py#L138" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_transfo_xl_utilities.py#L92-L195
[ "def", "forward", "(", "self", ",", "hidden", ",", "target", "=", "None", ",", "keep_order", "=", "False", ")", ":", "if", "target", "is", "not", "None", ":", "target", "=", "target", ".", "view", "(", "-", "1", ")", "if", "hidden", ".", "size", "(", "0", ")", "!=", "target", ".", "size", "(", "0", ")", ":", "raise", "RuntimeError", "(", "'Input and target should have the same size '", "'in the batch dimension.'", ")", "if", "self", ".", "n_clusters", "==", "0", ":", "logit", "=", "self", ".", "_compute_logit", "(", "hidden", ",", "self", ".", "out_layers", "[", "0", "]", ".", "weight", ",", "self", ".", "out_layers", "[", "0", "]", ".", "bias", ",", "self", ".", "out_projs", "[", "0", "]", ")", "if", "target", "is", "not", "None", ":", "output", "=", "-", "F", ".", "log_softmax", "(", "logit", ",", "dim", "=", "-", "1", ")", ".", "gather", "(", "1", ",", "target", ".", "unsqueeze", "(", "1", ")", ")", ".", "squeeze", "(", "1", ")", "else", ":", "output", "=", "F", ".", "log_softmax", "(", "logit", ",", "dim", "=", "-", "1", ")", "else", ":", "# construct weights and biases", "weights", ",", "biases", "=", "[", "]", ",", "[", "]", "for", "i", "in", "range", "(", "len", "(", "self", ".", "cutoffs", ")", ")", ":", "if", "self", ".", "div_val", "==", "1", ":", "l_idx", ",", "r_idx", "=", "self", ".", "cutoff_ends", "[", "i", "]", ",", "self", ".", "cutoff_ends", "[", "i", "+", "1", "]", "weight_i", "=", "self", ".", "out_layers", "[", "0", "]", ".", "weight", "[", "l_idx", ":", "r_idx", "]", "bias_i", "=", "self", ".", "out_layers", "[", "0", "]", ".", "bias", "[", "l_idx", ":", "r_idx", "]", "else", ":", "weight_i", "=", "self", ".", "out_layers", "[", "i", "]", ".", "weight", "bias_i", "=", "self", ".", "out_layers", "[", "i", "]", ".", "bias", "if", "i", "==", "0", ":", "weight_i", "=", "torch", ".", "cat", "(", "[", "weight_i", ",", "self", ".", "cluster_weight", "]", ",", "dim", "=", "0", ")", "bias_i", "=", "torch", ".", "cat", "(", "[", "bias_i", ",", "self", ".", "cluster_bias", "]", ",", "dim", "=", "0", ")", "weights", ".", "append", "(", "weight_i", ")", "biases", ".", "append", "(", "bias_i", ")", "head_weight", ",", "head_bias", ",", "head_proj", "=", "weights", "[", "0", "]", ",", "biases", "[", "0", "]", ",", "self", ".", "out_projs", "[", "0", "]", "head_logit", "=", "self", ".", "_compute_logit", "(", "hidden", ",", "head_weight", ",", "head_bias", ",", "head_proj", ")", "head_logprob", "=", "F", ".", "log_softmax", "(", "head_logit", ",", "dim", "=", "1", ")", "if", "target", "is", "None", ":", "out", "=", "hidden", ".", "new_empty", "(", "(", "head_logit", ".", "size", "(", "0", ")", ",", "self", ".", "n_token", ")", ")", "else", ":", "out", "=", "torch", ".", "zeros_like", "(", "target", ",", "dtype", "=", "hidden", ".", "dtype", ",", "device", "=", "hidden", ".", "device", ")", "offset", "=", "0", "cutoff_values", "=", "[", "0", "]", "+", "self", ".", "cutoffs", "for", "i", "in", "range", "(", "len", "(", "cutoff_values", ")", "-", "1", ")", ":", "l_idx", ",", "r_idx", "=", "cutoff_values", "[", "i", "]", ",", "cutoff_values", "[", "i", "+", "1", "]", "if", "target", "is", "not", "None", ":", "mask_i", "=", "(", "target", ">=", "l_idx", ")", "&", "(", "target", "<", "r_idx", ")", "indices_i", "=", "mask_i", ".", "nonzero", "(", ")", ".", "squeeze", "(", ")", "if", "indices_i", ".", "numel", "(", ")", "==", "0", ":", "continue", "target_i", "=", "target", ".", "index_select", "(", "0", ",", "indices_i", ")", "-", "l_idx", "head_logprob_i", "=", "head_logprob", ".", 
"index_select", "(", "0", ",", "indices_i", ")", "hidden_i", "=", "hidden", ".", "index_select", "(", "0", ",", "indices_i", ")", "else", ":", "hidden_i", "=", "hidden", "if", "i", "==", "0", ":", "if", "target", "is", "not", "None", ":", "logprob_i", "=", "head_logprob_i", ".", "gather", "(", "1", ",", "target_i", "[", ":", ",", "None", "]", ")", ".", "squeeze", "(", "1", ")", "else", ":", "out", "[", ":", ",", ":", "self", ".", "cutoffs", "[", "0", "]", "]", "=", "head_logprob", "[", ":", ",", ":", "self", ".", "cutoffs", "[", "0", "]", "]", "else", ":", "weight_i", ",", "bias_i", ",", "proj_i", "=", "weights", "[", "i", "]", ",", "biases", "[", "i", "]", ",", "self", ".", "out_projs", "[", "i", "]", "tail_logit_i", "=", "self", ".", "_compute_logit", "(", "hidden_i", ",", "weight_i", ",", "bias_i", ",", "proj_i", ")", "tail_logprob_i", "=", "F", ".", "log_softmax", "(", "tail_logit_i", ",", "dim", "=", "1", ")", "cluster_prob_idx", "=", "self", ".", "cutoffs", "[", "0", "]", "+", "i", "-", "1", "# No probability for the head cluster", "if", "target", "is", "not", "None", ":", "logprob_i", "=", "head_logprob_i", "[", ":", ",", "cluster_prob_idx", "]", "+", "tail_logprob_i", ".", "gather", "(", "1", ",", "target_i", "[", ":", ",", "None", "]", ")", ".", "squeeze", "(", "1", ")", "else", ":", "logprob_i", "=", "head_logprob", "[", ":", ",", "cluster_prob_idx", ",", "None", "]", "+", "tail_logprob_i", "out", "[", ":", ",", "l_idx", ":", "r_idx", "]", "=", "logprob_i", "if", "target", "is", "not", "None", ":", "if", "(", "hasattr", "(", "self", ",", "'keep_order'", ")", "and", "self", ".", "keep_order", ")", "or", "keep_order", ":", "out", ".", "index_copy_", "(", "0", ",", "indices_i", ",", "-", "logprob_i", ")", "else", ":", "out", "[", "offset", ":", "offset", "+", "logprob_i", ".", "size", "(", "0", ")", "]", ".", "copy_", "(", "-", "logprob_i", ")", "offset", "+=", "logprob_i", ".", "size", "(", "0", ")", "return", "out" ]
b832d5bb8a6dfc5965015b828e577677eace601e
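The n_clusters == 0 branch above reduces to ordinary softmax cross-entropy; a self-contained sketch showing that the gather-based formulation used there matches F.cross_entropy (all sizes are illustrative):

import torch
import torch.nn.functional as F

logit = torch.randn(20, 1000)              # [len*bsz x n_token]
target = torch.randint(0, 1000, (20,))     # [len*bsz]

# gather the log-probability of each target token, as in the branch above
nll = -F.log_softmax(logit, dim=-1).gather(1, target.unsqueeze(1)).squeeze(1)

# identical (up to float tolerance) to the built-in unreduced cross-entropy
assert torch.allclose(nll, F.cross_entropy(logit, target, reduction='none'))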
train
ProjectedAdaptiveLogSoftmax.log_prob
r""" Computes log probabilities for all :math:`n\_classes` From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py Args: hidden (Tensor): a minibatch of examples Returns: log-probabilities of for each class :math:`c` in range :math:`0 <= c <= n\_classes`, where :math:`n\_classes` is a parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor. Shape: - Input: :math:`(N, in\_features)` - Output: :math:`(N, n\_classes)`
pytorch_pretrained_bert/modeling_transfo_xl_utilities.py
def log_prob(self, hidden):
    r""" Computes log probabilities for all :math:`n\_classes`
    From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py
    Args:
        hidden (Tensor): a minibatch of examples
    Returns:
        log-probabilities for each class :math:`c`
        in range :math:`0 <= c <= n\_classes`, where :math:`n\_classes` is a
        parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.
    Shape:
        - Input: :math:`(N, in\_features)`
        - Output: :math:`(N, n\_classes)`
    """
    if self.n_clusters == 0:
        logit = self._compute_logit(hidden, self.out_layers[0].weight,
                                    self.out_layers[0].bias, self.out_projs[0])
        return F.log_softmax(logit, dim=-1)
    else:
        # construct weights and biases
        weights, biases = [], []
        for i in range(len(self.cutoffs)):
            if self.div_val == 1:
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                weight_i = self.out_layers[0].weight[l_idx:r_idx]
                bias_i = self.out_layers[0].bias[l_idx:r_idx]
            else:
                weight_i = self.out_layers[i].weight
                bias_i = self.out_layers[i].bias

            if i == 0:
                weight_i = torch.cat(
                    [weight_i, self.cluster_weight], dim=0)
                bias_i = torch.cat(
                    [bias_i, self.cluster_bias], dim=0)

            weights.append(weight_i)
            biases.append(bias_i)

        head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
        head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

        out = hidden.new_empty((head_logit.size(0), self.n_token))
        head_logprob = F.log_softmax(head_logit, dim=1)

        cutoff_values = [0] + self.cutoffs
        for i in range(len(cutoff_values) - 1):
            start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

            if i == 0:
                out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
            else:
                weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)

                logprob_i = head_logprob[:, self.cutoffs[0] + i - 1, None] + tail_logprob_i
                out[:, start_idx:stop_idx] = logprob_i

        return out
def log_prob(self, hidden):
    r""" Computes log probabilities for all :math:`n\_classes`
    From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py
    Args:
        hidden (Tensor): a minibatch of examples
    Returns:
        log-probabilities for each class :math:`c`
        in range :math:`0 <= c <= n\_classes`, where :math:`n\_classes` is a
        parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.
    Shape:
        - Input: :math:`(N, in\_features)`
        - Output: :math:`(N, n\_classes)`
    """
    if self.n_clusters == 0:
        logit = self._compute_logit(hidden, self.out_layers[0].weight,
                                    self.out_layers[0].bias, self.out_projs[0])
        return F.log_softmax(logit, dim=-1)
    else:
        # construct weights and biases
        weights, biases = [], []
        for i in range(len(self.cutoffs)):
            if self.div_val == 1:
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                weight_i = self.out_layers[0].weight[l_idx:r_idx]
                bias_i = self.out_layers[0].bias[l_idx:r_idx]
            else:
                weight_i = self.out_layers[i].weight
                bias_i = self.out_layers[i].bias

            if i == 0:
                weight_i = torch.cat(
                    [weight_i, self.cluster_weight], dim=0)
                bias_i = torch.cat(
                    [bias_i, self.cluster_bias], dim=0)

            weights.append(weight_i)
            biases.append(bias_i)

        head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
        head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

        out = hidden.new_empty((head_logit.size(0), self.n_token))
        head_logprob = F.log_softmax(head_logit, dim=1)

        cutoff_values = [0] + self.cutoffs
        for i in range(len(cutoff_values) - 1):
            start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

            if i == 0:
                out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
            else:
                weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)

                logprob_i = head_logprob[:, self.cutoffs[0] + i - 1, None] + tail_logprob_i
                out[:, start_idx:stop_idx] = logprob_i

        return out
[ "r", "Computes", "log", "probabilities", "for", "all", ":", "math", ":", "n", "\\", "_classes", "From", ":", "https", ":", "//", "github", ".", "com", "/", "pytorch", "/", "pytorch", "/", "blob", "/", "master", "/", "torch", "/", "nn", "/", "modules", "/", "adaptive", ".", "py", "Args", ":", "hidden", "(", "Tensor", ")", ":", "a", "minibatch", "of", "examples", "Returns", ":", "log", "-", "probabilities", "of", "for", "each", "class", ":", "math", ":", "c", "in", "range", ":", "math", ":", "0", "<", "=", "c", "<", "=", "n", "\\", "_classes", "where", ":", "math", ":", "n", "\\", "_classes", "is", "a", "parameter", "passed", "to", "AdaptiveLogSoftmaxWithLoss", "constructor", ".", "Shape", ":", "-", "Input", ":", ":", "math", ":", "(", "N", "in", "\\", "_features", ")", "-", "Output", ":", ":", "math", ":", "(", "N", "n", "\\", "_classes", ")" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_transfo_xl_utilities.py#L198-L257
[ "def", "log_prob", "(", "self", ",", "hidden", ")", ":", "if", "self", ".", "n_clusters", "==", "0", ":", "logit", "=", "self", ".", "_compute_logit", "(", "hidden", ",", "self", ".", "out_layers", "[", "0", "]", ".", "weight", ",", "self", ".", "out_layers", "[", "0", "]", ".", "bias", ",", "self", ".", "out_projs", "[", "0", "]", ")", "return", "F", ".", "log_softmax", "(", "logit", ",", "dim", "=", "-", "1", ")", "else", ":", "# construct weights and biases", "weights", ",", "biases", "=", "[", "]", ",", "[", "]", "for", "i", "in", "range", "(", "len", "(", "self", ".", "cutoffs", ")", ")", ":", "if", "self", ".", "div_val", "==", "1", ":", "l_idx", ",", "r_idx", "=", "self", ".", "cutoff_ends", "[", "i", "]", ",", "self", ".", "cutoff_ends", "[", "i", "+", "1", "]", "weight_i", "=", "self", ".", "out_layers", "[", "0", "]", ".", "weight", "[", "l_idx", ":", "r_idx", "]", "bias_i", "=", "self", ".", "out_layers", "[", "0", "]", ".", "bias", "[", "l_idx", ":", "r_idx", "]", "else", ":", "weight_i", "=", "self", ".", "out_layers", "[", "i", "]", ".", "weight", "bias_i", "=", "self", ".", "out_layers", "[", "i", "]", ".", "bias", "if", "i", "==", "0", ":", "weight_i", "=", "torch", ".", "cat", "(", "[", "weight_i", ",", "self", ".", "cluster_weight", "]", ",", "dim", "=", "0", ")", "bias_i", "=", "torch", ".", "cat", "(", "[", "bias_i", ",", "self", ".", "cluster_bias", "]", ",", "dim", "=", "0", ")", "weights", ".", "append", "(", "weight_i", ")", "biases", ".", "append", "(", "bias_i", ")", "head_weight", ",", "head_bias", ",", "head_proj", "=", "weights", "[", "0", "]", ",", "biases", "[", "0", "]", ",", "self", ".", "out_projs", "[", "0", "]", "head_logit", "=", "self", ".", "_compute_logit", "(", "hidden", ",", "head_weight", ",", "head_bias", ",", "head_proj", ")", "out", "=", "hidden", ".", "new_empty", "(", "(", "head_logit", ".", "size", "(", "0", ")", ",", "self", ".", "n_token", ")", ")", "head_logprob", "=", "F", ".", "log_softmax", "(", "head_logit", ",", "dim", "=", "1", ")", "cutoff_values", "=", "[", "0", "]", "+", "self", ".", "cutoffs", "for", "i", "in", "range", "(", "len", "(", "cutoff_values", ")", "-", "1", ")", ":", "start_idx", ",", "stop_idx", "=", "cutoff_values", "[", "i", "]", ",", "cutoff_values", "[", "i", "+", "1", "]", "if", "i", "==", "0", ":", "out", "[", ":", ",", ":", "self", ".", "cutoffs", "[", "0", "]", "]", "=", "head_logprob", "[", ":", ",", ":", "self", ".", "cutoffs", "[", "0", "]", "]", "else", ":", "weight_i", ",", "bias_i", ",", "proj_i", "=", "weights", "[", "i", "]", ",", "biases", "[", "i", "]", ",", "self", ".", "out_projs", "[", "i", "]", "tail_logit_i", "=", "self", ".", "_compute_logit", "(", "hidden", ",", "weight_i", ",", "bias_i", ",", "proj_i", ")", "tail_logprob_i", "=", "F", ".", "log_softmax", "(", "tail_logit_i", ",", "dim", "=", "1", ")", "logprob_i", "=", "head_logprob", "[", ":", ",", "-", "i", "]", "+", "tail_logprob_i", "out", "[", ":", ",", "start_idx", ",", "stop_idx", "]", "=", "logprob_i", "return", "out" ]
b832d5bb8a6dfc5965015b828e577677eace601e
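A self-contained illustration of the factorization log_prob relies on: for a token in tail cluster i, log p(token) = log p(cluster | h) + log p(token | cluster, h). The sizes below are made up (4 head tokens plus one tail cluster of 10 tokens):

import torch
import torch.nn.functional as F

head_logit = torch.randn(1, 5)    # 4 head tokens + 1 cluster slot
tail_logit = torch.randn(1, 10)   # tokens inside the tail cluster

head_logprob = F.log_softmax(head_logit, dim=1)
tail_logprob = F.log_softmax(tail_logit, dim=1)

# log-probability of tail token 3 = cluster log-prob + within-cluster log-prob
logp = head_logprob[0, 4] + tail_logprob[0, 3]

# the full distribution (head tokens + expanded cluster) still sums to one
total = head_logprob[0, :4].exp().sum() + \
        (head_logprob[0, 4] + tail_logprob[0]).exp().sum()
print(total)  # ~1.0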
train
LogUniformSampler.sample
labels: [b1, b2] Return true_log_probs: [b1, b2] samp_log_probs: [n_sample] neg_samples: [n_sample]
pytorch_pretrained_bert/modeling_transfo_xl_utilities.py
def sample(self, labels): """ labels: [b1, b2] Return true_log_probs: [b1, b2] samp_log_probs: [n_sample] neg_samples: [n_sample] """ # neg_samples = torch.empty(0).long() n_sample = self.n_sample n_tries = 2 * n_sample with torch.no_grad(): neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique() device = labels.device neg_samples = neg_samples.to(device) true_log_probs = self.log_q[labels].to(device) samp_log_probs = self.log_q[neg_samples].to(device) return true_log_probs, samp_log_probs, neg_samples
def sample(self, labels): """ labels: [b1, b2] Return true_log_probs: [b1, b2] samp_log_probs: [n_sample] neg_samples: [n_sample] """ # neg_samples = torch.empty(0).long() n_sample = self.n_sample n_tries = 2 * n_sample with torch.no_grad(): neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique() device = labels.device neg_samples = neg_samples.to(device) true_log_probs = self.log_q[labels].to(device) samp_log_probs = self.log_q[neg_samples].to(device) return true_log_probs, samp_log_probs, neg_samples
[ "labels", ":", "[", "b1", "b2", "]", "Return", "true_log_probs", ":", "[", "b1", "b2", "]", "samp_log_probs", ":", "[", "n_sample", "]", "neg_samples", ":", "[", "n_sample", "]" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_transfo_xl_utilities.py#L281-L300
[ "def", "sample", "(", "self", ",", "labels", ")", ":", "# neg_samples = torch.empty(0).long()", "n_sample", "=", "self", ".", "n_sample", "n_tries", "=", "2", "*", "n_sample", "with", "torch", ".", "no_grad", "(", ")", ":", "neg_samples", "=", "torch", ".", "multinomial", "(", "self", ".", "dist", ",", "n_tries", ",", "replacement", "=", "True", ")", ".", "unique", "(", ")", "device", "=", "labels", ".", "device", "neg_samples", "=", "neg_samples", ".", "to", "(", "device", ")", "true_log_probs", "=", "self", ".", "log_q", "[", "labels", "]", ".", "to", "(", "device", ")", "samp_log_probs", "=", "self", ".", "log_q", "[", "neg_samples", "]", ".", "to", "(", "device", ")", "return", "true_log_probs", ",", "samp_log_probs", ",", "neg_samples" ]
b832d5bb8a6dfc5965015b828e577677eace601e
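The proposal distribution self.dist that sample() draws from is log-uniform (Zipf-like), so low token ids are proposed far more often. A sketch of one standard construction of that distribution; treat the exact formula as an assumption about what LogUniformSampler.__init__ computes, with range_max purely illustrative:

import torch

range_max = 1000
log_indices = torch.arange(1., range_max + 2., 1.).log_()
# P(k) = (log(k + 2) - log(k + 1)) / log(range_max + 1), k = 0..range_max-1
dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
print(dist.sum())          # ~1.0
print(dist[0] / dist[-1])  # low ids dominate the proposals

# as in sample(): draw 2 * n_sample candidates, keep the unique ones
neg_samples = torch.multinomial(dist, 2 * 16, replacement=True).unique()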
train
build_tf_to_pytorch_map
A map of modules from TF to PyTorch. This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.
pytorch_pretrained_bert/modeling_transfo_xl.py
def build_tf_to_pytorch_map(model, config): """ A map of modules from TF to PyTorch. This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible. """ tf_to_pt_map = {} if hasattr(model, 'transformer'): # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax tf_to_pt_map.update({ "transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight, "transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias}) for i, (out_l, proj_l, tie_proj) in enumerate(zip( model.crit.out_layers, model.crit.out_projs, config.tie_projs)): layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i if config.tie_weight: tf_to_pt_map.update({ layer_str + 'b': out_l.bias}) else: raise NotImplementedError # I don't think this is implemented in the TF code tf_to_pt_map.update({ layer_str + 'lookup_table': out_l.weight, layer_str + 'b': out_l.bias}) if not tie_proj: tf_to_pt_map.update({ layer_str + 'proj': proj_l }) # Now load the rest of the transformer model = model.transformer # Embeddings for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)): layer_str = "transformer/adaptive_embed/cutoff_%d/" % i tf_to_pt_map.update({ layer_str + 'lookup_table': embed_l.weight, layer_str + 'proj_W': proj_l }) # Transformer blocks for i, b in enumerate(model.layers): layer_str = "transformer/layer_%d/" % i tf_to_pt_map.update({ layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight, layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias, layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight, layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight, layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight, layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight, layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias, layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight, layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias, layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight, layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias, }) # Relative positioning biases if config.untie_r: r_r_list = [] r_w_list = [] for b in model.layers: r_r_list.append(b.dec_attn.r_r_bias) r_w_list.append(b.dec_attn.r_w_bias) else: r_r_list = [model.r_r_bias] r_w_list = [model.r_w_bias] tf_to_pt_map.update({ 'transformer/r_r_bias': r_r_list, 'transformer/r_w_bias': r_w_list}) return tf_to_pt_map
def build_tf_to_pytorch_map(model, config): """ A map of modules from TF to PyTorch. This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible. """ tf_to_pt_map = {} if hasattr(model, 'transformer'): # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax tf_to_pt_map.update({ "transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight, "transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias}) for i, (out_l, proj_l, tie_proj) in enumerate(zip( model.crit.out_layers, model.crit.out_projs, config.tie_projs)): layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i if config.tie_weight: tf_to_pt_map.update({ layer_str + 'b': out_l.bias}) else: raise NotImplementedError # I don't think this is implemented in the TF code tf_to_pt_map.update({ layer_str + 'lookup_table': out_l.weight, layer_str + 'b': out_l.bias}) if not tie_proj: tf_to_pt_map.update({ layer_str + 'proj': proj_l }) # Now load the rest of the transformer model = model.transformer # Embeddings for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)): layer_str = "transformer/adaptive_embed/cutoff_%d/" % i tf_to_pt_map.update({ layer_str + 'lookup_table': embed_l.weight, layer_str + 'proj_W': proj_l }) # Transformer blocks for i, b in enumerate(model.layers): layer_str = "transformer/layer_%d/" % i tf_to_pt_map.update({ layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight, layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias, layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight, layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight, layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight, layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight, layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias, layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight, layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias, layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight, layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias, }) # Relative positioning biases if config.untie_r: r_r_list = [] r_w_list = [] for b in model.layers: r_r_list.append(b.dec_attn.r_r_bias) r_w_list.append(b.dec_attn.r_w_bias) else: r_r_list = [model.r_r_bias] r_w_list = [model.r_w_bias] tf_to_pt_map.update({ 'transformer/r_r_bias': r_r_list, 'transformer/r_w_bias': r_w_list}) return tf_to_pt_map
[ "A", "map", "of", "modules", "from", "TF", "to", "PyTorch", ".", "This", "time", "I", "use", "a", "map", "to", "keep", "the", "PyTorch", "model", "as", "identical", "to", "the", "original", "PyTorch", "model", "as", "possible", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_transfo_xl.py#L56-L126
[ "def", "build_tf_to_pytorch_map", "(", "model", ",", "config", ")", ":", "tf_to_pt_map", "=", "{", "}", "if", "hasattr", "(", "model", ",", "'transformer'", ")", ":", "# We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax", "tf_to_pt_map", ".", "update", "(", "{", "\"transformer/adaptive_softmax/cutoff_0/cluster_W\"", ":", "model", ".", "crit", ".", "cluster_weight", ",", "\"transformer/adaptive_softmax/cutoff_0/cluster_b\"", ":", "model", ".", "crit", ".", "cluster_bias", "}", ")", "for", "i", ",", "(", "out_l", ",", "proj_l", ",", "tie_proj", ")", "in", "enumerate", "(", "zip", "(", "model", ".", "crit", ".", "out_layers", ",", "model", ".", "crit", ".", "out_projs", ",", "config", ".", "tie_projs", ")", ")", ":", "layer_str", "=", "\"transformer/adaptive_softmax/cutoff_%d/\"", "%", "i", "if", "config", ".", "tie_weight", ":", "tf_to_pt_map", ".", "update", "(", "{", "layer_str", "+", "'b'", ":", "out_l", ".", "bias", "}", ")", "else", ":", "raise", "NotImplementedError", "# I don't think this is implemented in the TF code", "tf_to_pt_map", ".", "update", "(", "{", "layer_str", "+", "'lookup_table'", ":", "out_l", ".", "weight", ",", "layer_str", "+", "'b'", ":", "out_l", ".", "bias", "}", ")", "if", "not", "tie_proj", ":", "tf_to_pt_map", ".", "update", "(", "{", "layer_str", "+", "'proj'", ":", "proj_l", "}", ")", "# Now load the rest of the transformer", "model", "=", "model", ".", "transformer", "# Embeddings", "for", "i", ",", "(", "embed_l", ",", "proj_l", ")", "in", "enumerate", "(", "zip", "(", "model", ".", "word_emb", ".", "emb_layers", ",", "model", ".", "word_emb", ".", "emb_projs", ")", ")", ":", "layer_str", "=", "\"transformer/adaptive_embed/cutoff_%d/\"", "%", "i", "tf_to_pt_map", ".", "update", "(", "{", "layer_str", "+", "'lookup_table'", ":", "embed_l", ".", "weight", ",", "layer_str", "+", "'proj_W'", ":", "proj_l", "}", ")", "# Transformer blocks", "for", "i", ",", "b", "in", "enumerate", "(", "model", ".", "layers", ")", ":", "layer_str", "=", "\"transformer/layer_%d/\"", "%", "i", "tf_to_pt_map", ".", "update", "(", "{", "layer_str", "+", "\"rel_attn/LayerNorm/gamma\"", ":", "b", ".", "dec_attn", ".", "layer_norm", ".", "weight", ",", "layer_str", "+", "\"rel_attn/LayerNorm/beta\"", ":", "b", ".", "dec_attn", ".", "layer_norm", ".", "bias", ",", "layer_str", "+", "\"rel_attn/o/kernel\"", ":", "b", ".", "dec_attn", ".", "o_net", ".", "weight", ",", "layer_str", "+", "\"rel_attn/qkv/kernel\"", ":", "b", ".", "dec_attn", ".", "qkv_net", ".", "weight", ",", "layer_str", "+", "\"rel_attn/r/kernel\"", ":", "b", ".", "dec_attn", ".", "r_net", ".", "weight", ",", "layer_str", "+", "\"ff/LayerNorm/gamma\"", ":", "b", ".", "pos_ff", ".", "layer_norm", ".", "weight", ",", "layer_str", "+", "\"ff/LayerNorm/beta\"", ":", "b", ".", "pos_ff", ".", "layer_norm", ".", "bias", ",", "layer_str", "+", "\"ff/layer_1/kernel\"", ":", "b", ".", "pos_ff", ".", "CoreNet", "[", "0", "]", ".", "weight", ",", "layer_str", "+", "\"ff/layer_1/bias\"", ":", "b", ".", "pos_ff", ".", "CoreNet", "[", "0", "]", ".", "bias", ",", "layer_str", "+", "\"ff/layer_2/kernel\"", ":", "b", ".", "pos_ff", ".", "CoreNet", "[", "3", "]", ".", "weight", ",", "layer_str", "+", "\"ff/layer_2/bias\"", ":", "b", ".", "pos_ff", ".", "CoreNet", "[", "3", "]", ".", "bias", ",", "}", ")", "# Relative positioning biases", "if", "config", ".", "untie_r", ":", "r_r_list", "=", "[", "]", "r_w_list", "=", "[", "]", "for", "b", "in", "model", ".", "layers", ":", "r_r_list", ".", "append", "(", 
"b", ".", "dec_attn", ".", "r_r_bias", ")", "r_w_list", ".", "append", "(", "b", ".", "dec_attn", ".", "r_w_bias", ")", "else", ":", "r_r_list", "=", "[", "model", ".", "r_r_bias", "]", "r_w_list", "=", "[", "model", ".", "r_w_bias", "]", "tf_to_pt_map", ".", "update", "(", "{", "'transformer/r_r_bias'", ":", "r_r_list", ",", "'transformer/r_w_bias'", ":", "r_w_list", "}", ")", "return", "tf_to_pt_map" ]
b832d5bb8a6dfc5965015b828e577677eace601e
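A hedged inspection sketch: build the map for a freshly constructed model and list the TF variable names it expects. TransfoXLConfig() uses the library defaults here (full transfo-xl-wt103 size, so this allocates a large model); everything below is illustrative only.

from pytorch_pretrained_bert.modeling_transfo_xl import (
    TransfoXLConfig, TransfoXLLMHeadModel, build_tf_to_pytorch_map)

config = TransfoXLConfig()                # default, checkpoint-sized config
model = TransfoXLLMHeadModel(config)      # randomly initialized

tf_to_pt_map = build_tf_to_pytorch_map(model, config)
# Keys follow the TF checkpoint layout, e.g.
# "transformer/layer_0/rel_attn/qkv/kernel"
for name in sorted(tf_to_pt_map)[:5]:
    print(name)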
train
load_tf_weights_in_transfo_xl
Load tf checkpoints in a pytorch model
pytorch_pretrained_bert/modeling_transfo_xl.py
def load_tf_weights_in_transfo_xl(model, config, tf_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading TensorFlow models in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_to_pytorch_map(model, config)
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array

    for name, pointer in tf_to_pt_map.items():
        assert name in tf_weights
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
        # which are not required for using pretrained model
        if 'kernel' in name or 'proj' in name:
            array = np.transpose(array)
        if ('r_r_bias' in name or 'r_w_bias' in name) and len(pointer) > 1:
            # Here we will split the TF weights
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                print("Initialize PyTorch weight {} for layer {}".format(name, i))
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            print("Initialize PyTorch weight {}".format(name))
            pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + '/Adam', None)
        tf_weights.pop(name + '/Adam_1', None)

    print("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
    return model
def load_tf_weights_in_transfo_xl(model, config, tf_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading TensorFlow models in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_to_pytorch_map(model, config)
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array

    for name, pointer in tf_to_pt_map.items():
        assert name in tf_weights
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
        # which are not required for using pretrained model
        if 'kernel' in name or 'proj' in name:
            array = np.transpose(array)
        if ('r_r_bias' in name or 'r_w_bias' in name) and len(pointer) > 1:
            # Here we will split the TF weights
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                print("Initialize PyTorch weight {} for layer {}".format(name, i))
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            print("Initialize PyTorch weight {}".format(name))
            pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + '/Adam', None)
        tf_weights.pop(name + '/Adam_1', None)

    print("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
    return model
[ "Load", "tf", "checkpoints", "in", "a", "pytorch", "model" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_transfo_xl.py#L128-L181
[ "def", "load_tf_weights_in_transfo_xl", "(", "model", ",", "config", ",", "tf_path", ")", ":", "try", ":", "import", "numpy", "as", "np", "import", "tensorflow", "as", "tf", "except", "ImportError", ":", "print", "(", "\"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see \"", "\"https://www.tensorflow.org/install/ for installation instructions.\"", ")", "raise", "# Build TF to PyTorch weights loading map", "tf_to_pt_map", "=", "build_tf_to_pytorch_map", "(", "model", ",", "config", ")", "# Load weights from TF model", "init_vars", "=", "tf", ".", "train", ".", "list_variables", "(", "tf_path", ")", "tf_weights", "=", "{", "}", "for", "name", ",", "shape", "in", "init_vars", ":", "print", "(", "\"Loading TF weight {} with shape {}\"", ".", "format", "(", "name", ",", "shape", ")", ")", "array", "=", "tf", ".", "train", ".", "load_variable", "(", "tf_path", ",", "name", ")", "tf_weights", "[", "name", "]", "=", "array", "for", "name", ",", "pointer", "in", "tf_to_pt_map", ".", "items", "(", ")", ":", "assert", "name", "in", "tf_weights", "array", "=", "tf_weights", "[", "name", "]", "# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v", "# which are not required for using pretrained model", "if", "'kernel'", "in", "name", "or", "'proj'", "in", "name", ":", "array", "=", "np", ".", "transpose", "(", "array", ")", "if", "(", "'r_r_bias'", "in", "name", "or", "'r_w_bias'", "in", "name", ")", "and", "len", "(", "pointer", ")", ">", "1", ":", "# Here we will split the TF weigths", "assert", "len", "(", "pointer", ")", "==", "array", ".", "shape", "[", "0", "]", "for", "i", ",", "p_i", "in", "enumerate", "(", "pointer", ")", ":", "arr_i", "=", "array", "[", "i", ",", "...", "]", "try", ":", "assert", "p_i", ".", "shape", "==", "arr_i", ".", "shape", "except", "AssertionError", "as", "e", ":", "e", ".", "args", "+=", "(", "p_i", ".", "shape", ",", "arr_i", ".", "shape", ")", "raise", "print", "(", "\"Initialize PyTorch weight {} for layer {}\"", ".", "format", "(", "name", ",", "i", ")", ")", "p_i", ".", "data", "=", "torch", ".", "from_numpy", "(", "arr_i", ")", "else", ":", "try", ":", "assert", "pointer", ".", "shape", "==", "array", ".", "shape", "except", "AssertionError", "as", "e", ":", "e", ".", "args", "+=", "(", "pointer", ".", "shape", ",", "array", ".", "shape", ")", "raise", "print", "(", "\"Initialize PyTorch weight {}\"", ".", "format", "(", "name", ")", ")", "pointer", ".", "data", "=", "torch", ".", "from_numpy", "(", "array", ")", "tf_weights", ".", "pop", "(", "name", ",", "None", ")", "tf_weights", ".", "pop", "(", "name", "+", "'/Adam'", ",", "None", ")", "tf_weights", ".", "pop", "(", "name", "+", "'/Adam_1'", ",", "None", ")", "print", "(", "\"Weights not copied to PyTorch model: {}\"", ".", "format", "(", "', '", ".", "join", "(", "tf_weights", ".", "keys", "(", ")", ")", ")", ")", "return", "model" ]
b832d5bb8a6dfc5965015b828e577677eace601e
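A conversion sketch with a hypothetical checkpoint path (TensorFlow must be installed for this to run); the model's weights are overwritten in place from the checkpoint, then saved as a PyTorch dump:

import torch
from pytorch_pretrained_bert.modeling_transfo_xl import (
    TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl)

tf_path = "/path/to/transfo_xl_checkpoint"   # hypothetical TF checkpoint

config = TransfoXLConfig()                   # must match the checkpoint's shapes
model = TransfoXLLMHeadModel(config)
model = load_tf_weights_in_transfo_xl(model, config, tf_path)

torch.save(model.state_dict(), "pytorch_model.bin")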
train
TransfoXLPreTrainedModel.init_weights
Initialize the weights.
pytorch_pretrained_bert/modeling_transfo_xl.py
def init_weights(self, m): """ Initialize the weights. """ classname = m.__class__.__name__ if classname.find('Linear') != -1: if hasattr(m, 'weight') and m.weight is not None: self.init_weight(m.weight) if hasattr(m, 'bias') and m.bias is not None: self.init_bias(m.bias) elif classname.find('AdaptiveEmbedding') != -1: if hasattr(m, 'emb_projs'): for i in range(len(m.emb_projs)): if m.emb_projs[i] is not None: nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std) elif classname.find('Embedding') != -1: if hasattr(m, 'weight'): self.init_weight(m.weight) elif classname.find('ProjectedAdaptiveLogSoftmax') != -1: if hasattr(m, 'cluster_weight') and m.cluster_weight is not None: self.init_weight(m.cluster_weight) if hasattr(m, 'cluster_bias') and m.cluster_bias is not None: self.init_bias(m.cluster_bias) if hasattr(m, 'out_projs'): for i in range(len(m.out_projs)): if m.out_projs[i] is not None: nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std) elif classname.find('LayerNorm') != -1: if hasattr(m, 'weight'): nn.init.normal_(m.weight, 1.0, self.config.init_std) if hasattr(m, 'bias') and m.bias is not None: self.init_bias(m.bias) elif classname.find('TransformerLM') != -1: if hasattr(m, 'r_emb'): self.init_weight(m.r_emb) if hasattr(m, 'r_w_bias'): self.init_weight(m.r_w_bias) if hasattr(m, 'r_r_bias'): self.init_weight(m.r_r_bias) if hasattr(m, 'r_bias'): self.init_bias(m.r_bias)
def init_weights(self, m): """ Initialize the weights. """ classname = m.__class__.__name__ if classname.find('Linear') != -1: if hasattr(m, 'weight') and m.weight is not None: self.init_weight(m.weight) if hasattr(m, 'bias') and m.bias is not None: self.init_bias(m.bias) elif classname.find('AdaptiveEmbedding') != -1: if hasattr(m, 'emb_projs'): for i in range(len(m.emb_projs)): if m.emb_projs[i] is not None: nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std) elif classname.find('Embedding') != -1: if hasattr(m, 'weight'): self.init_weight(m.weight) elif classname.find('ProjectedAdaptiveLogSoftmax') != -1: if hasattr(m, 'cluster_weight') and m.cluster_weight is not None: self.init_weight(m.cluster_weight) if hasattr(m, 'cluster_bias') and m.cluster_bias is not None: self.init_bias(m.cluster_bias) if hasattr(m, 'out_projs'): for i in range(len(m.out_projs)): if m.out_projs[i] is not None: nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std) elif classname.find('LayerNorm') != -1: if hasattr(m, 'weight'): nn.init.normal_(m.weight, 1.0, self.config.init_std) if hasattr(m, 'bias') and m.bias is not None: self.init_bias(m.bias) elif classname.find('TransformerLM') != -1: if hasattr(m, 'r_emb'): self.init_weight(m.r_emb) if hasattr(m, 'r_w_bias'): self.init_weight(m.r_w_bias) if hasattr(m, 'r_r_bias'): self.init_weight(m.r_r_bias) if hasattr(m, 'r_bias'): self.init_bias(m.r_bias)
[ "Initialize", "the", "weights", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_transfo_xl.py#L846-L885
[ "def", "init_weights", "(", "self", ",", "m", ")", ":", "classname", "=", "m", ".", "__class__", ".", "__name__", "if", "classname", ".", "find", "(", "'Linear'", ")", "!=", "-", "1", ":", "if", "hasattr", "(", "m", ",", "'weight'", ")", "and", "m", ".", "weight", "is", "not", "None", ":", "self", ".", "init_weight", "(", "m", ".", "weight", ")", "if", "hasattr", "(", "m", ",", "'bias'", ")", "and", "m", ".", "bias", "is", "not", "None", ":", "self", ".", "init_bias", "(", "m", ".", "bias", ")", "elif", "classname", ".", "find", "(", "'AdaptiveEmbedding'", ")", "!=", "-", "1", ":", "if", "hasattr", "(", "m", ",", "'emb_projs'", ")", ":", "for", "i", "in", "range", "(", "len", "(", "m", ".", "emb_projs", ")", ")", ":", "if", "m", ".", "emb_projs", "[", "i", "]", "is", "not", "None", ":", "nn", ".", "init", ".", "normal_", "(", "m", ".", "emb_projs", "[", "i", "]", ",", "0.0", ",", "self", ".", "config", ".", "proj_init_std", ")", "elif", "classname", ".", "find", "(", "'Embedding'", ")", "!=", "-", "1", ":", "if", "hasattr", "(", "m", ",", "'weight'", ")", ":", "self", ".", "init_weight", "(", "m", ".", "weight", ")", "elif", "classname", ".", "find", "(", "'ProjectedAdaptiveLogSoftmax'", ")", "!=", "-", "1", ":", "if", "hasattr", "(", "m", ",", "'cluster_weight'", ")", "and", "m", ".", "cluster_weight", "is", "not", "None", ":", "self", ".", "init_weight", "(", "m", ".", "cluster_weight", ")", "if", "hasattr", "(", "m", ",", "'cluster_bias'", ")", "and", "m", ".", "cluster_bias", "is", "not", "None", ":", "self", ".", "init_bias", "(", "m", ".", "cluster_bias", ")", "if", "hasattr", "(", "m", ",", "'out_projs'", ")", ":", "for", "i", "in", "range", "(", "len", "(", "m", ".", "out_projs", ")", ")", ":", "if", "m", ".", "out_projs", "[", "i", "]", "is", "not", "None", ":", "nn", ".", "init", ".", "normal_", "(", "m", ".", "out_projs", "[", "i", "]", ",", "0.0", ",", "self", ".", "config", ".", "proj_init_std", ")", "elif", "classname", ".", "find", "(", "'LayerNorm'", ")", "!=", "-", "1", ":", "if", "hasattr", "(", "m", ",", "'weight'", ")", ":", "nn", ".", "init", ".", "normal_", "(", "m", ".", "weight", ",", "1.0", ",", "self", ".", "config", ".", "init_std", ")", "if", "hasattr", "(", "m", ",", "'bias'", ")", "and", "m", ".", "bias", "is", "not", "None", ":", "self", ".", "init_bias", "(", "m", ".", "bias", ")", "elif", "classname", ".", "find", "(", "'TransformerLM'", ")", "!=", "-", "1", ":", "if", "hasattr", "(", "m", ",", "'r_emb'", ")", ":", "self", ".", "init_weight", "(", "m", ".", "r_emb", ")", "if", "hasattr", "(", "m", ",", "'r_w_bias'", ")", ":", "self", ".", "init_weight", "(", "m", ".", "r_w_bias", ")", "if", "hasattr", "(", "m", ",", "'r_r_bias'", ")", ":", "self", ".", "init_weight", "(", "m", ".", "r_r_bias", ")", "if", "hasattr", "(", "m", ",", "'r_bias'", ")", ":", "self", ".", "init_bias", "(", "m", ".", "r_bias", ")" ]
b832d5bb8a6dfc5965015b828e577677eace601e
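init_weights above is written as a callback for nn.Module.apply(), which visits every submodule; a minimal standalone analogue of that class-name dispatch pattern (init_demo and the toy network are invented for illustration):

import torch.nn as nn

def init_demo(m):
    # dispatch on the submodule's class name, as init_weights does
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        if hasattr(m, 'weight') and m.weight is not None:
            nn.init.normal_(m.weight, 0.0, 0.02)
        if hasattr(m, 'bias') and m.bias is not None:
            nn.init.constant_(m.bias, 0.0)

net = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
net.apply(init_demo)   # applies init_demo recursively to every submodule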
train
TransfoXLPreTrainedModel.from_pretrained
Instantiate a TransfoXLPreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of: . `transfo-xl` - a path or url to a pretrained model archive containing: . `transfo_xl_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a TransfoXLModel instance - a path or url to a pretrained model archive containing: . `transfo_xl_config.json` a configuration file for the model . `model.chkpt` a TensorFlow checkpoint from_tf: should we load the weights from a locally saved TensorFlow checkpoint cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models *inputs, **kwargs: additional inputs for the specific TransfoXL class
pytorch_pretrained_bert/modeling_transfo_xl.py
def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None,
                    from_tf=False, *inputs, **kwargs):
    """
    Instantiate a TransfoXLPreTrainedModel from a pre-trained model file or a pytorch state dict.
    Download and cache the pre-trained model file if needed.

    Params:
        pretrained_model_name_or_path: either:
            - a str with the name of a pre-trained model to load selected in the list of:
                . `transfo-xl`
            - a path or url to a pretrained model archive containing:
                . `transfo_xl_config.json` a configuration file for the model
                . `pytorch_model.bin` a PyTorch dump of a TransfoXLModel instance
            - a path or url to a pretrained model archive containing:
                . `transfo_xl_config.json` a configuration file for the model
                . `model.chkpt` a TensorFlow checkpoint
        from_tf: should we load the weights from a locally saved TensorFlow checkpoint
        cache_dir: an optional path to a folder in which the pre-trained models will be cached.
        state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
        *inputs, **kwargs: additional inputs for the specific TransfoXL class
    """
    if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
        archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
        config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
    else:
        archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
        config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
    # redirect to the cache, if necessary
    try:
        resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
    except EnvironmentError:
        logger.error(
            "Model name '{}' was not found in model name list ({}). "
            "We assumed '{}' was a path or url but couldn't find files {} and {} "
            "at this path or url.".format(
                pretrained_model_name_or_path,
                ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
                pretrained_model_name_or_path,
                archive_file, config_file))
        return None
    if resolved_archive_file == archive_file and resolved_config_file == config_file:
        logger.info("loading weights file {}".format(archive_file))
        logger.info("loading configuration file {}".format(config_file))
    else:
        logger.info("loading weights file {} from cache at {}".format(
            archive_file, resolved_archive_file))
        logger.info("loading configuration file {} from cache at {}".format(
            config_file, resolved_config_file))
    # Load config
    config = TransfoXLConfig.from_json_file(resolved_config_file)
    logger.info("Model config {}".format(config))
    # Instantiate model.
model = cls(config, *inputs, **kwargs) if state_dict is None and not from_tf: state_dict = torch.load(resolved_archive_file, map_location='cpu') if from_tf: # Directly load from a TensorFlow checkpoint return load_tf_weights_in_transfo_xl(model, config, pretrained_model_name_or_path) missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') start_prefix = '' if not hasattr(model, 'transformer') and any(s.startswith('transformer.') for s in state_dict.keys()): start_prefix = 'transformer.' load(model, prefix=start_prefix) if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( model.__class__.__name__, "\n\t".join(error_msgs))) # Make sure we are still sharing the input and output embeddings if hasattr(model, 'tie_weights'): model.tie_weights() return model
def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None,
                    from_tf=False, *inputs, **kwargs):
    """
    Instantiate a TransfoXLPreTrainedModel from a pre-trained model file or a pytorch state dict.
    Download and cache the pre-trained model file if needed.

    Params:
        pretrained_model_name_or_path: either:
            - a str with the name of a pre-trained model to load selected in the list of:
                . `transfo-xl`
            - a path or url to a pretrained model archive containing:
                . `transfo_xl_config.json` a configuration file for the model
                . `pytorch_model.bin` a PyTorch dump of a TransfoXLModel instance
            - a path or url to a pretrained model archive containing:
                . `transfo_xl_config.json` a configuration file for the model
                . `model.chkpt` a TensorFlow checkpoint
        from_tf: should we load the weights from a locally saved TensorFlow checkpoint
        cache_dir: an optional path to a folder in which the pre-trained models will be cached.
        state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
        *inputs, **kwargs: additional inputs for the specific TransfoXL class
    """
    if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
        archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
        config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
    else:
        archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
        config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
    # redirect to the cache, if necessary
    try:
        resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
    except EnvironmentError:
        logger.error(
            "Model name '{}' was not found in model name list ({}). "
            "We assumed '{}' was a path or url but couldn't find files {} and {} "
            "at this path or url.".format(
                pretrained_model_name_or_path,
                ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
                pretrained_model_name_or_path,
                archive_file, config_file))
        return None
    if resolved_archive_file == archive_file and resolved_config_file == config_file:
        logger.info("loading weights file {}".format(archive_file))
        logger.info("loading configuration file {}".format(config_file))
    else:
        logger.info("loading weights file {} from cache at {}".format(
            archive_file, resolved_archive_file))
        logger.info("loading configuration file {} from cache at {}".format(
            config_file, resolved_config_file))
    # Load config
    config = TransfoXLConfig.from_json_file(resolved_config_file)
    logger.info("Model config {}".format(config))
    # Instantiate model.
model = cls(config, *inputs, **kwargs) if state_dict is None and not from_tf: state_dict = torch.load(resolved_archive_file, map_location='cpu') if from_tf: # Directly load from a TensorFlow checkpoint return load_tf_weights_in_transfo_xl(model, config, pretrained_model_name_or_path) missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') start_prefix = '' if not hasattr(model, 'transformer') and any(s.startswith('transformer.') for s in state_dict.keys()): start_prefix = 'transformer.' load(model, prefix=start_prefix) if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( model.__class__.__name__, "\n\t".join(error_msgs))) # Make sure we are still sharing the input and output embeddings if hasattr(model, 'tie_weights'): model.tie_weights() return model
[ "Instantiate", "a", "TransfoXLPreTrainedModel", "from", "a", "pre", "-", "trained", "model", "file", "or", "a", "pytorch", "state", "dict", ".", "Download", "and", "cache", "the", "pre", "-", "trained", "model", "file", "if", "needed", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_transfo_xl.py#L891-L986
[ "def", "from_pretrained", "(", "cls", ",", "pretrained_model_name_or_path", ",", "state_dict", "=", "None", ",", "cache_dir", "=", "None", ",", "from_tf", "=", "False", ",", "*", "inputs", ",", "*", "*", "kwargs", ")", ":", "if", "pretrained_model_name_or_path", "in", "PRETRAINED_MODEL_ARCHIVE_MAP", ":", "archive_file", "=", "PRETRAINED_MODEL_ARCHIVE_MAP", "[", "pretrained_model_name_or_path", "]", "config_file", "=", "PRETRAINED_CONFIG_ARCHIVE_MAP", "[", "pretrained_model_name_or_path", "]", "else", ":", "archive_file", "=", "os", ".", "path", ".", "join", "(", "pretrained_model_name_or_path", ",", "WEIGHTS_NAME", ")", "config_file", "=", "os", ".", "path", ".", "join", "(", "pretrained_model_name_or_path", ",", "CONFIG_NAME", ")", "# redirect to the cache, if necessary", "try", ":", "resolved_archive_file", "=", "cached_path", "(", "archive_file", ",", "cache_dir", "=", "cache_dir", ")", "resolved_config_file", "=", "cached_path", "(", "config_file", ",", "cache_dir", "=", "cache_dir", ")", "except", "EnvironmentError", ":", "logger", ".", "error", "(", "\"Model name '{}' was not found in model name list ({}). \"", "\"We assumed '{}' was a path or url but couldn't find files {} and {} \"", "\"at this path or url.\"", ".", "format", "(", "pretrained_model_name_or_path", ",", "', '", ".", "join", "(", "PRETRAINED_MODEL_ARCHIVE_MAP", ".", "keys", "(", ")", ")", ",", "pretrained_model_name_or_path", ",", "archive_file", ",", "config_file", ")", ")", "return", "None", "if", "resolved_archive_file", "==", "archive_file", "and", "resolved_config_file", "==", "config_file", ":", "logger", ".", "info", "(", "\"loading weights file {}\"", ".", "format", "(", "archive_file", ")", ")", "logger", ".", "info", "(", "\"loading configuration file {}\"", ".", "format", "(", "config_file", ")", ")", "else", ":", "logger", ".", "info", "(", "\"loading weights file {} from cache at {}\"", ".", "format", "(", "archive_file", ",", "resolved_archive_file", ")", ")", "logger", ".", "info", "(", "\"loading configuration file {} from cache at {}\"", ".", "format", "(", "config_file", ",", "resolved_config_file", ")", ")", "# Load config", "config", "=", "TransfoXLConfig", ".", "from_json_file", "(", "resolved_config_file", ")", "logger", ".", "info", "(", "\"Model config {}\"", ".", "format", "(", "config", ")", ")", "# Instantiate model.", "model", "=", "cls", "(", "config", ",", "*", "inputs", ",", "*", "*", "kwargs", ")", "if", "state_dict", "is", "None", "and", "not", "from_tf", ":", "state_dict", "=", "torch", ".", "load", "(", "resolved_archive_file", ",", "map_location", "=", "'cpu'", ")", "if", "from_tf", ":", "# Directly load from a TensorFlow checkpoint", "return", "load_tf_weights_in_transfo_xl", "(", "model", ",", "config", ",", "pretrained_model_name_or_path", ")", "missing_keys", "=", "[", "]", "unexpected_keys", "=", "[", "]", "error_msgs", "=", "[", "]", "# copy state_dict so _load_from_state_dict can modify it", "metadata", "=", "getattr", "(", "state_dict", ",", "'_metadata'", ",", "None", ")", "state_dict", "=", "state_dict", ".", "copy", "(", ")", "if", "metadata", "is", "not", "None", ":", "state_dict", ".", "_metadata", "=", "metadata", "def", "load", "(", "module", ",", "prefix", "=", "''", ")", ":", "local_metadata", "=", "{", "}", "if", "metadata", "is", "None", "else", "metadata", ".", "get", "(", "prefix", "[", ":", "-", "1", "]", ",", "{", "}", ")", "module", ".", "_load_from_state_dict", "(", "state_dict", ",", "prefix", ",", "local_metadata", ",", "True", ",", "missing_keys", ",", 
"unexpected_keys", ",", "error_msgs", ")", "for", "name", ",", "child", "in", "module", ".", "_modules", ".", "items", "(", ")", ":", "if", "child", "is", "not", "None", ":", "load", "(", "child", ",", "prefix", "+", "name", "+", "'.'", ")", "start_prefix", "=", "''", "if", "not", "hasattr", "(", "model", ",", "'transformer'", ")", "and", "any", "(", "s", ".", "startswith", "(", "'transformer.'", ")", "for", "s", "in", "state_dict", ".", "keys", "(", ")", ")", ":", "start_prefix", "=", "'transformer.'", "load", "(", "model", ",", "prefix", "=", "start_prefix", ")", "if", "len", "(", "missing_keys", ")", ">", "0", ":", "logger", ".", "info", "(", "\"Weights of {} not initialized from pretrained model: {}\"", ".", "format", "(", "model", ".", "__class__", ".", "__name__", ",", "missing_keys", ")", ")", "if", "len", "(", "unexpected_keys", ")", ">", "0", ":", "logger", ".", "info", "(", "\"Weights from pretrained model not used in {}: {}\"", ".", "format", "(", "model", ".", "__class__", ".", "__name__", ",", "unexpected_keys", ")", ")", "if", "len", "(", "error_msgs", ")", ">", "0", ":", "raise", "RuntimeError", "(", "'Error(s) in loading state_dict for {}:\\n\\t{}'", ".", "format", "(", "model", ".", "__class__", ".", "__name__", ",", "\"\\n\\t\"", ".", "join", "(", "error_msgs", ")", ")", ")", "# Make sure we are still sharing the input and output embeddings", "if", "hasattr", "(", "model", ",", "'tie_weights'", ")", ":", "model", ".", "tie_weights", "(", ")", "return", "model" ]
b832d5bb8a6dfc5965015b828e577677eace601e
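A minimal usage sketch for the loader above. The pretrained name 'transfo-xl-wt103' and the cache path are illustrative assumptions; any key of PRETRAINED_MODEL_ARCHIVE_MAP, or a local directory holding the weights and config files named in the docstring, should work the same way:

from pytorch_pretrained_bert import TransfoXLModel

# download-and-cache path (assumed pretrained name)
model = TransfoXLModel.from_pretrained('transfo-xl-wt103', cache_dir='/tmp/transfo_xl')
model.eval()

# local-directory path: the folder must contain pytorch_model.bin and the config file
# model = TransfoXLModel.from_pretrained('./my_transfo_xl_checkpoint/')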
train
TransfoXLModel.forward
Params: input_ids :: [bsz, len] mems :: optional mems from previous forward passes (or init_mems) list (num layers) of mem states at the entry of each layer shape :: [self.config.mem_len, bsz, self.config.d_model] Note that the first two dimensions are transposed in `mems` with regard to `input_ids` and `target` Returns: tuple (last_hidden, new_mems) where: new_mems: list (num layers) of mem states at the entry of each layer shape :: [self.config.mem_len, bsz, self.config.d_model] last_hidden: output of the last layer: shape :: [bsz, len, self.config.d_model]
pytorch_pretrained_bert/modeling_transfo_xl.py
def forward(self, input_ids, mems=None):
    """ Params:
            input_ids :: [bsz, len]
            mems :: optional mems from previous forward passes (or init_mems)
                list (num layers) of mem states at the entry of each layer
                shape :: [self.config.mem_len, bsz, self.config.d_model]
                Note that the first two dimensions are transposed in `mems` with regard to `input_ids` and `target`
        Returns:
            tuple (last_hidden, new_mems) where:
                new_mems: list (num layers) of mem states at the entry of each layer
                    shape :: [self.config.mem_len, bsz, self.config.d_model]
                last_hidden: output of the last layer:
                    shape :: [bsz, len, self.config.d_model]
    """
    # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
    # so we transpose here from shape [bsz, len] to shape [len, bsz]
    input_ids = input_ids.transpose(0, 1).contiguous()

    if mems is None:
        mems = self.init_mems(input_ids)
    last_hidden, new_mems = self._forward(input_ids, mems=mems)

    # We transpose back here to shape [bsz, len, hidden_dim]
    last_hidden = last_hidden.transpose(0, 1).contiguous()

    return (last_hidden, new_mems)
def forward(self, input_ids, mems=None):
    """ Params:
            input_ids :: [bsz, len]
            mems :: optional mems from previous forward passes (or init_mems)
                list (num layers) of mem states at the entry of each layer
                shape :: [self.config.mem_len, bsz, self.config.d_model]
                Note that the first two dimensions are transposed in `mems` with regard to `input_ids` and `target`
        Returns:
            tuple (last_hidden, new_mems) where:
                new_mems: list (num layers) of mem states at the entry of each layer
                    shape :: [self.config.mem_len, bsz, self.config.d_model]
                last_hidden: output of the last layer:
                    shape :: [bsz, len, self.config.d_model]
    """
    # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
    # so we transpose here from shape [bsz, len] to shape [len, bsz]
    input_ids = input_ids.transpose(0, 1).contiguous()

    if mems is None:
        mems = self.init_mems(input_ids)
    last_hidden, new_mems = self._forward(input_ids, mems=mems)

    # We transpose back here to shape [bsz, len, hidden_dim]
    last_hidden = last_hidden.transpose(0, 1).contiguous()

    return (last_hidden, new_mems)
[ "Params", ":", "input_ids", "::", "[", "bsz", "len", "]", "mems", "::", "optional", "mems", "from", "previous", "forward", "passes", "(", "or", "init_mems", ")", "list", "(", "num", "layers", ")", "of", "mem", "states", "at", "the", "entry", "of", "each", "layer", "shape", "::", "[", "self", ".", "config", ".", "mem_len", "bsz", "self", ".", "config", ".", "d_model", "]", "Note", "that", "the", "first", "two", "dimensions", "are", "transposed", "in", "mems", "with", "regard", "to", "input_ids", "and", "target", "Returns", ":", "tuple", "(", "last_hidden", "new_mems", ")", "where", ":", "new_mems", ":", "list", "(", "num", "layers", ")", "of", "mem", "states", "at", "the", "entry", "of", "each", "layer", "shape", "::", "[", "self", ".", "config", ".", "mem_len", "bsz", "self", ".", "config", ".", "d_model", "]", "last_hidden", ":", "output", "of", "the", "last", "layer", ":", "shape", "::", "[", "bsz", "len", "self", ".", "config", ".", "d_model", "]" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_transfo_xl.py#L1239-L1263
[ "def", "forward", "(", "self", ",", "input_ids", ",", "mems", "=", "None", ")", ":", "# the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library", "# so we transpose here from shape [bsz, len] to shape [len, bsz]", "input_ids", "=", "input_ids", ".", "transpose", "(", "0", ",", "1", ")", ".", "contiguous", "(", ")", "if", "mems", "is", "None", ":", "mems", "=", "self", ".", "init_mems", "(", "input_ids", ")", "last_hidden", ",", "new_mems", "=", "self", ".", "_forward", "(", "input_ids", ",", "mems", "=", "mems", ")", "# We transpose back here to shape [bsz, len, hidden_dim]", "last_hidden", "=", "last_hidden", ".", "transpose", "(", "0", ",", "1", ")", ".", "contiguous", "(", ")", "return", "(", "last_hidden", ",", "new_mems", ")" ]
b832d5bb8a6dfc5965015b828e577677eace601e
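A short sketch, with dummy token ids, of how the mems returned by one call are meant to be threaded into the next call so that consecutive segments share memory (shapes follow the docstring above; the pretrained name is an assumption):

import torch
from pytorch_pretrained_bert import TransfoXLModel

model = TransfoXLModel.from_pretrained('transfo-xl-wt103')
model.eval()
seg1 = torch.randint(0, model.config.n_token, (1, 16))  # [bsz, len]
seg2 = torch.randint(0, model.config.n_token, (1, 16))
with torch.no_grad():
    hidden1, mems = model(seg1)         # mems initialized internally via init_mems
    hidden2, mems = model(seg2, mems)   # memory carried over from seg1
print(hidden2.shape)                    # torch.Size([1, 16, d_model])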
train
TransfoXLLMHeadModel.tie_weights
Run this to be sure output and input (adaptive) softmax weights are tied
pytorch_pretrained_bert/modeling_transfo_xl.py
def tie_weights(self): """ Run this to be sure output and input (adaptive) softmax weights are tied """ # sampled softmax if self.sample_softmax > 0: if self.config.tie_weight: self.out_layer.weight = self.transformer.word_emb.weight # adaptive softmax (including standard softmax) else: if self.config.tie_weight: for i in range(len(self.crit.out_layers)): self.crit.out_layers[i].weight = self.transformer.word_emb.emb_layers[i].weight if self.config.tie_projs: for i, tie_proj in enumerate(self.config.tie_projs): if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed: self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0] elif tie_proj and self.config.div_val != 1: self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
def tie_weights(self): """ Run this to be sure output and input (adaptive) softmax weights are tied """ # sampled softmax if self.sample_softmax > 0: if self.config.tie_weight: self.out_layer.weight = self.transformer.word_emb.weight # adaptive softmax (including standard softmax) else: if self.config.tie_weight: for i in range(len(self.crit.out_layers)): self.crit.out_layers[i].weight = self.transformer.word_emb.emb_layers[i].weight if self.config.tie_projs: for i, tie_proj in enumerate(self.config.tie_projs): if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed: self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0] elif tie_proj and self.config.div_val != 1: self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
[ "Run", "this", "to", "be", "sure", "output", "and", "input", "(", "adaptive", ")", "softmax", "weights", "are", "tied" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_transfo_xl.py#L1331-L1347
[ "def", "tie_weights", "(", "self", ")", ":", "# sampled softmax", "if", "self", ".", "sample_softmax", ">", "0", ":", "if", "self", ".", "config", ".", "tie_weight", ":", "self", ".", "out_layer", ".", "weight", "=", "self", ".", "transformer", ".", "word_emb", ".", "weight", "# adaptive softmax (including standard softmax)", "else", ":", "if", "self", ".", "config", ".", "tie_weight", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "crit", ".", "out_layers", ")", ")", ":", "self", ".", "crit", ".", "out_layers", "[", "i", "]", ".", "weight", "=", "self", ".", "transformer", ".", "word_emb", ".", "emb_layers", "[", "i", "]", ".", "weight", "if", "self", ".", "config", ".", "tie_projs", ":", "for", "i", ",", "tie_proj", "in", "enumerate", "(", "self", ".", "config", ".", "tie_projs", ")", ":", "if", "tie_proj", "and", "self", ".", "config", ".", "div_val", "==", "1", "and", "self", ".", "config", ".", "d_model", "!=", "self", ".", "config", ".", "d_embed", ":", "self", ".", "crit", ".", "out_projs", "[", "i", "]", "=", "self", ".", "transformer", ".", "word_emb", ".", "emb_projs", "[", "0", "]", "elif", "tie_proj", "and", "self", ".", "config", ".", "div_val", "!=", "1", ":", "self", ".", "crit", ".", "out_projs", "[", "i", "]", "=", "self", ".", "transformer", ".", "word_emb", ".", "emb_projs", "[", "i", "]" ]
b832d5bb8a6dfc5965015b828e577677eace601e
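One way to check that the tie took effect, assuming the adaptive-softmax path (sample_softmax <= 0) and config.tie_weight=True on an already-built TransfoXLLMHeadModel named `model`:

model.tie_weights()
emb_w = model.transformer.word_emb.emb_layers[0].weight
out_w = model.crit.out_layers[0].weight
assert emb_w.data_ptr() == out_w.data_ptr()  # same storage: shared, not copied

Note that from_pretrained (above) already calls tie_weights() at the end of loading, so an explicit call is only needed after manual surgery on the embeddings.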
train
TransfoXLLMHeadModel.forward
Params: input_ids :: [bsz, len] target :: [bsz, len] Returns: tuple(softmax_output, new_mems) where: new_mems: list (num layers) of hidden states at the entry of each layer shape :: [mem_len, bsz, self.config.d_model] :: Warning: shapes are transposed here with regard to input_ids softmax_output: output of the (adaptive) softmax: if target is None: log probabilities of tokens, shape :: [bsz, len, n_tokens] else: Negative log likelihood of shape :: [bsz, len]
pytorch_pretrained_bert/modeling_transfo_xl.py
def forward(self, input_ids, target=None, mems=None):
    """ Params:
            input_ids :: [bsz, len]
            target :: [bsz, len]
        Returns:
            tuple(softmax_output, new_mems) where:
                new_mems: list (num layers) of hidden states at the entry of each layer
                    shape :: [mem_len, bsz, self.config.d_model] :: Warning: shapes are transposed here with regard to input_ids
                softmax_output: output of the (adaptive) softmax:
                    if target is None: log probabilities of tokens, shape :: [bsz, len, n_tokens]
                    else: Negative log likelihood of shape :: [bsz, len]
    """
    bsz = input_ids.size(0)
    tgt_len = input_ids.size(1)

    last_hidden, new_mems = self.transformer(input_ids, mems)

    pred_hid = last_hidden[:, -tgt_len:]
    if self.sample_softmax > 0 and self.training:
        assert self.config.tie_weight
        logit = sample_logits(self.transformer.word_emb, self.out_layer.bias, target, pred_hid, self.sampler)
        softmax_output = -F.log_softmax(logit, -1)[:, :, 0]
    else:
        softmax_output = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target)
        if target is None:
            softmax_output = softmax_output.view(bsz, tgt_len, -1)
        else:
            softmax_output = softmax_output.view(bsz, tgt_len)

    # We transpose back
    return (softmax_output, new_mems)
def forward(self, input_ids, target=None, mems=None):
    """ Params:
            input_ids :: [bsz, len]
            target :: [bsz, len]
        Returns:
            tuple(softmax_output, new_mems) where:
                new_mems: list (num layers) of hidden states at the entry of each layer
                    shape :: [mem_len, bsz, self.config.d_model] :: Warning: shapes are transposed here with regard to input_ids
                softmax_output: output of the (adaptive) softmax:
                    if target is None: log probabilities of tokens, shape :: [bsz, len, n_tokens]
                    else: Negative log likelihood of shape :: [bsz, len]
    """
    bsz = input_ids.size(0)
    tgt_len = input_ids.size(1)

    last_hidden, new_mems = self.transformer(input_ids, mems)

    pred_hid = last_hidden[:, -tgt_len:]
    if self.sample_softmax > 0 and self.training:
        assert self.config.tie_weight
        logit = sample_logits(self.transformer.word_emb, self.out_layer.bias, target, pred_hid, self.sampler)
        softmax_output = -F.log_softmax(logit, -1)[:, :, 0]
    else:
        softmax_output = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target)
        if target is None:
            softmax_output = softmax_output.view(bsz, tgt_len, -1)
        else:
            softmax_output = softmax_output.view(bsz, tgt_len)

    # We transpose back
    return (softmax_output, new_mems)
[ "Params", ":", "input_ids", "::", "[", "bsz", "len", "]", "target", "::", "[", "bsz", "len", "]", "Returns", ":", "tuple", "(", "softmax_output", "new_mems", ")", "where", ":", "new_mems", ":", "list", "(", "num", "layers", ")", "of", "hidden", "states", "at", "the", "entry", "of", "each", "layer", "shape", "::", "[", "mem_len", "bsz", "self", ".", "config", ".", "d_model", "]", "::", "Warning", ":", "shapes", "are", "transposed", "here", "w", ".", "regards", "to", "input_ids", "softmax_output", ":", "output", "of", "the", "(", "adaptive", ")", "softmax", ":", "if", "target", "is", "None", ":", "Negative", "log", "likelihood", "of", "shape", "::", "[", "bsz", "len", "]", "else", ":", "log", "probabilities", "of", "tokens", "shape", "::", "[", "bsz", "len", "n_tokens", "]" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_transfo_xl.py#L1355-L1387
[ "def", "forward", "(", "self", ",", "input_ids", ",", "target", "=", "None", ",", "mems", "=", "None", ")", ":", "bsz", "=", "input_ids", ".", "size", "(", "0", ")", "tgt_len", "=", "input_ids", ".", "size", "(", "1", ")", "last_hidden", ",", "new_mems", "=", "self", ".", "transformer", "(", "input_ids", ",", "mems", ")", "pred_hid", "=", "last_hidden", "[", ":", ",", "-", "tgt_len", ":", "]", "if", "self", ".", "sample_softmax", ">", "0", "and", "self", ".", "training", ":", "assert", "self", ".", "config", ".", "tie_weight", "logit", "=", "sample_logits", "(", "self", ".", "transformer", ".", "word_emb", ",", "self", ".", "out_layer", ".", "bias", ",", "target", ",", "pred_hid", ",", "self", ".", "sampler", ")", "softmax_output", "=", "-", "F", ".", "log_softmax", "(", "logit", ",", "-", "1", ")", "[", ":", ",", ":", ",", "0", "]", "else", ":", "softmax_output", "=", "self", ".", "crit", "(", "pred_hid", ".", "view", "(", "-", "1", ",", "pred_hid", ".", "size", "(", "-", "1", ")", ")", ",", "target", ")", "if", "target", "is", "None", ":", "softmax_output", "=", "softmax_output", ".", "view", "(", "bsz", ",", "tgt_len", ",", "-", "1", ")", "else", ":", "softmax_output", "=", "softmax_output", ".", "view", "(", "bsz", ",", "tgt_len", ")", "# We transpose back", "return", "(", "softmax_output", ",", "new_mems", ")" ]
b832d5bb8a6dfc5965015b828e577677eace601e
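A sketch of the two output modes documented above, using random ids and an assumed pretrained name:

import torch
from pytorch_pretrained_bert import TransfoXLLMHeadModel

model = TransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
model.eval()
ids = torch.randint(0, model.config.n_token, (1, 8))
with torch.no_grad():
    log_probs, mems = model(ids)         # target=None -> [1, 8, n_tokens] log probabilities
    nll, mems = model(ids, target=ids)   # with target -> [1, 8] negative log likelihood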
train
to_offset
Return DateOffset object from string or tuple representation or datetime.timedelta object Parameters ---------- freq : str, tuple, datetime.timedelta, DateOffset or None Returns ------- DateOffset None if freq is None. Raises ------ ValueError If freq is an invalid frequency See Also -------- DateOffset Examples -------- >>> to_offset('5min') <5 * Minutes> >>> to_offset('1D1H') <25 * Hours> >>> to_offset(('W', 2)) <2 * Weeks: weekday=6> >>> to_offset((2, 'B')) <2 * BusinessDays> >>> to_offset(datetime.timedelta(days=1)) <Day> >>> to_offset(Hour()) <Hour>
pandas/tseries/frequencies.py
def to_offset(freq): """ Return DateOffset object from string or tuple representation or datetime.timedelta object Parameters ---------- freq : str, tuple, datetime.timedelta, DateOffset or None Returns ------- DateOffset None if freq is None. Raises ------ ValueError If freq is an invalid frequency See Also -------- DateOffset Examples -------- >>> to_offset('5min') <5 * Minutes> >>> to_offset('1D1H') <25 * Hours> >>> to_offset(('W', 2)) <2 * Weeks: weekday=6> >>> to_offset((2, 'B')) <2 * BusinessDays> >>> to_offset(datetime.timedelta(days=1)) <Day> >>> to_offset(Hour()) <Hour> """ if freq is None: return None if isinstance(freq, DateOffset): return freq if isinstance(freq, tuple): name = freq[0] stride = freq[1] if isinstance(stride, str): name, stride = stride, name name, _ = libfreqs._base_and_stride(name) delta = get_offset(name) * stride elif isinstance(freq, timedelta): delta = None freq = Timedelta(freq) try: for name in freq.components._fields: offset = _name_to_offset_map[name] stride = getattr(freq.components, name) if stride != 0: offset = stride * offset if delta is None: delta = offset else: delta = delta + offset except Exception: raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) else: delta = None stride_sign = None try: splitted = re.split(libfreqs.opattern, freq) if splitted[-1] != '' and not splitted[-1].isspace(): # the last element must be blank raise ValueError('last element must be blank') for sep, stride, name in zip(splitted[0::4], splitted[1::4], splitted[2::4]): if sep != '' and not sep.isspace(): raise ValueError('separator must be spaces') prefix = libfreqs._lite_rule_alias.get(name) or name if stride_sign is None: stride_sign = -1 if stride.startswith('-') else 1 if not stride: stride = 1 if prefix in Resolution._reso_str_bump_map.keys(): stride, name = Resolution.get_stride_from_decimal( float(stride), prefix ) stride = int(stride) offset = get_offset(name) offset = offset * int(np.fabs(stride) * stride_sign) if delta is None: delta = offset else: delta = delta + offset except Exception: raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) if delta is None: raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) return delta
def to_offset(freq): """ Return DateOffset object from string or tuple representation or datetime.timedelta object Parameters ---------- freq : str, tuple, datetime.timedelta, DateOffset or None Returns ------- DateOffset None if freq is None. Raises ------ ValueError If freq is an invalid frequency See Also -------- DateOffset Examples -------- >>> to_offset('5min') <5 * Minutes> >>> to_offset('1D1H') <25 * Hours> >>> to_offset(('W', 2)) <2 * Weeks: weekday=6> >>> to_offset((2, 'B')) <2 * BusinessDays> >>> to_offset(datetime.timedelta(days=1)) <Day> >>> to_offset(Hour()) <Hour> """ if freq is None: return None if isinstance(freq, DateOffset): return freq if isinstance(freq, tuple): name = freq[0] stride = freq[1] if isinstance(stride, str): name, stride = stride, name name, _ = libfreqs._base_and_stride(name) delta = get_offset(name) * stride elif isinstance(freq, timedelta): delta = None freq = Timedelta(freq) try: for name in freq.components._fields: offset = _name_to_offset_map[name] stride = getattr(freq.components, name) if stride != 0: offset = stride * offset if delta is None: delta = offset else: delta = delta + offset except Exception: raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) else: delta = None stride_sign = None try: splitted = re.split(libfreqs.opattern, freq) if splitted[-1] != '' and not splitted[-1].isspace(): # the last element must be blank raise ValueError('last element must be blank') for sep, stride, name in zip(splitted[0::4], splitted[1::4], splitted[2::4]): if sep != '' and not sep.isspace(): raise ValueError('separator must be spaces') prefix = libfreqs._lite_rule_alias.get(name) or name if stride_sign is None: stride_sign = -1 if stride.startswith('-') else 1 if not stride: stride = 1 if prefix in Resolution._reso_str_bump_map.keys(): stride, name = Resolution.get_stride_from_decimal( float(stride), prefix ) stride = int(stride) offset = get_offset(name) offset = offset * int(np.fabs(stride) * stride_sign) if delta is None: delta = offset else: delta = delta + offset except Exception: raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) if delta is None: raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) return delta
[ "Return", "DateOffset", "object", "from", "string", "or", "tuple", "representation", "or", "datetime", ".", "timedelta", "object" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/frequencies.py#L57-L164
[ "def", "to_offset", "(", "freq", ")", ":", "if", "freq", "is", "None", ":", "return", "None", "if", "isinstance", "(", "freq", ",", "DateOffset", ")", ":", "return", "freq", "if", "isinstance", "(", "freq", ",", "tuple", ")", ":", "name", "=", "freq", "[", "0", "]", "stride", "=", "freq", "[", "1", "]", "if", "isinstance", "(", "stride", ",", "str", ")", ":", "name", ",", "stride", "=", "stride", ",", "name", "name", ",", "_", "=", "libfreqs", ".", "_base_and_stride", "(", "name", ")", "delta", "=", "get_offset", "(", "name", ")", "*", "stride", "elif", "isinstance", "(", "freq", ",", "timedelta", ")", ":", "delta", "=", "None", "freq", "=", "Timedelta", "(", "freq", ")", "try", ":", "for", "name", "in", "freq", ".", "components", ".", "_fields", ":", "offset", "=", "_name_to_offset_map", "[", "name", "]", "stride", "=", "getattr", "(", "freq", ".", "components", ",", "name", ")", "if", "stride", "!=", "0", ":", "offset", "=", "stride", "*", "offset", "if", "delta", "is", "None", ":", "delta", "=", "offset", "else", ":", "delta", "=", "delta", "+", "offset", "except", "Exception", ":", "raise", "ValueError", "(", "libfreqs", ".", "INVALID_FREQ_ERR_MSG", ".", "format", "(", "freq", ")", ")", "else", ":", "delta", "=", "None", "stride_sign", "=", "None", "try", ":", "splitted", "=", "re", ".", "split", "(", "libfreqs", ".", "opattern", ",", "freq", ")", "if", "splitted", "[", "-", "1", "]", "!=", "''", "and", "not", "splitted", "[", "-", "1", "]", ".", "isspace", "(", ")", ":", "# the last element must be blank", "raise", "ValueError", "(", "'last element must be blank'", ")", "for", "sep", ",", "stride", ",", "name", "in", "zip", "(", "splitted", "[", "0", ":", ":", "4", "]", ",", "splitted", "[", "1", ":", ":", "4", "]", ",", "splitted", "[", "2", ":", ":", "4", "]", ")", ":", "if", "sep", "!=", "''", "and", "not", "sep", ".", "isspace", "(", ")", ":", "raise", "ValueError", "(", "'separator must be spaces'", ")", "prefix", "=", "libfreqs", ".", "_lite_rule_alias", ".", "get", "(", "name", ")", "or", "name", "if", "stride_sign", "is", "None", ":", "stride_sign", "=", "-", "1", "if", "stride", ".", "startswith", "(", "'-'", ")", "else", "1", "if", "not", "stride", ":", "stride", "=", "1", "if", "prefix", "in", "Resolution", ".", "_reso_str_bump_map", ".", "keys", "(", ")", ":", "stride", ",", "name", "=", "Resolution", ".", "get_stride_from_decimal", "(", "float", "(", "stride", ")", ",", "prefix", ")", "stride", "=", "int", "(", "stride", ")", "offset", "=", "get_offset", "(", "name", ")", "offset", "=", "offset", "*", "int", "(", "np", ".", "fabs", "(", "stride", ")", "*", "stride_sign", ")", "if", "delta", "is", "None", ":", "delta", "=", "offset", "else", ":", "delta", "=", "delta", "+", "offset", "except", "Exception", ":", "raise", "ValueError", "(", "libfreqs", ".", "INVALID_FREQ_ERR_MSG", ".", "format", "(", "freq", ")", ")", "if", "delta", "is", "None", ":", "raise", "ValueError", "(", "libfreqs", ".", "INVALID_FREQ_ERR_MSG", ".", "format", "(", "freq", ")", ")", "return", "delta" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
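A few indicative calls beyond the doctests above; the return values shown in comments are approximate reprs:

from datetime import timedelta
from pandas.tseries.frequencies import to_offset

to_offset('15T')                            # <15 * Minutes>
to_offset('-2W')                            # <-2 * Weeks: weekday=6>
to_offset(timedelta(hours=1, minutes=30))   # <90 * Minutes>
to_offset('foo')                            # raises ValueError: Invalid frequency: foo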
train
get_offset
Return DateOffset object associated with rule name Examples -------- get_offset('EOM') --> BMonthEnd(1)
pandas/tseries/frequencies.py
def get_offset(name): """ Return DateOffset object associated with rule name Examples -------- get_offset('EOM') --> BMonthEnd(1) """ if name not in libfreqs._dont_uppercase: name = name.upper() name = libfreqs._lite_rule_alias.get(name, name) name = libfreqs._lite_rule_alias.get(name.lower(), name) else: name = libfreqs._lite_rule_alias.get(name, name) if name not in _offset_map: try: split = name.split('-') klass = prefix_mapping[split[0]] # handles case where there's no suffix (and will TypeError if too # many '-') offset = klass._from_name(*split[1:]) except (ValueError, TypeError, KeyError): # bad prefix or suffix raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(name)) # cache _offset_map[name] = offset return _offset_map[name]
def get_offset(name): """ Return DateOffset object associated with rule name Examples -------- get_offset('EOM') --> BMonthEnd(1) """ if name not in libfreqs._dont_uppercase: name = name.upper() name = libfreqs._lite_rule_alias.get(name, name) name = libfreqs._lite_rule_alias.get(name.lower(), name) else: name = libfreqs._lite_rule_alias.get(name, name) if name not in _offset_map: try: split = name.split('-') klass = prefix_mapping[split[0]] # handles case where there's no suffix (and will TypeError if too # many '-') offset = klass._from_name(*split[1:]) except (ValueError, TypeError, KeyError): # bad prefix or suffix raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(name)) # cache _offset_map[name] = offset return _offset_map[name]
[ "Return", "DateOffset", "object", "associated", "with", "rule", "name" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/frequencies.py#L167-L195
[ "def", "get_offset", "(", "name", ")", ":", "if", "name", "not", "in", "libfreqs", ".", "_dont_uppercase", ":", "name", "=", "name", ".", "upper", "(", ")", "name", "=", "libfreqs", ".", "_lite_rule_alias", ".", "get", "(", "name", ",", "name", ")", "name", "=", "libfreqs", ".", "_lite_rule_alias", ".", "get", "(", "name", ".", "lower", "(", ")", ",", "name", ")", "else", ":", "name", "=", "libfreqs", ".", "_lite_rule_alias", ".", "get", "(", "name", ",", "name", ")", "if", "name", "not", "in", "_offset_map", ":", "try", ":", "split", "=", "name", ".", "split", "(", "'-'", ")", "klass", "=", "prefix_mapping", "[", "split", "[", "0", "]", "]", "# handles case where there's no suffix (and will TypeError if too", "# many '-')", "offset", "=", "klass", ".", "_from_name", "(", "*", "split", "[", "1", ":", "]", ")", "except", "(", "ValueError", ",", "TypeError", ",", "KeyError", ")", ":", "# bad prefix or suffix", "raise", "ValueError", "(", "libfreqs", ".", "INVALID_FREQ_ERR_MSG", ".", "format", "(", "name", ")", ")", "# cache", "_offset_map", "[", "name", "]", "=", "offset", "return", "_offset_map", "[", "name", "]" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
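Indicative calls showing the prefix/suffix split on '-' and the error path (output reprs in comments are approximate):

from pandas.tseries.frequencies import get_offset

get_offset('W-MON')   # <Week: weekday=0>
get_offset('Q-NOV')   # <QuarterEnd: startingMonth=11>
get_offset('xyz')     # raises ValueError (invalid rule name)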
train
infer_freq
Infer the most likely frequency given the input index. If the frequency is uncertain, a warning will be printed. Parameters ---------- index : DatetimeIndex or TimedeltaIndex if passed a Series will use the values of the series (NOT THE INDEX) warn : boolean, default True Returns ------- str or None None if no discernible frequency Raises ------ TypeError if the index is not datetime-like ValueError if there are fewer than three values.
pandas/tseries/frequencies.py
def infer_freq(index, warn=True): """ Infer the most likely frequency given the input index. If the frequency is uncertain, a warning will be printed. Parameters ---------- index : DatetimeIndex or TimedeltaIndex if passed a Series will use the values of the series (NOT THE INDEX) warn : boolean, default True Returns ------- str or None None if no discernible frequency TypeError if the index is not datetime-like ValueError if there are less than three values. """ import pandas as pd if isinstance(index, ABCSeries): values = index._values if not (is_datetime64_dtype(values) or is_timedelta64_dtype(values) or values.dtype == object): raise TypeError("cannot infer freq from a non-convertible dtype " "on a Series of {dtype}".format(dtype=index.dtype)) index = values if is_period_arraylike(index): raise TypeError("PeriodIndex given. Check the `freq` attribute " "instead of using infer_freq.") elif is_timedelta64_dtype(index): # Allow TimedeltaIndex and TimedeltaArray inferer = _TimedeltaFrequencyInferer(index, warn=warn) return inferer.get_freq() if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex): if isinstance(index, (pd.Int64Index, pd.Float64Index)): raise TypeError("cannot infer freq from a non-convertible index " "type {type}".format(type=type(index))) index = index.values if not isinstance(index, pd.DatetimeIndex): try: index = pd.DatetimeIndex(index) except AmbiguousTimeError: index = pd.DatetimeIndex(index.asi8) inferer = _FrequencyInferer(index, warn=warn) return inferer.get_freq()
def infer_freq(index, warn=True): """ Infer the most likely frequency given the input index. If the frequency is uncertain, a warning will be printed. Parameters ---------- index : DatetimeIndex or TimedeltaIndex if passed a Series will use the values of the series (NOT THE INDEX) warn : boolean, default True Returns ------- str or None None if no discernible frequency TypeError if the index is not datetime-like ValueError if there are less than three values. """ import pandas as pd if isinstance(index, ABCSeries): values = index._values if not (is_datetime64_dtype(values) or is_timedelta64_dtype(values) or values.dtype == object): raise TypeError("cannot infer freq from a non-convertible dtype " "on a Series of {dtype}".format(dtype=index.dtype)) index = values if is_period_arraylike(index): raise TypeError("PeriodIndex given. Check the `freq` attribute " "instead of using infer_freq.") elif is_timedelta64_dtype(index): # Allow TimedeltaIndex and TimedeltaArray inferer = _TimedeltaFrequencyInferer(index, warn=warn) return inferer.get_freq() if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex): if isinstance(index, (pd.Int64Index, pd.Float64Index)): raise TypeError("cannot infer freq from a non-convertible index " "type {type}".format(type=type(index))) index = index.values if not isinstance(index, pd.DatetimeIndex): try: index = pd.DatetimeIndex(index) except AmbiguousTimeError: index = pd.DatetimeIndex(index.asi8) inferer = _FrequencyInferer(index, warn=warn) return inferer.get_freq()
[ "Infer", "the", "most", "likely", "frequency", "given", "the", "input", "index", ".", "If", "the", "frequency", "is", "uncertain", "a", "warning", "will", "be", "printed", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/frequencies.py#L202-L252
[ "def", "infer_freq", "(", "index", ",", "warn", "=", "True", ")", ":", "import", "pandas", "as", "pd", "if", "isinstance", "(", "index", ",", "ABCSeries", ")", ":", "values", "=", "index", ".", "_values", "if", "not", "(", "is_datetime64_dtype", "(", "values", ")", "or", "is_timedelta64_dtype", "(", "values", ")", "or", "values", ".", "dtype", "==", "object", ")", ":", "raise", "TypeError", "(", "\"cannot infer freq from a non-convertible dtype \"", "\"on a Series of {dtype}\"", ".", "format", "(", "dtype", "=", "index", ".", "dtype", ")", ")", "index", "=", "values", "if", "is_period_arraylike", "(", "index", ")", ":", "raise", "TypeError", "(", "\"PeriodIndex given. Check the `freq` attribute \"", "\"instead of using infer_freq.\"", ")", "elif", "is_timedelta64_dtype", "(", "index", ")", ":", "# Allow TimedeltaIndex and TimedeltaArray", "inferer", "=", "_TimedeltaFrequencyInferer", "(", "index", ",", "warn", "=", "warn", ")", "return", "inferer", ".", "get_freq", "(", ")", "if", "isinstance", "(", "index", ",", "pd", ".", "Index", ")", "and", "not", "isinstance", "(", "index", ",", "pd", ".", "DatetimeIndex", ")", ":", "if", "isinstance", "(", "index", ",", "(", "pd", ".", "Int64Index", ",", "pd", ".", "Float64Index", ")", ")", ":", "raise", "TypeError", "(", "\"cannot infer freq from a non-convertible index \"", "\"type {type}\"", ".", "format", "(", "type", "=", "type", "(", "index", ")", ")", ")", "index", "=", "index", ".", "values", "if", "not", "isinstance", "(", "index", ",", "pd", ".", "DatetimeIndex", ")", ":", "try", ":", "index", "=", "pd", ".", "DatetimeIndex", "(", "index", ")", "except", "AmbiguousTimeError", ":", "index", "=", "pd", ".", "DatetimeIndex", "(", "index", ".", "asi8", ")", "inferer", "=", "_FrequencyInferer", "(", "index", ",", "warn", "=", "warn", ")", "return", "inferer", ".", "get_freq", "(", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
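A quick sketch of the three input kinds handled above:

import pandas as pd
from pandas.tseries.frequencies import infer_freq

idx = pd.date_range('2019-01-01', periods=10, freq='7D')
infer_freq(idx)                  # '7D'
infer_freq(pd.Series(idx))       # also '7D': the values of the Series are used
infer_freq(pd.Index([1, 2, 3]))  # raises TypeError (non-convertible Int64Index)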
train
_FrequencyInferer.get_freq
Find the appropriate frequency string to describe the inferred frequency of self.values Returns ------- str or None
pandas/tseries/frequencies.py
def get_freq(self): """ Find the appropriate frequency string to describe the inferred frequency of self.values Returns ------- str or None """ if not self.is_monotonic or not self.index._is_unique: return None delta = self.deltas[0] if _is_multiple(delta, _ONE_DAY): return self._infer_daily_rule() # Business hourly, maybe. 17: one day / 65: one weekend if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]): return 'BH' # Possibly intraday frequency. Here we use the # original .asi8 values as the modified values # will not work around DST transitions. See #8772 elif not self.is_unique_asi8: return None delta = self.deltas_asi8[0] if _is_multiple(delta, _ONE_HOUR): # Hours return _maybe_add_count('H', delta / _ONE_HOUR) elif _is_multiple(delta, _ONE_MINUTE): # Minutes return _maybe_add_count('T', delta / _ONE_MINUTE) elif _is_multiple(delta, _ONE_SECOND): # Seconds return _maybe_add_count('S', delta / _ONE_SECOND) elif _is_multiple(delta, _ONE_MILLI): # Milliseconds return _maybe_add_count('L', delta / _ONE_MILLI) elif _is_multiple(delta, _ONE_MICRO): # Microseconds return _maybe_add_count('U', delta / _ONE_MICRO) else: # Nanoseconds return _maybe_add_count('N', delta)
def get_freq(self): """ Find the appropriate frequency string to describe the inferred frequency of self.values Returns ------- str or None """ if not self.is_monotonic or not self.index._is_unique: return None delta = self.deltas[0] if _is_multiple(delta, _ONE_DAY): return self._infer_daily_rule() # Business hourly, maybe. 17: one day / 65: one weekend if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]): return 'BH' # Possibly intraday frequency. Here we use the # original .asi8 values as the modified values # will not work around DST transitions. See #8772 elif not self.is_unique_asi8: return None delta = self.deltas_asi8[0] if _is_multiple(delta, _ONE_HOUR): # Hours return _maybe_add_count('H', delta / _ONE_HOUR) elif _is_multiple(delta, _ONE_MINUTE): # Minutes return _maybe_add_count('T', delta / _ONE_MINUTE) elif _is_multiple(delta, _ONE_SECOND): # Seconds return _maybe_add_count('S', delta / _ONE_SECOND) elif _is_multiple(delta, _ONE_MILLI): # Milliseconds return _maybe_add_count('L', delta / _ONE_MILLI) elif _is_multiple(delta, _ONE_MICRO): # Microseconds return _maybe_add_count('U', delta / _ONE_MICRO) else: # Nanoseconds return _maybe_add_count('N', delta)
[ "Find", "the", "appropriate", "frequency", "string", "to", "describe", "the", "inferred", "frequency", "of", "self", ".", "values" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/frequencies.py#L294-L337
[ "def", "get_freq", "(", "self", ")", ":", "if", "not", "self", ".", "is_monotonic", "or", "not", "self", ".", "index", ".", "_is_unique", ":", "return", "None", "delta", "=", "self", ".", "deltas", "[", "0", "]", "if", "_is_multiple", "(", "delta", ",", "_ONE_DAY", ")", ":", "return", "self", ".", "_infer_daily_rule", "(", ")", "# Business hourly, maybe. 17: one day / 65: one weekend", "if", "self", ".", "hour_deltas", "in", "(", "[", "1", ",", "17", "]", ",", "[", "1", ",", "65", "]", ",", "[", "1", ",", "17", ",", "65", "]", ")", ":", "return", "'BH'", "# Possibly intraday frequency. Here we use the", "# original .asi8 values as the modified values", "# will not work around DST transitions. See #8772", "elif", "not", "self", ".", "is_unique_asi8", ":", "return", "None", "delta", "=", "self", ".", "deltas_asi8", "[", "0", "]", "if", "_is_multiple", "(", "delta", ",", "_ONE_HOUR", ")", ":", "# Hours", "return", "_maybe_add_count", "(", "'H'", ",", "delta", "/", "_ONE_HOUR", ")", "elif", "_is_multiple", "(", "delta", ",", "_ONE_MINUTE", ")", ":", "# Minutes", "return", "_maybe_add_count", "(", "'T'", ",", "delta", "/", "_ONE_MINUTE", ")", "elif", "_is_multiple", "(", "delta", ",", "_ONE_SECOND", ")", ":", "# Seconds", "return", "_maybe_add_count", "(", "'S'", ",", "delta", "/", "_ONE_SECOND", ")", "elif", "_is_multiple", "(", "delta", ",", "_ONE_MILLI", ")", ":", "# Milliseconds", "return", "_maybe_add_count", "(", "'L'", ",", "delta", "/", "_ONE_MILLI", ")", "elif", "_is_multiple", "(", "delta", ",", "_ONE_MICRO", ")", ":", "# Microseconds", "return", "_maybe_add_count", "(", "'U'", ",", "delta", "/", "_ONE_MICRO", ")", "else", ":", "# Nanoseconds", "return", "_maybe_add_count", "(", "'N'", ",", "delta", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
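The intraday branches above can be exercised through the public wrapper; for example, a regular 250-millisecond index should land in the _ONE_MILLI branch (the comment shows the indicative result):

import pandas as pd

idx = pd.date_range('2019-01-01', periods=50, freq='250L')
pd.infer_freq(idx)   # '250L'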
train
load
load a pickle, with a provided encoding. Fake the old class hierarchy and, if it works, return the new-type objects. Parameters ---------- fh : a filelike object encoding : an optional encoding is_verbose : show exception output
pandas/compat/pickle_compat.py
def load(fh, encoding=None, is_verbose=False): """load a pickle, with a provided encoding if compat is True: fake the old class hierarchy if it works, then return the new type objects Parameters ---------- fh : a filelike object encoding : an optional encoding is_verbose : show exception output """ try: fh.seek(0) if encoding is not None: up = Unpickler(fh, encoding=encoding) else: up = Unpickler(fh) up.is_verbose = is_verbose return up.load() except (ValueError, TypeError): raise
def load(fh, encoding=None, is_verbose=False): """load a pickle, with a provided encoding if compat is True: fake the old class hierarchy if it works, then return the new type objects Parameters ---------- fh : a filelike object encoding : an optional encoding is_verbose : show exception output """ try: fh.seek(0) if encoding is not None: up = Unpickler(fh, encoding=encoding) else: up = Unpickler(fh) up.is_verbose = is_verbose return up.load() except (ValueError, TypeError): raise
[ "load", "a", "pickle", "with", "a", "provided", "encoding" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/compat/pickle_compat.py#L189-L213
[ "def", "load", "(", "fh", ",", "encoding", "=", "None", ",", "is_verbose", "=", "False", ")", ":", "try", ":", "fh", ".", "seek", "(", "0", ")", "if", "encoding", "is", "not", "None", ":", "up", "=", "Unpickler", "(", "fh", ",", "encoding", "=", "encoding", ")", "else", ":", "up", "=", "Unpickler", "(", "fh", ")", "up", ".", "is_verbose", "=", "is_verbose", "return", "up", ".", "load", "(", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "raise" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
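A minimal sketch of the intended call pattern, assuming 'old_frame.pkl' was written by an earlier pandas; in user code the public entry point is pandas.read_pickle, which falls back to this loader when the plain pickle.load fails:

from pandas.compat.pickle_compat import load

with open('old_frame.pkl', 'rb') as fh:
    obj = load(fh, encoding='latin-1')  # custom Unpickler remaps legacy class paths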
train
_new_Index
This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__.
pandas/core/indexes/base.py
def _new_Index(cls, d): """ This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__. """ # required for backward compat, because PI can't be instantiated with # ordinals through __new__ GH #13277 if issubclass(cls, ABCPeriodIndex): from pandas.core.indexes.period import _new_PeriodIndex return _new_PeriodIndex(cls, **d) return cls.__new__(cls, **d)
def _new_Index(cls, d): """ This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__. """ # required for backward compat, because PI can't be instantiated with # ordinals through __new__ GH #13277 if issubclass(cls, ABCPeriodIndex): from pandas.core.indexes.period import _new_PeriodIndex return _new_PeriodIndex(cls, **d) return cls.__new__(cls, **d)
[ "This", "is", "called", "upon", "unpickling", "rather", "than", "the", "default", "which", "doesn", "t", "have", "arguments", "and", "breaks", "__new__", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L153-L163
[ "def", "_new_Index", "(", "cls", ",", "d", ")", ":", "# required for backward compat, because PI can't be instantiated with", "# ordinals through __new__ GH #13277", "if", "issubclass", "(", "cls", ",", "ABCPeriodIndex", ")", ":", "from", "pandas", ".", "core", ".", "indexes", ".", "period", "import", "_new_PeriodIndex", "return", "_new_PeriodIndex", "(", "cls", ",", "*", "*", "d", ")", "return", "cls", ".", "__new__", "(", "cls", ",", "*", "*", "d", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
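Pickling an index round-trips through this hook (Index.__reduce__ points unpickling at _new_Index), which is easiest to see with a PeriodIndex, the special case handled above:

import pickle
import pandas as pd

idx = pd.PeriodIndex(['2019-01', '2019-02'], freq='M')
restored = pickle.loads(pickle.dumps(idx))  # dispatches through _new_PeriodIndex
assert restored.equals(idx)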
train
ensure_index_from_sequences
Construct an index from sequences of data. A single sequence returns an Index. Many sequences returns a MultiIndex. Parameters ---------- sequences : sequence of sequences names : sequence of str Returns ------- index : Index or MultiIndex Examples -------- >>> ensure_index_from_sequences([[1, 2, 3]], names=['name']) Int64Index([1, 2, 3], dtype='int64', name='name') >>> ensure_index_from_sequences([['a', 'a'], ['a', 'b']], names=['L1', 'L2']) MultiIndex(levels=[['a'], ['a', 'b']], codes=[[0, 0], [0, 1]], names=['L1', 'L2']) See Also -------- ensure_index
pandas/core/indexes/base.py
def ensure_index_from_sequences(sequences, names=None): """ Construct an index from sequences of data. A single sequence returns an Index. Many sequences returns a MultiIndex. Parameters ---------- sequences : sequence of sequences names : sequence of str Returns ------- index : Index or MultiIndex Examples -------- >>> ensure_index_from_sequences([[1, 2, 3]], names=['name']) Int64Index([1, 2, 3], dtype='int64', name='name') >>> ensure_index_from_sequences([['a', 'a'], ['a', 'b']], names=['L1', 'L2']) MultiIndex(levels=[['a'], ['a', 'b']], codes=[[0, 0], [0, 1]], names=['L1', 'L2']) See Also -------- ensure_index """ from .multi import MultiIndex if len(sequences) == 1: if names is not None: names = names[0] return Index(sequences[0], name=names) else: return MultiIndex.from_arrays(sequences, names=names)
def ensure_index_from_sequences(sequences, names=None): """ Construct an index from sequences of data. A single sequence returns an Index. Many sequences returns a MultiIndex. Parameters ---------- sequences : sequence of sequences names : sequence of str Returns ------- index : Index or MultiIndex Examples -------- >>> ensure_index_from_sequences([[1, 2, 3]], names=['name']) Int64Index([1, 2, 3], dtype='int64', name='name') >>> ensure_index_from_sequences([['a', 'a'], ['a', 'b']], names=['L1', 'L2']) MultiIndex(levels=[['a'], ['a', 'b']], codes=[[0, 0], [0, 1]], names=['L1', 'L2']) See Also -------- ensure_index """ from .multi import MultiIndex if len(sequences) == 1: if names is not None: names = names[0] return Index(sequences[0], name=names) else: return MultiIndex.from_arrays(sequences, names=names)
[ "Construct", "an", "index", "from", "sequences", "of", "data", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5277-L5315
[ "def", "ensure_index_from_sequences", "(", "sequences", ",", "names", "=", "None", ")", ":", "from", ".", "multi", "import", "MultiIndex", "if", "len", "(", "sequences", ")", "==", "1", ":", "if", "names", "is", "not", "None", ":", "names", "=", "names", "[", "0", "]", "return", "Index", "(", "sequences", "[", "0", "]", ",", "name", "=", "names", ")", "else", ":", "return", "MultiIndex", ".", "from_arrays", "(", "sequences", ",", "names", "=", "names", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
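The one-sequence and many-sequence branches, beyond the doctests above (the import path used is the module where the function is defined):

from pandas.core.indexes.base import ensure_index_from_sequences

ensure_index_from_sequences([[1, 2, 3]], names=['id'])               # Int64Index named 'id'
ensure_index_from_sequences([[1, 1], ['a', 'b']], names=['k', 'v'])  # 2-level MultiIndex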
train
ensure_index
Ensure that we have an index from some index-like object. Parameters ---------- index_like : sequence An Index or other sequence copy : bool Returns ------- index : Index or MultiIndex Examples -------- >>> ensure_index(['a', 'b']) Index(['a', 'b'], dtype='object') >>> ensure_index([('a', 'a'), ('b', 'c')]) Index([('a', 'a'), ('b', 'c')], dtype='object') >>> ensure_index([['a', 'a'], ['b', 'c']]) MultiIndex(levels=[['a'], ['b', 'c']], codes=[[0, 0], [0, 1]]) See Also -------- ensure_index_from_sequences
pandas/core/indexes/base.py
def ensure_index(index_like, copy=False): """ Ensure that we have an index from some index-like object. Parameters ---------- index : sequence An Index or other sequence copy : bool Returns ------- index : Index or MultiIndex Examples -------- >>> ensure_index(['a', 'b']) Index(['a', 'b'], dtype='object') >>> ensure_index([('a', 'a'), ('b', 'c')]) Index([('a', 'a'), ('b', 'c')], dtype='object') >>> ensure_index([['a', 'a'], ['b', 'c']]) MultiIndex(levels=[['a'], ['b', 'c']], codes=[[0, 0], [0, 1]]) See Also -------- ensure_index_from_sequences """ if isinstance(index_like, Index): if copy: index_like = index_like.copy() return index_like if hasattr(index_like, 'name'): return Index(index_like, name=index_like.name, copy=copy) if is_iterator(index_like): index_like = list(index_like) # must check for exactly list here because of strict type # check in clean_index_list if isinstance(index_like, list): if type(index_like) != list: index_like = list(index_like) converted, all_arrays = lib.clean_index_list(index_like) if len(converted) > 0 and all_arrays: from .multi import MultiIndex return MultiIndex.from_arrays(converted) else: index_like = converted else: # clean_index_list does the equivalent of copying # so only need to do this if not list instance if copy: from copy import copy index_like = copy(index_like) return Index(index_like)
def ensure_index(index_like, copy=False): """ Ensure that we have an index from some index-like object. Parameters ---------- index : sequence An Index or other sequence copy : bool Returns ------- index : Index or MultiIndex Examples -------- >>> ensure_index(['a', 'b']) Index(['a', 'b'], dtype='object') >>> ensure_index([('a', 'a'), ('b', 'c')]) Index([('a', 'a'), ('b', 'c')], dtype='object') >>> ensure_index([['a', 'a'], ['b', 'c']]) MultiIndex(levels=[['a'], ['b', 'c']], codes=[[0, 0], [0, 1]]) See Also -------- ensure_index_from_sequences """ if isinstance(index_like, Index): if copy: index_like = index_like.copy() return index_like if hasattr(index_like, 'name'): return Index(index_like, name=index_like.name, copy=copy) if is_iterator(index_like): index_like = list(index_like) # must check for exactly list here because of strict type # check in clean_index_list if isinstance(index_like, list): if type(index_like) != list: index_like = list(index_like) converted, all_arrays = lib.clean_index_list(index_like) if len(converted) > 0 and all_arrays: from .multi import MultiIndex return MultiIndex.from_arrays(converted) else: index_like = converted else: # clean_index_list does the equivalent of copying # so only need to do this if not list instance if copy: from copy import copy index_like = copy(index_like) return Index(index_like)
[ "Ensure", "that", "we", "have", "an", "index", "from", "some", "index", "-", "like", "object", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5318-L5378
[ "def", "ensure_index", "(", "index_like", ",", "copy", "=", "False", ")", ":", "if", "isinstance", "(", "index_like", ",", "Index", ")", ":", "if", "copy", ":", "index_like", "=", "index_like", ".", "copy", "(", ")", "return", "index_like", "if", "hasattr", "(", "index_like", ",", "'name'", ")", ":", "return", "Index", "(", "index_like", ",", "name", "=", "index_like", ".", "name", ",", "copy", "=", "copy", ")", "if", "is_iterator", "(", "index_like", ")", ":", "index_like", "=", "list", "(", "index_like", ")", "# must check for exactly list here because of strict type", "# check in clean_index_list", "if", "isinstance", "(", "index_like", ",", "list", ")", ":", "if", "type", "(", "index_like", ")", "!=", "list", ":", "index_like", "=", "list", "(", "index_like", ")", "converted", ",", "all_arrays", "=", "lib", ".", "clean_index_list", "(", "index_like", ")", "if", "len", "(", "converted", ")", ">", "0", "and", "all_arrays", ":", "from", ".", "multi", "import", "MultiIndex", "return", "MultiIndex", ".", "from_arrays", "(", "converted", ")", "else", ":", "index_like", "=", "converted", "else", ":", "# clean_index_list does the equivalent of copying", "# so only need to do this if not list instance", "if", "copy", ":", "from", "copy", "import", "copy", "index_like", "=", "copy", "(", "index_like", ")", "return", "Index", "(", "index_like", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
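A sketch of the no-copy fast path versus the copying path:

import pandas as pd
from pandas.core.indexes.base import ensure_index

idx = pd.Index([1, 2, 3])
ensure_index(idx) is idx             # True: already an Index, returned as-is
ensure_index(idx, copy=True) is idx  # False: an explicit copy is made
ensure_index(['a', 'b'])             # Index(['a', 'b'], dtype='object')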
train
_trim_front
Trims leading spaces that are common to all strings.
pandas/core/indexes/base.py
def _trim_front(strings):
    """
    Trims leading spaces that are common to all strings.
    """
    trimmed = strings
    while len(strings) > 0 and all(x[0] == ' ' for x in trimmed):
        trimmed = [x[1:] for x in trimmed]
    return trimmed
def _trim_front(strings):
    """
    Trims leading spaces that are common to all strings.
    """
    trimmed = strings
    while len(strings) > 0 and all(x[0] == ' ' for x in trimmed):
        trimmed = [x[1:] for x in trimmed]
    return trimmed
[ "Trims", "leading", "spaces", "that", "are", "common", "to", "all", "strings", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5393-L5400
[ "def", "_trim_front", "(", "strings", ")", ":", "trimmed", "=", "strings", "while", "len", "(", "strings", ")", ">", "0", "and", "all", "(", "x", "[", "0", "]", "==", "' '", "for", "x", "in", "trimmed", ")", ":", "trimmed", "=", "[", "x", "[", "1", ":", "]", "for", "x", "in", "trimmed", "]", "return", "trimmed" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
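Behavior sketch: only space columns shared by every string are removed, one column at a time:

from pandas.core.indexes.base import _trim_front

_trim_front(['  10', '  200'])  # ['10', '200']
_trim_front([' a', '  b'])      # ['a', ' b']  (stops once any first char is non-space)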
train
Index._simple_new
We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. Must be careful not to recurse.
pandas/core/indexes/base.py
def _simple_new(cls, values, name=None, dtype=None, **kwargs): """ We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. Must be careful not to recurse. """ if not hasattr(values, 'dtype'): if (values is None or not len(values)) and dtype is not None: values = np.empty(0, dtype=dtype) else: values = np.array(values, copy=False) if is_object_dtype(values): values = cls(values, name=name, dtype=dtype, **kwargs)._ndarray_values if isinstance(values, (ABCSeries, ABCIndexClass)): # Index._data must always be an ndarray. # This is no-copy for when _values is an ndarray, # which should be always at this point. values = np.asarray(values._values) result = object.__new__(cls) result._data = values # _index_data is a (temporary?) fix to ensure that the direct data # manipulation we do in `_libs/reduction.pyx` continues to work. # We need access to the actual ndarray, since we're messing with # data buffers and strides. We don't re-use `_ndarray_values`, since # we actually set this value too. result._index_data = values result.name = name for k, v in kwargs.items(): setattr(result, k, v) return result._reset_identity()
def _simple_new(cls, values, name=None, dtype=None, **kwargs): """ We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. Must be careful not to recurse. """ if not hasattr(values, 'dtype'): if (values is None or not len(values)) and dtype is not None: values = np.empty(0, dtype=dtype) else: values = np.array(values, copy=False) if is_object_dtype(values): values = cls(values, name=name, dtype=dtype, **kwargs)._ndarray_values if isinstance(values, (ABCSeries, ABCIndexClass)): # Index._data must always be an ndarray. # This is no-copy for when _values is an ndarray, # which should be always at this point. values = np.asarray(values._values) result = object.__new__(cls) result._data = values # _index_data is a (temporary?) fix to ensure that the direct data # manipulation we do in `_libs/reduction.pyx` continues to work. # We need access to the actual ndarray, since we're messing with # data buffers and strides. We don't re-use `_ndarray_values`, since # we actually set this value too. result._index_data = values result.name = name for k, v in kwargs.items(): setattr(result, k, v) return result._reset_identity()
[ "We", "require", "that", "we", "have", "a", "dtype", "compat", "for", "the", "values", ".", "If", "we", "are", "passed", "a", "non", "-", "dtype", "compat", "then", "coerce", "using", "the", "constructor", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L506-L539
[ "def", "_simple_new", "(", "cls", ",", "values", ",", "name", "=", "None", ",", "dtype", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "hasattr", "(", "values", ",", "'dtype'", ")", ":", "if", "(", "values", "is", "None", "or", "not", "len", "(", "values", ")", ")", "and", "dtype", "is", "not", "None", ":", "values", "=", "np", ".", "empty", "(", "0", ",", "dtype", "=", "dtype", ")", "else", ":", "values", "=", "np", ".", "array", "(", "values", ",", "copy", "=", "False", ")", "if", "is_object_dtype", "(", "values", ")", ":", "values", "=", "cls", "(", "values", ",", "name", "=", "name", ",", "dtype", "=", "dtype", ",", "*", "*", "kwargs", ")", ".", "_ndarray_values", "if", "isinstance", "(", "values", ",", "(", "ABCSeries", ",", "ABCIndexClass", ")", ")", ":", "# Index._data must always be an ndarray.", "# This is no-copy for when _values is an ndarray,", "# which should be always at this point.", "values", "=", "np", ".", "asarray", "(", "values", ".", "_values", ")", "result", "=", "object", ".", "__new__", "(", "cls", ")", "result", ".", "_data", "=", "values", "# _index_data is a (temporary?) fix to ensure that the direct data", "# manipulation we do in `_libs/reduction.pyx` continues to work.", "# We need access to the actual ndarray, since we're messing with", "# data buffers and strides. We don't re-use `_ndarray_values`, since", "# we actually set this value too.", "result", ".", "_index_data", "=", "values", "result", ".", "name", "=", "name", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "setattr", "(", "result", ",", "k", ",", "v", ")", "return", "result", ".", "_reset_identity", "(", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
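A minimal sketch of the `_simple_new` fast path shown above, at the pandas revision referenced by these records; `_simple_new` is a private constructor, so this is illustrative only, not a supported API:

import numpy as np
import pandas as pd

# _simple_new wraps an already-validated ndarray directly, skipping the
# inference that Index.__new__ performs, and exposes the same buffer
# through both _data and _index_data.
idx = pd.Index._simple_new(np.array([1, 2, 3], dtype='int64'), name='n')
print(idx.name)                      # 'n'
print(idx._index_data is idx._data)  # True: one underlying ndarray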
train
Index._shallow_copy_with_infer
Create a new Index inferring the class from the passed values, don't copy the data, use the same object attributes with passed in attributes taking precedence. *this is an internal non-public method* Parameters ---------- values : the values to create the new Index, optional kwargs : updates the default attributes for this Index
pandas/core/indexes/base.py
def _shallow_copy_with_infer(self, values, **kwargs):
        """
        Create a new Index inferring the class from the passed values, don't
        copy the data, use the same object attributes with passed in
        attributes taking precedence.

        *this is an internal non-public method*

        Parameters
        ----------
        values : the values to create the new Index, optional
        kwargs : updates the default attributes for this Index
        """
        attributes = self._get_attributes_dict()
        attributes.update(kwargs)
        attributes['copy'] = False
        if not len(values) and 'dtype' not in kwargs:
            attributes['dtype'] = self.dtype
        if self._infer_as_myclass:
            try:
                return self._constructor(values, **attributes)
            except (TypeError, ValueError):
                pass
        return Index(values, **attributes)
def _shallow_copy_with_infer(self, values, **kwargs):
        """
        Create a new Index inferring the class from the passed values, don't
        copy the data, use the same object attributes with passed in
        attributes taking precedence.

        *this is an internal non-public method*

        Parameters
        ----------
        values : the values to create the new Index, optional
        kwargs : updates the default attributes for this Index
        """
        attributes = self._get_attributes_dict()
        attributes.update(kwargs)
        attributes['copy'] = False
        if not len(values) and 'dtype' not in kwargs:
            attributes['dtype'] = self.dtype
        if self._infer_as_myclass:
            try:
                return self._constructor(values, **attributes)
            except (TypeError, ValueError):
                pass
        return Index(values, **attributes)
[ "Create", "a", "new", "Index", "inferring", "the", "class", "with", "passed", "value", "don", "t", "copy", "the", "data", "use", "the", "same", "object", "attributes", "with", "passed", "in", "attributes", "taking", "precedence", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L585-L608
[ "def", "_shallow_copy_with_infer", "(", "self", ",", "values", ",", "*", "*", "kwargs", ")", ":", "attributes", "=", "self", ".", "_get_attributes_dict", "(", ")", "attributes", ".", "update", "(", "kwargs", ")", "attributes", "[", "'copy'", "]", "=", "False", "if", "not", "len", "(", "values", ")", "and", "'dtype'", "not", "in", "kwargs", ":", "attributes", "[", "'dtype'", "]", "=", "self", ".", "dtype", "if", "self", ".", "_infer_as_myclass", ":", "try", ":", "return", "self", ".", "_constructor", "(", "values", ",", "*", "*", "attributes", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "pass", "return", "Index", "(", "values", ",", "*", "*", "attributes", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Index.is_
More flexible, faster check like ``is`` but that works through views. Note: this is *not* the same as ``Index.identical()``, which checks that metadata is also the same. Parameters ---------- other : object other object to compare against. Returns ------- True if both have same underlying data, False otherwise : bool
pandas/core/indexes/base.py
def is_(self, other): """ More flexible, faster check like ``is`` but that works through views. Note: this is *not* the same as ``Index.identical()``, which checks that metadata is also the same. Parameters ---------- other : object other object to compare against. Returns ------- True if both have same underlying data, False otherwise : bool """ # use something other than None to be clearer return self._id is getattr( other, '_id', Ellipsis) and self._id is not None
def is_(self, other): """ More flexible, faster check like ``is`` but that works through views. Note: this is *not* the same as ``Index.identical()``, which checks that metadata is also the same. Parameters ---------- other : object other object to compare against. Returns ------- True if both have same underlying data, False otherwise : bool """ # use something other than None to be clearer return self._id is getattr( other, '_id', Ellipsis) and self._id is not None
[ "More", "flexible", "faster", "check", "like", "is", "but", "that", "works", "through", "views", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L614-L632
[ "def", "is_", "(", "self", ",", "other", ")", ":", "# use something other than None to be clearer", "return", "self", ".", "_id", "is", "getattr", "(", "other", ",", "'_id'", ",", "Ellipsis", ")", "and", "self", ".", "_id", "is", "not", "None" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
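A quick sketch of the contract described in the `is_` docstring, assuming a pandas vintage where `Index.view()` preserves the identity token:

import pandas as pd

idx = pd.Index([1, 2, 3], name='x')

# A view shares the identity (_id), so is_ returns True.
print(idx.is_(idx.view()))     # True

# A copy gets a fresh identity, even though it compares equal.
print(idx.is_(idx.copy()))     # False
print(idx.equals(idx.copy()))  # True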
train
Index._assert_take_fillable
Internal method to handle NA filling of take.
pandas/core/indexes/base.py
def _assert_take_fillable(self, values, indices, allow_fill=True, fill_value=None, na_value=np.nan): """ Internal method to handle NA filling of take. """ indices = ensure_platform_int(indices) # only fill if we are passing a non-None fill_value if allow_fill and fill_value is not None: if (indices < -1).any(): msg = ('When allow_fill=True and fill_value is not None, ' 'all indices must be >= -1') raise ValueError(msg) taken = algos.take(values, indices, allow_fill=allow_fill, fill_value=na_value) else: taken = values.take(indices) return taken
def _assert_take_fillable(self, values, indices, allow_fill=True, fill_value=None, na_value=np.nan): """ Internal method to handle NA filling of take. """ indices = ensure_platform_int(indices) # only fill if we are passing a non-None fill_value if allow_fill and fill_value is not None: if (indices < -1).any(): msg = ('When allow_fill=True and fill_value is not None, ' 'all indices must be >= -1') raise ValueError(msg) taken = algos.take(values, indices, allow_fill=allow_fill, fill_value=na_value) else: taken = values.take(indices) return taken
[ "Internal", "method", "to", "handle", "NA", "filling", "of", "take", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L803-L822
[ "def", "_assert_take_fillable", "(", "self", ",", "values", ",", "indices", ",", "allow_fill", "=", "True", ",", "fill_value", "=", "None", ",", "na_value", "=", "np", ".", "nan", ")", ":", "indices", "=", "ensure_platform_int", "(", "indices", ")", "# only fill if we are passing a non-None fill_value", "if", "allow_fill", "and", "fill_value", "is", "not", "None", ":", "if", "(", "indices", "<", "-", "1", ")", ".", "any", "(", ")", ":", "msg", "=", "(", "'When allow_fill=True and fill_value is not None, '", "'all indices must be >= -1'", ")", "raise", "ValueError", "(", "msg", ")", "taken", "=", "algos", ".", "take", "(", "values", ",", "indices", ",", "allow_fill", "=", "allow_fill", ",", "fill_value", "=", "na_value", ")", "else", ":", "taken", "=", "values", ".", "take", "(", "indices", ")", "return", "taken" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
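`_assert_take_fillable` is reached through the public `Index.take`; a small sketch of the filling rules (note that, in the code above, a non-None `fill_value` only switches filling on, while the placeholder actually written is `na_value`):

import pandas as pd

idx = pd.Index(['a', 'b', 'c'])

# Without filling, -1 is an ordinary position (the last element).
print(idx.take([0, -1]))
# Index(['a', 'c'], dtype='object')

# With allow_fill=True and a non-None fill_value, -1 marks a missing
# slot, which is filled with the index's NA value (NaN here).
print(idx.take([0, -1], allow_fill=True, fill_value='?'))
# Index(['a', nan], dtype='object')

# Indices below -1 are rejected in this mode.
try:
    idx.take([0, -2], allow_fill=True, fill_value='?')
except ValueError as e:
    print(e)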
train
Index._format_data
Return the formatted data as a unicode string.
pandas/core/indexes/base.py
def _format_data(self, name=None): """ Return the formatted data as a unicode string. """ # do we want to justify (only do so for non-objects) is_justify = not (self.inferred_type in ('string', 'unicode') or (self.inferred_type == 'categorical' and is_object_dtype(self.categories))) return format_object_summary(self, self._formatter_func, is_justify=is_justify, name=name)
def _format_data(self, name=None): """ Return the formatted data as a unicode string. """ # do we want to justify (only do so for non-objects) is_justify = not (self.inferred_type in ('string', 'unicode') or (self.inferred_type == 'categorical' and is_object_dtype(self.categories))) return format_object_summary(self, self._formatter_func, is_justify=is_justify, name=name)
[ "Return", "the", "formatted", "data", "as", "a", "unicode", "string", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L958-L969
[ "def", "_format_data", "(", "self", ",", "name", "=", "None", ")", ":", "# do we want to justify (only do so for non-objects)", "is_justify", "=", "not", "(", "self", ".", "inferred_type", "in", "(", "'string'", ",", "'unicode'", ")", "or", "(", "self", ".", "inferred_type", "==", "'categorical'", "and", "is_object_dtype", "(", "self", ".", "categories", ")", ")", ")", "return", "format_object_summary", "(", "self", ",", "self", ".", "_formatter_func", ",", "is_justify", "=", "is_justify", ",", "name", "=", "name", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Index.format
Render a string representation of the Index.
pandas/core/indexes/base.py
def format(self, name=False, formatter=None, **kwargs): """ Render a string representation of the Index. """ header = [] if name: header.append(pprint_thing(self.name, escape_chars=('\t', '\r', '\n')) if self.name is not None else '') if formatter is not None: return header + list(self.map(formatter)) return self._format_with_header(header, **kwargs)
def format(self, name=False, formatter=None, **kwargs): """ Render a string representation of the Index. """ header = [] if name: header.append(pprint_thing(self.name, escape_chars=('\t', '\r', '\n')) if self.name is not None else '') if formatter is not None: return header + list(self.map(formatter)) return self._format_with_header(header, **kwargs)
[ "Render", "a", "string", "representation", "of", "the", "Index", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L981-L994
[ "def", "format", "(", "self", ",", "name", "=", "False", ",", "formatter", "=", "None", ",", "*", "*", "kwargs", ")", ":", "header", "=", "[", "]", "if", "name", ":", "header", ".", "append", "(", "pprint_thing", "(", "self", ".", "name", ",", "escape_chars", "=", "(", "'\\t'", ",", "'\\r'", ",", "'\\n'", ")", ")", "if", "self", ".", "name", "is", "not", "None", "else", "''", ")", "if", "formatter", "is", "not", "None", ":", "return", "header", "+", "list", "(", "self", ".", "map", "(", "formatter", ")", ")", "return", "self", ".", "_format_with_header", "(", "header", ",", "*", "*", "kwargs", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
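A short usage sketch for `format`; the header entry only appears when `name=True`:

import pandas as pd

idx = pd.Index(['a', 'b', 'c'], name='letters')

print(idx.format())                     # ['a', 'b', 'c']
print(idx.format(name=True))            # ['letters', 'a', 'b', 'c']
print(idx.format(formatter=str.upper))  # ['A', 'B', 'C']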
train
Index.to_native_types
Format specified values of `self` and return them. Parameters ---------- slicer : int, array-like An indexer into `self` that specifies which values are used in the formatting process. kwargs : dict Options for specifying how the values should be formatted. These options include the following: 1) na_rep : str The value that serves as a placeholder for NULL values 2) quoting : bool or None Whether or not there are quoted values in `self` 3) date_format : str The format used to represent date-like values
pandas/core/indexes/base.py
def to_native_types(self, slicer=None, **kwargs): """ Format specified values of `self` and return them. Parameters ---------- slicer : int, array-like An indexer into `self` that specifies which values are used in the formatting process. kwargs : dict Options for specifying how the values should be formatted. These options include the following: 1) na_rep : str The value that serves as a placeholder for NULL values 2) quoting : bool or None Whether or not there are quoted values in `self` 3) date_format : str The format used to represent date-like values """ values = self if slicer is not None: values = values[slicer] return values._format_native_types(**kwargs)
def to_native_types(self, slicer=None, **kwargs): """ Format specified values of `self` and return them. Parameters ---------- slicer : int, array-like An indexer into `self` that specifies which values are used in the formatting process. kwargs : dict Options for specifying how the values should be formatted. These options include the following: 1) na_rep : str The value that serves as a placeholder for NULL values 2) quoting : bool or None Whether or not there are quoted values in `self` 3) date_format : str The format used to represent date-like values """ values = self if slicer is not None: values = values[slicer] return values._format_native_types(**kwargs)
[ "Format", "specified", "values", "of", "self", "and", "return", "them", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1022-L1046
[ "def", "to_native_types", "(", "self", ",", "slicer", "=", "None", ",", "*", "*", "kwargs", ")", ":", "values", "=", "self", "if", "slicer", "is", "not", "None", ":", "values", "=", "values", "[", "slicer", "]", "return", "values", ".", "_format_native_types", "(", "*", "*", "kwargs", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
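A sketch of `to_native_types` with the options described above, assuming an object-dtype index:

import numpy as np
import pandas as pd

idx = pd.Index(['a', np.nan, 'c'])

# NaN values are replaced by na_rep in the formatted output.
print(idx.to_native_types(na_rep='NULL'))
# ['a' 'NULL' 'c']

# slicer restricts which positions get formatted.
print(idx.to_native_types(slicer=slice(0, 2), na_rep='NULL'))
# ['a' 'NULL']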
train
Index._format_native_types
Actually format specific types of the index.
pandas/core/indexes/base.py
def _format_native_types(self, na_rep='', quoting=None, **kwargs): """ Actually format specific types of the index. """ mask = isna(self) if not self.is_object() and not quoting: values = np.asarray(self).astype(str) else: values = np.array(self, dtype=object, copy=True) values[mask] = na_rep return values
def _format_native_types(self, na_rep='', quoting=None, **kwargs): """ Actually format specific types of the index. """ mask = isna(self) if not self.is_object() and not quoting: values = np.asarray(self).astype(str) else: values = np.array(self, dtype=object, copy=True) values[mask] = na_rep return values
[ "Actually", "format", "specific", "types", "of", "the", "index", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1048-L1059
[ "def", "_format_native_types", "(", "self", ",", "na_rep", "=", "''", ",", "quoting", "=", "None", ",", "*", "*", "kwargs", ")", ":", "mask", "=", "isna", "(", "self", ")", "if", "not", "self", ".", "is_object", "(", ")", "and", "not", "quoting", ":", "values", "=", "np", ".", "asarray", "(", "self", ")", ".", "astype", "(", "str", ")", "else", ":", "values", "=", "np", ".", "array", "(", "self", ",", "dtype", "=", "object", ",", "copy", "=", "True", ")", "values", "[", "mask", "]", "=", "na_rep", "return", "values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Index._summary
Return a summarized representation. Parameters ---------- name : str name to use in the summary representation Returns ------- String with a summarized representation of the index
pandas/core/indexes/base.py
def _summary(self, name=None): """ Return a summarized representation. Parameters ---------- name : str name to use in the summary representation Returns ------- String with a summarized representation of the index """ if len(self) > 0: head = self[0] if hasattr(head, 'format') and not isinstance(head, str): head = head.format() tail = self[-1] if hasattr(tail, 'format') and not isinstance(tail, str): tail = tail.format() index_summary = ', %s to %s' % (pprint_thing(head), pprint_thing(tail)) else: index_summary = '' if name is None: name = type(self).__name__ return '%s: %s entries%s' % (name, len(self), index_summary)
def _summary(self, name=None): """ Return a summarized representation. Parameters ---------- name : str name to use in the summary representation Returns ------- String with a summarized representation of the index """ if len(self) > 0: head = self[0] if hasattr(head, 'format') and not isinstance(head, str): head = head.format() tail = self[-1] if hasattr(tail, 'format') and not isinstance(tail, str): tail = tail.format() index_summary = ', %s to %s' % (pprint_thing(head), pprint_thing(tail)) else: index_summary = '' if name is None: name = type(self).__name__ return '%s: %s entries%s' % (name, len(self), index_summary)
[ "Return", "a", "summarized", "representation", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1061-L1088
[ "def", "_summary", "(", "self", ",", "name", "=", "None", ")", ":", "if", "len", "(", "self", ")", ">", "0", ":", "head", "=", "self", "[", "0", "]", "if", "hasattr", "(", "head", ",", "'format'", ")", "and", "not", "isinstance", "(", "head", ",", "str", ")", ":", "head", "=", "head", ".", "format", "(", ")", "tail", "=", "self", "[", "-", "1", "]", "if", "hasattr", "(", "tail", ",", "'format'", ")", "and", "not", "isinstance", "(", "tail", ",", "str", ")", ":", "tail", "=", "tail", ".", "format", "(", ")", "index_summary", "=", "', %s to %s'", "%", "(", "pprint_thing", "(", "head", ")", ",", "pprint_thing", "(", "tail", ")", ")", "else", ":", "index_summary", "=", "''", "if", "name", "is", "None", ":", "name", "=", "type", "(", "self", ")", ".", "__name__", "return", "'%s: %s entries%s'", "%", "(", "name", ",", "len", "(", "self", ")", ",", "index_summary", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Index.summary
Return a summarized representation. .. deprecated:: 0.23.0
pandas/core/indexes/base.py
def summary(self, name=None): """ Return a summarized representation. .. deprecated:: 0.23.0 """ warnings.warn("'summary' is deprecated and will be removed in a " "future version.", FutureWarning, stacklevel=2) return self._summary(name)
def summary(self, name=None): """ Return a summarized representation. .. deprecated:: 0.23.0 """ warnings.warn("'summary' is deprecated and will be removed in a " "future version.", FutureWarning, stacklevel=2) return self._summary(name)
[ "Return", "a", "summarized", "representation", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1090-L1098
[ "def", "summary", "(", "self", ",", "name", "=", "None", ")", ":", "warnings", ".", "warn", "(", "\"'summary' is deprecated and will be removed in a \"", "\"future version.\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "_summary", "(", "name", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
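A sketch of the summary string built by `_summary` (the deprecated public `summary` simply forwards to it); the class name shown assumes integers materialize as an Int64Index at this pandas vintage:

import pandas as pd

idx = pd.Index([10, 20, 30])
print(idx._summary())               # Int64Index: 3 entries, 10 to 30
print(idx._summary(name='my idx'))  # my idx: 3 entries, 10 to 30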
train
Index.to_series
Create a Series with both index and values equal to the index keys, useful with map for returning an indexer based on an index. Parameters ---------- index : Index, optional index of resulting Series. If None, defaults to original index name : string, optional name of resulting Series. If None, defaults to name of original index Returns ------- Series : dtype will be based on the type of the Index values.
pandas/core/indexes/base.py
def to_series(self, index=None, name=None):
        """
        Create a Series with both index and values equal to the index keys,
        useful with map for returning an indexer based on an index.

        Parameters
        ----------
        index : Index, optional
            index of resulting Series. If None, defaults to original index
        name : string, optional
            name of resulting Series. If None, defaults to name of original
            index

        Returns
        -------
        Series : dtype will be based on the type of the Index values.
        """

        from pandas import Series

        if index is None:
            index = self._shallow_copy()
        if name is None:
            name = self.name

        return Series(self.values.copy(), index=index, name=name)
def to_series(self, index=None, name=None):
        """
        Create a Series with both index and values equal to the index keys,
        useful with map for returning an indexer based on an index.

        Parameters
        ----------
        index : Index, optional
            index of resulting Series. If None, defaults to original index
        name : string, optional
            name of resulting Series. If None, defaults to name of original
            index

        Returns
        -------
        Series : dtype will be based on the type of the Index values.
        """

        from pandas import Series

        if index is None:
            index = self._shallow_copy()
        if name is None:
            name = self.name

        return Series(self.values.copy(), index=index, name=name)
[ "Create", "a", "Series", "with", "both", "index", "and", "values", "equal", "to", "the", "index", "keys", "useful", "with", "map", "for", "returning", "an", "indexer", "based", "on", "an", "index", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1123-L1148
[ "def", "to_series", "(", "self", ",", "index", "=", "None", ",", "name", "=", "None", ")", ":", "from", "pandas", "import", "Series", "if", "index", "is", "None", ":", "index", "=", "self", ".", "_shallow_copy", "(", ")", "if", "name", "is", "None", ":", "name", "=", "self", ".", "name", "return", "Series", "(", "self", ".", "values", ".", "copy", "(", ")", ",", "index", "=", "index", ",", "name", "=", "name", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
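A small usage sketch for `to_series`:

import pandas as pd

idx = pd.Index(['a', 'b', 'c'], name='letters')
s = idx.to_series()

# Index and values are both the index keys, which makes the Series a
# handy mapping table for Series.map or DataFrame.rename.
print(s)
print(s.index.equals(idx))  # True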
train
Index.to_frame
Create a DataFrame with a column containing the Index. .. versionadded:: 0.24.0 Parameters ---------- index : boolean, default True Set the index of the returned DataFrame as the original Index. name : object, default None The passed name should substitute for the index name (if it has one). Returns ------- DataFrame DataFrame containing the original Index data. See Also -------- Index.to_series : Convert an Index to a Series. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') >>> idx.to_frame() animal animal Ant Ant Bear Bear Cow Cow By default, the original Index is reused. To enforce a new Index: >>> idx.to_frame(index=False) animal 0 Ant 1 Bear 2 Cow To override the name of the resulting column, specify `name`: >>> idx.to_frame(index=False, name='zoo') zoo 0 Ant 1 Bear 2 Cow
pandas/core/indexes/base.py
def to_frame(self, index=True, name=None): """ Create a DataFrame with a column containing the Index. .. versionadded:: 0.24.0 Parameters ---------- index : boolean, default True Set the index of the returned DataFrame as the original Index. name : object, default None The passed name should substitute for the index name (if it has one). Returns ------- DataFrame DataFrame containing the original Index data. See Also -------- Index.to_series : Convert an Index to a Series. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') >>> idx.to_frame() animal animal Ant Ant Bear Bear Cow Cow By default, the original Index is reused. To enforce a new Index: >>> idx.to_frame(index=False) animal 0 Ant 1 Bear 2 Cow To override the name of the resulting column, specify `name`: >>> idx.to_frame(index=False, name='zoo') zoo 0 Ant 1 Bear 2 Cow """ from pandas import DataFrame if name is None: name = self.name or 0 result = DataFrame({name: self._values.copy()}) if index: result.index = self return result
def to_frame(self, index=True, name=None): """ Create a DataFrame with a column containing the Index. .. versionadded:: 0.24.0 Parameters ---------- index : boolean, default True Set the index of the returned DataFrame as the original Index. name : object, default None The passed name should substitute for the index name (if it has one). Returns ------- DataFrame DataFrame containing the original Index data. See Also -------- Index.to_series : Convert an Index to a Series. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') >>> idx.to_frame() animal animal Ant Ant Bear Bear Cow Cow By default, the original Index is reused. To enforce a new Index: >>> idx.to_frame(index=False) animal 0 Ant 1 Bear 2 Cow To override the name of the resulting column, specify `name`: >>> idx.to_frame(index=False, name='zoo') zoo 0 Ant 1 Bear 2 Cow """ from pandas import DataFrame if name is None: name = self.name or 0 result = DataFrame({name: self._values.copy()}) if index: result.index = self return result
[ "Create", "a", "DataFrame", "with", "a", "column", "containing", "the", "Index", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1150-L1209
[ "def", "to_frame", "(", "self", ",", "index", "=", "True", ",", "name", "=", "None", ")", ":", "from", "pandas", "import", "DataFrame", "if", "name", "is", "None", ":", "name", "=", "self", ".", "name", "or", "0", "result", "=", "DataFrame", "(", "{", "name", ":", "self", ".", "_values", ".", "copy", "(", ")", "}", ")", "if", "index", ":", "result", ".", "index", "=", "self", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Index._validate_names
Handles the quirks of having a singular 'name' parameter for general Index and plural 'names' parameter for MultiIndex.
pandas/core/indexes/base.py
def _validate_names(self, name=None, names=None, deep=False): """ Handles the quirks of having a singular 'name' parameter for general Index and plural 'names' parameter for MultiIndex. """ from copy import deepcopy if names is not None and name is not None: raise TypeError("Can only provide one of `names` and `name`") elif names is None and name is None: return deepcopy(self.names) if deep else self.names elif names is not None: if not is_list_like(names): raise TypeError("Must pass list-like as `names`.") return names else: if not is_list_like(name): return [name] return name
def _validate_names(self, name=None, names=None, deep=False): """ Handles the quirks of having a singular 'name' parameter for general Index and plural 'names' parameter for MultiIndex. """ from copy import deepcopy if names is not None and name is not None: raise TypeError("Can only provide one of `names` and `name`") elif names is None and name is None: return deepcopy(self.names) if deep else self.names elif names is not None: if not is_list_like(names): raise TypeError("Must pass list-like as `names`.") return names else: if not is_list_like(name): return [name] return name
[ "Handles", "the", "quirks", "of", "having", "a", "singular", "name", "parameter", "for", "general", "Index", "and", "plural", "names", "parameter", "for", "MultiIndex", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1214-L1231
[ "def", "_validate_names", "(", "self", ",", "name", "=", "None", ",", "names", "=", "None", ",", "deep", "=", "False", ")", ":", "from", "copy", "import", "deepcopy", "if", "names", "is", "not", "None", "and", "name", "is", "not", "None", ":", "raise", "TypeError", "(", "\"Can only provide one of `names` and `name`\"", ")", "elif", "names", "is", "None", "and", "name", "is", "None", ":", "return", "deepcopy", "(", "self", ".", "names", ")", "if", "deep", "else", "self", ".", "names", "elif", "names", "is", "not", "None", ":", "if", "not", "is_list_like", "(", "names", ")", ":", "raise", "TypeError", "(", "\"Must pass list-like as `names`.\"", ")", "return", "names", "else", ":", "if", "not", "is_list_like", "(", "name", ")", ":", "return", "[", "name", "]", "return", "name" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
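`_validate_names` backs `Index.copy`, which accepts either the singular or the plural spelling at this pandas vintage; a sketch of the quirks it handles:

import pandas as pd

idx = pd.Index([1, 2], name='orig')

print(idx.copy(name='new').name)     # 'new'
print(idx.copy(names=['new']).name)  # 'new' -- plural spelling also works

# Passing both at once is rejected.
try:
    idx.copy(name='a', names=['b'])
except TypeError as e:
    print(e)  # Can only provide one of `names` and `name`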
train
Index._set_names
Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None Raises ------ TypeError if any name is not hashable.
pandas/core/indexes/base.py
def _set_names(self, values, level=None):
        """
        Set new names on index. Each name has to be a hashable type.

        Parameters
        ----------
        values : str or sequence
            name(s) to set
        level : int, level name, or sequence of int/level names (default None)
            If the index is a MultiIndex (hierarchical), level(s) to set
            (None for all levels).  Otherwise level must be None

        Raises
        ------
        TypeError if any name is not hashable.
        """
        if not is_list_like(values):
            raise ValueError('Names must be a list-like')
        if len(values) != 1:
            raise ValueError('Length of new names must be 1, got %d' %
                             len(values))

        # GH 20527
        # All items in 'name' need to be hashable:
        for name in values:
            if not is_hashable(name):
                raise TypeError('{}.name must be a hashable type'
                                .format(self.__class__.__name__))
        self.name = values[0]
def _set_names(self, values, level=None):
        """
        Set new names on index. Each name has to be a hashable type.

        Parameters
        ----------
        values : str or sequence
            name(s) to set
        level : int, level name, or sequence of int/level names (default None)
            If the index is a MultiIndex (hierarchical), level(s) to set
            (None for all levels).  Otherwise level must be None

        Raises
        ------
        TypeError if any name is not hashable.
        """
        if not is_list_like(values):
            raise ValueError('Names must be a list-like')
        if len(values) != 1:
            raise ValueError('Length of new names must be 1, got %d' %
                             len(values))

        # GH 20527
        # All items in 'name' need to be hashable:
        for name in values:
            if not is_hashable(name):
                raise TypeError('{}.name must be a hashable type'
                                .format(self.__class__.__name__))
        self.name = values[0]
[ "Set", "new", "names", "on", "index", ".", "Each", "name", "has", "to", "be", "a", "hashable", "type", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1236-L1264
[ "def", "_set_names", "(", "self", ",", "values", ",", "level", "=", "None", ")", ":", "if", "not", "is_list_like", "(", "values", ")", ":", "raise", "ValueError", "(", "'Names must be a list-like'", ")", "if", "len", "(", "values", ")", "!=", "1", ":", "raise", "ValueError", "(", "'Length of new names must be 1, got %d'", "%", "len", "(", "values", ")", ")", "# GH 20527", "# All items in 'name' need to be hashable:", "for", "name", "in", "values", ":", "if", "not", "is_hashable", "(", "name", ")", ":", "raise", "TypeError", "(", "'{}.name must be a hashable type'", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")", "self", ".", "name", "=", "values", "[", "0", "]" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Index.set_names
Set Index or MultiIndex name. Able to set new names partially and by level. Parameters ---------- names : label or list of label Name(s) to set. level : int, label or list of int or label, optional If the index is a MultiIndex, level(s) to set (None for all levels). Otherwise level must be None. inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index The same type as the caller or None if inplace is True. See Also -------- Index.rename : Able to set new names without level. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Int64Index([1, 2, 3, 4], dtype='int64') >>> idx.set_names('quarter') Int64Index([1, 2, 3, 4], dtype='int64', name='quarter') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]]) >>> idx MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]]) >>> idx.set_names(['kind', 'year'], inplace=True) >>> idx MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['kind', 'year']) >>> idx.set_names('species', level=0) MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['species', 'year'])
pandas/core/indexes/base.py
def set_names(self, names, level=None, inplace=False): """ Set Index or MultiIndex name. Able to set new names partially and by level. Parameters ---------- names : label or list of label Name(s) to set. level : int, label or list of int or label, optional If the index is a MultiIndex, level(s) to set (None for all levels). Otherwise level must be None. inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index The same type as the caller or None if inplace is True. See Also -------- Index.rename : Able to set new names without level. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Int64Index([1, 2, 3, 4], dtype='int64') >>> idx.set_names('quarter') Int64Index([1, 2, 3, 4], dtype='int64', name='quarter') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]]) >>> idx MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]]) >>> idx.set_names(['kind', 'year'], inplace=True) >>> idx MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['kind', 'year']) >>> idx.set_names('species', level=0) MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['species', 'year']) """ if level is not None and not isinstance(self, ABCMultiIndex): raise ValueError('Level must be None for non-MultiIndex') if level is not None and not is_list_like(level) and is_list_like( names): msg = "Names must be a string when a single level is provided." raise TypeError(msg) if not is_list_like(names) and level is None and self.nlevels > 1: raise TypeError("Must pass list-like as `names`.") if not is_list_like(names): names = [names] if level is not None and not is_list_like(level): level = [level] if inplace: idx = self else: idx = self._shallow_copy() idx._set_names(names, level=level) if not inplace: return idx
def set_names(self, names, level=None, inplace=False): """ Set Index or MultiIndex name. Able to set new names partially and by level. Parameters ---------- names : label or list of label Name(s) to set. level : int, label or list of int or label, optional If the index is a MultiIndex, level(s) to set (None for all levels). Otherwise level must be None. inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index The same type as the caller or None if inplace is True. See Also -------- Index.rename : Able to set new names without level. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Int64Index([1, 2, 3, 4], dtype='int64') >>> idx.set_names('quarter') Int64Index([1, 2, 3, 4], dtype='int64', name='quarter') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]]) >>> idx MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]]) >>> idx.set_names(['kind', 'year'], inplace=True) >>> idx MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['kind', 'year']) >>> idx.set_names('species', level=0) MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['species', 'year']) """ if level is not None and not isinstance(self, ABCMultiIndex): raise ValueError('Level must be None for non-MultiIndex') if level is not None and not is_list_like(level) and is_list_like( names): msg = "Names must be a string when a single level is provided." raise TypeError(msg) if not is_list_like(names) and level is None and self.nlevels > 1: raise TypeError("Must pass list-like as `names`.") if not is_list_like(names): names = [names] if level is not None and not is_list_like(level): level = [level] if inplace: idx = self else: idx = self._shallow_copy() idx._set_names(names, level=level) if not inplace: return idx
[ "Set", "Index", "or", "MultiIndex", "name", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1268-L1340
[ "def", "set_names", "(", "self", ",", "names", ",", "level", "=", "None", ",", "inplace", "=", "False", ")", ":", "if", "level", "is", "not", "None", "and", "not", "isinstance", "(", "self", ",", "ABCMultiIndex", ")", ":", "raise", "ValueError", "(", "'Level must be None for non-MultiIndex'", ")", "if", "level", "is", "not", "None", "and", "not", "is_list_like", "(", "level", ")", "and", "is_list_like", "(", "names", ")", ":", "msg", "=", "\"Names must be a string when a single level is provided.\"", "raise", "TypeError", "(", "msg", ")", "if", "not", "is_list_like", "(", "names", ")", "and", "level", "is", "None", "and", "self", ".", "nlevels", ">", "1", ":", "raise", "TypeError", "(", "\"Must pass list-like as `names`.\"", ")", "if", "not", "is_list_like", "(", "names", ")", ":", "names", "=", "[", "names", "]", "if", "level", "is", "not", "None", "and", "not", "is_list_like", "(", "level", ")", ":", "level", "=", "[", "level", "]", "if", "inplace", ":", "idx", "=", "self", "else", ":", "idx", "=", "self", ".", "_shallow_copy", "(", ")", "idx", ".", "_set_names", "(", "names", ",", "level", "=", "level", ")", "if", "not", "inplace", ":", "return", "idx" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Index.rename
Alter Index or MultiIndex name. Able to set new names without level. Defaults to returning new index. Length of names must match number of levels in MultiIndex. Parameters ---------- name : label or list of labels Name(s) to set. inplace : boolean, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index The same type as the caller or None if inplace is True. See Also -------- Index.set_names : Able to set new names partially and by level. Examples -------- >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score') >>> idx.rename('grade') Index(['A', 'C', 'A', 'B'], dtype='object', name='grade') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]], ... names=['kind', 'year']) >>> idx MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['kind', 'year']) >>> idx.rename(['species', 'year']) MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['species', 'year']) >>> idx.rename('species') Traceback (most recent call last): TypeError: Must pass list-like as `names`.
pandas/core/indexes/base.py
def rename(self, name, inplace=False): """ Alter Index or MultiIndex name. Able to set new names without level. Defaults to returning new index. Length of names must match number of levels in MultiIndex. Parameters ---------- name : label or list of labels Name(s) to set. inplace : boolean, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index The same type as the caller or None if inplace is True. See Also -------- Index.set_names : Able to set new names partially and by level. Examples -------- >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score') >>> idx.rename('grade') Index(['A', 'C', 'A', 'B'], dtype='object', name='grade') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]], ... names=['kind', 'year']) >>> idx MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['kind', 'year']) >>> idx.rename(['species', 'year']) MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['species', 'year']) >>> idx.rename('species') Traceback (most recent call last): TypeError: Must pass list-like as `names`. """ return self.set_names([name], inplace=inplace)
def rename(self, name, inplace=False): """ Alter Index or MultiIndex name. Able to set new names without level. Defaults to returning new index. Length of names must match number of levels in MultiIndex. Parameters ---------- name : label or list of labels Name(s) to set. inplace : boolean, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index The same type as the caller or None if inplace is True. See Also -------- Index.set_names : Able to set new names partially and by level. Examples -------- >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score') >>> idx.rename('grade') Index(['A', 'C', 'A', 'B'], dtype='object', name='grade') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]], ... names=['kind', 'year']) >>> idx MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['kind', 'year']) >>> idx.rename(['species', 'year']) MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['species', 'year']) >>> idx.rename('species') Traceback (most recent call last): TypeError: Must pass list-like as `names`. """ return self.set_names([name], inplace=inplace)
[ "Alter", "Index", "or", "MultiIndex", "name", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1342-L1387
[ "def", "rename", "(", "self", ",", "name", ",", "inplace", "=", "False", ")", ":", "return", "self", ".", "set_names", "(", "[", "name", "]", ",", "inplace", "=", "inplace", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Index._validate_index_level
Validate index level. For single-level Index getting level number is a no-op, but some verification must be done, as in MultiIndex.
pandas/core/indexes/base.py
def _validate_index_level(self, level):
        """
        Validate index level.

        For single-level Index getting level number is a no-op, but some
        verification must be done, as in MultiIndex.

        """
        if isinstance(level, int):
            if level < 0 and level != -1:
                raise IndexError("Too many levels: Index has only 1 level,"
                                 " %d is not a valid level number" %
                                 (level, ))
            elif level > 0:
                raise IndexError("Too many levels:"
                                 " Index has only 1 level, not %d" %
                                 (level + 1))
        elif level != self.name:
            raise KeyError('Level %s must be same as name (%s)' %
                           (level, self.name))
def _validate_index_level(self, level):
        """
        Validate index level.

        For single-level Index getting level number is a no-op, but some
        verification must be done, as in MultiIndex.

        """
        if isinstance(level, int):
            if level < 0 and level != -1:
                raise IndexError("Too many levels: Index has only 1 level,"
                                 " %d is not a valid level number" %
                                 (level, ))
            elif level > 0:
                raise IndexError("Too many levels:"
                                 " Index has only 1 level, not %d" %
                                 (level + 1))
        elif level != self.name:
            raise KeyError('Level %s must be same as name (%s)' %
                           (level, self.name))
[ "Validate", "index", "level", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1402-L1420
[ "def", "_validate_index_level", "(", "self", ",", "level", ")", ":", "if", "isinstance", "(", "level", ",", "int", ")", ":", "if", "level", "<", "0", "and", "level", "!=", "-", "1", ":", "raise", "IndexError", "(", "\"Too many levels: Index has only 1 level,\"", "\" %d is not a valid level number\"", "%", "(", "level", ",", ")", ")", "elif", "level", ">", "0", ":", "raise", "IndexError", "(", "\"Too many levels:\"", "\" Index has only 1 level, not %d\"", "%", "(", "level", "+", "1", ")", ")", "elif", "level", "!=", "self", ".", "name", ":", "raise", "KeyError", "(", "'Level %s must be same as name (%s)'", "%", "(", "level", ",", "self", ".", "name", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
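The checks in `_validate_index_level` surface through public methods that take a `level` argument, e.g. `Index.unique`; a sketch:

import pandas as pd

idx = pd.Index([1, 2, 2], name='x')

print(idx.unique(level=0))    # level 0 is the only level: fine
print(idx.unique(level='x'))  # matching the index name: fine

try:
    idx.unique(level=1)       # a flat Index has no level 1
except IndexError as e:
    print(e)

try:
    idx.unique(level='y')     # name mismatch
except KeyError as e:
    print(e)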
train
Index.sortlevel
For internal compatibility with the Index API. Sort the Index. This is for compat with MultiIndex. Parameters ---------- ascending : boolean, default True False to sort in descending order level, sort_remaining are compat parameters Returns ------- Index
pandas/core/indexes/base.py
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
        """
        For internal compatibility with the Index API.

        Sort the Index. This is for compat with MultiIndex.

        Parameters
        ----------
        ascending : boolean, default True
            False to sort in descending order

        level, sort_remaining are compat parameters

        Returns
        -------
        Index
        """
        return self.sort_values(return_indexer=True, ascending=ascending)
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
        """
        For internal compatibility with the Index API.

        Sort the Index. This is for compat with MultiIndex.

        Parameters
        ----------
        ascending : boolean, default True
            False to sort in descending order

        level, sort_remaining are compat parameters

        Returns
        -------
        Index
        """
        return self.sort_values(return_indexer=True, ascending=ascending)
[ "For", "internal", "compatibility", "with", "with", "the", "Index", "API", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1426-L1443
[ "def", "sortlevel", "(", "self", ",", "level", "=", "None", ",", "ascending", "=", "True", ",", "sort_remaining", "=", "None", ")", ":", "return", "self", ".", "sort_values", "(", "return_indexer", "=", "True", ",", "ascending", "=", "ascending", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
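Since `sortlevel` forwards to `sort_values(return_indexer=True, ...)` on a flat Index, it always returns the sorted index together with the positional indexer; a sketch:

import pandas as pd

idx = pd.Index([30, 10, 20])

sorted_idx, indexer = idx.sortlevel(ascending=True)
print(sorted_idx)  # Int64Index([10, 20, 30], dtype='int64')
print(indexer)     # positions in the original index: [1 2 0]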
train
Index.droplevel
Return index with requested level(s) removed. If the resulting index has only 1 level left, the result will be of Index type, not MultiIndex. .. versionadded:: 0.23.1 (support for non-MultiIndex) Parameters ---------- level : int, str, or list-like, default 0 If a string is given, must be the name of a level If list-like, elements must be names or indexes of levels. Returns ------- Index or MultiIndex
pandas/core/indexes/base.py
def droplevel(self, level=0):
        """
        Return index with requested level(s) removed.

        If the resulting index has only 1 level left, the result will be
        of Index type, not MultiIndex.

        .. versionadded:: 0.23.1 (support for non-MultiIndex)

        Parameters
        ----------
        level : int, str, or list-like, default 0
            If a string is given, must be the name of a level
            If list-like, elements must be names or indexes of levels.

        Returns
        -------
        Index or MultiIndex
        """
        if not isinstance(level, (tuple, list)):
            level = [level]

        levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]

        if len(level) == 0:
            return self
        if len(level) >= self.nlevels:
            raise ValueError("Cannot remove {} levels from an index with {} "
                             "levels: at least one level must be "
                             "left.".format(len(level), self.nlevels))
        # The two checks above guarantee that here self is a MultiIndex

        new_levels = list(self.levels)
        new_codes = list(self.codes)
        new_names = list(self.names)

        for i in levnums:
            new_levels.pop(i)
            new_codes.pop(i)
            new_names.pop(i)

        if len(new_levels) == 1:

            # set nan if needed
            mask = new_codes[0] == -1
            result = new_levels[0].take(new_codes[0])
            if mask.any():
                result = result.putmask(mask, np.nan)

            result.name = new_names[0]
            return result
        else:
            from .multi import MultiIndex
            return MultiIndex(levels=new_levels, codes=new_codes,
                              names=new_names, verify_integrity=False)
def droplevel(self, level=0):
        """
        Return index with requested level(s) removed.

        If the resulting index has only 1 level left, the result will be
        of Index type, not MultiIndex.

        .. versionadded:: 0.23.1 (support for non-MultiIndex)

        Parameters
        ----------
        level : int, str, or list-like, default 0
            If a string is given, must be the name of a level
            If list-like, elements must be names or indexes of levels.

        Returns
        -------
        Index or MultiIndex
        """
        if not isinstance(level, (tuple, list)):
            level = [level]

        levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]

        if len(level) == 0:
            return self
        if len(level) >= self.nlevels:
            raise ValueError("Cannot remove {} levels from an index with {} "
                             "levels: at least one level must be "
                             "left.".format(len(level), self.nlevels))
        # The two checks above guarantee that here self is a MultiIndex

        new_levels = list(self.levels)
        new_codes = list(self.codes)
        new_names = list(self.names)

        for i in levnums:
            new_levels.pop(i)
            new_codes.pop(i)
            new_names.pop(i)

        if len(new_levels) == 1:

            # set nan if needed
            mask = new_codes[0] == -1
            result = new_levels[0].take(new_codes[0])
            if mask.any():
                result = result.putmask(mask, np.nan)

            result.name = new_names[0]
            return result
        else:
            from .multi import MultiIndex
            return MultiIndex(levels=new_levels, codes=new_codes,
                              names=new_names, verify_integrity=False)
[ "Return", "index", "with", "requested", "level", "(", "s", ")", "removed", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1487-L1541
[ "def", "droplevel", "(", "self", ",", "level", "=", "0", ")", ":", "if", "not", "isinstance", "(", "level", ",", "(", "tuple", ",", "list", ")", ")", ":", "level", "=", "[", "level", "]", "levnums", "=", "sorted", "(", "self", ".", "_get_level_number", "(", "lev", ")", "for", "lev", "in", "level", ")", "[", ":", ":", "-", "1", "]", "if", "len", "(", "level", ")", "==", "0", ":", "return", "self", "if", "len", "(", "level", ")", ">=", "self", ".", "nlevels", ":", "raise", "ValueError", "(", "\"Cannot remove {} levels from an index with {} \"", "\"levels: at least one level must be \"", "\"left.\"", ".", "format", "(", "len", "(", "level", ")", ",", "self", ".", "nlevels", ")", ")", "# The two checks above guarantee that here self is a MultiIndex", "new_levels", "=", "list", "(", "self", ".", "levels", ")", "new_codes", "=", "list", "(", "self", ".", "codes", ")", "new_names", "=", "list", "(", "self", ".", "names", ")", "for", "i", "in", "levnums", ":", "new_levels", ".", "pop", "(", "i", ")", "new_codes", ".", "pop", "(", "i", ")", "new_names", ".", "pop", "(", "i", ")", "if", "len", "(", "new_levels", ")", "==", "1", ":", "# set nan if needed", "mask", "=", "new_codes", "[", "0", "]", "==", "-", "1", "result", "=", "new_levels", "[", "0", "]", ".", "take", "(", "new_codes", "[", "0", "]", ")", "if", "mask", ".", "any", "(", ")", ":", "result", "=", "result", ".", "putmask", "(", "mask", ",", "np", ".", "nan", ")", "result", ".", "name", "=", "new_names", "[", "0", "]", "return", "result", "else", ":", "from", ".", "multi", "import", "MultiIndex", "return", "MultiIndex", "(", "levels", "=", "new_levels", ",", "codes", "=", "new_codes", ",", "names", "=", "new_names", ",", "verify_integrity", "=", "False", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
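A short sketch of `droplevel` on a two-level MultiIndex, including the collapse to a flat Index described in the docstring:

import pandas as pd

mi = pd.MultiIndex.from_arrays([['a', 'b'], [1, 2]],
                               names=['letter', 'number'])

# Dropping one of two levels leaves a single level, so the result
# collapses to a plain Index rather than a MultiIndex.
print(mi.droplevel('letter'))
# Int64Index([1, 2], dtype='int64', name='number')

# Dropping every level is refused: at least one must remain.
try:
    mi.droplevel(['letter', 'number'])
except ValueError as e:
    print(e)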
train
Index._isnan
Return whether each value is NaN.
pandas/core/indexes/base.py
def _isnan(self):
        """
        Return whether each value is NaN.
        """
        if self._can_hold_na:
            return isna(self)
        else:
            # shouldn't reach to this condition by checking hasnans beforehand
            values = np.empty(len(self), dtype=np.bool_)
            values.fill(False)
            return values
def _isnan(self):
        """
        Return whether each value is NaN.
        """
        if self._can_hold_na:
            return isna(self)
        else:
            # shouldn't reach to this condition by checking hasnans beforehand
            values = np.empty(len(self), dtype=np.bool_)
            values.fill(False)
            return values
[ "Return", "if", "each", "value", "is", "NaN", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1782-L1792
[ "def", "_isnan", "(", "self", ")", ":", "if", "self", ".", "_can_hold_na", ":", "return", "isna", "(", "self", ")", "else", ":", "# shouldn't reach to this condition by checking hasnans beforehand", "values", "=", "np", ".", "empty", "(", "len", "(", "self", ")", ",", "dtype", "=", "np", ".", "bool_", ")", "values", ".", "fill", "(", "False", ")", "return", "values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
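`_isnan` is private; its behavior shows through `Index.hasnans` and the public `isna`. A sketch:

import numpy as np
import pandas as pd

idx = pd.Index([1.0, np.nan, 3.0])
print(idx._isnan)   # [False  True False] -- cached boolean mask
print(idx.hasnans)  # True

# An index type that cannot hold NA takes the all-False branch.
print(pd.Index([1, 2, 3])._isnan)  # [False False False]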
train
Index.get_duplicates
Extract duplicated index elements. .. deprecated:: 0.23.0 Use idx[idx.duplicated()].unique() instead Returns a sorted list of index elements which appear more than once in the index. Returns ------- array-like List of duplicated indexes. See Also -------- Index.duplicated : Return boolean array denoting duplicates. Index.drop_duplicates : Return Index with duplicates removed. Examples -------- Works on different types of Index. >>> pd.Index([1, 2, 2, 3, 3, 3, 4]).get_duplicates() # doctest: +SKIP [2, 3] Note that for a DatetimeIndex, it does not return a list but a new DatetimeIndex: >>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03', ... '2018-01-03', '2018-01-04', '2018-01-04'], ... format='%Y-%m-%d') >>> pd.Index(dates).get_duplicates() # doctest: +SKIP DatetimeIndex(['2018-01-03', '2018-01-04'], dtype='datetime64[ns]', freq=None) Sorts duplicated elements even when indexes are unordered. >>> pd.Index([1, 2, 3, 2, 3, 4, 3]).get_duplicates() # doctest: +SKIP [2, 3] Return empty array-like structure when all elements are unique. >>> pd.Index([1, 2, 3, 4]).get_duplicates() # doctest: +SKIP [] >>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03'], ... format='%Y-%m-%d') >>> pd.Index(dates).get_duplicates() # doctest: +SKIP DatetimeIndex([], dtype='datetime64[ns]', freq=None)
pandas/core/indexes/base.py
def get_duplicates(self):
        """
        Extract duplicated index elements.

        .. deprecated:: 0.23.0
            Use idx[idx.duplicated()].unique() instead

        Returns a sorted list of index elements which appear more than once in
        the index.

        Returns
        -------
        array-like
            List of duplicated indexes.

        See Also
        --------
        Index.duplicated : Return boolean array denoting duplicates.
        Index.drop_duplicates : Return Index with duplicates removed.

        Examples
        --------
        Works on different types of Index.

        >>> pd.Index([1, 2, 2, 3, 3, 3, 4]).get_duplicates()  # doctest: +SKIP
        [2, 3]

        Note that for a DatetimeIndex, it does not return a list but a new
        DatetimeIndex:

        >>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03',
        ...                         '2018-01-03', '2018-01-04', '2018-01-04'],
        ...                        format='%Y-%m-%d')
        >>> pd.Index(dates).get_duplicates()  # doctest: +SKIP
        DatetimeIndex(['2018-01-03', '2018-01-04'],
                      dtype='datetime64[ns]', freq=None)

        Sorts duplicated elements even when indexes are unordered.

        >>> pd.Index([1, 2, 3, 2, 3, 4, 3]).get_duplicates()  # doctest: +SKIP
        [2, 3]

        Return empty array-like structure when all elements are unique.

        >>> pd.Index([1, 2, 3, 4]).get_duplicates()  # doctest: +SKIP
        []

        >>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03'],
        ...                        format='%Y-%m-%d')
        >>> pd.Index(dates).get_duplicates()  # doctest: +SKIP
        DatetimeIndex([], dtype='datetime64[ns]', freq=None)
        """
        warnings.warn("'get_duplicates' is deprecated and will be removed in "
                      "a future release. You can use "
                      "idx[idx.duplicated()].unique() instead",
                      FutureWarning, stacklevel=2)

        return self[self.duplicated()].unique()
def get_duplicates(self):
        """
        Extract duplicated index elements.

        .. deprecated:: 0.23.0
            Use idx[idx.duplicated()].unique() instead

        Returns a sorted list of index elements which appear more than once in
        the index.

        Returns
        -------
        array-like
            List of duplicated indexes.

        See Also
        --------
        Index.duplicated : Return boolean array denoting duplicates.
        Index.drop_duplicates : Return Index with duplicates removed.

        Examples
        --------
        Works on different types of Index.

        >>> pd.Index([1, 2, 2, 3, 3, 3, 4]).get_duplicates()  # doctest: +SKIP
        [2, 3]

        Note that for a DatetimeIndex, it does not return a list but a new
        DatetimeIndex:

        >>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03',
        ...                         '2018-01-03', '2018-01-04', '2018-01-04'],
        ...                        format='%Y-%m-%d')
        >>> pd.Index(dates).get_duplicates()  # doctest: +SKIP
        DatetimeIndex(['2018-01-03', '2018-01-04'],
                      dtype='datetime64[ns]', freq=None)

        Sorts duplicated elements even when indexes are unordered.

        >>> pd.Index([1, 2, 3, 2, 3, 4, 3]).get_duplicates()  # doctest: +SKIP
        [2, 3]

        Return empty array-like structure when all elements are unique.

        >>> pd.Index([1, 2, 3, 4]).get_duplicates()  # doctest: +SKIP
        []

        >>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03'],
        ...                        format='%Y-%m-%d')
        >>> pd.Index(dates).get_duplicates()  # doctest: +SKIP
        DatetimeIndex([], dtype='datetime64[ns]', freq=None)
        """
        warnings.warn("'get_duplicates' is deprecated and will be removed in "
                      "a future release. You can use "
                      "idx[idx.duplicated()].unique() instead",
                      FutureWarning, stacklevel=2)

        return self[self.duplicated()].unique()
[ "Extract", "duplicated", "index", "elements", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L2105-L2162
[ "def", "get_duplicates", "(", "self", ")", ":", "warnings", ".", "warn", "(", "\"'get_duplicates' is deprecated and will be removed in \"", "\"a future release. You can use \"", "\"idx[idx.duplicated()].unique() instead\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", "[", "self", ".", "duplicated", "(", ")", "]", ".", "unique", "(", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Index._get_unique_index
Returns an index containing unique values. Parameters ---------- dropna : bool If True, NaN values are dropped. Returns ------- uniques : index
pandas/core/indexes/base.py
def _get_unique_index(self, dropna=False): """ Returns an index containing unique values. Parameters ---------- dropna : bool If True, NaN values are dropped. Returns ------- uniques : index """ if self.is_unique and not dropna: return self values = self.values if not self.is_unique: values = self.unique() if dropna: try: if self.hasnans: values = values[~isna(values)] except NotImplementedError: pass return self._shallow_copy(values)
def _get_unique_index(self, dropna=False): """ Returns an index containing unique values. Parameters ---------- dropna : bool If True, NaN values are dropped. Returns ------- uniques : index """ if self.is_unique and not dropna: return self values = self.values if not self.is_unique: values = self.unique() if dropna: try: if self.hasnans: values = values[~isna(values)] except NotImplementedError: pass return self._shallow_copy(values)
[ "Returns", "an", "index", "containing", "unique", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L2164-L2192
[ "def", "_get_unique_index", "(", "self", ",", "dropna", "=", "False", ")", ":", "if", "self", ".", "is_unique", "and", "not", "dropna", ":", "return", "self", "values", "=", "self", ".", "values", "if", "not", "self", ".", "is_unique", ":", "values", "=", "self", ".", "unique", "(", ")", "if", "dropna", ":", "try", ":", "if", "self", ".", "hasnans", ":", "values", "=", "values", "[", "~", "isna", "(", "values", ")", "]", "except", "NotImplementedError", ":", "pass", "return", "self", ".", "_shallow_copy", "(", "values", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
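`_get_unique_index` is private, but its contract follows directly from the code in the record above; a small sketch that calls the private method purely for illustration:

import numpy as np
import pandas as pd

idx = pd.Index([3.0, 1.0, 1.0, np.nan, np.nan])
print(idx._get_unique_index())             # Float64Index([3.0, 1.0, nan], dtype='float64')
print(idx._get_unique_index(dropna=True))  # NaN removed: Float64Index([3.0, 1.0], dtype='float64')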
Index._get_reconciled_name_object
If the result of a set operation will be self, return self, unless the name changes, in which case make a shallow copy of self.
pandas/core/indexes/base.py
def _get_reconciled_name_object(self, other): """ If the result of a set operation will be self, return self, unless the name changes, in which case make a shallow copy of self. """ name = get_op_result_name(self, other) if self.name != name: return self._shallow_copy(name=name) return self
def _get_reconciled_name_object(self, other): """ If the result of a set operation will be self, return self, unless the name changes, in which case make a shallow copy of self. """ name = get_op_result_name(self, other) if self.name != name: return self._shallow_copy(name=name) return self
[ "If", "the", "result", "of", "a", "set", "operation", "will", "be", "self", "return", "self", "unless", "the", "name", "changes", "in", "which", "case", "make", "a", "shallow", "copy", "of", "self", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L2234-L2243
[ "def", "_get_reconciled_name_object", "(", "self", ",", "other", ")", ":", "name", "=", "get_op_result_name", "(", "self", ",", "other", ")", "if", "self", ".", "name", "!=", "name", ":", "return", "self", ".", "_shallow_copy", "(", "name", "=", "name", ")", "return", "self" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
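The name reconciliation above is what lets set operations keep a shared name and drop a conflicting one; a sketch of the observable effect through the public API (illustrative values):

import pandas as pd

a = pd.Index([1, 2, 3], name='x')
b = pd.Index([1, 2, 3], name='x')
c = pd.Index([1, 2, 3], name='y')
print(a.union(b).name)  # 'x'  -- names agree, so self is returned unchanged
print(a.union(c).name)  # None -- names differ, so a renamed shallow copy is used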
Index.union
Form the union of two Index objects. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` or `other` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- union : Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.union(idx2) Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
pandas/core/indexes/base.py
def union(self, other, sort=None): """ Form the union of two Index objects. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` or `other` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- union : Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.union(idx2) Int64Index([1, 2, 3, 4, 5, 6], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other = ensure_index(other) if len(other) == 0 or self.equals(other): return self._get_reconciled_name_object(other) if len(self) == 0: return other._get_reconciled_name_object(self) # TODO: is_dtype_union_equal is a hack around # 1. buggy set ops with duplicates (GH #13432) # 2. CategoricalIndex lacking setops (GH #10186) # Once those are fixed, this workaround can be removed if not is_dtype_union_equal(self.dtype, other.dtype): this = self.astype('O') other = other.astype('O') return this.union(other, sort=sort) # TODO(EA): setops-refactor, clean all this up if is_period_dtype(self) or is_datetime64tz_dtype(self): lvals = self._ndarray_values else: lvals = self._values if is_period_dtype(other) or is_datetime64tz_dtype(other): rvals = other._ndarray_values else: rvals = other._values if sort is None and self.is_monotonic and other.is_monotonic: try: result = self._outer_indexer(lvals, rvals)[0] except TypeError: # incomparable objects result = list(lvals) # worth making this faster? a very unusual case value_set = set(lvals) result.extend([x for x in rvals if x not in value_set]) else: indexer = self.get_indexer(other) indexer, = (indexer == -1).nonzero() if len(indexer) > 0: other_diff = algos.take_nd(rvals, indexer, allow_fill=False) result = _concat._concat_compat((lvals, other_diff)) else: result = lvals if sort is None: try: result = sorting.safe_sort(result) except TypeError as e: warnings.warn("{}, sort order is undefined for " "incomparable objects".format(e), RuntimeWarning, stacklevel=3) # for subclasses return self._wrap_setop_result(other, result)
def union(self, other, sort=None): """ Form the union of two Index objects. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` or `other` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- union : Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.union(idx2) Int64Index([1, 2, 3, 4, 5, 6], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other = ensure_index(other) if len(other) == 0 or self.equals(other): return self._get_reconciled_name_object(other) if len(self) == 0: return other._get_reconciled_name_object(self) # TODO: is_dtype_union_equal is a hack around # 1. buggy set ops with duplicates (GH #13432) # 2. CategoricalIndex lacking setops (GH #10186) # Once those are fixed, this workaround can be removed if not is_dtype_union_equal(self.dtype, other.dtype): this = self.astype('O') other = other.astype('O') return this.union(other, sort=sort) # TODO(EA): setops-refactor, clean all this up if is_period_dtype(self) or is_datetime64tz_dtype(self): lvals = self._ndarray_values else: lvals = self._values if is_period_dtype(other) or is_datetime64tz_dtype(other): rvals = other._ndarray_values else: rvals = other._values if sort is None and self.is_monotonic and other.is_monotonic: try: result = self._outer_indexer(lvals, rvals)[0] except TypeError: # incomparable objects result = list(lvals) # worth making this faster? a very unusual case value_set = set(lvals) result.extend([x for x in rvals if x not in value_set]) else: indexer = self.get_indexer(other) indexer, = (indexer == -1).nonzero() if len(indexer) > 0: other_diff = algos.take_nd(rvals, indexer, allow_fill=False) result = _concat._concat_compat((lvals, other_diff)) else: result = lvals if sort is None: try: result = sorting.safe_sort(result) except TypeError as e: warnings.warn("{}, sort order is undefined for " "incomparable objects".format(e), RuntimeWarning, stacklevel=3) # for subclasses return self._wrap_setop_result(other, result)
[ "Form", "the", "union", "of", "two", "Index", "objects", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L2250-L2348
[ "def", "union", "(", "self", ",", "other", ",", "sort", "=", "None", ")", ":", "self", ".", "_validate_sort_keyword", "(", "sort", ")", "self", ".", "_assert_can_do_setop", "(", "other", ")", "other", "=", "ensure_index", "(", "other", ")", "if", "len", "(", "other", ")", "==", "0", "or", "self", ".", "equals", "(", "other", ")", ":", "return", "self", ".", "_get_reconciled_name_object", "(", "other", ")", "if", "len", "(", "self", ")", "==", "0", ":", "return", "other", ".", "_get_reconciled_name_object", "(", "self", ")", "# TODO: is_dtype_union_equal is a hack around", "# 1. buggy set ops with duplicates (GH #13432)", "# 2. CategoricalIndex lacking setops (GH #10186)", "# Once those are fixed, this workaround can be removed", "if", "not", "is_dtype_union_equal", "(", "self", ".", "dtype", ",", "other", ".", "dtype", ")", ":", "this", "=", "self", ".", "astype", "(", "'O'", ")", "other", "=", "other", ".", "astype", "(", "'O'", ")", "return", "this", ".", "union", "(", "other", ",", "sort", "=", "sort", ")", "# TODO(EA): setops-refactor, clean all this up", "if", "is_period_dtype", "(", "self", ")", "or", "is_datetime64tz_dtype", "(", "self", ")", ":", "lvals", "=", "self", ".", "_ndarray_values", "else", ":", "lvals", "=", "self", ".", "_values", "if", "is_period_dtype", "(", "other", ")", "or", "is_datetime64tz_dtype", "(", "other", ")", ":", "rvals", "=", "other", ".", "_ndarray_values", "else", ":", "rvals", "=", "other", ".", "_values", "if", "sort", "is", "None", "and", "self", ".", "is_monotonic", "and", "other", ".", "is_monotonic", ":", "try", ":", "result", "=", "self", ".", "_outer_indexer", "(", "lvals", ",", "rvals", ")", "[", "0", "]", "except", "TypeError", ":", "# incomparable objects", "result", "=", "list", "(", "lvals", ")", "# worth making this faster? a very unusual case", "value_set", "=", "set", "(", "lvals", ")", "result", ".", "extend", "(", "[", "x", "for", "x", "in", "rvals", "if", "x", "not", "in", "value_set", "]", ")", "else", ":", "indexer", "=", "self", ".", "get_indexer", "(", "other", ")", "indexer", ",", "=", "(", "indexer", "==", "-", "1", ")", ".", "nonzero", "(", ")", "if", "len", "(", "indexer", ")", ">", "0", ":", "other_diff", "=", "algos", ".", "take_nd", "(", "rvals", ",", "indexer", ",", "allow_fill", "=", "False", ")", "result", "=", "_concat", ".", "_concat_compat", "(", "(", "lvals", ",", "other_diff", ")", ")", "else", ":", "result", "=", "lvals", "if", "sort", "is", "None", ":", "try", ":", "result", "=", "sorting", ".", "safe_sort", "(", "result", ")", "except", "TypeError", "as", "e", ":", "warnings", ".", "warn", "(", "\"{}, sort order is undefined for \"", "\"incomparable objects\"", ".", "format", "(", "e", ")", ",", "RuntimeWarning", ",", "stacklevel", "=", "3", ")", "# for subclasses", "return", "self", ".", "_wrap_setop_result", "(", "other", ",", "result", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
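A short usage sketch of the `sort` keyword documented in the record above (a non-monotonic left operand is chosen so the two modes actually produce different results):

import pandas as pd

idx1 = pd.Index([2, 1, 3])
idx2 = pd.Index([1, 4])
print(idx1.union(idx2))              # Int64Index([1, 2, 3, 4], dtype='int64') -- sorted by default
print(idx1.union(idx2, sort=False))  # Int64Index([2, 1, 3, 4], dtype='int64') -- appearance order kept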
Index.intersection
Form the intersection of two Index objects. This returns a new Index with elements common to the index and `other`. Parameters ---------- other : Index or array-like sort : False or None, default False Whether to sort the resulting index. * False : do not sort the result. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default from ``True`` to ``False``, to match the behaviour of 0.23.4 and earlier. Returns ------- intersection : Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.intersection(idx2) Int64Index([3, 4], dtype='int64')
pandas/core/indexes/base.py
def intersection(self, other, sort=False): """ Form the intersection of two Index objects. This returns a new Index with elements common to the index and `other`. Parameters ---------- other : Index or array-like sort : False or None, default False Whether to sort the resulting index. * False : do not sort the result. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default from ``True`` to ``False``, to match the behaviour of 0.23.4 and earlier. Returns ------- intersection : Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.intersection(idx2) Int64Index([3, 4], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other = ensure_index(other) if self.equals(other): return self._get_reconciled_name_object(other) if not is_dtype_equal(self.dtype, other.dtype): this = self.astype('O') other = other.astype('O') return this.intersection(other, sort=sort) # TODO(EA): setops-refactor, clean all this up if is_period_dtype(self): lvals = self._ndarray_values else: lvals = self._values if is_period_dtype(other): rvals = other._ndarray_values else: rvals = other._values if self.is_monotonic and other.is_monotonic: try: result = self._inner_indexer(lvals, rvals)[0] return self._wrap_setop_result(other, result) except TypeError: pass try: indexer = Index(rvals).get_indexer(lvals) indexer = indexer.take((indexer != -1).nonzero()[0]) except Exception: # duplicates indexer = algos.unique1d( Index(rvals).get_indexer_non_unique(lvals)[0]) indexer = indexer[indexer != -1] taken = other.take(indexer) if sort is None: taken = sorting.safe_sort(taken.values) if self.name != other.name: name = None else: name = self.name return self._shallow_copy(taken, name=name) if self.name != other.name: taken.name = None return taken
def intersection(self, other, sort=False): """ Form the intersection of two Index objects. This returns a new Index with elements common to the index and `other`. Parameters ---------- other : Index or array-like sort : False or None, default False Whether to sort the resulting index. * False : do not sort the result. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default from ``True`` to ``False``, to match the behaviour of 0.23.4 and earlier. Returns ------- intersection : Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.intersection(idx2) Int64Index([3, 4], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other = ensure_index(other) if self.equals(other): return self._get_reconciled_name_object(other) if not is_dtype_equal(self.dtype, other.dtype): this = self.astype('O') other = other.astype('O') return this.intersection(other, sort=sort) # TODO(EA): setops-refactor, clean all this up if is_period_dtype(self): lvals = self._ndarray_values else: lvals = self._values if is_period_dtype(other): rvals = other._ndarray_values else: rvals = other._values if self.is_monotonic and other.is_monotonic: try: result = self._inner_indexer(lvals, rvals)[0] return self._wrap_setop_result(other, result) except TypeError: pass try: indexer = Index(rvals).get_indexer(lvals) indexer = indexer.take((indexer != -1).nonzero()[0]) except Exception: # duplicates indexer = algos.unique1d( Index(rvals).get_indexer_non_unique(lvals)[0]) indexer = indexer[indexer != -1] taken = other.take(indexer) if sort is None: taken = sorting.safe_sort(taken.values) if self.name != other.name: name = None else: name = self.name return self._shallow_copy(taken, name=name) if self.name != other.name: taken.name = None return taken
[ "Form", "the", "intersection", "of", "two", "Index", "objects", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L2353-L2439
[ "def", "intersection", "(", "self", ",", "other", ",", "sort", "=", "False", ")", ":", "self", ".", "_validate_sort_keyword", "(", "sort", ")", "self", ".", "_assert_can_do_setop", "(", "other", ")", "other", "=", "ensure_index", "(", "other", ")", "if", "self", ".", "equals", "(", "other", ")", ":", "return", "self", ".", "_get_reconciled_name_object", "(", "other", ")", "if", "not", "is_dtype_equal", "(", "self", ".", "dtype", ",", "other", ".", "dtype", ")", ":", "this", "=", "self", ".", "astype", "(", "'O'", ")", "other", "=", "other", ".", "astype", "(", "'O'", ")", "return", "this", ".", "intersection", "(", "other", ",", "sort", "=", "sort", ")", "# TODO(EA): setops-refactor, clean all this up", "if", "is_period_dtype", "(", "self", ")", ":", "lvals", "=", "self", ".", "_ndarray_values", "else", ":", "lvals", "=", "self", ".", "_values", "if", "is_period_dtype", "(", "other", ")", ":", "rvals", "=", "other", ".", "_ndarray_values", "else", ":", "rvals", "=", "other", ".", "_values", "if", "self", ".", "is_monotonic", "and", "other", ".", "is_monotonic", ":", "try", ":", "result", "=", "self", ".", "_inner_indexer", "(", "lvals", ",", "rvals", ")", "[", "0", "]", "return", "self", ".", "_wrap_setop_result", "(", "other", ",", "result", ")", "except", "TypeError", ":", "pass", "try", ":", "indexer", "=", "Index", "(", "rvals", ")", ".", "get_indexer", "(", "lvals", ")", "indexer", "=", "indexer", ".", "take", "(", "(", "indexer", "!=", "-", "1", ")", ".", "nonzero", "(", ")", "[", "0", "]", ")", "except", "Exception", ":", "# duplicates", "indexer", "=", "algos", ".", "unique1d", "(", "Index", "(", "rvals", ")", ".", "get_indexer_non_unique", "(", "lvals", ")", "[", "0", "]", ")", "indexer", "=", "indexer", "[", "indexer", "!=", "-", "1", "]", "taken", "=", "other", ".", "take", "(", "indexer", ")", "if", "sort", "is", "None", ":", "taken", "=", "sorting", ".", "safe_sort", "(", "taken", ".", "values", ")", "if", "self", ".", "name", "!=", "other", ".", "name", ":", "name", "=", "None", "else", ":", "name", "=", "self", ".", "name", "return", "self", ".", "_shallow_copy", "(", "taken", ",", "name", "=", "name", ")", "if", "self", ".", "name", "!=", "other", ".", "name", ":", "taken", ".", "name", "=", "None", "return", "taken" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
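A usage sketch of the `sort=False` default documented above (note the asymmetry with `union`, whose default attempts a sort):

import pandas as pd

idx1 = pd.Index([4, 3, 2, 1])
idx2 = pd.Index([2, 3, 5])
print(idx1.intersection(idx2))             # Int64Index([3, 2], dtype='int64') -- unsorted default
print(idx1.intersection(idx2, sort=None))  # Int64Index([2, 3], dtype='int64') -- sorted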
Index.difference
Return a new Index with elements from the index that are not in `other`. This is the set difference of two Index objects. Parameters ---------- other : Index or array-like sort : False or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- difference : Index Examples -------- >>> idx1 = pd.Index([2, 1, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.difference(idx2) Int64Index([1, 2], dtype='int64') >>> idx1.difference(idx2, sort=False) Int64Index([2, 1], dtype='int64')
pandas/core/indexes/base.py
def difference(self, other, sort=None): """ Return a new Index with elements from the index that are not in `other`. This is the set difference of two Index objects. Parameters ---------- other : Index or array-like sort : False or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- difference : Index Examples -------- >>> idx1 = pd.Index([2, 1, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.difference(idx2) Int64Index([1, 2], dtype='int64') >>> idx1.difference(idx2, sort=False) Int64Index([2, 1], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) if self.equals(other): # pass an empty np.ndarray with the appropriate dtype return self._shallow_copy(self._data[:0]) other, result_name = self._convert_can_do_setop(other) this = self._get_unique_index() indexer = this.get_indexer(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) the_diff = this.values.take(label_diff) if sort is None: try: the_diff = sorting.safe_sort(the_diff) except TypeError: pass return this._shallow_copy(the_diff, name=result_name, freq=None)
def difference(self, other, sort=None): """ Return a new Index with elements from the index that are not in `other`. This is the set difference of two Index objects. Parameters ---------- other : Index or array-like sort : False or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- difference : Index Examples -------- >>> idx1 = pd.Index([2, 1, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.difference(idx2) Int64Index([1, 2], dtype='int64') >>> idx1.difference(idx2, sort=False) Int64Index([2, 1], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) if self.equals(other): # pass an empty np.ndarray with the appropriate dtype return self._shallow_copy(self._data[:0]) other, result_name = self._convert_can_do_setop(other) this = self._get_unique_index() indexer = this.get_indexer(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) the_diff = this.values.take(label_diff) if sort is None: try: the_diff = sorting.safe_sort(the_diff) except TypeError: pass return this._shallow_copy(the_diff, name=result_name, freq=None)
[ "Return", "a", "new", "Index", "with", "elements", "from", "the", "index", "that", "are", "not", "in", "other", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L2441-L2504
[ "def", "difference", "(", "self", ",", "other", ",", "sort", "=", "None", ")", ":", "self", ".", "_validate_sort_keyword", "(", "sort", ")", "self", ".", "_assert_can_do_setop", "(", "other", ")", "if", "self", ".", "equals", "(", "other", ")", ":", "# pass an empty np.ndarray with the appropriate dtype", "return", "self", ".", "_shallow_copy", "(", "self", ".", "_data", "[", ":", "0", "]", ")", "other", ",", "result_name", "=", "self", ".", "_convert_can_do_setop", "(", "other", ")", "this", "=", "self", ".", "_get_unique_index", "(", ")", "indexer", "=", "this", ".", "get_indexer", "(", "other", ")", "indexer", "=", "indexer", ".", "take", "(", "(", "indexer", "!=", "-", "1", ")", ".", "nonzero", "(", ")", "[", "0", "]", ")", "label_diff", "=", "np", ".", "setdiff1d", "(", "np", ".", "arange", "(", "this", ".", "size", ")", ",", "indexer", ",", "assume_unique", "=", "True", ")", "the_diff", "=", "this", ".", "values", ".", "take", "(", "label_diff", ")", "if", "sort", "is", "None", ":", "try", ":", "the_diff", "=", "sorting", ".", "safe_sort", "(", "the_diff", ")", "except", "TypeError", ":", "pass", "return", "this", ".", "_shallow_copy", "(", "the_diff", ",", "name", "=", "result_name", ",", "freq", "=", "None", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
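A usage sketch mirroring the docstring's example, with both sort modes side by side:

import pandas as pd

idx1 = pd.Index([2, 1, 3, 4])
idx2 = pd.Index([3, 4, 5, 6])
print(idx1.difference(idx2))              # Int64Index([1, 2], dtype='int64') -- sorted (sort=None)
print(idx1.difference(idx2, sort=False))  # Int64Index([2, 1], dtype='int64') -- appearance order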
Index.symmetric_difference
Compute the symmetric difference of two Index objects. Parameters ---------- other : Index or array-like result_name : str sort : False or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- symmetric_difference : Index Notes ----- ``symmetric_difference`` contains elements that appear in either ``idx1`` or ``idx2`` but not both. Equivalent to the Index created by ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates dropped. Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([2, 3, 4, 5]) >>> idx1.symmetric_difference(idx2) Int64Index([1, 5], dtype='int64') You can also use the ``^`` operator: >>> idx1 ^ idx2 Int64Index([1, 5], dtype='int64')
pandas/core/indexes/base.py
def symmetric_difference(self, other, result_name=None, sort=None): """ Compute the symmetric difference of two Index objects. Parameters ---------- other : Index or array-like result_name : str sort : False or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- symmetric_difference : Index Notes ----- ``symmetric_difference`` contains elements that appear in either ``idx1`` or ``idx2`` but not both. Equivalent to the Index created by ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates dropped. Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([2, 3, 4, 5]) >>> idx1.symmetric_difference(idx2) Int64Index([1, 5], dtype='int64') You can also use the ``^`` operator: >>> idx1 ^ idx2 Int64Index([1, 5], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name_update = self._convert_can_do_setop(other) if result_name is None: result_name = result_name_update this = self._get_unique_index() other = other._get_unique_index() indexer = this.get_indexer(other) # {this} minus {other} common_indexer = indexer.take((indexer != -1).nonzero()[0]) left_indexer = np.setdiff1d(np.arange(this.size), common_indexer, assume_unique=True) left_diff = this.values.take(left_indexer) # {other} minus {this} right_indexer = (indexer == -1).nonzero()[0] right_diff = other.values.take(right_indexer) the_diff = _concat._concat_compat([left_diff, right_diff]) if sort is None: try: the_diff = sorting.safe_sort(the_diff) except TypeError: pass attribs = self._get_attributes_dict() attribs['name'] = result_name if 'freq' in attribs: attribs['freq'] = None return self._shallow_copy_with_infer(the_diff, **attribs)
def symmetric_difference(self, other, result_name=None, sort=None): """ Compute the symmetric difference of two Index objects. Parameters ---------- other : Index or array-like result_name : str sort : False or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- symmetric_difference : Index Notes ----- ``symmetric_difference`` contains elements that appear in either ``idx1`` or ``idx2`` but not both. Equivalent to the Index created by ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates dropped. Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([2, 3, 4, 5]) >>> idx1.symmetric_difference(idx2) Int64Index([1, 5], dtype='int64') You can also use the ``^`` operator: >>> idx1 ^ idx2 Int64Index([1, 5], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name_update = self._convert_can_do_setop(other) if result_name is None: result_name = result_name_update this = self._get_unique_index() other = other._get_unique_index() indexer = this.get_indexer(other) # {this} minus {other} common_indexer = indexer.take((indexer != -1).nonzero()[0]) left_indexer = np.setdiff1d(np.arange(this.size), common_indexer, assume_unique=True) left_diff = this.values.take(left_indexer) # {other} minus {this} right_indexer = (indexer == -1).nonzero()[0] right_diff = other.values.take(right_indexer) the_diff = _concat._concat_compat([left_diff, right_diff]) if sort is None: try: the_diff = sorting.safe_sort(the_diff) except TypeError: pass attribs = self._get_attributes_dict() attribs['name'] = result_name if 'freq' in attribs: attribs['freq'] = None return self._shallow_copy_with_infer(the_diff, **attribs)
[ "Compute", "the", "symmetric", "difference", "of", "two", "Index", "objects", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L2506-L2584
[ "def", "symmetric_difference", "(", "self", ",", "other", ",", "result_name", "=", "None", ",", "sort", "=", "None", ")", ":", "self", ".", "_validate_sort_keyword", "(", "sort", ")", "self", ".", "_assert_can_do_setop", "(", "other", ")", "other", ",", "result_name_update", "=", "self", ".", "_convert_can_do_setop", "(", "other", ")", "if", "result_name", "is", "None", ":", "result_name", "=", "result_name_update", "this", "=", "self", ".", "_get_unique_index", "(", ")", "other", "=", "other", ".", "_get_unique_index", "(", ")", "indexer", "=", "this", ".", "get_indexer", "(", "other", ")", "# {this} minus {other}", "common_indexer", "=", "indexer", ".", "take", "(", "(", "indexer", "!=", "-", "1", ")", ".", "nonzero", "(", ")", "[", "0", "]", ")", "left_indexer", "=", "np", ".", "setdiff1d", "(", "np", ".", "arange", "(", "this", ".", "size", ")", ",", "common_indexer", ",", "assume_unique", "=", "True", ")", "left_diff", "=", "this", ".", "values", ".", "take", "(", "left_indexer", ")", "# {other} minus {this}", "right_indexer", "=", "(", "indexer", "==", "-", "1", ")", ".", "nonzero", "(", ")", "[", "0", "]", "right_diff", "=", "other", ".", "values", ".", "take", "(", "right_indexer", ")", "the_diff", "=", "_concat", ".", "_concat_compat", "(", "[", "left_diff", ",", "right_diff", "]", ")", "if", "sort", "is", "None", ":", "try", ":", "the_diff", "=", "sorting", ".", "safe_sort", "(", "the_diff", ")", "except", "TypeError", ":", "pass", "attribs", "=", "self", ".", "_get_attributes_dict", "(", ")", "attribs", "[", "'name'", "]", "=", "result_name", "if", "'freq'", "in", "attribs", ":", "attribs", "[", "'freq'", "]", "=", "None", "return", "self", ".", "_shallow_copy_with_infer", "(", "the_diff", ",", "*", "*", "attribs", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
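A usage sketch of the method and its operator spelling, as described in the docstring above:

import pandas as pd

idx1 = pd.Index([1, 2, 3, 4])
idx2 = pd.Index([2, 3, 4, 5])
print(idx1.symmetric_difference(idx2))  # Int64Index([1, 5], dtype='int64')
print(idx1 ^ idx2)                      # equivalent operator form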
Index._get_fill_indexer_searchsorted
Fallback pad/backfill get_indexer that works for monotonic decreasing indexes and non-monotonic targets.
pandas/core/indexes/base.py
def _get_fill_indexer_searchsorted(self, target, method, limit=None): """ Fallback pad/backfill get_indexer that works for monotonic decreasing indexes and non-monotonic targets. """ if limit is not None: raise ValueError('limit argument for %r method only well-defined ' 'if index and target are monotonic' % method) side = 'left' if method == 'pad' else 'right' # find exact matches first (this simplifies the algorithm) indexer = self.get_indexer(target) nonexact = (indexer == -1) indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side) if side == 'left': # searchsorted returns "indices into a sorted array such that, # if the corresponding elements in v were inserted before the # indices, the order of a would be preserved". # Thus, we need to subtract 1 to find values to the left. indexer[nonexact] -= 1 # This also mapped not found values (values of 0 from # np.searchsorted) to -1, which conveniently is also our # sentinel for missing values else: # Mark indices to the right of the largest value as not found indexer[indexer == len(self)] = -1 return indexer
def _get_fill_indexer_searchsorted(self, target, method, limit=None): """ Fallback pad/backfill get_indexer that works for monotonic decreasing indexes and non-monotonic targets. """ if limit is not None: raise ValueError('limit argument for %r method only well-defined ' 'if index and target are monotonic' % method) side = 'left' if method == 'pad' else 'right' # find exact matches first (this simplifies the algorithm) indexer = self.get_indexer(target) nonexact = (indexer == -1) indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side) if side == 'left': # searchsorted returns "indices into a sorted array such that, # if the corresponding elements in v were inserted before the # indices, the order of a would be preserved". # Thus, we need to subtract 1 to find values to the left. indexer[nonexact] -= 1 # This also mapped not found values (values of 0 from # np.searchsorted) to -1, which conveniently is also our # sentinel for missing values else: # Mark indices to the right of the largest value as not found indexer[indexer == len(self)] = -1 return indexer
[ "Fallback", "pad", "/", "backfill", "get_indexer", "that", "works", "for", "monotonic", "decreasing", "indexes", "and", "non", "-", "monotonic", "targets", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L2777-L2805
[ "def", "_get_fill_indexer_searchsorted", "(", "self", ",", "target", ",", "method", ",", "limit", "=", "None", ")", ":", "if", "limit", "is", "not", "None", ":", "raise", "ValueError", "(", "'limit argument for %r method only well-defined '", "'if index and target are monotonic'", "%", "method", ")", "side", "=", "'left'", "if", "method", "==", "'pad'", "else", "'right'", "# find exact matches first (this simplifies the algorithm)", "indexer", "=", "self", ".", "get_indexer", "(", "target", ")", "nonexact", "=", "(", "indexer", "==", "-", "1", ")", "indexer", "[", "nonexact", "]", "=", "self", ".", "_searchsorted_monotonic", "(", "target", "[", "nonexact", "]", ",", "side", ")", "if", "side", "==", "'left'", ":", "# searchsorted returns \"indices into a sorted array such that,", "# if the corresponding elements in v were inserted before the", "# indices, the order of a would be preserved\".", "# Thus, we need to subtract 1 to find values to the left.", "indexer", "[", "nonexact", "]", "-=", "1", "# This also mapped not found values (values of 0 from", "# np.searchsorted) to -1, which conveniently is also our", "# sentinel for missing values", "else", ":", "# Mark indices to the right of the largest value as not found", "indexer", "[", "indexer", "==", "len", "(", "self", ")", "]", "=", "-", "1", "return", "indexer" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
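One public route into this fallback is `get_indexer` with a non-monotonic target; an illustrative sketch (which code path is taken internally is an implementation detail of this pandas vintage):

import pandas as pd

idx = pd.Index([10, 20, 30])
# the non-monotonic target forces the searchsorted-based fallback described above
print(idx.get_indexer([25, 5, 35], method='pad'))
# array([ 1, -1,  2]) -- 25 pads back to 20; 5 has no label to its left; 35 pads to 30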
Index._get_nearest_indexer
Get the indexer for the nearest index labels; requires an index with values that can be subtracted from each other (e.g., not strings or tuples).
pandas/core/indexes/base.py
def _get_nearest_indexer(self, target, limit, tolerance): """ Get the indexer for the nearest index labels; requires an index with values that can be subtracted from each other (e.g., not strings or tuples). """ left_indexer = self.get_indexer(target, 'pad', limit=limit) right_indexer = self.get_indexer(target, 'backfill', limit=limit) target = np.asarray(target) left_distances = abs(self.values[left_indexer] - target) right_distances = abs(self.values[right_indexer] - target) op = operator.lt if self.is_monotonic_increasing else operator.le indexer = np.where(op(left_distances, right_distances) | (right_indexer == -1), left_indexer, right_indexer) if tolerance is not None: indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer
def _get_nearest_indexer(self, target, limit, tolerance): """ Get the indexer for the nearest index labels; requires an index with values that can be subtracted from each other (e.g., not strings or tuples). """ left_indexer = self.get_indexer(target, 'pad', limit=limit) right_indexer = self.get_indexer(target, 'backfill', limit=limit) target = np.asarray(target) left_distances = abs(self.values[left_indexer] - target) right_distances = abs(self.values[right_indexer] - target) op = operator.lt if self.is_monotonic_increasing else operator.le indexer = np.where(op(left_distances, right_distances) | (right_indexer == -1), left_indexer, right_indexer) if tolerance is not None: indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer
[ "Get", "the", "indexer", "for", "the", "nearest", "index", "labels", ";", "requires", "an", "index", "with", "values", "that", "can", "be", "subtracted", "from", "each", "other", "(", "e", ".", "g", ".", "not", "strings", "or", "tuples", ")", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L2807-L2826
[ "def", "_get_nearest_indexer", "(", "self", ",", "target", ",", "limit", ",", "tolerance", ")", ":", "left_indexer", "=", "self", ".", "get_indexer", "(", "target", ",", "'pad'", ",", "limit", "=", "limit", ")", "right_indexer", "=", "self", ".", "get_indexer", "(", "target", ",", "'backfill'", ",", "limit", "=", "limit", ")", "target", "=", "np", ".", "asarray", "(", "target", ")", "left_distances", "=", "abs", "(", "self", ".", "values", "[", "left_indexer", "]", "-", "target", ")", "right_distances", "=", "abs", "(", "self", ".", "values", "[", "right_indexer", "]", "-", "target", ")", "op", "=", "operator", ".", "lt", "if", "self", ".", "is_monotonic_increasing", "else", "operator", ".", "le", "indexer", "=", "np", ".", "where", "(", "op", "(", "left_distances", ",", "right_distances", ")", "|", "(", "right_indexer", "==", "-", "1", ")", ",", "left_indexer", ",", "right_indexer", ")", "if", "tolerance", "is", "not", "None", ":", "indexer", "=", "self", ".", "_filter_indexer_tolerance", "(", "target", ",", "indexer", ",", "tolerance", ")", "return", "indexer" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
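A sketch of the public behavior this helper implements, via `get_indexer(..., method='nearest')`:

import pandas as pd

idx = pd.Index([0, 10, 20])
print(idx.get_indexer([2, 9, 14], method='nearest'))
# array([0, 1, 1]) -- each target maps to its closest label
print(idx.get_indexer([2, 9, 14], method='nearest', tolerance=3))
# array([ 0,  1, -1]) -- 14 is 4 away from its nearest label, beyond the tolerance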
Index._convert_listlike_indexer
Parameters ---------- keyarr : list-like Indexer to convert. Returns ------- indexer : numpy.ndarray or None Return an ndarray or None if cannot convert. keyarr : numpy.ndarray Return tuple-safe keys.
pandas/core/indexes/base.py
def _convert_listlike_indexer(self, keyarr, kind=None): """ Parameters ---------- keyarr : list-like Indexer to convert. Returns ------- indexer : numpy.ndarray or None Return an ndarray or None if cannot convert. keyarr : numpy.ndarray Return tuple-safe keys. """ if isinstance(keyarr, Index): keyarr = self._convert_index_indexer(keyarr) else: keyarr = self._convert_arr_indexer(keyarr) indexer = self._convert_list_indexer(keyarr, kind=kind) return indexer, keyarr
def _convert_listlike_indexer(self, keyarr, kind=None): """ Parameters ---------- keyarr : list-like Indexer to convert. Returns ------- indexer : numpy.ndarray or None Return an ndarray or None if cannot convert. keyarr : numpy.ndarray Return tuple-safe keys. """ if isinstance(keyarr, Index): keyarr = self._convert_index_indexer(keyarr) else: keyarr = self._convert_arr_indexer(keyarr) indexer = self._convert_list_indexer(keyarr, kind=kind) return indexer, keyarr
[ "Parameters", "----------", "keyarr", ":", "list", "-", "like", "Indexer", "to", "convert", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L2961-L2981
[ "def", "_convert_listlike_indexer", "(", "self", ",", "keyarr", ",", "kind", "=", "None", ")", ":", "if", "isinstance", "(", "keyarr", ",", "Index", ")", ":", "keyarr", "=", "self", ".", "_convert_index_indexer", "(", "keyarr", ")", "else", ":", "keyarr", "=", "self", ".", "_convert_arr_indexer", "(", "keyarr", ")", "indexer", "=", "self", ".", "_convert_list_indexer", "(", "keyarr", ",", "kind", "=", "kind", ")", "return", "indexer", ",", "keyarr" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
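In the pandas vintage these records come from, list-like `.loc` selections are one public path through this conversion hook (the exact routing is an implementation detail); a small illustrative sketch:

import pandas as pd

s = pd.Series([10, 20, 30], index=['a', 'b', 'c'])
# the list-like key is converted to an indexer by the Index machinery above
print(s.loc[['a', 'c']])  # a 10 / c 30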
Index._invalid_indexer
Consistent invalid indexer message.
pandas/core/indexes/base.py
def _invalid_indexer(self, form, key): """ Consistent invalid indexer message. """ raise TypeError("cannot do {form} indexing on {klass} with these " "indexers [{key}] of {kind}".format( form=form, klass=type(self), key=key, kind=type(key)))
def _invalid_indexer(self, form, key): """ Consistent invalid indexer message. """ raise TypeError("cannot do {form} indexing on {klass} with these " "indexers [{key}] of {kind}".format( form=form, klass=type(self), key=key, kind=type(key)))
[ "Consistent", "invalid", "indexer", "message", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L3057-L3064
[ "def", "_invalid_indexer", "(", "self", ",", "form", ",", "key", ")", ":", "raise", "TypeError", "(", "\"cannot do {form} indexing on {klass} with these \"", "\"indexers [{key}] of {kind}\"", ".", "format", "(", "form", "=", "form", ",", "klass", "=", "type", "(", "self", ")", ",", "key", "=", "key", ",", "kind", "=", "type", "(", "key", ")", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
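The message format is visible directly in the code above; a sketch that calls the private helper purely to show the error text it produces:

import pandas as pd

idx = pd.Index([1, 2, 3])
try:
    idx._invalid_indexer('label', 1.5)  # private call, for illustration only
except TypeError as exc:
    print(exc)  # cannot do label indexing on <class '...Int64Index'> with these indexers [1.5] of <class 'float'>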
Index.reindex
Create index with target's values (move/add/delete values as necessary). Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray or None Indices of output values in original index.
pandas/core/indexes/base.py
def reindex(self, target, method=None, level=None, limit=None, tolerance=None): """ Create index with target's values (move/add/delete values as necessary). Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray or None Indices of output values in original index. """ # GH6552: preserve names when reindexing to non-named target # (i.e. neither Index nor Series). preserve_names = not hasattr(target, 'name') # GH7774: preserve dtype/tz if target is empty and not an Index. target = _ensure_has_len(target) # target may be an iterator if not isinstance(target, Index) and len(target) == 0: attrs = self._get_attributes_dict() attrs.pop('freq', None) # don't preserve freq values = self._data[:0] # appropriately-dtyped empty array target = self._simple_new(values, dtype=self.dtype, **attrs) else: target = ensure_index(target) if level is not None: if method is not None: raise TypeError('Fill method not supported if level passed') _, indexer, _ = self._join_level(target, level, how='right', return_indexers=True) else: if self.equals(target): indexer = None else: if self.is_unique: indexer = self.get_indexer(target, method=method, limit=limit, tolerance=tolerance) else: if method is not None or limit is not None: raise ValueError("cannot reindex a non-unique index " "with a method or limit") indexer, missing = self.get_indexer_non_unique(target) if preserve_names and target.nlevels == 1 and target.name != self.name: target = target.copy() target.name = self.name return target, indexer
def reindex(self, target, method=None, level=None, limit=None, tolerance=None): """ Create index with target's values (move/add/delete values as necessary). Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray or None Indices of output values in original index. """ # GH6552: preserve names when reindexing to non-named target # (i.e. neither Index nor Series). preserve_names = not hasattr(target, 'name') # GH7774: preserve dtype/tz if target is empty and not an Index. target = _ensure_has_len(target) # target may be an iterator if not isinstance(target, Index) and len(target) == 0: attrs = self._get_attributes_dict() attrs.pop('freq', None) # don't preserve freq values = self._data[:0] # appropriately-dtyped empty array target = self._simple_new(values, dtype=self.dtype, **attrs) else: target = ensure_index(target) if level is not None: if method is not None: raise TypeError('Fill method not supported if level passed') _, indexer, _ = self._join_level(target, level, how='right', return_indexers=True) else: if self.equals(target): indexer = None else: if self.is_unique: indexer = self.get_indexer(target, method=method, limit=limit, tolerance=tolerance) else: if method is not None or limit is not None: raise ValueError("cannot reindex a non-unique index " "with a method or limit") indexer, missing = self.get_indexer_non_unique(target) if preserve_names and target.nlevels == 1 and target.name != self.name: target = target.copy() target.name = self.name return target, indexer
[ "Create", "index", "with", "target", "s", "values", "(", "move", "/", "add", "/", "delete", "values", "as", "necessary", ")", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L3086-L3142
[ "def", "reindex", "(", "self", ",", "target", ",", "method", "=", "None", ",", "level", "=", "None", ",", "limit", "=", "None", ",", "tolerance", "=", "None", ")", ":", "# GH6552: preserve names when reindexing to non-named target", "# (i.e. neither Index nor Series).", "preserve_names", "=", "not", "hasattr", "(", "target", ",", "'name'", ")", "# GH7774: preserve dtype/tz if target is empty and not an Index.", "target", "=", "_ensure_has_len", "(", "target", ")", "# target may be an iterator", "if", "not", "isinstance", "(", "target", ",", "Index", ")", "and", "len", "(", "target", ")", "==", "0", ":", "attrs", "=", "self", ".", "_get_attributes_dict", "(", ")", "attrs", ".", "pop", "(", "'freq'", ",", "None", ")", "# don't preserve freq", "values", "=", "self", ".", "_data", "[", ":", "0", "]", "# appropriately-dtyped empty array", "target", "=", "self", ".", "_simple_new", "(", "values", ",", "dtype", "=", "self", ".", "dtype", ",", "*", "*", "attrs", ")", "else", ":", "target", "=", "ensure_index", "(", "target", ")", "if", "level", "is", "not", "None", ":", "if", "method", "is", "not", "None", ":", "raise", "TypeError", "(", "'Fill method not supported if level passed'", ")", "_", ",", "indexer", ",", "_", "=", "self", ".", "_join_level", "(", "target", ",", "level", ",", "how", "=", "'right'", ",", "return_indexers", "=", "True", ")", "else", ":", "if", "self", ".", "equals", "(", "target", ")", ":", "indexer", "=", "None", "else", ":", "if", "self", ".", "is_unique", ":", "indexer", "=", "self", ".", "get_indexer", "(", "target", ",", "method", "=", "method", ",", "limit", "=", "limit", ",", "tolerance", "=", "tolerance", ")", "else", ":", "if", "method", "is", "not", "None", "or", "limit", "is", "not", "None", ":", "raise", "ValueError", "(", "\"cannot reindex a non-unique index \"", "\"with a method or limit\"", ")", "indexer", ",", "missing", "=", "self", ".", "get_indexer_non_unique", "(", "target", ")", "if", "preserve_names", "and", "target", ".", "nlevels", "==", "1", "and", "target", ".", "name", "!=", "self", ".", "name", ":", "target", "=", "target", ".", "copy", "(", ")", "target", ".", "name", "=", "self", ".", "name", "return", "target", ",", "indexer" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
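Unlike `Series.reindex`, `Index.reindex` returns both the new index and the positional indexer, as the docstring above notes; a short sketch:

import pandas as pd

idx = pd.Index(['a', 'b', 'c'])
new_index, indexer = idx.reindex(['b', 'd'])
print(new_index)  # Index(['b', 'd'], dtype='object')
print(indexer)    # array([ 1, -1]) -- 'd' is absent from the original index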
Index._reindex_non_unique
Create a new index with target's values (move/add/delete values as necessary); use with non-unique Index and a possibly non-unique target. Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray or None Indices of output values in original index.
pandas/core/indexes/base.py
def _reindex_non_unique(self, target): """ Create a new index with target's values (move/add/delete values as necessary); use with non-unique Index and a possibly non-unique target. Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray or None Indices of output values in original index. """ target = ensure_index(target) indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 new_labels = self.take(indexer[check]) new_indexer = None if len(missing): length = np.arange(len(indexer)) missing = ensure_platform_int(missing) missing_labels = target.take(missing) missing_indexer = ensure_int64(length[~check]) cur_labels = self.take(indexer[check]).values cur_indexer = ensure_int64(length[check]) new_labels = np.empty(tuple([len(indexer)]), dtype=object) new_labels[cur_indexer] = cur_labels new_labels[missing_indexer] = missing_labels # a unique indexer if target.is_unique: # see GH5553, make sure we use the right indexer new_indexer = np.arange(len(indexer)) new_indexer[cur_indexer] = np.arange(len(cur_labels)) new_indexer[missing_indexer] = -1 # we have a non_unique selector, need to use the original # indexer here else: # need to retake to have the same size as the indexer indexer[~check] = -1 # reset the new indexer to account for the new size new_indexer = np.arange(len(self.take(indexer))) new_indexer[~check] = -1 new_index = self._shallow_copy_with_infer(new_labels, freq=None) return new_index, indexer, new_indexer
def _reindex_non_unique(self, target): """ Create a new index with target's values (move/add/delete values as necessary); use with non-unique Index and a possibly non-unique target. Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray or None Indices of output values in original index. """ target = ensure_index(target) indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 new_labels = self.take(indexer[check]) new_indexer = None if len(missing): length = np.arange(len(indexer)) missing = ensure_platform_int(missing) missing_labels = target.take(missing) missing_indexer = ensure_int64(length[~check]) cur_labels = self.take(indexer[check]).values cur_indexer = ensure_int64(length[check]) new_labels = np.empty(tuple([len(indexer)]), dtype=object) new_labels[cur_indexer] = cur_labels new_labels[missing_indexer] = missing_labels # a unique indexer if target.is_unique: # see GH5553, make sure we use the right indexer new_indexer = np.arange(len(indexer)) new_indexer[cur_indexer] = np.arange(len(cur_labels)) new_indexer[missing_indexer] = -1 # we have a non_unique selector, need to use the original # indexer here else: # need to retake to have the same size as the indexer indexer[~check] = -1 # reset the new indexer to account for the new size new_indexer = np.arange(len(self.take(indexer))) new_indexer[~check] = -1 new_index = self._shallow_copy_with_infer(new_labels, freq=None) return new_index, indexer, new_indexer
[ "Create", "a", "new", "index", "with", "target", "s", "values", "(", "move", "/", "add", "/", "delete", "values", "as", "necessary", ")", "use", "with", "non", "-", "unique", "Index", "and", "a", "possibly", "non", "-", "unique", "target", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L3144-L3201
[ "def", "_reindex_non_unique", "(", "self", ",", "target", ")", ":", "target", "=", "ensure_index", "(", "target", ")", "indexer", ",", "missing", "=", "self", ".", "get_indexer_non_unique", "(", "target", ")", "check", "=", "indexer", "!=", "-", "1", "new_labels", "=", "self", ".", "take", "(", "indexer", "[", "check", "]", ")", "new_indexer", "=", "None", "if", "len", "(", "missing", ")", ":", "length", "=", "np", ".", "arange", "(", "len", "(", "indexer", ")", ")", "missing", "=", "ensure_platform_int", "(", "missing", ")", "missing_labels", "=", "target", ".", "take", "(", "missing", ")", "missing_indexer", "=", "ensure_int64", "(", "length", "[", "~", "check", "]", ")", "cur_labels", "=", "self", ".", "take", "(", "indexer", "[", "check", "]", ")", ".", "values", "cur_indexer", "=", "ensure_int64", "(", "length", "[", "check", "]", ")", "new_labels", "=", "np", ".", "empty", "(", "tuple", "(", "[", "len", "(", "indexer", ")", "]", ")", ",", "dtype", "=", "object", ")", "new_labels", "[", "cur_indexer", "]", "=", "cur_labels", "new_labels", "[", "missing_indexer", "]", "=", "missing_labels", "# a unique indexer", "if", "target", ".", "is_unique", ":", "# see GH5553, make sure we use the right indexer", "new_indexer", "=", "np", ".", "arange", "(", "len", "(", "indexer", ")", ")", "new_indexer", "[", "cur_indexer", "]", "=", "np", ".", "arange", "(", "len", "(", "cur_labels", ")", ")", "new_indexer", "[", "missing_indexer", "]", "=", "-", "1", "# we have a non_unique selector, need to use the original", "# indexer here", "else", ":", "# need to retake to have the same size as the indexer", "indexer", "[", "~", "check", "]", "=", "-", "1", "# reset the new indexer to account for the new size", "new_indexer", "=", "np", ".", "arange", "(", "len", "(", "self", ".", "take", "(", "indexer", ")", ")", ")", "new_indexer", "[", "~", "check", "]", "=", "-", "1", "new_index", "=", "self", ".", "_shallow_copy_with_infer", "(", "new_labels", ",", "freq", "=", "None", ")", "return", "new_index", ",", "indexer", ",", "new_indexer" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
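A sketch of one public operation that exercises this helper in this pandas vintage, label selection against a non-unique index (the internal routing is an implementation detail):

import pandas as pd

s = pd.Series([1, 2, 3], index=['a', 'a', 'b'])
print(s.loc[['a', 'b']])
# a    1
# a    2
# b    3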
Index._join_level
The join method *only* affects the level of the resulting MultiIndex. Otherwise it just exactly aligns the Index data to the labels of the level in the MultiIndex. If ```keep_order == True```, the order of the data indexed by the MultiIndex will not be changed; otherwise, it will tie out with `other`.
pandas/core/indexes/base.py
def _join_level(self, other, level, how='left', return_indexers=False, keep_order=True): """ The join method *only* affects the level of the resulting MultiIndex. Otherwise it just exactly aligns the Index data to the labels of the level in the MultiIndex. If ```keep_order == True```, the order of the data indexed by the MultiIndex will not be changed; otherwise, it will tie out with `other`. """ from .multi import MultiIndex def _get_leaf_sorter(labels): """ Returns sorter for the inner most level while preserving the order of higher levels. """ if labels[0].size == 0: return np.empty(0, dtype='int64') if len(labels) == 1: lab = ensure_int64(labels[0]) sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max()) return sorter # find indexers of beginning of each set of # same-key labels w.r.t all but last level tic = labels[0][:-1] != labels[0][1:] for lab in labels[1:-1]: tic |= lab[:-1] != lab[1:] starts = np.hstack(([True], tic, [True])).nonzero()[0] lab = ensure_int64(labels[-1]) return lib.get_level_sorter(lab, ensure_int64(starts)) if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): raise TypeError('Join on level between two MultiIndex objects ' 'is ambiguous') left, right = self, other flip_order = not isinstance(self, MultiIndex) if flip_order: left, right = right, left how = {'right': 'left', 'left': 'right'}.get(how, how) level = left._get_level_number(level) old_level = left.levels[level] if not right.is_unique: raise NotImplementedError('Index._join_level on non-unique index ' 'is not implemented') new_level, left_lev_indexer, right_lev_indexer = \ old_level.join(right, how=how, return_indexers=True) if left_lev_indexer is None: if keep_order or len(left) == 0: left_indexer = None join_index = left else: # sort the leaves left_indexer = _get_leaf_sorter(left.codes[:level + 1]) join_index = left[left_indexer] else: left_lev_indexer = ensure_int64(left_lev_indexer) rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level)) new_lev_codes = algos.take_nd(rev_indexer, left.codes[level], allow_fill=False) new_codes = list(left.codes) new_codes[level] = new_lev_codes new_levels = list(left.levels) new_levels[level] = new_level if keep_order: # just drop missing values. o.w. keep order left_indexer = np.arange(len(left), dtype=np.intp) mask = new_lev_codes != -1 if not mask.all(): new_codes = [lab[mask] for lab in new_codes] left_indexer = left_indexer[mask] else: # tie out the order with other if level == 0: # outer most level, take the fast route ngroups = 1 + new_lev_codes.max() left_indexer, counts = libalgos.groupsort_indexer( new_lev_codes, ngroups) # missing values are placed first; drop them! left_indexer = left_indexer[counts[0]:] new_codes = [lab[left_indexer] for lab in new_codes] else: # sort the leaves mask = new_lev_codes != -1 mask_all = mask.all() if not mask_all: new_codes = [lab[mask] for lab in new_codes] left_indexer = _get_leaf_sorter(new_codes[:level + 1]) new_codes = [lab[left_indexer] for lab in new_codes] # left_indexers are w.r.t masked frame. # reverse to original frame! if not mask_all: left_indexer = mask.nonzero()[0][left_indexer] join_index = MultiIndex(levels=new_levels, codes=new_codes, names=left.names, verify_integrity=False) if right_lev_indexer is not None: right_indexer = algos.take_nd(right_lev_indexer, join_index.codes[level], allow_fill=False) else: right_indexer = join_index.codes[level] if flip_order: left_indexer, right_indexer = right_indexer, left_indexer if return_indexers: left_indexer = (None if left_indexer is None else ensure_platform_int(left_indexer)) right_indexer = (None if right_indexer is None else ensure_platform_int(right_indexer)) return join_index, left_indexer, right_indexer else: return join_index
def _join_level(self, other, level, how='left', return_indexers=False, keep_order=True): """ The join method *only* affects the level of the resulting MultiIndex. Otherwise it just exactly aligns the Index data to the labels of the level in the MultiIndex. If ``keep_order == True``, the order of the data indexed by the MultiIndex will not be changed; otherwise, it will tie out with `other`. """ from .multi import MultiIndex def _get_leaf_sorter(labels): """ Returns sorter for the inner most level while preserving the order of higher levels. """ if labels[0].size == 0: return np.empty(0, dtype='int64') if len(labels) == 1: lab = ensure_int64(labels[0]) sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max()) return sorter # find indexers of beginning of each set of # same-key labels w.r.t all but last level tic = labels[0][:-1] != labels[0][1:] for lab in labels[1:-1]: tic |= lab[:-1] != lab[1:] starts = np.hstack(([True], tic, [True])).nonzero()[0] lab = ensure_int64(labels[-1]) return lib.get_level_sorter(lab, ensure_int64(starts)) if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): raise TypeError('Join on level between two MultiIndex objects ' 'is ambiguous') left, right = self, other flip_order = not isinstance(self, MultiIndex) if flip_order: left, right = right, left how = {'right': 'left', 'left': 'right'}.get(how, how) level = left._get_level_number(level) old_level = left.levels[level] if not right.is_unique: raise NotImplementedError('Index._join_level on non-unique index ' 'is not implemented') new_level, left_lev_indexer, right_lev_indexer = \ old_level.join(right, how=how, return_indexers=True) if left_lev_indexer is None: if keep_order or len(left) == 0: left_indexer = None join_index = left else: # sort the leaves left_indexer = _get_leaf_sorter(left.codes[:level + 1]) join_index = left[left_indexer] else: left_lev_indexer = ensure_int64(left_lev_indexer) rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level)) new_lev_codes = algos.take_nd(rev_indexer, left.codes[level], allow_fill=False) new_codes = list(left.codes) new_codes[level] = new_lev_codes new_levels = list(left.levels) new_levels[level] = new_level if keep_order: # just drop missing values. o.w. keep order left_indexer = np.arange(len(left), dtype=np.intp) mask = new_lev_codes != -1 if not mask.all(): new_codes = [lab[mask] for lab in new_codes] left_indexer = left_indexer[mask] else: # tie out the order with other if level == 0: # outer most level, take the fast route ngroups = 1 + new_lev_codes.max() left_indexer, counts = libalgos.groupsort_indexer( new_lev_codes, ngroups) # missing values are placed first; drop them! left_indexer = left_indexer[counts[0]:] new_codes = [lab[left_indexer] for lab in new_codes] else: # sort the leaves mask = new_lev_codes != -1 mask_all = mask.all() if not mask_all: new_codes = [lab[mask] for lab in new_codes] left_indexer = _get_leaf_sorter(new_codes[:level + 1]) new_codes = [lab[left_indexer] for lab in new_codes] # left_indexers are w.r.t masked frame. # reverse to original frame! if not mask_all: left_indexer = mask.nonzero()[0][left_indexer] join_index = MultiIndex(levels=new_levels, codes=new_codes, names=left.names, verify_integrity=False) if right_lev_indexer is not None: right_indexer = algos.take_nd(right_lev_indexer, join_index.codes[level], allow_fill=False) else: right_indexer = join_index.codes[level] if flip_order: left_indexer, right_indexer = right_indexer, left_indexer if return_indexers: left_indexer = (None if left_indexer is None else ensure_platform_int(left_indexer)) right_indexer = (None if right_indexer is None else ensure_platform_int(right_indexer)) return join_index, left_indexer, right_indexer else: return join_index
[ "The", "join", "method", "*", "only", "*", "affects", "the", "level", "of", "the", "resulting", "MultiIndex", ".", "Otherwise", "it", "just", "exactly", "aligns", "the", "Index", "data", "to", "the", "labels", "of", "the", "level", "in", "the", "MultiIndex", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L3420-L3550
[ "def", "_join_level", "(", "self", ",", "other", ",", "level", ",", "how", "=", "'left'", ",", "return_indexers", "=", "False", ",", "keep_order", "=", "True", ")", ":", "from", ".", "multi", "import", "MultiIndex", "def", "_get_leaf_sorter", "(", "labels", ")", ":", "\"\"\"\n Returns sorter for the inner most level while preserving the\n order of higher levels.\n \"\"\"", "if", "labels", "[", "0", "]", ".", "size", "==", "0", ":", "return", "np", ".", "empty", "(", "0", ",", "dtype", "=", "'int64'", ")", "if", "len", "(", "labels", ")", "==", "1", ":", "lab", "=", "ensure_int64", "(", "labels", "[", "0", "]", ")", "sorter", ",", "_", "=", "libalgos", ".", "groupsort_indexer", "(", "lab", ",", "1", "+", "lab", ".", "max", "(", ")", ")", "return", "sorter", "# find indexers of beginning of each set of", "# same-key labels w.r.t all but last level", "tic", "=", "labels", "[", "0", "]", "[", ":", "-", "1", "]", "!=", "labels", "[", "0", "]", "[", "1", ":", "]", "for", "lab", "in", "labels", "[", "1", ":", "-", "1", "]", ":", "tic", "|=", "lab", "[", ":", "-", "1", "]", "!=", "lab", "[", "1", ":", "]", "starts", "=", "np", ".", "hstack", "(", "(", "[", "True", "]", ",", "tic", ",", "[", "True", "]", ")", ")", ".", "nonzero", "(", ")", "[", "0", "]", "lab", "=", "ensure_int64", "(", "labels", "[", "-", "1", "]", ")", "return", "lib", ".", "get_level_sorter", "(", "lab", ",", "ensure_int64", "(", "starts", ")", ")", "if", "isinstance", "(", "self", ",", "MultiIndex", ")", "and", "isinstance", "(", "other", ",", "MultiIndex", ")", ":", "raise", "TypeError", "(", "'Join on level between two MultiIndex objects '", "'is ambiguous'", ")", "left", ",", "right", "=", "self", ",", "other", "flip_order", "=", "not", "isinstance", "(", "self", ",", "MultiIndex", ")", "if", "flip_order", ":", "left", ",", "right", "=", "right", ",", "left", "how", "=", "{", "'right'", ":", "'left'", ",", "'left'", ":", "'right'", "}", ".", "get", "(", "how", ",", "how", ")", "level", "=", "left", ".", "_get_level_number", "(", "level", ")", "old_level", "=", "left", ".", "levels", "[", "level", "]", "if", "not", "right", ".", "is_unique", ":", "raise", "NotImplementedError", "(", "'Index._join_level on non-unique index '", "'is not implemented'", ")", "new_level", ",", "left_lev_indexer", ",", "right_lev_indexer", "=", "old_level", ".", "join", "(", "right", ",", "how", "=", "how", ",", "return_indexers", "=", "True", ")", "if", "left_lev_indexer", "is", "None", ":", "if", "keep_order", "or", "len", "(", "left", ")", "==", "0", ":", "left_indexer", "=", "None", "join_index", "=", "left", "else", ":", "# sort the leaves", "left_indexer", "=", "_get_leaf_sorter", "(", "left", ".", "codes", "[", ":", "level", "+", "1", "]", ")", "join_index", "=", "left", "[", "left_indexer", "]", "else", ":", "left_lev_indexer", "=", "ensure_int64", "(", "left_lev_indexer", ")", "rev_indexer", "=", "lib", ".", "get_reverse_indexer", "(", "left_lev_indexer", ",", "len", "(", "old_level", ")", ")", "new_lev_codes", "=", "algos", ".", "take_nd", "(", "rev_indexer", ",", "left", ".", "codes", "[", "level", "]", ",", "allow_fill", "=", "False", ")", "new_codes", "=", "list", "(", "left", ".", "codes", ")", "new_codes", "[", "level", "]", "=", "new_lev_codes", "new_levels", "=", "list", "(", "left", ".", "levels", ")", "new_levels", "[", "level", "]", "=", "new_level", "if", "keep_order", ":", "# just drop missing values. o.w. 
keep order", "left_indexer", "=", "np", ".", "arange", "(", "len", "(", "left", ")", ",", "dtype", "=", "np", ".", "intp", ")", "mask", "=", "new_lev_codes", "!=", "-", "1", "if", "not", "mask", ".", "all", "(", ")", ":", "new_codes", "=", "[", "lab", "[", "mask", "]", "for", "lab", "in", "new_codes", "]", "left_indexer", "=", "left_indexer", "[", "mask", "]", "else", ":", "# tie out the order with other", "if", "level", "==", "0", ":", "# outer most level, take the fast route", "ngroups", "=", "1", "+", "new_lev_codes", ".", "max", "(", ")", "left_indexer", ",", "counts", "=", "libalgos", ".", "groupsort_indexer", "(", "new_lev_codes", ",", "ngroups", ")", "# missing values are placed first; drop them!", "left_indexer", "=", "left_indexer", "[", "counts", "[", "0", "]", ":", "]", "new_codes", "=", "[", "lab", "[", "left_indexer", "]", "for", "lab", "in", "new_codes", "]", "else", ":", "# sort the leaves", "mask", "=", "new_lev_codes", "!=", "-", "1", "mask_all", "=", "mask", ".", "all", "(", ")", "if", "not", "mask_all", ":", "new_codes", "=", "[", "lab", "[", "mask", "]", "for", "lab", "in", "new_codes", "]", "left_indexer", "=", "_get_leaf_sorter", "(", "new_codes", "[", ":", "level", "+", "1", "]", ")", "new_codes", "=", "[", "lab", "[", "left_indexer", "]", "for", "lab", "in", "new_codes", "]", "# left_indexers are w.r.t masked frame.", "# reverse to original frame!", "if", "not", "mask_all", ":", "left_indexer", "=", "mask", ".", "nonzero", "(", ")", "[", "0", "]", "[", "left_indexer", "]", "join_index", "=", "MultiIndex", "(", "levels", "=", "new_levels", ",", "codes", "=", "new_codes", ",", "names", "=", "left", ".", "names", ",", "verify_integrity", "=", "False", ")", "if", "right_lev_indexer", "is", "not", "None", ":", "right_indexer", "=", "algos", ".", "take_nd", "(", "right_lev_indexer", ",", "join_index", ".", "codes", "[", "level", "]", ",", "allow_fill", "=", "False", ")", "else", ":", "right_indexer", "=", "join_index", ".", "codes", "[", "level", "]", "if", "flip_order", ":", "left_indexer", ",", "right_indexer", "=", "right_indexer", ",", "left_indexer", "if", "return_indexers", ":", "left_indexer", "=", "(", "None", "if", "left_indexer", "is", "None", "else", "ensure_platform_int", "(", "left_indexer", ")", ")", "right_indexer", "=", "(", "None", "if", "right_indexer", "is", "None", "else", "ensure_platform_int", "(", "right_indexer", ")", ")", "return", "join_index", ",", "left_indexer", ",", "right_indexer", "else", ":", "return", "join_index" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
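A minimal usage sketch (index values illustrative): `_join_level` is private and is normally reached through `Index.join` with a `level=` argument when exactly one side is a MultiIndex.
>>> import pandas as pd
>>> midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]], names=['k1', 'k2'])
>>> flat = pd.Index(['b'], name='k1')
>>> joined, lidx, ridx = midx.join(flat, level='k1', how='left', return_indexers=True)  # dispatches to _join_level
>>> joined.names
FrozenList(['k1', 'k2'])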
train
Index._try_convert_to_int_index
Attempt to convert an array of data into an integer index. Parameters ---------- data : The data to convert. copy : Whether to copy the data or not. name : The name of the index returned. Returns ------- int_index : data converted to either an Int64Index or a UInt64Index Raises ------ ValueError if the conversion was not successful.
pandas/core/indexes/base.py
def _try_convert_to_int_index(cls, data, copy, name, dtype): """ Attempt to convert an array of data into an integer index. Parameters ---------- data : The data to convert. copy : Whether to copy the data or not. name : The name of the index returned. Returns ------- int_index : data converted to either an Int64Index or a UInt64Index Raises ------ ValueError if the conversion was not successful. """ from .numeric import Int64Index, UInt64Index if not is_unsigned_integer_dtype(dtype): # skip int64 conversion attempt if uint-like dtype is passed, as # this could return Int64Index when UInt64Index is what's desired try: res = data.astype('i8', copy=False) if (res == data).all(): return Int64Index(res, copy=copy, name=name) except (OverflowError, TypeError, ValueError): pass # Conversion to int64 failed (possibly due to overflow) or was skipped, # so let's try now with uint64. try: res = data.astype('u8', copy=False) if (res == data).all(): return UInt64Index(res, copy=copy, name=name) except (OverflowError, TypeError, ValueError): pass raise ValueError
def _try_convert_to_int_index(cls, data, copy, name, dtype): """ Attempt to convert an array of data into an integer index. Parameters ---------- data : The data to convert. copy : Whether to copy the data or not. name : The name of the index returned. Returns ------- int_index : data converted to either an Int64Index or a UInt64Index Raises ------ ValueError if the conversion was not successful. """ from .numeric import Int64Index, UInt64Index if not is_unsigned_integer_dtype(dtype): # skip int64 conversion attempt if uint-like dtype is passed, as # this could return Int64Index when UInt64Index is what's desired try: res = data.astype('i8', copy=False) if (res == data).all(): return Int64Index(res, copy=copy, name=name) except (OverflowError, TypeError, ValueError): pass # Conversion to int64 failed (possibly due to overflow) or was skipped, # so let's try now with uint64. try: res = data.astype('u8', copy=False) if (res == data).all(): return UInt64Index(res, copy=copy, name=name) except (OverflowError, TypeError, ValueError): pass raise ValueError
[ "Attempt", "to", "convert", "an", "array", "of", "data", "into", "an", "integer", "index", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L3746-L3786
[ "def", "_try_convert_to_int_index", "(", "cls", ",", "data", ",", "copy", ",", "name", ",", "dtype", ")", ":", "from", ".", "numeric", "import", "Int64Index", ",", "UInt64Index", "if", "not", "is_unsigned_integer_dtype", "(", "dtype", ")", ":", "# skip int64 conversion attempt if uint-like dtype is passed, as", "# this could return Int64Index when UInt64Index is what's desrired", "try", ":", "res", "=", "data", ".", "astype", "(", "'i8'", ",", "copy", "=", "False", ")", "if", "(", "res", "==", "data", ")", ".", "all", "(", ")", ":", "return", "Int64Index", "(", "res", ",", "copy", "=", "copy", ",", "name", "=", "name", ")", "except", "(", "OverflowError", ",", "TypeError", ",", "ValueError", ")", ":", "pass", "# Conversion to int64 failed (possibly due to overflow) or was skipped,", "# so let's try now with uint64.", "try", ":", "res", "=", "data", ".", "astype", "(", "'u8'", ",", "copy", "=", "False", ")", "if", "(", "res", "==", "data", ")", ".", "all", "(", ")", ":", "return", "UInt64Index", "(", "res", ",", "copy", "=", "copy", ",", "name", "=", "name", ")", "except", "(", "OverflowError", ",", "TypeError", ",", "ValueError", ")", ":", "pass", "raise", "ValueError" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
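The integer round-trip test this helper relies on can be sketched with plain NumPy (values illustrative): exactly-integral floats survive the cast-and-compare check, fractional ones do not.
>>> import numpy as np
>>> exact = np.array([1.0, 2.0, 3.0])
>>> (exact.astype('i8') == exact).all()
True
>>> fractional = np.array([1.5, 2.0])
>>> (fractional.astype('i8') == fractional).all()
False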
train
Index._coerce_to_ndarray
Coerces data to ndarray. Converts other iterables to list first and then to array. Does not touch ndarrays. Raises ------ TypeError When the data passed in is a scalar.
pandas/core/indexes/base.py
def _coerce_to_ndarray(cls, data): """ Coerces data to ndarray. Converts other iterables to list first and then to array. Does not touch ndarrays. Raises ------ TypeError When the data passed in is a scalar. """ if not isinstance(data, (np.ndarray, Index)): if data is None or is_scalar(data): cls._scalar_data_error(data) # other iterable of some kind if not isinstance(data, (ABCSeries, list, tuple)): data = list(data) data = np.asarray(data) return data
def _coerce_to_ndarray(cls, data): """ Coerces data to ndarray. Converts other iterables to list first and then to array. Does not touch ndarrays. Raises ------ TypeError When the data passed in is a scalar. """ if not isinstance(data, (np.ndarray, Index)): if data is None or is_scalar(data): cls._scalar_data_error(data) # other iterable of some kind if not isinstance(data, (ABCSeries, list, tuple)): data = list(data) data = np.asarray(data) return data
[ "Coerces", "data", "to", "ndarray", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L3800-L3821
[ "def", "_coerce_to_ndarray", "(", "cls", ",", "data", ")", ":", "if", "not", "isinstance", "(", "data", ",", "(", "np", ".", "ndarray", ",", "Index", ")", ")", ":", "if", "data", "is", "None", "or", "is_scalar", "(", "data", ")", ":", "cls", ".", "_scalar_data_error", "(", "data", ")", "# other iterable of some kind", "if", "not", "isinstance", "(", "data", ",", "(", "ABCSeries", ",", "list", ",", "tuple", ")", ")", ":", "data", "=", "list", "(", "data", ")", "data", "=", "np", ".", "asarray", "(", "data", ")", "return", "data" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
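A small sketch of the coercion path (values illustrative): one-shot iterables such as generators are materialized into a list before `np.asarray`, so the result is a proper 1-D array rather than a 0-d object array.
>>> import numpy as np
>>> np.asarray(list(i * 2 for i in range(3)))
array([0, 2, 4])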
train
Index._coerce_scalar_to_index
We need to coerce a scalar to a compat for our index type. Parameters ---------- item : scalar item to coerce
pandas/core/indexes/base.py
def _coerce_scalar_to_index(self, item): """ We need to coerce a scalar to a compat for our index type. Parameters ---------- item : scalar item to coerce """ dtype = self.dtype if self._is_numeric_dtype and isna(item): # We can't coerce to the numeric dtype of "self" (unless # it's float) if there are NaN values in our output. dtype = None return Index([item], dtype=dtype, **self._get_attributes_dict())
def _coerce_scalar_to_index(self, item): """ We need to coerce a scalar to a compat for our index type. Parameters ---------- item : scalar item to coerce """ dtype = self.dtype if self._is_numeric_dtype and isna(item): # We can't coerce to the numeric dtype of "self" (unless # it's float) if there are NaN values in our output. dtype = None return Index([item], dtype=dtype, **self._get_attributes_dict())
[ "We", "need", "to", "coerce", "a", "scalar", "to", "a", "compat", "for", "our", "index", "type", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L3823-L3838
[ "def", "_coerce_scalar_to_index", "(", "self", ",", "item", ")", ":", "dtype", "=", "self", ".", "dtype", "if", "self", ".", "_is_numeric_dtype", "and", "isna", "(", "item", ")", ":", "# We can't coerce to the numeric dtype of \"self\" (unless", "# it's float) if there are NaN values in our output.", "dtype", "=", "None", "return", "Index", "(", "[", "item", "]", ",", "dtype", "=", "dtype", ",", "*", "*", "self", ".", "_get_attributes_dict", "(", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
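A sketch of the NaN branch (calling the private method directly, for illustration only): a missing value on a numeric index leaves `dtype=None`, so the result is re-inferred as a float index.
>>> import numpy as np
>>> import pandas as pd
>>> coerced = pd.Index([1, 2, 3])._coerce_scalar_to_index(np.nan)
>>> coerced.dtype
dtype('float64')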
train
Index._assert_can_do_op
Check value is valid for scalar op.
pandas/core/indexes/base.py
def _assert_can_do_op(self, value): """ Check value is valid for scalar op. """ if not is_scalar(value): msg = "'value' must be a scalar, passed: {0}" raise TypeError(msg.format(type(value).__name__))
def _assert_can_do_op(self, value): """ Check value is valid for scalar op. """ if not is_scalar(value): msg = "'value' must be a scalar, passed: {0}" raise TypeError(msg.format(type(value).__name__))
[ "Check", "value", "is", "valid", "for", "scalar", "op", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L3852-L3858
[ "def", "_assert_can_do_op", "(", "self", ",", "value", ")", ":", "if", "not", "is_scalar", "(", "value", ")", ":", "msg", "=", "\"'value' must be a scalar, passed: {0}\"", "raise", "TypeError", "(", "msg", ".", "format", "(", "type", "(", "value", ")", ".", "__name__", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
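A sketch of the error this validator raises (private helper, called directly here only to show the message format):
>>> import pandas as pd
>>> try:
...     pd.Index([1, 2])._assert_can_do_op([3, 4])
... except TypeError as err:
...     print(err)
'value' must be a scalar, passed: list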
train
Index._can_hold_identifiers_and_holds_name
Faster check for ``name in self`` when we know `name` is a Python identifier (e.g. in NDFrame.__getattr__, which hits this to support . key lookup). For indexes that can't hold identifiers (everything but object & categorical) we just return False. https://github.com/pandas-dev/pandas/issues/19764
pandas/core/indexes/base.py
def _can_hold_identifiers_and_holds_name(self, name): """ Faster check for ``name in self`` when we know `name` is a Python identifier (e.g. in NDFrame.__getattr__, which hits this to support . key lookup). For indexes that can't hold identifiers (everything but object & categorical) we just return False. https://github.com/pandas-dev/pandas/issues/19764 """ if self.is_object() or self.is_categorical(): return name in self return False
def _can_hold_identifiers_and_holds_name(self, name): """ Faster check for ``name in self`` when we know `name` is a Python identifier (e.g. in NDFrame.__getattr__, which hits this to support . key lookup). For indexes that can't hold identifiers (everything but object & categorical) we just return False. https://github.com/pandas-dev/pandas/issues/19764 """ if self.is_object() or self.is_categorical(): return name in self return False
[ "Faster", "check", "for", "name", "in", "self", "when", "we", "know", "name", "is", "a", "Python", "identifier", "(", "e", ".", "g", ".", "in", "NDFrame", ".", "__getattr__", "which", "hits", "this", "to", "support", ".", "key", "lookup", ")", ".", "For", "indexes", "that", "can", "t", "hold", "identifiers", "(", "everything", "but", "object", "&", "categorical", ")", "we", "just", "return", "False", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L3968-L3979
[ "def", "_can_hold_identifiers_and_holds_name", "(", "self", ",", "name", ")", ":", "if", "self", ".", "is_object", "(", ")", "or", "self", ".", "is_categorical", "(", ")", ":", "return", "name", "in", "self", "return", "False" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
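This check underpins attribute-style column lookup; a sketch (column name illustrative):
>>> import pandas as pd
>>> df = pd.DataFrame({'price': [1.0]})
>>> df.columns._can_hold_identifiers_and_holds_name('price')
True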
train
Index.append
Append a collection of Index objects together. Parameters ---------- other : Index or list/tuple of indices Returns ------- appended : Index
pandas/core/indexes/base.py
def append(self, other): """ Append a collection of Index objects together. Parameters ---------- other : Index or list/tuple of indices Returns ------- appended : Index """ to_concat = [self] if isinstance(other, (list, tuple)): to_concat = to_concat + list(other) else: to_concat.append(other) for obj in to_concat: if not isinstance(obj, Index): raise TypeError('all inputs must be Index') names = {obj.name for obj in to_concat} name = None if len(names) > 1 else self.name return self._concat(to_concat, name)
def append(self, other): """ Append a collection of Index objects together. Parameters ---------- other : Index or list/tuple of indices Returns ------- appended : Index """ to_concat = [self] if isinstance(other, (list, tuple)): to_concat = to_concat + list(other) else: to_concat.append(other) for obj in to_concat: if not isinstance(obj, Index): raise TypeError('all inputs must be Index') names = {obj.name for obj in to_concat} name = None if len(names) > 1 else self.name return self._concat(to_concat, name)
[ "Append", "a", "collection", "of", "Index", "options", "together", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L3981-L4008
[ "def", "append", "(", "self", ",", "other", ")", ":", "to_concat", "=", "[", "self", "]", "if", "isinstance", "(", "other", ",", "(", "list", ",", "tuple", ")", ")", ":", "to_concat", "=", "to_concat", "+", "list", "(", "other", ")", "else", ":", "to_concat", ".", "append", "(", "other", ")", "for", "obj", "in", "to_concat", ":", "if", "not", "isinstance", "(", "obj", ",", "Index", ")", ":", "raise", "TypeError", "(", "'all inputs must be Index'", ")", "names", "=", "{", "obj", ".", "name", "for", "obj", "in", "to_concat", "}", "name", "=", "None", "if", "len", "(", "names", ")", ">", "1", "else", "self", ".", "name", "return", "self", ".", "_concat", "(", "to_concat", ",", "name", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
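A minimal usage sketch; the repr shown assumes the pandas vintage of this snapshot, where integer indexes print as Int64Index.
>>> import pandas as pd
>>> pd.Index([1, 2]).append(pd.Index([3, 4]))
Int64Index([1, 2, 3, 4], dtype='int64')
>>> pd.Index([1], name='a').append(pd.Index([2], name='b')).name is None  # conflicting names are dropped
True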
train
Index.putmask
Return a new Index of the values set with the mask. See Also -------- numpy.ndarray.putmask
pandas/core/indexes/base.py
def putmask(self, mask, value): """ Return a new Index of the values set with the mask. See Also -------- numpy.ndarray.putmask """ values = self.values.copy() try: np.putmask(values, mask, self._convert_for_op(value)) return self._shallow_copy(values) except (ValueError, TypeError) as err: if is_object_dtype(self): raise err # coerces to object return self.astype(object).putmask(mask, value)
def putmask(self, mask, value): """ Return a new Index of the values set with the mask. See Also -------- numpy.ndarray.putmask """ values = self.values.copy() try: np.putmask(values, mask, self._convert_for_op(value)) return self._shallow_copy(values) except (ValueError, TypeError) as err: if is_object_dtype(self): raise err # coerces to object return self.astype(object).putmask(mask, value)
[ "Return", "a", "new", "Index", "of", "the", "values", "set", "with", "the", "mask", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4025-L4042
[ "def", "putmask", "(", "self", ",", "mask", ",", "value", ")", ":", "values", "=", "self", ".", "values", ".", "copy", "(", ")", "try", ":", "np", ".", "putmask", "(", "values", ",", "mask", ",", "self", ".", "_convert_for_op", "(", "value", ")", ")", "return", "self", ".", "_shallow_copy", "(", "values", ")", "except", "(", "ValueError", ",", "TypeError", ")", "as", "err", ":", "if", "is_object_dtype", "(", "self", ")", ":", "raise", "err", "# coerces to object", "return", "self", ".", "astype", "(", "object", ")", ".", "putmask", "(", "mask", ",", "value", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
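A minimal usage sketch (mask and fill value illustrative; Int64Index repr as in the pandas vintage of this snapshot):
>>> import numpy as np
>>> import pandas as pd
>>> pd.Index([1, 2, 3, 4]).putmask(np.array([True, False, True, False]), 99)
Int64Index([99, 2, 99, 4], dtype='int64')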
train
Index.equals
Determine if two Index objects contain the same elements.
pandas/core/indexes/base.py
def equals(self, other): """ Determine if two Index objects contain the same elements. """ if self.is_(other): return True if not isinstance(other, Index): return False if is_object_dtype(self) and not is_object_dtype(other): # if other is not object, use other's logic for coercion return other.equals(self) try: return array_equivalent(com.values_from_object(self), com.values_from_object(other)) except Exception: return False
def equals(self, other): """ Determine if two Index objects contain the same elements. """ if self.is_(other): return True if not isinstance(other, Index): return False if is_object_dtype(self) and not is_object_dtype(other): # if other is not object, use other's logic for coercion return other.equals(self) try: return array_equivalent(com.values_from_object(self), com.values_from_object(other)) except Exception: return False
[ "Determine", "if", "two", "Index", "objects", "contain", "the", "same", "elements", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4044-L4062
[ "def", "equals", "(", "self", ",", "other", ")", ":", "if", "self", ".", "is_", "(", "other", ")", ":", "return", "True", "if", "not", "isinstance", "(", "other", ",", "Index", ")", ":", "return", "False", "if", "is_object_dtype", "(", "self", ")", "and", "not", "is_object_dtype", "(", "other", ")", ":", "# if other is not object, use other's logic for coercion", "return", "other", ".", "equals", "(", "self", ")", "try", ":", "return", "array_equivalent", "(", "com", ".", "values_from_object", "(", "self", ")", ",", "com", ".", "values_from_object", "(", "other", ")", ")", "except", "Exception", ":", "return", "False" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
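equals compares elements only, so dtype differences are ignored while length differences are not; a sketch:
>>> import pandas as pd
>>> pd.Index([1, 2, 3]).equals(pd.Index([1.0, 2.0, 3.0]))
True
>>> pd.Index([1, 2, 3]).equals(pd.Index([1, 2]))
False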
train
Index.identical
Similar to equals, but check that other comparable attributes are also equal.
pandas/core/indexes/base.py
def identical(self, other): """ Similar to equals, but check that other comparable attributes are also equal. """ return (self.equals(other) and all((getattr(self, c, None) == getattr(other, c, None) for c in self._comparables)) and type(self) == type(other))
def identical(self, other): """ Similar to equals, but check that other comparable attributes are also equal. """ return (self.equals(other) and all((getattr(self, c, None) == getattr(other, c, None) for c in self._comparables)) and type(self) == type(other))
[ "Similar", "to", "equals", "but", "check", "that", "other", "comparable", "attributes", "are", "also", "equal", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4064-L4072
[ "def", "identical", "(", "self", ",", "other", ")", ":", "return", "(", "self", ".", "equals", "(", "other", ")", "and", "all", "(", "(", "getattr", "(", "self", ",", "c", ",", "None", ")", "==", "getattr", "(", "other", ",", "c", ",", "None", ")", "for", "c", "in", "self", ".", "_comparables", ")", ")", "and", "type", "(", "self", ")", "==", "type", "(", "other", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
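A sketch of the contrast with equals: identical also compares attributes such as the name.
>>> import pandas as pd
>>> i1 = pd.Index([1, 2, 3], name='x')
>>> i2 = pd.Index([1, 2, 3], name='y')
>>> i1.equals(i2)
True
>>> i1.identical(i2)
False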
train
Index.asof
Return the label from the index, or, if not present, the previous one. Assuming that the index is sorted, return the passed index label if it is in the index, or return the previous index label if the passed one is not in the index. Parameters ---------- label : object The label up to which the method returns the latest index label. Returns ------- object The passed label if it is in the index. The previous label if the passed label is not in the sorted index or `NaN` if there is no such label. See Also -------- Series.asof : Return the latest value in a Series up to the passed index. merge_asof : Perform an asof merge (similar to left join but it matches on nearest key rather than equal key). Index.get_loc : An `asof` is a thin wrapper around `get_loc` with method='pad'. Examples -------- `Index.asof` returns the latest index label up to the passed label. >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03']) >>> idx.asof('2014-01-01') '2013-12-31' If the label is in the index, the method returns the passed label. >>> idx.asof('2014-01-02') '2014-01-02' If all of the labels in the index are later than the passed label, NaN is returned. >>> idx.asof('1999-01-02') nan If the index is not sorted, an error is raised. >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02', ... '2014-01-03']) >>> idx_not_sorted.asof('2013-12-31') Traceback (most recent call last): ValueError: index must be monotonic increasing or decreasing
pandas/core/indexes/base.py
def asof(self, label): """ Return the label from the index, or, if not present, the previous one. Assuming that the index is sorted, return the passed index label if it is in the index, or return the previous index label if the passed one is not in the index. Parameters ---------- label : object The label up to which the method returns the latest index label. Returns ------- object The passed label if it is in the index. The previous label if the passed label is not in the sorted index or `NaN` if there is no such label. See Also -------- Series.asof : Return the latest value in a Series up to the passed index. merge_asof : Perform an asof merge (similar to left join but it matches on nearest key rather than equal key). Index.get_loc : An `asof` is a thin wrapper around `get_loc` with method='pad'. Examples -------- `Index.asof` returns the latest index label up to the passed label. >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03']) >>> idx.asof('2014-01-01') '2013-12-31' If the label is in the index, the method returns the passed label. >>> idx.asof('2014-01-02') '2014-01-02' If all of the labels in the index are later than the passed label, NaN is returned. >>> idx.asof('1999-01-02') nan If the index is not sorted, an error is raised. >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02', ... '2014-01-03']) >>> idx_not_sorted.asof('2013-12-31') Traceback (most recent call last): ValueError: index must be monotonic increasing or decreasing """ try: loc = self.get_loc(label, method='pad') except KeyError: return self._na_value else: if isinstance(loc, slice): loc = loc.indices(len(self))[-1] return self[loc]
def asof(self, label): """ Return the label from the index, or, if not present, the previous one. Assuming that the index is sorted, return the passed index label if it is in the index, or return the previous index label if the passed one is not in the index. Parameters ---------- label : object The label up to which the method returns the latest index label. Returns ------- object The passed label if it is in the index. The previous label if the passed label is not in the sorted index or `NaN` if there is no such label. See Also -------- Series.asof : Return the latest value in a Series up to the passed index. merge_asof : Perform an asof merge (similar to left join but it matches on nearest key rather than equal key). Index.get_loc : An `asof` is a thin wrapper around `get_loc` with method='pad'. Examples -------- `Index.asof` returns the latest index label up to the passed label. >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03']) >>> idx.asof('2014-01-01') '2013-12-31' If the label is in the index, the method returns the passed label. >>> idx.asof('2014-01-02') '2014-01-02' If all of the labels in the index are later than the passed label, NaN is returned. >>> idx.asof('1999-01-02') nan If the index is not sorted, an error is raised. >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02', ... '2014-01-03']) >>> idx_not_sorted.asof('2013-12-31') Traceback (most recent call last): ValueError: index must be monotonic increasing or decreasing """ try: loc = self.get_loc(label, method='pad') except KeyError: return self._na_value else: if isinstance(loc, slice): loc = loc.indices(len(self))[-1] return self[loc]
[ "Return", "the", "label", "from", "the", "index", "or", "if", "not", "present", "the", "previous", "one", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4074-L4137
[ "def", "asof", "(", "self", ",", "label", ")", ":", "try", ":", "loc", "=", "self", ".", "get_loc", "(", "label", ",", "method", "=", "'pad'", ")", "except", "KeyError", ":", "return", "self", ".", "_na_value", "else", ":", "if", "isinstance", "(", "loc", ",", "slice", ")", ":", "loc", "=", "loc", ".", "indices", "(", "len", "(", "self", ")", ")", "[", "-", "1", "]", "return", "self", "[", "loc", "]" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Index.asof_locs
Find the locations (indices) of the labels from the index for every entry in the `where` argument. As in the `asof` function, if the label (a particular entry in `where`) is not in the index, the latest index label up to the passed label is chosen and its index returned. If all of the labels in the index are later than a label in `where`, -1 is returned. `mask` is used to ignore NA values in the index during calculation. Parameters ---------- where : Index An Index consisting of an array of timestamps. mask : array-like Array of booleans denoting where values in the original data are not NA. Returns ------- numpy.ndarray An array of locations (indices) of the labels from the Index which correspond to the return values of the `asof` function for every element in `where`.
pandas/core/indexes/base.py
def asof_locs(self, where, mask): """ Find the locations (indices) of the labels from the index for every entry in the `where` argument. As in the `asof` function, if the label (a particular entry in `where`) is not in the index, the latest index label up to the passed label is chosen and its index returned. If all of the labels in the index are later than a label in `where`, -1 is returned. `mask` is used to ignore NA values in the index during calculation. Parameters ---------- where : Index An Index consisting of an array of timestamps. mask : array-like Array of booleans denoting where values in the original data are not NA. Returns ------- numpy.ndarray An array of locations (indices) of the labels from the Index which correspond to the return values of the `asof` function for every element in `where`. """ locs = self.values[mask].searchsorted(where.values, side='right') locs = np.where(locs > 0, locs - 1, 0) result = np.arange(len(self))[mask].take(locs) first = mask.argmax() result[(locs == 0) & (where.values < self.values[first])] = -1 return result
def asof_locs(self, where, mask): """ Find the locations (indices) of the labels from the index for every entry in the `where` argument. As in the `asof` function, if the label (a particular entry in `where`) is not in the index, the latest index label up to the passed label is chosen and its index returned. If all of the labels in the index are later than a label in `where`, -1 is returned. `mask` is used to ignore NA values in the index during calculation. Parameters ---------- where : Index An Index consisting of an array of timestamps. mask : array-like Array of booleans denoting where values in the original data are not NA. Returns ------- numpy.ndarray An array of locations (indices) of the labels from the Index which correspond to the return values of the `asof` function for every element in `where`. """ locs = self.values[mask].searchsorted(where.values, side='right') locs = np.where(locs > 0, locs - 1, 0) result = np.arange(len(self))[mask].take(locs) first = mask.argmax() result[(locs == 0) & (where.values < self.values[first])] = -1 return result
[ "Find", "the", "locations", "(", "indices", ")", "of", "the", "labels", "from", "the", "index", "for", "every", "entry", "in", "the", "where", "argument", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4139-L4176
[ "def", "asof_locs", "(", "self", ",", "where", ",", "mask", ")", ":", "locs", "=", "self", ".", "values", "[", "mask", "]", ".", "searchsorted", "(", "where", ".", "values", ",", "side", "=", "'right'", ")", "locs", "=", "np", ".", "where", "(", "locs", ">", "0", ",", "locs", "-", "1", ",", "0", ")", "result", "=", "np", ".", "arange", "(", "len", "(", "self", ")", ")", "[", "mask", "]", ".", "take", "(", "locs", ")", "first", "=", "mask", ".", "argmax", "(", ")", "result", "[", "(", "locs", "==", "0", ")", "&", "(", "where", ".", "values", "<", "self", ".", "values", "[", "first", "]", ")", "]", "=", "-", "1", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
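A worked sketch of the search logic (values illustrative): entries of `where` that precede the first index value map to -1; the rest map to the position of the most recent label.
>>> import numpy as np
>>> import pandas as pd
>>> idx = pd.Index([10, 20, 30])
>>> idx.asof_locs(pd.Index([5, 20, 25]), np.array([True, True, True]))
array([-1,  1,  1])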
train
Index.sort_values
Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. Returns ------- sorted_index : pandas.Index Sorted copy of the index. indexer : numpy.ndarray, optional The indices that the index itself was sorted by. See Also -------- Series.sort_values : Sort values of a Series. DataFrame.sort_values : Sort values in a DataFrame. Examples -------- >>> idx = pd.Index([10, 100, 1, 1000]) >>> idx Int64Index([10, 100, 1, 1000], dtype='int64') Sort values in ascending order (default behavior). >>> idx.sort_values() Int64Index([1, 10, 100, 1000], dtype='int64') Sort values in descending order, and also get the indices `idx` was sorted by. >>> idx.sort_values(ascending=False, return_indexer=True) (Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))
pandas/core/indexes/base.py
def sort_values(self, return_indexer=False, ascending=True): """ Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. Returns ------- sorted_index : pandas.Index Sorted copy of the index. indexer : numpy.ndarray, optional The indices that the index itself was sorted by. See Also -------- Series.sort_values : Sort values of a Series. DataFrame.sort_values : Sort values in a DataFrame. Examples -------- >>> idx = pd.Index([10, 100, 1, 1000]) >>> idx Int64Index([10, 100, 1, 1000], dtype='int64') Sort values in ascending order (default behavior). >>> idx.sort_values() Int64Index([1, 10, 100, 1000], dtype='int64') Sort values in descending order, and also get the indices `idx` was sorted by. >>> idx.sort_values(ascending=False, return_indexer=True) (Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2])) """ _as = self.argsort() if not ascending: _as = _as[::-1] sorted_index = self.take(_as) if return_indexer: return sorted_index, _as else: return sorted_index
def sort_values(self, return_indexer=False, ascending=True): """ Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. Returns ------- sorted_index : pandas.Index Sorted copy of the index. indexer : numpy.ndarray, optional The indices that the index itself was sorted by. See Also -------- Series.sort_values : Sort values of a Series. DataFrame.sort_values : Sort values in a DataFrame. Examples -------- >>> idx = pd.Index([10, 100, 1, 1000]) >>> idx Int64Index([10, 100, 1, 1000], dtype='int64') Sort values in ascending order (default behavior). >>> idx.sort_values() Int64Index([1, 10, 100, 1000], dtype='int64') Sort values in descending order, and also get the indices `idx` was sorted by. >>> idx.sort_values(ascending=False, return_indexer=True) (Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2])) """ _as = self.argsort() if not ascending: _as = _as[::-1] sorted_index = self.take(_as) if return_indexer: return sorted_index, _as else: return sorted_index
[ "Return", "a", "sorted", "copy", "of", "the", "index", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4178-L4230
[ "def", "sort_values", "(", "self", ",", "return_indexer", "=", "False", ",", "ascending", "=", "True", ")", ":", "_as", "=", "self", ".", "argsort", "(", ")", "if", "not", "ascending", ":", "_as", "=", "_as", "[", ":", ":", "-", "1", "]", "sorted_index", "=", "self", ".", "take", "(", "_as", ")", "if", "return_indexer", ":", "return", "sorted_index", ",", "_as", "else", ":", "return", "sorted_index" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Index.argsort
Return the integer indices that would sort the index. Parameters ---------- *args Passed to `numpy.ndarray.argsort`. **kwargs Passed to `numpy.ndarray.argsort`. Returns ------- numpy.ndarray Integer indices that would sort the index if used as an indexer. See Also -------- numpy.argsort : Similar method for NumPy arrays. Index.sort_values : Return sorted copy of Index. Examples -------- >>> idx = pd.Index(['b', 'a', 'd', 'c']) >>> idx Index(['b', 'a', 'd', 'c'], dtype='object') >>> order = idx.argsort() >>> order array([1, 0, 3, 2]) >>> idx[order] Index(['a', 'b', 'c', 'd'], dtype='object')
pandas/core/indexes/base.py
def argsort(self, *args, **kwargs): """ Return the integer indices that would sort the index. Parameters ---------- *args Passed to `numpy.ndarray.argsort`. **kwargs Passed to `numpy.ndarray.argsort`. Returns ------- numpy.ndarray Integer indices that would sort the index if used as an indexer. See Also -------- numpy.argsort : Similar method for NumPy arrays. Index.sort_values : Return sorted copy of Index. Examples -------- >>> idx = pd.Index(['b', 'a', 'd', 'c']) >>> idx Index(['b', 'a', 'd', 'c'], dtype='object') >>> order = idx.argsort() >>> order array([1, 0, 3, 2]) >>> idx[order] Index(['a', 'b', 'c', 'd'], dtype='object') """ result = self.asi8 if result is None: result = np.array(self) return result.argsort(*args, **kwargs)
def argsort(self, *args, **kwargs): """ Return the integer indices that would sort the index. Parameters ---------- *args Passed to `numpy.ndarray.argsort`. **kwargs Passed to `numpy.ndarray.argsort`. Returns ------- numpy.ndarray Integer indices that would sort the index if used as an indexer. See Also -------- numpy.argsort : Similar method for NumPy arrays. Index.sort_values : Return sorted copy of Index. Examples -------- >>> idx = pd.Index(['b', 'a', 'd', 'c']) >>> idx Index(['b', 'a', 'd', 'c'], dtype='object') >>> order = idx.argsort() >>> order array([1, 0, 3, 2]) >>> idx[order] Index(['a', 'b', 'c', 'd'], dtype='object') """ result = self.asi8 if result is None: result = np.array(self) return result.argsort(*args, **kwargs)
[ "Return", "the", "integer", "indices", "that", "would", "sort", "the", "index", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4295-L4333
[ "def", "argsort", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "result", "=", "self", ".", "asi8", "if", "result", "is", "None", ":", "result", "=", "np", ".", "array", "(", "self", ")", "return", "result", ".", "argsort", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Index.get_value
Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing.
pandas/core/indexes/base.py
def get_value(self, series, key): """ Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing. """ # if we have something that is Index-like, then # use this, e.g. DatetimeIndex # Things like `Series._get_value` (via .at) pass the EA directly here. s = getattr(series, '_values', series) if isinstance(s, (ExtensionArray, Index)) and is_scalar(key): # GH 20882, 21257 # Unify Index and ExtensionArray treatment # First try to convert the key to a location # If that fails, raise a KeyError if an integer # index, otherwise, see if key is an integer, and # try that try: iloc = self.get_loc(key) return s[iloc] except KeyError: if (len(self) > 0 and (self.holds_integer() or self.is_boolean())): raise elif is_integer(key): return s[key] s = com.values_from_object(series) k = com.values_from_object(key) k = self._convert_scalar_indexer(k, kind='getitem') try: return self._engine.get_value(s, k, tz=getattr(series.dtype, 'tz', None)) except KeyError as e1: if len(self) > 0 and (self.holds_integer() or self.is_boolean()): raise try: return libindex.get_value_box(s, key) except IndexError: raise except TypeError: # generator/iterator-like if is_iterator(key): raise InvalidIndexError(key) else: raise e1 except Exception: # pragma: no cover raise e1 except TypeError: # python 3 if is_scalar(key): # pragma: no cover raise IndexError(key) raise InvalidIndexError(key)
def get_value(self, series, key): """ Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing. """ # if we have something that is Index-like, then # use this, e.g. DatetimeIndex # Things like `Series._get_value` (via .at) pass the EA directly here. s = getattr(series, '_values', series) if isinstance(s, (ExtensionArray, Index)) and is_scalar(key): # GH 20882, 21257 # Unify Index and ExtensionArray treatment # First try to convert the key to a location # If that fails, raise a KeyError if an integer # index, otherwise, see if key is an integer, and # try that try: iloc = self.get_loc(key) return s[iloc] except KeyError: if (len(self) > 0 and (self.holds_integer() or self.is_boolean())): raise elif is_integer(key): return s[key] s = com.values_from_object(series) k = com.values_from_object(key) k = self._convert_scalar_indexer(k, kind='getitem') try: return self._engine.get_value(s, k, tz=getattr(series.dtype, 'tz', None)) except KeyError as e1: if len(self) > 0 and (self.holds_integer() or self.is_boolean()): raise try: return libindex.get_value_box(s, key) except IndexError: raise except TypeError: # generator/iterator-like if is_iterator(key): raise InvalidIndexError(key) else: raise e1 except Exception: # pragma: no cover raise e1 except TypeError: # python 3 if is_scalar(key): # pragma: no cover raise IndexError(key) raise InvalidIndexError(key)
[ "Fast", "lookup", "of", "value", "from", "1", "-", "dimensional", "ndarray", ".", "Only", "use", "this", "if", "you", "know", "what", "you", "re", "doing", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4335-L4389
[ "def", "get_value", "(", "self", ",", "series", ",", "key", ")", ":", "# if we have something that is Index-like, then", "# use this, e.g. DatetimeIndex", "# Things like `Series._get_value` (via .at) pass the EA directly here.", "s", "=", "getattr", "(", "series", ",", "'_values'", ",", "series", ")", "if", "isinstance", "(", "s", ",", "(", "ExtensionArray", ",", "Index", ")", ")", "and", "is_scalar", "(", "key", ")", ":", "# GH 20882, 21257", "# Unify Index and ExtensionArray treatment", "# First try to convert the key to a location", "# If that fails, raise a KeyError if an integer", "# index, otherwise, see if key is an integer, and", "# try that", "try", ":", "iloc", "=", "self", ".", "get_loc", "(", "key", ")", "return", "s", "[", "iloc", "]", "except", "KeyError", ":", "if", "(", "len", "(", "self", ")", ">", "0", "and", "(", "self", ".", "holds_integer", "(", ")", "or", "self", ".", "is_boolean", "(", ")", ")", ")", ":", "raise", "elif", "is_integer", "(", "key", ")", ":", "return", "s", "[", "key", "]", "s", "=", "com", ".", "values_from_object", "(", "series", ")", "k", "=", "com", ".", "values_from_object", "(", "key", ")", "k", "=", "self", ".", "_convert_scalar_indexer", "(", "k", ",", "kind", "=", "'getitem'", ")", "try", ":", "return", "self", ".", "_engine", ".", "get_value", "(", "s", ",", "k", ",", "tz", "=", "getattr", "(", "series", ".", "dtype", ",", "'tz'", ",", "None", ")", ")", "except", "KeyError", "as", "e1", ":", "if", "len", "(", "self", ")", ">", "0", "and", "(", "self", ".", "holds_integer", "(", ")", "or", "self", ".", "is_boolean", "(", ")", ")", ":", "raise", "try", ":", "return", "libindex", ".", "get_value_box", "(", "s", ",", "key", ")", "except", "IndexError", ":", "raise", "except", "TypeError", ":", "# generator/iterator-like", "if", "is_iterator", "(", "key", ")", ":", "raise", "InvalidIndexError", "(", "key", ")", "else", ":", "raise", "e1", "except", "Exception", ":", "# pragma: no cover", "raise", "e1", "except", "TypeError", ":", "# python 3", "if", "is_scalar", "(", "key", ")", ":", "# pragma: no cover", "raise", "IndexError", "(", "key", ")", "raise", "InvalidIndexError", "(", "key", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
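A sketch of the fast scalar lookup (Series contents illustrative):
>>> import pandas as pd
>>> s = pd.Series([10, 20, 30], index=['a', 'b', 'c'])
>>> s.index.get_value(s, 'b')
20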
train
Index.set_value
Fast lookup of value from 1-dimensional ndarray. Notes ----- Only use this if you know what you're doing.
pandas/core/indexes/base.py
def set_value(self, arr, key, value): """ Fast lookup of value from 1-dimensional ndarray. Notes ----- Only use this if you know what you're doing. """ self._engine.set_value(com.values_from_object(arr), com.values_from_object(key), value)
def set_value(self, arr, key, value): """ Fast lookup of value from 1-dimensional ndarray. Notes ----- Only use this if you know what you're doing. """ self._engine.set_value(com.values_from_object(arr), com.values_from_object(key), value)
[ "Fast", "lookup", "of", "value", "from", "1", "-", "dimensional", "ndarray", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4391-L4400
[ "def", "set_value", "(", "self", ",", "arr", ",", "key", ",", "value", ")", ":", "self", ".", "_engine", ".", "set_value", "(", "com", ".", "values_from_object", "(", "arr", ")", ",", "com", ".", "values_from_object", "(", "key", ")", ",", "value", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
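set_value mutates the passed array in place at the position of `key`; a sketch (values illustrative):
>>> import numpy as np
>>> import pandas as pd
>>> arr = np.array([10, 20, 30])
>>> pd.Index(['a', 'b', 'c']).set_value(arr, 'b', 99)
>>> arr
array([10, 99, 30])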
train
Index.get_indexer_for
Guaranteed return of an indexer even when non-unique. This dispatches to get_indexer or get_indexer_non_unique as appropriate.
pandas/core/indexes/base.py
def get_indexer_for(self, target, **kwargs): """ Guaranteed return of an indexer even when non-unique. This dispatches to get_indexer or get_indexer_non_unique as appropriate. """ if self.is_unique: return self.get_indexer(target, **kwargs) indexer, _ = self.get_indexer_non_unique(target, **kwargs) return indexer
def get_indexer_for(self, target, **kwargs): """ Guaranteed return of an indexer even when non-unique. This dispatches to get_indexer or get_indexer_non_unique as appropriate. """ if self.is_unique: return self.get_indexer(target, **kwargs) indexer, _ = self.get_indexer_non_unique(target, **kwargs) return indexer
[ "Guaranteed", "return", "of", "an", "indexer", "even", "when", "non", "-", "unique", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4440-L4450
[ "def", "get_indexer_for", "(", "self", ",", "target", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "is_unique", ":", "return", "self", ".", "get_indexer", "(", "target", ",", "*", "*", "kwargs", ")", "indexer", ",", "_", "=", "self", ".", "get_indexer_non_unique", "(", "target", ",", "*", "*", "kwargs", ")", "return", "indexer" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
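Unlike get_indexer, this also works on non-unique indexes, returning every matching position; a sketch:
>>> import pandas as pd
>>> pd.Index(['a', 'b', 'b']).get_indexer_for(['b'])
array([1, 2])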
train
Index.groupby
Group the index labels by a given array of values. Parameters ---------- values : array Values used to determine the groups. Returns ------- groups : dict {group name -> group labels}
pandas/core/indexes/base.py
def groupby(self, values): """ Group the index labels by a given array of values. Parameters ---------- values : array Values used to determine the groups. Returns ------- groups : dict {group name -> group labels} """ # TODO: if we are a MultiIndex, we can do better # than converting to tuples if isinstance(values, ABCMultiIndex): values = values.values values = ensure_categorical(values) result = values._reverse_indexer() # map to the label result = {k: self.take(v) for k, v in result.items()} return result
def groupby(self, values): """ Group the index labels by a given array of values. Parameters ---------- values : array Values used to determine the groups. Returns ------- groups : dict {group name -> group labels} """ # TODO: if we are a MultiIndex, we can do better # than converting to tuples if isinstance(values, ABCMultiIndex): values = values.values values = ensure_categorical(values) result = values._reverse_indexer() # map to the label result = {k: self.take(v) for k, v in result.items()} return result
[ "Group", "the", "index", "labels", "by", "a", "given", "array", "of", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4462-L4487
[ "def", "groupby", "(", "self", ",", "values", ")", ":", "# TODO: if we are a MultiIndex, we can do better", "# that converting to tuples", "if", "isinstance", "(", "values", ",", "ABCMultiIndex", ")", ":", "values", "=", "values", ".", "values", "values", "=", "ensure_categorical", "(", "values", ")", "result", "=", "values", ".", "_reverse_indexer", "(", ")", "# map to the label", "result", "=", "{", "k", ":", "self", ".", "take", "(", "v", ")", "for", "k", ",", "v", "in", "result", ".", "items", "(", ")", "}", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
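A sketch of the returned mapping (grouping array illustrative): keys are group values, values are the index labels they select.
>>> import numpy as np
>>> import pandas as pd
>>> groups = pd.Index(['a', 'b', 'c', 'd']).groupby(np.array([1, 1, 2, 2]))
>>> sorted(groups)
[1, 2]
>>> list(groups[1])
['a', 'b']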
train
Index.map
Map values using input correspondence (a dict, Series, or function). Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'} If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- applied : Union[Index, MultiIndex], inferred The output of the mapping function applied to the index. If the function returns a tuple with more than one element a MultiIndex will be returned.
pandas/core/indexes/base.py
def map(self, mapper, na_action=None): """ Map values using input correspondence (a dict, Series, or function). Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'} If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- applied : Union[Index, MultiIndex], inferred The output of the mapping function applied to the index. If the function returns a tuple with more than one element a MultiIndex will be returned. """ from .multi import MultiIndex new_values = super()._map_values(mapper, na_action=na_action) attributes = self._get_attributes_dict() # we can return a MultiIndex if new_values.size and isinstance(new_values[0], tuple): if isinstance(self, MultiIndex): names = self.names elif attributes.get('name'): names = [attributes.get('name')] * len(new_values[0]) else: names = None return MultiIndex.from_tuples(new_values, names=names) attributes['copy'] = False if not new_values.size: # empty attributes['dtype'] = self.dtype return Index(new_values, **attributes)
def map(self, mapper, na_action=None): """ Map values using input correspondence (a dict, Series, or function). Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'} If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- applied : Union[Index, MultiIndex], inferred The output of the mapping function applied to the index. If the function returns a tuple with more than one element a MultiIndex will be returned. """ from .multi import MultiIndex new_values = super()._map_values(mapper, na_action=na_action) attributes = self._get_attributes_dict() # we can return a MultiIndex if new_values.size and isinstance(new_values[0], tuple): if isinstance(self, MultiIndex): names = self.names elif attributes.get('name'): names = [attributes.get('name')] * len(new_values[0]) else: names = None return MultiIndex.from_tuples(new_values, names=names) attributes['copy'] = False if not new_values.size: # empty attributes['dtype'] = self.dtype return Index(new_values, **attributes)
[ "Map", "values", "using", "input", "correspondence", "(", "a", "dict", "Series", "or", "function", ")", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4489-L4530
[ "def", "map", "(", "self", ",", "mapper", ",", "na_action", "=", "None", ")", ":", "from", ".", "multi", "import", "MultiIndex", "new_values", "=", "super", "(", ")", ".", "_map_values", "(", "mapper", ",", "na_action", "=", "na_action", ")", "attributes", "=", "self", ".", "_get_attributes_dict", "(", ")", "# we can return a MultiIndex", "if", "new_values", ".", "size", "and", "isinstance", "(", "new_values", "[", "0", "]", ",", "tuple", ")", ":", "if", "isinstance", "(", "self", ",", "MultiIndex", ")", ":", "names", "=", "self", ".", "names", "elif", "attributes", ".", "get", "(", "'name'", ")", ":", "names", "=", "[", "attributes", ".", "get", "(", "'name'", ")", "]", "*", "len", "(", "new_values", "[", "0", "]", ")", "else", ":", "names", "=", "None", "return", "MultiIndex", ".", "from_tuples", "(", "new_values", ",", "names", "=", "names", ")", "attributes", "[", "'copy'", "]", "=", "False", "if", "not", "new_values", ".", "size", ":", "# empty", "attributes", "[", "'dtype'", "]", "=", "self", ".", "dtype", "return", "Index", "(", "new_values", ",", "*", "*", "attributes", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
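A minimal sketch of the `Index.map` behavior documented above, including the tuple-to-MultiIndex branch visible in the code field (outputs shown in comments are illustrative and version-dependent):

import pandas as pd

idx = pd.Index([1, 2, 3])

# Dict mapper: labels missing from the dict become NaN by default.
idx.map({1: 'a', 2: 'b'})
# Index(['a', 'b', nan], dtype='object')

# A mapper returning tuples takes the MultiIndex path in the code above.
idx.map(lambda x: (x, x ** 2))
# MultiIndex with pairs (1, 1), (2, 4), (3, 9)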
train
Index.isin
Return a boolean array where the index values are in `values`.

Compute boolean array of whether each index value is found in the
passed set of values. The length of the returned boolean array matches
the length of the index.

Parameters
----------
values : set or list-like
    Sought values.

    .. versionadded:: 0.18.1

       Support for values as a set.

level : str or int, optional
    Name or position of the index level to use (if the index is a
    `MultiIndex`).

Returns
-------
is_contained : ndarray
    NumPy array of boolean values.

See Also
--------
Series.isin : Same for Series.
DataFrame.isin : Same method for DataFrames.

Notes
-----
In the case of `MultiIndex` you must either specify `values` as a
list-like object containing tuples that are the same length as the
number of levels, or specify `level`. Otherwise it will raise a
``ValueError``.

If `level` is specified:

- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.

Examples
--------
>>> idx = pd.Index([1,2,3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')

Check whether each index value is in a list of values.

>>> idx.isin([1, 4])
array([ True, False, False])

>>> midx = pd.MultiIndex.from_arrays([[1,2,3],
...                                  ['red', 'blue', 'green']],
...                                  names=('number', 'color'))
>>> midx
MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']],
           codes=[[0, 1, 2], [2, 0, 1]],
           names=['number', 'color'])

Check whether the strings in the 'color' level of the MultiIndex
are in a list of colors.

>>> midx.isin(['red', 'orange', 'yellow'], level='color')
array([ True, False, False])

To check across the levels of a MultiIndex, pass a list of tuples:

>>> midx.isin([(1, 'red'), (3, 'red')])
array([ True, False, False])

For a DatetimeIndex, string values in `values` are converted to
Timestamps.

>>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']
>>> dti = pd.to_datetime(dates)
>>> dti
DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],
dtype='datetime64[ns]', freq=None)

>>> dti.isin(['2000-03-11'])
array([ True, False, False])
pandas/core/indexes/base.py
def isin(self, values, level=None):
        """
        Return a boolean array where the index values are in `values`.

        Compute boolean array of whether each index value is found in the
        passed set of values. The length of the returned boolean array matches
        the length of the index.

        Parameters
        ----------
        values : set or list-like
            Sought values.

            .. versionadded:: 0.18.1

               Support for values as a set.

        level : str or int, optional
            Name or position of the index level to use (if the index is a
            `MultiIndex`).

        Returns
        -------
        is_contained : ndarray
            NumPy array of boolean values.

        See Also
        --------
        Series.isin : Same for Series.
        DataFrame.isin : Same method for DataFrames.

        Notes
        -----
        In the case of `MultiIndex` you must either specify `values` as a
        list-like object containing tuples that are the same length as the
        number of levels, or specify `level`. Otherwise it will raise a
        ``ValueError``.

        If `level` is specified:

        - if it is the name of one *and only one* index level, use that level;
        - otherwise it should be a number indicating level position.

        Examples
        --------
        >>> idx = pd.Index([1,2,3])
        >>> idx
        Int64Index([1, 2, 3], dtype='int64')

        Check whether each index value is in a list of values.

        >>> idx.isin([1, 4])
        array([ True, False, False])

        >>> midx = pd.MultiIndex.from_arrays([[1,2,3],
        ...                                  ['red', 'blue', 'green']],
        ...                                  names=('number', 'color'))
        >>> midx
        MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']],
                   codes=[[0, 1, 2], [2, 0, 1]],
                   names=['number', 'color'])

        Check whether the strings in the 'color' level of the MultiIndex
        are in a list of colors.

        >>> midx.isin(['red', 'orange', 'yellow'], level='color')
        array([ True, False, False])

        To check across the levels of a MultiIndex, pass a list of tuples:

        >>> midx.isin([(1, 'red'), (3, 'red')])
        array([ True, False, False])

        For a DatetimeIndex, string values in `values` are converted to
        Timestamps.

        >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']
        >>> dti = pd.to_datetime(dates)
        >>> dti
        DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],
        dtype='datetime64[ns]', freq=None)

        >>> dti.isin(['2000-03-11'])
        array([ True, False, False])
        """
        if level is not None:
            self._validate_index_level(level)
        return algos.isin(self, values)
def isin(self, values, level=None):
        """
        Return a boolean array where the index values are in `values`.

        Compute boolean array of whether each index value is found in the
        passed set of values. The length of the returned boolean array matches
        the length of the index.

        Parameters
        ----------
        values : set or list-like
            Sought values.

            .. versionadded:: 0.18.1

               Support for values as a set.

        level : str or int, optional
            Name or position of the index level to use (if the index is a
            `MultiIndex`).

        Returns
        -------
        is_contained : ndarray
            NumPy array of boolean values.

        See Also
        --------
        Series.isin : Same for Series.
        DataFrame.isin : Same method for DataFrames.

        Notes
        -----
        In the case of `MultiIndex` you must either specify `values` as a
        list-like object containing tuples that are the same length as the
        number of levels, or specify `level`. Otherwise it will raise a
        ``ValueError``.

        If `level` is specified:

        - if it is the name of one *and only one* index level, use that level;
        - otherwise it should be a number indicating level position.

        Examples
        --------
        >>> idx = pd.Index([1,2,3])
        >>> idx
        Int64Index([1, 2, 3], dtype='int64')

        Check whether each index value is in a list of values.

        >>> idx.isin([1, 4])
        array([ True, False, False])

        >>> midx = pd.MultiIndex.from_arrays([[1,2,3],
        ...                                  ['red', 'blue', 'green']],
        ...                                  names=('number', 'color'))
        >>> midx
        MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']],
                   codes=[[0, 1, 2], [2, 0, 1]],
                   names=['number', 'color'])

        Check whether the strings in the 'color' level of the MultiIndex
        are in a list of colors.

        >>> midx.isin(['red', 'orange', 'yellow'], level='color')
        array([ True, False, False])

        To check across the levels of a MultiIndex, pass a list of tuples:

        >>> midx.isin([(1, 'red'), (3, 'red')])
        array([ True, False, False])

        For a DatetimeIndex, string values in `values` are converted to
        Timestamps.

        >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']
        >>> dti = pd.to_datetime(dates)
        >>> dti
        DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],
        dtype='datetime64[ns]', freq=None)

        >>> dti.isin(['2000-03-11'])
        array([ True, False, False])
        """
        if level is not None:
            self._validate_index_level(level)
        return algos.isin(self, values)
[ "Return", "a", "boolean", "array", "where", "the", "index", "values", "are", "in", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4532-L4618
[ "def", "isin", "(", "self", ",", "values", ",", "level", "=", "None", ")", ":", "if", "level", "is", "not", "None", ":", "self", ".", "_validate_index_level", "(", "level", ")", "return", "algos", ".", "isin", "(", "self", ",", "values", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
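Rounding out the `Index.isin` record, a short sketch showing the returned mask used to subset the index — a common follow-on step the docstring examples stop just short of (session and outputs are illustrative):

import pandas as pd

idx = pd.Index(['apple', 'banana', 'cherry'])

# A set is accepted for `values` (per the 0.18.1 note above).
mask = idx.isin({'banana', 'kiwi'})
# array([False,  True, False])

# Boolean masks from isin can index the Index directly.
idx[mask]
# Index(['banana'], dtype='object')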