id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
27,400
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.compute_index
def compute_index(self, axis, data_object, compute_diff=True): """Computes the index after a number of rows have been removed. Note: In order for this to be used properly, the indexes must not be changed before you compute this. Args: axis: The axis to extract the index from. data_object: The new data object to extract the index from. compute_diff: True to use `self` to compute the index from self rather than data_object. This is used when the dimension of the index may have changed, but the deleted rows/columns are unknown. Returns: A new pandas.Index object. """ def pandas_index_extraction(df, axis): if not axis: return df.index else: try: return df.columns except AttributeError: return pandas.Index([]) index_obj = self.index if not axis else self.columns old_blocks = self.data if compute_diff else None new_indices = data_object.get_indices( axis=axis, index_func=lambda df: pandas_index_extraction(df, axis), old_blocks=old_blocks, ) return index_obj[new_indices] if compute_diff else new_indices
python
def compute_index(self, axis, data_object, compute_diff=True): """Computes the index after a number of rows have been removed. Note: In order for this to be used properly, the indexes must not be changed before you compute this. Args: axis: The axis to extract the index from. data_object: The new data object to extract the index from. compute_diff: True to use `self` to compute the index from self rather than data_object. This is used when the dimension of the index may have changed, but the deleted rows/columns are unknown. Returns: A new pandas.Index object. """ def pandas_index_extraction(df, axis): if not axis: return df.index else: try: return df.columns except AttributeError: return pandas.Index([]) index_obj = self.index if not axis else self.columns old_blocks = self.data if compute_diff else None new_indices = data_object.get_indices( axis=axis, index_func=lambda df: pandas_index_extraction(df, axis), old_blocks=old_blocks, ) return index_obj[new_indices] if compute_diff else new_indices
[ "def", "compute_index", "(", "self", ",", "axis", ",", "data_object", ",", "compute_diff", "=", "True", ")", ":", "def", "pandas_index_extraction", "(", "df", ",", "axis", ")", ":", "if", "not", "axis", ":", "return", "df", ".", "index", "else", ":", "try", ":", "return", "df", ".", "columns", "except", "AttributeError", ":", "return", "pandas", ".", "Index", "(", "[", "]", ")", "index_obj", "=", "self", ".", "index", "if", "not", "axis", "else", "self", ".", "columns", "old_blocks", "=", "self", ".", "data", "if", "compute_diff", "else", "None", "new_indices", "=", "data_object", ".", "get_indices", "(", "axis", "=", "axis", ",", "index_func", "=", "lambda", "df", ":", "pandas_index_extraction", "(", "df", ",", "axis", ")", ",", "old_blocks", "=", "old_blocks", ",", ")", "return", "index_obj", "[", "new_indices", "]", "if", "compute_diff", "else", "new_indices" ]
Computes the index after a number of rows have been removed. Note: In order for this to be used properly, the indexes must not be changed before you compute this. Args: axis: The axis to extract the index from. data_object: The new data object to extract the index from. compute_diff: True to use `self` to compute the index from self rather than data_object. This is used when the dimension of the index may have changed, but the deleted rows/columns are unknown. Returns: A new pandas.Index object.
[ "Computes", "the", "index", "after", "a", "number", "of", "rows", "have", "been", "removed", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L64-L98
27,401
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.numeric_columns
def numeric_columns(self, include_bool=True): """Returns the numeric columns of the Manager. Returns: List of index names. """ columns = [] for col, dtype in zip(self.columns, self.dtypes): if is_numeric_dtype(dtype) and ( include_bool or (not include_bool and dtype != np.bool_) ): columns.append(col) return columns
python
def numeric_columns(self, include_bool=True): """Returns the numeric columns of the Manager. Returns: List of index names. """ columns = [] for col, dtype in zip(self.columns, self.dtypes): if is_numeric_dtype(dtype) and ( include_bool or (not include_bool and dtype != np.bool_) ): columns.append(col) return columns
[ "def", "numeric_columns", "(", "self", ",", "include_bool", "=", "True", ")", ":", "columns", "=", "[", "]", "for", "col", ",", "dtype", "in", "zip", "(", "self", ".", "columns", ",", "self", ".", "dtypes", ")", ":", "if", "is_numeric_dtype", "(", "dtype", ")", "and", "(", "include_bool", "or", "(", "not", "include_bool", "and", "dtype", "!=", "np", ".", "bool_", ")", ")", ":", "columns", ".", "append", "(", "col", ")", "return", "columns" ]
Returns the numeric columns of the Manager. Returns: List of index names.
[ "Returns", "the", "numeric", "columns", "of", "the", "Manager", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L167-L179
27,402
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.numeric_function_clean_dataframe
def numeric_function_clean_dataframe(self, axis): """Preprocesses numeric functions to clean dataframe and pick numeric indices. Args: axis: '0' if columns and '1' if rows. Returns: Tuple with return value(if any), indices to apply func to & cleaned Manager. """ result = None query_compiler = self # If no numeric columns and over columns, then return empty Series if not axis and len(self.index) == 0: result = pandas.Series(dtype=np.int64) nonnumeric = [ col for col, dtype in zip(self.columns, self.dtypes) if not is_numeric_dtype(dtype) ] if len(nonnumeric) == len(self.columns): # If over rows and no numeric columns, return this if axis: result = pandas.Series([np.nan for _ in self.index]) else: result = pandas.Series([0 for _ in self.index]) else: query_compiler = self.drop(columns=nonnumeric) return result, query_compiler
python
def numeric_function_clean_dataframe(self, axis): """Preprocesses numeric functions to clean dataframe and pick numeric indices. Args: axis: '0' if columns and '1' if rows. Returns: Tuple with return value(if any), indices to apply func to & cleaned Manager. """ result = None query_compiler = self # If no numeric columns and over columns, then return empty Series if not axis and len(self.index) == 0: result = pandas.Series(dtype=np.int64) nonnumeric = [ col for col, dtype in zip(self.columns, self.dtypes) if not is_numeric_dtype(dtype) ] if len(nonnumeric) == len(self.columns): # If over rows and no numeric columns, return this if axis: result = pandas.Series([np.nan for _ in self.index]) else: result = pandas.Series([0 for _ in self.index]) else: query_compiler = self.drop(columns=nonnumeric) return result, query_compiler
[ "def", "numeric_function_clean_dataframe", "(", "self", ",", "axis", ")", ":", "result", "=", "None", "query_compiler", "=", "self", "# If no numeric columns and over columns, then return empty Series", "if", "not", "axis", "and", "len", "(", "self", ".", "index", ")", "==", "0", ":", "result", "=", "pandas", ".", "Series", "(", "dtype", "=", "np", ".", "int64", ")", "nonnumeric", "=", "[", "col", "for", "col", ",", "dtype", "in", "zip", "(", "self", ".", "columns", ",", "self", ".", "dtypes", ")", "if", "not", "is_numeric_dtype", "(", "dtype", ")", "]", "if", "len", "(", "nonnumeric", ")", "==", "len", "(", "self", ".", "columns", ")", ":", "# If over rows and no numeric columns, return this", "if", "axis", ":", "result", "=", "pandas", ".", "Series", "(", "[", "np", ".", "nan", "for", "_", "in", "self", ".", "index", "]", ")", "else", ":", "result", "=", "pandas", ".", "Series", "(", "[", "0", "for", "_", "in", "self", ".", "index", "]", ")", "else", ":", "query_compiler", "=", "self", ".", "drop", "(", "columns", "=", "nonnumeric", ")", "return", "result", ",", "query_compiler" ]
Preprocesses numeric functions to clean dataframe and pick numeric indices. Args: axis: '0' if columns and '1' if rows. Returns: Tuple with return value(if any), indices to apply func to & cleaned Manager.
[ "Preprocesses", "numeric", "functions", "to", "clean", "dataframe", "and", "pick", "numeric", "indices", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L181-L209
27,403
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.join
def join(self, other, **kwargs): """Joins a list or two objects together. Args: other: The other object(s) to join on. Returns: Joined objects. """ if not isinstance(other, list): other = [other] return self._join_list_of_managers(other, **kwargs)
python
def join(self, other, **kwargs): """Joins a list or two objects together. Args: other: The other object(s) to join on. Returns: Joined objects. """ if not isinstance(other, list): other = [other] return self._join_list_of_managers(other, **kwargs)
[ "def", "join", "(", "self", ",", "other", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "other", ",", "list", ")", ":", "other", "=", "[", "other", "]", "return", "self", ".", "_join_list_of_managers", "(", "other", ",", "*", "*", "kwargs", ")" ]
Joins a list or two objects together. Args: other: The other object(s) to join on. Returns: Joined objects.
[ "Joins", "a", "list", "or", "two", "objects", "together", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L291-L302
27,404
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.concat
def concat(self, axis, other, **kwargs): """Concatenates two objects together. Args: axis: The axis index object to join (0 for columns, 1 for index). other: The other_index to concat with. Returns: Concatenated objects. """ return self._append_list_of_managers(other, axis, **kwargs)
python
def concat(self, axis, other, **kwargs): """Concatenates two objects together. Args: axis: The axis index object to join (0 for columns, 1 for index). other: The other_index to concat with. Returns: Concatenated objects. """ return self._append_list_of_managers(other, axis, **kwargs)
[ "def", "concat", "(", "self", ",", "axis", ",", "other", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_append_list_of_managers", "(", "other", ",", "axis", ",", "*", "*", "kwargs", ")" ]
Concatenates two objects together. Args: axis: The axis index object to join (0 for columns, 1 for index). other: The other_index to concat with. Returns: Concatenated objects.
[ "Concatenates", "two", "objects", "together", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L304-L314
27,405
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.copartition
def copartition(self, axis, other, how_to_join, sort, force_repartition=False): """Copartition two QueryCompiler objects. Args: axis: The axis to copartition along. other: The other Query Compiler(s) to copartition against. how_to_join: How to manage joining the index object ("left", "right", etc.) sort: Whether or not to sort the joined index. force_repartition: Whether or not to force the repartitioning. By default, this method will skip repartitioning if it is possible. This is because reindexing is extremely inefficient. Because this method is used to `join` or `append`, it is vital that the internal indices match. Returns: A tuple (left query compiler, right query compiler list, joined index). """ if isinstance(other, type(self)): other = [other] index_obj = ( [o.index for o in other] if axis == 0 else [o.columns for o in other] ) joined_index = self._join_index_objects( axis ^ 1, index_obj, how_to_join, sort=sort ) # We have to set these because otherwise when we perform the functions it may # end up serializing this entire object. left_old_idx = self.index if axis == 0 else self.columns right_old_idxes = index_obj # Start with this and we'll repartition the first time, and then not again. reindexed_self = self.data reindexed_other_list = [] def compute_reindex(old_idx): """Create a function based on the old index and axis. Args: old_idx: The old index/columns Returns: A function that will be run in each partition. """ def reindex_partition(df): if axis == 0: df.index = old_idx new_df = df.reindex(index=joined_index) new_df.index = pandas.RangeIndex(len(new_df.index)) else: df.columns = old_idx new_df = df.reindex(columns=joined_index) new_df.columns = pandas.RangeIndex(len(new_df.columns)) return new_df return reindex_partition for i in range(len(other)): # If the indices are equal we can skip partitioning so long as we are not # forced to repartition. See note above about `force_repartition`. 
if i != 0 or (left_old_idx.equals(joined_index) and not force_repartition): reindex_left = None else: reindex_left = self._prepare_method(compute_reindex(left_old_idx)) if right_old_idxes[i].equals(joined_index) and not force_repartition: reindex_right = None else: reindex_right = other[i]._prepare_method( compute_reindex(right_old_idxes[i]) ) reindexed_self, reindexed_other = reindexed_self.copartition_datasets( axis, other[i].data, reindex_left, reindex_right ) reindexed_other_list.append(reindexed_other) return reindexed_self, reindexed_other_list, joined_index
python
def copartition(self, axis, other, how_to_join, sort, force_repartition=False): """Copartition two QueryCompiler objects. Args: axis: The axis to copartition along. other: The other Query Compiler(s) to copartition against. how_to_join: How to manage joining the index object ("left", "right", etc.) sort: Whether or not to sort the joined index. force_repartition: Whether or not to force the repartitioning. By default, this method will skip repartitioning if it is possible. This is because reindexing is extremely inefficient. Because this method is used to `join` or `append`, it is vital that the internal indices match. Returns: A tuple (left query compiler, right query compiler list, joined index). """ if isinstance(other, type(self)): other = [other] index_obj = ( [o.index for o in other] if axis == 0 else [o.columns for o in other] ) joined_index = self._join_index_objects( axis ^ 1, index_obj, how_to_join, sort=sort ) # We have to set these because otherwise when we perform the functions it may # end up serializing this entire object. left_old_idx = self.index if axis == 0 else self.columns right_old_idxes = index_obj # Start with this and we'll repartition the first time, and then not again. reindexed_self = self.data reindexed_other_list = [] def compute_reindex(old_idx): """Create a function based on the old index and axis. Args: old_idx: The old index/columns Returns: A function that will be run in each partition. """ def reindex_partition(df): if axis == 0: df.index = old_idx new_df = df.reindex(index=joined_index) new_df.index = pandas.RangeIndex(len(new_df.index)) else: df.columns = old_idx new_df = df.reindex(columns=joined_index) new_df.columns = pandas.RangeIndex(len(new_df.columns)) return new_df return reindex_partition for i in range(len(other)): # If the indices are equal we can skip partitioning so long as we are not # forced to repartition. See note above about `force_repartition`. 
if i != 0 or (left_old_idx.equals(joined_index) and not force_repartition): reindex_left = None else: reindex_left = self._prepare_method(compute_reindex(left_old_idx)) if right_old_idxes[i].equals(joined_index) and not force_repartition: reindex_right = None else: reindex_right = other[i]._prepare_method( compute_reindex(right_old_idxes[i]) ) reindexed_self, reindexed_other = reindexed_self.copartition_datasets( axis, other[i].data, reindex_left, reindex_right ) reindexed_other_list.append(reindexed_other) return reindexed_self, reindexed_other_list, joined_index
[ "def", "copartition", "(", "self", ",", "axis", ",", "other", ",", "how_to_join", ",", "sort", ",", "force_repartition", "=", "False", ")", ":", "if", "isinstance", "(", "other", ",", "type", "(", "self", ")", ")", ":", "other", "=", "[", "other", "]", "index_obj", "=", "(", "[", "o", ".", "index", "for", "o", "in", "other", "]", "if", "axis", "==", "0", "else", "[", "o", ".", "columns", "for", "o", "in", "other", "]", ")", "joined_index", "=", "self", ".", "_join_index_objects", "(", "axis", "^", "1", ",", "index_obj", ",", "how_to_join", ",", "sort", "=", "sort", ")", "# We have to set these because otherwise when we perform the functions it may", "# end up serializing this entire object.", "left_old_idx", "=", "self", ".", "index", "if", "axis", "==", "0", "else", "self", ".", "columns", "right_old_idxes", "=", "index_obj", "# Start with this and we'll repartition the first time, and then not again.", "reindexed_self", "=", "self", ".", "data", "reindexed_other_list", "=", "[", "]", "def", "compute_reindex", "(", "old_idx", ")", ":", "\"\"\"Create a function based on the old index and axis.\n\n Args:\n old_idx: The old index/columns\n\n Returns:\n A function that will be run in each partition.\n \"\"\"", "def", "reindex_partition", "(", "df", ")", ":", "if", "axis", "==", "0", ":", "df", ".", "index", "=", "old_idx", "new_df", "=", "df", ".", "reindex", "(", "index", "=", "joined_index", ")", "new_df", ".", "index", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "new_df", ".", "index", ")", ")", "else", ":", "df", ".", "columns", "=", "old_idx", "new_df", "=", "df", ".", "reindex", "(", "columns", "=", "joined_index", ")", "new_df", ".", "columns", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "new_df", ".", "columns", ")", ")", "return", "new_df", "return", "reindex_partition", "for", "i", "in", "range", "(", "len", "(", "other", ")", ")", ":", "# If the indices are equal we can skip partitioning so long as we are not", "# forced to repartition. 
See note above about `force_repartition`.", "if", "i", "!=", "0", "or", "(", "left_old_idx", ".", "equals", "(", "joined_index", ")", "and", "not", "force_repartition", ")", ":", "reindex_left", "=", "None", "else", ":", "reindex_left", "=", "self", ".", "_prepare_method", "(", "compute_reindex", "(", "left_old_idx", ")", ")", "if", "right_old_idxes", "[", "i", "]", ".", "equals", "(", "joined_index", ")", "and", "not", "force_repartition", ":", "reindex_right", "=", "None", "else", ":", "reindex_right", "=", "other", "[", "i", "]", ".", "_prepare_method", "(", "compute_reindex", "(", "right_old_idxes", "[", "i", "]", ")", ")", "reindexed_self", ",", "reindexed_other", "=", "reindexed_self", ".", "copartition_datasets", "(", "axis", ",", "other", "[", "i", "]", ".", "data", ",", "reindex_left", ",", "reindex_right", ")", "reindexed_other_list", ".", "append", "(", "reindexed_other", ")", "return", "reindexed_self", ",", "reindexed_other_list", ",", "joined_index" ]
Copartition two QueryCompiler objects. Args: axis: The axis to copartition along. other: The other Query Compiler(s) to copartition against. how_to_join: How to manage joining the index object ("left", "right", etc.) sort: Whether or not to sort the joined index. force_repartition: Whether or not to force the repartitioning. By default, this method will skip repartitioning if it is possible. This is because reindexing is extremely inefficient. Because this method is used to `join` or `append`, it is vital that the internal indices match. Returns: A tuple (left query compiler, right query compiler list, joined index).
[ "Copartition", "two", "QueryCompiler", "objects", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L380-L456
27,406
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.to_pandas
def to_pandas(self): """Converts Modin DataFrame to Pandas DataFrame. Returns: Pandas DataFrame of the DataManager. """ df = self.data.to_pandas(is_transposed=self._is_transposed) if df.empty: if len(self.columns) != 0: df = pandas.DataFrame(columns=self.columns).astype(self.dtypes) else: df = pandas.DataFrame(columns=self.columns, index=self.index) else: ErrorMessage.catch_bugs_and_request_email( len(df.index) != len(self.index) or len(df.columns) != len(self.columns) ) df.index = self.index df.columns = self.columns return df
python
def to_pandas(self): """Converts Modin DataFrame to Pandas DataFrame. Returns: Pandas DataFrame of the DataManager. """ df = self.data.to_pandas(is_transposed=self._is_transposed) if df.empty: if len(self.columns) != 0: df = pandas.DataFrame(columns=self.columns).astype(self.dtypes) else: df = pandas.DataFrame(columns=self.columns, index=self.index) else: ErrorMessage.catch_bugs_and_request_email( len(df.index) != len(self.index) or len(df.columns) != len(self.columns) ) df.index = self.index df.columns = self.columns return df
[ "def", "to_pandas", "(", "self", ")", ":", "df", "=", "self", ".", "data", ".", "to_pandas", "(", "is_transposed", "=", "self", ".", "_is_transposed", ")", "if", "df", ".", "empty", ":", "if", "len", "(", "self", ".", "columns", ")", "!=", "0", ":", "df", "=", "pandas", ".", "DataFrame", "(", "columns", "=", "self", ".", "columns", ")", ".", "astype", "(", "self", ".", "dtypes", ")", "else", ":", "df", "=", "pandas", ".", "DataFrame", "(", "columns", "=", "self", ".", "columns", ",", "index", "=", "self", ".", "index", ")", "else", ":", "ErrorMessage", ".", "catch_bugs_and_request_email", "(", "len", "(", "df", ".", "index", ")", "!=", "len", "(", "self", ".", "index", ")", "or", "len", "(", "df", ".", "columns", ")", "!=", "len", "(", "self", ".", "columns", ")", ")", "df", ".", "index", "=", "self", ".", "index", "df", ".", "columns", "=", "self", ".", "columns", "return", "df" ]
Converts Modin DataFrame to Pandas DataFrame. Returns: Pandas DataFrame of the DataManager.
[ "Converts", "Modin", "DataFrame", "to", "Pandas", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L468-L486
27,407
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.from_pandas
def from_pandas(cls, df, block_partitions_cls): """Improve simple Pandas DataFrame to an advanced and superior Modin DataFrame. Args: cls: DataManger object to convert the DataFrame to. df: Pandas DataFrame object. block_partitions_cls: BlockParitions object to store partitions Returns: Returns DataManager containing data from the Pandas DataFrame. """ new_index = df.index new_columns = df.columns new_dtypes = df.dtypes new_data = block_partitions_cls.from_pandas(df) return cls(new_data, new_index, new_columns, dtypes=new_dtypes)
python
def from_pandas(cls, df, block_partitions_cls): """Improve simple Pandas DataFrame to an advanced and superior Modin DataFrame. Args: cls: DataManger object to convert the DataFrame to. df: Pandas DataFrame object. block_partitions_cls: BlockParitions object to store partitions Returns: Returns DataManager containing data from the Pandas DataFrame. """ new_index = df.index new_columns = df.columns new_dtypes = df.dtypes new_data = block_partitions_cls.from_pandas(df) return cls(new_data, new_index, new_columns, dtypes=new_dtypes)
[ "def", "from_pandas", "(", "cls", ",", "df", ",", "block_partitions_cls", ")", ":", "new_index", "=", "df", ".", "index", "new_columns", "=", "df", ".", "columns", "new_dtypes", "=", "df", ".", "dtypes", "new_data", "=", "block_partitions_cls", ".", "from_pandas", "(", "df", ")", "return", "cls", "(", "new_data", ",", "new_index", ",", "new_columns", ",", "dtypes", "=", "new_dtypes", ")" ]
Improve simple Pandas DataFrame to an advanced and superior Modin DataFrame. Args: cls: DataManger object to convert the DataFrame to. df: Pandas DataFrame object. block_partitions_cls: BlockParitions object to store partitions Returns: Returns DataManager containing data from the Pandas DataFrame.
[ "Improve", "simple", "Pandas", "DataFrame", "to", "an", "advanced", "and", "superior", "Modin", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L489-L504
27,408
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler._inter_df_op_handler
def _inter_df_op_handler(self, func, other, **kwargs): """Helper method for inter-manager and scalar operations. Args: func: The function to use on the Manager/scalar. other: The other Manager/scalar. Returns: New DataManager with new data and index. """ axis = kwargs.get("axis", 0) axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0 if isinstance(other, type(self)): return self._inter_manager_operations( other, "outer", lambda x, y: func(x, y, **kwargs) ) else: return self._scalar_operations( axis, other, lambda df: func(df, other, **kwargs) )
python
def _inter_df_op_handler(self, func, other, **kwargs): """Helper method for inter-manager and scalar operations. Args: func: The function to use on the Manager/scalar. other: The other Manager/scalar. Returns: New DataManager with new data and index. """ axis = kwargs.get("axis", 0) axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0 if isinstance(other, type(self)): return self._inter_manager_operations( other, "outer", lambda x, y: func(x, y, **kwargs) ) else: return self._scalar_operations( axis, other, lambda df: func(df, other, **kwargs) )
[ "def", "_inter_df_op_handler", "(", "self", ",", "func", ",", "other", ",", "*", "*", "kwargs", ")", ":", "axis", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "axis", "=", "pandas", ".", "DataFrame", "(", ")", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "is", "not", "None", "else", "0", "if", "isinstance", "(", "other", ",", "type", "(", "self", ")", ")", ":", "return", "self", ".", "_inter_manager_operations", "(", "other", ",", "\"outer\"", ",", "lambda", "x", ",", "y", ":", "func", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ")", "else", ":", "return", "self", ".", "_scalar_operations", "(", "axis", ",", "other", ",", "lambda", "df", ":", "func", "(", "df", ",", "other", ",", "*", "*", "kwargs", ")", ")" ]
Helper method for inter-manager and scalar operations. Args: func: The function to use on the Manager/scalar. other: The other Manager/scalar. Returns: New DataManager with new data and index.
[ "Helper", "method", "for", "inter", "-", "manager", "and", "scalar", "operations", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L554-L573
27,409
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.binary_op
def binary_op(self, op, other, **kwargs): """Perform an operation between two objects. Note: The list of operations is as follows: - add - eq - floordiv - ge - gt - le - lt - mod - mul - ne - pow - rfloordiv - rmod - rpow - rsub - rtruediv - sub - truediv - __and__ - __or__ - __xor__ Args: op: The operation. See list of operations above other: The object to operate against. Returns: A new QueryCompiler object. """ func = getattr(pandas.DataFrame, op) return self._inter_df_op_handler(func, other, **kwargs)
python
def binary_op(self, op, other, **kwargs): """Perform an operation between two objects. Note: The list of operations is as follows: - add - eq - floordiv - ge - gt - le - lt - mod - mul - ne - pow - rfloordiv - rmod - rpow - rsub - rtruediv - sub - truediv - __and__ - __or__ - __xor__ Args: op: The operation. See list of operations above other: The object to operate against. Returns: A new QueryCompiler object. """ func = getattr(pandas.DataFrame, op) return self._inter_df_op_handler(func, other, **kwargs)
[ "def", "binary_op", "(", "self", ",", "op", ",", "other", ",", "*", "*", "kwargs", ")", ":", "func", "=", "getattr", "(", "pandas", ".", "DataFrame", ",", "op", ")", "return", "self", ".", "_inter_df_op_handler", "(", "func", ",", "other", ",", "*", "*", "kwargs", ")" ]
Perform an operation between two objects. Note: The list of operations is as follows: - add - eq - floordiv - ge - gt - le - lt - mod - mul - ne - pow - rfloordiv - rmod - rpow - rsub - rtruediv - sub - truediv - __and__ - __or__ - __xor__ Args: op: The operation. See list of operations above other: The object to operate against. Returns: A new QueryCompiler object.
[ "Perform", "an", "operation", "between", "two", "objects", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L575-L608
27,410
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.update
def update(self, other, **kwargs): """Uses other manager to update corresponding values in this manager. Args: other: The other manager. Returns: New DataManager with updated data and index. """ assert isinstance( other, type(self) ), "Must have the same DataManager subclass to perform this operation" def update_builder(df, other, **kwargs): # This is because of a requirement in Arrow df = df.copy() df.update(other, **kwargs) return df return self._inter_df_op_handler(update_builder, other, **kwargs)
python
def update(self, other, **kwargs): """Uses other manager to update corresponding values in this manager. Args: other: The other manager. Returns: New DataManager with updated data and index. """ assert isinstance( other, type(self) ), "Must have the same DataManager subclass to perform this operation" def update_builder(df, other, **kwargs): # This is because of a requirement in Arrow df = df.copy() df.update(other, **kwargs) return df return self._inter_df_op_handler(update_builder, other, **kwargs)
[ "def", "update", "(", "self", ",", "other", ",", "*", "*", "kwargs", ")", ":", "assert", "isinstance", "(", "other", ",", "type", "(", "self", ")", ")", ",", "\"Must have the same DataManager subclass to perform this operation\"", "def", "update_builder", "(", "df", ",", "other", ",", "*", "*", "kwargs", ")", ":", "# This is because of a requirement in Arrow", "df", "=", "df", ".", "copy", "(", ")", "df", ".", "update", "(", "other", ",", "*", "*", "kwargs", ")", "return", "df", "return", "self", ".", "_inter_df_op_handler", "(", "update_builder", ",", "other", ",", "*", "*", "kwargs", ")" ]
Uses other manager to update corresponding values in this manager. Args: other: The other manager. Returns: New DataManager with updated data and index.
[ "Uses", "other", "manager", "to", "update", "corresponding", "values", "in", "this", "manager", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L620-L639
27,411
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.where
def where(self, cond, other, **kwargs): """Gets values from this manager where cond is true else from other. Args: cond: Condition on which to evaluate values. Returns: New DataManager with updated data and index. """ assert isinstance( cond, type(self) ), "Must have the same DataManager subclass to perform this operation" if isinstance(other, type(self)): # Note: Currently we are doing this with two maps across the entire # data. This can be done with a single map, but it will take a # modification in the `BlockPartition` class. # If this were in one pass it would be ~2x faster. # TODO (devin-petersohn) rewrite this to take one pass. def where_builder_first_pass(cond, other, **kwargs): return cond.where(cond, other, **kwargs) def where_builder_second_pass(df, new_other, **kwargs): return df.where(new_other.eq(True), new_other, **kwargs) first_pass = cond._inter_manager_operations( other, "left", where_builder_first_pass ) final_pass = self._inter_manager_operations( first_pass, "left", where_builder_second_pass ) return self.__constructor__(final_pass.data, self.index, self.columns) else: axis = kwargs.get("axis", 0) # Rather than serializing and passing in the index/columns, we will # just change this index to match the internal index. if isinstance(other, pandas.Series): other.index = pandas.RangeIndex(len(other.index)) def where_builder_series(df, cond): if axis == 0: df.index = pandas.RangeIndex(len(df.index)) cond.index = pandas.RangeIndex(len(cond.index)) else: df.columns = pandas.RangeIndex(len(df.columns)) cond.columns = pandas.RangeIndex(len(cond.columns)) return df.where(cond, other, **kwargs) reindexed_self, reindexed_cond, a = self.copartition( axis, cond, "left", False ) # Unwrap from list given by `copartition` reindexed_cond = reindexed_cond[0] new_data = reindexed_self.inter_data_operation( axis, lambda l, r: where_builder_series(l, r), reindexed_cond ) return self.__constructor__(new_data, self.index, self.columns)
python
def where(self, cond, other, **kwargs): """Gets values from this manager where cond is true else from other. Args: cond: Condition on which to evaluate values. Returns: New DataManager with updated data and index. """ assert isinstance( cond, type(self) ), "Must have the same DataManager subclass to perform this operation" if isinstance(other, type(self)): # Note: Currently we are doing this with two maps across the entire # data. This can be done with a single map, but it will take a # modification in the `BlockPartition` class. # If this were in one pass it would be ~2x faster. # TODO (devin-petersohn) rewrite this to take one pass. def where_builder_first_pass(cond, other, **kwargs): return cond.where(cond, other, **kwargs) def where_builder_second_pass(df, new_other, **kwargs): return df.where(new_other.eq(True), new_other, **kwargs) first_pass = cond._inter_manager_operations( other, "left", where_builder_first_pass ) final_pass = self._inter_manager_operations( first_pass, "left", where_builder_second_pass ) return self.__constructor__(final_pass.data, self.index, self.columns) else: axis = kwargs.get("axis", 0) # Rather than serializing and passing in the index/columns, we will # just change this index to match the internal index. if isinstance(other, pandas.Series): other.index = pandas.RangeIndex(len(other.index)) def where_builder_series(df, cond): if axis == 0: df.index = pandas.RangeIndex(len(df.index)) cond.index = pandas.RangeIndex(len(cond.index)) else: df.columns = pandas.RangeIndex(len(df.columns)) cond.columns = pandas.RangeIndex(len(cond.columns)) return df.where(cond, other, **kwargs) reindexed_self, reindexed_cond, a = self.copartition( axis, cond, "left", False ) # Unwrap from list given by `copartition` reindexed_cond = reindexed_cond[0] new_data = reindexed_self.inter_data_operation( axis, lambda l, r: where_builder_series(l, r), reindexed_cond ) return self.__constructor__(new_data, self.index, self.columns)
[ "def", "where", "(", "self", ",", "cond", ",", "other", ",", "*", "*", "kwargs", ")", ":", "assert", "isinstance", "(", "cond", ",", "type", "(", "self", ")", ")", ",", "\"Must have the same DataManager subclass to perform this operation\"", "if", "isinstance", "(", "other", ",", "type", "(", "self", ")", ")", ":", "# Note: Currently we are doing this with two maps across the entire", "# data. This can be done with a single map, but it will take a", "# modification in the `BlockPartition` class.", "# If this were in one pass it would be ~2x faster.", "# TODO (devin-petersohn) rewrite this to take one pass.", "def", "where_builder_first_pass", "(", "cond", ",", "other", ",", "*", "*", "kwargs", ")", ":", "return", "cond", ".", "where", "(", "cond", ",", "other", ",", "*", "*", "kwargs", ")", "def", "where_builder_second_pass", "(", "df", ",", "new_other", ",", "*", "*", "kwargs", ")", ":", "return", "df", ".", "where", "(", "new_other", ".", "eq", "(", "True", ")", ",", "new_other", ",", "*", "*", "kwargs", ")", "first_pass", "=", "cond", ".", "_inter_manager_operations", "(", "other", ",", "\"left\"", ",", "where_builder_first_pass", ")", "final_pass", "=", "self", ".", "_inter_manager_operations", "(", "first_pass", ",", "\"left\"", ",", "where_builder_second_pass", ")", "return", "self", ".", "__constructor__", "(", "final_pass", ".", "data", ",", "self", ".", "index", ",", "self", ".", "columns", ")", "else", ":", "axis", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "# Rather than serializing and passing in the index/columns, we will", "# just change this index to match the internal index.", "if", "isinstance", "(", "other", ",", "pandas", ".", "Series", ")", ":", "other", ".", "index", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "other", ".", "index", ")", ")", "def", "where_builder_series", "(", "df", ",", "cond", ")", ":", "if", "axis", "==", "0", ":", "df", ".", "index", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "df", ".", 
"index", ")", ")", "cond", ".", "index", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "cond", ".", "index", ")", ")", "else", ":", "df", ".", "columns", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "df", ".", "columns", ")", ")", "cond", ".", "columns", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "cond", ".", "columns", ")", ")", "return", "df", ".", "where", "(", "cond", ",", "other", ",", "*", "*", "kwargs", ")", "reindexed_self", ",", "reindexed_cond", ",", "a", "=", "self", ".", "copartition", "(", "axis", ",", "cond", ",", "\"left\"", ",", "False", ")", "# Unwrap from list given by `copartition`", "reindexed_cond", "=", "reindexed_cond", "[", "0", "]", "new_data", "=", "reindexed_self", ".", "inter_data_operation", "(", "axis", ",", "lambda", "l", ",", "r", ":", "where_builder_series", "(", "l", ",", "r", ")", ",", "reindexed_cond", ")", "return", "self", ".", "__constructor__", "(", "new_data", ",", "self", ".", "index", ",", "self", ".", "columns", ")" ]
Gets values from this manager where cond is true else from other. Args: cond: Condition on which to evaluate values. Returns: New DataManager with updated data and index.
[ "Gets", "values", "from", "this", "manager", "where", "cond", "is", "true", "else", "from", "other", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L641-L697
27,412
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler._scalar_operations
def _scalar_operations(self, axis, scalar, func): """Handler for mapping scalar operations across a Manager. Args: axis: The axis index object to execute the function on. scalar: The scalar value to map. func: The function to use on the Manager with the scalar. Returns: A new QueryCompiler with updated data and new index. """ if isinstance(scalar, (list, np.ndarray, pandas.Series)): new_index = self.index if axis == 0 else self.columns def list_like_op(df): if axis == 0: df.index = new_index else: df.columns = new_index return func(df) new_data = self._map_across_full_axis( axis, self._prepare_method(list_like_op) ) return self.__constructor__(new_data, self.index, self.columns) else: return self._map_partitions(self._prepare_method(func))
python
def _scalar_operations(self, axis, scalar, func): """Handler for mapping scalar operations across a Manager. Args: axis: The axis index object to execute the function on. scalar: The scalar value to map. func: The function to use on the Manager with the scalar. Returns: A new QueryCompiler with updated data and new index. """ if isinstance(scalar, (list, np.ndarray, pandas.Series)): new_index = self.index if axis == 0 else self.columns def list_like_op(df): if axis == 0: df.index = new_index else: df.columns = new_index return func(df) new_data = self._map_across_full_axis( axis, self._prepare_method(list_like_op) ) return self.__constructor__(new_data, self.index, self.columns) else: return self._map_partitions(self._prepare_method(func))
[ "def", "_scalar_operations", "(", "self", ",", "axis", ",", "scalar", ",", "func", ")", ":", "if", "isinstance", "(", "scalar", ",", "(", "list", ",", "np", ".", "ndarray", ",", "pandas", ".", "Series", ")", ")", ":", "new_index", "=", "self", ".", "index", "if", "axis", "==", "0", "else", "self", ".", "columns", "def", "list_like_op", "(", "df", ")", ":", "if", "axis", "==", "0", ":", "df", ".", "index", "=", "new_index", "else", ":", "df", ".", "columns", "=", "new_index", "return", "func", "(", "df", ")", "new_data", "=", "self", ".", "_map_across_full_axis", "(", "axis", ",", "self", ".", "_prepare_method", "(", "list_like_op", ")", ")", "return", "self", ".", "__constructor__", "(", "new_data", ",", "self", ".", "index", ",", "self", ".", "columns", ")", "else", ":", "return", "self", ".", "_map_partitions", "(", "self", ".", "_prepare_method", "(", "func", ")", ")" ]
Handler for mapping scalar operations across a Manager. Args: axis: The axis index object to execute the function on. scalar: The scalar value to map. func: The function to use on the Manager with the scalar. Returns: A new QueryCompiler with updated data and new index.
[ "Handler", "for", "mapping", "scalar", "operations", "across", "a", "Manager", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L702-L728
27,413
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.reindex
def reindex(self, axis, labels, **kwargs): """Fits a new index for this Manger. Args: axis: The axis index object to target the reindex on. labels: New labels to conform 'axis' on to. Returns: A new QueryCompiler with updated data and new index. """ # To reindex, we need a function that will be shipped to each of the # partitions. def reindex_builer(df, axis, old_labels, new_labels, **kwargs): if axis: while len(df.columns) < len(old_labels): df[len(df.columns)] = np.nan df.columns = old_labels new_df = df.reindex(columns=new_labels, **kwargs) # reset the internal columns back to a RangeIndex new_df.columns = pandas.RangeIndex(len(new_df.columns)) return new_df else: while len(df.index) < len(old_labels): df.loc[len(df.index)] = np.nan df.index = old_labels new_df = df.reindex(index=new_labels, **kwargs) # reset the internal index back to a RangeIndex new_df.reset_index(inplace=True, drop=True) return new_df old_labels = self.columns if axis else self.index new_index = self.index if axis else labels new_columns = labels if axis else self.columns func = self._prepare_method( lambda df: reindex_builer(df, axis, old_labels, labels, **kwargs) ) # The reindex can just be mapped over the axis we are modifying. This # is for simplicity in implementation. We specify num_splits here # because if we are repartitioning we should (in the future). # Additionally this operation is often followed by an operation that # assumes identical partitioning. Internally, we *may* change the # partitioning during a map across a full axis. new_data = self._map_across_full_axis(axis, func) return self.__constructor__(new_data, new_index, new_columns)
python
def reindex(self, axis, labels, **kwargs): """Fits a new index for this Manger. Args: axis: The axis index object to target the reindex on. labels: New labels to conform 'axis' on to. Returns: A new QueryCompiler with updated data and new index. """ # To reindex, we need a function that will be shipped to each of the # partitions. def reindex_builer(df, axis, old_labels, new_labels, **kwargs): if axis: while len(df.columns) < len(old_labels): df[len(df.columns)] = np.nan df.columns = old_labels new_df = df.reindex(columns=new_labels, **kwargs) # reset the internal columns back to a RangeIndex new_df.columns = pandas.RangeIndex(len(new_df.columns)) return new_df else: while len(df.index) < len(old_labels): df.loc[len(df.index)] = np.nan df.index = old_labels new_df = df.reindex(index=new_labels, **kwargs) # reset the internal index back to a RangeIndex new_df.reset_index(inplace=True, drop=True) return new_df old_labels = self.columns if axis else self.index new_index = self.index if axis else labels new_columns = labels if axis else self.columns func = self._prepare_method( lambda df: reindex_builer(df, axis, old_labels, labels, **kwargs) ) # The reindex can just be mapped over the axis we are modifying. This # is for simplicity in implementation. We specify num_splits here # because if we are repartitioning we should (in the future). # Additionally this operation is often followed by an operation that # assumes identical partitioning. Internally, we *may* change the # partitioning during a map across a full axis. new_data = self._map_across_full_axis(axis, func) return self.__constructor__(new_data, new_index, new_columns)
[ "def", "reindex", "(", "self", ",", "axis", ",", "labels", ",", "*", "*", "kwargs", ")", ":", "# To reindex, we need a function that will be shipped to each of the", "# partitions.", "def", "reindex_builer", "(", "df", ",", "axis", ",", "old_labels", ",", "new_labels", ",", "*", "*", "kwargs", ")", ":", "if", "axis", ":", "while", "len", "(", "df", ".", "columns", ")", "<", "len", "(", "old_labels", ")", ":", "df", "[", "len", "(", "df", ".", "columns", ")", "]", "=", "np", ".", "nan", "df", ".", "columns", "=", "old_labels", "new_df", "=", "df", ".", "reindex", "(", "columns", "=", "new_labels", ",", "*", "*", "kwargs", ")", "# reset the internal columns back to a RangeIndex", "new_df", ".", "columns", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "new_df", ".", "columns", ")", ")", "return", "new_df", "else", ":", "while", "len", "(", "df", ".", "index", ")", "<", "len", "(", "old_labels", ")", ":", "df", ".", "loc", "[", "len", "(", "df", ".", "index", ")", "]", "=", "np", ".", "nan", "df", ".", "index", "=", "old_labels", "new_df", "=", "df", ".", "reindex", "(", "index", "=", "new_labels", ",", "*", "*", "kwargs", ")", "# reset the internal index back to a RangeIndex", "new_df", ".", "reset_index", "(", "inplace", "=", "True", ",", "drop", "=", "True", ")", "return", "new_df", "old_labels", "=", "self", ".", "columns", "if", "axis", "else", "self", ".", "index", "new_index", "=", "self", ".", "index", "if", "axis", "else", "labels", "new_columns", "=", "labels", "if", "axis", "else", "self", ".", "columns", "func", "=", "self", ".", "_prepare_method", "(", "lambda", "df", ":", "reindex_builer", "(", "df", ",", "axis", ",", "old_labels", ",", "labels", ",", "*", "*", "kwargs", ")", ")", "# The reindex can just be mapped over the axis we are modifying. This", "# is for simplicity in implementation. 
We specify num_splits here", "# because if we are repartitioning we should (in the future).", "# Additionally this operation is often followed by an operation that", "# assumes identical partitioning. Internally, we *may* change the", "# partitioning during a map across a full axis.", "new_data", "=", "self", ".", "_map_across_full_axis", "(", "axis", ",", "func", ")", "return", "self", ".", "__constructor__", "(", "new_data", ",", "new_index", ",", "new_columns", ")" ]
Fits a new index for this Manger. Args: axis: The axis index object to target the reindex on. labels: New labels to conform 'axis' on to. Returns: A new QueryCompiler with updated data and new index.
[ "Fits", "a", "new", "index", "for", "this", "Manger", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L733-L777
27,414
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.reset_index
def reset_index(self, **kwargs): """Removes all levels from index and sets a default level_0 index. Returns: A new QueryCompiler with updated data and reset index. """ drop = kwargs.get("drop", False) new_index = pandas.RangeIndex(len(self.index)) if not drop: if isinstance(self.index, pandas.MultiIndex): # TODO (devin-petersohn) ensure partitioning is properly aligned new_column_names = pandas.Index(self.index.names) new_columns = new_column_names.append(self.columns) index_data = pandas.DataFrame(list(zip(*self.index))).T result = self.data.from_pandas(index_data).concat(1, self.data) return self.__constructor__(result, new_index, new_columns) else: new_column_name = ( self.index.name if self.index.name is not None else "index" if "index" not in self.columns else "level_0" ) new_columns = self.columns.insert(0, new_column_name) result = self.insert(0, new_column_name, self.index) return self.__constructor__(result.data, new_index, new_columns) else: # The copies here are to ensure that we do not give references to # this object for the purposes of updates. return self.__constructor__( self.data.copy(), new_index, self.columns.copy(), self._dtype_cache )
python
def reset_index(self, **kwargs): """Removes all levels from index and sets a default level_0 index. Returns: A new QueryCompiler with updated data and reset index. """ drop = kwargs.get("drop", False) new_index = pandas.RangeIndex(len(self.index)) if not drop: if isinstance(self.index, pandas.MultiIndex): # TODO (devin-petersohn) ensure partitioning is properly aligned new_column_names = pandas.Index(self.index.names) new_columns = new_column_names.append(self.columns) index_data = pandas.DataFrame(list(zip(*self.index))).T result = self.data.from_pandas(index_data).concat(1, self.data) return self.__constructor__(result, new_index, new_columns) else: new_column_name = ( self.index.name if self.index.name is not None else "index" if "index" not in self.columns else "level_0" ) new_columns = self.columns.insert(0, new_column_name) result = self.insert(0, new_column_name, self.index) return self.__constructor__(result.data, new_index, new_columns) else: # The copies here are to ensure that we do not give references to # this object for the purposes of updates. return self.__constructor__( self.data.copy(), new_index, self.columns.copy(), self._dtype_cache )
[ "def", "reset_index", "(", "self", ",", "*", "*", "kwargs", ")", ":", "drop", "=", "kwargs", ".", "get", "(", "\"drop\"", ",", "False", ")", "new_index", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "self", ".", "index", ")", ")", "if", "not", "drop", ":", "if", "isinstance", "(", "self", ".", "index", ",", "pandas", ".", "MultiIndex", ")", ":", "# TODO (devin-petersohn) ensure partitioning is properly aligned", "new_column_names", "=", "pandas", ".", "Index", "(", "self", ".", "index", ".", "names", ")", "new_columns", "=", "new_column_names", ".", "append", "(", "self", ".", "columns", ")", "index_data", "=", "pandas", ".", "DataFrame", "(", "list", "(", "zip", "(", "*", "self", ".", "index", ")", ")", ")", ".", "T", "result", "=", "self", ".", "data", ".", "from_pandas", "(", "index_data", ")", ".", "concat", "(", "1", ",", "self", ".", "data", ")", "return", "self", ".", "__constructor__", "(", "result", ",", "new_index", ",", "new_columns", ")", "else", ":", "new_column_name", "=", "(", "self", ".", "index", ".", "name", "if", "self", ".", "index", ".", "name", "is", "not", "None", "else", "\"index\"", "if", "\"index\"", "not", "in", "self", ".", "columns", "else", "\"level_0\"", ")", "new_columns", "=", "self", ".", "columns", ".", "insert", "(", "0", ",", "new_column_name", ")", "result", "=", "self", ".", "insert", "(", "0", ",", "new_column_name", ",", "self", ".", "index", ")", "return", "self", ".", "__constructor__", "(", "result", ".", "data", ",", "new_index", ",", "new_columns", ")", "else", ":", "# The copies here are to ensure that we do not give references to", "# this object for the purposes of updates.", "return", "self", ".", "__constructor__", "(", "self", ".", "data", ".", "copy", "(", ")", ",", "new_index", ",", "self", ".", "columns", ".", "copy", "(", ")", ",", "self", ".", "_dtype_cache", ")" ]
Removes all levels from index and sets a default level_0 index. Returns: A new QueryCompiler with updated data and reset index.
[ "Removes", "all", "levels", "from", "index", "and", "sets", "a", "default", "level_0", "index", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L779-L811
27,415
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.transpose
def transpose(self, *args, **kwargs): """Transposes this DataManager. Returns: Transposed new DataManager. """ new_data = self.data.transpose(*args, **kwargs) # Switch the index and columns and transpose the new_manager = self.__constructor__(new_data, self.columns, self.index) # It is possible that this is already transposed new_manager._is_transposed = self._is_transposed ^ 1 return new_manager
python
def transpose(self, *args, **kwargs): """Transposes this DataManager. Returns: Transposed new DataManager. """ new_data = self.data.transpose(*args, **kwargs) # Switch the index and columns and transpose the new_manager = self.__constructor__(new_data, self.columns, self.index) # It is possible that this is already transposed new_manager._is_transposed = self._is_transposed ^ 1 return new_manager
[ "def", "transpose", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "new_data", "=", "self", ".", "data", ".", "transpose", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# Switch the index and columns and transpose the", "new_manager", "=", "self", ".", "__constructor__", "(", "new_data", ",", "self", ".", "columns", ",", "self", ".", "index", ")", "# It is possible that this is already transposed", "new_manager", ".", "_is_transposed", "=", "self", ".", "_is_transposed", "^", "1", "return", "new_manager" ]
Transposes this DataManager. Returns: Transposed new DataManager.
[ "Transposes", "this", "DataManager", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L828-L839
27,416
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler._full_reduce
def _full_reduce(self, axis, map_func, reduce_func=None): """Apply function that will reduce the data to a Pandas Series. Args: axis: 0 for columns and 1 for rows. Default is 0. map_func: Callable function to map the dataframe. reduce_func: Callable function to reduce the dataframe. If none, then apply map_func twice. Return: A new QueryCompiler object containing the results from map_func and reduce_func. """ if reduce_func is None: reduce_func = map_func mapped_parts = self.data.map_across_blocks(map_func) full_frame = mapped_parts.map_across_full_axis(axis, reduce_func) if axis == 0: columns = self.columns return self.__constructor__( full_frame, index=["__reduced__"], columns=columns ) else: index = self.index return self.__constructor__( full_frame, index=index, columns=["__reduced__"] )
python
def _full_reduce(self, axis, map_func, reduce_func=None): """Apply function that will reduce the data to a Pandas Series. Args: axis: 0 for columns and 1 for rows. Default is 0. map_func: Callable function to map the dataframe. reduce_func: Callable function to reduce the dataframe. If none, then apply map_func twice. Return: A new QueryCompiler object containing the results from map_func and reduce_func. """ if reduce_func is None: reduce_func = map_func mapped_parts = self.data.map_across_blocks(map_func) full_frame = mapped_parts.map_across_full_axis(axis, reduce_func) if axis == 0: columns = self.columns return self.__constructor__( full_frame, index=["__reduced__"], columns=columns ) else: index = self.index return self.__constructor__( full_frame, index=index, columns=["__reduced__"] )
[ "def", "_full_reduce", "(", "self", ",", "axis", ",", "map_func", ",", "reduce_func", "=", "None", ")", ":", "if", "reduce_func", "is", "None", ":", "reduce_func", "=", "map_func", "mapped_parts", "=", "self", ".", "data", ".", "map_across_blocks", "(", "map_func", ")", "full_frame", "=", "mapped_parts", ".", "map_across_full_axis", "(", "axis", ",", "reduce_func", ")", "if", "axis", "==", "0", ":", "columns", "=", "self", ".", "columns", "return", "self", ".", "__constructor__", "(", "full_frame", ",", "index", "=", "[", "\"__reduced__\"", "]", ",", "columns", "=", "columns", ")", "else", ":", "index", "=", "self", ".", "index", "return", "self", ".", "__constructor__", "(", "full_frame", ",", "index", "=", "index", ",", "columns", "=", "[", "\"__reduced__\"", "]", ")" ]
Apply function that will reduce the data to a Pandas Series. Args: axis: 0 for columns and 1 for rows. Default is 0. map_func: Callable function to map the dataframe. reduce_func: Callable function to reduce the dataframe. If none, then apply map_func twice. Return: A new QueryCompiler object containing the results from map_func and reduce_func.
[ "Apply", "function", "that", "will", "reduce", "the", "data", "to", "a", "Pandas", "Series", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L847-L874
27,417
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.count
def count(self, **kwargs): """Counts the number of non-NaN objects for each column or row. Return: A new QueryCompiler object containing counts of non-NaN objects from each column or row. """ if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().count(**kwargs) axis = kwargs.get("axis", 0) map_func = self._build_mapreduce_func(pandas.DataFrame.count, **kwargs) reduce_func = self._build_mapreduce_func(pandas.DataFrame.sum, **kwargs) return self._full_reduce(axis, map_func, reduce_func)
python
def count(self, **kwargs): """Counts the number of non-NaN objects for each column or row. Return: A new QueryCompiler object containing counts of non-NaN objects from each column or row. """ if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().count(**kwargs) axis = kwargs.get("axis", 0) map_func = self._build_mapreduce_func(pandas.DataFrame.count, **kwargs) reduce_func = self._build_mapreduce_func(pandas.DataFrame.sum, **kwargs) return self._full_reduce(axis, map_func, reduce_func)
[ "def", "count", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_is_transposed", ":", "kwargs", "[", "\"axis\"", "]", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "^", "1", "return", "self", ".", "transpose", "(", ")", ".", "count", "(", "*", "*", "kwargs", ")", "axis", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "map_func", "=", "self", ".", "_build_mapreduce_func", "(", "pandas", ".", "DataFrame", ".", "count", ",", "*", "*", "kwargs", ")", "reduce_func", "=", "self", ".", "_build_mapreduce_func", "(", "pandas", ".", "DataFrame", ".", "sum", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_full_reduce", "(", "axis", ",", "map_func", ",", "reduce_func", ")" ]
Counts the number of non-NaN objects for each column or row. Return: A new QueryCompiler object containing counts of non-NaN objects from each column or row.
[ "Counts", "the", "number", "of", "non", "-", "NaN", "objects", "for", "each", "column", "or", "row", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L893-L906
27,418
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.mean
def mean(self, **kwargs): """Returns the mean for each numerical column or row. Return: A new QueryCompiler object containing the mean from each numerical column or row. """ if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().mean(**kwargs) # Pandas default is 0 (though not mentioned in docs) axis = kwargs.get("axis", 0) sums = self.sum(**kwargs) counts = self.count(axis=axis, numeric_only=kwargs.get("numeric_only", None)) if sums._is_transposed and counts._is_transposed: sums = sums.transpose() counts = counts.transpose() result = sums.binary_op("truediv", counts, axis=axis) return result.transpose() if axis == 0 else result
python
def mean(self, **kwargs): """Returns the mean for each numerical column or row. Return: A new QueryCompiler object containing the mean from each numerical column or row. """ if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().mean(**kwargs) # Pandas default is 0 (though not mentioned in docs) axis = kwargs.get("axis", 0) sums = self.sum(**kwargs) counts = self.count(axis=axis, numeric_only=kwargs.get("numeric_only", None)) if sums._is_transposed and counts._is_transposed: sums = sums.transpose() counts = counts.transpose() result = sums.binary_op("truediv", counts, axis=axis) return result.transpose() if axis == 0 else result
[ "def", "mean", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_is_transposed", ":", "kwargs", "[", "\"axis\"", "]", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "^", "1", "return", "self", ".", "transpose", "(", ")", ".", "mean", "(", "*", "*", "kwargs", ")", "# Pandas default is 0 (though not mentioned in docs)", "axis", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "sums", "=", "self", ".", "sum", "(", "*", "*", "kwargs", ")", "counts", "=", "self", ".", "count", "(", "axis", "=", "axis", ",", "numeric_only", "=", "kwargs", ".", "get", "(", "\"numeric_only\"", ",", "None", ")", ")", "if", "sums", ".", "_is_transposed", "and", "counts", ".", "_is_transposed", ":", "sums", "=", "sums", ".", "transpose", "(", ")", "counts", "=", "counts", ".", "transpose", "(", ")", "result", "=", "sums", ".", "binary_op", "(", "\"truediv\"", ",", "counts", ",", "axis", "=", "axis", ")", "return", "result", ".", "transpose", "(", ")", "if", "axis", "==", "0", "else", "result" ]
Returns the mean for each numerical column or row. Return: A new QueryCompiler object containing the mean from each numerical column or row.
[ "Returns", "the", "mean", "for", "each", "numerical", "column", "or", "row", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L920-L938
27,419
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.min
def min(self, **kwargs): """Returns the minimum from each column or row. Return: A new QueryCompiler object with the minimum value from each column or row. """ if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().min(**kwargs) mapreduce_func = self._build_mapreduce_func(pandas.DataFrame.min, **kwargs) return self._full_reduce(kwargs.get("axis", 0), mapreduce_func)
python
def min(self, **kwargs): """Returns the minimum from each column or row. Return: A new QueryCompiler object with the minimum value from each column or row. """ if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().min(**kwargs) mapreduce_func = self._build_mapreduce_func(pandas.DataFrame.min, **kwargs) return self._full_reduce(kwargs.get("axis", 0), mapreduce_func)
[ "def", "min", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_is_transposed", ":", "kwargs", "[", "\"axis\"", "]", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "^", "1", "return", "self", ".", "transpose", "(", ")", ".", "min", "(", "*", "*", "kwargs", ")", "mapreduce_func", "=", "self", ".", "_build_mapreduce_func", "(", "pandas", ".", "DataFrame", ".", "min", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_full_reduce", "(", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", ",", "mapreduce_func", ")" ]
Returns the minimum from each column or row. Return: A new QueryCompiler object with the minimum value from each column or row.
[ "Returns", "the", "minimum", "from", "each", "column", "or", "row", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L940-L950
27,420
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler._process_sum_prod
def _process_sum_prod(self, func, **kwargs):
    """Calculate the sum or product of the DataFrame.

    Args:
        func: Pandas function to apply to the DataFrame.
        **kwargs: Forwarded to ``func``; ``axis`` (default 0) selects the
            reduction axis and ``min_count`` (default 0) selects the
            reduction strategy below.

    Return:
        A new QueryCompiler object with the sum or product of the object.
    """
    axis = kwargs.get("axis", 0)
    min_count = kwargs.get("min_count", 0)

    def sum_prod_builder(df, **kwargs):
        return func(df, **kwargs)

    if min_count <= 1:
        # min_count of 0 or 1 matches the default pandas behavior, so the
        # cheaper map-reduce path can be used.
        return self._full_reduce(axis, sum_prod_builder)
    else:
        # A larger min_count requires seeing the entire axis at once.
        return self._full_axis_reduce(axis, sum_prod_builder)
python
def _process_sum_prod(self, func, **kwargs): """Calculates the sum or product of the DataFrame. Args: func: Pandas func to apply to DataFrame. ignore_axis: Whether to ignore axis when raising TypeError Return: A new QueryCompiler object with sum or prod of the object. """ axis = kwargs.get("axis", 0) min_count = kwargs.get("min_count", 0) def sum_prod_builder(df, **kwargs): return func(df, **kwargs) if min_count <= 1: return self._full_reduce(axis, sum_prod_builder) else: return self._full_axis_reduce(axis, sum_prod_builder)
[ "def", "_process_sum_prod", "(", "self", ",", "func", ",", "*", "*", "kwargs", ")", ":", "axis", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "min_count", "=", "kwargs", ".", "get", "(", "\"min_count\"", ",", "0", ")", "def", "sum_prod_builder", "(", "df", ",", "*", "*", "kwargs", ")", ":", "return", "func", "(", "df", ",", "*", "*", "kwargs", ")", "if", "min_count", "<=", "1", ":", "return", "self", ".", "_full_reduce", "(", "axis", ",", "sum_prod_builder", ")", "else", ":", "return", "self", ".", "_full_axis_reduce", "(", "axis", ",", "sum_prod_builder", ")" ]
Calculates the sum or product of the DataFrame. Args: func: Pandas func to apply to DataFrame. ignore_axis: Whether to ignore axis when raising TypeError Return: A new QueryCompiler object with sum or prod of the object.
[ "Calculates", "the", "sum", "or", "product", "of", "the", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L952-L970
27,421
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.prod
def prod(self, **kwargs):
    """Compute the product of each numerical column or row.

    Return:
        A new QueryCompiler object with the product of each numerical
        column or row.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().prod(**kwargs)
    reducer = self._build_mapreduce_func(pandas.DataFrame.prod, **kwargs)
    return self._process_sum_prod(reducer, **kwargs)
python
def prod(self, **kwargs): """Returns the product of each numerical column or row. Return: A new QueryCompiler object with the product of each numerical column or row. """ if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().prod(**kwargs) return self._process_sum_prod( self._build_mapreduce_func(pandas.DataFrame.prod, **kwargs), **kwargs )
[ "def", "prod", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_is_transposed", ":", "kwargs", "[", "\"axis\"", "]", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "^", "1", "return", "self", ".", "transpose", "(", ")", ".", "prod", "(", "*", "*", "kwargs", ")", "return", "self", ".", "_process_sum_prod", "(", "self", ".", "_build_mapreduce_func", "(", "pandas", ".", "DataFrame", ".", "prod", ",", "*", "*", "kwargs", ")", ",", "*", "*", "kwargs", ")" ]
Returns the product of each numerical column or row. Return: A new QueryCompiler object with the product of each numerical column or row.
[ "Returns", "the", "product", "of", "each", "numerical", "column", "or", "row", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L972-L983
27,422
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler._process_all_any
def _process_all_any(self, func, **kwargs):
    """Shared driver for ``all``/``any``: reduce values to booleans.

    Return:
        A new QueryCompiler object containing boolean values or boolean.
    """
    axis = kwargs.get("axis", 0)
    if axis is None:
        axis = 0
    kwargs["axis"] = axis
    reducer = self._build_mapreduce_func(func, **kwargs)
    return self._full_reduce(axis, reducer)
python
def _process_all_any(self, func, **kwargs): """Calculates if any or all the values are true. Return: A new QueryCompiler object containing boolean values or boolean. """ axis = kwargs.get("axis", 0) axis = 0 if axis is None else axis kwargs["axis"] = axis builder_func = self._build_mapreduce_func(func, **kwargs) return self._full_reduce(axis, builder_func)
[ "def", "_process_all_any", "(", "self", ",", "func", ",", "*", "*", "kwargs", ")", ":", "axis", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "axis", "=", "0", "if", "axis", "is", "None", "else", "axis", "kwargs", "[", "\"axis\"", "]", "=", "axis", "builder_func", "=", "self", ".", "_build_mapreduce_func", "(", "func", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_full_reduce", "(", "axis", ",", "builder_func", ")" ]
Calculates if any or all the values are true. Return: A new QueryCompiler object containing boolean values or boolean.
[ "Calculates", "if", "any", "or", "all", "the", "values", "are", "true", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L998-L1008
27,423
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.all
def all(self, **kwargs):
    """Return whether all the elements are true, potentially over an axis.

    Return:
        A new QueryCompiler object containing boolean values or boolean.
    """
    if self._is_transposed:
        # Pandas ignores bool_only on axis=1.
        kwargs["bool_only"] = False
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().all(**kwargs)
    return self._process_all_any(lambda df, **kw: df.all(**kw), **kwargs)
python
def all(self, **kwargs): """Returns whether all the elements are true, potentially over an axis. Return: A new QueryCompiler object containing boolean values or boolean. """ if self._is_transposed: # Pandas ignores on axis=1 kwargs["bool_only"] = False kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().all(**kwargs) return self._process_all_any(lambda df, **kwargs: df.all(**kwargs), **kwargs)
[ "def", "all", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_is_transposed", ":", "# Pandas ignores on axis=1", "kwargs", "[", "\"bool_only\"", "]", "=", "False", "kwargs", "[", "\"axis\"", "]", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "^", "1", "return", "self", ".", "transpose", "(", ")", ".", "all", "(", "*", "*", "kwargs", ")", "return", "self", ".", "_process_all_any", "(", "lambda", "df", ",", "*", "*", "kwargs", ":", "df", ".", "all", "(", "*", "*", "kwargs", ")", ",", "*", "*", "kwargs", ")" ]
Returns whether all the elements are true, potentially over an axis. Return: A new QueryCompiler object containing boolean values or boolean.
[ "Returns", "whether", "all", "the", "elements", "are", "true", "potentially", "over", "an", "axis", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1010-L1021
27,424
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.astype
def astype(self, col_dtypes, **kwargs):
    """Convert columns' dtypes to the given dtypes.

    Args:
        col_dtypes: Dictionary of {col: dtype, ...} where col is the
            column name and dtype is a numpy dtype.

    Returns:
        DataFrame with updated dtypes.
    """
    # Group column indices by target dtype so each dtype needs only one
    # map operation over the partitions.
    dtype_indices = {}
    columns = col_dtypes.keys()
    numeric_indices = list(self.columns.get_indexer_for(columns))
    # Series holding the updated dtypes for the new QueryCompiler.
    new_dtypes = self.dtypes.copy()
    for i, column in enumerate(columns):
        dtype = col_dtypes[column]
        if (
            not isinstance(dtype, type(self.dtypes[column]))
            or dtype != self.dtypes[column]
        ):
            # Only record the column if the dtype actually changes.
            dtype_indices.setdefault(dtype, []).append(numeric_indices[i])
            # Normalize to a proper numpy dtype where possible.
            try:
                new_dtype = np.dtype(dtype)
            except TypeError:
                new_dtype = dtype
            # Widen 32-bit requests that were not explicitly 32-bit.
            if dtype != np.int32 and new_dtype == np.int32:
                new_dtype = np.dtype("int64")
            elif dtype != np.float32 and new_dtype == np.float32:
                new_dtype = np.dtype("float64")
            new_dtypes[column] = new_dtype
    # Update partitions for each dtype that is updated.
    new_data = self.data
    for dtype in dtype_indices.keys():
        # Bind the loop variable as a default argument: if the callable is
        # applied lazily after the loop advances, a late-binding closure
        # would apply the wrong dtype; binding at definition time is safe
        # either way.
        def astype(df, internal_indices=[], dtype=dtype):
            block_dtypes = {}
            for ind in internal_indices:
                block_dtypes[df.columns[ind]] = dtype
            return df.astype(block_dtypes)

        new_data = new_data.apply_func_to_select_indices(
            0, astype, dtype_indices[dtype], keep_remaining=True
        )
    return self.__constructor__(new_data, self.index, self.columns, new_dtypes)
python
def astype(self, col_dtypes, **kwargs): """Converts columns dtypes to given dtypes. Args: col_dtypes: Dictionary of {col: dtype,...} where col is the column name and dtype is a numpy dtype. Returns: DataFrame with updated dtypes. """ # Group indices to update by dtype for less map operations dtype_indices = {} columns = col_dtypes.keys() numeric_indices = list(self.columns.get_indexer_for(columns)) # Create Series for the updated dtypes new_dtypes = self.dtypes.copy() for i, column in enumerate(columns): dtype = col_dtypes[column] if ( not isinstance(dtype, type(self.dtypes[column])) or dtype != self.dtypes[column] ): # Only add dtype only if different if dtype in dtype_indices.keys(): dtype_indices[dtype].append(numeric_indices[i]) else: dtype_indices[dtype] = [numeric_indices[i]] # Update the new dtype series to the proper pandas dtype try: new_dtype = np.dtype(dtype) except TypeError: new_dtype = dtype if dtype != np.int32 and new_dtype == np.int32: new_dtype = np.dtype("int64") elif dtype != np.float32 and new_dtype == np.float32: new_dtype = np.dtype("float64") new_dtypes[column] = new_dtype # Update partitions for each dtype that is updated new_data = self.data for dtype in dtype_indices.keys(): def astype(df, internal_indices=[]): block_dtypes = {} for ind in internal_indices: block_dtypes[df.columns[ind]] = dtype return df.astype(block_dtypes) new_data = new_data.apply_func_to_select_indices( 0, astype, dtype_indices[dtype], keep_remaining=True ) return self.__constructor__(new_data, self.index, self.columns, new_dtypes)
[ "def", "astype", "(", "self", ",", "col_dtypes", ",", "*", "*", "kwargs", ")", ":", "# Group indices to update by dtype for less map operations", "dtype_indices", "=", "{", "}", "columns", "=", "col_dtypes", ".", "keys", "(", ")", "numeric_indices", "=", "list", "(", "self", ".", "columns", ".", "get_indexer_for", "(", "columns", ")", ")", "# Create Series for the updated dtypes", "new_dtypes", "=", "self", ".", "dtypes", ".", "copy", "(", ")", "for", "i", ",", "column", "in", "enumerate", "(", "columns", ")", ":", "dtype", "=", "col_dtypes", "[", "column", "]", "if", "(", "not", "isinstance", "(", "dtype", ",", "type", "(", "self", ".", "dtypes", "[", "column", "]", ")", ")", "or", "dtype", "!=", "self", ".", "dtypes", "[", "column", "]", ")", ":", "# Only add dtype only if different", "if", "dtype", "in", "dtype_indices", ".", "keys", "(", ")", ":", "dtype_indices", "[", "dtype", "]", ".", "append", "(", "numeric_indices", "[", "i", "]", ")", "else", ":", "dtype_indices", "[", "dtype", "]", "=", "[", "numeric_indices", "[", "i", "]", "]", "# Update the new dtype series to the proper pandas dtype", "try", ":", "new_dtype", "=", "np", ".", "dtype", "(", "dtype", ")", "except", "TypeError", ":", "new_dtype", "=", "dtype", "if", "dtype", "!=", "np", ".", "int32", "and", "new_dtype", "==", "np", ".", "int32", ":", "new_dtype", "=", "np", ".", "dtype", "(", "\"int64\"", ")", "elif", "dtype", "!=", "np", ".", "float32", "and", "new_dtype", "==", "np", ".", "float32", ":", "new_dtype", "=", "np", ".", "dtype", "(", "\"float64\"", ")", "new_dtypes", "[", "column", "]", "=", "new_dtype", "# Update partitions for each dtype that is updated", "new_data", "=", "self", ".", "data", "for", "dtype", "in", "dtype_indices", ".", "keys", "(", ")", ":", "def", "astype", "(", "df", ",", "internal_indices", "=", "[", "]", ")", ":", "block_dtypes", "=", "{", "}", "for", "ind", "in", "internal_indices", ":", "block_dtypes", "[", "df", ".", "columns", "[", "ind", "]", "]", "=", 
"dtype", "return", "df", ".", "astype", "(", "block_dtypes", ")", "new_data", "=", "new_data", ".", "apply_func_to_select_indices", "(", "0", ",", "astype", ",", "dtype_indices", "[", "dtype", "]", ",", "keep_remaining", "=", "True", ")", "return", "self", ".", "__constructor__", "(", "new_data", ",", "self", ".", "index", ",", "self", ".", "columns", ",", "new_dtypes", ")" ]
Converts columns dtypes to given dtypes. Args: col_dtypes: Dictionary of {col: dtype,...} where col is the column name and dtype is a numpy dtype. Returns: DataFrame with updated dtypes.
[ "Converts", "columns", "dtypes", "to", "given", "dtypes", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1090-L1140
27,425
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler._full_axis_reduce
def _full_axis_reduce(self, axis, func, alternate_index=None):
    """Apply a reducing function that needs knowledge of the entire axis.

    Args:
        axis: Axis to apply the function to.
        func: Function to reduce the Manager by. This function takes in
            a Manager.
        alternate_index: If the resulting series should have an index
            different from the current query_compiler's index or columns.

    Return:
        Pandas series containing the reduced data.
    """
    reduced = self.data.map_across_full_axis(axis, func)
    if axis:
        new_index = alternate_index if alternate_index is not None else self.index
        return self.__constructor__(
            reduced, index=new_index, columns=["__reduced__"]
        )
    new_columns = alternate_index if alternate_index is not None else self.columns
    return self.__constructor__(
        reduced, index=["__reduced__"], columns=new_columns
    )
python
def _full_axis_reduce(self, axis, func, alternate_index=None): """Applies map that reduce Manager to series but require knowledge of full axis. Args: func: Function to reduce the Manager by. This function takes in a Manager. axis: axis to apply the function to. alternate_index: If the resulting series should have an index different from the current query_compiler's index or columns. Return: Pandas series containing the reduced data. """ result = self.data.map_across_full_axis(axis, func) if axis == 0: columns = alternate_index if alternate_index is not None else self.columns return self.__constructor__(result, index=["__reduced__"], columns=columns) else: index = alternate_index if alternate_index is not None else self.index return self.__constructor__(result, index=index, columns=["__reduced__"])
[ "def", "_full_axis_reduce", "(", "self", ",", "axis", ",", "func", ",", "alternate_index", "=", "None", ")", ":", "result", "=", "self", ".", "data", ".", "map_across_full_axis", "(", "axis", ",", "func", ")", "if", "axis", "==", "0", ":", "columns", "=", "alternate_index", "if", "alternate_index", "is", "not", "None", "else", "self", ".", "columns", "return", "self", ".", "__constructor__", "(", "result", ",", "index", "=", "[", "\"__reduced__\"", "]", ",", "columns", "=", "columns", ")", "else", ":", "index", "=", "alternate_index", "if", "alternate_index", "is", "not", "None", "else", "self", ".", "index", "return", "self", ".", "__constructor__", "(", "result", ",", "index", "=", "index", ",", "columns", "=", "[", "\"__reduced__\"", "]", ")" ]
Applies map that reduce Manager to series but require knowledge of full axis. Args: func: Function to reduce the Manager by. This function takes in a Manager. axis: axis to apply the function to. alternate_index: If the resulting series should have an index different from the current query_compiler's index or columns. Return: Pandas series containing the reduced data.
[ "Applies", "map", "that", "reduce", "Manager", "to", "series", "but", "require", "knowledge", "of", "full", "axis", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1148-L1166
27,426
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.idxmax
def idxmax(self, **kwargs):
    """Return the first occurrence of the maximum over the requested axis.

    Returns:
        A new QueryCompiler object containing the maximum of each column
        or axis.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().idxmax(**kwargs)
    axis = kwargs.get("axis", 0)
    labels = self.index if axis == 0 else self.columns

    def idxmax_builder(df, **kwargs):
        # Restore the real axis labels so idxmax reports them — the
        # partition presumably carries positional labels; TODO confirm.
        if axis == 0:
            df.index = labels
        else:
            df.columns = labels
        return df.idxmax(**kwargs)

    reducer = self._build_mapreduce_func(idxmax_builder, **kwargs)
    return self._full_axis_reduce(axis, reducer)
python
def idxmax(self, **kwargs): """Returns the first occurrence of the maximum over requested axis. Returns: A new QueryCompiler object containing the maximum of each column or axis. """ if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().idxmax(**kwargs) axis = kwargs.get("axis", 0) index = self.index if axis == 0 else self.columns def idxmax_builder(df, **kwargs): if axis == 0: df.index = index else: df.columns = index return df.idxmax(**kwargs) func = self._build_mapreduce_func(idxmax_builder, **kwargs) return self._full_axis_reduce(axis, func)
[ "def", "idxmax", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_is_transposed", ":", "kwargs", "[", "\"axis\"", "]", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "^", "1", "return", "self", ".", "transpose", "(", ")", ".", "idxmax", "(", "*", "*", "kwargs", ")", "axis", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "index", "=", "self", ".", "index", "if", "axis", "==", "0", "else", "self", ".", "columns", "def", "idxmax_builder", "(", "df", ",", "*", "*", "kwargs", ")", ":", "if", "axis", "==", "0", ":", "df", ".", "index", "=", "index", "else", ":", "df", ".", "columns", "=", "index", "return", "df", ".", "idxmax", "(", "*", "*", "kwargs", ")", "func", "=", "self", ".", "_build_mapreduce_func", "(", "idxmax_builder", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_full_axis_reduce", "(", "axis", ",", "func", ")" ]
Returns the first occurrence of the maximum over requested axis. Returns: A new QueryCompiler object containing the maximum of each column or axis.
[ "Returns", "the", "first", "occurrence", "of", "the", "maximum", "over", "requested", "axis", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1187-L1208
27,427
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.idxmin
def idxmin(self, **kwargs):
    """Return the first occurrence of the minimum over the requested axis.

    Returns:
        A new QueryCompiler object containing the minimum of each column
        or axis.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().idxmin(**kwargs)
    axis = kwargs.get("axis", 0)
    labels = self.index if axis == 0 else self.columns

    def idxmin_builder(df, **kwargs):
        # Restore the real axis labels so idxmin reports them — the
        # partition presumably carries positional labels; TODO confirm.
        if axis == 0:
            df.index = labels
        else:
            df.columns = labels
        return df.idxmin(**kwargs)

    reducer = self._build_mapreduce_func(idxmin_builder, **kwargs)
    return self._full_axis_reduce(axis, reducer)
python
def idxmin(self, **kwargs): """Returns the first occurrence of the minimum over requested axis. Returns: A new QueryCompiler object containing the minimum of each column or axis. """ if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().idxmin(**kwargs) axis = kwargs.get("axis", 0) index = self.index if axis == 0 else self.columns def idxmin_builder(df, **kwargs): if axis == 0: df.index = index else: df.columns = index return df.idxmin(**kwargs) func = self._build_mapreduce_func(idxmin_builder, **kwargs) return self._full_axis_reduce(axis, func)
[ "def", "idxmin", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_is_transposed", ":", "kwargs", "[", "\"axis\"", "]", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "^", "1", "return", "self", ".", "transpose", "(", ")", ".", "idxmin", "(", "*", "*", "kwargs", ")", "axis", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "index", "=", "self", ".", "index", "if", "axis", "==", "0", "else", "self", ".", "columns", "def", "idxmin_builder", "(", "df", ",", "*", "*", "kwargs", ")", ":", "if", "axis", "==", "0", ":", "df", ".", "index", "=", "index", "else", ":", "df", ".", "columns", "=", "index", "return", "df", ".", "idxmin", "(", "*", "*", "kwargs", ")", "func", "=", "self", ".", "_build_mapreduce_func", "(", "idxmin_builder", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_full_axis_reduce", "(", "axis", ",", "func", ")" ]
Returns the first occurrence of the minimum over requested axis. Returns: A new QueryCompiler object containing the minimum of each column or axis.
[ "Returns", "the", "first", "occurrence", "of", "the", "minimum", "over", "requested", "axis", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1210-L1231
27,428
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.median
def median(self, **kwargs):
    """Return the median of each column or row.

    Returns:
        A new QueryCompiler object containing the median of each column
        or row.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().median(**kwargs)
    # Pandas defaults to axis=0 (though this is not mentioned in its docs).
    axis = kwargs.get("axis", 0)
    reducer = self._build_mapreduce_func(pandas.DataFrame.median, **kwargs)
    return self._full_axis_reduce(axis, reducer)
python
def median(self, **kwargs): """Returns median of each column or row. Returns: A new QueryCompiler object containing the median of each column or row. """ if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().median(**kwargs) # Pandas default is 0 (though not mentioned in docs) axis = kwargs.get("axis", 0) func = self._build_mapreduce_func(pandas.DataFrame.median, **kwargs) return self._full_axis_reduce(axis, func)
[ "def", "median", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_is_transposed", ":", "kwargs", "[", "\"axis\"", "]", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "^", "1", "return", "self", ".", "transpose", "(", ")", ".", "median", "(", "*", "*", "kwargs", ")", "# Pandas default is 0 (though not mentioned in docs)", "axis", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "func", "=", "self", ".", "_build_mapreduce_func", "(", "pandas", ".", "DataFrame", ".", "median", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_full_axis_reduce", "(", "axis", ",", "func", ")" ]
Returns median of each column or row. Returns: A new QueryCompiler object containing the median of each column or row.
[ "Returns", "median", "of", "each", "column", "or", "row", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1251-L1263
27,429
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.memory_usage
def memory_usage(self, **kwargs):
    """Return the memory usage of each column.

    Returns:
        A new QueryCompiler object containing the memory usage of each
        column.
    """

    def memory_usage_builder(df, **kwargs):
        return df.memory_usage(**kwargs)

    reducer = self._build_mapreduce_func(memory_usage_builder, **kwargs)
    # Memory usage is always computed per column (axis 0).
    return self._full_axis_reduce(0, reducer)
python
def memory_usage(self, **kwargs): """Returns the memory usage of each column. Returns: A new QueryCompiler object containing the memory usage of each column. """ def memory_usage_builder(df, **kwargs): return df.memory_usage(**kwargs) func = self._build_mapreduce_func(memory_usage_builder, **kwargs) return self._full_axis_reduce(0, func)
[ "def", "memory_usage", "(", "self", ",", "*", "*", "kwargs", ")", ":", "def", "memory_usage_builder", "(", "df", ",", "*", "*", "kwargs", ")", ":", "return", "df", ".", "memory_usage", "(", "*", "*", "kwargs", ")", "func", "=", "self", ".", "_build_mapreduce_func", "(", "memory_usage_builder", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_full_axis_reduce", "(", "0", ",", "func", ")" ]
Returns the memory usage of each column. Returns: A new QueryCompiler object containing the memory usage of each column.
[ "Returns", "the", "memory", "usage", "of", "each", "column", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1265-L1276
27,430
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.quantile_for_single_value
def quantile_for_single_value(self, **kwargs):
    """Return the quantile of each column or row for a single scalar q.

    Returns:
        A new QueryCompiler object containing the quantile of each column
        or row.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().quantile_for_single_value(**kwargs)
    axis = kwargs.get("axis", 0)
    q = kwargs.get("q", 0.5)
    assert type(q) is float

    def quantile_builder(df, **kwargs):
        try:
            return pandas.DataFrame.quantile(df, **kwargs)
        except ValueError:
            # Presumably raised when the block has no numeric data to
            # take a quantile of — TODO confirm; fall back to empty.
            return pandas.Series()

    reducer = self._build_mapreduce_func(quantile_builder, **kwargs)
    out = self._full_axis_reduce(axis, reducer)
    # Label the reduced axis with the requested quantile value.
    if axis == 0:
        out.index = [q]
    else:
        out.columns = [q]
    return out
python
def quantile_for_single_value(self, **kwargs): """Returns quantile of each column or row. Returns: A new QueryCompiler object containing the quantile of each column or row. """ if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().quantile_for_single_value(**kwargs) axis = kwargs.get("axis", 0) q = kwargs.get("q", 0.5) assert type(q) is float def quantile_builder(df, **kwargs): try: return pandas.DataFrame.quantile(df, **kwargs) except ValueError: return pandas.Series() func = self._build_mapreduce_func(quantile_builder, **kwargs) result = self._full_axis_reduce(axis, func) if axis == 0: result.index = [q] else: result.columns = [q] return result
[ "def", "quantile_for_single_value", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_is_transposed", ":", "kwargs", "[", "\"axis\"", "]", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "^", "1", "return", "self", ".", "transpose", "(", ")", ".", "quantile_for_single_value", "(", "*", "*", "kwargs", ")", "axis", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "q", "=", "kwargs", ".", "get", "(", "\"q\"", ",", "0.5", ")", "assert", "type", "(", "q", ")", "is", "float", "def", "quantile_builder", "(", "df", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "pandas", ".", "DataFrame", ".", "quantile", "(", "df", ",", "*", "*", "kwargs", ")", "except", "ValueError", ":", "return", "pandas", ".", "Series", "(", ")", "func", "=", "self", ".", "_build_mapreduce_func", "(", "quantile_builder", ",", "*", "*", "kwargs", ")", "result", "=", "self", ".", "_full_axis_reduce", "(", "axis", ",", "func", ")", "if", "axis", "==", "0", ":", "result", ".", "index", "=", "[", "q", "]", "else", ":", "result", ".", "columns", "=", "[", "q", "]", "return", "result" ]
Returns quantile of each column or row. Returns: A new QueryCompiler object containing the quantile of each column or row.
[ "Returns", "quantile", "of", "each", "column", "or", "row", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1291-L1316
27,431
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler._full_axis_reduce_along_select_indices
def _full_axis_reduce_along_select_indices(self, func, axis, index):
    """Reduce Manager along select indices using a function that needs the full axis.

    Args:
        func: Callable that reduces the dimension of the object and
            requires full knowledge of the entire axis.
        axis: 0 for columns and 1 for rows. Defaults to 0.
        index: Index of the resulting QueryCompiler.

    Returns:
        A new QueryCompiler object with index or BaseFrameManager object.
    """
    # Convert labels to numeric indices. Use a set so each membership
    # check is O(1) instead of scanning `index` once per label.
    old_index = self.index if axis else self.columns
    wanted = set(index)
    numeric_indices = [i for i, name in enumerate(old_index) if name in wanted]
    result = self.data.apply_func_to_select_indices_along_full_axis(
        axis, func, numeric_indices
    )
    return result
python
def _full_axis_reduce_along_select_indices(self, func, axis, index): """Reduce Manger along select indices using function that needs full axis. Args: func: Callable that reduces the dimension of the object and requires full knowledge of the entire axis. axis: 0 for columns and 1 for rows. Defaults to 0. index: Index of the resulting QueryCompiler. Returns: A new QueryCompiler object with index or BaseFrameManager object. """ # Convert indices to numeric indices old_index = self.index if axis else self.columns numeric_indices = [i for i, name in enumerate(old_index) if name in index] result = self.data.apply_func_to_select_indices_along_full_axis( axis, func, numeric_indices ) return result
[ "def", "_full_axis_reduce_along_select_indices", "(", "self", ",", "func", ",", "axis", ",", "index", ")", ":", "# Convert indices to numeric indices", "old_index", "=", "self", ".", "index", "if", "axis", "else", "self", ".", "columns", "numeric_indices", "=", "[", "i", "for", "i", ",", "name", "in", "enumerate", "(", "old_index", ")", "if", "name", "in", "index", "]", "result", "=", "self", ".", "data", ".", "apply_func_to_select_indices_along_full_axis", "(", "axis", ",", "func", ",", "numeric_indices", ")", "return", "result" ]
Reduce Manger along select indices using function that needs full axis. Args: func: Callable that reduces the dimension of the object and requires full knowledge of the entire axis. axis: 0 for columns and 1 for rows. Defaults to 0. index: Index of the resulting QueryCompiler. Returns: A new QueryCompiler object with index or BaseFrameManager object.
[ "Reduce", "Manger", "along", "select", "indices", "using", "function", "that", "needs", "full", "axis", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1367-L1385
27,432
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.describe
def describe(self, **kwargs): """Generates descriptive statistics. Returns: DataFrame object containing the descriptive statistics of the DataFrame. """ # Use pandas to calculate the correct columns new_columns = ( pandas.DataFrame(columns=self.columns) .astype(self.dtypes) .describe(**kwargs) .columns ) def describe_builder(df, internal_indices=[], **kwargs): return df.iloc[:, internal_indices].describe(**kwargs) # Apply describe and update indices, columns, and dtypes func = self._prepare_method(describe_builder, **kwargs) new_data = self._full_axis_reduce_along_select_indices(func, 0, new_columns) new_index = self.compute_index(0, new_data, False) return self.__constructor__(new_data, new_index, new_columns)
python
def describe(self, **kwargs): """Generates descriptive statistics. Returns: DataFrame object containing the descriptive statistics of the DataFrame. """ # Use pandas to calculate the correct columns new_columns = ( pandas.DataFrame(columns=self.columns) .astype(self.dtypes) .describe(**kwargs) .columns ) def describe_builder(df, internal_indices=[], **kwargs): return df.iloc[:, internal_indices].describe(**kwargs) # Apply describe and update indices, columns, and dtypes func = self._prepare_method(describe_builder, **kwargs) new_data = self._full_axis_reduce_along_select_indices(func, 0, new_columns) new_index = self.compute_index(0, new_data, False) return self.__constructor__(new_data, new_index, new_columns)
[ "def", "describe", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Use pandas to calculate the correct columns", "new_columns", "=", "(", "pandas", ".", "DataFrame", "(", "columns", "=", "self", ".", "columns", ")", ".", "astype", "(", "self", ".", "dtypes", ")", ".", "describe", "(", "*", "*", "kwargs", ")", ".", "columns", ")", "def", "describe_builder", "(", "df", ",", "internal_indices", "=", "[", "]", ",", "*", "*", "kwargs", ")", ":", "return", "df", ".", "iloc", "[", ":", ",", "internal_indices", "]", ".", "describe", "(", "*", "*", "kwargs", ")", "# Apply describe and update indices, columns, and dtypes", "func", "=", "self", ".", "_prepare_method", "(", "describe_builder", ",", "*", "*", "kwargs", ")", "new_data", "=", "self", ".", "_full_axis_reduce_along_select_indices", "(", "func", ",", "0", ",", "new_columns", ")", "new_index", "=", "self", ".", "compute_index", "(", "0", ",", "new_data", ",", "False", ")", "return", "self", ".", "__constructor__", "(", "new_data", ",", "new_index", ",", "new_columns", ")" ]
Generates descriptive statistics. Returns: DataFrame object containing the descriptive statistics of the DataFrame.
[ "Generates", "descriptive", "statistics", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1387-L1408
27,433
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.eval
def eval(self, expr, **kwargs): """Returns a new QueryCompiler with expr evaluated on columns. Args: expr: The string expression to evaluate. Returns: A new QueryCompiler with new columns after applying expr. """ columns = self.index if self._is_transposed else self.columns index = self.columns if self._is_transposed else self.index # Make a copy of columns and eval on the copy to determine if result type is # series or not columns_copy = pandas.DataFrame(columns=self.columns) columns_copy = columns_copy.eval(expr, inplace=False, **kwargs) expect_series = isinstance(columns_copy, pandas.Series) def eval_builder(df, **kwargs): # pop the `axis` parameter because it was needed to build the mapreduce # function but it is not a parameter used by `eval`. kwargs.pop("axis", None) df.columns = columns result = df.eval(expr, inplace=False, **kwargs) return result func = self._build_mapreduce_func(eval_builder, axis=1, **kwargs) new_data = self._map_across_full_axis(1, func) if expect_series: new_columns = [columns_copy.name] new_index = index else: new_columns = columns_copy.columns new_index = self.index return self.__constructor__(new_data, new_index, new_columns)
python
def eval(self, expr, **kwargs): """Returns a new QueryCompiler with expr evaluated on columns. Args: expr: The string expression to evaluate. Returns: A new QueryCompiler with new columns after applying expr. """ columns = self.index if self._is_transposed else self.columns index = self.columns if self._is_transposed else self.index # Make a copy of columns and eval on the copy to determine if result type is # series or not columns_copy = pandas.DataFrame(columns=self.columns) columns_copy = columns_copy.eval(expr, inplace=False, **kwargs) expect_series = isinstance(columns_copy, pandas.Series) def eval_builder(df, **kwargs): # pop the `axis` parameter because it was needed to build the mapreduce # function but it is not a parameter used by `eval`. kwargs.pop("axis", None) df.columns = columns result = df.eval(expr, inplace=False, **kwargs) return result func = self._build_mapreduce_func(eval_builder, axis=1, **kwargs) new_data = self._map_across_full_axis(1, func) if expect_series: new_columns = [columns_copy.name] new_index = index else: new_columns = columns_copy.columns new_index = self.index return self.__constructor__(new_data, new_index, new_columns)
[ "def", "eval", "(", "self", ",", "expr", ",", "*", "*", "kwargs", ")", ":", "columns", "=", "self", ".", "index", "if", "self", ".", "_is_transposed", "else", "self", ".", "columns", "index", "=", "self", ".", "columns", "if", "self", ".", "_is_transposed", "else", "self", ".", "index", "# Make a copy of columns and eval on the copy to determine if result type is", "# series or not", "columns_copy", "=", "pandas", ".", "DataFrame", "(", "columns", "=", "self", ".", "columns", ")", "columns_copy", "=", "columns_copy", ".", "eval", "(", "expr", ",", "inplace", "=", "False", ",", "*", "*", "kwargs", ")", "expect_series", "=", "isinstance", "(", "columns_copy", ",", "pandas", ".", "Series", ")", "def", "eval_builder", "(", "df", ",", "*", "*", "kwargs", ")", ":", "# pop the `axis` parameter because it was needed to build the mapreduce", "# function but it is not a parameter used by `eval`.", "kwargs", ".", "pop", "(", "\"axis\"", ",", "None", ")", "df", ".", "columns", "=", "columns", "result", "=", "df", ".", "eval", "(", "expr", ",", "inplace", "=", "False", ",", "*", "*", "kwargs", ")", "return", "result", "func", "=", "self", ".", "_build_mapreduce_func", "(", "eval_builder", ",", "axis", "=", "1", ",", "*", "*", "kwargs", ")", "new_data", "=", "self", ".", "_map_across_full_axis", "(", "1", ",", "func", ")", "if", "expect_series", ":", "new_columns", "=", "[", "columns_copy", ".", "name", "]", "new_index", "=", "index", "else", ":", "new_columns", "=", "columns_copy", ".", "columns", "new_index", "=", "self", ".", "index", "return", "self", ".", "__constructor__", "(", "new_data", ",", "new_index", ",", "new_columns", ")" ]
Returns a new QueryCompiler with expr evaluated on columns. Args: expr: The string expression to evaluate. Returns: A new QueryCompiler with new columns after applying expr.
[ "Returns", "a", "new", "QueryCompiler", "with", "expr", "evaluated", "on", "columns", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1527-L1562
27,434
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.mode
def mode(self, **kwargs): """Returns a new QueryCompiler with modes calculated for each label along given axis. Returns: A new QueryCompiler with modes calculated. """ axis = kwargs.get("axis", 0) def mode_builder(df, **kwargs): result = df.mode(**kwargs) # We return a dataframe with the same shape as the input to ensure # that all the partitions will be the same shape if not axis and len(df) != len(result): # Pad columns append_values = pandas.DataFrame( columns=result.columns, index=range(len(result), len(df)) ) result = pandas.concat([result, append_values], ignore_index=True) elif axis and len(df.columns) != len(result.columns): # Pad rows append_vals = pandas.DataFrame( columns=range(len(result.columns), len(df.columns)), index=result.index, ) result = pandas.concat([result, append_vals], axis=1) return pandas.DataFrame(result) func = self._prepare_method(mode_builder, **kwargs) new_data = self._map_across_full_axis(axis, func) new_index = pandas.RangeIndex(len(self.index)) if not axis else self.index new_columns = self.columns if not axis else pandas.RangeIndex(len(self.columns)) new_dtypes = self._dtype_cache if new_dtypes is not None: new_dtypes.index = new_columns return self.__constructor__( new_data, new_index, new_columns, new_dtypes ).dropna(axis=axis, how="all")
python
def mode(self, **kwargs): """Returns a new QueryCompiler with modes calculated for each label along given axis. Returns: A new QueryCompiler with modes calculated. """ axis = kwargs.get("axis", 0) def mode_builder(df, **kwargs): result = df.mode(**kwargs) # We return a dataframe with the same shape as the input to ensure # that all the partitions will be the same shape if not axis and len(df) != len(result): # Pad columns append_values = pandas.DataFrame( columns=result.columns, index=range(len(result), len(df)) ) result = pandas.concat([result, append_values], ignore_index=True) elif axis and len(df.columns) != len(result.columns): # Pad rows append_vals = pandas.DataFrame( columns=range(len(result.columns), len(df.columns)), index=result.index, ) result = pandas.concat([result, append_vals], axis=1) return pandas.DataFrame(result) func = self._prepare_method(mode_builder, **kwargs) new_data = self._map_across_full_axis(axis, func) new_index = pandas.RangeIndex(len(self.index)) if not axis else self.index new_columns = self.columns if not axis else pandas.RangeIndex(len(self.columns)) new_dtypes = self._dtype_cache if new_dtypes is not None: new_dtypes.index = new_columns return self.__constructor__( new_data, new_index, new_columns, new_dtypes ).dropna(axis=axis, how="all")
[ "def", "mode", "(", "self", ",", "*", "*", "kwargs", ")", ":", "axis", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "def", "mode_builder", "(", "df", ",", "*", "*", "kwargs", ")", ":", "result", "=", "df", ".", "mode", "(", "*", "*", "kwargs", ")", "# We return a dataframe with the same shape as the input to ensure", "# that all the partitions will be the same shape", "if", "not", "axis", "and", "len", "(", "df", ")", "!=", "len", "(", "result", ")", ":", "# Pad columns", "append_values", "=", "pandas", ".", "DataFrame", "(", "columns", "=", "result", ".", "columns", ",", "index", "=", "range", "(", "len", "(", "result", ")", ",", "len", "(", "df", ")", ")", ")", "result", "=", "pandas", ".", "concat", "(", "[", "result", ",", "append_values", "]", ",", "ignore_index", "=", "True", ")", "elif", "axis", "and", "len", "(", "df", ".", "columns", ")", "!=", "len", "(", "result", ".", "columns", ")", ":", "# Pad rows", "append_vals", "=", "pandas", ".", "DataFrame", "(", "columns", "=", "range", "(", "len", "(", "result", ".", "columns", ")", ",", "len", "(", "df", ".", "columns", ")", ")", ",", "index", "=", "result", ".", "index", ",", ")", "result", "=", "pandas", ".", "concat", "(", "[", "result", ",", "append_vals", "]", ",", "axis", "=", "1", ")", "return", "pandas", ".", "DataFrame", "(", "result", ")", "func", "=", "self", ".", "_prepare_method", "(", "mode_builder", ",", "*", "*", "kwargs", ")", "new_data", "=", "self", ".", "_map_across_full_axis", "(", "axis", ",", "func", ")", "new_index", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "self", ".", "index", ")", ")", "if", "not", "axis", "else", "self", ".", "index", "new_columns", "=", "self", ".", "columns", "if", "not", "axis", "else", "pandas", ".", "RangeIndex", "(", "len", "(", "self", ".", "columns", ")", ")", "new_dtypes", "=", "self", ".", "_dtype_cache", "if", "new_dtypes", "is", "not", "None", ":", "new_dtypes", ".", "index", "=", "new_columns", "return", "self", ".", 
"__constructor__", "(", "new_data", ",", "new_index", ",", "new_columns", ",", "new_dtypes", ")", ".", "dropna", "(", "axis", "=", "axis", ",", "how", "=", "\"all\"", ")" ]
Returns a new QueryCompiler with modes calculated for each label along given axis. Returns: A new QueryCompiler with modes calculated.
[ "Returns", "a", "new", "QueryCompiler", "with", "modes", "calculated", "for", "each", "label", "along", "given", "axis", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1564-L1601
27,435
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.fillna
def fillna(self, **kwargs): """Replaces NaN values with the method provided. Returns: A new QueryCompiler with null values filled. """ axis = kwargs.get("axis", 0) value = kwargs.get("value") if isinstance(value, dict): value = kwargs.pop("value") if axis == 0: index = self.columns else: index = self.index value = { idx: value[key] for key in value for idx in index.get_indexer_for([key]) } def fillna_dict_builder(df, func_dict={}): # We do this to ensure that no matter the state of the columns we get # the correct ones. func_dict = {df.columns[idx]: func_dict[idx] for idx in func_dict} return df.fillna(value=func_dict, **kwargs) new_data = self.data.apply_func_to_select_indices( axis, fillna_dict_builder, value, keep_remaining=True ) return self.__constructor__(new_data, self.index, self.columns) else: func = self._prepare_method(pandas.DataFrame.fillna, **kwargs) new_data = self._map_across_full_axis(axis, func) return self.__constructor__(new_data, self.index, self.columns)
python
def fillna(self, **kwargs): """Replaces NaN values with the method provided. Returns: A new QueryCompiler with null values filled. """ axis = kwargs.get("axis", 0) value = kwargs.get("value") if isinstance(value, dict): value = kwargs.pop("value") if axis == 0: index = self.columns else: index = self.index value = { idx: value[key] for key in value for idx in index.get_indexer_for([key]) } def fillna_dict_builder(df, func_dict={}): # We do this to ensure that no matter the state of the columns we get # the correct ones. func_dict = {df.columns[idx]: func_dict[idx] for idx in func_dict} return df.fillna(value=func_dict, **kwargs) new_data = self.data.apply_func_to_select_indices( axis, fillna_dict_builder, value, keep_remaining=True ) return self.__constructor__(new_data, self.index, self.columns) else: func = self._prepare_method(pandas.DataFrame.fillna, **kwargs) new_data = self._map_across_full_axis(axis, func) return self.__constructor__(new_data, self.index, self.columns)
[ "def", "fillna", "(", "self", ",", "*", "*", "kwargs", ")", ":", "axis", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "value", "=", "kwargs", ".", "get", "(", "\"value\"", ")", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "value", "=", "kwargs", ".", "pop", "(", "\"value\"", ")", "if", "axis", "==", "0", ":", "index", "=", "self", ".", "columns", "else", ":", "index", "=", "self", ".", "index", "value", "=", "{", "idx", ":", "value", "[", "key", "]", "for", "key", "in", "value", "for", "idx", "in", "index", ".", "get_indexer_for", "(", "[", "key", "]", ")", "}", "def", "fillna_dict_builder", "(", "df", ",", "func_dict", "=", "{", "}", ")", ":", "# We do this to ensure that no matter the state of the columns we get", "# the correct ones.", "func_dict", "=", "{", "df", ".", "columns", "[", "idx", "]", ":", "func_dict", "[", "idx", "]", "for", "idx", "in", "func_dict", "}", "return", "df", ".", "fillna", "(", "value", "=", "func_dict", ",", "*", "*", "kwargs", ")", "new_data", "=", "self", ".", "data", ".", "apply_func_to_select_indices", "(", "axis", ",", "fillna_dict_builder", ",", "value", ",", "keep_remaining", "=", "True", ")", "return", "self", ".", "__constructor__", "(", "new_data", ",", "self", ".", "index", ",", "self", ".", "columns", ")", "else", ":", "func", "=", "self", ".", "_prepare_method", "(", "pandas", ".", "DataFrame", ".", "fillna", ",", "*", "*", "kwargs", ")", "new_data", "=", "self", ".", "_map_across_full_axis", "(", "axis", ",", "func", ")", "return", "self", ".", "__constructor__", "(", "new_data", ",", "self", ".", "index", ",", "self", ".", "columns", ")" ]
Replaces NaN values with the method provided. Returns: A new QueryCompiler with null values filled.
[ "Replaces", "NaN", "values", "with", "the", "method", "provided", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1603-L1635
27,436
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.query
def query(self, expr, **kwargs): """Query columns of the DataManager with a boolean expression. Args: expr: Boolean expression to query the columns with. Returns: DataManager containing the rows where the boolean expression is satisfied. """ columns = self.columns def query_builder(df, **kwargs): # This is required because of an Arrow limitation # TODO revisit for Arrow error df = df.copy() df.index = pandas.RangeIndex(len(df)) df.columns = columns df.query(expr, inplace=True, **kwargs) df.columns = pandas.RangeIndex(len(df.columns)) return df func = self._prepare_method(query_builder, **kwargs) new_data = self._map_across_full_axis(1, func) # Query removes rows, so we need to update the index new_index = self.compute_index(0, new_data, True) return self.__constructor__(new_data, new_index, self.columns, self.dtypes)
python
def query(self, expr, **kwargs): """Query columns of the DataManager with a boolean expression. Args: expr: Boolean expression to query the columns with. Returns: DataManager containing the rows where the boolean expression is satisfied. """ columns = self.columns def query_builder(df, **kwargs): # This is required because of an Arrow limitation # TODO revisit for Arrow error df = df.copy() df.index = pandas.RangeIndex(len(df)) df.columns = columns df.query(expr, inplace=True, **kwargs) df.columns = pandas.RangeIndex(len(df.columns)) return df func = self._prepare_method(query_builder, **kwargs) new_data = self._map_across_full_axis(1, func) # Query removes rows, so we need to update the index new_index = self.compute_index(0, new_data, True) return self.__constructor__(new_data, new_index, self.columns, self.dtypes)
[ "def", "query", "(", "self", ",", "expr", ",", "*", "*", "kwargs", ")", ":", "columns", "=", "self", ".", "columns", "def", "query_builder", "(", "df", ",", "*", "*", "kwargs", ")", ":", "# This is required because of an Arrow limitation", "# TODO revisit for Arrow error", "df", "=", "df", ".", "copy", "(", ")", "df", ".", "index", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "df", ")", ")", "df", ".", "columns", "=", "columns", "df", ".", "query", "(", "expr", ",", "inplace", "=", "True", ",", "*", "*", "kwargs", ")", "df", ".", "columns", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "df", ".", "columns", ")", ")", "return", "df", "func", "=", "self", ".", "_prepare_method", "(", "query_builder", ",", "*", "*", "kwargs", ")", "new_data", "=", "self", ".", "_map_across_full_axis", "(", "1", ",", "func", ")", "# Query removes rows, so we need to update the index", "new_index", "=", "self", ".", "compute_index", "(", "0", ",", "new_data", ",", "True", ")", "return", "self", ".", "__constructor__", "(", "new_data", ",", "new_index", ",", "self", ".", "columns", ",", "self", ".", "dtypes", ")" ]
Query columns of the DataManager with a boolean expression. Args: expr: Boolean expression to query the columns with. Returns: DataManager containing the rows where the boolean expression is satisfied.
[ "Query", "columns", "of", "the", "DataManager", "with", "a", "boolean", "expression", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1637-L1663
27,437
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.rank
def rank(self, **kwargs): """Computes numerical rank along axis. Equal values are set to the average. Returns: DataManager containing the ranks of the values along an axis. """ axis = kwargs.get("axis", 0) numeric_only = True if axis else kwargs.get("numeric_only", False) func = self._prepare_method(pandas.DataFrame.rank, **kwargs) new_data = self._map_across_full_axis(axis, func) # Since we assume no knowledge of internal state, we get the columns # from the internal partitions. if numeric_only: new_columns = self.compute_index(1, new_data, True) else: new_columns = self.columns new_dtypes = pandas.Series([np.float64 for _ in new_columns], index=new_columns) return self.__constructor__(new_data, self.index, new_columns, new_dtypes)
python
def rank(self, **kwargs): """Computes numerical rank along axis. Equal values are set to the average. Returns: DataManager containing the ranks of the values along an axis. """ axis = kwargs.get("axis", 0) numeric_only = True if axis else kwargs.get("numeric_only", False) func = self._prepare_method(pandas.DataFrame.rank, **kwargs) new_data = self._map_across_full_axis(axis, func) # Since we assume no knowledge of internal state, we get the columns # from the internal partitions. if numeric_only: new_columns = self.compute_index(1, new_data, True) else: new_columns = self.columns new_dtypes = pandas.Series([np.float64 for _ in new_columns], index=new_columns) return self.__constructor__(new_data, self.index, new_columns, new_dtypes)
[ "def", "rank", "(", "self", ",", "*", "*", "kwargs", ")", ":", "axis", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "numeric_only", "=", "True", "if", "axis", "else", "kwargs", ".", "get", "(", "\"numeric_only\"", ",", "False", ")", "func", "=", "self", ".", "_prepare_method", "(", "pandas", ".", "DataFrame", ".", "rank", ",", "*", "*", "kwargs", ")", "new_data", "=", "self", ".", "_map_across_full_axis", "(", "axis", ",", "func", ")", "# Since we assume no knowledge of internal state, we get the columns", "# from the internal partitions.", "if", "numeric_only", ":", "new_columns", "=", "self", ".", "compute_index", "(", "1", ",", "new_data", ",", "True", ")", "else", ":", "new_columns", "=", "self", ".", "columns", "new_dtypes", "=", "pandas", ".", "Series", "(", "[", "np", ".", "float64", "for", "_", "in", "new_columns", "]", ",", "index", "=", "new_columns", ")", "return", "self", ".", "__constructor__", "(", "new_data", ",", "self", ".", "index", ",", "new_columns", ",", "new_dtypes", ")" ]
Computes numerical rank along axis. Equal values are set to the average. Returns: DataManager containing the ranks of the values along an axis.
[ "Computes", "numerical", "rank", "along", "axis", ".", "Equal", "values", "are", "set", "to", "the", "average", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1665-L1682
27,438
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.sort_index
def sort_index(self, **kwargs): """Sorts the data with respect to either the columns or the indices. Returns: DataManager containing the data sorted by columns or indices. """ axis = kwargs.pop("axis", 0) index = self.columns if axis else self.index # sort_index can have ascending be None and behaves as if it is False. # sort_values cannot have ascending be None. Thus, the following logic is to # convert the ascending argument to one that works with sort_values ascending = kwargs.pop("ascending", True) if ascending is None: ascending = False kwargs["ascending"] = ascending def sort_index_builder(df, **kwargs): if axis: df.columns = index else: df.index = index return df.sort_index(axis=axis, **kwargs) func = self._prepare_method(sort_index_builder, **kwargs) new_data = self._map_across_full_axis(axis, func) if axis: new_columns = pandas.Series(self.columns).sort_values(**kwargs) new_index = self.index else: new_index = pandas.Series(self.index).sort_values(**kwargs) new_columns = self.columns return self.__constructor__( new_data, new_index, new_columns, self.dtypes.copy() )
python
def sort_index(self, **kwargs): """Sorts the data with respect to either the columns or the indices. Returns: DataManager containing the data sorted by columns or indices. """ axis = kwargs.pop("axis", 0) index = self.columns if axis else self.index # sort_index can have ascending be None and behaves as if it is False. # sort_values cannot have ascending be None. Thus, the following logic is to # convert the ascending argument to one that works with sort_values ascending = kwargs.pop("ascending", True) if ascending is None: ascending = False kwargs["ascending"] = ascending def sort_index_builder(df, **kwargs): if axis: df.columns = index else: df.index = index return df.sort_index(axis=axis, **kwargs) func = self._prepare_method(sort_index_builder, **kwargs) new_data = self._map_across_full_axis(axis, func) if axis: new_columns = pandas.Series(self.columns).sort_values(**kwargs) new_index = self.index else: new_index = pandas.Series(self.index).sort_values(**kwargs) new_columns = self.columns return self.__constructor__( new_data, new_index, new_columns, self.dtypes.copy() )
[ "def", "sort_index", "(", "self", ",", "*", "*", "kwargs", ")", ":", "axis", "=", "kwargs", ".", "pop", "(", "\"axis\"", ",", "0", ")", "index", "=", "self", ".", "columns", "if", "axis", "else", "self", ".", "index", "# sort_index can have ascending be None and behaves as if it is False.", "# sort_values cannot have ascending be None. Thus, the following logic is to", "# convert the ascending argument to one that works with sort_values", "ascending", "=", "kwargs", ".", "pop", "(", "\"ascending\"", ",", "True", ")", "if", "ascending", "is", "None", ":", "ascending", "=", "False", "kwargs", "[", "\"ascending\"", "]", "=", "ascending", "def", "sort_index_builder", "(", "df", ",", "*", "*", "kwargs", ")", ":", "if", "axis", ":", "df", ".", "columns", "=", "index", "else", ":", "df", ".", "index", "=", "index", "return", "df", ".", "sort_index", "(", "axis", "=", "axis", ",", "*", "*", "kwargs", ")", "func", "=", "self", ".", "_prepare_method", "(", "sort_index_builder", ",", "*", "*", "kwargs", ")", "new_data", "=", "self", ".", "_map_across_full_axis", "(", "axis", ",", "func", ")", "if", "axis", ":", "new_columns", "=", "pandas", ".", "Series", "(", "self", ".", "columns", ")", ".", "sort_values", "(", "*", "*", "kwargs", ")", "new_index", "=", "self", ".", "index", "else", ":", "new_index", "=", "pandas", ".", "Series", "(", "self", ".", "index", ")", ".", "sort_values", "(", "*", "*", "kwargs", ")", "new_columns", "=", "self", ".", "columns", "return", "self", ".", "__constructor__", "(", "new_data", ",", "new_index", ",", "new_columns", ",", "self", ".", "dtypes", ".", "copy", "(", ")", ")" ]
Sorts the data with respect to either the columns or the indices. Returns: DataManager containing the data sorted by columns or indices.
[ "Sorts", "the", "data", "with", "respect", "to", "either", "the", "columns", "or", "the", "indices", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1684-L1718
27,439
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler._map_across_full_axis_select_indices
def _map_across_full_axis_select_indices( self, axis, func, indices, keep_remaining=False ): """Maps function to select indices along full axis. Args: axis: 0 for columns and 1 for rows. func: Callable mapping function over the BlockParitions. indices: indices along axis to map over. keep_remaining: True if keep indices where function was not applied. Returns: BaseFrameManager containing the result of mapping func over axis on indices. """ return self.data.apply_func_to_select_indices_along_full_axis( axis, func, indices, keep_remaining )
python
def _map_across_full_axis_select_indices( self, axis, func, indices, keep_remaining=False ): """Maps function to select indices along full axis. Args: axis: 0 for columns and 1 for rows. func: Callable mapping function over the BlockParitions. indices: indices along axis to map over. keep_remaining: True if keep indices where function was not applied. Returns: BaseFrameManager containing the result of mapping func over axis on indices. """ return self.data.apply_func_to_select_indices_along_full_axis( axis, func, indices, keep_remaining )
[ "def", "_map_across_full_axis_select_indices", "(", "self", ",", "axis", ",", "func", ",", "indices", ",", "keep_remaining", "=", "False", ")", ":", "return", "self", ".", "data", ".", "apply_func_to_select_indices_along_full_axis", "(", "axis", ",", "func", ",", "indices", ",", "keep_remaining", ")" ]
Maps function to select indices along full axis. Args: axis: 0 for columns and 1 for rows. func: Callable mapping function over the BlockParitions. indices: indices along axis to map over. keep_remaining: True if keep indices where function was not applied. Returns: BaseFrameManager containing the result of mapping func over axis on indices.
[ "Maps", "function", "to", "select", "indices", "along", "full", "axis", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1726-L1742
27,440
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.quantile_for_list_of_values
def quantile_for_list_of_values(self, **kwargs): """Returns Manager containing quantiles along an axis for numeric columns. Returns: DataManager containing quantiles of original DataManager along an axis. """ if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().quantile_for_list_of_values(**kwargs) axis = kwargs.get("axis", 0) q = kwargs.get("q") numeric_only = kwargs.get("numeric_only", True) assert isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list)) if numeric_only: new_columns = self.numeric_columns() else: new_columns = [ col for col, dtype in zip(self.columns, self.dtypes) if (is_numeric_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype)) ] if axis: # If along rows, then drop the nonnumeric columns, record the index, and # take transpose. We have to do this because if we don't, the result is all # in one column for some reason. nonnumeric = [ col for col, dtype in zip(self.columns, self.dtypes) if not is_numeric_dtype(dtype) ] query_compiler = self.drop(columns=nonnumeric) new_columns = query_compiler.index else: query_compiler = self def quantile_builder(df, **kwargs): result = df.quantile(**kwargs) return result.T if axis == 1 else result func = query_compiler._prepare_method(quantile_builder, **kwargs) q_index = pandas.Float64Index(q) new_data = query_compiler._map_across_full_axis(axis, func) # This took a long time to debug, so here is the rundown of why this is needed. # Previously, we were operating on select indices, but that was broken. We were # not correctly setting the columns/index. Because of how we compute `to_pandas` # and because of the static nature of the index for `axis=1` it is easier to # just handle this as the transpose (see `quantile_builder` above for the # transpose within the partition) than it is to completely rework other # internal methods. Basically we are returning the transpose of the object for # correctness and cleanliness of the code. 
if axis == 1: q_index = new_columns new_columns = pandas.Float64Index(q) result = self.__constructor__(new_data, q_index, new_columns) return result.transpose() if axis == 1 else result
python
def quantile_for_list_of_values(self, **kwargs): """Returns Manager containing quantiles along an axis for numeric columns. Returns: DataManager containing quantiles of original DataManager along an axis. """ if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().quantile_for_list_of_values(**kwargs) axis = kwargs.get("axis", 0) q = kwargs.get("q") numeric_only = kwargs.get("numeric_only", True) assert isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list)) if numeric_only: new_columns = self.numeric_columns() else: new_columns = [ col for col, dtype in zip(self.columns, self.dtypes) if (is_numeric_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype)) ] if axis: # If along rows, then drop the nonnumeric columns, record the index, and # take transpose. We have to do this because if we don't, the result is all # in one column for some reason. nonnumeric = [ col for col, dtype in zip(self.columns, self.dtypes) if not is_numeric_dtype(dtype) ] query_compiler = self.drop(columns=nonnumeric) new_columns = query_compiler.index else: query_compiler = self def quantile_builder(df, **kwargs): result = df.quantile(**kwargs) return result.T if axis == 1 else result func = query_compiler._prepare_method(quantile_builder, **kwargs) q_index = pandas.Float64Index(q) new_data = query_compiler._map_across_full_axis(axis, func) # This took a long time to debug, so here is the rundown of why this is needed. # Previously, we were operating on select indices, but that was broken. We were # not correctly setting the columns/index. Because of how we compute `to_pandas` # and because of the static nature of the index for `axis=1` it is easier to # just handle this as the transpose (see `quantile_builder` above for the # transpose within the partition) than it is to completely rework other # internal methods. Basically we are returning the transpose of the object for # correctness and cleanliness of the code. 
if axis == 1: q_index = new_columns new_columns = pandas.Float64Index(q) result = self.__constructor__(new_data, q_index, new_columns) return result.transpose() if axis == 1 else result
[ "def", "quantile_for_list_of_values", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_is_transposed", ":", "kwargs", "[", "\"axis\"", "]", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "^", "1", "return", "self", ".", "transpose", "(", ")", ".", "quantile_for_list_of_values", "(", "*", "*", "kwargs", ")", "axis", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "q", "=", "kwargs", ".", "get", "(", "\"q\"", ")", "numeric_only", "=", "kwargs", ".", "get", "(", "\"numeric_only\"", ",", "True", ")", "assert", "isinstance", "(", "q", ",", "(", "pandas", ".", "Series", ",", "np", ".", "ndarray", ",", "pandas", ".", "Index", ",", "list", ")", ")", "if", "numeric_only", ":", "new_columns", "=", "self", ".", "numeric_columns", "(", ")", "else", ":", "new_columns", "=", "[", "col", "for", "col", ",", "dtype", "in", "zip", "(", "self", ".", "columns", ",", "self", ".", "dtypes", ")", "if", "(", "is_numeric_dtype", "(", "dtype", ")", "or", "is_datetime_or_timedelta_dtype", "(", "dtype", ")", ")", "]", "if", "axis", ":", "# If along rows, then drop the nonnumeric columns, record the index, and", "# take transpose. 
We have to do this because if we don't, the result is all", "# in one column for some reason.", "nonnumeric", "=", "[", "col", "for", "col", ",", "dtype", "in", "zip", "(", "self", ".", "columns", ",", "self", ".", "dtypes", ")", "if", "not", "is_numeric_dtype", "(", "dtype", ")", "]", "query_compiler", "=", "self", ".", "drop", "(", "columns", "=", "nonnumeric", ")", "new_columns", "=", "query_compiler", ".", "index", "else", ":", "query_compiler", "=", "self", "def", "quantile_builder", "(", "df", ",", "*", "*", "kwargs", ")", ":", "result", "=", "df", ".", "quantile", "(", "*", "*", "kwargs", ")", "return", "result", ".", "T", "if", "axis", "==", "1", "else", "result", "func", "=", "query_compiler", ".", "_prepare_method", "(", "quantile_builder", ",", "*", "*", "kwargs", ")", "q_index", "=", "pandas", ".", "Float64Index", "(", "q", ")", "new_data", "=", "query_compiler", ".", "_map_across_full_axis", "(", "axis", ",", "func", ")", "# This took a long time to debug, so here is the rundown of why this is needed.", "# Previously, we were operating on select indices, but that was broken. We were", "# not correctly setting the columns/index. Because of how we compute `to_pandas`", "# and because of the static nature of the index for `axis=1` it is easier to", "# just handle this as the transpose (see `quantile_builder` above for the", "# transpose within the partition) than it is to completely rework other", "# internal methods. Basically we are returning the transpose of the object for", "# correctness and cleanliness of the code.", "if", "axis", "==", "1", ":", "q_index", "=", "new_columns", "new_columns", "=", "pandas", ".", "Float64Index", "(", "q", ")", "result", "=", "self", ".", "__constructor__", "(", "new_data", ",", "q_index", ",", "new_columns", ")", "return", "result", ".", "transpose", "(", ")", "if", "axis", "==", "1", "else", "result" ]
Returns Manager containing quantiles along an axis for numeric columns. Returns: DataManager containing quantiles of original DataManager along an axis.
[ "Returns", "Manager", "containing", "quantiles", "along", "an", "axis", "for", "numeric", "columns", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1744-L1800
27,441
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.tail
def tail(self, n): """Returns the last n rows. Args: n: Integer containing the number of rows to return. Returns: DataManager containing the last n rows of the original DataManager. """ # See head for an explanation of the transposed behavior if n < 0: n = max(0, len(self.index) + n) if self._is_transposed: result = self.__constructor__( self.data.transpose().take(1, -n).transpose(), self.index[-n:], self.columns, self._dtype_cache, ) result._is_transposed = True else: result = self.__constructor__( self.data.take(0, -n), self.index[-n:], self.columns, self._dtype_cache ) return result
python
def tail(self, n): """Returns the last n rows. Args: n: Integer containing the number of rows to return. Returns: DataManager containing the last n rows of the original DataManager. """ # See head for an explanation of the transposed behavior if n < 0: n = max(0, len(self.index) + n) if self._is_transposed: result = self.__constructor__( self.data.transpose().take(1, -n).transpose(), self.index[-n:], self.columns, self._dtype_cache, ) result._is_transposed = True else: result = self.__constructor__( self.data.take(0, -n), self.index[-n:], self.columns, self._dtype_cache ) return result
[ "def", "tail", "(", "self", ",", "n", ")", ":", "# See head for an explanation of the transposed behavior", "if", "n", "<", "0", ":", "n", "=", "max", "(", "0", ",", "len", "(", "self", ".", "index", ")", "+", "n", ")", "if", "self", ".", "_is_transposed", ":", "result", "=", "self", ".", "__constructor__", "(", "self", ".", "data", ".", "transpose", "(", ")", ".", "take", "(", "1", ",", "-", "n", ")", ".", "transpose", "(", ")", ",", "self", ".", "index", "[", "-", "n", ":", "]", ",", "self", ".", "columns", ",", "self", ".", "_dtype_cache", ",", ")", "result", ".", "_is_transposed", "=", "True", "else", ":", "result", "=", "self", ".", "__constructor__", "(", "self", ".", "data", ".", "take", "(", "0", ",", "-", "n", ")", ",", "self", ".", "index", "[", "-", "n", ":", "]", ",", "self", ".", "columns", ",", "self", ".", "_dtype_cache", ")", "return", "result" ]
Returns the last n rows. Args: n: Integer containing the number of rows to return. Returns: DataManager containing the last n rows of the original DataManager.
[ "Returns", "the", "last", "n", "rows", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1837-L1861
27,442
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.front
def front(self, n): """Returns the first n columns. Args: n: Integer containing the number of columns to return. Returns: DataManager containing the first n columns of the original DataManager. """ new_dtypes = ( self._dtype_cache if self._dtype_cache is None else self._dtype_cache[:n] ) # See head for an explanation of the transposed behavior if self._is_transposed: result = self.__constructor__( self.data.transpose().take(0, n).transpose(), self.index, self.columns[:n], new_dtypes, ) result._is_transposed = True else: result = self.__constructor__( self.data.take(1, n), self.index, self.columns[:n], new_dtypes ) return result
python
def front(self, n): """Returns the first n columns. Args: n: Integer containing the number of columns to return. Returns: DataManager containing the first n columns of the original DataManager. """ new_dtypes = ( self._dtype_cache if self._dtype_cache is None else self._dtype_cache[:n] ) # See head for an explanation of the transposed behavior if self._is_transposed: result = self.__constructor__( self.data.transpose().take(0, n).transpose(), self.index, self.columns[:n], new_dtypes, ) result._is_transposed = True else: result = self.__constructor__( self.data.take(1, n), self.index, self.columns[:n], new_dtypes ) return result
[ "def", "front", "(", "self", ",", "n", ")", ":", "new_dtypes", "=", "(", "self", ".", "_dtype_cache", "if", "self", ".", "_dtype_cache", "is", "None", "else", "self", ".", "_dtype_cache", "[", ":", "n", "]", ")", "# See head for an explanation of the transposed behavior", "if", "self", ".", "_is_transposed", ":", "result", "=", "self", ".", "__constructor__", "(", "self", ".", "data", ".", "transpose", "(", ")", ".", "take", "(", "0", ",", "n", ")", ".", "transpose", "(", ")", ",", "self", ".", "index", ",", "self", ".", "columns", "[", ":", "n", "]", ",", "new_dtypes", ",", ")", "result", ".", "_is_transposed", "=", "True", "else", ":", "result", "=", "self", ".", "__constructor__", "(", "self", ".", "data", ".", "take", "(", "1", ",", "n", ")", ",", "self", ".", "index", ",", "self", ".", "columns", "[", ":", "n", "]", ",", "new_dtypes", ")", "return", "result" ]
Returns the first n columns. Args: n: Integer containing the number of columns to return. Returns: DataManager containing the first n columns of the original DataManager.
[ "Returns", "the", "first", "n", "columns", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1863-L1888
27,443
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.getitem_column_array
def getitem_column_array(self, key): """Get column data for target labels. Args: key: Target labels by which to retrieve data. Returns: A new QueryCompiler. """ # Convert to list for type checking numeric_indices = list(self.columns.get_indexer_for(key)) # Internal indices is left blank and the internal # `apply_func_to_select_indices` will do the conversion and pass it in. def getitem(df, internal_indices=[]): return df.iloc[:, internal_indices] result = self.data.apply_func_to_select_indices( 0, getitem, numeric_indices, keep_remaining=False ) # We can't just set the columns to key here because there may be # multiple instances of a key. new_columns = self.columns[numeric_indices] new_dtypes = self.dtypes[numeric_indices] return self.__constructor__(result, self.index, new_columns, new_dtypes)
python
def getitem_column_array(self, key): """Get column data for target labels. Args: key: Target labels by which to retrieve data. Returns: A new QueryCompiler. """ # Convert to list for type checking numeric_indices = list(self.columns.get_indexer_for(key)) # Internal indices is left blank and the internal # `apply_func_to_select_indices` will do the conversion and pass it in. def getitem(df, internal_indices=[]): return df.iloc[:, internal_indices] result = self.data.apply_func_to_select_indices( 0, getitem, numeric_indices, keep_remaining=False ) # We can't just set the columns to key here because there may be # multiple instances of a key. new_columns = self.columns[numeric_indices] new_dtypes = self.dtypes[numeric_indices] return self.__constructor__(result, self.index, new_columns, new_dtypes)
[ "def", "getitem_column_array", "(", "self", ",", "key", ")", ":", "# Convert to list for type checking", "numeric_indices", "=", "list", "(", "self", ".", "columns", ".", "get_indexer_for", "(", "key", ")", ")", "# Internal indices is left blank and the internal", "# `apply_func_to_select_indices` will do the conversion and pass it in.", "def", "getitem", "(", "df", ",", "internal_indices", "=", "[", "]", ")", ":", "return", "df", ".", "iloc", "[", ":", ",", "internal_indices", "]", "result", "=", "self", ".", "data", ".", "apply_func_to_select_indices", "(", "0", ",", "getitem", ",", "numeric_indices", ",", "keep_remaining", "=", "False", ")", "# We can't just set the columns to key here because there may be", "# multiple instances of a key.", "new_columns", "=", "self", ".", "columns", "[", "numeric_indices", "]", "new_dtypes", "=", "self", ".", "dtypes", "[", "numeric_indices", "]", "return", "self", ".", "__constructor__", "(", "result", ",", "self", ".", "index", ",", "new_columns", ",", "new_dtypes", ")" ]
Get column data for target labels. Args: key: Target labels by which to retrieve data. Returns: A new QueryCompiler.
[ "Get", "column", "data", "for", "target", "labels", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1920-L1944
27,444
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.getitem_row_array
def getitem_row_array(self, key): """Get row data for target labels. Args: key: Target numeric indices by which to retrieve data. Returns: A new QueryCompiler. """ # Convert to list for type checking key = list(key) def getitem(df, internal_indices=[]): return df.iloc[internal_indices] result = self.data.apply_func_to_select_indices( 1, getitem, key, keep_remaining=False ) # We can't just set the index to key here because there may be multiple # instances of a key. new_index = self.index[key] return self.__constructor__(result, new_index, self.columns, self._dtype_cache)
python
def getitem_row_array(self, key): """Get row data for target labels. Args: key: Target numeric indices by which to retrieve data. Returns: A new QueryCompiler. """ # Convert to list for type checking key = list(key) def getitem(df, internal_indices=[]): return df.iloc[internal_indices] result = self.data.apply_func_to_select_indices( 1, getitem, key, keep_remaining=False ) # We can't just set the index to key here because there may be multiple # instances of a key. new_index = self.index[key] return self.__constructor__(result, new_index, self.columns, self._dtype_cache)
[ "def", "getitem_row_array", "(", "self", ",", "key", ")", ":", "# Convert to list for type checking", "key", "=", "list", "(", "key", ")", "def", "getitem", "(", "df", ",", "internal_indices", "=", "[", "]", ")", ":", "return", "df", ".", "iloc", "[", "internal_indices", "]", "result", "=", "self", ".", "data", ".", "apply_func_to_select_indices", "(", "1", ",", "getitem", ",", "key", ",", "keep_remaining", "=", "False", ")", "# We can't just set the index to key here because there may be multiple", "# instances of a key.", "new_index", "=", "self", ".", "index", "[", "key", "]", "return", "self", ".", "__constructor__", "(", "result", ",", "new_index", ",", "self", ".", "columns", ",", "self", ".", "_dtype_cache", ")" ]
Get row data for target labels. Args: key: Target numeric indices by which to retrieve data. Returns: A new QueryCompiler.
[ "Get", "row", "data", "for", "target", "labels", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1946-L1967
27,445
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.setitem
def setitem(self, axis, key, value): """Set the column defined by `key` to the `value` provided. Args: key: The column name to set. value: The value to set the column to. Returns: A new QueryCompiler """ def setitem(df, internal_indices=[]): def _setitem(): if len(internal_indices) == 1: if axis == 0: df[df.columns[internal_indices[0]]] = value else: df.iloc[internal_indices[0]] = value else: if axis == 0: df[df.columns[internal_indices]] = value else: df.iloc[internal_indices] = value try: _setitem() except ValueError: # TODO: This is a workaround for a pyarrow serialization issue df = df.copy() _setitem() return df if axis == 0: numeric_indices = list(self.columns.get_indexer_for([key])) else: numeric_indices = list(self.index.get_indexer_for([key])) prepared_func = self._prepare_method(setitem) if is_list_like(value): new_data = self.data.apply_func_to_select_indices_along_full_axis( axis, prepared_func, numeric_indices, keep_remaining=True ) else: new_data = self.data.apply_func_to_select_indices( axis, prepared_func, numeric_indices, keep_remaining=True ) return self.__constructor__(new_data, self.index, self.columns)
python
def setitem(self, axis, key, value): """Set the column defined by `key` to the `value` provided. Args: key: The column name to set. value: The value to set the column to. Returns: A new QueryCompiler """ def setitem(df, internal_indices=[]): def _setitem(): if len(internal_indices) == 1: if axis == 0: df[df.columns[internal_indices[0]]] = value else: df.iloc[internal_indices[0]] = value else: if axis == 0: df[df.columns[internal_indices]] = value else: df.iloc[internal_indices] = value try: _setitem() except ValueError: # TODO: This is a workaround for a pyarrow serialization issue df = df.copy() _setitem() return df if axis == 0: numeric_indices = list(self.columns.get_indexer_for([key])) else: numeric_indices = list(self.index.get_indexer_for([key])) prepared_func = self._prepare_method(setitem) if is_list_like(value): new_data = self.data.apply_func_to_select_indices_along_full_axis( axis, prepared_func, numeric_indices, keep_remaining=True ) else: new_data = self.data.apply_func_to_select_indices( axis, prepared_func, numeric_indices, keep_remaining=True ) return self.__constructor__(new_data, self.index, self.columns)
[ "def", "setitem", "(", "self", ",", "axis", ",", "key", ",", "value", ")", ":", "def", "setitem", "(", "df", ",", "internal_indices", "=", "[", "]", ")", ":", "def", "_setitem", "(", ")", ":", "if", "len", "(", "internal_indices", ")", "==", "1", ":", "if", "axis", "==", "0", ":", "df", "[", "df", ".", "columns", "[", "internal_indices", "[", "0", "]", "]", "]", "=", "value", "else", ":", "df", ".", "iloc", "[", "internal_indices", "[", "0", "]", "]", "=", "value", "else", ":", "if", "axis", "==", "0", ":", "df", "[", "df", ".", "columns", "[", "internal_indices", "]", "]", "=", "value", "else", ":", "df", ".", "iloc", "[", "internal_indices", "]", "=", "value", "try", ":", "_setitem", "(", ")", "except", "ValueError", ":", "# TODO: This is a workaround for a pyarrow serialization issue", "df", "=", "df", ".", "copy", "(", ")", "_setitem", "(", ")", "return", "df", "if", "axis", "==", "0", ":", "numeric_indices", "=", "list", "(", "self", ".", "columns", ".", "get_indexer_for", "(", "[", "key", "]", ")", ")", "else", ":", "numeric_indices", "=", "list", "(", "self", ".", "index", ".", "get_indexer_for", "(", "[", "key", "]", ")", ")", "prepared_func", "=", "self", ".", "_prepare_method", "(", "setitem", ")", "if", "is_list_like", "(", "value", ")", ":", "new_data", "=", "self", ".", "data", ".", "apply_func_to_select_indices_along_full_axis", "(", "axis", ",", "prepared_func", ",", "numeric_indices", ",", "keep_remaining", "=", "True", ")", "else", ":", "new_data", "=", "self", ".", "data", ".", "apply_func_to_select_indices", "(", "axis", ",", "prepared_func", ",", "numeric_indices", ",", "keep_remaining", "=", "True", ")", "return", "self", ".", "__constructor__", "(", "new_data", ",", "self", ".", "index", ",", "self", ".", "columns", ")" ]
Set the column defined by `key` to the `value` provided. Args: key: The column name to set. value: The value to set the column to. Returns: A new QueryCompiler
[ "Set", "the", "column", "defined", "by", "key", "to", "the", "value", "provided", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1969-L2014
27,446
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.drop
def drop(self, index=None, columns=None): """Remove row data for target index and columns. Args: index: Target index to drop. columns: Target columns to drop. Returns: A new QueryCompiler. """ if self._is_transposed: return self.transpose().drop(index=columns, columns=index).transpose() if index is None: new_data = self.data new_index = self.index else: def delitem(df, internal_indices=[]): return df.drop(index=df.index[internal_indices]) numeric_indices = list(self.index.get_indexer_for(index)) new_data = self.data.apply_func_to_select_indices( 1, delitem, numeric_indices, keep_remaining=True ) # We can't use self.index.drop with duplicate keys because in Pandas # it throws an error. new_index = self.index[~self.index.isin(index)] if columns is None: new_columns = self.columns new_dtypes = self.dtypes else: def delitem(df, internal_indices=[]): return df.drop(columns=df.columns[internal_indices]) numeric_indices = list(self.columns.get_indexer_for(columns)) new_data = new_data.apply_func_to_select_indices( 0, delitem, numeric_indices, keep_remaining=True ) new_columns = self.columns[~self.columns.isin(columns)] new_dtypes = self.dtypes.drop(columns) return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
python
def drop(self, index=None, columns=None): """Remove row data for target index and columns. Args: index: Target index to drop. columns: Target columns to drop. Returns: A new QueryCompiler. """ if self._is_transposed: return self.transpose().drop(index=columns, columns=index).transpose() if index is None: new_data = self.data new_index = self.index else: def delitem(df, internal_indices=[]): return df.drop(index=df.index[internal_indices]) numeric_indices = list(self.index.get_indexer_for(index)) new_data = self.data.apply_func_to_select_indices( 1, delitem, numeric_indices, keep_remaining=True ) # We can't use self.index.drop with duplicate keys because in Pandas # it throws an error. new_index = self.index[~self.index.isin(index)] if columns is None: new_columns = self.columns new_dtypes = self.dtypes else: def delitem(df, internal_indices=[]): return df.drop(columns=df.columns[internal_indices]) numeric_indices = list(self.columns.get_indexer_for(columns)) new_data = new_data.apply_func_to_select_indices( 0, delitem, numeric_indices, keep_remaining=True ) new_columns = self.columns[~self.columns.isin(columns)] new_dtypes = self.dtypes.drop(columns) return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
[ "def", "drop", "(", "self", ",", "index", "=", "None", ",", "columns", "=", "None", ")", ":", "if", "self", ".", "_is_transposed", ":", "return", "self", ".", "transpose", "(", ")", ".", "drop", "(", "index", "=", "columns", ",", "columns", "=", "index", ")", ".", "transpose", "(", ")", "if", "index", "is", "None", ":", "new_data", "=", "self", ".", "data", "new_index", "=", "self", ".", "index", "else", ":", "def", "delitem", "(", "df", ",", "internal_indices", "=", "[", "]", ")", ":", "return", "df", ".", "drop", "(", "index", "=", "df", ".", "index", "[", "internal_indices", "]", ")", "numeric_indices", "=", "list", "(", "self", ".", "index", ".", "get_indexer_for", "(", "index", ")", ")", "new_data", "=", "self", ".", "data", ".", "apply_func_to_select_indices", "(", "1", ",", "delitem", ",", "numeric_indices", ",", "keep_remaining", "=", "True", ")", "# We can't use self.index.drop with duplicate keys because in Pandas", "# it throws an error.", "new_index", "=", "self", ".", "index", "[", "~", "self", ".", "index", ".", "isin", "(", "index", ")", "]", "if", "columns", "is", "None", ":", "new_columns", "=", "self", ".", "columns", "new_dtypes", "=", "self", ".", "dtypes", "else", ":", "def", "delitem", "(", "df", ",", "internal_indices", "=", "[", "]", ")", ":", "return", "df", ".", "drop", "(", "columns", "=", "df", ".", "columns", "[", "internal_indices", "]", ")", "numeric_indices", "=", "list", "(", "self", ".", "columns", ".", "get_indexer_for", "(", "columns", ")", ")", "new_data", "=", "new_data", ".", "apply_func_to_select_indices", "(", "0", ",", "delitem", ",", "numeric_indices", ",", "keep_remaining", "=", "True", ")", "new_columns", "=", "self", ".", "columns", "[", "~", "self", ".", "columns", ".", "isin", "(", "columns", ")", "]", "new_dtypes", "=", "self", ".", "dtypes", ".", "drop", "(", "columns", ")", "return", "self", ".", "__constructor__", "(", "new_data", ",", "new_index", ",", "new_columns", ",", "new_dtypes", ")" ]
Remove row data for target index and columns. Args: index: Target index to drop. columns: Target columns to drop. Returns: A new QueryCompiler.
[ "Remove", "row", "data", "for", "target", "index", "and", "columns", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2020-L2062
27,447
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.insert
def insert(self, loc, column, value): """Insert new column data. Args: loc: Insertion index. column: Column labels to insert. value: Dtype object values to insert. Returns: A new PandasQueryCompiler with new data inserted. """ if is_list_like(value): # TODO make work with another querycompiler object as `value`. # This will require aligning the indices with a `reindex` and ensuring that # the data is partitioned identically. if isinstance(value, pandas.Series): value = value.reindex(self.index) value = list(value) def insert(df, internal_indices=[]): internal_idx = int(internal_indices[0]) old_index = df.index df.index = pandas.RangeIndex(len(df.index)) df.insert(internal_idx, internal_idx, value, allow_duplicates=True) df.columns = pandas.RangeIndex(len(df.columns)) df.index = old_index return df new_data = self.data.apply_func_to_select_indices_along_full_axis( 0, insert, loc, keep_remaining=True ) new_columns = self.columns.insert(loc, column) return self.__constructor__(new_data, self.index, new_columns)
python
def insert(self, loc, column, value): """Insert new column data. Args: loc: Insertion index. column: Column labels to insert. value: Dtype object values to insert. Returns: A new PandasQueryCompiler with new data inserted. """ if is_list_like(value): # TODO make work with another querycompiler object as `value`. # This will require aligning the indices with a `reindex` and ensuring that # the data is partitioned identically. if isinstance(value, pandas.Series): value = value.reindex(self.index) value = list(value) def insert(df, internal_indices=[]): internal_idx = int(internal_indices[0]) old_index = df.index df.index = pandas.RangeIndex(len(df.index)) df.insert(internal_idx, internal_idx, value, allow_duplicates=True) df.columns = pandas.RangeIndex(len(df.columns)) df.index = old_index return df new_data = self.data.apply_func_to_select_indices_along_full_axis( 0, insert, loc, keep_remaining=True ) new_columns = self.columns.insert(loc, column) return self.__constructor__(new_data, self.index, new_columns)
[ "def", "insert", "(", "self", ",", "loc", ",", "column", ",", "value", ")", ":", "if", "is_list_like", "(", "value", ")", ":", "# TODO make work with another querycompiler object as `value`.", "# This will require aligning the indices with a `reindex` and ensuring that", "# the data is partitioned identically.", "if", "isinstance", "(", "value", ",", "pandas", ".", "Series", ")", ":", "value", "=", "value", ".", "reindex", "(", "self", ".", "index", ")", "value", "=", "list", "(", "value", ")", "def", "insert", "(", "df", ",", "internal_indices", "=", "[", "]", ")", ":", "internal_idx", "=", "int", "(", "internal_indices", "[", "0", "]", ")", "old_index", "=", "df", ".", "index", "df", ".", "index", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "df", ".", "index", ")", ")", "df", ".", "insert", "(", "internal_idx", ",", "internal_idx", ",", "value", ",", "allow_duplicates", "=", "True", ")", "df", ".", "columns", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "df", ".", "columns", ")", ")", "df", ".", "index", "=", "old_index", "return", "df", "new_data", "=", "self", ".", "data", ".", "apply_func_to_select_indices_along_full_axis", "(", "0", ",", "insert", ",", "loc", ",", "keep_remaining", "=", "True", ")", "new_columns", "=", "self", ".", "columns", ".", "insert", "(", "loc", ",", "column", ")", "return", "self", ".", "__constructor__", "(", "new_data", ",", "self", ".", "index", ",", "new_columns", ")" ]
Insert new column data. Args: loc: Insertion index. column: Column labels to insert. value: Dtype object values to insert. Returns: A new PandasQueryCompiler with new data inserted.
[ "Insert", "new", "column", "data", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2071-L2103
27,448
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.apply
def apply(self, func, axis, *args, **kwargs): """Apply func across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler. """ if callable(func): return self._callable_func(func, axis, *args, **kwargs) elif isinstance(func, dict): return self._dict_func(func, axis, *args, **kwargs) elif is_list_like(func): return self._list_like_func(func, axis, *args, **kwargs) else: pass
python
def apply(self, func, axis, *args, **kwargs): """Apply func across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler. """ if callable(func): return self._callable_func(func, axis, *args, **kwargs) elif isinstance(func, dict): return self._dict_func(func, axis, *args, **kwargs) elif is_list_like(func): return self._list_like_func(func, axis, *args, **kwargs) else: pass
[ "def", "apply", "(", "self", ",", "func", ",", "axis", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "callable", "(", "func", ")", ":", "return", "self", ".", "_callable_func", "(", "func", ",", "axis", ",", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "isinstance", "(", "func", ",", "dict", ")", ":", "return", "self", ".", "_dict_func", "(", "func", ",", "axis", ",", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "is_list_like", "(", "func", ")", ":", "return", "self", ".", "_list_like_func", "(", "func", ",", "axis", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "pass" ]
Apply func across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler.
[ "Apply", "func", "across", "given", "axis", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2110-L2127
27,449
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler._post_process_apply
def _post_process_apply(self, result_data, axis, try_scale=True): """Recompute the index after applying function. Args: result_data: a BaseFrameManager object. axis: Target axis along which function was applied. Returns: A new PandasQueryCompiler. """ if try_scale: try: internal_index = self.compute_index(0, result_data, True) except IndexError: internal_index = self.compute_index(0, result_data, False) try: internal_columns = self.compute_index(1, result_data, True) except IndexError: internal_columns = self.compute_index(1, result_data, False) else: internal_index = self.compute_index(0, result_data, False) internal_columns = self.compute_index(1, result_data, False) if not axis: index = internal_index # We check if the two columns are the same length because if # they are the same length, `self.columns` is the correct index. # However, if the operation resulted in a different number of columns, # we must use the derived columns from `self.compute_index()`. if len(internal_columns) != len(self.columns): columns = internal_columns else: columns = self.columns else: columns = internal_columns # See above explanation for checking the lengths of columns if len(internal_index) != len(self.index): index = internal_index else: index = self.index return self.__constructor__(result_data, index, columns)
python
def _post_process_apply(self, result_data, axis, try_scale=True): """Recompute the index after applying function. Args: result_data: a BaseFrameManager object. axis: Target axis along which function was applied. Returns: A new PandasQueryCompiler. """ if try_scale: try: internal_index = self.compute_index(0, result_data, True) except IndexError: internal_index = self.compute_index(0, result_data, False) try: internal_columns = self.compute_index(1, result_data, True) except IndexError: internal_columns = self.compute_index(1, result_data, False) else: internal_index = self.compute_index(0, result_data, False) internal_columns = self.compute_index(1, result_data, False) if not axis: index = internal_index # We check if the two columns are the same length because if # they are the same length, `self.columns` is the correct index. # However, if the operation resulted in a different number of columns, # we must use the derived columns from `self.compute_index()`. if len(internal_columns) != len(self.columns): columns = internal_columns else: columns = self.columns else: columns = internal_columns # See above explanation for checking the lengths of columns if len(internal_index) != len(self.index): index = internal_index else: index = self.index return self.__constructor__(result_data, index, columns)
[ "def", "_post_process_apply", "(", "self", ",", "result_data", ",", "axis", ",", "try_scale", "=", "True", ")", ":", "if", "try_scale", ":", "try", ":", "internal_index", "=", "self", ".", "compute_index", "(", "0", ",", "result_data", ",", "True", ")", "except", "IndexError", ":", "internal_index", "=", "self", ".", "compute_index", "(", "0", ",", "result_data", ",", "False", ")", "try", ":", "internal_columns", "=", "self", ".", "compute_index", "(", "1", ",", "result_data", ",", "True", ")", "except", "IndexError", ":", "internal_columns", "=", "self", ".", "compute_index", "(", "1", ",", "result_data", ",", "False", ")", "else", ":", "internal_index", "=", "self", ".", "compute_index", "(", "0", ",", "result_data", ",", "False", ")", "internal_columns", "=", "self", ".", "compute_index", "(", "1", ",", "result_data", ",", "False", ")", "if", "not", "axis", ":", "index", "=", "internal_index", "# We check if the two columns are the same length because if", "# they are the same length, `self.columns` is the correct index.", "# However, if the operation resulted in a different number of columns,", "# we must use the derived columns from `self.compute_index()`.", "if", "len", "(", "internal_columns", ")", "!=", "len", "(", "self", ".", "columns", ")", ":", "columns", "=", "internal_columns", "else", ":", "columns", "=", "self", ".", "columns", "else", ":", "columns", "=", "internal_columns", "# See above explanation for checking the lengths of columns", "if", "len", "(", "internal_index", ")", "!=", "len", "(", "self", ".", "index", ")", ":", "index", "=", "internal_index", "else", ":", "index", "=", "self", ".", "index", "return", "self", ".", "__constructor__", "(", "result_data", ",", "index", ",", "columns", ")" ]
Recompute the index after applying function. Args: result_data: a BaseFrameManager object. axis: Target axis along which function was applied. Returns: A new PandasQueryCompiler.
[ "Recompute", "the", "index", "after", "applying", "function", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2129-L2168
27,450
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler._dict_func
def _dict_func(self, func, axis, *args, **kwargs): """Apply function to certain indices across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler. """ if "axis" not in kwargs: kwargs["axis"] = axis if axis == 0: index = self.columns else: index = self.index func = {idx: func[key] for key in func for idx in index.get_indexer_for([key])} def dict_apply_builder(df, func_dict={}): # Sometimes `apply` can return a `Series`, but we require that internally # all objects are `DataFrame`s. return pandas.DataFrame(df.apply(func_dict, *args, **kwargs)) result_data = self.data.apply_func_to_select_indices_along_full_axis( axis, dict_apply_builder, func, keep_remaining=False ) full_result = self._post_process_apply(result_data, axis) return full_result
python
def _dict_func(self, func, axis, *args, **kwargs): """Apply function to certain indices across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler. """ if "axis" not in kwargs: kwargs["axis"] = axis if axis == 0: index = self.columns else: index = self.index func = {idx: func[key] for key in func for idx in index.get_indexer_for([key])} def dict_apply_builder(df, func_dict={}): # Sometimes `apply` can return a `Series`, but we require that internally # all objects are `DataFrame`s. return pandas.DataFrame(df.apply(func_dict, *args, **kwargs)) result_data = self.data.apply_func_to_select_indices_along_full_axis( axis, dict_apply_builder, func, keep_remaining=False ) full_result = self._post_process_apply(result_data, axis) return full_result
[ "def", "_dict_func", "(", "self", ",", "func", ",", "axis", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "\"axis\"", "not", "in", "kwargs", ":", "kwargs", "[", "\"axis\"", "]", "=", "axis", "if", "axis", "==", "0", ":", "index", "=", "self", ".", "columns", "else", ":", "index", "=", "self", ".", "index", "func", "=", "{", "idx", ":", "func", "[", "key", "]", "for", "key", "in", "func", "for", "idx", "in", "index", ".", "get_indexer_for", "(", "[", "key", "]", ")", "}", "def", "dict_apply_builder", "(", "df", ",", "func_dict", "=", "{", "}", ")", ":", "# Sometimes `apply` can return a `Series`, but we require that internally", "# all objects are `DataFrame`s.", "return", "pandas", ".", "DataFrame", "(", "df", ".", "apply", "(", "func_dict", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")", "result_data", "=", "self", ".", "data", ".", "apply_func_to_select_indices_along_full_axis", "(", "axis", ",", "dict_apply_builder", ",", "func", ",", "keep_remaining", "=", "False", ")", "full_result", "=", "self", ".", "_post_process_apply", "(", "result_data", ",", "axis", ")", "return", "full_result" ]
Apply function to certain indices across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler.
[ "Apply", "function", "to", "certain", "indices", "across", "given", "axis", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2170-L2198
27,451
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler._list_like_func
def _list_like_func(self, func, axis, *args, **kwargs): """Apply list-like function across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler. """ func_prepared = self._prepare_method( lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs)) ) new_data = self._map_across_full_axis(axis, func_prepared) # When the function is list-like, the function names become the index/columns new_index = ( [f if isinstance(f, string_types) else f.__name__ for f in func] if axis == 0 else self.index ) new_columns = ( [f if isinstance(f, string_types) else f.__name__ for f in func] if axis == 1 else self.columns ) return self.__constructor__(new_data, new_index, new_columns)
python
def _list_like_func(self, func, axis, *args, **kwargs): """Apply list-like function across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler. """ func_prepared = self._prepare_method( lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs)) ) new_data = self._map_across_full_axis(axis, func_prepared) # When the function is list-like, the function names become the index/columns new_index = ( [f if isinstance(f, string_types) else f.__name__ for f in func] if axis == 0 else self.index ) new_columns = ( [f if isinstance(f, string_types) else f.__name__ for f in func] if axis == 1 else self.columns ) return self.__constructor__(new_data, new_index, new_columns)
[ "def", "_list_like_func", "(", "self", ",", "func", ",", "axis", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "func_prepared", "=", "self", ".", "_prepare_method", "(", "lambda", "df", ":", "pandas", ".", "DataFrame", "(", "df", ".", "apply", "(", "func", ",", "axis", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")", ")", "new_data", "=", "self", ".", "_map_across_full_axis", "(", "axis", ",", "func_prepared", ")", "# When the function is list-like, the function names become the index/columns", "new_index", "=", "(", "[", "f", "if", "isinstance", "(", "f", ",", "string_types", ")", "else", "f", ".", "__name__", "for", "f", "in", "func", "]", "if", "axis", "==", "0", "else", "self", ".", "index", ")", "new_columns", "=", "(", "[", "f", "if", "isinstance", "(", "f", ",", "string_types", ")", "else", "f", ".", "__name__", "for", "f", "in", "func", "]", "if", "axis", "==", "1", "else", "self", ".", "columns", ")", "return", "self", ".", "__constructor__", "(", "new_data", ",", "new_index", ",", "new_columns", ")" ]
Apply list-like function across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler.
[ "Apply", "list", "-", "like", "function", "across", "given", "axis", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2200-L2225
27,452
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler._callable_func
def _callable_func(self, func, axis, *args, **kwargs): """Apply callable functions across given axis. Args: func: The functions to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler. """ def callable_apply_builder(df, axis=0): if not axis: df.index = index df.columns = pandas.RangeIndex(len(df.columns)) else: df.columns = index df.index = pandas.RangeIndex(len(df.index)) result = df.apply(func, axis=axis, *args, **kwargs) return result index = self.index if not axis else self.columns func_prepared = self._build_mapreduce_func(callable_apply_builder, axis=axis) result_data = self._map_across_full_axis(axis, func_prepared) return self._post_process_apply(result_data, axis)
python
def _callable_func(self, func, axis, *args, **kwargs): """Apply callable functions across given axis. Args: func: The functions to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler. """ def callable_apply_builder(df, axis=0): if not axis: df.index = index df.columns = pandas.RangeIndex(len(df.columns)) else: df.columns = index df.index = pandas.RangeIndex(len(df.index)) result = df.apply(func, axis=axis, *args, **kwargs) return result index = self.index if not axis else self.columns func_prepared = self._build_mapreduce_func(callable_apply_builder, axis=axis) result_data = self._map_across_full_axis(axis, func_prepared) return self._post_process_apply(result_data, axis)
[ "def", "_callable_func", "(", "self", ",", "func", ",", "axis", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "callable_apply_builder", "(", "df", ",", "axis", "=", "0", ")", ":", "if", "not", "axis", ":", "df", ".", "index", "=", "index", "df", ".", "columns", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "df", ".", "columns", ")", ")", "else", ":", "df", ".", "columns", "=", "index", "df", ".", "index", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "df", ".", "index", ")", ")", "result", "=", "df", ".", "apply", "(", "func", ",", "axis", "=", "axis", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "result", "index", "=", "self", ".", "index", "if", "not", "axis", "else", "self", ".", "columns", "func_prepared", "=", "self", ".", "_build_mapreduce_func", "(", "callable_apply_builder", ",", "axis", "=", "axis", ")", "result_data", "=", "self", ".", "_map_across_full_axis", "(", "axis", ",", "func_prepared", ")", "return", "self", ".", "_post_process_apply", "(", "result_data", ",", "axis", ")" ]
Apply callable functions across given axis. Args: func: The functions to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler.
[ "Apply", "callable", "functions", "across", "given", "axis", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2227-L2251
27,453
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler._manual_repartition
def _manual_repartition(self, axis, repartition_func, **kwargs): """This method applies all manual partitioning functions. Args: axis: The axis to shuffle data along. repartition_func: The function used to repartition data. Returns: A `BaseFrameManager` object. """ func = self._prepare_method(repartition_func, **kwargs) return self.data.manual_shuffle(axis, func)
python
def _manual_repartition(self, axis, repartition_func, **kwargs): """This method applies all manual partitioning functions. Args: axis: The axis to shuffle data along. repartition_func: The function used to repartition data. Returns: A `BaseFrameManager` object. """ func = self._prepare_method(repartition_func, **kwargs) return self.data.manual_shuffle(axis, func)
[ "def", "_manual_repartition", "(", "self", ",", "axis", ",", "repartition_func", ",", "*", "*", "kwargs", ")", ":", "func", "=", "self", ".", "_prepare_method", "(", "repartition_func", ",", "*", "*", "kwargs", ")", "return", "self", ".", "data", ".", "manual_shuffle", "(", "axis", ",", "func", ")" ]
This method applies all manual partitioning functions. Args: axis: The axis to shuffle data along. repartition_func: The function used to repartition data. Returns: A `BaseFrameManager` object.
[ "This", "method", "applies", "all", "manual", "partitioning", "functions", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2259-L2270
27,454
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompiler.get_dummies
def get_dummies(self, columns, **kwargs): """Convert categorical variables to dummy variables for certain columns. Args: columns: The columns to convert. Returns: A new QueryCompiler. """ cls = type(self) # `columns` as None does not mean all columns, by default it means only # non-numeric columns. if columns is None: columns = [c for c in self.columns if not is_numeric_dtype(self.dtypes[c])] # If we aren't computing any dummies, there is no need for any # remote compute. if len(columns) == 0: return self.copy() elif not is_list_like(columns): columns = [columns] # We have to do one of two things in order to ensure the final columns # are correct. Our first option is to map over the data and assign the # columns in a separate pass. That is what we have chosen to do here. # This is not as efficient, but it requires less information from the # lower layers and does not break any of our internal requirements. The # second option is that we assign the columns as a part of the # `get_dummies` call. This requires knowledge of the length of each # partition, and breaks some of our assumptions and separation of # concerns. def set_columns(df, columns): df.columns = columns return df set_cols = self.columns columns_applied = self._map_across_full_axis( 1, lambda df: set_columns(df, set_cols) ) # In some cases, we are mapping across all of the data. It is more # efficient if we are mapping over all of the data to do it this way # than it would be to reuse the code for specific columns. 
if len(columns) == len(self.columns): def get_dummies_builder(df): if df is not None: if not df.empty: return pandas.get_dummies(df, **kwargs) else: return pandas.DataFrame([]) func = self._prepare_method(lambda df: get_dummies_builder(df)) new_data = columns_applied.map_across_full_axis(0, func) untouched_data = None else: def get_dummies_builder(df, internal_indices=[]): return pandas.get_dummies( df.iloc[:, internal_indices], columns=None, **kwargs ) numeric_indices = list(self.columns.get_indexer_for(columns)) new_data = columns_applied.apply_func_to_select_indices_along_full_axis( 0, get_dummies_builder, numeric_indices, keep_remaining=False ) untouched_data = self.drop(columns=columns) # Since we set the columns in the beginning, we can just extract them # here. There is fortunately no required extra steps for a correct # column index. final_columns = self.compute_index(1, new_data, False) # If we mapped over all the data we are done. If not, we need to # prepend the `new_data` with the raw data from the columns that were # not selected. if len(columns) != len(self.columns): new_data = untouched_data.data.concat(1, new_data) final_columns = untouched_data.columns.append(pandas.Index(final_columns)) return cls(new_data, self.index, final_columns)
python
def get_dummies(self, columns, **kwargs): """Convert categorical variables to dummy variables for certain columns. Args: columns: The columns to convert. Returns: A new QueryCompiler. """ cls = type(self) # `columns` as None does not mean all columns, by default it means only # non-numeric columns. if columns is None: columns = [c for c in self.columns if not is_numeric_dtype(self.dtypes[c])] # If we aren't computing any dummies, there is no need for any # remote compute. if len(columns) == 0: return self.copy() elif not is_list_like(columns): columns = [columns] # We have to do one of two things in order to ensure the final columns # are correct. Our first option is to map over the data and assign the # columns in a separate pass. That is what we have chosen to do here. # This is not as efficient, but it requires less information from the # lower layers and does not break any of our internal requirements. The # second option is that we assign the columns as a part of the # `get_dummies` call. This requires knowledge of the length of each # partition, and breaks some of our assumptions and separation of # concerns. def set_columns(df, columns): df.columns = columns return df set_cols = self.columns columns_applied = self._map_across_full_axis( 1, lambda df: set_columns(df, set_cols) ) # In some cases, we are mapping across all of the data. It is more # efficient if we are mapping over all of the data to do it this way # than it would be to reuse the code for specific columns. 
if len(columns) == len(self.columns): def get_dummies_builder(df): if df is not None: if not df.empty: return pandas.get_dummies(df, **kwargs) else: return pandas.DataFrame([]) func = self._prepare_method(lambda df: get_dummies_builder(df)) new_data = columns_applied.map_across_full_axis(0, func) untouched_data = None else: def get_dummies_builder(df, internal_indices=[]): return pandas.get_dummies( df.iloc[:, internal_indices], columns=None, **kwargs ) numeric_indices = list(self.columns.get_indexer_for(columns)) new_data = columns_applied.apply_func_to_select_indices_along_full_axis( 0, get_dummies_builder, numeric_indices, keep_remaining=False ) untouched_data = self.drop(columns=columns) # Since we set the columns in the beginning, we can just extract them # here. There is fortunately no required extra steps for a correct # column index. final_columns = self.compute_index(1, new_data, False) # If we mapped over all the data we are done. If not, we need to # prepend the `new_data` with the raw data from the columns that were # not selected. if len(columns) != len(self.columns): new_data = untouched_data.data.concat(1, new_data) final_columns = untouched_data.columns.append(pandas.Index(final_columns)) return cls(new_data, self.index, final_columns)
[ "def", "get_dummies", "(", "self", ",", "columns", ",", "*", "*", "kwargs", ")", ":", "cls", "=", "type", "(", "self", ")", "# `columns` as None does not mean all columns, by default it means only", "# non-numeric columns.", "if", "columns", "is", "None", ":", "columns", "=", "[", "c", "for", "c", "in", "self", ".", "columns", "if", "not", "is_numeric_dtype", "(", "self", ".", "dtypes", "[", "c", "]", ")", "]", "# If we aren't computing any dummies, there is no need for any", "# remote compute.", "if", "len", "(", "columns", ")", "==", "0", ":", "return", "self", ".", "copy", "(", ")", "elif", "not", "is_list_like", "(", "columns", ")", ":", "columns", "=", "[", "columns", "]", "# We have to do one of two things in order to ensure the final columns", "# are correct. Our first option is to map over the data and assign the", "# columns in a separate pass. That is what we have chosen to do here.", "# This is not as efficient, but it requires less information from the", "# lower layers and does not break any of our internal requirements. The", "# second option is that we assign the columns as a part of the", "# `get_dummies` call. This requires knowledge of the length of each", "# partition, and breaks some of our assumptions and separation of", "# concerns.", "def", "set_columns", "(", "df", ",", "columns", ")", ":", "df", ".", "columns", "=", "columns", "return", "df", "set_cols", "=", "self", ".", "columns", "columns_applied", "=", "self", ".", "_map_across_full_axis", "(", "1", ",", "lambda", "df", ":", "set_columns", "(", "df", ",", "set_cols", ")", ")", "# In some cases, we are mapping across all of the data. 
It is more", "# efficient if we are mapping over all of the data to do it this way", "# than it would be to reuse the code for specific columns.", "if", "len", "(", "columns", ")", "==", "len", "(", "self", ".", "columns", ")", ":", "def", "get_dummies_builder", "(", "df", ")", ":", "if", "df", "is", "not", "None", ":", "if", "not", "df", ".", "empty", ":", "return", "pandas", ".", "get_dummies", "(", "df", ",", "*", "*", "kwargs", ")", "else", ":", "return", "pandas", ".", "DataFrame", "(", "[", "]", ")", "func", "=", "self", ".", "_prepare_method", "(", "lambda", "df", ":", "get_dummies_builder", "(", "df", ")", ")", "new_data", "=", "columns_applied", ".", "map_across_full_axis", "(", "0", ",", "func", ")", "untouched_data", "=", "None", "else", ":", "def", "get_dummies_builder", "(", "df", ",", "internal_indices", "=", "[", "]", ")", ":", "return", "pandas", ".", "get_dummies", "(", "df", ".", "iloc", "[", ":", ",", "internal_indices", "]", ",", "columns", "=", "None", ",", "*", "*", "kwargs", ")", "numeric_indices", "=", "list", "(", "self", ".", "columns", ".", "get_indexer_for", "(", "columns", ")", ")", "new_data", "=", "columns_applied", ".", "apply_func_to_select_indices_along_full_axis", "(", "0", ",", "get_dummies_builder", ",", "numeric_indices", ",", "keep_remaining", "=", "False", ")", "untouched_data", "=", "self", ".", "drop", "(", "columns", "=", "columns", ")", "# Since we set the columns in the beginning, we can just extract them", "# here. There is fortunately no required extra steps for a correct", "# column index.", "final_columns", "=", "self", ".", "compute_index", "(", "1", ",", "new_data", ",", "False", ")", "# If we mapped over all the data we are done. 
If not, we need to", "# prepend the `new_data` with the raw data from the columns that were", "# not selected.", "if", "len", "(", "columns", ")", "!=", "len", "(", "self", ".", "columns", ")", ":", "new_data", "=", "untouched_data", ".", "data", ".", "concat", "(", "1", ",", "new_data", ")", "final_columns", "=", "untouched_data", ".", "columns", ".", "append", "(", "pandas", ".", "Index", "(", "final_columns", ")", ")", "return", "cls", "(", "new_data", ",", "self", ".", "index", ",", "final_columns", ")" ]
Convert categorical variables to dummy variables for certain columns. Args: columns: The columns to convert. Returns: A new QueryCompiler.
[ "Convert", "categorical", "variables", "to", "dummy", "variables", "for", "certain", "columns", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2327-L2402
27,455
modin-project/modin
modin/backends/pandas/query_compiler.py
PandasQueryCompilerView._get_data
def _get_data(self) -> BaseFrameManager: """Perform the map step Returns: A BaseFrameManager object. """ def iloc(partition, row_internal_indices, col_internal_indices): return partition.iloc[row_internal_indices, col_internal_indices] masked_data = self.parent_data.apply_func_to_indices_both_axis( func=iloc, row_indices=self.index_map.values, col_indices=self.columns_map.values, lazy=False, keep_remaining=False, ) return masked_data
python
def _get_data(self) -> BaseFrameManager: """Perform the map step Returns: A BaseFrameManager object. """ def iloc(partition, row_internal_indices, col_internal_indices): return partition.iloc[row_internal_indices, col_internal_indices] masked_data = self.parent_data.apply_func_to_indices_both_axis( func=iloc, row_indices=self.index_map.values, col_indices=self.columns_map.values, lazy=False, keep_remaining=False, ) return masked_data
[ "def", "_get_data", "(", "self", ")", "->", "BaseFrameManager", ":", "def", "iloc", "(", "partition", ",", "row_internal_indices", ",", "col_internal_indices", ")", ":", "return", "partition", ".", "iloc", "[", "row_internal_indices", ",", "col_internal_indices", "]", "masked_data", "=", "self", ".", "parent_data", ".", "apply_func_to_indices_both_axis", "(", "func", "=", "iloc", ",", "row_indices", "=", "self", ".", "index_map", ".", "values", ",", "col_indices", "=", "self", ".", "columns_map", ".", "values", ",", "lazy", "=", "False", ",", "keep_remaining", "=", "False", ",", ")", "return", "masked_data" ]
Perform the map step Returns: A BaseFrameManager object.
[ "Perform", "the", "map", "step" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2528-L2545
27,456
modin-project/modin
modin/pandas/base.py
BasePandasDataset._validate_other
def _validate_other( self, other, axis, numeric_only=False, numeric_or_time_only=False, numeric_or_object_only=False, comparison_dtypes_only=False, ): """Helper method to check validity of other in inter-df operations""" axis = self._get_axis_number(axis) if axis is not None else 1 result = other if isinstance(other, BasePandasDataset): return other._query_compiler elif is_list_like(other): if axis == 0: if len(other) != len(self._query_compiler.index): raise ValueError( "Unable to coerce to Series, length must be {0}: " "given {1}".format(len(self._query_compiler.index), len(other)) ) else: if len(other) != len(self._query_compiler.columns): raise ValueError( "Unable to coerce to Series, length must be {0}: " "given {1}".format( len(self._query_compiler.columns), len(other) ) ) if hasattr(other, "dtype"): other_dtypes = [other.dtype] * len(other) else: other_dtypes = [type(x) for x in other] else: other_dtypes = [ type(other) for _ in range( len(self._query_compiler.index) if axis else len(self._query_compiler.columns) ) ] # Do dtype checking if numeric_only: if not all( is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype) for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes) ): raise TypeError("Cannot do operation on non-numeric dtypes") elif numeric_or_object_only: if not all( (is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype)) or (is_object_dtype(self_dtype) and is_object_dtype(other_dtype)) for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes) ): raise TypeError("Cannot do operation non-numeric dtypes") elif comparison_dtypes_only: if not all( (is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype)) or ( is_datetime_or_timedelta_dtype(self_dtype) and is_datetime_or_timedelta_dtype(other_dtype) ) or is_dtype_equal(self_dtype, other_dtype) for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes) ): raise TypeError( "Cannot do operation non-numeric objects with numeric objects" ) elif 
numeric_or_time_only: if not all( (is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype)) or ( is_datetime_or_timedelta_dtype(self_dtype) and is_datetime_or_timedelta_dtype(other_dtype) ) for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes) ): raise TypeError( "Cannot do operation non-numeric objects with numeric objects" ) return result
python
def _validate_other( self, other, axis, numeric_only=False, numeric_or_time_only=False, numeric_or_object_only=False, comparison_dtypes_only=False, ): """Helper method to check validity of other in inter-df operations""" axis = self._get_axis_number(axis) if axis is not None else 1 result = other if isinstance(other, BasePandasDataset): return other._query_compiler elif is_list_like(other): if axis == 0: if len(other) != len(self._query_compiler.index): raise ValueError( "Unable to coerce to Series, length must be {0}: " "given {1}".format(len(self._query_compiler.index), len(other)) ) else: if len(other) != len(self._query_compiler.columns): raise ValueError( "Unable to coerce to Series, length must be {0}: " "given {1}".format( len(self._query_compiler.columns), len(other) ) ) if hasattr(other, "dtype"): other_dtypes = [other.dtype] * len(other) else: other_dtypes = [type(x) for x in other] else: other_dtypes = [ type(other) for _ in range( len(self._query_compiler.index) if axis else len(self._query_compiler.columns) ) ] # Do dtype checking if numeric_only: if not all( is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype) for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes) ): raise TypeError("Cannot do operation on non-numeric dtypes") elif numeric_or_object_only: if not all( (is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype)) or (is_object_dtype(self_dtype) and is_object_dtype(other_dtype)) for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes) ): raise TypeError("Cannot do operation non-numeric dtypes") elif comparison_dtypes_only: if not all( (is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype)) or ( is_datetime_or_timedelta_dtype(self_dtype) and is_datetime_or_timedelta_dtype(other_dtype) ) or is_dtype_equal(self_dtype, other_dtype) for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes) ): raise TypeError( "Cannot do operation non-numeric objects with numeric objects" ) elif 
numeric_or_time_only: if not all( (is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype)) or ( is_datetime_or_timedelta_dtype(self_dtype) and is_datetime_or_timedelta_dtype(other_dtype) ) for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes) ): raise TypeError( "Cannot do operation non-numeric objects with numeric objects" ) return result
[ "def", "_validate_other", "(", "self", ",", "other", ",", "axis", ",", "numeric_only", "=", "False", ",", "numeric_or_time_only", "=", "False", ",", "numeric_or_object_only", "=", "False", ",", "comparison_dtypes_only", "=", "False", ",", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "is", "not", "None", "else", "1", "result", "=", "other", "if", "isinstance", "(", "other", ",", "BasePandasDataset", ")", ":", "return", "other", ".", "_query_compiler", "elif", "is_list_like", "(", "other", ")", ":", "if", "axis", "==", "0", ":", "if", "len", "(", "other", ")", "!=", "len", "(", "self", ".", "_query_compiler", ".", "index", ")", ":", "raise", "ValueError", "(", "\"Unable to coerce to Series, length must be {0}: \"", "\"given {1}\"", ".", "format", "(", "len", "(", "self", ".", "_query_compiler", ".", "index", ")", ",", "len", "(", "other", ")", ")", ")", "else", ":", "if", "len", "(", "other", ")", "!=", "len", "(", "self", ".", "_query_compiler", ".", "columns", ")", ":", "raise", "ValueError", "(", "\"Unable to coerce to Series, length must be {0}: \"", "\"given {1}\"", ".", "format", "(", "len", "(", "self", ".", "_query_compiler", ".", "columns", ")", ",", "len", "(", "other", ")", ")", ")", "if", "hasattr", "(", "other", ",", "\"dtype\"", ")", ":", "other_dtypes", "=", "[", "other", ".", "dtype", "]", "*", "len", "(", "other", ")", "else", ":", "other_dtypes", "=", "[", "type", "(", "x", ")", "for", "x", "in", "other", "]", "else", ":", "other_dtypes", "=", "[", "type", "(", "other", ")", "for", "_", "in", "range", "(", "len", "(", "self", ".", "_query_compiler", ".", "index", ")", "if", "axis", "else", "len", "(", "self", ".", "_query_compiler", ".", "columns", ")", ")", "]", "# Do dtype checking\r", "if", "numeric_only", ":", "if", "not", "all", "(", "is_numeric_dtype", "(", "self_dtype", ")", "and", "is_numeric_dtype", "(", "other_dtype", ")", "for", "self_dtype", ",", "other_dtype", "in", "zip", "(", "self", ".", 
"_get_dtypes", "(", ")", ",", "other_dtypes", ")", ")", ":", "raise", "TypeError", "(", "\"Cannot do operation on non-numeric dtypes\"", ")", "elif", "numeric_or_object_only", ":", "if", "not", "all", "(", "(", "is_numeric_dtype", "(", "self_dtype", ")", "and", "is_numeric_dtype", "(", "other_dtype", ")", ")", "or", "(", "is_object_dtype", "(", "self_dtype", ")", "and", "is_object_dtype", "(", "other_dtype", ")", ")", "for", "self_dtype", ",", "other_dtype", "in", "zip", "(", "self", ".", "_get_dtypes", "(", ")", ",", "other_dtypes", ")", ")", ":", "raise", "TypeError", "(", "\"Cannot do operation non-numeric dtypes\"", ")", "elif", "comparison_dtypes_only", ":", "if", "not", "all", "(", "(", "is_numeric_dtype", "(", "self_dtype", ")", "and", "is_numeric_dtype", "(", "other_dtype", ")", ")", "or", "(", "is_datetime_or_timedelta_dtype", "(", "self_dtype", ")", "and", "is_datetime_or_timedelta_dtype", "(", "other_dtype", ")", ")", "or", "is_dtype_equal", "(", "self_dtype", ",", "other_dtype", ")", "for", "self_dtype", ",", "other_dtype", "in", "zip", "(", "self", ".", "_get_dtypes", "(", ")", ",", "other_dtypes", ")", ")", ":", "raise", "TypeError", "(", "\"Cannot do operation non-numeric objects with numeric objects\"", ")", "elif", "numeric_or_time_only", ":", "if", "not", "all", "(", "(", "is_numeric_dtype", "(", "self_dtype", ")", "and", "is_numeric_dtype", "(", "other_dtype", ")", ")", "or", "(", "is_datetime_or_timedelta_dtype", "(", "self_dtype", ")", "and", "is_datetime_or_timedelta_dtype", "(", "other_dtype", ")", ")", "for", "self_dtype", ",", "other_dtype", "in", "zip", "(", "self", ".", "_get_dtypes", "(", ")", ",", "other_dtypes", ")", ")", ":", "raise", "TypeError", "(", "\"Cannot do operation non-numeric objects with numeric objects\"", ")", "return", "result" ]
Helper method to check validity of other in inter-df operations
[ "Helper", "method", "to", "check", "validity", "of", "other", "in", "inter", "-", "df", "operations" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L84-L165
27,457
modin-project/modin
modin/pandas/base.py
BasePandasDataset._default_to_pandas
def _default_to_pandas(self, op, *args, **kwargs): """Helper method to use default pandas function""" empty_self_str = "" if not self.empty else " for empty DataFrame" ErrorMessage.default_to_pandas( "`{}.{}`{}".format( self.__name__, op if isinstance(op, str) else op.__name__, empty_self_str, ) ) if callable(op): result = op(self._to_pandas(), *args, **kwargs) elif isinstance(op, str): # The inner `getattr` is ensuring that we are treating this object (whether # it is a DataFrame, Series, etc.) as a pandas object. The outer `getattr` # will get the operation (`op`) from the pandas version of the class and run # it on the object after we have converted it to pandas. result = getattr(getattr(pandas, self.__name__), op)( self._to_pandas(), *args, **kwargs ) # SparseDataFrames cannot be serialize by arrow and cause problems for Modin. # For now we will use pandas. if isinstance(result, type(self)) and not isinstance( result, (pandas.SparseDataFrame, pandas.SparseSeries) ): return self._create_or_update_from_compiler( result, inplace=kwargs.get("inplace", False) ) elif isinstance(result, pandas.DataFrame): from .dataframe import DataFrame return DataFrame(result) elif isinstance(result, pandas.Series): from .series import Series return Series(result) else: try: if ( isinstance(result, (list, tuple)) and len(result) == 2 and isinstance(result[0], pandas.DataFrame) ): # Some operations split the DataFrame into two (e.g. align). We need to wrap # both of the returned results if isinstance(result[1], pandas.DataFrame): second = self.__constructor__(result[1]) else: second = result[1] return self.__constructor__(result[0]), second else: return result except TypeError: return result
python
def _default_to_pandas(self, op, *args, **kwargs): """Helper method to use default pandas function""" empty_self_str = "" if not self.empty else " for empty DataFrame" ErrorMessage.default_to_pandas( "`{}.{}`{}".format( self.__name__, op if isinstance(op, str) else op.__name__, empty_self_str, ) ) if callable(op): result = op(self._to_pandas(), *args, **kwargs) elif isinstance(op, str): # The inner `getattr` is ensuring that we are treating this object (whether # it is a DataFrame, Series, etc.) as a pandas object. The outer `getattr` # will get the operation (`op`) from the pandas version of the class and run # it on the object after we have converted it to pandas. result = getattr(getattr(pandas, self.__name__), op)( self._to_pandas(), *args, **kwargs ) # SparseDataFrames cannot be serialize by arrow and cause problems for Modin. # For now we will use pandas. if isinstance(result, type(self)) and not isinstance( result, (pandas.SparseDataFrame, pandas.SparseSeries) ): return self._create_or_update_from_compiler( result, inplace=kwargs.get("inplace", False) ) elif isinstance(result, pandas.DataFrame): from .dataframe import DataFrame return DataFrame(result) elif isinstance(result, pandas.Series): from .series import Series return Series(result) else: try: if ( isinstance(result, (list, tuple)) and len(result) == 2 and isinstance(result[0], pandas.DataFrame) ): # Some operations split the DataFrame into two (e.g. align). We need to wrap # both of the returned results if isinstance(result[1], pandas.DataFrame): second = self.__constructor__(result[1]) else: second = result[1] return self.__constructor__(result[0]), second else: return result except TypeError: return result
[ "def", "_default_to_pandas", "(", "self", ",", "op", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "empty_self_str", "=", "\"\"", "if", "not", "self", ".", "empty", "else", "\" for empty DataFrame\"", "ErrorMessage", ".", "default_to_pandas", "(", "\"`{}.{}`{}\"", ".", "format", "(", "self", ".", "__name__", ",", "op", "if", "isinstance", "(", "op", ",", "str", ")", "else", "op", ".", "__name__", ",", "empty_self_str", ",", ")", ")", "if", "callable", "(", "op", ")", ":", "result", "=", "op", "(", "self", ".", "_to_pandas", "(", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "isinstance", "(", "op", ",", "str", ")", ":", "# The inner `getattr` is ensuring that we are treating this object (whether\r", "# it is a DataFrame, Series, etc.) as a pandas object. The outer `getattr`\r", "# will get the operation (`op`) from the pandas version of the class and run\r", "# it on the object after we have converted it to pandas.\r", "result", "=", "getattr", "(", "getattr", "(", "pandas", ",", "self", ".", "__name__", ")", ",", "op", ")", "(", "self", ".", "_to_pandas", "(", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# SparseDataFrames cannot be serialize by arrow and cause problems for Modin.\r", "# For now we will use pandas.\r", "if", "isinstance", "(", "result", ",", "type", "(", "self", ")", ")", "and", "not", "isinstance", "(", "result", ",", "(", "pandas", ".", "SparseDataFrame", ",", "pandas", ".", "SparseSeries", ")", ")", ":", "return", "self", ".", "_create_or_update_from_compiler", "(", "result", ",", "inplace", "=", "kwargs", ".", "get", "(", "\"inplace\"", ",", "False", ")", ")", "elif", "isinstance", "(", "result", ",", "pandas", ".", "DataFrame", ")", ":", "from", ".", "dataframe", "import", "DataFrame", "return", "DataFrame", "(", "result", ")", "elif", "isinstance", "(", "result", ",", "pandas", ".", "Series", ")", ":", "from", ".", "series", "import", "Series", "return", "Series", "(", "result", ")", "else", ":", "try", ":", 
"if", "(", "isinstance", "(", "result", ",", "(", "list", ",", "tuple", ")", ")", "and", "len", "(", "result", ")", "==", "2", "and", "isinstance", "(", "result", "[", "0", "]", ",", "pandas", ".", "DataFrame", ")", ")", ":", "# Some operations split the DataFrame into two (e.g. align). We need to wrap\r", "# both of the returned results\r", "if", "isinstance", "(", "result", "[", "1", "]", ",", "pandas", ".", "DataFrame", ")", ":", "second", "=", "self", ".", "__constructor__", "(", "result", "[", "1", "]", ")", "else", ":", "second", "=", "result", "[", "1", "]", "return", "self", ".", "__constructor__", "(", "result", "[", "0", "]", ")", ",", "second", "else", ":", "return", "result", "except", "TypeError", ":", "return", "result" ]
Helper method to use default pandas function
[ "Helper", "method", "to", "use", "default", "pandas", "function" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L181-L234
27,458
modin-project/modin
modin/pandas/base.py
BasePandasDataset.bool
def bool(self): """Return the bool of a single element PandasObject. This must be a boolean scalar value, either True or False. Raise a ValueError if the PandasObject does not have exactly 1 element, or that element is not boolean """ shape = self.shape if shape != (1,) and shape != (1, 1): raise ValueError( """The PandasObject does not have exactly 1 element. Return the bool of a single element PandasObject. The truth value is ambiguous. Use a.empty, a.item(), a.any() or a.all().""" ) else: return self._to_pandas().bool()
python
def bool(self): """Return the bool of a single element PandasObject. This must be a boolean scalar value, either True or False. Raise a ValueError if the PandasObject does not have exactly 1 element, or that element is not boolean """ shape = self.shape if shape != (1,) and shape != (1, 1): raise ValueError( """The PandasObject does not have exactly 1 element. Return the bool of a single element PandasObject. The truth value is ambiguous. Use a.empty, a.item(), a.any() or a.all().""" ) else: return self._to_pandas().bool()
[ "def", "bool", "(", "self", ")", ":", "shape", "=", "self", ".", "shape", "if", "shape", "!=", "(", "1", ",", ")", "and", "shape", "!=", "(", "1", ",", "1", ")", ":", "raise", "ValueError", "(", "\"\"\"The PandasObject does not have exactly\r\n 1 element. Return the bool of a single\r\n element PandasObject. The truth value is\r\n ambiguous. Use a.empty, a.item(), a.any()\r\n or a.all().\"\"\"", ")", "else", ":", "return", "self", ".", "_to_pandas", "(", ")", ".", "bool", "(", ")" ]
Return the bool of a single element PandasObject. This must be a boolean scalar value, either True or False. Raise a ValueError if the PandasObject does not have exactly 1 element, or that element is not boolean
[ "Return", "the", "bool", "of", "a", "single", "element", "PandasObject", ".", "This", "must", "be", "a", "boolean", "scalar", "value", "either", "True", "or", "False", ".", "Raise", "a", "ValueError", "if", "the", "PandasObject", "does", "not", "have", "exactly", "1", "element", "or", "that", "element", "is", "not", "boolean" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L583-L600
27,459
modin-project/modin
modin/engines/python/pandas_on_python/frame/partition.py
PandasOnPythonFramePartition.get
def get(self): """Flushes the call_queue and returns the data. Note: Since this object is a simple wrapper, just return the data. Returns: The object that was `put`. """ if self.call_queue: return self.apply(lambda df: df).data else: return self.data.copy()
python
def get(self): """Flushes the call_queue and returns the data. Note: Since this object is a simple wrapper, just return the data. Returns: The object that was `put`. """ if self.call_queue: return self.apply(lambda df: df).data else: return self.data.copy()
[ "def", "get", "(", "self", ")", ":", "if", "self", ".", "call_queue", ":", "return", "self", ".", "apply", "(", "lambda", "df", ":", "df", ")", ".", "data", "else", ":", "return", "self", ".", "data", ".", "copy", "(", ")" ]
Flushes the call_queue and returns the data. Note: Since this object is a simple wrapper, just return the data. Returns: The object that was `put`.
[ "Flushes", "the", "call_queue", "and", "returns", "the", "data", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/python/pandas_on_python/frame/partition.py#L23-L34
27,460
modin-project/modin
modin/engines/dask/pandas_on_dask_delayed/frame/partition.py
DaskFramePartition.add_to_apply_calls
def add_to_apply_calls(self, func, **kwargs): """Add the function to the apply function call stack. This function will be executed when apply is called. It will be executed in the order inserted; apply's func operates the last and return """ import dask self.delayed_call = dask.delayed(func)(self.delayed_call, **kwargs) return self
python
def add_to_apply_calls(self, func, **kwargs): """Add the function to the apply function call stack. This function will be executed when apply is called. It will be executed in the order inserted; apply's func operates the last and return """ import dask self.delayed_call = dask.delayed(func)(self.delayed_call, **kwargs) return self
[ "def", "add_to_apply_calls", "(", "self", ",", "func", ",", "*", "*", "kwargs", ")", ":", "import", "dask", "self", ".", "delayed_call", "=", "dask", ".", "delayed", "(", "func", ")", "(", "self", ".", "delayed_call", ",", "*", "*", "kwargs", ")", "return", "self" ]
Add the function to the apply function call stack. This function will be executed when apply is called. It will be executed in the order inserted; apply's func operates the last and return
[ "Add", "the", "function", "to", "the", "apply", "function", "call", "stack", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/dask/pandas_on_dask_delayed/frame/partition.py#L50-L59
27,461
modin-project/modin
modin/data_management/utils.py
_get_nan_block_id
def _get_nan_block_id(partition_class, n_row=1, n_col=1, transpose=False): """A memory efficient way to get a block of NaNs. Args: partition_class (BaseFramePartition): The class to use to put the object in the remote format. n_row(int): The number of rows. n_col(int): The number of columns. transpose(bool): If true, swap rows and columns. Returns: ObjectID of the NaN block. """ global _NAN_BLOCKS if transpose: n_row, n_col = n_col, n_row shape = (n_row, n_col) if shape not in _NAN_BLOCKS: arr = np.tile(np.array(np.NaN), shape) # TODO Not use pandas.DataFrame here, but something more general. _NAN_BLOCKS[shape] = partition_class.put(pandas.DataFrame(data=arr)) return _NAN_BLOCKS[shape]
python
def _get_nan_block_id(partition_class, n_row=1, n_col=1, transpose=False): """A memory efficient way to get a block of NaNs. Args: partition_class (BaseFramePartition): The class to use to put the object in the remote format. n_row(int): The number of rows. n_col(int): The number of columns. transpose(bool): If true, swap rows and columns. Returns: ObjectID of the NaN block. """ global _NAN_BLOCKS if transpose: n_row, n_col = n_col, n_row shape = (n_row, n_col) if shape not in _NAN_BLOCKS: arr = np.tile(np.array(np.NaN), shape) # TODO Not use pandas.DataFrame here, but something more general. _NAN_BLOCKS[shape] = partition_class.put(pandas.DataFrame(data=arr)) return _NAN_BLOCKS[shape]
[ "def", "_get_nan_block_id", "(", "partition_class", ",", "n_row", "=", "1", ",", "n_col", "=", "1", ",", "transpose", "=", "False", ")", ":", "global", "_NAN_BLOCKS", "if", "transpose", ":", "n_row", ",", "n_col", "=", "n_col", ",", "n_row", "shape", "=", "(", "n_row", ",", "n_col", ")", "if", "shape", "not", "in", "_NAN_BLOCKS", ":", "arr", "=", "np", ".", "tile", "(", "np", ".", "array", "(", "np", ".", "NaN", ")", ",", "shape", ")", "# TODO Not use pandas.DataFrame here, but something more general.", "_NAN_BLOCKS", "[", "shape", "]", "=", "partition_class", ".", "put", "(", "pandas", ".", "DataFrame", "(", "data", "=", "arr", ")", ")", "return", "_NAN_BLOCKS", "[", "shape", "]" ]
A memory efficient way to get a block of NaNs. Args: partition_class (BaseFramePartition): The class to use to put the object in the remote format. n_row(int): The number of rows. n_col(int): The number of columns. transpose(bool): If true, swap rows and columns. Returns: ObjectID of the NaN block.
[ "A", "memory", "efficient", "way", "to", "get", "a", "block", "of", "NaNs", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/data_management/utils.py#L55-L75
27,462
modin-project/modin
modin/data_management/utils.py
split_result_of_axis_func_pandas
def split_result_of_axis_func_pandas(axis, num_splits, result, length_list=None): """Split the Pandas result evenly based on the provided number of splits. Args: axis: The axis to split across. num_splits: The number of even splits to create. result: The result of the computation. This should be a Pandas DataFrame. length_list: The list of lengths to split this DataFrame into. This is used to return the DataFrame to its original partitioning schema. Returns: A list of Pandas DataFrames. """ if num_splits == 1: return result if length_list is not None: length_list.insert(0, 0) sums = np.cumsum(length_list) if axis == 0: return [result.iloc[sums[i] : sums[i + 1]] for i in range(len(sums) - 1)] else: return [result.iloc[:, sums[i] : sums[i + 1]] for i in range(len(sums) - 1)] # We do this to restore block partitioning chunksize = compute_chunksize(result, num_splits, axis=axis) if axis == 0: return [ result.iloc[chunksize * i : chunksize * (i + 1)] for i in range(num_splits) ] else: return [ result.iloc[:, chunksize * i : chunksize * (i + 1)] for i in range(num_splits) ]
python
def split_result_of_axis_func_pandas(axis, num_splits, result, length_list=None): """Split the Pandas result evenly based on the provided number of splits. Args: axis: The axis to split across. num_splits: The number of even splits to create. result: The result of the computation. This should be a Pandas DataFrame. length_list: The list of lengths to split this DataFrame into. This is used to return the DataFrame to its original partitioning schema. Returns: A list of Pandas DataFrames. """ if num_splits == 1: return result if length_list is not None: length_list.insert(0, 0) sums = np.cumsum(length_list) if axis == 0: return [result.iloc[sums[i] : sums[i + 1]] for i in range(len(sums) - 1)] else: return [result.iloc[:, sums[i] : sums[i + 1]] for i in range(len(sums) - 1)] # We do this to restore block partitioning chunksize = compute_chunksize(result, num_splits, axis=axis) if axis == 0: return [ result.iloc[chunksize * i : chunksize * (i + 1)] for i in range(num_splits) ] else: return [ result.iloc[:, chunksize * i : chunksize * (i + 1)] for i in range(num_splits) ]
[ "def", "split_result_of_axis_func_pandas", "(", "axis", ",", "num_splits", ",", "result", ",", "length_list", "=", "None", ")", ":", "if", "num_splits", "==", "1", ":", "return", "result", "if", "length_list", "is", "not", "None", ":", "length_list", ".", "insert", "(", "0", ",", "0", ")", "sums", "=", "np", ".", "cumsum", "(", "length_list", ")", "if", "axis", "==", "0", ":", "return", "[", "result", ".", "iloc", "[", "sums", "[", "i", "]", ":", "sums", "[", "i", "+", "1", "]", "]", "for", "i", "in", "range", "(", "len", "(", "sums", ")", "-", "1", ")", "]", "else", ":", "return", "[", "result", ".", "iloc", "[", ":", ",", "sums", "[", "i", "]", ":", "sums", "[", "i", "+", "1", "]", "]", "for", "i", "in", "range", "(", "len", "(", "sums", ")", "-", "1", ")", "]", "# We do this to restore block partitioning", "chunksize", "=", "compute_chunksize", "(", "result", ",", "num_splits", ",", "axis", "=", "axis", ")", "if", "axis", "==", "0", ":", "return", "[", "result", ".", "iloc", "[", "chunksize", "*", "i", ":", "chunksize", "*", "(", "i", "+", "1", ")", "]", "for", "i", "in", "range", "(", "num_splits", ")", "]", "else", ":", "return", "[", "result", ".", "iloc", "[", ":", ",", "chunksize", "*", "i", ":", "chunksize", "*", "(", "i", "+", "1", ")", "]", "for", "i", "in", "range", "(", "num_splits", ")", "]" ]
Split the Pandas result evenly based on the provided number of splits. Args: axis: The axis to split across. num_splits: The number of even splits to create. result: The result of the computation. This should be a Pandas DataFrame. length_list: The list of lengths to split this DataFrame into. This is used to return the DataFrame to its original partitioning schema. Returns: A list of Pandas DataFrames.
[ "Split", "the", "Pandas", "result", "evenly", "based", "on", "the", "provided", "number", "of", "splits", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/data_management/utils.py#L78-L111
27,463
modin-project/modin
modin/pandas/indexing.py
_parse_tuple
def _parse_tuple(tup): """Unpack the user input for getitem and setitem and compute ndim loc[a] -> ([a], :), 1D loc[[a,b],] -> ([a,b], :), loc[a,b] -> ([a], [b]), 0D """ row_loc, col_loc = slice(None), slice(None) if is_tuple(tup): row_loc = tup[0] if len(tup) == 2: col_loc = tup[1] if len(tup) > 2: raise IndexingError("Too many indexers") else: row_loc = tup ndim = _compute_ndim(row_loc, col_loc) row_scaler = is_scalar(row_loc) col_scaler = is_scalar(col_loc) row_loc = [row_loc] if row_scaler else row_loc col_loc = [col_loc] if col_scaler else col_loc return row_loc, col_loc, ndim, row_scaler, col_scaler
python
def _parse_tuple(tup): """Unpack the user input for getitem and setitem and compute ndim loc[a] -> ([a], :), 1D loc[[a,b],] -> ([a,b], :), loc[a,b] -> ([a], [b]), 0D """ row_loc, col_loc = slice(None), slice(None) if is_tuple(tup): row_loc = tup[0] if len(tup) == 2: col_loc = tup[1] if len(tup) > 2: raise IndexingError("Too many indexers") else: row_loc = tup ndim = _compute_ndim(row_loc, col_loc) row_scaler = is_scalar(row_loc) col_scaler = is_scalar(col_loc) row_loc = [row_loc] if row_scaler else row_loc col_loc = [col_loc] if col_scaler else col_loc return row_loc, col_loc, ndim, row_scaler, col_scaler
[ "def", "_parse_tuple", "(", "tup", ")", ":", "row_loc", ",", "col_loc", "=", "slice", "(", "None", ")", ",", "slice", "(", "None", ")", "if", "is_tuple", "(", "tup", ")", ":", "row_loc", "=", "tup", "[", "0", "]", "if", "len", "(", "tup", ")", "==", "2", ":", "col_loc", "=", "tup", "[", "1", "]", "if", "len", "(", "tup", ")", ">", "2", ":", "raise", "IndexingError", "(", "\"Too many indexers\"", ")", "else", ":", "row_loc", "=", "tup", "ndim", "=", "_compute_ndim", "(", "row_loc", ",", "col_loc", ")", "row_scaler", "=", "is_scalar", "(", "row_loc", ")", "col_scaler", "=", "is_scalar", "(", "col_loc", ")", "row_loc", "=", "[", "row_loc", "]", "if", "row_scaler", "else", "row_loc", "col_loc", "=", "[", "col_loc", "]", "if", "col_scaler", "else", "col_loc", "return", "row_loc", ",", "col_loc", ",", "ndim", ",", "row_scaler", ",", "col_scaler" ]
Unpack the user input for getitem and setitem and compute ndim loc[a] -> ([a], :), 1D loc[[a,b],] -> ([a,b], :), loc[a,b] -> ([a], [b]), 0D
[ "Unpack", "the", "user", "input", "for", "getitem", "and", "setitem", "and", "compute", "ndim" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/indexing.py#L77-L101
27,464
modin-project/modin
modin/pandas/indexing.py
_is_enlargement
def _is_enlargement(locator, global_index): """Determine if a locator will enlarge the global index. Enlargement happens when you trying to locate using labels isn't in the original index. In other words, enlargement == adding NaNs ! """ if ( is_list_like(locator) and not is_slice(locator) and len(locator) > 0 and not is_boolean_array(locator) and (isinstance(locator, type(global_index[0])) and locator not in global_index) ): n_diff_elems = len(pandas.Index(locator).difference(global_index)) is_enlargement_boolean = n_diff_elems > 0 return is_enlargement_boolean return False
python
def _is_enlargement(locator, global_index): """Determine if a locator will enlarge the global index. Enlargement happens when you trying to locate using labels isn't in the original index. In other words, enlargement == adding NaNs ! """ if ( is_list_like(locator) and not is_slice(locator) and len(locator) > 0 and not is_boolean_array(locator) and (isinstance(locator, type(global_index[0])) and locator not in global_index) ): n_diff_elems = len(pandas.Index(locator).difference(global_index)) is_enlargement_boolean = n_diff_elems > 0 return is_enlargement_boolean return False
[ "def", "_is_enlargement", "(", "locator", ",", "global_index", ")", ":", "if", "(", "is_list_like", "(", "locator", ")", "and", "not", "is_slice", "(", "locator", ")", "and", "len", "(", "locator", ")", ">", "0", "and", "not", "is_boolean_array", "(", "locator", ")", "and", "(", "isinstance", "(", "locator", ",", "type", "(", "global_index", "[", "0", "]", ")", ")", "and", "locator", "not", "in", "global_index", ")", ")", ":", "n_diff_elems", "=", "len", "(", "pandas", ".", "Index", "(", "locator", ")", ".", "difference", "(", "global_index", ")", ")", "is_enlargement_boolean", "=", "n_diff_elems", ">", "0", "return", "is_enlargement_boolean", "return", "False" ]
Determine if a locator will enlarge the global index. Enlargement happens when you trying to locate using labels isn't in the original index. In other words, enlargement == adding NaNs !
[ "Determine", "if", "a", "locator", "will", "enlarge", "the", "global", "index", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/indexing.py#L104-L120
27,465
modin-project/modin
modin/pandas/indexing.py
_compute_ndim
def _compute_ndim(row_loc, col_loc): """Compute the ndim of result from locators """ row_scaler = is_scalar(row_loc) col_scaler = is_scalar(col_loc) if row_scaler and col_scaler: ndim = 0 elif row_scaler ^ col_scaler: ndim = 1 else: ndim = 2 return ndim
python
def _compute_ndim(row_loc, col_loc): """Compute the ndim of result from locators """ row_scaler = is_scalar(row_loc) col_scaler = is_scalar(col_loc) if row_scaler and col_scaler: ndim = 0 elif row_scaler ^ col_scaler: ndim = 1 else: ndim = 2 return ndim
[ "def", "_compute_ndim", "(", "row_loc", ",", "col_loc", ")", ":", "row_scaler", "=", "is_scalar", "(", "row_loc", ")", "col_scaler", "=", "is_scalar", "(", "col_loc", ")", "if", "row_scaler", "and", "col_scaler", ":", "ndim", "=", "0", "elif", "row_scaler", "^", "col_scaler", ":", "ndim", "=", "1", "else", ":", "ndim", "=", "2", "return", "ndim" ]
Compute the ndim of result from locators
[ "Compute", "the", "ndim", "of", "result", "from", "locators" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/indexing.py#L127-L140
27,466
modin-project/modin
modin/pandas/indexing.py
_LocationIndexerBase._broadcast_item
def _broadcast_item(self, row_lookup, col_lookup, item, to_shape): """Use numpy to broadcast or reshape item. Notes: - Numpy is memory efficient, there shouldn't be performance issue. """ # It is valid to pass a DataFrame or Series to __setitem__ that is larger than # the target the user is trying to overwrite. This if isinstance(item, (pandas.Series, pandas.DataFrame, DataFrame)): if not all(idx in item.index for idx in row_lookup): raise ValueError( "Must have equal len keys and value when setting with " "an iterable" ) if hasattr(item, "columns"): if not all(idx in item.columns for idx in col_lookup): raise ValueError( "Must have equal len keys and value when setting " "with an iterable" ) item = item.reindex(index=row_lookup, columns=col_lookup) else: item = item.reindex(index=row_lookup) try: item = np.array(item) if np.prod(to_shape) == np.prod(item.shape): return item.reshape(to_shape) else: return np.broadcast_to(item, to_shape) except ValueError: from_shape = np.array(item).shape raise ValueError( "could not broadcast input array from shape {from_shape} into shape " "{to_shape}".format(from_shape=from_shape, to_shape=to_shape) )
python
def _broadcast_item(self, row_lookup, col_lookup, item, to_shape): """Use numpy to broadcast or reshape item. Notes: - Numpy is memory efficient, there shouldn't be performance issue. """ # It is valid to pass a DataFrame or Series to __setitem__ that is larger than # the target the user is trying to overwrite. This if isinstance(item, (pandas.Series, pandas.DataFrame, DataFrame)): if not all(idx in item.index for idx in row_lookup): raise ValueError( "Must have equal len keys and value when setting with " "an iterable" ) if hasattr(item, "columns"): if not all(idx in item.columns for idx in col_lookup): raise ValueError( "Must have equal len keys and value when setting " "with an iterable" ) item = item.reindex(index=row_lookup, columns=col_lookup) else: item = item.reindex(index=row_lookup) try: item = np.array(item) if np.prod(to_shape) == np.prod(item.shape): return item.reshape(to_shape) else: return np.broadcast_to(item, to_shape) except ValueError: from_shape = np.array(item).shape raise ValueError( "could not broadcast input array from shape {from_shape} into shape " "{to_shape}".format(from_shape=from_shape, to_shape=to_shape) )
[ "def", "_broadcast_item", "(", "self", ",", "row_lookup", ",", "col_lookup", ",", "item", ",", "to_shape", ")", ":", "# It is valid to pass a DataFrame or Series to __setitem__ that is larger than", "# the target the user is trying to overwrite. This", "if", "isinstance", "(", "item", ",", "(", "pandas", ".", "Series", ",", "pandas", ".", "DataFrame", ",", "DataFrame", ")", ")", ":", "if", "not", "all", "(", "idx", "in", "item", ".", "index", "for", "idx", "in", "row_lookup", ")", ":", "raise", "ValueError", "(", "\"Must have equal len keys and value when setting with \"", "\"an iterable\"", ")", "if", "hasattr", "(", "item", ",", "\"columns\"", ")", ":", "if", "not", "all", "(", "idx", "in", "item", ".", "columns", "for", "idx", "in", "col_lookup", ")", ":", "raise", "ValueError", "(", "\"Must have equal len keys and value when setting \"", "\"with an iterable\"", ")", "item", "=", "item", ".", "reindex", "(", "index", "=", "row_lookup", ",", "columns", "=", "col_lookup", ")", "else", ":", "item", "=", "item", ".", "reindex", "(", "index", "=", "row_lookup", ")", "try", ":", "item", "=", "np", ".", "array", "(", "item", ")", "if", "np", ".", "prod", "(", "to_shape", ")", "==", "np", ".", "prod", "(", "item", ".", "shape", ")", ":", "return", "item", ".", "reshape", "(", "to_shape", ")", "else", ":", "return", "np", ".", "broadcast_to", "(", "item", ",", "to_shape", ")", "except", "ValueError", ":", "from_shape", "=", "np", ".", "array", "(", "item", ")", ".", "shape", "raise", "ValueError", "(", "\"could not broadcast input array from shape {from_shape} into shape \"", "\"{to_shape}\"", ".", "format", "(", "from_shape", "=", "from_shape", ",", "to_shape", "=", "to_shape", ")", ")" ]
Use numpy to broadcast or reshape item. Notes: - Numpy is memory efficient, there shouldn't be performance issue.
[ "Use", "numpy", "to", "broadcast", "or", "reshape", "item", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/indexing.py#L187-L221
27,467
modin-project/modin
modin/pandas/indexing.py
_LocationIndexerBase._write_items
def _write_items(self, row_lookup, col_lookup, item): """Perform remote write and replace blocks. """ self.qc.write_items(row_lookup, col_lookup, item)
python
def _write_items(self, row_lookup, col_lookup, item): """Perform remote write and replace blocks. """ self.qc.write_items(row_lookup, col_lookup, item)
[ "def", "_write_items", "(", "self", ",", "row_lookup", ",", "col_lookup", ",", "item", ")", ":", "self", ".", "qc", ".", "write_items", "(", "row_lookup", ",", "col_lookup", ",", "item", ")" ]
Perform remote write and replace blocks.
[ "Perform", "remote", "write", "and", "replace", "blocks", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/indexing.py#L223-L226
27,468
modin-project/modin
modin/pandas/indexing.py
_LocIndexer._compute_enlarge_labels
def _compute_enlarge_labels(self, locator, base_index): """Helper for _enlarge_axis, compute common labels and extra labels. Returns: nan_labels: The labels needs to be added """ # base_index_type can be pd.Index or pd.DatetimeIndex # depending on user input and pandas behavior # See issue #2264 base_index_type = type(base_index) locator_as_index = base_index_type(locator) nan_labels = locator_as_index.difference(base_index) common_labels = locator_as_index.intersection(base_index) if len(common_labels) == 0: raise KeyError( "None of [{labels}] are in the [{base_index_name}]".format( labels=list(locator_as_index), base_index_name=base_index ) ) return nan_labels
python
def _compute_enlarge_labels(self, locator, base_index): """Helper for _enlarge_axis, compute common labels and extra labels. Returns: nan_labels: The labels needs to be added """ # base_index_type can be pd.Index or pd.DatetimeIndex # depending on user input and pandas behavior # See issue #2264 base_index_type = type(base_index) locator_as_index = base_index_type(locator) nan_labels = locator_as_index.difference(base_index) common_labels = locator_as_index.intersection(base_index) if len(common_labels) == 0: raise KeyError( "None of [{labels}] are in the [{base_index_name}]".format( labels=list(locator_as_index), base_index_name=base_index ) ) return nan_labels
[ "def", "_compute_enlarge_labels", "(", "self", ",", "locator", ",", "base_index", ")", ":", "# base_index_type can be pd.Index or pd.DatetimeIndex", "# depending on user input and pandas behavior", "# See issue #2264", "base_index_type", "=", "type", "(", "base_index", ")", "locator_as_index", "=", "base_index_type", "(", "locator", ")", "nan_labels", "=", "locator_as_index", ".", "difference", "(", "base_index", ")", "common_labels", "=", "locator_as_index", ".", "intersection", "(", "base_index", ")", "if", "len", "(", "common_labels", ")", "==", "0", ":", "raise", "KeyError", "(", "\"None of [{labels}] are in the [{base_index_name}]\"", ".", "format", "(", "labels", "=", "list", "(", "locator_as_index", ")", ",", "base_index_name", "=", "base_index", ")", ")", "return", "nan_labels" ]
Helper for _enlarge_axis, compute common labels and extra labels. Returns: nan_labels: The labels needs to be added
[ "Helper", "for", "_enlarge_axis", "compute", "common", "labels", "and", "extra", "labels", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/indexing.py#L294-L315
27,469
modin-project/modin
modin/engines/ray/pandas_on_ray/io.py
_split_result_for_readers
def _split_result_for_readers(axis, num_splits, df): # pragma: no cover """Splits the DataFrame read into smaller DataFrames and handles all edge cases. Args: axis: Which axis to split over. num_splits: The number of splits to create. df: The DataFrame after it has been read. Returns: A list of pandas DataFrames. """ splits = split_result_of_axis_func_pandas(axis, num_splits, df) if not isinstance(splits, list): splits = [splits] return splits
python
def _split_result_for_readers(axis, num_splits, df): # pragma: no cover """Splits the DataFrame read into smaller DataFrames and handles all edge cases. Args: axis: Which axis to split over. num_splits: The number of splits to create. df: The DataFrame after it has been read. Returns: A list of pandas DataFrames. """ splits = split_result_of_axis_func_pandas(axis, num_splits, df) if not isinstance(splits, list): splits = [splits] return splits
[ "def", "_split_result_for_readers", "(", "axis", ",", "num_splits", ",", "df", ")", ":", "# pragma: no cover", "splits", "=", "split_result_of_axis_func_pandas", "(", "axis", ",", "num_splits", ",", "df", ")", "if", "not", "isinstance", "(", "splits", ",", "list", ")", ":", "splits", "=", "[", "splits", "]", "return", "splits" ]
Splits the DataFrame read into smaller DataFrames and handles all edge cases. Args: axis: Which axis to split over. num_splits: The number of splits to create. df: The DataFrame after it has been read. Returns: A list of pandas DataFrames.
[ "Splits", "the", "DataFrame", "read", "into", "smaller", "DataFrames", "and", "handles", "all", "edge", "cases", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/pandas_on_ray/io.py#L18-L32
27,470
modin-project/modin
modin/engines/ray/pandas_on_ray/io.py
_read_parquet_columns
def _read_parquet_columns(path, columns, num_splits, kwargs): # pragma: no cover """Use a Ray task to read columns from Parquet into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path: The path of the Parquet file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ import pyarrow.parquet as pq df = pq.read_pandas(path, columns=columns, **kwargs).to_pandas() # Append the length of the index here to build it externally return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
python
def _read_parquet_columns(path, columns, num_splits, kwargs): # pragma: no cover """Use a Ray task to read columns from Parquet into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path: The path of the Parquet file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ import pyarrow.parquet as pq df = pq.read_pandas(path, columns=columns, **kwargs).to_pandas() # Append the length of the index here to build it externally return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
[ "def", "_read_parquet_columns", "(", "path", ",", "columns", ",", "num_splits", ",", "kwargs", ")", ":", "# pragma: no cover", "import", "pyarrow", ".", "parquet", "as", "pq", "df", "=", "pq", ".", "read_pandas", "(", "path", ",", "columns", "=", "columns", ",", "*", "*", "kwargs", ")", ".", "to_pandas", "(", ")", "# Append the length of the index here to build it externally", "return", "_split_result_for_readers", "(", "0", ",", "num_splits", ",", "df", ")", "+", "[", "len", "(", "df", ".", "index", ")", "]" ]
Use a Ray task to read columns from Parquet into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path: The path of the Parquet file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index.
[ "Use", "a", "Ray", "task", "to", "read", "columns", "from", "Parquet", "into", "a", "Pandas", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/pandas_on_ray/io.py#L36-L56
27,471
modin-project/modin
modin/engines/ray/pandas_on_ray/io.py
_read_csv_with_offset_pandas_on_ray
def _read_csv_with_offset_pandas_on_ray( fname, num_splits, start, end, kwargs, header ): # pragma: no cover """Use a Ray task to read a chunk of a CSV into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: fname: The filename of the file to open. num_splits: The number of splits (partitions) to separate the DataFrame into. start: The start byte offset. end: The end byte offset. kwargs: The kwargs for the Pandas `read_csv` function. header: The header of the file. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ index_col = kwargs.get("index_col", None) bio = file_open(fname, "rb") bio.seek(start) to_read = header + bio.read(end - start) bio.close() pandas_df = pandas.read_csv(BytesIO(to_read), **kwargs) pandas_df.columns = pandas.RangeIndex(len(pandas_df.columns)) if index_col is not None: index = pandas_df.index # Partitions must have RangeIndex pandas_df.index = pandas.RangeIndex(0, len(pandas_df)) else: # We will use the lengths to build the index if we are not given an # `index_col`. index = len(pandas_df) return _split_result_for_readers(1, num_splits, pandas_df) + [index]
python
def _read_csv_with_offset_pandas_on_ray( fname, num_splits, start, end, kwargs, header ): # pragma: no cover """Use a Ray task to read a chunk of a CSV into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: fname: The filename of the file to open. num_splits: The number of splits (partitions) to separate the DataFrame into. start: The start byte offset. end: The end byte offset. kwargs: The kwargs for the Pandas `read_csv` function. header: The header of the file. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ index_col = kwargs.get("index_col", None) bio = file_open(fname, "rb") bio.seek(start) to_read = header + bio.read(end - start) bio.close() pandas_df = pandas.read_csv(BytesIO(to_read), **kwargs) pandas_df.columns = pandas.RangeIndex(len(pandas_df.columns)) if index_col is not None: index = pandas_df.index # Partitions must have RangeIndex pandas_df.index = pandas.RangeIndex(0, len(pandas_df)) else: # We will use the lengths to build the index if we are not given an # `index_col`. index = len(pandas_df) return _split_result_for_readers(1, num_splits, pandas_df) + [index]
[ "def", "_read_csv_with_offset_pandas_on_ray", "(", "fname", ",", "num_splits", ",", "start", ",", "end", ",", "kwargs", ",", "header", ")", ":", "# pragma: no cover", "index_col", "=", "kwargs", ".", "get", "(", "\"index_col\"", ",", "None", ")", "bio", "=", "file_open", "(", "fname", ",", "\"rb\"", ")", "bio", ".", "seek", "(", "start", ")", "to_read", "=", "header", "+", "bio", ".", "read", "(", "end", "-", "start", ")", "bio", ".", "close", "(", ")", "pandas_df", "=", "pandas", ".", "read_csv", "(", "BytesIO", "(", "to_read", ")", ",", "*", "*", "kwargs", ")", "pandas_df", ".", "columns", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "pandas_df", ".", "columns", ")", ")", "if", "index_col", "is", "not", "None", ":", "index", "=", "pandas_df", ".", "index", "# Partitions must have RangeIndex", "pandas_df", ".", "index", "=", "pandas", ".", "RangeIndex", "(", "0", ",", "len", "(", "pandas_df", ")", ")", "else", ":", "# We will use the lengths to build the index if we are not given an", "# `index_col`.", "index", "=", "len", "(", "pandas_df", ")", "return", "_split_result_for_readers", "(", "1", ",", "num_splits", ",", "pandas_df", ")", "+", "[", "index", "]" ]
Use a Ray task to read a chunk of a CSV into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: fname: The filename of the file to open. num_splits: The number of splits (partitions) to separate the DataFrame into. start: The start byte offset. end: The end byte offset. kwargs: The kwargs for the Pandas `read_csv` function. header: The header of the file. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index.
[ "Use", "a", "Ray", "task", "to", "read", "a", "chunk", "of", "a", "CSV", "into", "a", "Pandas", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/pandas_on_ray/io.py#L60-L96
27,472
modin-project/modin
modin/engines/ray/pandas_on_ray/io.py
_read_hdf_columns
def _read_hdf_columns(path_or_buf, columns, num_splits, kwargs): # pragma: no cover """Use a Ray task to read columns from HDF5 into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path_or_buf: The path of the HDF5 file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ df = pandas.read_hdf(path_or_buf, columns=columns, **kwargs) # Append the length of the index here to build it externally return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
python
def _read_hdf_columns(path_or_buf, columns, num_splits, kwargs): # pragma: no cover """Use a Ray task to read columns from HDF5 into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path_or_buf: The path of the HDF5 file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ df = pandas.read_hdf(path_or_buf, columns=columns, **kwargs) # Append the length of the index here to build it externally return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
[ "def", "_read_hdf_columns", "(", "path_or_buf", ",", "columns", ",", "num_splits", ",", "kwargs", ")", ":", "# pragma: no cover", "df", "=", "pandas", ".", "read_hdf", "(", "path_or_buf", ",", "columns", "=", "columns", ",", "*", "*", "kwargs", ")", "# Append the length of the index here to build it externally", "return", "_split_result_for_readers", "(", "0", ",", "num_splits", ",", "df", ")", "+", "[", "len", "(", "df", ".", "index", ")", "]" ]
Use a Ray task to read columns from HDF5 into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path_or_buf: The path of the HDF5 file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index.
[ "Use", "a", "Ray", "task", "to", "read", "columns", "from", "HDF5", "into", "a", "Pandas", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/pandas_on_ray/io.py#L100-L119
27,473
modin-project/modin
modin/engines/ray/pandas_on_ray/io.py
_read_feather_columns
def _read_feather_columns(path, columns, num_splits): # pragma: no cover """Use a Ray task to read columns from Feather into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path: The path of the Feather file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ from pyarrow import feather df = feather.read_feather(path, columns=columns) # Append the length of the index here to build it externally return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
python
def _read_feather_columns(path, columns, num_splits): # pragma: no cover """Use a Ray task to read columns from Feather into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path: The path of the Feather file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ from pyarrow import feather df = feather.read_feather(path, columns=columns) # Append the length of the index here to build it externally return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
[ "def", "_read_feather_columns", "(", "path", ",", "columns", ",", "num_splits", ")", ":", "# pragma: no cover", "from", "pyarrow", "import", "feather", "df", "=", "feather", ".", "read_feather", "(", "path", ",", "columns", "=", "columns", ")", "# Append the length of the index here to build it externally", "return", "_split_result_for_readers", "(", "0", ",", "num_splits", ",", "df", ")", "+", "[", "len", "(", "df", ".", "index", ")", "]" ]
Use a Ray task to read columns from Feather into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path: The path of the Feather file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index.
[ "Use", "a", "Ray", "task", "to", "read", "columns", "from", "Feather", "into", "a", "Pandas", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/pandas_on_ray/io.py#L123-L143
27,474
modin-project/modin
modin/engines/ray/generic/io.py
get_index
def get_index(index_name, *partition_indices): # pragma: no cover """Get the index from the indices returned by the workers. Note: Ray functions are not detected by codecov (thus pragma: no cover)""" index = partition_indices[0].append(partition_indices[1:]) index.names = index_name return index
python
def get_index(index_name, *partition_indices): # pragma: no cover """Get the index from the indices returned by the workers. Note: Ray functions are not detected by codecov (thus pragma: no cover)""" index = partition_indices[0].append(partition_indices[1:]) index.names = index_name return index
[ "def", "get_index", "(", "index_name", ",", "*", "partition_indices", ")", ":", "# pragma: no cover", "index", "=", "partition_indices", "[", "0", "]", ".", "append", "(", "partition_indices", "[", "1", ":", "]", ")", "index", ".", "names", "=", "index_name", "return", "index" ]
Get the index from the indices returned by the workers. Note: Ray functions are not detected by codecov (thus pragma: no cover)
[ "Get", "the", "index", "from", "the", "indices", "returned", "by", "the", "workers", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/generic/io.py#L64-L70
27,475
modin-project/modin
modin/engines/ray/generic/io.py
RayIO.read_hdf
def read_hdf(cls, path_or_buf, **kwargs): """Load a h5 file from the file path or buffer, returning a DataFrame. Args: path_or_buf: string, buffer or path object Path to the file to open, or an open :class:`pandas.HDFStore` object. kwargs: Pass into pandas.read_hdf function. Returns: DataFrame constructed from the h5 file. """ if cls.read_hdf_remote_task is None: return super(RayIO, cls).read_hdf(path_or_buf, **kwargs) format = cls._validate_hdf_format(path_or_buf=path_or_buf) if format is None: ErrorMessage.default_to_pandas( "File format seems to be `fixed`. For better distribution consider saving the file in `table` format. " "df.to_hdf(format=`table`)." ) return cls.from_pandas(pandas.read_hdf(path_or_buf=path_or_buf, **kwargs)) columns = kwargs.get("columns", None) if not columns: empty_pd_df = pandas.read_hdf(path_or_buf, start=0, stop=0) columns = empty_pd_df.columns num_partitions = cls.frame_mgr_cls._compute_num_partitions() num_splits = min(len(columns), num_partitions) # Each item in this list will be a list of column names of the original df column_splits = ( len(columns) // num_partitions if len(columns) % num_partitions == 0 else len(columns) // num_partitions + 1 ) col_partitions = [ columns[i : i + column_splits] for i in range(0, len(columns), column_splits) ] blk_partitions = np.array( [ cls.read_hdf_remote_task._remote( args=(path_or_buf, cols, num_splits, kwargs), num_return_vals=num_splits + 1, ) for cols in col_partitions ] ).T remote_partitions = np.array( [ [cls.frame_partition_cls(obj) for obj in row] for row in blk_partitions[:-1] ] ) index_len = ray.get(blk_partitions[-1][0]) index = pandas.RangeIndex(index_len) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(remote_partitions), index, columns ) return new_query_compiler
python
def read_hdf(cls, path_or_buf, **kwargs): """Load a h5 file from the file path or buffer, returning a DataFrame. Args: path_or_buf: string, buffer or path object Path to the file to open, or an open :class:`pandas.HDFStore` object. kwargs: Pass into pandas.read_hdf function. Returns: DataFrame constructed from the h5 file. """ if cls.read_hdf_remote_task is None: return super(RayIO, cls).read_hdf(path_or_buf, **kwargs) format = cls._validate_hdf_format(path_or_buf=path_or_buf) if format is None: ErrorMessage.default_to_pandas( "File format seems to be `fixed`. For better distribution consider saving the file in `table` format. " "df.to_hdf(format=`table`)." ) return cls.from_pandas(pandas.read_hdf(path_or_buf=path_or_buf, **kwargs)) columns = kwargs.get("columns", None) if not columns: empty_pd_df = pandas.read_hdf(path_or_buf, start=0, stop=0) columns = empty_pd_df.columns num_partitions = cls.frame_mgr_cls._compute_num_partitions() num_splits = min(len(columns), num_partitions) # Each item in this list will be a list of column names of the original df column_splits = ( len(columns) // num_partitions if len(columns) % num_partitions == 0 else len(columns) // num_partitions + 1 ) col_partitions = [ columns[i : i + column_splits] for i in range(0, len(columns), column_splits) ] blk_partitions = np.array( [ cls.read_hdf_remote_task._remote( args=(path_or_buf, cols, num_splits, kwargs), num_return_vals=num_splits + 1, ) for cols in col_partitions ] ).T remote_partitions = np.array( [ [cls.frame_partition_cls(obj) for obj in row] for row in blk_partitions[:-1] ] ) index_len = ray.get(blk_partitions[-1][0]) index = pandas.RangeIndex(index_len) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(remote_partitions), index, columns ) return new_query_compiler
[ "def", "read_hdf", "(", "cls", ",", "path_or_buf", ",", "*", "*", "kwargs", ")", ":", "if", "cls", ".", "read_hdf_remote_task", "is", "None", ":", "return", "super", "(", "RayIO", ",", "cls", ")", ".", "read_hdf", "(", "path_or_buf", ",", "*", "*", "kwargs", ")", "format", "=", "cls", ".", "_validate_hdf_format", "(", "path_or_buf", "=", "path_or_buf", ")", "if", "format", "is", "None", ":", "ErrorMessage", ".", "default_to_pandas", "(", "\"File format seems to be `fixed`. For better distribution consider saving the file in `table` format. \"", "\"df.to_hdf(format=`table`).\"", ")", "return", "cls", ".", "from_pandas", "(", "pandas", ".", "read_hdf", "(", "path_or_buf", "=", "path_or_buf", ",", "*", "*", "kwargs", ")", ")", "columns", "=", "kwargs", ".", "get", "(", "\"columns\"", ",", "None", ")", "if", "not", "columns", ":", "empty_pd_df", "=", "pandas", ".", "read_hdf", "(", "path_or_buf", ",", "start", "=", "0", ",", "stop", "=", "0", ")", "columns", "=", "empty_pd_df", ".", "columns", "num_partitions", "=", "cls", ".", "frame_mgr_cls", ".", "_compute_num_partitions", "(", ")", "num_splits", "=", "min", "(", "len", "(", "columns", ")", ",", "num_partitions", ")", "# Each item in this list will be a list of column names of the original df", "column_splits", "=", "(", "len", "(", "columns", ")", "//", "num_partitions", "if", "len", "(", "columns", ")", "%", "num_partitions", "==", "0", "else", "len", "(", "columns", ")", "//", "num_partitions", "+", "1", ")", "col_partitions", "=", "[", "columns", "[", "i", ":", "i", "+", "column_splits", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "columns", ")", ",", "column_splits", ")", "]", "blk_partitions", "=", "np", ".", "array", "(", "[", "cls", ".", "read_hdf_remote_task", ".", "_remote", "(", "args", "=", "(", "path_or_buf", ",", "cols", ",", "num_splits", ",", "kwargs", ")", ",", "num_return_vals", "=", "num_splits", "+", "1", ",", ")", "for", "cols", "in", "col_partitions", "]", ")", ".", "T", 
"remote_partitions", "=", "np", ".", "array", "(", "[", "[", "cls", ".", "frame_partition_cls", "(", "obj", ")", "for", "obj", "in", "row", "]", "for", "row", "in", "blk_partitions", "[", ":", "-", "1", "]", "]", ")", "index_len", "=", "ray", ".", "get", "(", "blk_partitions", "[", "-", "1", "]", "[", "0", "]", ")", "index", "=", "pandas", ".", "RangeIndex", "(", "index_len", ")", "new_query_compiler", "=", "cls", ".", "query_compiler_cls", "(", "cls", ".", "frame_mgr_cls", "(", "remote_partitions", ")", ",", "index", ",", "columns", ")", "return", "new_query_compiler" ]
Load a h5 file from the file path or buffer, returning a DataFrame. Args: path_or_buf: string, buffer or path object Path to the file to open, or an open :class:`pandas.HDFStore` object. kwargs: Pass into pandas.read_hdf function. Returns: DataFrame constructed from the h5 file.
[ "Load", "a", "h5", "file", "from", "the", "file", "path", "or", "buffer", "returning", "a", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/generic/io.py#L565-L625
27,476
modin-project/modin
modin/engines/ray/generic/io.py
RayIO.read_feather
def read_feather(cls, path, columns=None, use_threads=True): """Read a pandas.DataFrame from Feather format. Ray DataFrame only supports pyarrow engine for now. Args: path: The filepath of the feather file. We only support local files for now. multi threading is set to True by default columns: not supported by pandas api, but can be passed here to read only specific columns use_threads: Whether or not to use threads when reading Notes: pyarrow feather is used. Please refer to the documentation here https://arrow.apache.org/docs/python/api.html#feather-format """ if cls.read_feather_remote_task is None: return super(RayIO, cls).read_feather( path, columns=columns, use_threads=use_threads ) if columns is None: from pyarrow.feather import FeatherReader fr = FeatherReader(path) columns = [fr.get_column_name(i) for i in range(fr.num_columns)] num_partitions = cls.frame_mgr_cls._compute_num_partitions() num_splits = min(len(columns), num_partitions) # Each item in this list will be a list of column names of the original df column_splits = ( len(columns) // num_partitions if len(columns) % num_partitions == 0 else len(columns) // num_partitions + 1 ) col_partitions = [ columns[i : i + column_splits] for i in range(0, len(columns), column_splits) ] blk_partitions = np.array( [ cls.read_feather_remote_task._remote( args=(path, cols, num_splits), num_return_vals=num_splits + 1 ) for cols in col_partitions ] ).T remote_partitions = np.array( [ [cls.frame_partition_cls(obj) for obj in row] for row in blk_partitions[:-1] ] ) index_len = ray.get(blk_partitions[-1][0]) index = pandas.RangeIndex(index_len) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(remote_partitions), index, columns ) return new_query_compiler
python
def read_feather(cls, path, columns=None, use_threads=True): """Read a pandas.DataFrame from Feather format. Ray DataFrame only supports pyarrow engine for now. Args: path: The filepath of the feather file. We only support local files for now. multi threading is set to True by default columns: not supported by pandas api, but can be passed here to read only specific columns use_threads: Whether or not to use threads when reading Notes: pyarrow feather is used. Please refer to the documentation here https://arrow.apache.org/docs/python/api.html#feather-format """ if cls.read_feather_remote_task is None: return super(RayIO, cls).read_feather( path, columns=columns, use_threads=use_threads ) if columns is None: from pyarrow.feather import FeatherReader fr = FeatherReader(path) columns = [fr.get_column_name(i) for i in range(fr.num_columns)] num_partitions = cls.frame_mgr_cls._compute_num_partitions() num_splits = min(len(columns), num_partitions) # Each item in this list will be a list of column names of the original df column_splits = ( len(columns) // num_partitions if len(columns) % num_partitions == 0 else len(columns) // num_partitions + 1 ) col_partitions = [ columns[i : i + column_splits] for i in range(0, len(columns), column_splits) ] blk_partitions = np.array( [ cls.read_feather_remote_task._remote( args=(path, cols, num_splits), num_return_vals=num_splits + 1 ) for cols in col_partitions ] ).T remote_partitions = np.array( [ [cls.frame_partition_cls(obj) for obj in row] for row in blk_partitions[:-1] ] ) index_len = ray.get(blk_partitions[-1][0]) index = pandas.RangeIndex(index_len) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(remote_partitions), index, columns ) return new_query_compiler
[ "def", "read_feather", "(", "cls", ",", "path", ",", "columns", "=", "None", ",", "use_threads", "=", "True", ")", ":", "if", "cls", ".", "read_feather_remote_task", "is", "None", ":", "return", "super", "(", "RayIO", ",", "cls", ")", ".", "read_feather", "(", "path", ",", "columns", "=", "columns", ",", "use_threads", "=", "use_threads", ")", "if", "columns", "is", "None", ":", "from", "pyarrow", ".", "feather", "import", "FeatherReader", "fr", "=", "FeatherReader", "(", "path", ")", "columns", "=", "[", "fr", ".", "get_column_name", "(", "i", ")", "for", "i", "in", "range", "(", "fr", ".", "num_columns", ")", "]", "num_partitions", "=", "cls", ".", "frame_mgr_cls", ".", "_compute_num_partitions", "(", ")", "num_splits", "=", "min", "(", "len", "(", "columns", ")", ",", "num_partitions", ")", "# Each item in this list will be a list of column names of the original df", "column_splits", "=", "(", "len", "(", "columns", ")", "//", "num_partitions", "if", "len", "(", "columns", ")", "%", "num_partitions", "==", "0", "else", "len", "(", "columns", ")", "//", "num_partitions", "+", "1", ")", "col_partitions", "=", "[", "columns", "[", "i", ":", "i", "+", "column_splits", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "columns", ")", ",", "column_splits", ")", "]", "blk_partitions", "=", "np", ".", "array", "(", "[", "cls", ".", "read_feather_remote_task", ".", "_remote", "(", "args", "=", "(", "path", ",", "cols", ",", "num_splits", ")", ",", "num_return_vals", "=", "num_splits", "+", "1", ")", "for", "cols", "in", "col_partitions", "]", ")", ".", "T", "remote_partitions", "=", "np", ".", "array", "(", "[", "[", "cls", ".", "frame_partition_cls", "(", "obj", ")", "for", "obj", "in", "row", "]", "for", "row", "in", "blk_partitions", "[", ":", "-", "1", "]", "]", ")", "index_len", "=", "ray", ".", "get", "(", "blk_partitions", "[", "-", "1", "]", "[", "0", "]", ")", "index", "=", "pandas", ".", "RangeIndex", "(", "index_len", ")", "new_query_compiler", "=", 
"cls", ".", "query_compiler_cls", "(", "cls", ".", "frame_mgr_cls", "(", "remote_partitions", ")", ",", "index", ",", "columns", ")", "return", "new_query_compiler" ]
Read a pandas.DataFrame from Feather format. Ray DataFrame only supports pyarrow engine for now. Args: path: The filepath of the feather file. We only support local files for now. multi threading is set to True by default columns: not supported by pandas api, but can be passed here to read only specific columns use_threads: Whether or not to use threads when reading Notes: pyarrow feather is used. Please refer to the documentation here https://arrow.apache.org/docs/python/api.html#feather-format
[ "Read", "a", "pandas", ".", "DataFrame", "from", "Feather", "format", ".", "Ray", "DataFrame", "only", "supports", "pyarrow", "engine", "for", "now", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/generic/io.py#L628-L686
27,477
modin-project/modin
modin/pandas/datetimes.py
to_datetime
def to_datetime( arg, errors="raise", dayfirst=False, yearfirst=False, utc=None, box=True, format=None, exact=True, unit=None, infer_datetime_format=False, origin="unix", cache=False, ): """Convert the arg to datetime format. If not Ray DataFrame, this falls back on pandas. Args: errors ('raise' or 'ignore'): If 'ignore', errors are silenced. Pandas blatantly ignores this argument so we will too. dayfirst (bool): Date format is passed in as day first. yearfirst (bool): Date format is passed in as year first. utc (bool): retuns a UTC DatetimeIndex if True. box (bool): If True, returns a DatetimeIndex. format (string): strftime to parse time, eg "%d/%m/%Y". exact (bool): If True, require an exact format match. unit (string, default 'ns'): unit of the arg. infer_datetime_format (bool): Whether or not to infer the format. origin (string): Define the reference date. Returns: Type depends on input: - list-like: DatetimeIndex - Series: Series of datetime64 dtype - scalar: Timestamp """ if not isinstance(arg, DataFrame): return pandas.to_datetime( arg, errors=errors, dayfirst=dayfirst, yearfirst=yearfirst, utc=utc, box=box, format=format, exact=exact, unit=unit, infer_datetime_format=infer_datetime_format, origin=origin, cache=cache, ) # Pandas seems to ignore this kwarg so we will too pandas.to_datetime( pandas.DataFrame(columns=arg.columns), errors=errors, dayfirst=dayfirst, yearfirst=yearfirst, utc=utc, box=box, format=format, exact=exact, unit=unit, infer_datetime_format=infer_datetime_format, origin=origin, cache=cache, ) return arg._query_compiler.to_datetime()
python
def to_datetime( arg, errors="raise", dayfirst=False, yearfirst=False, utc=None, box=True, format=None, exact=True, unit=None, infer_datetime_format=False, origin="unix", cache=False, ): """Convert the arg to datetime format. If not Ray DataFrame, this falls back on pandas. Args: errors ('raise' or 'ignore'): If 'ignore', errors are silenced. Pandas blatantly ignores this argument so we will too. dayfirst (bool): Date format is passed in as day first. yearfirst (bool): Date format is passed in as year first. utc (bool): retuns a UTC DatetimeIndex if True. box (bool): If True, returns a DatetimeIndex. format (string): strftime to parse time, eg "%d/%m/%Y". exact (bool): If True, require an exact format match. unit (string, default 'ns'): unit of the arg. infer_datetime_format (bool): Whether or not to infer the format. origin (string): Define the reference date. Returns: Type depends on input: - list-like: DatetimeIndex - Series: Series of datetime64 dtype - scalar: Timestamp """ if not isinstance(arg, DataFrame): return pandas.to_datetime( arg, errors=errors, dayfirst=dayfirst, yearfirst=yearfirst, utc=utc, box=box, format=format, exact=exact, unit=unit, infer_datetime_format=infer_datetime_format, origin=origin, cache=cache, ) # Pandas seems to ignore this kwarg so we will too pandas.to_datetime( pandas.DataFrame(columns=arg.columns), errors=errors, dayfirst=dayfirst, yearfirst=yearfirst, utc=utc, box=box, format=format, exact=exact, unit=unit, infer_datetime_format=infer_datetime_format, origin=origin, cache=cache, ) return arg._query_compiler.to_datetime()
[ "def", "to_datetime", "(", "arg", ",", "errors", "=", "\"raise\"", ",", "dayfirst", "=", "False", ",", "yearfirst", "=", "False", ",", "utc", "=", "None", ",", "box", "=", "True", ",", "format", "=", "None", ",", "exact", "=", "True", ",", "unit", "=", "None", ",", "infer_datetime_format", "=", "False", ",", "origin", "=", "\"unix\"", ",", "cache", "=", "False", ",", ")", ":", "if", "not", "isinstance", "(", "arg", ",", "DataFrame", ")", ":", "return", "pandas", ".", "to_datetime", "(", "arg", ",", "errors", "=", "errors", ",", "dayfirst", "=", "dayfirst", ",", "yearfirst", "=", "yearfirst", ",", "utc", "=", "utc", ",", "box", "=", "box", ",", "format", "=", "format", ",", "exact", "=", "exact", ",", "unit", "=", "unit", ",", "infer_datetime_format", "=", "infer_datetime_format", ",", "origin", "=", "origin", ",", "cache", "=", "cache", ",", ")", "# Pandas seems to ignore this kwarg so we will too", "pandas", ".", "to_datetime", "(", "pandas", ".", "DataFrame", "(", "columns", "=", "arg", ".", "columns", ")", ",", "errors", "=", "errors", ",", "dayfirst", "=", "dayfirst", ",", "yearfirst", "=", "yearfirst", ",", "utc", "=", "utc", ",", "box", "=", "box", ",", "format", "=", "format", ",", "exact", "=", "exact", ",", "unit", "=", "unit", ",", "infer_datetime_format", "=", "infer_datetime_format", ",", "origin", "=", "origin", ",", "cache", "=", "cache", ",", ")", "return", "arg", ".", "_query_compiler", ".", "to_datetime", "(", ")" ]
Convert the arg to datetime format. If not Ray DataFrame, this falls back on pandas. Args: errors ('raise' or 'ignore'): If 'ignore', errors are silenced. Pandas blatantly ignores this argument so we will too. dayfirst (bool): Date format is passed in as day first. yearfirst (bool): Date format is passed in as year first. utc (bool): retuns a UTC DatetimeIndex if True. box (bool): If True, returns a DatetimeIndex. format (string): strftime to parse time, eg "%d/%m/%Y". exact (bool): If True, require an exact format match. unit (string, default 'ns'): unit of the arg. infer_datetime_format (bool): Whether or not to infer the format. origin (string): Define the reference date. Returns: Type depends on input: - list-like: DatetimeIndex - Series: Series of datetime64 dtype - scalar: Timestamp
[ "Convert", "the", "arg", "to", "datetime", "format", ".", "If", "not", "Ray", "DataFrame", "this", "falls", "back", "on", "pandas", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/datetimes.py#L10-L77
27,478
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.copartition_datasets
def copartition_datasets(self, axis, other, left_func, right_func): """Copartition two BlockPartitions objects. Args: axis: The axis to copartition. other: The other BlockPartitions object to copartition with. left_func: The function to apply to left. If None, just use the dimension of self (based on axis). right_func: The function to apply to right. If None, check the dimensions of other and use the identity function if splitting needs to happen. Returns: A tuple of BlockPartitions objects, left and right. """ if left_func is None: new_self = self else: new_self = self.map_across_full_axis(axis, left_func) # This block of code will only shuffle if absolutely necessary. If we do need to # shuffle, we use the identity function and then reshuffle. if right_func is None: if axis == 0 and not np.array_equal( other.block_lengths, new_self.block_lengths ): new_other = other.manual_shuffle( axis, lambda x: x, new_self.block_lengths ) elif axis == 1 and not np.array_equal( other.block_widths, new_self.block_widths ): new_other = other.manual_shuffle( axis, lambda x: x, new_self.block_widths ) else: new_other = other # Most of the time, we will be given an operation to do. We perform that with # manual_shuffle. else: new_other = other.manual_shuffle( axis, right_func, new_self.block_lengths if axis == 0 else new_self.block_widths, ) return new_self, new_other
python
def copartition_datasets(self, axis, other, left_func, right_func): """Copartition two BlockPartitions objects. Args: axis: The axis to copartition. other: The other BlockPartitions object to copartition with. left_func: The function to apply to left. If None, just use the dimension of self (based on axis). right_func: The function to apply to right. If None, check the dimensions of other and use the identity function if splitting needs to happen. Returns: A tuple of BlockPartitions objects, left and right. """ if left_func is None: new_self = self else: new_self = self.map_across_full_axis(axis, left_func) # This block of code will only shuffle if absolutely necessary. If we do need to # shuffle, we use the identity function and then reshuffle. if right_func is None: if axis == 0 and not np.array_equal( other.block_lengths, new_self.block_lengths ): new_other = other.manual_shuffle( axis, lambda x: x, new_self.block_lengths ) elif axis == 1 and not np.array_equal( other.block_widths, new_self.block_widths ): new_other = other.manual_shuffle( axis, lambda x: x, new_self.block_widths ) else: new_other = other # Most of the time, we will be given an operation to do. We perform that with # manual_shuffle. else: new_other = other.manual_shuffle( axis, right_func, new_self.block_lengths if axis == 0 else new_self.block_widths, ) return new_self, new_other
[ "def", "copartition_datasets", "(", "self", ",", "axis", ",", "other", ",", "left_func", ",", "right_func", ")", ":", "if", "left_func", "is", "None", ":", "new_self", "=", "self", "else", ":", "new_self", "=", "self", ".", "map_across_full_axis", "(", "axis", ",", "left_func", ")", "# This block of code will only shuffle if absolutely necessary. If we do need to", "# shuffle, we use the identity function and then reshuffle.", "if", "right_func", "is", "None", ":", "if", "axis", "==", "0", "and", "not", "np", ".", "array_equal", "(", "other", ".", "block_lengths", ",", "new_self", ".", "block_lengths", ")", ":", "new_other", "=", "other", ".", "manual_shuffle", "(", "axis", ",", "lambda", "x", ":", "x", ",", "new_self", ".", "block_lengths", ")", "elif", "axis", "==", "1", "and", "not", "np", ".", "array_equal", "(", "other", ".", "block_widths", ",", "new_self", ".", "block_widths", ")", ":", "new_other", "=", "other", ".", "manual_shuffle", "(", "axis", ",", "lambda", "x", ":", "x", ",", "new_self", ".", "block_widths", ")", "else", ":", "new_other", "=", "other", "# Most of the time, we will be given an operation to do. We perform that with", "# manual_shuffle.", "else", ":", "new_other", "=", "other", ".", "manual_shuffle", "(", "axis", ",", "right_func", ",", "new_self", ".", "block_lengths", "if", "axis", "==", "0", "else", "new_self", ".", "block_widths", ",", ")", "return", "new_self", ",", "new_other" ]
Copartition two BlockPartitions objects. Args: axis: The axis to copartition. other: The other BlockPartitions object to copartition with. left_func: The function to apply to left. If None, just use the dimension of self (based on axis). right_func: The function to apply to right. If None, check the dimensions of other and use the identity function if splitting needs to happen. Returns: A tuple of BlockPartitions objects, left and right.
[ "Copartition", "two", "BlockPartitions", "objects", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L231-L275
27,479
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.concat
def concat(self, axis, other_blocks): """Concatenate the blocks with another set of blocks. Note: Assumes that the blocks are already the same shape on the dimension being concatenated. A ValueError will be thrown if this condition is not met. Args: axis: The axis to concatenate to. other_blocks: the other blocks to be concatenated. This is a BaseFrameManager object. Returns: A new BaseFrameManager object, the type of object that called this. """ if type(other_blocks) is list: other_blocks = [blocks.partitions for blocks in other_blocks] return self.__constructor__( np.concatenate([self.partitions] + other_blocks, axis=axis) ) else: return self.__constructor__( np.append(self.partitions, other_blocks.partitions, axis=axis) )
python
def concat(self, axis, other_blocks): """Concatenate the blocks with another set of blocks. Note: Assumes that the blocks are already the same shape on the dimension being concatenated. A ValueError will be thrown if this condition is not met. Args: axis: The axis to concatenate to. other_blocks: the other blocks to be concatenated. This is a BaseFrameManager object. Returns: A new BaseFrameManager object, the type of object that called this. """ if type(other_blocks) is list: other_blocks = [blocks.partitions for blocks in other_blocks] return self.__constructor__( np.concatenate([self.partitions] + other_blocks, axis=axis) ) else: return self.__constructor__( np.append(self.partitions, other_blocks.partitions, axis=axis) )
[ "def", "concat", "(", "self", ",", "axis", ",", "other_blocks", ")", ":", "if", "type", "(", "other_blocks", ")", "is", "list", ":", "other_blocks", "=", "[", "blocks", ".", "partitions", "for", "blocks", "in", "other_blocks", "]", "return", "self", ".", "__constructor__", "(", "np", ".", "concatenate", "(", "[", "self", ".", "partitions", "]", "+", "other_blocks", ",", "axis", "=", "axis", ")", ")", "else", ":", "return", "self", ".", "__constructor__", "(", "np", ".", "append", "(", "self", ".", "partitions", ",", "other_blocks", ".", "partitions", ",", "axis", "=", "axis", ")", ")" ]
Concatenate the blocks with another set of blocks. Note: Assumes that the blocks are already the same shape on the dimension being concatenated. A ValueError will be thrown if this condition is not met. Args: axis: The axis to concatenate to. other_blocks: the other blocks to be concatenated. This is a BaseFrameManager object. Returns: A new BaseFrameManager object, the type of object that called this.
[ "Concatenate", "the", "blocks", "with", "another", "set", "of", "blocks", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L398-L421
27,480
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.to_pandas
def to_pandas(self, is_transposed=False): """Convert this object into a Pandas DataFrame from the partitions. Args: is_transposed: A flag for telling this object that the external representation is transposed, but not the internal. Returns: A Pandas DataFrame """ # In the case this is transposed, it is easier to just temporarily # transpose back then transpose after the conversion. The performance # is the same as if we individually transposed the blocks and # concatenated them, but the code is much smaller. if is_transposed: return self.transpose().to_pandas(False).T else: retrieved_objects = [ [obj.to_pandas() for obj in part] for part in self.partitions ] if all( isinstance(part, pandas.Series) for row in retrieved_objects for part in row ): axis = 0 elif all( isinstance(part, pandas.DataFrame) for row in retrieved_objects for part in row ): axis = 1 else: ErrorMessage.catch_bugs_and_request_email(True) df_rows = [ pandas.concat([part for part in row], axis=axis) for row in retrieved_objects if not all(part.empty for part in row) ] if len(df_rows) == 0: return pandas.DataFrame() else: return pandas.concat(df_rows)
python
def to_pandas(self, is_transposed=False): """Convert this object into a Pandas DataFrame from the partitions. Args: is_transposed: A flag for telling this object that the external representation is transposed, but not the internal. Returns: A Pandas DataFrame """ # In the case this is transposed, it is easier to just temporarily # transpose back then transpose after the conversion. The performance # is the same as if we individually transposed the blocks and # concatenated them, but the code is much smaller. if is_transposed: return self.transpose().to_pandas(False).T else: retrieved_objects = [ [obj.to_pandas() for obj in part] for part in self.partitions ] if all( isinstance(part, pandas.Series) for row in retrieved_objects for part in row ): axis = 0 elif all( isinstance(part, pandas.DataFrame) for row in retrieved_objects for part in row ): axis = 1 else: ErrorMessage.catch_bugs_and_request_email(True) df_rows = [ pandas.concat([part for part in row], axis=axis) for row in retrieved_objects if not all(part.empty for part in row) ] if len(df_rows) == 0: return pandas.DataFrame() else: return pandas.concat(df_rows)
[ "def", "to_pandas", "(", "self", ",", "is_transposed", "=", "False", ")", ":", "# In the case this is transposed, it is easier to just temporarily", "# transpose back then transpose after the conversion. The performance", "# is the same as if we individually transposed the blocks and", "# concatenated them, but the code is much smaller.", "if", "is_transposed", ":", "return", "self", ".", "transpose", "(", ")", ".", "to_pandas", "(", "False", ")", ".", "T", "else", ":", "retrieved_objects", "=", "[", "[", "obj", ".", "to_pandas", "(", ")", "for", "obj", "in", "part", "]", "for", "part", "in", "self", ".", "partitions", "]", "if", "all", "(", "isinstance", "(", "part", ",", "pandas", ".", "Series", ")", "for", "row", "in", "retrieved_objects", "for", "part", "in", "row", ")", ":", "axis", "=", "0", "elif", "all", "(", "isinstance", "(", "part", ",", "pandas", ".", "DataFrame", ")", "for", "row", "in", "retrieved_objects", "for", "part", "in", "row", ")", ":", "axis", "=", "1", "else", ":", "ErrorMessage", ".", "catch_bugs_and_request_email", "(", "True", ")", "df_rows", "=", "[", "pandas", ".", "concat", "(", "[", "part", "for", "part", "in", "row", "]", ",", "axis", "=", "axis", ")", "for", "row", "in", "retrieved_objects", "if", "not", "all", "(", "part", ".", "empty", "for", "part", "in", "row", ")", "]", "if", "len", "(", "df_rows", ")", "==", "0", ":", "return", "pandas", ".", "DataFrame", "(", ")", "else", ":", "return", "pandas", ".", "concat", "(", "df_rows", ")" ]
Convert this object into a Pandas DataFrame from the partitions. Args: is_transposed: A flag for telling this object that the external representation is transposed, but not the internal. Returns: A Pandas DataFrame
[ "Convert", "this", "object", "into", "a", "Pandas", "DataFrame", "from", "the", "partitions", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L439-L481
27,481
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.get_indices
def get_indices(self, axis=0, index_func=None, old_blocks=None): """This gets the internal indices stored in the partitions. Note: These are the global indices of the object. This is mostly useful when you have deleted rows/columns internally, but do not know which ones were deleted. Args: axis: This axis to extract the labels. (0 - index, 1 - columns). index_func: The function to be used to extract the function. old_blocks: An optional previous object that this object was created from. This is used to compute the correct offsets. Returns: A Pandas Index object. """ ErrorMessage.catch_bugs_and_request_email(not callable(index_func)) func = self.preprocess_func(index_func) if axis == 0: # We grab the first column of blocks and extract the indices # Note: We use _partitions_cache in the context of this function to make # sure that none of the partitions are modified or filtered out before we # get the index information. # DO NOT CHANGE TO self.partitions under any circumstance. new_indices = ( [idx.apply(func).get() for idx in self._partitions_cache.T[0]] if len(self._partitions_cache.T) else [] ) # This is important because sometimes we have resized the data. The new # sizes will not be valid if we are trying to compute the index on a # new object that has a different length. if old_blocks is not None: cumulative_block_lengths = np.array(old_blocks.block_lengths).cumsum() else: cumulative_block_lengths = np.array(self.block_lengths).cumsum() else: new_indices = ( [idx.apply(func).get() for idx in self._partitions_cache[0]] if len(self._partitions_cache) else [] ) if old_blocks is not None: cumulative_block_lengths = np.array(old_blocks.block_widths).cumsum() else: cumulative_block_lengths = np.array(self.block_widths).cumsum() full_indices = new_indices[0] if len(new_indices) else new_indices if old_blocks is not None: for i in range(len(new_indices)): # If the length is 0 there is nothing to append. 
if i == 0 or len(new_indices[i]) == 0: continue # The try-except here is intended to catch issues where we are # trying to get a string index out of the internal index. try: append_val = new_indices[i] + cumulative_block_lengths[i - 1] except TypeError: append_val = new_indices[i] full_indices = full_indices.append(append_val) else: full_indices = full_indices.append(new_indices[1:]) return full_indices
python
def get_indices(self, axis=0, index_func=None, old_blocks=None): """This gets the internal indices stored in the partitions. Note: These are the global indices of the object. This is mostly useful when you have deleted rows/columns internally, but do not know which ones were deleted. Args: axis: This axis to extract the labels. (0 - index, 1 - columns). index_func: The function to be used to extract the function. old_blocks: An optional previous object that this object was created from. This is used to compute the correct offsets. Returns: A Pandas Index object. """ ErrorMessage.catch_bugs_and_request_email(not callable(index_func)) func = self.preprocess_func(index_func) if axis == 0: # We grab the first column of blocks and extract the indices # Note: We use _partitions_cache in the context of this function to make # sure that none of the partitions are modified or filtered out before we # get the index information. # DO NOT CHANGE TO self.partitions under any circumstance. new_indices = ( [idx.apply(func).get() for idx in self._partitions_cache.T[0]] if len(self._partitions_cache.T) else [] ) # This is important because sometimes we have resized the data. The new # sizes will not be valid if we are trying to compute the index on a # new object that has a different length. if old_blocks is not None: cumulative_block_lengths = np.array(old_blocks.block_lengths).cumsum() else: cumulative_block_lengths = np.array(self.block_lengths).cumsum() else: new_indices = ( [idx.apply(func).get() for idx in self._partitions_cache[0]] if len(self._partitions_cache) else [] ) if old_blocks is not None: cumulative_block_lengths = np.array(old_blocks.block_widths).cumsum() else: cumulative_block_lengths = np.array(self.block_widths).cumsum() full_indices = new_indices[0] if len(new_indices) else new_indices if old_blocks is not None: for i in range(len(new_indices)): # If the length is 0 there is nothing to append. 
if i == 0 or len(new_indices[i]) == 0: continue # The try-except here is intended to catch issues where we are # trying to get a string index out of the internal index. try: append_val = new_indices[i] + cumulative_block_lengths[i - 1] except TypeError: append_val = new_indices[i] full_indices = full_indices.append(append_val) else: full_indices = full_indices.append(new_indices[1:]) return full_indices
[ "def", "get_indices", "(", "self", ",", "axis", "=", "0", ",", "index_func", "=", "None", ",", "old_blocks", "=", "None", ")", ":", "ErrorMessage", ".", "catch_bugs_and_request_email", "(", "not", "callable", "(", "index_func", ")", ")", "func", "=", "self", ".", "preprocess_func", "(", "index_func", ")", "if", "axis", "==", "0", ":", "# We grab the first column of blocks and extract the indices", "# Note: We use _partitions_cache in the context of this function to make", "# sure that none of the partitions are modified or filtered out before we", "# get the index information.", "# DO NOT CHANGE TO self.partitions under any circumstance.", "new_indices", "=", "(", "[", "idx", ".", "apply", "(", "func", ")", ".", "get", "(", ")", "for", "idx", "in", "self", ".", "_partitions_cache", ".", "T", "[", "0", "]", "]", "if", "len", "(", "self", ".", "_partitions_cache", ".", "T", ")", "else", "[", "]", ")", "# This is important because sometimes we have resized the data. The new", "# sizes will not be valid if we are trying to compute the index on a", "# new object that has a different length.", "if", "old_blocks", "is", "not", "None", ":", "cumulative_block_lengths", "=", "np", ".", "array", "(", "old_blocks", ".", "block_lengths", ")", ".", "cumsum", "(", ")", "else", ":", "cumulative_block_lengths", "=", "np", ".", "array", "(", "self", ".", "block_lengths", ")", ".", "cumsum", "(", ")", "else", ":", "new_indices", "=", "(", "[", "idx", ".", "apply", "(", "func", ")", ".", "get", "(", ")", "for", "idx", "in", "self", ".", "_partitions_cache", "[", "0", "]", "]", "if", "len", "(", "self", ".", "_partitions_cache", ")", "else", "[", "]", ")", "if", "old_blocks", "is", "not", "None", ":", "cumulative_block_lengths", "=", "np", ".", "array", "(", "old_blocks", ".", "block_widths", ")", ".", "cumsum", "(", ")", "else", ":", "cumulative_block_lengths", "=", "np", ".", "array", "(", "self", ".", "block_widths", ")", ".", "cumsum", "(", ")", "full_indices", "=", 
"new_indices", "[", "0", "]", "if", "len", "(", "new_indices", ")", "else", "new_indices", "if", "old_blocks", "is", "not", "None", ":", "for", "i", "in", "range", "(", "len", "(", "new_indices", ")", ")", ":", "# If the length is 0 there is nothing to append.", "if", "i", "==", "0", "or", "len", "(", "new_indices", "[", "i", "]", ")", "==", "0", ":", "continue", "# The try-except here is intended to catch issues where we are", "# trying to get a string index out of the internal index.", "try", ":", "append_val", "=", "new_indices", "[", "i", "]", "+", "cumulative_block_lengths", "[", "i", "-", "1", "]", "except", "TypeError", ":", "append_val", "=", "new_indices", "[", "i", "]", "full_indices", "=", "full_indices", ".", "append", "(", "append_val", ")", "else", ":", "full_indices", "=", "full_indices", ".", "append", "(", "new_indices", "[", "1", ":", "]", ")", "return", "full_indices" ]
This gets the internal indices stored in the partitions. Note: These are the global indices of the object. This is mostly useful when you have deleted rows/columns internally, but do not know which ones were deleted. Args: axis: This axis to extract the labels. (0 - index, 1 - columns). index_func: The function to be used to extract the function. old_blocks: An optional previous object that this object was created from. This is used to compute the correct offsets. Returns: A Pandas Index object.
[ "This", "gets", "the", "internal", "indices", "stored", "in", "the", "partitions", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L503-L566
27,482
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager._get_blocks_containing_index
def _get_blocks_containing_index(self, axis, index): """Convert a global index to a block index and local index. Note: This method is primarily used to convert a global index into a partition index (along the axis provided) and local index (useful for `iloc` or similar operations. Args: axis: The axis along which to get the indices (0 - columns, 1 - rows) index: The global index to convert. Returns: A tuple containing (block index and internal index). """ if not axis: ErrorMessage.catch_bugs_and_request_email(index > sum(self.block_widths)) cumulative_column_widths = np.array(self.block_widths).cumsum() block_idx = int(np.digitize(index, cumulative_column_widths)) if block_idx == len(cumulative_column_widths): block_idx -= 1 # Compute the internal index based on the previous lengths. This # is a global index, so we must subtract the lengths first. internal_idx = ( index if not block_idx else index - cumulative_column_widths[block_idx - 1] ) else: ErrorMessage.catch_bugs_and_request_email(index > sum(self.block_lengths)) cumulative_row_lengths = np.array(self.block_lengths).cumsum() block_idx = int(np.digitize(index, cumulative_row_lengths)) # See note above about internal index internal_idx = ( index if not block_idx else index - cumulative_row_lengths[block_idx - 1] ) return block_idx, internal_idx
python
def _get_blocks_containing_index(self, axis, index): """Convert a global index to a block index and local index. Note: This method is primarily used to convert a global index into a partition index (along the axis provided) and local index (useful for `iloc` or similar operations. Args: axis: The axis along which to get the indices (0 - columns, 1 - rows) index: The global index to convert. Returns: A tuple containing (block index and internal index). """ if not axis: ErrorMessage.catch_bugs_and_request_email(index > sum(self.block_widths)) cumulative_column_widths = np.array(self.block_widths).cumsum() block_idx = int(np.digitize(index, cumulative_column_widths)) if block_idx == len(cumulative_column_widths): block_idx -= 1 # Compute the internal index based on the previous lengths. This # is a global index, so we must subtract the lengths first. internal_idx = ( index if not block_idx else index - cumulative_column_widths[block_idx - 1] ) else: ErrorMessage.catch_bugs_and_request_email(index > sum(self.block_lengths)) cumulative_row_lengths = np.array(self.block_lengths).cumsum() block_idx = int(np.digitize(index, cumulative_row_lengths)) # See note above about internal index internal_idx = ( index if not block_idx else index - cumulative_row_lengths[block_idx - 1] ) return block_idx, internal_idx
[ "def", "_get_blocks_containing_index", "(", "self", ",", "axis", ",", "index", ")", ":", "if", "not", "axis", ":", "ErrorMessage", ".", "catch_bugs_and_request_email", "(", "index", ">", "sum", "(", "self", ".", "block_widths", ")", ")", "cumulative_column_widths", "=", "np", ".", "array", "(", "self", ".", "block_widths", ")", ".", "cumsum", "(", ")", "block_idx", "=", "int", "(", "np", ".", "digitize", "(", "index", ",", "cumulative_column_widths", ")", ")", "if", "block_idx", "==", "len", "(", "cumulative_column_widths", ")", ":", "block_idx", "-=", "1", "# Compute the internal index based on the previous lengths. This", "# is a global index, so we must subtract the lengths first.", "internal_idx", "=", "(", "index", "if", "not", "block_idx", "else", "index", "-", "cumulative_column_widths", "[", "block_idx", "-", "1", "]", ")", "else", ":", "ErrorMessage", ".", "catch_bugs_and_request_email", "(", "index", ">", "sum", "(", "self", ".", "block_lengths", ")", ")", "cumulative_row_lengths", "=", "np", ".", "array", "(", "self", ".", "block_lengths", ")", ".", "cumsum", "(", ")", "block_idx", "=", "int", "(", "np", ".", "digitize", "(", "index", ",", "cumulative_row_lengths", ")", ")", "# See note above about internal index", "internal_idx", "=", "(", "index", "if", "not", "block_idx", "else", "index", "-", "cumulative_row_lengths", "[", "block_idx", "-", "1", "]", ")", "return", "block_idx", ",", "internal_idx" ]
Convert a global index to a block index and local index. Note: This method is primarily used to convert a global index into a partition index (along the axis provided) and local index (useful for `iloc` or similar operations. Args: axis: The axis along which to get the indices (0 - columns, 1 - rows) index: The global index to convert. Returns: A tuple containing (block index and internal index).
[ "Convert", "a", "global", "index", "to", "a", "block", "index", "and", "local", "index", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L580-L618
27,483
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager._get_dict_of_block_index
def _get_dict_of_block_index(self, axis, indices, ordered=False): """Convert indices to a dict of block index to internal index mapping. Note: See `_get_blocks_containing_index` for primary usage. This method accepts a list of indices rather than just a single value, and uses `_get_blocks_containing_index`. Args: axis: The axis along which to get the indices (0 - columns, 1 - rows) indices: A list of global indices to convert. Returns For unordered: a dictionary of {block index: list of local indices}. For ordered: a list of tuples mapping block index: list of local indices. """ # Get the internal index and create a dictionary so we only have to # travel to each partition once. all_partitions_and_idx = [ self._get_blocks_containing_index(axis, i) for i in indices ] # In ordered, we have to maintain the order of the list of indices provided. # This means that we need to return a list instead of a dictionary. if ordered: # In ordered, the partitions dict is a list of tuples partitions_dict = [] # This variable is used to store the most recent partition that we added to # the partitions_dict. This allows us to only visit a partition once when we # have multiple values that will be operated on in that partition. last_part = -1 for part_idx, internal_idx in all_partitions_and_idx: if part_idx == last_part: # We append to the list, which is the value part of the tuple. partitions_dict[-1][-1].append(internal_idx) else: # This is where we add new values. partitions_dict.append((part_idx, [internal_idx])) last_part = part_idx else: # For unordered, we can just return a dictionary mapping partition to the # list of indices being operated on. partitions_dict = {} for part_idx, internal_idx in all_partitions_and_idx: if part_idx not in partitions_dict: partitions_dict[part_idx] = [internal_idx] else: partitions_dict[part_idx].append(internal_idx) return partitions_dict
python
def _get_dict_of_block_index(self, axis, indices, ordered=False): """Convert indices to a dict of block index to internal index mapping. Note: See `_get_blocks_containing_index` for primary usage. This method accepts a list of indices rather than just a single value, and uses `_get_blocks_containing_index`. Args: axis: The axis along which to get the indices (0 - columns, 1 - rows) indices: A list of global indices to convert. Returns For unordered: a dictionary of {block index: list of local indices}. For ordered: a list of tuples mapping block index: list of local indices. """ # Get the internal index and create a dictionary so we only have to # travel to each partition once. all_partitions_and_idx = [ self._get_blocks_containing_index(axis, i) for i in indices ] # In ordered, we have to maintain the order of the list of indices provided. # This means that we need to return a list instead of a dictionary. if ordered: # In ordered, the partitions dict is a list of tuples partitions_dict = [] # This variable is used to store the most recent partition that we added to # the partitions_dict. This allows us to only visit a partition once when we # have multiple values that will be operated on in that partition. last_part = -1 for part_idx, internal_idx in all_partitions_and_idx: if part_idx == last_part: # We append to the list, which is the value part of the tuple. partitions_dict[-1][-1].append(internal_idx) else: # This is where we add new values. partitions_dict.append((part_idx, [internal_idx])) last_part = part_idx else: # For unordered, we can just return a dictionary mapping partition to the # list of indices being operated on. partitions_dict = {} for part_idx, internal_idx in all_partitions_and_idx: if part_idx not in partitions_dict: partitions_dict[part_idx] = [internal_idx] else: partitions_dict[part_idx].append(internal_idx) return partitions_dict
[ "def", "_get_dict_of_block_index", "(", "self", ",", "axis", ",", "indices", ",", "ordered", "=", "False", ")", ":", "# Get the internal index and create a dictionary so we only have to", "# travel to each partition once.", "all_partitions_and_idx", "=", "[", "self", ".", "_get_blocks_containing_index", "(", "axis", ",", "i", ")", "for", "i", "in", "indices", "]", "# In ordered, we have to maintain the order of the list of indices provided.", "# This means that we need to return a list instead of a dictionary.", "if", "ordered", ":", "# In ordered, the partitions dict is a list of tuples", "partitions_dict", "=", "[", "]", "# This variable is used to store the most recent partition that we added to", "# the partitions_dict. This allows us to only visit a partition once when we", "# have multiple values that will be operated on in that partition.", "last_part", "=", "-", "1", "for", "part_idx", ",", "internal_idx", "in", "all_partitions_and_idx", ":", "if", "part_idx", "==", "last_part", ":", "# We append to the list, which is the value part of the tuple.", "partitions_dict", "[", "-", "1", "]", "[", "-", "1", "]", ".", "append", "(", "internal_idx", ")", "else", ":", "# This is where we add new values.", "partitions_dict", ".", "append", "(", "(", "part_idx", ",", "[", "internal_idx", "]", ")", ")", "last_part", "=", "part_idx", "else", ":", "# For unordered, we can just return a dictionary mapping partition to the", "# list of indices being operated on.", "partitions_dict", "=", "{", "}", "for", "part_idx", ",", "internal_idx", "in", "all_partitions_and_idx", ":", "if", "part_idx", "not", "in", "partitions_dict", ":", "partitions_dict", "[", "part_idx", "]", "=", "[", "internal_idx", "]", "else", ":", "partitions_dict", "[", "part_idx", "]", ".", "append", "(", "internal_idx", ")", "return", "partitions_dict" ]
Convert indices to a dict of block index to internal index mapping. Note: See `_get_blocks_containing_index` for primary usage. This method accepts a list of indices rather than just a single value, and uses `_get_blocks_containing_index`. Args: axis: The axis along which to get the indices (0 - columns, 1 - rows) indices: A list of global indices to convert. Returns For unordered: a dictionary of {block index: list of local indices}. For ordered: a list of tuples mapping block index: list of local indices.
[ "Convert", "indices", "to", "a", "dict", "of", "block", "index", "to", "internal", "index", "mapping", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L620-L668
27,484
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager._apply_func_to_list_of_partitions
def _apply_func_to_list_of_partitions(self, func, partitions, **kwargs): """Applies a function to a list of remote partitions. Note: The main use for this is to preprocess the func. Args: func: The func to apply partitions: The list of partitions Returns: A list of BaseFramePartition objects. """ preprocessed_func = self.preprocess_func(func) return [obj.apply(preprocessed_func, **kwargs) for obj in partitions]
python
def _apply_func_to_list_of_partitions(self, func, partitions, **kwargs): """Applies a function to a list of remote partitions. Note: The main use for this is to preprocess the func. Args: func: The func to apply partitions: The list of partitions Returns: A list of BaseFramePartition objects. """ preprocessed_func = self.preprocess_func(func) return [obj.apply(preprocessed_func, **kwargs) for obj in partitions]
[ "def", "_apply_func_to_list_of_partitions", "(", "self", ",", "func", ",", "partitions", ",", "*", "*", "kwargs", ")", ":", "preprocessed_func", "=", "self", ".", "preprocess_func", "(", "func", ")", "return", "[", "obj", ".", "apply", "(", "preprocessed_func", ",", "*", "*", "kwargs", ")", "for", "obj", "in", "partitions", "]" ]
Applies a function to a list of remote partitions. Note: The main use for this is to preprocess the func. Args: func: The func to apply partitions: The list of partitions Returns: A list of BaseFramePartition objects.
[ "Applies", "a", "function", "to", "a", "list", "of", "remote", "partitions", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L670-L683
27,485
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.apply_func_to_select_indices
def apply_func_to_select_indices(self, axis, func, indices, keep_remaining=False): """Applies a function to select indices. Note: Your internal function must take a kwarg `internal_indices` for this to work correctly. This prevents information leakage of the internal index to the external representation. Args: axis: The axis to apply the func over. func: The function to apply to these indices. indices: The indices to apply the function to. keep_remaining: Whether or not to keep the other partitions. Some operations may want to drop the remaining partitions and keep only the results. Returns: A new BaseFrameManager object, the type of object that called this. """ if self.partitions.size == 0: return np.array([[]]) # Handling dictionaries has to be done differently, but we still want # to figure out the partitions that need to be applied to, so we will # store the dictionary in a separate variable and assign `indices` to # the keys to handle it the same as we normally would. if isinstance(indices, dict): dict_indices = indices indices = list(indices.keys()) else: dict_indices = None if not isinstance(indices, list): indices = [indices] partitions_dict = self._get_dict_of_block_index( axis, indices, ordered=not keep_remaining ) if not axis: partitions_for_apply = self.partitions.T else: partitions_for_apply = self.partitions # We may have a command to perform different functions on different # columns at the same time. We attempt to handle this as efficiently as # possible here. Functions that use this in the dictionary format must # accept a keyword argument `func_dict`. 
if dict_indices is not None: def local_to_global_idx(partition_id, local_idx): if partition_id == 0: return local_idx if axis == 0: cumulative_axis = np.cumsum(self.block_widths) else: cumulative_axis = np.cumsum(self.block_lengths) return cumulative_axis[partition_id - 1] + local_idx if not keep_remaining: result = np.array( [ self._apply_func_to_list_of_partitions( func, partitions_for_apply[o_idx], func_dict={ i_idx: dict_indices[local_to_global_idx(o_idx, i_idx)] for i_idx in list_to_apply if i_idx >= 0 }, ) for o_idx, list_to_apply in partitions_dict ] ) else: result = np.array( [ partitions_for_apply[i] if i not in partitions_dict else self._apply_func_to_list_of_partitions( func, partitions_for_apply[i], func_dict={ idx: dict_indices[local_to_global_idx(i, idx)] for idx in partitions_dict[i] if idx >= 0 }, ) for i in range(len(partitions_for_apply)) ] ) else: if not keep_remaining: # We are passing internal indices in here. In order for func to # actually be able to use this information, it must be able to take in # the internal indices. This might mean an iloc in the case of Pandas # or some other way to index into the internal representation. result = np.array( [ self._apply_func_to_list_of_partitions( func, partitions_for_apply[idx], internal_indices=list_to_apply, ) for idx, list_to_apply in partitions_dict ] ) else: # The difference here is that we modify a subset and return the # remaining (non-updated) blocks in their original position. result = np.array( [ partitions_for_apply[i] if i not in partitions_dict else self._apply_func_to_list_of_partitions( func, partitions_for_apply[i], internal_indices=partitions_dict[i], ) for i in range(len(partitions_for_apply)) ] ) return ( self.__constructor__(result.T) if not axis else self.__constructor__(result) )
python
def apply_func_to_select_indices(self, axis, func, indices, keep_remaining=False): """Applies a function to select indices. Note: Your internal function must take a kwarg `internal_indices` for this to work correctly. This prevents information leakage of the internal index to the external representation. Args: axis: The axis to apply the func over. func: The function to apply to these indices. indices: The indices to apply the function to. keep_remaining: Whether or not to keep the other partitions. Some operations may want to drop the remaining partitions and keep only the results. Returns: A new BaseFrameManager object, the type of object that called this. """ if self.partitions.size == 0: return np.array([[]]) # Handling dictionaries has to be done differently, but we still want # to figure out the partitions that need to be applied to, so we will # store the dictionary in a separate variable and assign `indices` to # the keys to handle it the same as we normally would. if isinstance(indices, dict): dict_indices = indices indices = list(indices.keys()) else: dict_indices = None if not isinstance(indices, list): indices = [indices] partitions_dict = self._get_dict_of_block_index( axis, indices, ordered=not keep_remaining ) if not axis: partitions_for_apply = self.partitions.T else: partitions_for_apply = self.partitions # We may have a command to perform different functions on different # columns at the same time. We attempt to handle this as efficiently as # possible here. Functions that use this in the dictionary format must # accept a keyword argument `func_dict`. 
if dict_indices is not None: def local_to_global_idx(partition_id, local_idx): if partition_id == 0: return local_idx if axis == 0: cumulative_axis = np.cumsum(self.block_widths) else: cumulative_axis = np.cumsum(self.block_lengths) return cumulative_axis[partition_id - 1] + local_idx if not keep_remaining: result = np.array( [ self._apply_func_to_list_of_partitions( func, partitions_for_apply[o_idx], func_dict={ i_idx: dict_indices[local_to_global_idx(o_idx, i_idx)] for i_idx in list_to_apply if i_idx >= 0 }, ) for o_idx, list_to_apply in partitions_dict ] ) else: result = np.array( [ partitions_for_apply[i] if i not in partitions_dict else self._apply_func_to_list_of_partitions( func, partitions_for_apply[i], func_dict={ idx: dict_indices[local_to_global_idx(i, idx)] for idx in partitions_dict[i] if idx >= 0 }, ) for i in range(len(partitions_for_apply)) ] ) else: if not keep_remaining: # We are passing internal indices in here. In order for func to # actually be able to use this information, it must be able to take in # the internal indices. This might mean an iloc in the case of Pandas # or some other way to index into the internal representation. result = np.array( [ self._apply_func_to_list_of_partitions( func, partitions_for_apply[idx], internal_indices=list_to_apply, ) for idx, list_to_apply in partitions_dict ] ) else: # The difference here is that we modify a subset and return the # remaining (non-updated) blocks in their original position. result = np.array( [ partitions_for_apply[i] if i not in partitions_dict else self._apply_func_to_list_of_partitions( func, partitions_for_apply[i], internal_indices=partitions_dict[i], ) for i in range(len(partitions_for_apply)) ] ) return ( self.__constructor__(result.T) if not axis else self.__constructor__(result) )
[ "def", "apply_func_to_select_indices", "(", "self", ",", "axis", ",", "func", ",", "indices", ",", "keep_remaining", "=", "False", ")", ":", "if", "self", ".", "partitions", ".", "size", "==", "0", ":", "return", "np", ".", "array", "(", "[", "[", "]", "]", ")", "# Handling dictionaries has to be done differently, but we still want", "# to figure out the partitions that need to be applied to, so we will", "# store the dictionary in a separate variable and assign `indices` to", "# the keys to handle it the same as we normally would.", "if", "isinstance", "(", "indices", ",", "dict", ")", ":", "dict_indices", "=", "indices", "indices", "=", "list", "(", "indices", ".", "keys", "(", ")", ")", "else", ":", "dict_indices", "=", "None", "if", "not", "isinstance", "(", "indices", ",", "list", ")", ":", "indices", "=", "[", "indices", "]", "partitions_dict", "=", "self", ".", "_get_dict_of_block_index", "(", "axis", ",", "indices", ",", "ordered", "=", "not", "keep_remaining", ")", "if", "not", "axis", ":", "partitions_for_apply", "=", "self", ".", "partitions", ".", "T", "else", ":", "partitions_for_apply", "=", "self", ".", "partitions", "# We may have a command to perform different functions on different", "# columns at the same time. We attempt to handle this as efficiently as", "# possible here. 
Functions that use this in the dictionary format must", "# accept a keyword argument `func_dict`.", "if", "dict_indices", "is", "not", "None", ":", "def", "local_to_global_idx", "(", "partition_id", ",", "local_idx", ")", ":", "if", "partition_id", "==", "0", ":", "return", "local_idx", "if", "axis", "==", "0", ":", "cumulative_axis", "=", "np", ".", "cumsum", "(", "self", ".", "block_widths", ")", "else", ":", "cumulative_axis", "=", "np", ".", "cumsum", "(", "self", ".", "block_lengths", ")", "return", "cumulative_axis", "[", "partition_id", "-", "1", "]", "+", "local_idx", "if", "not", "keep_remaining", ":", "result", "=", "np", ".", "array", "(", "[", "self", ".", "_apply_func_to_list_of_partitions", "(", "func", ",", "partitions_for_apply", "[", "o_idx", "]", ",", "func_dict", "=", "{", "i_idx", ":", "dict_indices", "[", "local_to_global_idx", "(", "o_idx", ",", "i_idx", ")", "]", "for", "i_idx", "in", "list_to_apply", "if", "i_idx", ">=", "0", "}", ",", ")", "for", "o_idx", ",", "list_to_apply", "in", "partitions_dict", "]", ")", "else", ":", "result", "=", "np", ".", "array", "(", "[", "partitions_for_apply", "[", "i", "]", "if", "i", "not", "in", "partitions_dict", "else", "self", ".", "_apply_func_to_list_of_partitions", "(", "func", ",", "partitions_for_apply", "[", "i", "]", ",", "func_dict", "=", "{", "idx", ":", "dict_indices", "[", "local_to_global_idx", "(", "i", ",", "idx", ")", "]", "for", "idx", "in", "partitions_dict", "[", "i", "]", "if", "idx", ">=", "0", "}", ",", ")", "for", "i", "in", "range", "(", "len", "(", "partitions_for_apply", ")", ")", "]", ")", "else", ":", "if", "not", "keep_remaining", ":", "# We are passing internal indices in here. In order for func to", "# actually be able to use this information, it must be able to take in", "# the internal indices. 
This might mean an iloc in the case of Pandas", "# or some other way to index into the internal representation.", "result", "=", "np", ".", "array", "(", "[", "self", ".", "_apply_func_to_list_of_partitions", "(", "func", ",", "partitions_for_apply", "[", "idx", "]", ",", "internal_indices", "=", "list_to_apply", ",", ")", "for", "idx", ",", "list_to_apply", "in", "partitions_dict", "]", ")", "else", ":", "# The difference here is that we modify a subset and return the", "# remaining (non-updated) blocks in their original position.", "result", "=", "np", ".", "array", "(", "[", "partitions_for_apply", "[", "i", "]", "if", "i", "not", "in", "partitions_dict", "else", "self", ".", "_apply_func_to_list_of_partitions", "(", "func", ",", "partitions_for_apply", "[", "i", "]", ",", "internal_indices", "=", "partitions_dict", "[", "i", "]", ",", ")", "for", "i", "in", "range", "(", "len", "(", "partitions_for_apply", ")", ")", "]", ")", "return", "(", "self", ".", "__constructor__", "(", "result", ".", "T", ")", "if", "not", "axis", "else", "self", ".", "__constructor__", "(", "result", ")", ")" ]
Applies a function to select indices. Note: Your internal function must take a kwarg `internal_indices` for this to work correctly. This prevents information leakage of the internal index to the external representation. Args: axis: The axis to apply the func over. func: The function to apply to these indices. indices: The indices to apply the function to. keep_remaining: Whether or not to keep the other partitions. Some operations may want to drop the remaining partitions and keep only the results. Returns: A new BaseFrameManager object, the type of object that called this.
[ "Applies", "a", "function", "to", "select", "indices", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L685-L803
27,486
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.apply_func_to_indices_both_axis
def apply_func_to_indices_both_axis( self, func, row_indices, col_indices, lazy=False, keep_remaining=True, mutate=False, item_to_distribute=None, ): """ Apply a function to along both axis Important: For your func to operate directly on the indices provided, it must use `row_internal_indices, col_internal_indices` as keyword arguments. """ if keep_remaining: row_partitions_list = self._get_dict_of_block_index(1, row_indices).items() col_partitions_list = self._get_dict_of_block_index(0, col_indices).items() else: row_partitions_list = self._get_dict_of_block_index( 1, row_indices, ordered=True ) col_partitions_list = self._get_dict_of_block_index( 0, col_indices, ordered=True ) result = np.empty( (len(row_partitions_list), len(col_partitions_list)), dtype=type(self) ) if not mutate: partition_copy = self.partitions.copy() else: partition_copy = self.partitions row_position_counter = 0 for row_idx, row_values in enumerate(row_partitions_list): row_blk_idx, row_internal_idx = row_values col_position_counter = 0 for col_idx, col_values in enumerate(col_partitions_list): col_blk_idx, col_internal_idx = col_values remote_part = partition_copy[row_blk_idx, col_blk_idx] if item_to_distribute is not None: item = item_to_distribute[ row_position_counter : row_position_counter + len(row_internal_idx), col_position_counter : col_position_counter + len(col_internal_idx), ] item = {"item": item} else: item = {} if lazy: block_result = remote_part.add_to_apply_calls( func, row_internal_indices=row_internal_idx, col_internal_indices=col_internal_idx, **item ) else: block_result = remote_part.apply( func, row_internal_indices=row_internal_idx, col_internal_indices=col_internal_idx, **item ) if keep_remaining: partition_copy[row_blk_idx, col_blk_idx] = block_result else: result[row_idx][col_idx] = block_result col_position_counter += len(col_internal_idx) row_position_counter += len(row_internal_idx) if keep_remaining: return self.__constructor__(partition_copy) else: return 
self.__constructor__(result)
python
def apply_func_to_indices_both_axis( self, func, row_indices, col_indices, lazy=False, keep_remaining=True, mutate=False, item_to_distribute=None, ): """ Apply a function to along both axis Important: For your func to operate directly on the indices provided, it must use `row_internal_indices, col_internal_indices` as keyword arguments. """ if keep_remaining: row_partitions_list = self._get_dict_of_block_index(1, row_indices).items() col_partitions_list = self._get_dict_of_block_index(0, col_indices).items() else: row_partitions_list = self._get_dict_of_block_index( 1, row_indices, ordered=True ) col_partitions_list = self._get_dict_of_block_index( 0, col_indices, ordered=True ) result = np.empty( (len(row_partitions_list), len(col_partitions_list)), dtype=type(self) ) if not mutate: partition_copy = self.partitions.copy() else: partition_copy = self.partitions row_position_counter = 0 for row_idx, row_values in enumerate(row_partitions_list): row_blk_idx, row_internal_idx = row_values col_position_counter = 0 for col_idx, col_values in enumerate(col_partitions_list): col_blk_idx, col_internal_idx = col_values remote_part = partition_copy[row_blk_idx, col_blk_idx] if item_to_distribute is not None: item = item_to_distribute[ row_position_counter : row_position_counter + len(row_internal_idx), col_position_counter : col_position_counter + len(col_internal_idx), ] item = {"item": item} else: item = {} if lazy: block_result = remote_part.add_to_apply_calls( func, row_internal_indices=row_internal_idx, col_internal_indices=col_internal_idx, **item ) else: block_result = remote_part.apply( func, row_internal_indices=row_internal_idx, col_internal_indices=col_internal_idx, **item ) if keep_remaining: partition_copy[row_blk_idx, col_blk_idx] = block_result else: result[row_idx][col_idx] = block_result col_position_counter += len(col_internal_idx) row_position_counter += len(row_internal_idx) if keep_remaining: return self.__constructor__(partition_copy) else: return 
self.__constructor__(result)
[ "def", "apply_func_to_indices_both_axis", "(", "self", ",", "func", ",", "row_indices", ",", "col_indices", ",", "lazy", "=", "False", ",", "keep_remaining", "=", "True", ",", "mutate", "=", "False", ",", "item_to_distribute", "=", "None", ",", ")", ":", "if", "keep_remaining", ":", "row_partitions_list", "=", "self", ".", "_get_dict_of_block_index", "(", "1", ",", "row_indices", ")", ".", "items", "(", ")", "col_partitions_list", "=", "self", ".", "_get_dict_of_block_index", "(", "0", ",", "col_indices", ")", ".", "items", "(", ")", "else", ":", "row_partitions_list", "=", "self", ".", "_get_dict_of_block_index", "(", "1", ",", "row_indices", ",", "ordered", "=", "True", ")", "col_partitions_list", "=", "self", ".", "_get_dict_of_block_index", "(", "0", ",", "col_indices", ",", "ordered", "=", "True", ")", "result", "=", "np", ".", "empty", "(", "(", "len", "(", "row_partitions_list", ")", ",", "len", "(", "col_partitions_list", ")", ")", ",", "dtype", "=", "type", "(", "self", ")", ")", "if", "not", "mutate", ":", "partition_copy", "=", "self", ".", "partitions", ".", "copy", "(", ")", "else", ":", "partition_copy", "=", "self", ".", "partitions", "row_position_counter", "=", "0", "for", "row_idx", ",", "row_values", "in", "enumerate", "(", "row_partitions_list", ")", ":", "row_blk_idx", ",", "row_internal_idx", "=", "row_values", "col_position_counter", "=", "0", "for", "col_idx", ",", "col_values", "in", "enumerate", "(", "col_partitions_list", ")", ":", "col_blk_idx", ",", "col_internal_idx", "=", "col_values", "remote_part", "=", "partition_copy", "[", "row_blk_idx", ",", "col_blk_idx", "]", "if", "item_to_distribute", "is", "not", "None", ":", "item", "=", "item_to_distribute", "[", "row_position_counter", ":", "row_position_counter", "+", "len", "(", "row_internal_idx", ")", ",", "col_position_counter", ":", "col_position_counter", "+", "len", "(", "col_internal_idx", ")", ",", "]", "item", "=", "{", "\"item\"", ":", "item", "}", "else", ":", "item", "=", 
"{", "}", "if", "lazy", ":", "block_result", "=", "remote_part", ".", "add_to_apply_calls", "(", "func", ",", "row_internal_indices", "=", "row_internal_idx", ",", "col_internal_indices", "=", "col_internal_idx", ",", "*", "*", "item", ")", "else", ":", "block_result", "=", "remote_part", ".", "apply", "(", "func", ",", "row_internal_indices", "=", "row_internal_idx", ",", "col_internal_indices", "=", "col_internal_idx", ",", "*", "*", "item", ")", "if", "keep_remaining", ":", "partition_copy", "[", "row_blk_idx", ",", "col_blk_idx", "]", "=", "block_result", "else", ":", "result", "[", "row_idx", "]", "[", "col_idx", "]", "=", "block_result", "col_position_counter", "+=", "len", "(", "col_internal_idx", ")", "row_position_counter", "+=", "len", "(", "row_internal_idx", ")", "if", "keep_remaining", ":", "return", "self", ".", "__constructor__", "(", "partition_copy", ")", "else", ":", "return", "self", ".", "__constructor__", "(", "result", ")" ]
Apply a function to along both axis Important: For your func to operate directly on the indices provided, it must use `row_internal_indices, col_internal_indices` as keyword arguments.
[ "Apply", "a", "function", "to", "along", "both", "axis" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L907-L987
27,487
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.inter_data_operation
def inter_data_operation(self, axis, func, other): """Apply a function that requires two BaseFrameManager objects. Args: axis: The axis to apply the function over (0 - rows, 1 - columns) func: The function to apply other: The other BaseFrameManager object to apply func to. Returns: A new BaseFrameManager object, the type of object that called this. """ if axis: partitions = self.row_partitions other_partitions = other.row_partitions else: partitions = self.column_partitions other_partitions = other.column_partitions func = self.preprocess_func(func) result = np.array( [ partitions[i].apply( func, num_splits=self._compute_num_partitions(), other_axis_partition=other_partitions[i], ) for i in range(len(partitions)) ] ) return self.__constructor__(result) if axis else self.__constructor__(result.T)
python
def inter_data_operation(self, axis, func, other): """Apply a function that requires two BaseFrameManager objects. Args: axis: The axis to apply the function over (0 - rows, 1 - columns) func: The function to apply other: The other BaseFrameManager object to apply func to. Returns: A new BaseFrameManager object, the type of object that called this. """ if axis: partitions = self.row_partitions other_partitions = other.row_partitions else: partitions = self.column_partitions other_partitions = other.column_partitions func = self.preprocess_func(func) result = np.array( [ partitions[i].apply( func, num_splits=self._compute_num_partitions(), other_axis_partition=other_partitions[i], ) for i in range(len(partitions)) ] ) return self.__constructor__(result) if axis else self.__constructor__(result.T)
[ "def", "inter_data_operation", "(", "self", ",", "axis", ",", "func", ",", "other", ")", ":", "if", "axis", ":", "partitions", "=", "self", ".", "row_partitions", "other_partitions", "=", "other", ".", "row_partitions", "else", ":", "partitions", "=", "self", ".", "column_partitions", "other_partitions", "=", "other", ".", "column_partitions", "func", "=", "self", ".", "preprocess_func", "(", "func", ")", "result", "=", "np", ".", "array", "(", "[", "partitions", "[", "i", "]", ".", "apply", "(", "func", ",", "num_splits", "=", "self", ".", "_compute_num_partitions", "(", ")", ",", "other_axis_partition", "=", "other_partitions", "[", "i", "]", ",", ")", "for", "i", "in", "range", "(", "len", "(", "partitions", ")", ")", "]", ")", "return", "self", ".", "__constructor__", "(", "result", ")", "if", "axis", "else", "self", ".", "__constructor__", "(", "result", ".", "T", ")" ]
Apply a function that requires two BaseFrameManager objects. Args: axis: The axis to apply the function over (0 - rows, 1 - columns) func: The function to apply other: The other BaseFrameManager object to apply func to. Returns: A new BaseFrameManager object, the type of object that called this.
[ "Apply", "a", "function", "that", "requires", "two", "BaseFrameManager", "objects", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L989-L1017
27,488
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.manual_shuffle
def manual_shuffle(self, axis, shuffle_func, lengths): """Shuffle the partitions based on the `shuffle_func`. Args: axis: The axis to shuffle across. shuffle_func: The function to apply before splitting the result. lengths: The length of each partition to split the result into. Returns: A new BaseFrameManager object, the type of object that called this. """ if axis: partitions = self.row_partitions else: partitions = self.column_partitions func = self.preprocess_func(shuffle_func) result = np.array([part.shuffle(func, lengths) for part in partitions]) return self.__constructor__(result) if axis else self.__constructor__(result.T)
python
def manual_shuffle(self, axis, shuffle_func, lengths): """Shuffle the partitions based on the `shuffle_func`. Args: axis: The axis to shuffle across. shuffle_func: The function to apply before splitting the result. lengths: The length of each partition to split the result into. Returns: A new BaseFrameManager object, the type of object that called this. """ if axis: partitions = self.row_partitions else: partitions = self.column_partitions func = self.preprocess_func(shuffle_func) result = np.array([part.shuffle(func, lengths) for part in partitions]) return self.__constructor__(result) if axis else self.__constructor__(result.T)
[ "def", "manual_shuffle", "(", "self", ",", "axis", ",", "shuffle_func", ",", "lengths", ")", ":", "if", "axis", ":", "partitions", "=", "self", ".", "row_partitions", "else", ":", "partitions", "=", "self", ".", "column_partitions", "func", "=", "self", ".", "preprocess_func", "(", "shuffle_func", ")", "result", "=", "np", ".", "array", "(", "[", "part", ".", "shuffle", "(", "func", ",", "lengths", ")", "for", "part", "in", "partitions", "]", ")", "return", "self", ".", "__constructor__", "(", "result", ")", "if", "axis", "else", "self", ".", "__constructor__", "(", "result", ".", "T", ")" ]
Shuffle the partitions based on the `shuffle_func`. Args: axis: The axis to shuffle across. shuffle_func: The function to apply before splitting the result. lengths: The length of each partition to split the result into. Returns: A new BaseFrameManager object, the type of object that called this.
[ "Shuffle", "the", "partitions", "based", "on", "the", "shuffle_func", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L1019-L1036
27,489
modin-project/modin
modin/pandas/io.py
_make_parser_func
def _make_parser_func(sep): """Creates a parser function from the given sep. Args: sep: The separator default to use for the parser. Returns: A function object. """ def parser_func( filepath_or_buffer, sep=sep, delimiter=None, header="infer", names=None, index_col=None, usecols=None, squeeze=False, prefix=None, mangle_dupe_cols=True, dtype=None, engine=None, converters=None, true_values=None, false_values=None, skipinitialspace=False, skiprows=None, nrows=None, na_values=None, keep_default_na=True, na_filter=True, verbose=False, skip_blank_lines=True, parse_dates=False, infer_datetime_format=False, keep_date_col=False, date_parser=None, dayfirst=False, iterator=False, chunksize=None, compression="infer", thousands=None, decimal=b".", lineterminator=None, quotechar='"', quoting=0, escapechar=None, comment=None, encoding=None, dialect=None, tupleize_cols=None, error_bad_lines=True, warn_bad_lines=True, skipfooter=0, doublequote=True, delim_whitespace=False, low_memory=True, memory_map=False, float_precision=None, ): _, _, _, kwargs = inspect.getargvalues(inspect.currentframe()) if not kwargs.get("sep", sep): kwargs["sep"] = "\t" return _read(**kwargs) return parser_func
python
def _make_parser_func(sep): """Creates a parser function from the given sep. Args: sep: The separator default to use for the parser. Returns: A function object. """ def parser_func( filepath_or_buffer, sep=sep, delimiter=None, header="infer", names=None, index_col=None, usecols=None, squeeze=False, prefix=None, mangle_dupe_cols=True, dtype=None, engine=None, converters=None, true_values=None, false_values=None, skipinitialspace=False, skiprows=None, nrows=None, na_values=None, keep_default_na=True, na_filter=True, verbose=False, skip_blank_lines=True, parse_dates=False, infer_datetime_format=False, keep_date_col=False, date_parser=None, dayfirst=False, iterator=False, chunksize=None, compression="infer", thousands=None, decimal=b".", lineterminator=None, quotechar='"', quoting=0, escapechar=None, comment=None, encoding=None, dialect=None, tupleize_cols=None, error_bad_lines=True, warn_bad_lines=True, skipfooter=0, doublequote=True, delim_whitespace=False, low_memory=True, memory_map=False, float_precision=None, ): _, _, _, kwargs = inspect.getargvalues(inspect.currentframe()) if not kwargs.get("sep", sep): kwargs["sep"] = "\t" return _read(**kwargs) return parser_func
[ "def", "_make_parser_func", "(", "sep", ")", ":", "def", "parser_func", "(", "filepath_or_buffer", ",", "sep", "=", "sep", ",", "delimiter", "=", "None", ",", "header", "=", "\"infer\"", ",", "names", "=", "None", ",", "index_col", "=", "None", ",", "usecols", "=", "None", ",", "squeeze", "=", "False", ",", "prefix", "=", "None", ",", "mangle_dupe_cols", "=", "True", ",", "dtype", "=", "None", ",", "engine", "=", "None", ",", "converters", "=", "None", ",", "true_values", "=", "None", ",", "false_values", "=", "None", ",", "skipinitialspace", "=", "False", ",", "skiprows", "=", "None", ",", "nrows", "=", "None", ",", "na_values", "=", "None", ",", "keep_default_na", "=", "True", ",", "na_filter", "=", "True", ",", "verbose", "=", "False", ",", "skip_blank_lines", "=", "True", ",", "parse_dates", "=", "False", ",", "infer_datetime_format", "=", "False", ",", "keep_date_col", "=", "False", ",", "date_parser", "=", "None", ",", "dayfirst", "=", "False", ",", "iterator", "=", "False", ",", "chunksize", "=", "None", ",", "compression", "=", "\"infer\"", ",", "thousands", "=", "None", ",", "decimal", "=", "b\".\"", ",", "lineterminator", "=", "None", ",", "quotechar", "=", "'\"'", ",", "quoting", "=", "0", ",", "escapechar", "=", "None", ",", "comment", "=", "None", ",", "encoding", "=", "None", ",", "dialect", "=", "None", ",", "tupleize_cols", "=", "None", ",", "error_bad_lines", "=", "True", ",", "warn_bad_lines", "=", "True", ",", "skipfooter", "=", "0", ",", "doublequote", "=", "True", ",", "delim_whitespace", "=", "False", ",", "low_memory", "=", "True", ",", "memory_map", "=", "False", ",", "float_precision", "=", "None", ",", ")", ":", "_", ",", "_", ",", "_", ",", "kwargs", "=", "inspect", ".", "getargvalues", "(", "inspect", ".", "currentframe", "(", ")", ")", "if", "not", "kwargs", ".", "get", "(", "\"sep\"", ",", "sep", ")", ":", "kwargs", "[", "\"sep\"", "]", "=", "\"\\t\"", "return", "_read", "(", "*", "*", "kwargs", ")", "return", "parser_func" ]
Creates a parser function from the given sep. Args: sep: The separator default to use for the parser. Returns: A function object.
[ "Creates", "a", "parser", "function", "from", "the", "given", "sep", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/io.py#L35-L101
27,490
EpistasisLab/tpot
tpot/builtins/one_hot_encoder.py
auto_select_categorical_features
def auto_select_categorical_features(X, threshold=10): """Make a feature mask of categorical features in X. Features with less than 10 unique values are considered categorical. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. threshold : int Maximum number of unique values per feature to consider the feature to be categorical. Returns ------- feature_mask : array of booleans of size {n_features, } """ feature_mask = [] for column in range(X.shape[1]): if sparse.issparse(X): indptr_start = X.indptr[column] indptr_end = X.indptr[column + 1] unique = np.unique(X.data[indptr_start:indptr_end]) else: unique = np.unique(X[:, column]) feature_mask.append(len(unique) <= threshold) return feature_mask
python
def auto_select_categorical_features(X, threshold=10): """Make a feature mask of categorical features in X. Features with less than 10 unique values are considered categorical. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. threshold : int Maximum number of unique values per feature to consider the feature to be categorical. Returns ------- feature_mask : array of booleans of size {n_features, } """ feature_mask = [] for column in range(X.shape[1]): if sparse.issparse(X): indptr_start = X.indptr[column] indptr_end = X.indptr[column + 1] unique = np.unique(X.data[indptr_start:indptr_end]) else: unique = np.unique(X[:, column]) feature_mask.append(len(unique) <= threshold) return feature_mask
[ "def", "auto_select_categorical_features", "(", "X", ",", "threshold", "=", "10", ")", ":", "feature_mask", "=", "[", "]", "for", "column", "in", "range", "(", "X", ".", "shape", "[", "1", "]", ")", ":", "if", "sparse", ".", "issparse", "(", "X", ")", ":", "indptr_start", "=", "X", ".", "indptr", "[", "column", "]", "indptr_end", "=", "X", ".", "indptr", "[", "column", "+", "1", "]", "unique", "=", "np", ".", "unique", "(", "X", ".", "data", "[", "indptr_start", ":", "indptr_end", "]", ")", "else", ":", "unique", "=", "np", ".", "unique", "(", "X", "[", ":", ",", "column", "]", ")", "feature_mask", ".", "append", "(", "len", "(", "unique", ")", "<=", "threshold", ")", "return", "feature_mask" ]
Make a feature mask of categorical features in X. Features with less than 10 unique values are considered categorical. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. threshold : int Maximum number of unique values per feature to consider the feature to be categorical. Returns ------- feature_mask : array of booleans of size {n_features, }
[ "Make", "a", "feature", "mask", "of", "categorical", "features", "in", "X", "." ]
b626271e6b5896a73fb9d7d29bebc7aa9100772e
https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/builtins/one_hot_encoder.py#L45-L75
27,491
EpistasisLab/tpot
tpot/builtins/one_hot_encoder.py
_X_selected
def _X_selected(X, selected): """Split X into selected features and other features""" n_features = X.shape[1] ind = np.arange(n_features) sel = np.zeros(n_features, dtype=bool) sel[np.asarray(selected)] = True non_sel = np.logical_not(sel) n_selected = np.sum(sel) X_sel = X[:, ind[sel]] X_not_sel = X[:, ind[non_sel]] return X_sel, X_not_sel, n_selected, n_features
python
def _X_selected(X, selected): """Split X into selected features and other features""" n_features = X.shape[1] ind = np.arange(n_features) sel = np.zeros(n_features, dtype=bool) sel[np.asarray(selected)] = True non_sel = np.logical_not(sel) n_selected = np.sum(sel) X_sel = X[:, ind[sel]] X_not_sel = X[:, ind[non_sel]] return X_sel, X_not_sel, n_selected, n_features
[ "def", "_X_selected", "(", "X", ",", "selected", ")", ":", "n_features", "=", "X", ".", "shape", "[", "1", "]", "ind", "=", "np", ".", "arange", "(", "n_features", ")", "sel", "=", "np", ".", "zeros", "(", "n_features", ",", "dtype", "=", "bool", ")", "sel", "[", "np", ".", "asarray", "(", "selected", ")", "]", "=", "True", "non_sel", "=", "np", ".", "logical_not", "(", "sel", ")", "n_selected", "=", "np", ".", "sum", "(", "sel", ")", "X_sel", "=", "X", "[", ":", ",", "ind", "[", "sel", "]", "]", "X_not_sel", "=", "X", "[", ":", ",", "ind", "[", "non_sel", "]", "]", "return", "X_sel", ",", "X_not_sel", ",", "n_selected", ",", "n_features" ]
Split X into selected features and other features
[ "Split", "X", "into", "selected", "features", "and", "other", "features" ]
b626271e6b5896a73fb9d7d29bebc7aa9100772e
https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/builtins/one_hot_encoder.py#L78-L88
27,492
EpistasisLab/tpot
tpot/builtins/one_hot_encoder.py
_transform_selected
def _transform_selected(X, transform, selected, copy=True): """Apply a transform function to portion of selected features. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. transform : callable A callable transform(X) -> X_transformed copy : boolean, optional Copy X even if it could be avoided. selected: "all", "auto" or array of indices or mask Specify which features to apply the transform to. Returns ------- X : array or sparse matrix, shape=(n_samples, n_features_new) """ if selected == "all": return transform(X) if len(selected) == 0: return X X = check_array(X, accept_sparse='csc', force_all_finite=False) X_sel, X_not_sel, n_selected, n_features = _X_selected(X, selected) if n_selected == 0: # No features selected. return X elif n_selected == n_features: # All features selected. return transform(X) else: X_sel = transform(X_sel) if sparse.issparse(X_sel) or sparse.issparse(X_not_sel): return sparse.hstack((X_sel, X_not_sel), format='csr') else: return np.hstack((X_sel, X_not_sel))
python
def _transform_selected(X, transform, selected, copy=True): """Apply a transform function to portion of selected features. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. transform : callable A callable transform(X) -> X_transformed copy : boolean, optional Copy X even if it could be avoided. selected: "all", "auto" or array of indices or mask Specify which features to apply the transform to. Returns ------- X : array or sparse matrix, shape=(n_samples, n_features_new) """ if selected == "all": return transform(X) if len(selected) == 0: return X X = check_array(X, accept_sparse='csc', force_all_finite=False) X_sel, X_not_sel, n_selected, n_features = _X_selected(X, selected) if n_selected == 0: # No features selected. return X elif n_selected == n_features: # All features selected. return transform(X) else: X_sel = transform(X_sel) if sparse.issparse(X_sel) or sparse.issparse(X_not_sel): return sparse.hstack((X_sel, X_not_sel), format='csr') else: return np.hstack((X_sel, X_not_sel))
[ "def", "_transform_selected", "(", "X", ",", "transform", ",", "selected", ",", "copy", "=", "True", ")", ":", "if", "selected", "==", "\"all\"", ":", "return", "transform", "(", "X", ")", "if", "len", "(", "selected", ")", "==", "0", ":", "return", "X", "X", "=", "check_array", "(", "X", ",", "accept_sparse", "=", "'csc'", ",", "force_all_finite", "=", "False", ")", "X_sel", ",", "X_not_sel", ",", "n_selected", ",", "n_features", "=", "_X_selected", "(", "X", ",", "selected", ")", "if", "n_selected", "==", "0", ":", "# No features selected.", "return", "X", "elif", "n_selected", "==", "n_features", ":", "# All features selected.", "return", "transform", "(", "X", ")", "else", ":", "X_sel", "=", "transform", "(", "X_sel", ")", "if", "sparse", ".", "issparse", "(", "X_sel", ")", "or", "sparse", ".", "issparse", "(", "X_not_sel", ")", ":", "return", "sparse", ".", "hstack", "(", "(", "X_sel", ",", "X_not_sel", ")", ",", "format", "=", "'csr'", ")", "else", ":", "return", "np", ".", "hstack", "(", "(", "X_sel", ",", "X_not_sel", ")", ")" ]
Apply a transform function to portion of selected features. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. transform : callable A callable transform(X) -> X_transformed copy : boolean, optional Copy X even if it could be avoided. selected: "all", "auto" or array of indices or mask Specify which features to apply the transform to. Returns ------- X : array or sparse matrix, shape=(n_samples, n_features_new)
[ "Apply", "a", "transform", "function", "to", "portion", "of", "selected", "features", "." ]
b626271e6b5896a73fb9d7d29bebc7aa9100772e
https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/builtins/one_hot_encoder.py#L91-L133
27,493
EpistasisLab/tpot
tpot/builtins/one_hot_encoder.py
OneHotEncoder._matrix_adjust
def _matrix_adjust(self, X): """Adjust all values in X to encode for NaNs and infinities in the data. Parameters ---------- X : array-like, shape=(n_samples, n_feature) Input array of type int. Returns ------- X : array-like, shape=(n_samples, n_feature) Input array without any NaNs or infinities. """ data_matrix = X.data if sparse.issparse(X) else X # Shift all values to specially encode for NAN/infinity/OTHER and 0 # Old value New Value # --------- --------- # N (0..int_max) N + 3 # np.NaN 2 # infinity 2 # *other* 1 # # A value of 0 is reserved, as that is specially handled in sparse # matrices. data_matrix += len(SPARSE_ENCODINGS) + 1 data_matrix[~np.isfinite(data_matrix)] = SPARSE_ENCODINGS['NAN'] return X
python
def _matrix_adjust(self, X): """Adjust all values in X to encode for NaNs and infinities in the data. Parameters ---------- X : array-like, shape=(n_samples, n_feature) Input array of type int. Returns ------- X : array-like, shape=(n_samples, n_feature) Input array without any NaNs or infinities. """ data_matrix = X.data if sparse.issparse(X) else X # Shift all values to specially encode for NAN/infinity/OTHER and 0 # Old value New Value # --------- --------- # N (0..int_max) N + 3 # np.NaN 2 # infinity 2 # *other* 1 # # A value of 0 is reserved, as that is specially handled in sparse # matrices. data_matrix += len(SPARSE_ENCODINGS) + 1 data_matrix[~np.isfinite(data_matrix)] = SPARSE_ENCODINGS['NAN'] return X
[ "def", "_matrix_adjust", "(", "self", ",", "X", ")", ":", "data_matrix", "=", "X", ".", "data", "if", "sparse", ".", "issparse", "(", "X", ")", "else", "X", "# Shift all values to specially encode for NAN/infinity/OTHER and 0", "# Old value New Value", "# --------- ---------", "# N (0..int_max) N + 3", "# np.NaN 2", "# infinity 2", "# *other* 1", "#", "# A value of 0 is reserved, as that is specially handled in sparse", "# matrices.", "data_matrix", "+=", "len", "(", "SPARSE_ENCODINGS", ")", "+", "1", "data_matrix", "[", "~", "np", ".", "isfinite", "(", "data_matrix", ")", "]", "=", "SPARSE_ENCODINGS", "[", "'NAN'", "]", "return", "X" ]
Adjust all values in X to encode for NaNs and infinities in the data. Parameters ---------- X : array-like, shape=(n_samples, n_feature) Input array of type int. Returns ------- X : array-like, shape=(n_samples, n_feature) Input array without any NaNs or infinities.
[ "Adjust", "all", "values", "in", "X", "to", "encode", "for", "NaNs", "and", "infinities", "in", "the", "data", "." ]
b626271e6b5896a73fb9d7d29bebc7aa9100772e
https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/builtins/one_hot_encoder.py#L239-L267
27,494
EpistasisLab/tpot
tpot/builtins/one_hot_encoder.py
OneHotEncoder.fit_transform
def fit_transform(self, X, y=None): """Fit OneHotEncoder to X, then transform X. Equivalent to self.fit(X).transform(X), but more convenient and more efficient. See fit for the parameters, transform for the return value. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. y: array-like {n_samples,} (Optional, ignored) Feature labels """ if self.categorical_features == "auto": self.categorical_features = auto_select_categorical_features(X, threshold=self.threshold) return _transform_selected( X, self._fit_transform, self.categorical_features, copy=True )
python
def fit_transform(self, X, y=None): """Fit OneHotEncoder to X, then transform X. Equivalent to self.fit(X).transform(X), but more convenient and more efficient. See fit for the parameters, transform for the return value. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. y: array-like {n_samples,} (Optional, ignored) Feature labels """ if self.categorical_features == "auto": self.categorical_features = auto_select_categorical_features(X, threshold=self.threshold) return _transform_selected( X, self._fit_transform, self.categorical_features, copy=True )
[ "def", "fit_transform", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "if", "self", ".", "categorical_features", "==", "\"auto\"", ":", "self", ".", "categorical_features", "=", "auto_select_categorical_features", "(", "X", ",", "threshold", "=", "self", ".", "threshold", ")", "return", "_transform_selected", "(", "X", ",", "self", ".", "_fit_transform", ",", "self", ".", "categorical_features", ",", "copy", "=", "True", ")" ]
Fit OneHotEncoder to X, then transform X. Equivalent to self.fit(X).transform(X), but more convenient and more efficient. See fit for the parameters, transform for the return value. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. y: array-like {n_samples,} (Optional, ignored) Feature labels
[ "Fit", "OneHotEncoder", "to", "X", "then", "transform", "X", "." ]
b626271e6b5896a73fb9d7d29bebc7aa9100772e
https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/builtins/one_hot_encoder.py#L376-L397
27,495
EpistasisLab/tpot
tpot/builtins/one_hot_encoder.py
OneHotEncoder.transform
def transform(self, X): """Transform X using one-hot encoding. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. Returns ------- X_out : sparse matrix if sparse=True else a 2-d array, dtype=int Transformed input. """ return _transform_selected( X, self._transform, self.categorical_features, copy=True )
python
def transform(self, X): """Transform X using one-hot encoding. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. Returns ------- X_out : sparse matrix if sparse=True else a 2-d array, dtype=int Transformed input. """ return _transform_selected( X, self._transform, self.categorical_features, copy=True )
[ "def", "transform", "(", "self", ",", "X", ")", ":", "return", "_transform_selected", "(", "X", ",", "self", ".", "_transform", ",", "self", ".", "categorical_features", ",", "copy", "=", "True", ")" ]
Transform X using one-hot encoding. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. Returns ------- X_out : sparse matrix if sparse=True else a 2-d array, dtype=int Transformed input.
[ "Transform", "X", "using", "one", "-", "hot", "encoding", "." ]
b626271e6b5896a73fb9d7d29bebc7aa9100772e
https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/builtins/one_hot_encoder.py#L481-L498
27,496
EpistasisLab/tpot
tpot/base.py
TPOTBase._setup_memory
def _setup_memory(self): """Setup Memory object for memory caching. """ if self.memory: if isinstance(self.memory, str): if self.memory == "auto": # Create a temporary folder to store the transformers of the pipeline self._cachedir = mkdtemp() else: if not os.path.isdir(self.memory): try: os.makedirs(self.memory) except: raise ValueError( 'Could not create directory for memory caching: {}'.format(self.memory) ) self._cachedir = self.memory self._memory = Memory(cachedir=self._cachedir, verbose=0) elif isinstance(self.memory, Memory): self._memory = self.memory else: raise ValueError( 'Could not recognize Memory object for pipeline caching. ' 'Please provide an instance of sklearn.external.joblib.Memory,' ' a path to a directory on your system, or \"auto\".' )
python
def _setup_memory(self): """Setup Memory object for memory caching. """ if self.memory: if isinstance(self.memory, str): if self.memory == "auto": # Create a temporary folder to store the transformers of the pipeline self._cachedir = mkdtemp() else: if not os.path.isdir(self.memory): try: os.makedirs(self.memory) except: raise ValueError( 'Could not create directory for memory caching: {}'.format(self.memory) ) self._cachedir = self.memory self._memory = Memory(cachedir=self._cachedir, verbose=0) elif isinstance(self.memory, Memory): self._memory = self.memory else: raise ValueError( 'Could not recognize Memory object for pipeline caching. ' 'Please provide an instance of sklearn.external.joblib.Memory,' ' a path to a directory on your system, or \"auto\".' )
[ "def", "_setup_memory", "(", "self", ")", ":", "if", "self", ".", "memory", ":", "if", "isinstance", "(", "self", ".", "memory", ",", "str", ")", ":", "if", "self", ".", "memory", "==", "\"auto\"", ":", "# Create a temporary folder to store the transformers of the pipeline", "self", ".", "_cachedir", "=", "mkdtemp", "(", ")", "else", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "self", ".", "memory", ")", ":", "try", ":", "os", ".", "makedirs", "(", "self", ".", "memory", ")", "except", ":", "raise", "ValueError", "(", "'Could not create directory for memory caching: {}'", ".", "format", "(", "self", ".", "memory", ")", ")", "self", ".", "_cachedir", "=", "self", ".", "memory", "self", ".", "_memory", "=", "Memory", "(", "cachedir", "=", "self", ".", "_cachedir", ",", "verbose", "=", "0", ")", "elif", "isinstance", "(", "self", ".", "memory", ",", "Memory", ")", ":", "self", ".", "_memory", "=", "self", ".", "memory", "else", ":", "raise", "ValueError", "(", "'Could not recognize Memory object for pipeline caching. '", "'Please provide an instance of sklearn.external.joblib.Memory,'", "' a path to a directory on your system, or \\\"auto\\\".'", ")" ]
Setup Memory object for memory caching.
[ "Setup", "Memory", "object", "for", "memory", "caching", "." ]
b626271e6b5896a73fb9d7d29bebc7aa9100772e
https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/base.py#L783-L809
27,497
EpistasisLab/tpot
tpot/base.py
TPOTBase._update_top_pipeline
def _update_top_pipeline(self): """Helper function to update the _optimized_pipeline field.""" # Store the pipeline with the highest internal testing score if self._pareto_front: self._optimized_pipeline_score = -float('inf') for pipeline, pipeline_scores in zip(self._pareto_front.items, reversed(self._pareto_front.keys)): if pipeline_scores.wvalues[1] > self._optimized_pipeline_score: self._optimized_pipeline = pipeline self._optimized_pipeline_score = pipeline_scores.wvalues[1] if not self._optimized_pipeline: raise RuntimeError('There was an error in the TPOT optimization ' 'process. This could be because the data was ' 'not formatted properly, or because data for ' 'a regression problem was provided to the ' 'TPOTClassifier object. Please make sure you ' 'passed the data to TPOT correctly.') else: pareto_front_wvalues = [pipeline_scores.wvalues[1] for pipeline_scores in self._pareto_front.keys] if not self._last_optimized_pareto_front: self._last_optimized_pareto_front = pareto_front_wvalues elif self._last_optimized_pareto_front == pareto_front_wvalues: self._last_optimized_pareto_front_n_gens += 1 else: self._last_optimized_pareto_front = pareto_front_wvalues self._last_optimized_pareto_front_n_gens = 0 else: # If user passes CTRL+C in initial generation, self._pareto_front (halloffame) shoule be not updated yet. # need raise RuntimeError because no pipeline has been optimized raise RuntimeError('A pipeline has not yet been optimized. Please call fit() first.')
python
def _update_top_pipeline(self): """Helper function to update the _optimized_pipeline field.""" # Store the pipeline with the highest internal testing score if self._pareto_front: self._optimized_pipeline_score = -float('inf') for pipeline, pipeline_scores in zip(self._pareto_front.items, reversed(self._pareto_front.keys)): if pipeline_scores.wvalues[1] > self._optimized_pipeline_score: self._optimized_pipeline = pipeline self._optimized_pipeline_score = pipeline_scores.wvalues[1] if not self._optimized_pipeline: raise RuntimeError('There was an error in the TPOT optimization ' 'process. This could be because the data was ' 'not formatted properly, or because data for ' 'a regression problem was provided to the ' 'TPOTClassifier object. Please make sure you ' 'passed the data to TPOT correctly.') else: pareto_front_wvalues = [pipeline_scores.wvalues[1] for pipeline_scores in self._pareto_front.keys] if not self._last_optimized_pareto_front: self._last_optimized_pareto_front = pareto_front_wvalues elif self._last_optimized_pareto_front == pareto_front_wvalues: self._last_optimized_pareto_front_n_gens += 1 else: self._last_optimized_pareto_front = pareto_front_wvalues self._last_optimized_pareto_front_n_gens = 0 else: # If user passes CTRL+C in initial generation, self._pareto_front (halloffame) shoule be not updated yet. # need raise RuntimeError because no pipeline has been optimized raise RuntimeError('A pipeline has not yet been optimized. Please call fit() first.')
[ "def", "_update_top_pipeline", "(", "self", ")", ":", "# Store the pipeline with the highest internal testing score", "if", "self", ".", "_pareto_front", ":", "self", ".", "_optimized_pipeline_score", "=", "-", "float", "(", "'inf'", ")", "for", "pipeline", ",", "pipeline_scores", "in", "zip", "(", "self", ".", "_pareto_front", ".", "items", ",", "reversed", "(", "self", ".", "_pareto_front", ".", "keys", ")", ")", ":", "if", "pipeline_scores", ".", "wvalues", "[", "1", "]", ">", "self", ".", "_optimized_pipeline_score", ":", "self", ".", "_optimized_pipeline", "=", "pipeline", "self", ".", "_optimized_pipeline_score", "=", "pipeline_scores", ".", "wvalues", "[", "1", "]", "if", "not", "self", ".", "_optimized_pipeline", ":", "raise", "RuntimeError", "(", "'There was an error in the TPOT optimization '", "'process. This could be because the data was '", "'not formatted properly, or because data for '", "'a regression problem was provided to the '", "'TPOTClassifier object. Please make sure you '", "'passed the data to TPOT correctly.'", ")", "else", ":", "pareto_front_wvalues", "=", "[", "pipeline_scores", ".", "wvalues", "[", "1", "]", "for", "pipeline_scores", "in", "self", ".", "_pareto_front", ".", "keys", "]", "if", "not", "self", ".", "_last_optimized_pareto_front", ":", "self", ".", "_last_optimized_pareto_front", "=", "pareto_front_wvalues", "elif", "self", ".", "_last_optimized_pareto_front", "==", "pareto_front_wvalues", ":", "self", ".", "_last_optimized_pareto_front_n_gens", "+=", "1", "else", ":", "self", ".", "_last_optimized_pareto_front", "=", "pareto_front_wvalues", "self", ".", "_last_optimized_pareto_front_n_gens", "=", "0", "else", ":", "# If user passes CTRL+C in initial generation, self._pareto_front (halloffame) shoule be not updated yet.", "# need raise RuntimeError because no pipeline has been optimized", "raise", "RuntimeError", "(", "'A pipeline has not yet been optimized. Please call fit() first.'", ")" ]
Helper function to update the _optimized_pipeline field.
[ "Helper", "function", "to", "update", "the", "_optimized_pipeline", "field", "." ]
b626271e6b5896a73fb9d7d29bebc7aa9100772e
https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/base.py#L819-L848
27,498
EpistasisLab/tpot
tpot/base.py
TPOTBase._summary_of_best_pipeline
def _summary_of_best_pipeline(self, features, target): """Print out best pipeline at the end of optimization process. Parameters ---------- features: array-like {n_samples, n_features} Feature matrix target: array-like {n_samples} List of class labels for prediction Returns ------- self: object Returns a copy of the fitted TPOT object """ if not self._optimized_pipeline: raise RuntimeError('There was an error in the TPOT optimization ' 'process. This could be because the data was ' 'not formatted properly, or because data for ' 'a regression problem was provided to the ' 'TPOTClassifier object. Please make sure you ' 'passed the data to TPOT correctly.') else: self.fitted_pipeline_ = self._toolbox.compile(expr=self._optimized_pipeline) with warnings.catch_warnings(): warnings.simplefilter('ignore') self.fitted_pipeline_.fit(features, target) if self.verbosity in [1, 2]: # Add an extra line of spacing if the progress bar was used if self.verbosity >= 2: print('') optimized_pipeline_str = self.clean_pipeline_string(self._optimized_pipeline) print('Best pipeline:', optimized_pipeline_str) # Store and fit the entire Pareto front as fitted models for convenience self.pareto_front_fitted_pipelines_ = {} for pipeline in self._pareto_front.items: self.pareto_front_fitted_pipelines_[str(pipeline)] = self._toolbox.compile(expr=pipeline) with warnings.catch_warnings(): warnings.simplefilter('ignore') self.pareto_front_fitted_pipelines_[str(pipeline)].fit(features, target)
python
def _summary_of_best_pipeline(self, features, target): """Print out best pipeline at the end of optimization process. Parameters ---------- features: array-like {n_samples, n_features} Feature matrix target: array-like {n_samples} List of class labels for prediction Returns ------- self: object Returns a copy of the fitted TPOT object """ if not self._optimized_pipeline: raise RuntimeError('There was an error in the TPOT optimization ' 'process. This could be because the data was ' 'not formatted properly, or because data for ' 'a regression problem was provided to the ' 'TPOTClassifier object. Please make sure you ' 'passed the data to TPOT correctly.') else: self.fitted_pipeline_ = self._toolbox.compile(expr=self._optimized_pipeline) with warnings.catch_warnings(): warnings.simplefilter('ignore') self.fitted_pipeline_.fit(features, target) if self.verbosity in [1, 2]: # Add an extra line of spacing if the progress bar was used if self.verbosity >= 2: print('') optimized_pipeline_str = self.clean_pipeline_string(self._optimized_pipeline) print('Best pipeline:', optimized_pipeline_str) # Store and fit the entire Pareto front as fitted models for convenience self.pareto_front_fitted_pipelines_ = {} for pipeline in self._pareto_front.items: self.pareto_front_fitted_pipelines_[str(pipeline)] = self._toolbox.compile(expr=pipeline) with warnings.catch_warnings(): warnings.simplefilter('ignore') self.pareto_front_fitted_pipelines_[str(pipeline)].fit(features, target)
[ "def", "_summary_of_best_pipeline", "(", "self", ",", "features", ",", "target", ")", ":", "if", "not", "self", ".", "_optimized_pipeline", ":", "raise", "RuntimeError", "(", "'There was an error in the TPOT optimization '", "'process. This could be because the data was '", "'not formatted properly, or because data for '", "'a regression problem was provided to the '", "'TPOTClassifier object. Please make sure you '", "'passed the data to TPOT correctly.'", ")", "else", ":", "self", ".", "fitted_pipeline_", "=", "self", ".", "_toolbox", ".", "compile", "(", "expr", "=", "self", ".", "_optimized_pipeline", ")", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "'ignore'", ")", "self", ".", "fitted_pipeline_", ".", "fit", "(", "features", ",", "target", ")", "if", "self", ".", "verbosity", "in", "[", "1", ",", "2", "]", ":", "# Add an extra line of spacing if the progress bar was used", "if", "self", ".", "verbosity", ">=", "2", ":", "print", "(", "''", ")", "optimized_pipeline_str", "=", "self", ".", "clean_pipeline_string", "(", "self", ".", "_optimized_pipeline", ")", "print", "(", "'Best pipeline:'", ",", "optimized_pipeline_str", ")", "# Store and fit the entire Pareto front as fitted models for convenience", "self", ".", "pareto_front_fitted_pipelines_", "=", "{", "}", "for", "pipeline", "in", "self", ".", "_pareto_front", ".", "items", ":", "self", ".", "pareto_front_fitted_pipelines_", "[", "str", "(", "pipeline", ")", "]", "=", "self", ".", "_toolbox", ".", "compile", "(", "expr", "=", "pipeline", ")", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "'ignore'", ")", "self", ".", "pareto_front_fitted_pipelines_", "[", "str", "(", "pipeline", ")", "]", ".", "fit", "(", "features", ",", "target", ")" ]
Print out best pipeline at the end of optimization process. Parameters ---------- features: array-like {n_samples, n_features} Feature matrix target: array-like {n_samples} List of class labels for prediction Returns ------- self: object Returns a copy of the fitted TPOT object
[ "Print", "out", "best", "pipeline", "at", "the", "end", "of", "optimization", "process", "." ]
b626271e6b5896a73fb9d7d29bebc7aa9100772e
https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/base.py#L850-L895
27,499
EpistasisLab/tpot
tpot/base.py
TPOTBase.predict
def predict(self, features): """Use the optimized pipeline to predict the target for a feature set. Parameters ---------- features: array-like {n_samples, n_features} Feature matrix Returns ---------- array-like: {n_samples} Predicted target for the samples in the feature matrix """ if not self.fitted_pipeline_: raise RuntimeError('A pipeline has not yet been optimized. Please call fit() first.') features = self._check_dataset(features, target=None, sample_weight=None) return self.fitted_pipeline_.predict(features)
python
def predict(self, features): """Use the optimized pipeline to predict the target for a feature set. Parameters ---------- features: array-like {n_samples, n_features} Feature matrix Returns ---------- array-like: {n_samples} Predicted target for the samples in the feature matrix """ if not self.fitted_pipeline_: raise RuntimeError('A pipeline has not yet been optimized. Please call fit() first.') features = self._check_dataset(features, target=None, sample_weight=None) return self.fitted_pipeline_.predict(features)
[ "def", "predict", "(", "self", ",", "features", ")", ":", "if", "not", "self", ".", "fitted_pipeline_", ":", "raise", "RuntimeError", "(", "'A pipeline has not yet been optimized. Please call fit() first.'", ")", "features", "=", "self", ".", "_check_dataset", "(", "features", ",", "target", "=", "None", ",", "sample_weight", "=", "None", ")", "return", "self", ".", "fitted_pipeline_", ".", "predict", "(", "features", ")" ]
Use the optimized pipeline to predict the target for a feature set. Parameters ---------- features: array-like {n_samples, n_features} Feature matrix Returns ---------- array-like: {n_samples} Predicted target for the samples in the feature matrix
[ "Use", "the", "optimized", "pipeline", "to", "predict", "the", "target", "for", "a", "feature", "set", "." ]
b626271e6b5896a73fb9d7d29bebc7aa9100772e
https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/base.py#L897-L916