Dataset columns (name, dtype, observed value range or string length):

column            dtype    range
id                int32    values 0 to 252k
repo              string   lengths 7 to 55
path              string   lengths 4 to 127
func_name         string   lengths 1 to 88
original_string   string   lengths 75 to 19.8k
language          string   1 distinct value
code              string   lengths 75 to 19.8k
code_tokens       list     -
docstring         string   lengths 3 to 17.3k
docstring_tokens  list     -
sha               string   length 40 (fixed)
url               string   lengths 87 to 242
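For orientation, here is a minimal sketch of how records with this column layout could be consumed. It assumes the rows are exported as JSON Lines (one object per line); the filename rows.jsonl and the grouping by repo are illustrative assumptions, not part of the dataset.

```python
import json

# Group the preview rows by repository and list the documented functions.
# "rows.jsonl" is a hypothetical export path; the field names used below
# (repo, func_name, docstring) come from the column schema above.
by_repo = {}
with open("rows.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        summary = row["docstring"].splitlines()[0] if row["docstring"] else ""
        by_repo.setdefault(row["repo"], []).append((row["func_name"], summary))

for repo, funcs in sorted(by_repo.items()):
    print(repo)
    for name, summary in funcs:
        print("  {:<40} {}".format(name, summary))
```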
7,900
robmarkcole/HASS-data-detective
detective/config.py
_stub_tag
def _stub_tag(constructor, node):
    """Stub a constructor with a dictionary."""
    seen = getattr(constructor, "_stub_seen", None)

    if seen is None:
        seen = constructor._stub_seen = set()

    if node.tag not in seen:
        print("YAML tag {} is not supported".format(node.tag))
        seen.add(node.tag)

    return {}
python
def _stub_tag(constructor, node):
    """Stub a constructor with a dictionary."""
    seen = getattr(constructor, "_stub_seen", None)

    if seen is None:
        seen = constructor._stub_seen = set()

    if node.tag not in seen:
        print("YAML tag {} is not supported".format(node.tag))
        seen.add(node.tag)

    return {}
[ "def", "_stub_tag", "(", "constructor", ",", "node", ")", ":", "seen", "=", "getattr", "(", "constructor", ",", "\"_stub_seen\"", ",", "None", ")", "if", "seen", "is", "None", ":", "seen", "=", "constructor", ".", "_stub_seen", "=", "set", "(", ")", "if", "node", ".", "tag", "not", "in", "seen", ":", "print", "(", "\"YAML tag {} is not supported\"", ".", "format", "(", "node", ".", "tag", ")", ")", "seen", ".", "add", "(", "node", ".", "tag", ")", "return", "{", "}" ]
Stub a constructor with a dictionary.
[ "Stub", "a", "constructor", "with", "a", "dictionary", "." ]
f67dfde9dd63a3af411944d1857b0835632617c5
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/config.py#L61-L72
7,901
robmarkcole/HASS-data-detective
detective/config.py
load_yaml
def load_yaml(fname):
    """Load a YAML file."""
    yaml = YAML(typ="safe")
    # Compat with HASS
    yaml.allow_duplicate_keys = True
    # Stub HASS constructors
    HassSafeConstructor.name = fname
    yaml.Constructor = HassSafeConstructor

    with open(fname, encoding="utf-8") as conf_file:
        # If configuration file is empty YAML returns None
        # We convert that to an empty dict
        return yaml.load(conf_file) or {}
python
def load_yaml(fname):
    """Load a YAML file."""
    yaml = YAML(typ="safe")
    # Compat with HASS
    yaml.allow_duplicate_keys = True
    # Stub HASS constructors
    HassSafeConstructor.name = fname
    yaml.Constructor = HassSafeConstructor

    with open(fname, encoding="utf-8") as conf_file:
        # If configuration file is empty YAML returns None
        # We convert that to an empty dict
        return yaml.load(conf_file) or {}
[ "def", "load_yaml", "(", "fname", ")", ":", "yaml", "=", "YAML", "(", "typ", "=", "\"safe\"", ")", "# Compat with HASS", "yaml", ".", "allow_duplicate_keys", "=", "True", "# Stub HASS constructors", "HassSafeConstructor", ".", "name", "=", "fname", "yaml", ".", "Constructor", "=", "HassSafeConstructor", "with", "open", "(", "fname", ",", "encoding", "=", "\"utf-8\"", ")", "as", "conf_file", ":", "# If configuration file is empty YAML returns None", "# We convert that to an empty dict", "return", "yaml", ".", "load", "(", "conf_file", ")", "or", "{", "}" ]
Load a YAML file.
[ "Load", "a", "YAML", "file", "." ]
f67dfde9dd63a3af411944d1857b0835632617c5
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/config.py#L89-L101
7,902
robmarkcole/HASS-data-detective
detective/config.py
db_url_from_hass_config
def db_url_from_hass_config(path):
    """Find the recorder database url from a HASS config dir."""
    config = load_hass_config(path)
    default_path = os.path.join(path, "home-assistant_v2.db")
    default_url = "sqlite:///{}".format(default_path)

    recorder = config.get("recorder")
    if recorder:
        db_url = recorder.get("db_url")
        if db_url is not None:
            return db_url

    if not os.path.isfile(default_path):
        raise ValueError(
            "Unable to determine DB url from hass config at {}".format(path)
        )

    return default_url
python
def db_url_from_hass_config(path):
    """Find the recorder database url from a HASS config dir."""
    config = load_hass_config(path)
    default_path = os.path.join(path, "home-assistant_v2.db")
    default_url = "sqlite:///{}".format(default_path)

    recorder = config.get("recorder")
    if recorder:
        db_url = recorder.get("db_url")
        if db_url is not None:
            return db_url

    if not os.path.isfile(default_path):
        raise ValueError(
            "Unable to determine DB url from hass config at {}".format(path)
        )

    return default_url
[ "def", "db_url_from_hass_config", "(", "path", ")", ":", "config", "=", "load_hass_config", "(", "path", ")", "default_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "\"home-assistant_v2.db\"", ")", "default_url", "=", "\"sqlite:///{}\"", ".", "format", "(", "default_path", ")", "recorder", "=", "config", ".", "get", "(", "\"recorder\"", ")", "if", "recorder", ":", "db_url", "=", "recorder", ".", "get", "(", "\"db_url\"", ")", "if", "db_url", "is", "not", "None", ":", "return", "db_url", "if", "not", "os", ".", "path", ".", "isfile", "(", "default_path", ")", ":", "raise", "ValueError", "(", "\"Unable to determine DB url from hass config at {}\"", ".", "format", "(", "path", ")", ")", "return", "default_url" ]
Find the recorder database url from a HASS config dir.
[ "Find", "the", "recorder", "database", "url", "from", "a", "HASS", "config", "dir", "." ]
f67dfde9dd63a3af411944d1857b0835632617c5
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/config.py#L104-L122
7,903
robmarkcole/HASS-data-detective
detective/time.py
localize
def localize(dt):
    """Localize a datetime object to local time."""
    if dt.tzinfo is UTC:
        return (dt + LOCAL_UTC_OFFSET).replace(tzinfo=None)
    # No TZ info so not going to assume anything, return as-is.
    return dt
python
def localize(dt):
    """Localize a datetime object to local time."""
    if dt.tzinfo is UTC:
        return (dt + LOCAL_UTC_OFFSET).replace(tzinfo=None)
    # No TZ info so not going to assume anything, return as-is.
    return dt
[ "def", "localize", "(", "dt", ")", ":", "if", "dt", ".", "tzinfo", "is", "UTC", ":", "return", "(", "dt", "+", "LOCAL_UTC_OFFSET", ")", ".", "replace", "(", "tzinfo", "=", "None", ")", "# No TZ info so not going to assume anything, return as-is.", "return", "dt" ]
Localize a datetime object to local time.
[ "Localize", "a", "datetime", "object", "to", "local", "time", "." ]
f67dfde9dd63a3af411944d1857b0835632617c5
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/time.py#L19-L24
7,904
robmarkcole/HASS-data-detective
detective/time.py
sqlalch_datetime
def sqlalch_datetime(dt):
    """Convert a SQLAlchemy datetime string to a datetime object."""
    if isinstance(dt, str):
        return datetime.strptime(dt, "%Y-%m-%d %H:%M:%S.%f").replace(tzinfo=UTC)
    if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None:
        return dt.astimezone(UTC)
    return dt.replace(tzinfo=UTC)
python
def sqlalch_datetime(dt):
    """Convert a SQLAlchemy datetime string to a datetime object."""
    if isinstance(dt, str):
        return datetime.strptime(dt, "%Y-%m-%d %H:%M:%S.%f").replace(tzinfo=UTC)
    if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None:
        return dt.astimezone(UTC)
    return dt.replace(tzinfo=UTC)
[ "def", "sqlalch_datetime", "(", "dt", ")", ":", "if", "isinstance", "(", "dt", ",", "str", ")", ":", "return", "datetime", ".", "strptime", "(", "dt", ",", "\"%Y-%m-%d %H:%M:%S.%f\"", ")", ".", "replace", "(", "tzinfo", "=", "UTC", ")", "if", "dt", ".", "tzinfo", "is", "not", "None", "and", "dt", ".", "tzinfo", ".", "utcoffset", "(", "dt", ")", "is", "not", "None", ":", "return", "dt", ".", "astimezone", "(", "UTC", ")", "return", "dt", ".", "replace", "(", "tzinfo", "=", "UTC", ")" ]
Convert a SQLAlchemy datetime string to a datetime object.
[ "Convert", "a", "SQLAlchemy", "datetime", "string", "to", "a", "datetime", "object", "." ]
f67dfde9dd63a3af411944d1857b0835632617c5
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/time.py#L44-L50
7,905
robmarkcole/HASS-data-detective
detective/core.py
db_from_hass_config
def db_from_hass_config(path=None, **kwargs):
    """Initialize a database from HASS config."""
    if path is None:
        path = config.find_hass_config()

    url = config.db_url_from_hass_config(path)
    return HassDatabase(url, **kwargs)
python
def db_from_hass_config(path=None, **kwargs):
    """Initialize a database from HASS config."""
    if path is None:
        path = config.find_hass_config()

    url = config.db_url_from_hass_config(path)
    return HassDatabase(url, **kwargs)
[ "def", "db_from_hass_config", "(", "path", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "path", "is", "None", ":", "path", "=", "config", ".", "find_hass_config", "(", ")", "url", "=", "config", ".", "db_url_from_hass_config", "(", "path", ")", "return", "HassDatabase", "(", "url", ",", "*", "*", "kwargs", ")" ]
Initialize a database from HASS config.
[ "Initialize", "a", "database", "from", "HASS", "config", "." ]
f67dfde9dd63a3af411944d1857b0835632617c5
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/core.py#L14-L20
7,906
robmarkcole/HASS-data-detective
detective/core.py
stripped_db_url
def stripped_db_url(url):
    """Return a version of the DB url with the password stripped out."""
    parsed = urlparse(url)

    if parsed.password is None:
        return url

    return parsed._replace(
        netloc="{}:***@{}".format(parsed.username, parsed.hostname)
    ).geturl()
python
def stripped_db_url(url):
    """Return a version of the DB url with the password stripped out."""
    parsed = urlparse(url)

    if parsed.password is None:
        return url

    return parsed._replace(
        netloc="{}:***@{}".format(parsed.username, parsed.hostname)
    ).geturl()
[ "def", "stripped_db_url", "(", "url", ")", ":", "parsed", "=", "urlparse", "(", "url", ")", "if", "parsed", ".", "password", "is", "None", ":", "return", "url", "return", "parsed", ".", "_replace", "(", "netloc", "=", "\"{}:***@{}\"", ".", "format", "(", "parsed", ".", "username", ",", "parsed", ".", "hostname", ")", ")", ".", "geturl", "(", ")" ]
Return a version of the DB url with the password stripped out.
[ "Return", "a", "version", "of", "the", "DB", "url", "with", "the", "password", "stripped", "out", "." ]
f67dfde9dd63a3af411944d1857b0835632617c5
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/core.py#L27-L36
7,907
robmarkcole/HASS-data-detective
detective/core.py
HassDatabase.perform_query
def perform_query(self, query, **params):
    """Perform a query, where query is a string."""
    try:
        return self.engine.execute(query, params)
    except:
        print("Error with query: {}".format(query))
        raise
python
def perform_query(self, query, **params):
    """Perform a query, where query is a string."""
    try:
        return self.engine.execute(query, params)
    except:
        print("Error with query: {}".format(query))
        raise
[ "def", "perform_query", "(", "self", ",", "query", ",", "*", "*", "params", ")", ":", "try", ":", "return", "self", ".", "engine", ".", "execute", "(", "query", ",", "params", ")", "except", ":", "print", "(", "\"Error with query: {}\"", ".", "format", "(", "query", ")", ")", "raise" ]
Perform a query, where query is a string.
[ "Perform", "a", "query", "where", "query", "is", "a", "string", "." ]
f67dfde9dd63a3af411944d1857b0835632617c5
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/core.py#L74-L80
7,908
robmarkcole/HASS-data-detective
detective/core.py
HassDatabase.fetch_entities
def fetch_entities(self):
    """Fetch entities for which we have data."""
    query = text(
        """
        SELECT entity_id
        FROM states
        GROUP BY entity_id
        """
    )
    response = self.perform_query(query)

    # Parse the domains from the entities.
    entities = {}
    domains = set()
    for [entity] in response:
        domain = entity.split(".")[0]
        domains.add(domain)
        entities.setdefault(domain, []).append(entity)

    self._domains = list(domains)
    self._entities = entities
    print("There are {} entities with data".format(len(entities)))
python
def fetch_entities(self):
    """Fetch entities for which we have data."""
    query = text(
        """
        SELECT entity_id
        FROM states
        GROUP BY entity_id
        """
    )
    response = self.perform_query(query)

    # Parse the domains from the entities.
    entities = {}
    domains = set()
    for [entity] in response:
        domain = entity.split(".")[0]
        domains.add(domain)
        entities.setdefault(domain, []).append(entity)

    self._domains = list(domains)
    self._entities = entities
    print("There are {} entities with data".format(len(entities)))
[ "def", "fetch_entities", "(", "self", ")", ":", "query", "=", "text", "(", "\"\"\"\n SELECT entity_id\n FROM states\n GROUP BY entity_id\n \"\"\"", ")", "response", "=", "self", ".", "perform_query", "(", "query", ")", "# Parse the domains from the entities.", "entities", "=", "{", "}", "domains", "=", "set", "(", ")", "for", "[", "entity", "]", "in", "response", ":", "domain", "=", "entity", ".", "split", "(", "\".\"", ")", "[", "0", "]", "domains", ".", "add", "(", "domain", ")", "entities", ".", "setdefault", "(", "domain", ",", "[", "]", ")", ".", "append", "(", "entity", ")", "self", ".", "_domains", "=", "list", "(", "domains", ")", "self", ".", "_entities", "=", "entities", "print", "(", "\"There are {} entities with data\"", ".", "format", "(", "len", "(", "entities", ")", ")", ")" ]
Fetch entities for which we have data.
[ "Fetch", "entities", "for", "which", "we", "have", "data", "." ]
f67dfde9dd63a3af411944d1857b0835632617c5
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/core.py#L82-L104
7,909
robmarkcole/HASS-data-detective
detective/core.py
HassDatabase.fetch_all_data
def fetch_all_data(self, limit=50000):
    """
    Fetch data for all entities.
    """
    # Query text
    query = text(
        """
        SELECT domain, entity_id, state, last_changed
        FROM states
        WHERE
            state NOT IN ('unknown', 'unavailable')
        ORDER BY last_changed DESC
        LIMIT :limit
        """
    )
    try:
        print("Querying the database, this could take a while")
        response = self.perform_query(query, limit=limit)
        master_df = pd.DataFrame(response.fetchall())
        print("master_df created successfully.")
        self._master_df = master_df.copy()
        self.parse_all_data()
    except:
        raise ValueError("Error querying the database.")
python
def fetch_all_data(self, limit=50000):
    """
    Fetch data for all entities.
    """
    # Query text
    query = text(
        """
        SELECT domain, entity_id, state, last_changed
        FROM states
        WHERE
            state NOT IN ('unknown', 'unavailable')
        ORDER BY last_changed DESC
        LIMIT :limit
        """
    )
    try:
        print("Querying the database, this could take a while")
        response = self.perform_query(query, limit=limit)
        master_df = pd.DataFrame(response.fetchall())
        print("master_df created successfully.")
        self._master_df = master_df.copy()
        self.parse_all_data()
    except:
        raise ValueError("Error querying the database.")
[ "def", "fetch_all_data", "(", "self", ",", "limit", "=", "50000", ")", ":", "# Query text", "query", "=", "text", "(", "\"\"\"\n SELECT domain, entity_id, state, last_changed\n FROM states\n WHERE\n state NOT IN ('unknown', 'unavailable')\n ORDER BY last_changed DESC\n LIMIT :limit\n \"\"\"", ")", "try", ":", "print", "(", "\"Querying the database, this could take a while\"", ")", "response", "=", "self", ".", "perform_query", "(", "query", ",", "limit", "=", "limit", ")", "master_df", "=", "pd", ".", "DataFrame", "(", "response", ".", "fetchall", "(", ")", ")", "print", "(", "\"master_df created successfully.\"", ")", "self", ".", "_master_df", "=", "master_df", ".", "copy", "(", ")", "self", ".", "parse_all_data", "(", ")", "except", ":", "raise", "ValueError", "(", "\"Error querying the database.\"", ")" ]
Fetch data for all entities.
[ "Fetch", "data", "for", "all", "entities", "." ]
f67dfde9dd63a3af411944d1857b0835632617c5
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/core.py#L155-L179
7,910
robmarkcole/HASS-data-detective
detective/core.py
HassDatabase.parse_all_data
def parse_all_data(self):
    """Parses the master df."""
    self._master_df.columns = ["domain", "entity", "state", "last_changed"]

    # Check if state is float and store in numericals category.
    self._master_df["numerical"] = self._master_df["state"].apply(
        lambda x: functions.isfloat(x)
    )

    # Multiindexing
    self._master_df.set_index(
        ["domain", "entity", "numerical", "last_changed"], inplace=True
    )
python
def parse_all_data(self):
    """Parses the master df."""
    self._master_df.columns = ["domain", "entity", "state", "last_changed"]

    # Check if state is float and store in numericals category.
    self._master_df["numerical"] = self._master_df["state"].apply(
        lambda x: functions.isfloat(x)
    )

    # Multiindexing
    self._master_df.set_index(
        ["domain", "entity", "numerical", "last_changed"], inplace=True
    )
[ "def", "parse_all_data", "(", "self", ")", ":", "self", ".", "_master_df", ".", "columns", "=", "[", "\"domain\"", ",", "\"entity\"", ",", "\"state\"", ",", "\"last_changed\"", "]", "# Check if state is float and store in numericals category.", "self", ".", "_master_df", "[", "\"numerical\"", "]", "=", "self", ".", "_master_df", "[", "\"state\"", "]", ".", "apply", "(", "lambda", "x", ":", "functions", ".", "isfloat", "(", "x", ")", ")", "# Multiindexing", "self", ".", "_master_df", ".", "set_index", "(", "[", "\"domain\"", ",", "\"entity\"", ",", "\"numerical\"", ",", "\"last_changed\"", "]", ",", "inplace", "=", "True", ")" ]
Parses the master df.
[ "Parses", "the", "master", "df", "." ]
f67dfde9dd63a3af411944d1857b0835632617c5
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/core.py#L181-L193
7,911
robmarkcole/HASS-data-detective
detective/core.py
NumericalSensors.correlations
def correlations(self):
    """
    Calculate the correlation coefficients.
    """
    corr_df = self._sensors_num_df.corr()
    corr_names = []
    corrs = []

    for i in range(len(corr_df.index)):
        for j in range(len(corr_df.index)):
            c_name = corr_df.index[i]
            r_name = corr_df.columns[j]
            corr_names.append("%s-%s" % (c_name, r_name))
            corrs.append(corr_df.ix[i, j])

    corrs_all = pd.DataFrame(index=corr_names)
    corrs_all["value"] = corrs
    corrs_all = corrs_all.dropna().drop(
        corrs_all[(corrs_all["value"] == float(1))].index
    )
    corrs_all = corrs_all.drop(corrs_all[corrs_all["value"] == float(-1)].index)
    corrs_all = corrs_all.sort_values("value", ascending=False)
    corrs_all = corrs_all.drop_duplicates()
    return corrs_all
python
def correlations(self):
    """
    Calculate the correlation coefficients.
    """
    corr_df = self._sensors_num_df.corr()
    corr_names = []
    corrs = []

    for i in range(len(corr_df.index)):
        for j in range(len(corr_df.index)):
            c_name = corr_df.index[i]
            r_name = corr_df.columns[j]
            corr_names.append("%s-%s" % (c_name, r_name))
            corrs.append(corr_df.ix[i, j])

    corrs_all = pd.DataFrame(index=corr_names)
    corrs_all["value"] = corrs
    corrs_all = corrs_all.dropna().drop(
        corrs_all[(corrs_all["value"] == float(1))].index
    )
    corrs_all = corrs_all.drop(corrs_all[corrs_all["value"] == float(-1)].index)
    corrs_all = corrs_all.sort_values("value", ascending=False)
    corrs_all = corrs_all.drop_duplicates()
    return corrs_all
[ "def", "correlations", "(", "self", ")", ":", "corr_df", "=", "self", ".", "_sensors_num_df", ".", "corr", "(", ")", "corr_names", "=", "[", "]", "corrs", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "corr_df", ".", "index", ")", ")", ":", "for", "j", "in", "range", "(", "len", "(", "corr_df", ".", "index", ")", ")", ":", "c_name", "=", "corr_df", ".", "index", "[", "i", "]", "r_name", "=", "corr_df", ".", "columns", "[", "j", "]", "corr_names", ".", "append", "(", "\"%s-%s\"", "%", "(", "c_name", ",", "r_name", ")", ")", "corrs", ".", "append", "(", "corr_df", ".", "ix", "[", "i", ",", "j", "]", ")", "corrs_all", "=", "pd", ".", "DataFrame", "(", "index", "=", "corr_names", ")", "corrs_all", "[", "\"value\"", "]", "=", "corrs", "corrs_all", "=", "corrs_all", ".", "dropna", "(", ")", ".", "drop", "(", "corrs_all", "[", "(", "corrs_all", "[", "\"value\"", "]", "==", "float", "(", "1", ")", ")", "]", ".", "index", ")", "corrs_all", "=", "corrs_all", ".", "drop", "(", "corrs_all", "[", "corrs_all", "[", "\"value\"", "]", "==", "float", "(", "-", "1", ")", "]", ".", "index", ")", "corrs_all", "=", "corrs_all", ".", "sort_values", "(", "\"value\"", ",", "ascending", "=", "False", ")", "corrs_all", "=", "corrs_all", ".", "drop_duplicates", "(", ")", "return", "corrs_all" ]
Calculate the correlation coefficients.
[ "Calculate", "the", "correlation", "coefficients", "." ]
f67dfde9dd63a3af411944d1857b0835632617c5
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/core.py#L248-L272
7,912
robmarkcole/HASS-data-detective
detective/core.py
NumericalSensors.plot
def plot(self, entities: List[str]):
    """
    Basic plot of a numerical sensor data.

    Parameters
    ----------
    entities : a list of entities
    """
    ax = self._sensors_num_df[entities].plot(figsize=[12, 6])
    ax.legend(loc="center left", bbox_to_anchor=(1, 0.5))
    ax.set_xlabel("Date")
    ax.set_ylabel("Reading")
    return
python
def plot(self, entities: List[str]):
    """
    Basic plot of a numerical sensor data.

    Parameters
    ----------
    entities : a list of entities
    """
    ax = self._sensors_num_df[entities].plot(figsize=[12, 6])
    ax.legend(loc="center left", bbox_to_anchor=(1, 0.5))
    ax.set_xlabel("Date")
    ax.set_ylabel("Reading")
    return
[ "def", "plot", "(", "self", ",", "entities", ":", "List", "[", "str", "]", ")", ":", "ax", "=", "self", ".", "_sensors_num_df", "[", "entities", "]", ".", "plot", "(", "figsize", "=", "[", "12", ",", "6", "]", ")", "ax", ".", "legend", "(", "loc", "=", "\"center left\"", ",", "bbox_to_anchor", "=", "(", "1", ",", "0.5", ")", ")", "ax", ".", "set_xlabel", "(", "\"Date\"", ")", "ax", ".", "set_ylabel", "(", "\"Reading\"", ")", "return" ]
Basic plot of a numerical sensor data.

Parameters
----------
entities : a list of entities
[ "Basic", "plot", "of", "a", "numerical", "sensor", "data", "." ]
f67dfde9dd63a3af411944d1857b0835632617c5
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/core.py#L292-L305
7,913
robmarkcole/HASS-data-detective
detective/core.py
BinarySensors.plot
def plot(self, entity):
    """
    Basic plot of a single binary sensor data.

    Parameters
    ----------
    entity : string
        The entity to plot
    """
    df = self._binary_df[[entity]]
    resampled = df.resample("s").ffill()  # Sample at seconds and ffill
    resampled.columns = ["value"]

    fig, ax = plt.subplots(1, 1, figsize=(16, 2))
    ax.fill_between(resampled.index, y1=0, y2=1, facecolor="royalblue", label="off")
    ax.fill_between(
        resampled.index,
        y1=0,
        y2=1,
        where=(resampled["value"] > 0),
        facecolor="red",
        label="on",
    )
    ax.set_title(entity)
    ax.set_xlabel("Date")
    ax.set_frame_on(False)
    ax.set_yticks([])
    plt.legend(loc=(1.01, 0.7))
    plt.show()
    return
python
def plot(self, entity):
    """
    Basic plot of a single binary sensor data.

    Parameters
    ----------
    entity : string
        The entity to plot
    """
    df = self._binary_df[[entity]]
    resampled = df.resample("s").ffill()  # Sample at seconds and ffill
    resampled.columns = ["value"]

    fig, ax = plt.subplots(1, 1, figsize=(16, 2))
    ax.fill_between(resampled.index, y1=0, y2=1, facecolor="royalblue", label="off")
    ax.fill_between(
        resampled.index,
        y1=0,
        y2=1,
        where=(resampled["value"] > 0),
        facecolor="red",
        label="on",
    )
    ax.set_title(entity)
    ax.set_xlabel("Date")
    ax.set_frame_on(False)
    ax.set_yticks([])
    plt.legend(loc=(1.01, 0.7))
    plt.show()
    return
[ "def", "plot", "(", "self", ",", "entity", ")", ":", "df", "=", "self", ".", "_binary_df", "[", "[", "entity", "]", "]", "resampled", "=", "df", ".", "resample", "(", "\"s\"", ")", ".", "ffill", "(", ")", "# Sample at seconds and ffill", "resampled", ".", "columns", "=", "[", "\"value\"", "]", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "1", ",", "1", ",", "figsize", "=", "(", "16", ",", "2", ")", ")", "ax", ".", "fill_between", "(", "resampled", ".", "index", ",", "y1", "=", "0", ",", "y2", "=", "1", ",", "facecolor", "=", "\"royalblue\"", ",", "label", "=", "\"off\"", ")", "ax", ".", "fill_between", "(", "resampled", ".", "index", ",", "y1", "=", "0", ",", "y2", "=", "1", ",", "where", "=", "(", "resampled", "[", "\"value\"", "]", ">", "0", ")", ",", "facecolor", "=", "\"red\"", ",", "label", "=", "\"on\"", ",", ")", "ax", ".", "set_title", "(", "entity", ")", "ax", ".", "set_xlabel", "(", "\"Date\"", ")", "ax", ".", "set_frame_on", "(", "False", ")", "ax", ".", "set_yticks", "(", "[", "]", ")", "plt", ".", "legend", "(", "loc", "=", "(", "1.01", ",", "0.7", ")", ")", "plt", ".", "show", "(", ")", "return" ]
Basic plot of a single binary sensor data.

Parameters
----------
entity : string
    The entity to plot
[ "Basic", "plot", "of", "a", "single", "binary", "sensor", "data", "." ]
f67dfde9dd63a3af411944d1857b0835632617c5
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/core.py#L353-L381
7,914
django-salesforce/django-salesforce
salesforce/router.py
is_sf_database
def is_sf_database(db, model=None):
    """The alias is a Salesforce database."""
    from django.db import connections
    if db is None:
        return getattr(model, '_salesforce_object', False)
    engine = connections[db].settings_dict['ENGINE']
    return engine == 'salesforce.backend' or connections[db].vendor == 'salesforce'
python
def is_sf_database(db, model=None):
    """The alias is a Salesforce database."""
    from django.db import connections
    if db is None:
        return getattr(model, '_salesforce_object', False)
    engine = connections[db].settings_dict['ENGINE']
    return engine == 'salesforce.backend' or connections[db].vendor == 'salesforce'
[ "def", "is_sf_database", "(", "db", ",", "model", "=", "None", ")", ":", "from", "django", ".", "db", "import", "connections", "if", "db", "is", "None", ":", "return", "getattr", "(", "model", ",", "'_salesforce_object'", ",", "False", ")", "engine", "=", "connections", "[", "db", "]", ".", "settings_dict", "[", "'ENGINE'", "]", "return", "engine", "==", "'salesforce.backend'", "or", "connections", "[", "db", "]", ".", "vendor", "==", "'salesforce'" ]
The alias is a Salesforce database.
[ "The", "alias", "is", "a", "Salesforce", "database", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/router.py#L16-L22
7,915
django-salesforce/django-salesforce
salesforce/router.py
ModelRouter.allow_migrate
def allow_migrate(self, db, app_label, model_name=None, **hints):
    """
    Don't attempt to sync SF models to non SF databases and vice versa.
    """
    if model_name:
        model = apps.get_model(app_label, model_name)
    else:
        # hints are used with less priority, because many hints are dynamic
        # models made by migrations on a '__fake__' module which are not
        # SalesforceModels
        model = hints.get('model')
    if hasattr(model, '_salesforce_object'):
        # SF models can be migrated if SALESFORCE_DB_ALIAS is e.g.
        # a sqlite3 database or any non-SF database.
        if not (is_sf_database(db) or db == self.sf_alias):
            return False
    else:
        if is_sf_database(db) or self.sf_alias != 'default' and db == self.sf_alias:
            return False
    # TODO: It is usual that "migrate" is currently disallowed for SF.
    # In the future it can be implemented to do a deep check by
    # introspection of compatibily between Django models and SF database.
    if hasattr(model, '_salesforce_object'):
        # return False
        pass
python
def allow_migrate(self, db, app_label, model_name=None, **hints):
    """
    Don't attempt to sync SF models to non SF databases and vice versa.
    """
    if model_name:
        model = apps.get_model(app_label, model_name)
    else:
        # hints are used with less priority, because many hints are dynamic
        # models made by migrations on a '__fake__' module which are not
        # SalesforceModels
        model = hints.get('model')
    if hasattr(model, '_salesforce_object'):
        # SF models can be migrated if SALESFORCE_DB_ALIAS is e.g.
        # a sqlite3 database or any non-SF database.
        if not (is_sf_database(db) or db == self.sf_alias):
            return False
    else:
        if is_sf_database(db) or self.sf_alias != 'default' and db == self.sf_alias:
            return False
    # TODO: It is usual that "migrate" is currently disallowed for SF.
    # In the future it can be implemented to do a deep check by
    # introspection of compatibily between Django models and SF database.
    if hasattr(model, '_salesforce_object'):
        # return False
        pass
[ "def", "allow_migrate", "(", "self", ",", "db", ",", "app_label", ",", "model_name", "=", "None", ",", "*", "*", "hints", ")", ":", "if", "model_name", ":", "model", "=", "apps", ".", "get_model", "(", "app_label", ",", "model_name", ")", "else", ":", "# hints are used with less priority, because many hints are dynamic", "# models made by migrations on a '__fake__' module which are not", "# SalesforceModels", "model", "=", "hints", ".", "get", "(", "'model'", ")", "if", "hasattr", "(", "model", ",", "'_salesforce_object'", ")", ":", "# SF models can be migrated if SALESFORCE_DB_ALIAS is e.g.", "# a sqlite3 database or any non-SF database.", "if", "not", "(", "is_sf_database", "(", "db", ")", "or", "db", "==", "self", ".", "sf_alias", ")", ":", "return", "False", "else", ":", "if", "is_sf_database", "(", "db", ")", "or", "self", ".", "sf_alias", "!=", "'default'", "and", "db", "==", "self", ".", "sf_alias", ":", "return", "False", "# TODO: It is usual that \"migrate\" is currently disallowed for SF.", "# In the future it can be implemented to do a deep check by", "# introspection of compatibily between Django models and SF database.", "if", "hasattr", "(", "model", ",", "'_salesforce_object'", ")", ":", "# return False", "pass" ]
Don't attempt to sync SF models to non SF databases and vice versa.
[ "Don", "t", "attempt", "to", "sync", "SF", "models", "to", "non", "SF", "databases", "and", "vice", "versa", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/router.py#L60-L85
7,916
django-salesforce/django-salesforce
salesforce/backend/indep.py
LazyField.update
def update(self, **kwargs):
    """Customize the lazy field"""
    assert not self.called
    self.kw.update(kwargs)
    return self
python
def update(self, **kwargs):
    """Customize the lazy field"""
    assert not self.called
    self.kw.update(kwargs)
    return self
[ "def", "update", "(", "self", ",", "*", "*", "kwargs", ")", ":", "assert", "not", "self", ".", "called", "self", ".", "kw", ".", "update", "(", "kwargs", ")", "return", "self" ]
Customize the lazy field
[ "Customize", "the", "lazy", "field" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/backend/indep.py#L32-L36
7,917
django-salesforce/django-salesforce
salesforce/backend/indep.py
LazyField.create
def create(self):
    """Create a normal field from the lazy field"""
    assert not self.called
    return self.klass(*self.args, **self.kw)
python
def create(self):
    """Create a normal field from the lazy field"""
    assert not self.called
    return self.klass(*self.args, **self.kw)
[ "def", "create", "(", "self", ")", ":", "assert", "not", "self", ".", "called", "return", "self", ".", "klass", "(", "*", "self", ".", "args", ",", "*", "*", "self", ".", "kw", ")" ]
Create a normal field from the lazy field
[ "Create", "a", "normal", "field", "from", "the", "lazy", "field" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/backend/indep.py#L38-L41
7,918
django-salesforce/django-salesforce
salesforce/backend/manager.py
SalesforceManager.get_queryset
def get_queryset(self):
    """
    Returns a QuerySet which access remote SF objects.
    """
    if router.is_sf_database(self.db):
        q = models_sql_query.SalesforceQuery(self.model, where=compiler.SalesforceWhereNode)
        return query.SalesforceQuerySet(self.model, query=q, using=self.db)
    return super(SalesforceManager, self).get_queryset()
python
def get_queryset(self):
    """
    Returns a QuerySet which access remote SF objects.
    """
    if router.is_sf_database(self.db):
        q = models_sql_query.SalesforceQuery(self.model, where=compiler.SalesforceWhereNode)
        return query.SalesforceQuerySet(self.model, query=q, using=self.db)
    return super(SalesforceManager, self).get_queryset()
[ "def", "get_queryset", "(", "self", ")", ":", "if", "router", ".", "is_sf_database", "(", "self", ".", "db", ")", ":", "q", "=", "models_sql_query", ".", "SalesforceQuery", "(", "self", ".", "model", ",", "where", "=", "compiler", ".", "SalesforceWhereNode", ")", "return", "query", ".", "SalesforceQuerySet", "(", "self", ".", "model", ",", "query", "=", "q", ",", "using", "=", "self", ".", "db", ")", "return", "super", "(", "SalesforceManager", ",", "self", ")", ".", "get_queryset", "(", ")" ]
Returns a QuerySet which access remote SF objects.
[ "Returns", "a", "QuerySet", "which", "access", "remote", "SF", "objects", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/backend/manager.py#L27-L34
7,919
django-salesforce/django-salesforce
salesforce/fields.py
SfField.get_attname_column
def get_attname_column(self):
    """
    Get the database column name automatically in most cases.
    """
    # See "A guide to Field parameters": django/db/models/fields/__init__.py
    # * attname: The attribute to use on the model object. This is the same as
    #            "name", except in the case of ForeignKeys, where "_id" is
    #            appended.
    # * column:  The database column for this field. This is the same as
    #            "attname", except if db_column is specified.
    attname = self.get_attname()
    if self.db_column is not None:
        # explicit name
        column = self.db_column
    else:
        if not self.name.islower():
            # a Salesforce style name e.g. 'LastName' or 'MyCustomField'
            column = self.name
        else:
            # a Django style name like 'last_name' or 'my_custom_field'
            column = self.name.title().replace('_', '')
        # Fix custom fields
        if self.sf_custom:
            column = self.sf_namespace + column + '__c'
    return attname, column
python
def get_attname_column(self):
    """
    Get the database column name automatically in most cases.
    """
    # See "A guide to Field parameters": django/db/models/fields/__init__.py
    # * attname: The attribute to use on the model object. This is the same as
    #            "name", except in the case of ForeignKeys, where "_id" is
    #            appended.
    # * column:  The database column for this field. This is the same as
    #            "attname", except if db_column is specified.
    attname = self.get_attname()
    if self.db_column is not None:
        # explicit name
        column = self.db_column
    else:
        if not self.name.islower():
            # a Salesforce style name e.g. 'LastName' or 'MyCustomField'
            column = self.name
        else:
            # a Django style name like 'last_name' or 'my_custom_field'
            column = self.name.title().replace('_', '')
        # Fix custom fields
        if self.sf_custom:
            column = self.sf_namespace + column + '__c'
    return attname, column
[ "def", "get_attname_column", "(", "self", ")", ":", "# See \"A guide to Field parameters\": django/db/models/fields/__init__.py", "# * attname: The attribute to use on the model object. This is the same as", "# \"name\", except in the case of ForeignKeys, where \"_id\" is", "# appended.", "# * column: The database column for this field. This is the same as", "# \"attname\", except if db_column is specified.", "attname", "=", "self", ".", "get_attname", "(", ")", "if", "self", ".", "db_column", "is", "not", "None", ":", "# explicit name", "column", "=", "self", ".", "db_column", "else", ":", "if", "not", "self", ".", "name", ".", "islower", "(", ")", ":", "# a Salesforce style name e.g. 'LastName' or 'MyCustomField'", "column", "=", "self", ".", "name", "else", ":", "# a Django style name like 'last_name' or 'my_custom_field'", "column", "=", "self", ".", "name", ".", "title", "(", ")", ".", "replace", "(", "'_'", ",", "''", ")", "# Fix custom fields", "if", "self", ".", "sf_custom", ":", "column", "=", "self", ".", "sf_namespace", "+", "column", "+", "'__c'", "return", "attname", ",", "column" ]
Get the database column name automatically in most cases.
[ "Get", "the", "database", "column", "name", "automatically", "in", "most", "cases", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/fields.py#L109-L133
7,920
django-salesforce/django-salesforce
salesforce/backend/utils.py
extract_values
def extract_values(query):
    """
    Extract values from insert or update query.
    Supports bulk_create
    """
    # pylint
    if isinstance(query, subqueries.UpdateQuery):
        row = query.values
        return extract_values_inner(row, query)
    if isinstance(query, subqueries.InsertQuery):
        ret = []
        for row in query.objs:
            ret.append(extract_values_inner(row, query))
        return ret
    raise NotSupportedError
python
def extract_values(query):
    """
    Extract values from insert or update query.
    Supports bulk_create
    """
    # pylint
    if isinstance(query, subqueries.UpdateQuery):
        row = query.values
        return extract_values_inner(row, query)
    if isinstance(query, subqueries.InsertQuery):
        ret = []
        for row in query.objs:
            ret.append(extract_values_inner(row, query))
        return ret
    raise NotSupportedError
[ "def", "extract_values", "(", "query", ")", ":", "# pylint", "if", "isinstance", "(", "query", ",", "subqueries", ".", "UpdateQuery", ")", ":", "row", "=", "query", ".", "values", "return", "extract_values_inner", "(", "row", ",", "query", ")", "if", "isinstance", "(", "query", ",", "subqueries", ".", "InsertQuery", ")", ":", "ret", "=", "[", "]", "for", "row", "in", "query", ".", "objs", ":", "ret", ".", "append", "(", "extract_values_inner", "(", "row", ",", "query", ")", ")", "return", "ret", "raise", "NotSupportedError" ]
Extract values from insert or update query. Supports bulk_create
[ "Extract", "values", "from", "insert", "or", "update", "query", ".", "Supports", "bulk_create" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/backend/utils.py#L94-L108
7,921
django-salesforce/django-salesforce
salesforce/backend/utils.py
CursorWrapper.execute
def execute(self, q, args=()):
    """
    Send a query to the Salesforce API.
    """
    # pylint:disable=too-many-branches
    self.rowcount = None
    response = None
    if self.query is None:
        self.execute_select(q, args)
    else:
        response = self.execute_django(q, args)
        if isinstance(response, list):
            return
    # the encoding is detected automatically, e.g. from headers
    if response and response.text:
        # parse_float set to decimal.Decimal to avoid precision errors when
        # converting from the json number to a float and then to a Decimal object
        # on a model's DecimalField. This converts from json number directly
        # to a Decimal object
        data = response.json(parse_float=decimal.Decimal)
        # a SELECT query
        if 'totalSize' in data:
            # SELECT
            self.rowcount = data['totalSize']
        # a successful INSERT query, return after getting PK
        elif('success' in data and 'id' in data):
            self.lastrowid = data['id']
            return
        elif 'compositeResponse' in data:
            # TODO treat error reporting for composite requests
            self.lastrowid = [x['body']['id'] if x['body'] is not None else x['referenceId']
                              for x in data['compositeResponse']]
            return
        elif data['hasErrors'] is False:
            # it is from Composite Batch request
            # save id from bulk_create even if Django don't use it
            if data['results'] and data['results'][0]['result']:
                self.lastrowid = [item['result']['id'] for item in data['results']]
            return
        # something we don't recognize
        else:
            raise DatabaseError(data)

        if not q.upper().startswith('SELECT COUNT() FROM'):
            self.first_row = data['records'][0] if data['records'] else None
python
def execute(self, q, args=()):
    """
    Send a query to the Salesforce API.
    """
    # pylint:disable=too-many-branches
    self.rowcount = None
    response = None
    if self.query is None:
        self.execute_select(q, args)
    else:
        response = self.execute_django(q, args)
        if isinstance(response, list):
            return
    # the encoding is detected automatically, e.g. from headers
    if response and response.text:
        # parse_float set to decimal.Decimal to avoid precision errors when
        # converting from the json number to a float and then to a Decimal object
        # on a model's DecimalField. This converts from json number directly
        # to a Decimal object
        data = response.json(parse_float=decimal.Decimal)
        # a SELECT query
        if 'totalSize' in data:
            # SELECT
            self.rowcount = data['totalSize']
        # a successful INSERT query, return after getting PK
        elif('success' in data and 'id' in data):
            self.lastrowid = data['id']
            return
        elif 'compositeResponse' in data:
            # TODO treat error reporting for composite requests
            self.lastrowid = [x['body']['id'] if x['body'] is not None else x['referenceId']
                              for x in data['compositeResponse']]
            return
        elif data['hasErrors'] is False:
            # it is from Composite Batch request
            # save id from bulk_create even if Django don't use it
            if data['results'] and data['results'][0]['result']:
                self.lastrowid = [item['result']['id'] for item in data['results']]
            return
        # something we don't recognize
        else:
            raise DatabaseError(data)

        if not q.upper().startswith('SELECT COUNT() FROM'):
            self.first_row = data['records'][0] if data['records'] else None
[ "def", "execute", "(", "self", ",", "q", ",", "args", "=", "(", ")", ")", ":", "# pylint:disable=too-many-branches", "self", ".", "rowcount", "=", "None", "response", "=", "None", "if", "self", ".", "query", "is", "None", ":", "self", ".", "execute_select", "(", "q", ",", "args", ")", "else", ":", "response", "=", "self", ".", "execute_django", "(", "q", ",", "args", ")", "if", "isinstance", "(", "response", ",", "list", ")", ":", "return", "# the encoding is detected automatically, e.g. from headers", "if", "response", "and", "response", ".", "text", ":", "# parse_float set to decimal.Decimal to avoid precision errors when", "# converting from the json number to a float and then to a Decimal object", "# on a model's DecimalField. This converts from json number directly", "# to a Decimal object", "data", "=", "response", ".", "json", "(", "parse_float", "=", "decimal", ".", "Decimal", ")", "# a SELECT query", "if", "'totalSize'", "in", "data", ":", "# SELECT", "self", ".", "rowcount", "=", "data", "[", "'totalSize'", "]", "# a successful INSERT query, return after getting PK", "elif", "(", "'success'", "in", "data", "and", "'id'", "in", "data", ")", ":", "self", ".", "lastrowid", "=", "data", "[", "'id'", "]", "return", "elif", "'compositeResponse'", "in", "data", ":", "# TODO treat error reporting for composite requests", "self", ".", "lastrowid", "=", "[", "x", "[", "'body'", "]", "[", "'id'", "]", "if", "x", "[", "'body'", "]", "is", "not", "None", "else", "x", "[", "'referenceId'", "]", "for", "x", "in", "data", "[", "'compositeResponse'", "]", "]", "return", "elif", "data", "[", "'hasErrors'", "]", "is", "False", ":", "# it is from Composite Batch request", "# save id from bulk_create even if Django don't use it", "if", "data", "[", "'results'", "]", "and", "data", "[", "'results'", "]", "[", "0", "]", "[", "'result'", "]", ":", "self", ".", "lastrowid", "=", "[", "item", "[", "'result'", "]", "[", "'id'", "]", "for", "item", "in", "data", "[", "'results'", "]", "]", "return", "# something we don't recognize", "else", ":", "raise", "DatabaseError", "(", "data", ")", "if", "not", "q", ".", "upper", "(", ")", ".", "startswith", "(", "'SELECT COUNT() FROM'", ")", ":", "self", ".", "first_row", "=", "data", "[", "'records'", "]", "[", "0", "]", "if", "data", "[", "'records'", "]", "else", "None" ]
Send a query to the Salesforce API.
[ "Send", "a", "query", "to", "the", "Salesforce", "API", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/backend/utils.py#L173-L218
7,922
django-salesforce/django-salesforce
salesforce/backend/utils.py
CursorWrapper.execute_django
def execute_django(self, soql, args=()):
    """
    Fixed execute for queries coming from Django query compilers
    """
    response = None
    sqltype = soql.split(None, 1)[0].upper()
    if isinstance(self.query, subqueries.InsertQuery):
        response = self.execute_insert(self.query)
    elif isinstance(self.query, subqueries.UpdateQuery):
        response = self.execute_update(self.query)
    elif isinstance(self.query, subqueries.DeleteQuery):
        response = self.execute_delete(self.query)
    elif isinstance(self.query, RawQuery):
        self.execute_select(soql, args)
    elif sqltype in ('SAVEPOINT', 'ROLLBACK', 'RELEASE'):
        log.info("Ignored SQL command '%s'", sqltype)
        return
    elif isinstance(self.query, Query):
        self.execute_select(soql, args)
    else:
        raise DatabaseError("Unsupported query: type %s: %s" % (type(self.query), self.query))
    return response
python
def execute_django(self, soql, args=()):
    """
    Fixed execute for queries coming from Django query compilers
    """
    response = None
    sqltype = soql.split(None, 1)[0].upper()
    if isinstance(self.query, subqueries.InsertQuery):
        response = self.execute_insert(self.query)
    elif isinstance(self.query, subqueries.UpdateQuery):
        response = self.execute_update(self.query)
    elif isinstance(self.query, subqueries.DeleteQuery):
        response = self.execute_delete(self.query)
    elif isinstance(self.query, RawQuery):
        self.execute_select(soql, args)
    elif sqltype in ('SAVEPOINT', 'ROLLBACK', 'RELEASE'):
        log.info("Ignored SQL command '%s'", sqltype)
        return
    elif isinstance(self.query, Query):
        self.execute_select(soql, args)
    else:
        raise DatabaseError("Unsupported query: type %s: %s" % (type(self.query), self.query))
    return response
[ "def", "execute_django", "(", "self", ",", "soql", ",", "args", "=", "(", ")", ")", ":", "response", "=", "None", "sqltype", "=", "soql", ".", "split", "(", "None", ",", "1", ")", "[", "0", "]", ".", "upper", "(", ")", "if", "isinstance", "(", "self", ".", "query", ",", "subqueries", ".", "InsertQuery", ")", ":", "response", "=", "self", ".", "execute_insert", "(", "self", ".", "query", ")", "elif", "isinstance", "(", "self", ".", "query", ",", "subqueries", ".", "UpdateQuery", ")", ":", "response", "=", "self", ".", "execute_update", "(", "self", ".", "query", ")", "elif", "isinstance", "(", "self", ".", "query", ",", "subqueries", ".", "DeleteQuery", ")", ":", "response", "=", "self", ".", "execute_delete", "(", "self", ".", "query", ")", "elif", "isinstance", "(", "self", ".", "query", ",", "RawQuery", ")", ":", "self", ".", "execute_select", "(", "soql", ",", "args", ")", "elif", "sqltype", "in", "(", "'SAVEPOINT'", ",", "'ROLLBACK'", ",", "'RELEASE'", ")", ":", "log", ".", "info", "(", "\"Ignored SQL command '%s'\"", ",", "sqltype", ")", "return", "elif", "isinstance", "(", "self", ".", "query", ",", "Query", ")", ":", "self", ".", "execute_select", "(", "soql", ",", "args", ")", "else", ":", "raise", "DatabaseError", "(", "\"Unsupported query: type %s: %s\"", "%", "(", "type", "(", "self", ".", "query", ")", ",", "self", ".", "query", ")", ")", "return", "response" ]
Fixed execute for queries coming from Django query compilers
[ "Fixed", "execute", "for", "queries", "coming", "from", "Django", "query", "compilers" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/backend/utils.py#L223-L244
7,923
django-salesforce/django-salesforce
salesforce/backend/utils.py
CursorWrapper.get_pks_from_query
def get_pks_from_query(self, query):
    """Prepare primary keys for update and delete queries"""
    where = query.where
    sql = None
    if where.connector == 'AND' and not where.negated and len(where.children) == 1:
        # simple cases are optimized, especially because a suboptimal
        # nested query based on the same table is not allowed by SF
        child = where.children[0]
        if (child.lookup_name in ('exact', 'in') and child.lhs.target.column == 'Id'
                and not child.bilateral_transforms
                and child.lhs.target.model is self.query.model):
            pks = child.rhs
            if child.lookup_name == 'exact':
                assert isinstance(pks, text_type)
                return [pks]
            # lookup_name 'in'
            assert not child.bilateral_transforms
            if isinstance(pks, (tuple, list)):
                return pks
            if DJANGO_111_PLUS:
                assert isinstance(pks, Query) and type(pks).__name__ == 'SalesforceQuery'
                # # alternative solution:
                # return list(salesforce.backend.query.SalesforceQuerySet(pk.model, query=pk, using=pk._db))
                sql, params = pks.get_compiler('salesforce').as_sql()
            else:
                assert isinstance(pks, salesforce.backend.query.SalesforceQuerySet)
                return [x.pk for x in pks]
    if not sql:
        # a subquery is necessary in this case
        where_sql, params = where.as_sql(query.get_compiler('salesforce'), self.db.connection)
        sql = "SELECT Id FROM {} WHERE {}".format(query.model._meta.db_table, where_sql)
    with self.db.cursor() as cur:
        cur.execute(sql, params)
        assert len(cur.description) == 1 and cur.description[0][0] == 'Id'
        return [x[0] for x in cur]
python
def get_pks_from_query(self, query):
    """Prepare primary keys for update and delete queries"""
    where = query.where
    sql = None
    if where.connector == 'AND' and not where.negated and len(where.children) == 1:
        # simple cases are optimized, especially because a suboptimal
        # nested query based on the same table is not allowed by SF
        child = where.children[0]
        if (child.lookup_name in ('exact', 'in') and child.lhs.target.column == 'Id'
                and not child.bilateral_transforms
                and child.lhs.target.model is self.query.model):
            pks = child.rhs
            if child.lookup_name == 'exact':
                assert isinstance(pks, text_type)
                return [pks]
            # lookup_name 'in'
            assert not child.bilateral_transforms
            if isinstance(pks, (tuple, list)):
                return pks
            if DJANGO_111_PLUS:
                assert isinstance(pks, Query) and type(pks).__name__ == 'SalesforceQuery'
                # # alternative solution:
                # return list(salesforce.backend.query.SalesforceQuerySet(pk.model, query=pk, using=pk._db))
                sql, params = pks.get_compiler('salesforce').as_sql()
            else:
                assert isinstance(pks, salesforce.backend.query.SalesforceQuerySet)
                return [x.pk for x in pks]
    if not sql:
        # a subquery is necessary in this case
        where_sql, params = where.as_sql(query.get_compiler('salesforce'), self.db.connection)
        sql = "SELECT Id FROM {} WHERE {}".format(query.model._meta.db_table, where_sql)
    with self.db.cursor() as cur:
        cur.execute(sql, params)
        assert len(cur.description) == 1 and cur.description[0][0] == 'Id'
        return [x[0] for x in cur]
[ "def", "get_pks_from_query", "(", "self", ",", "query", ")", ":", "where", "=", "query", ".", "where", "sql", "=", "None", "if", "where", ".", "connector", "==", "'AND'", "and", "not", "where", ".", "negated", "and", "len", "(", "where", ".", "children", ")", "==", "1", ":", "# simple cases are optimized, especially because a suboptimal", "# nested query based on the same table is not allowed by SF", "child", "=", "where", ".", "children", "[", "0", "]", "if", "(", "child", ".", "lookup_name", "in", "(", "'exact'", ",", "'in'", ")", "and", "child", ".", "lhs", ".", "target", ".", "column", "==", "'Id'", "and", "not", "child", ".", "bilateral_transforms", "and", "child", ".", "lhs", ".", "target", ".", "model", "is", "self", ".", "query", ".", "model", ")", ":", "pks", "=", "child", ".", "rhs", "if", "child", ".", "lookup_name", "==", "'exact'", ":", "assert", "isinstance", "(", "pks", ",", "text_type", ")", "return", "[", "pks", "]", "# lookup_name 'in'", "assert", "not", "child", ".", "bilateral_transforms", "if", "isinstance", "(", "pks", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "pks", "if", "DJANGO_111_PLUS", ":", "assert", "isinstance", "(", "pks", ",", "Query", ")", "and", "type", "(", "pks", ")", ".", "__name__", "==", "'SalesforceQuery'", "# # alternative solution:", "# return list(salesforce.backend.query.SalesforceQuerySet(pk.model, query=pk, using=pk._db))", "sql", ",", "params", "=", "pks", ".", "get_compiler", "(", "'salesforce'", ")", ".", "as_sql", "(", ")", "else", ":", "assert", "isinstance", "(", "pks", ",", "salesforce", ".", "backend", ".", "query", ".", "SalesforceQuerySet", ")", "return", "[", "x", ".", "pk", "for", "x", "in", "pks", "]", "if", "not", "sql", ":", "# a subquery is necessary in this case", "where_sql", ",", "params", "=", "where", ".", "as_sql", "(", "query", ".", "get_compiler", "(", "'salesforce'", ")", ",", "self", ".", "db", ".", "connection", ")", "sql", "=", "\"SELECT Id FROM {} WHERE {}\"", ".", "format", "(", "query", ".", "model", ".", "_meta", ".", "db_table", ",", "where_sql", ")", "with", "self", ".", "db", ".", "cursor", "(", ")", "as", "cur", ":", "cur", ".", "execute", "(", "sql", ",", "params", ")", "assert", "len", "(", "cur", ".", "description", ")", "==", "1", "and", "cur", ".", "description", "[", "0", "]", "[", "0", "]", "==", "'Id'", "return", "[", "x", "[", "0", "]", "for", "x", "in", "cur", "]" ]
Prepare primary keys for update and delete queries
[ "Prepare", "primary", "keys", "for", "update", "and", "delete", "queries" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/backend/utils.py#L286-L320
7,924
django-salesforce/django-salesforce
salesforce/backend/utils.py
CursorWrapper.versions_request
def versions_request(self):
    """List Available REST API Versions"""
    ret = self.handle_api_exceptions('GET', '', api_ver='')
    return [str_dict(x) for x in ret.json()]
python
def versions_request(self):
    """List Available REST API Versions"""
    ret = self.handle_api_exceptions('GET', '', api_ver='')
    return [str_dict(x) for x in ret.json()]
[ "def", "versions_request", "(", "self", ")", ":", "ret", "=", "self", ".", "handle_api_exceptions", "(", "'GET'", ",", "''", ",", "api_ver", "=", "''", ")", "return", "[", "str_dict", "(", "x", ")", "for", "x", "in", "ret", ".", "json", "(", ")", "]" ]
List Available REST API Versions
[ "List", "Available", "REST", "API", "Versions" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/backend/utils.py#L401-L404
7,925
django-salesforce/django-salesforce
salesforce/management/commands/inspectdb.py
fix_international
def fix_international(text):
    "Fix excaped international characters back to utf-8"
    class SmartInternational(str):
        def __new__(cls, text):
            return str.__new__(cls, text)

        def endswith(self, string):
            return super(SmartInternational, self).endswith(str(string))
    if PY3:
        return text
    out = []
    last = 0
    for match in re.finditer(r'(?<=[^\\])(?:\\x[0-9a-f]{2}|\\u[0-9a-f]{4})', text):
        start, end, group = match.start(), match.end(), match.group()
        out.append(text[last:start])
        c = group.decode('unicode_escape')
        out.append(c if ord(c) > 160 and ord(c) != 173 else group)
        last = end
    out.append(text[last:])
    return SmartInternational(''.join(out).encode('utf-8'))
python
def fix_international(text):
    "Fix excaped international characters back to utf-8"
    class SmartInternational(str):
        def __new__(cls, text):
            return str.__new__(cls, text)

        def endswith(self, string):
            return super(SmartInternational, self).endswith(str(string))
    if PY3:
        return text
    out = []
    last = 0
    for match in re.finditer(r'(?<=[^\\])(?:\\x[0-9a-f]{2}|\\u[0-9a-f]{4})', text):
        start, end, group = match.start(), match.end(), match.group()
        out.append(text[last:start])
        c = group.decode('unicode_escape')
        out.append(c if ord(c) > 160 and ord(c) != 173 else group)
        last = end
    out.append(text[last:])
    return SmartInternational(''.join(out).encode('utf-8'))
[ "def", "fix_international", "(", "text", ")", ":", "class", "SmartInternational", "(", "str", ")", ":", "def", "__new__", "(", "cls", ",", "text", ")", ":", "return", "str", ".", "__new__", "(", "cls", ",", "text", ")", "def", "endswith", "(", "self", ",", "string", ")", ":", "return", "super", "(", "SmartInternational", ",", "self", ")", ".", "endswith", "(", "str", "(", "string", ")", ")", "if", "PY3", ":", "return", "text", "out", "=", "[", "]", "last", "=", "0", "for", "match", "in", "re", ".", "finditer", "(", "r'(?<=[^\\\\])(?:\\\\x[0-9a-f]{2}|\\\\u[0-9a-f]{4})'", ",", "text", ")", ":", "start", ",", "end", ",", "group", "=", "match", ".", "start", "(", ")", ",", "match", ".", "end", "(", ")", ",", "match", ".", "group", "(", ")", "out", ".", "append", "(", "text", "[", "last", ":", "start", "]", ")", "c", "=", "group", ".", "decode", "(", "'unicode_escape'", ")", "out", ".", "append", "(", "c", "if", "ord", "(", "c", ")", ">", "160", "and", "ord", "(", "c", ")", "!=", "173", "else", "group", ")", "last", "=", "end", "out", ".", "append", "(", "text", "[", "last", ":", "]", ")", "return", "SmartInternational", "(", "''", ".", "join", "(", "out", ")", ".", "encode", "(", "'utf-8'", ")", ")" ]
Fix excaped international characters back to utf-8
[ "Fix", "excaped", "international", "characters", "back", "to", "utf", "-", "8" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/management/commands/inspectdb.py#L46-L65
7,926
django-salesforce/django-salesforce
salesforce/management/commands/inspectdb.py
Command.get_meta
def get_meta(self, table_name, constraints=None, column_to_field_name=None, is_view=False, is_partition=None): """ Return a sequence comprising the lines of code necessary to construct the inner Meta class for the model corresponding to the given database table name. """ # pylint:disable=arguments-differ,too-many-arguments,unused-argument meta = [" class Meta(models.Model.Meta):", " db_table = '%s'" % table_name] if self.connection.vendor == 'salesforce': for line in self.connection.introspection.get_additional_meta(table_name): meta.append(" " + line) meta.append("") return meta
python
def get_meta(self, table_name, constraints=None, column_to_field_name=None, is_view=False, is_partition=None): """ Return a sequence comprising the lines of code necessary to construct the inner Meta class for the model corresponding to the given database table name. """ # pylint:disable=arguments-differ,too-many-arguments,unused-argument meta = [" class Meta(models.Model.Meta):", " db_table = '%s'" % table_name] if self.connection.vendor == 'salesforce': for line in self.connection.introspection.get_additional_meta(table_name): meta.append(" " + line) meta.append("") return meta
[ "def", "get_meta", "(", "self", ",", "table_name", ",", "constraints", "=", "None", ",", "column_to_field_name", "=", "None", ",", "is_view", "=", "False", ",", "is_partition", "=", "None", ")", ":", "# pylint:disable=arguments-differ,too-many-arguments,unused-argument", "meta", "=", "[", "\" class Meta(models.Model.Meta):\"", ",", "\" db_table = '%s'\"", "%", "table_name", "]", "if", "self", ".", "connection", ".", "vendor", "==", "'salesforce'", ":", "for", "line", "in", "self", ".", "connection", ".", "introspection", ".", "get_additional_meta", "(", "table_name", ")", ":", "meta", ".", "append", "(", "\" \"", "+", "line", ")", "meta", ".", "append", "(", "\"\"", ")", "return", "meta" ]
Return a sequence comprising the lines of code necessary to construct the inner Meta class for the model corresponding to the given database table name.
[ "Return", "a", "sequence", "comprising", "the", "lines", "of", "code", "necessary", "to", "construct", "the", "inner", "Meta", "class", "for", "the", "model", "corresponding", "to", "the", "given", "database", "table", "name", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/management/commands/inspectdb.py#L141-L154
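For orientation, the list returned by get_meta() for a table named 'Contact' on a non-Salesforce connection would look roughly like the following (indentation is indicative; a Salesforce connection appends further lines from get_additional_meta()):

meta = [
    "    class Meta(models.Model.Meta):",
    "        db_table = 'Contact'",
    "",
]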
7,927
django-salesforce/django-salesforce
setup.py
relative_path
def relative_path(path): """ Return the given path relative to this file. """ return os.path.join(os.path.dirname(__file__), path)
python
def relative_path(path): """ Return the given path relative to this file. """ return os.path.join(os.path.dirname(__file__), path)
[ "def", "relative_path", "(", "path", ")", ":", "return", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "path", ")" ]
Return the given path relative to this file.
[ "Return", "the", "given", "path", "relative", "to", "this", "file", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/setup.py#L16-L20
7,928
django-salesforce/django-salesforce
setup.py
get_tagged_version
def get_tagged_version(): """ Determine the current version of this package. Precise long version numbers are used if the Git repository is found. They contain: the Git tag, the commit serial and a short commit id. otherwise a short version number is used if installed from Pypi. """ with open(relative_path('salesforce/__init__.py'), 'r') as fd: version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) return version
python
def get_tagged_version(): """ Determine the current version of this package. Precise long version numbers are used if the Git repository is found. They contain: the Git tag, the commit serial and a short commit id. otherwise a short version number is used if installed from Pypi. """ with open(relative_path('salesforce/__init__.py'), 'r') as fd: version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) return version
[ "def", "get_tagged_version", "(", ")", ":", "with", "open", "(", "relative_path", "(", "'salesforce/__init__.py'", ")", ",", "'r'", ")", "as", "fd", ":", "version", "=", "re", ".", "search", "(", "r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]'", ",", "fd", ".", "read", "(", ")", ",", "re", ".", "MULTILINE", ")", ".", "group", "(", "1", ")", "return", "version" ]
Determine the current version of this package. Precise long version numbers are used if the Git repository is found. They contain: the Git tag, the commit serial and a short commit id. otherwise a short version number is used if installed from Pypi.
[ "Determine", "the", "current", "version", "of", "this", "package", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/setup.py#L23-L34
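The version is extracted with a plain regular expression instead of importing the package. A self-contained demonstration of that regex (the sample string is a made-up stand-in for the content of salesforce/__init__.py):

import re

sample = '__version__ = "1.0"\n'
match = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', sample, re.MULTILINE)
print(match.group(1))  # -> 1.0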
7,929
django-salesforce/django-salesforce
salesforce/auth.py
SalesforceAuth.dynamic_start
def dynamic_start(self, access_token, instance_url=None, **kw): """ Set the access token dynamically according to the current user. More parameters can be set. """ self.dynamic = {'access_token': str(access_token), 'instance_url': str(instance_url)} self.dynamic.update(kw)
python
def dynamic_start(self, access_token, instance_url=None, **kw): """ Set the access token dynamically according to the current user. More parameters can be set. """ self.dynamic = {'access_token': str(access_token), 'instance_url': str(instance_url)} self.dynamic.update(kw)
[ "def", "dynamic_start", "(", "self", ",", "access_token", ",", "instance_url", "=", "None", ",", "*", "*", "kw", ")", ":", "self", ".", "dynamic", "=", "{", "'access_token'", ":", "str", "(", "access_token", ")", ",", "'instance_url'", ":", "str", "(", "instance_url", ")", "}", "self", ".", "dynamic", ".", "update", "(", "kw", ")" ]
Set the access token dynamically according to the current user. More parameters can be set.
[ "Set", "the", "access", "token", "dynamically", "according", "to", "the", "current", "user", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/auth.py#L144-L151
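A hedged usage sketch: in a multi-user deployment the auth object attached to the connection's session (the same connections[alias].sf_session.auth path used by get_soap_client further below) can be switched to the token of the currently logged-in web user; the token and URL here are placeholders:

from django.db import connections

auth = connections['salesforce'].sf_session.auth
auth.dynamic_start('00D...placeholder.token...', instance_url='https://na1.salesforce.com')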
7,930
django-salesforce/django-salesforce
salesforce/dbapi/subselect.py
mark_quoted_strings
def mark_quoted_strings(sql): """Mark all quoted strings in the SOQL by '@' and get them as params, with respect to all escaped backslashes and quotes. """ # pattern of a string parameter (pm), a char escaped by backslash (bs) # out_pattern: characters valid in SOQL pm_pattern = re.compile(r"'[^\\']*(?:\\[\\'][^\\']*)*'") bs_pattern = re.compile(r"\\([\\'])") out_pattern = re.compile(r"^(?:[-!()*+,.:<=>\w\s|%s])*$") missing_apostrophe = "invalid character in SOQL or a missing apostrophe" start = 0 out = [] params = [] for match in pm_pattern.finditer(sql): out.append(sql[start:match.start()]) assert out_pattern.match(sql[start:match.start()]), missing_apostrophe params.append(bs_pattern.sub('\\1', sql[match.start() + 1:match.end() - 1])) start = match.end() out.append(sql[start:]) assert out_pattern.match(sql[start:]), missing_apostrophe return '@'.join(out), params
python
def mark_quoted_strings(sql): """Mark all quoted strings in the SOQL by '@' and get them as params, with respect to all escaped backslashes and quotes. """ # pattern of a string parameter (pm), a char escaped by backslash (bs) # out_pattern: characters valid in SOQL pm_pattern = re.compile(r"'[^\\']*(?:\\[\\'][^\\']*)*'") bs_pattern = re.compile(r"\\([\\'])") out_pattern = re.compile(r"^(?:[-!()*+,.:<=>\w\s|%s])*$") missing_apostrophe = "invalid character in SOQL or a missing apostrophe" start = 0 out = [] params = [] for match in pm_pattern.finditer(sql): out.append(sql[start:match.start()]) assert out_pattern.match(sql[start:match.start()]), missing_apostrophe params.append(bs_pattern.sub('\\1', sql[match.start() + 1:match.end() - 1])) start = match.end() out.append(sql[start:]) assert out_pattern.match(sql[start:]), missing_apostrophe return '@'.join(out), params
[ "def", "mark_quoted_strings", "(", "sql", ")", ":", "# pattern of a string parameter (pm), a char escaped by backslash (bs)", "# out_pattern: characters valid in SOQL", "pm_pattern", "=", "re", ".", "compile", "(", "r\"'[^\\\\']*(?:\\\\[\\\\'][^\\\\']*)*'\"", ")", "bs_pattern", "=", "re", ".", "compile", "(", "r\"\\\\([\\\\'])\"", ")", "out_pattern", "=", "re", ".", "compile", "(", "r\"^(?:[-!()*+,.:<=>\\w\\s|%s])*$\"", ")", "missing_apostrophe", "=", "\"invalid character in SOQL or a missing apostrophe\"", "start", "=", "0", "out", "=", "[", "]", "params", "=", "[", "]", "for", "match", "in", "pm_pattern", ".", "finditer", "(", "sql", ")", ":", "out", ".", "append", "(", "sql", "[", "start", ":", "match", ".", "start", "(", ")", "]", ")", "assert", "out_pattern", ".", "match", "(", "sql", "[", "start", ":", "match", ".", "start", "(", ")", "]", ")", ",", "missing_apostrophe", "params", ".", "append", "(", "bs_pattern", ".", "sub", "(", "'\\\\1'", ",", "sql", "[", "match", ".", "start", "(", ")", "+", "1", ":", "match", ".", "end", "(", ")", "-", "1", "]", ")", ")", "start", "=", "match", ".", "end", "(", ")", "out", ".", "append", "(", "sql", "[", "start", ":", "]", ")", "assert", "out_pattern", ".", "match", "(", "sql", "[", "start", ":", "]", ")", ",", "missing_apostrophe", "return", "'@'", ".", "join", "(", "out", ")", ",", "params" ]
Mark all quoted strings in the SOQL by '@' and get them as params, with respect to all escaped backslashes and quotes.
[ "Mark", "all", "quoted", "strings", "in", "the", "SOQL", "by" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/subselect.py#L194-L214
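An indicative example of the transformation described above (outputs shown as comments, assuming the character-set assertions pass):

soql = "SELECT Name FROM Contact WHERE LastName = 'O\\'Hara' AND City = 'Prague'"
marked, params = mark_quoted_strings(soql)
# marked -> "SELECT Name FROM Contact WHERE LastName = @ AND City = @"
# params -> ["O'Hara", 'Prague']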
7,931
django-salesforce/django-salesforce
salesforce/dbapi/subselect.py
subst_quoted_strings
def subst_quoted_strings(sql, params): """Reverse operation to mark_quoted_strings - substitutes '@' by params. """ parts = sql.split('@') params_dont_match = "number of parameters doesn' match the transformed query" assert len(parts) == len(params) + 1, params_dont_match # would be internal error out = [] for i, param in enumerate(params): out.append(parts[i]) out.append("'%s'" % param.replace('\\', '\\\\').replace("\'", "\\\'")) out.append(parts[-1]) return ''.join(out)
python
def subst_quoted_strings(sql, params): """Reverse operation to mark_quoted_strings - substitutes '@' by params. """ parts = sql.split('@') params_dont_match = "number of parameters doesn' match the transformed query" assert len(parts) == len(params) + 1, params_dont_match # would be internal error out = [] for i, param in enumerate(params): out.append(parts[i]) out.append("'%s'" % param.replace('\\', '\\\\').replace("\'", "\\\'")) out.append(parts[-1]) return ''.join(out)
[ "def", "subst_quoted_strings", "(", "sql", ",", "params", ")", ":", "parts", "=", "sql", ".", "split", "(", "'@'", ")", "params_dont_match", "=", "\"number of parameters doesn' match the transformed query\"", "assert", "len", "(", "parts", ")", "==", "len", "(", "params", ")", "+", "1", ",", "params_dont_match", "# would be internal error", "out", "=", "[", "]", "for", "i", ",", "param", "in", "enumerate", "(", "params", ")", ":", "out", ".", "append", "(", "parts", "[", "i", "]", ")", "out", ".", "append", "(", "\"'%s'\"", "%", "param", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", ".", "replace", "(", "\"\\'\"", ",", "\"\\\\\\'\"", ")", ")", "out", ".", "append", "(", "parts", "[", "-", "1", "]", ")", "return", "''", ".", "join", "(", "out", ")" ]
Reverse operation to mark_quoted_strings - substitutes '@' by params.
[ "Reverse", "operation", "to", "mark_quoted_strings", "-", "substitutes" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/subselect.py#L217-L228
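Together with mark_quoted_strings() this gives a round trip, e.g. (indicative):

marked, params = mark_quoted_strings("SELECT Id FROM Account WHERE Name = 'Acme'")
# marked -> "SELECT Id FROM Account WHERE Name = @",  params -> ['Acme']
assert subst_quoted_strings(marked, params) == "SELECT Id FROM Account WHERE Name = 'Acme'"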
7,932
django-salesforce/django-salesforce
salesforce/dbapi/subselect.py
find_closing_parenthesis
def find_closing_parenthesis(sql, startpos): """Find the pair of opening and closing parentheses. Starts search at the position startpos. Returns tuple of positions (opening, closing) if search succeeds, otherwise None. """ pattern = re.compile(r'[()]') level = 0 opening = [] for match in pattern.finditer(sql, startpos): par = match.group() if par == '(': if level == 0: opening = match.start() level += 1 if par == ')': assert level > 0, "missing '(' before ')'" level -= 1 if level == 0: closing = match.end() return opening, closing
python
def find_closing_parenthesis(sql, startpos): """Find the pair of opening and closing parentheses. Starts search at the position startpos. Returns tuple of positions (opening, closing) if search succeeds, otherwise None. """ pattern = re.compile(r'[()]') level = 0 opening = [] for match in pattern.finditer(sql, startpos): par = match.group() if par == '(': if level == 0: opening = match.start() level += 1 if par == ')': assert level > 0, "missing '(' before ')'" level -= 1 if level == 0: closing = match.end() return opening, closing
[ "def", "find_closing_parenthesis", "(", "sql", ",", "startpos", ")", ":", "pattern", "=", "re", ".", "compile", "(", "r'[()]'", ")", "level", "=", "0", "opening", "=", "[", "]", "for", "match", "in", "pattern", ".", "finditer", "(", "sql", ",", "startpos", ")", ":", "par", "=", "match", ".", "group", "(", ")", "if", "par", "==", "'('", ":", "if", "level", "==", "0", ":", "opening", "=", "match", ".", "start", "(", ")", "level", "+=", "1", "if", "par", "==", "')'", ":", "assert", "level", ">", "0", ",", "\"missing '(' before ')'\"", "level", "-=", "1", "if", "level", "==", "0", ":", "closing", "=", "match", ".", "end", "(", ")", "return", "opening", ",", "closing" ]
Find the pair of opening and closing parentheses. Starts search at the position startpos. Returns tuple of positions (opening, closing) if search succeeds, otherwise None.
[ "Find", "the", "pair", "of", "opening", "and", "closing", "parentheses", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/subselect.py#L231-L251
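For example (indicative):

soql = "SELECT Id, (SELECT Name FROM Contacts) FROM Account"
begin, end = find_closing_parenthesis(soql, 0)
print(soql[begin:end])  # -> (SELECT Name FROM Contacts)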
7,933
django-salesforce/django-salesforce
salesforce/dbapi/subselect.py
split_subquery
def split_subquery(sql): """Split on subqueries and replace them by '&'.""" sql, params = mark_quoted_strings(sql) sql = simplify_expression(sql) _ = params # NOQA start = 0 out = [] subqueries = [] pattern = re.compile(r'\(SELECT\b', re.I) match = pattern.search(sql, start) while match: out.append(sql[start:match.start() + 1] + '&') start, pos = find_closing_parenthesis(sql, match.start()) start, pos = start + 1, pos - 1 subqueries.append(split_subquery(sql[start:pos])) start = pos match = pattern.search(sql, start) out.append(sql[start:len(sql)]) return ''.join(out), subqueries
python
def split_subquery(sql): """Split on subqueries and replace them by '&'.""" sql, params = mark_quoted_strings(sql) sql = simplify_expression(sql) _ = params # NOQA start = 0 out = [] subqueries = [] pattern = re.compile(r'\(SELECT\b', re.I) match = pattern.search(sql, start) while match: out.append(sql[start:match.start() + 1] + '&') start, pos = find_closing_parenthesis(sql, match.start()) start, pos = start + 1, pos - 1 subqueries.append(split_subquery(sql[start:pos])) start = pos match = pattern.search(sql, start) out.append(sql[start:len(sql)]) return ''.join(out), subqueries
[ "def", "split_subquery", "(", "sql", ")", ":", "sql", ",", "params", "=", "mark_quoted_strings", "(", "sql", ")", "sql", "=", "simplify_expression", "(", "sql", ")", "_", "=", "params", "# NOQA", "start", "=", "0", "out", "=", "[", "]", "subqueries", "=", "[", "]", "pattern", "=", "re", ".", "compile", "(", "r'\\(SELECT\\b'", ",", "re", ".", "I", ")", "match", "=", "pattern", ".", "search", "(", "sql", ",", "start", ")", "while", "match", ":", "out", ".", "append", "(", "sql", "[", "start", ":", "match", ".", "start", "(", ")", "+", "1", "]", "+", "'&'", ")", "start", ",", "pos", "=", "find_closing_parenthesis", "(", "sql", ",", "match", ".", "start", "(", ")", ")", "start", ",", "pos", "=", "start", "+", "1", ",", "pos", "-", "1", "subqueries", ".", "append", "(", "split_subquery", "(", "sql", "[", "start", ":", "pos", "]", ")", ")", "start", "=", "pos", "match", "=", "pattern", ".", "search", "(", "sql", ",", "start", ")", "out", ".", "append", "(", "sql", "[", "start", ":", "len", "(", "sql", ")", "]", ")", "return", "''", ".", "join", "(", "out", ")", ",", "subqueries" ]
Split on subqueries and replace them by '&'.
[ "Split", "on", "subqueries", "and", "replace", "them", "by", "&", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/subselect.py#L268-L286
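An indicative example of the splitting (exact spacing also depends on simplify_expression() and the RESERVED_WORDS list, so treat the outputs as approximate):

soql = ("SELECT Id, (SELECT Name FROM Contacts) FROM Account "
        "WHERE Id IN (SELECT AccountId FROM Opportunity)")
outer, subqueries = split_subquery(soql)
# outer      -> 'SELECT Id, (&) FROM Account WHERE Id IN (&)'
# subqueries -> [('SELECT Name FROM Contacts', []), ('SELECT AccountId FROM Opportunity', [])]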
7,934
django-salesforce/django-salesforce
salesforce/dbapi/subselect.py
simplify_expression
def simplify_expression(txt): """Remove all unecessary whitespace and some very usual space""" minimal = re.sub(r'\s', ' ', re.sub(r'\s(?=\W)', '', re.sub(r'(?<=\W)\s', '', txt.strip()))) # add space before some "(" and after some ")" return re.sub(r'\)(?=\w)', ') ', re.sub(r'(,|\b(?:{}))\('.format('|'.join(RESERVED_WORDS)), '\\1 (', minimal) )
python
def simplify_expression(txt): """Remove all unecessary whitespace and some very usual space""" minimal = re.sub(r'\s', ' ', re.sub(r'\s(?=\W)', '', re.sub(r'(?<=\W)\s', '', txt.strip()))) # add space before some "(" and after some ")" return re.sub(r'\)(?=\w)', ') ', re.sub(r'(,|\b(?:{}))\('.format('|'.join(RESERVED_WORDS)), '\\1 (', minimal) )
[ "def", "simplify_expression", "(", "txt", ")", ":", "minimal", "=", "re", ".", "sub", "(", "r'\\s'", ",", "' '", ",", "re", ".", "sub", "(", "r'\\s(?=\\W)'", ",", "''", ",", "re", ".", "sub", "(", "r'(?<=\\W)\\s'", ",", "''", ",", "txt", ".", "strip", "(", ")", ")", ")", ")", "# add space before some \"(\" and after some \")\"", "return", "re", ".", "sub", "(", "r'\\)(?=\\w)'", ",", "') '", ",", "re", ".", "sub", "(", "r'(,|\\b(?:{}))\\('", ".", "format", "(", "'|'", ".", "join", "(", "RESERVED_WORDS", ")", ")", ",", "'\\\\1 ('", ",", "minimal", ")", ")" ]
Remove all unecessary whitespace and some very usual space
[ "Remove", "all", "unecessary", "whitespace", "and", "some", "very", "usual", "space" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/subselect.py#L289-L298
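For example, redundant whitespace collapses while spaces next to word characters are preserved (traced from the regexes above; indicative):

print(simplify_expression('SELECT  Id ,  Name   FROM   Contact'))
# -> 'SELECT Id,Name FROM Contact'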
7,935
django-salesforce/django-salesforce
salesforce/dbapi/subselect.py
QQuery._make_flat
def _make_flat(self, row_dict, path, subroots): """Replace the nested dict objects by a flat dict with keys "object.object.name".""" # can get a cursor parameter, if introspection should be possible on the fly out = {} for k, v in row_dict.items(): klc = k.lower() # "key lower case" if (not (isinstance(v, dict) and 'attributes' in v) or ('done' in v and 'records' in v and 'totalSize' in v)): # : if klc not in subroots: out[klc] = v else: strpath = '.'.join(path + (klc,)) + '.' strip_pos = len(strpath) - len(klc + '.') for alias in self.aliases: if alias.lower().startswith(strpath): out[alias.lower()[strip_pos:]] = None # empty outer join field names else: new_subroots = subroots[klc] if k != 'attributes' else {} for sub_k, sub_v in self._make_flat(v, path + (klc,), new_subroots).items(): out[k.lower() + '.' + sub_k] = sub_v return out
python
def _make_flat(self, row_dict, path, subroots): """Replace the nested dict objects by a flat dict with keys "object.object.name".""" # can get a cursor parameter, if introspection should be possible on the fly out = {} for k, v in row_dict.items(): klc = k.lower() # "key lower case" if (not (isinstance(v, dict) and 'attributes' in v) or ('done' in v and 'records' in v and 'totalSize' in v)): # : if klc not in subroots: out[klc] = v else: strpath = '.'.join(path + (klc,)) + '.' strip_pos = len(strpath) - len(klc + '.') for alias in self.aliases: if alias.lower().startswith(strpath): out[alias.lower()[strip_pos:]] = None # empty outer join field names else: new_subroots = subroots[klc] if k != 'attributes' else {} for sub_k, sub_v in self._make_flat(v, path + (klc,), new_subroots).items(): out[k.lower() + '.' + sub_k] = sub_v return out
[ "def", "_make_flat", "(", "self", ",", "row_dict", ",", "path", ",", "subroots", ")", ":", "# can get a cursor parameter, if introspection should be possible on the fly", "out", "=", "{", "}", "for", "k", ",", "v", "in", "row_dict", ".", "items", "(", ")", ":", "klc", "=", "k", ".", "lower", "(", ")", "# \"key lower case\"", "if", "(", "not", "(", "isinstance", "(", "v", ",", "dict", ")", "and", "'attributes'", "in", "v", ")", "or", "(", "'done'", "in", "v", "and", "'records'", "in", "v", "and", "'totalSize'", "in", "v", ")", ")", ":", "# :", "if", "klc", "not", "in", "subroots", ":", "out", "[", "klc", "]", "=", "v", "else", ":", "strpath", "=", "'.'", ".", "join", "(", "path", "+", "(", "klc", ",", ")", ")", "+", "'.'", "strip_pos", "=", "len", "(", "strpath", ")", "-", "len", "(", "klc", "+", "'.'", ")", "for", "alias", "in", "self", ".", "aliases", ":", "if", "alias", ".", "lower", "(", ")", ".", "startswith", "(", "strpath", ")", ":", "out", "[", "alias", ".", "lower", "(", ")", "[", "strip_pos", ":", "]", "]", "=", "None", "# empty outer join field names", "else", ":", "new_subroots", "=", "subroots", "[", "klc", "]", "if", "k", "!=", "'attributes'", "else", "{", "}", "for", "sub_k", ",", "sub_v", "in", "self", ".", "_make_flat", "(", "v", ",", "path", "+", "(", "klc", ",", ")", ",", "new_subroots", ")", ".", "items", "(", ")", ":", "out", "[", "k", ".", "lower", "(", ")", "+", "'.'", "+", "sub_k", "]", "=", "sub_v", "return", "out" ]
Replace the nested dict objects by a flat dict with keys "object.object.name".
[ "Replace", "the", "nested", "dict", "objects", "by", "a", "flat", "dict", "with", "keys", "object", ".", "object", ".", "name", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/subselect.py#L128-L149
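Stripped of the alias and subroot bookkeeping, the idea is a recursive flattening of nested SFDC records into dotted lower-case keys. A minimal standalone sketch (not the library method):

def flatten(record, prefix=''):
    # turn {'Account': {'Name': 'Acme', ...}} into {'account.name': 'Acme'},
    # skipping the 'attributes' metadata dict that SFDC adds to every record
    out = {}
    for key, value in record.items():
        if key == 'attributes':
            continue
        if isinstance(value, dict) and 'attributes' in value:
            out.update(flatten(value, prefix + key.lower() + '.'))
        else:
            out[prefix + key.lower()] = value
    return out

row = {'attributes': {'type': 'Contact'}, 'LastName': 'Rose',
       'Account': {'attributes': {'type': 'Account'}, 'Name': 'Acme'}}
print(flatten(row))  # -> {'lastname': 'Rose', 'account.name': 'Acme'}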
7,936
django-salesforce/django-salesforce
salesforce/dbapi/subselect.py
QQuery.parse_rest_response
def parse_rest_response(self, records, rowcount, row_type=list): """Parse the REST API response to DB API cursor flat response""" if self.is_plain_count: # result of "SELECT COUNT() FROM ... WHERE ..." assert list(records) == [] yield rowcount # originally [resp.json()['totalSize']] else: while True: for row_deep in records: assert self.is_aggregation == (row_deep['attributes']['type'] == 'AggregateResult') row_flat = self._make_flat(row_deep, path=(), subroots=self.subroots) # TODO Will be the expression "or x['done']" really correct also for long subrequests? assert all(not isinstance(x, dict) or x['done'] for x in row_flat) if issubclass(row_type, dict): yield {k: fix_data_type(row_flat[k.lower()]) for k in self.aliases} else: yield [fix_data_type(row_flat[k.lower()]) for k in self.aliases] # if not resp['done']: # if not cursor: # raise ProgrammingError("Must get a cursor") # resp = cursor.query_more(resp['nextRecordsUrl']).json() # else: # break break
python
def parse_rest_response(self, records, rowcount, row_type=list): """Parse the REST API response to DB API cursor flat response""" if self.is_plain_count: # result of "SELECT COUNT() FROM ... WHERE ..." assert list(records) == [] yield rowcount # originally [resp.json()['totalSize']] else: while True: for row_deep in records: assert self.is_aggregation == (row_deep['attributes']['type'] == 'AggregateResult') row_flat = self._make_flat(row_deep, path=(), subroots=self.subroots) # TODO Will be the expression "or x['done']" really correct also for long subrequests? assert all(not isinstance(x, dict) or x['done'] for x in row_flat) if issubclass(row_type, dict): yield {k: fix_data_type(row_flat[k.lower()]) for k in self.aliases} else: yield [fix_data_type(row_flat[k.lower()]) for k in self.aliases] # if not resp['done']: # if not cursor: # raise ProgrammingError("Must get a cursor") # resp = cursor.query_more(resp['nextRecordsUrl']).json() # else: # break break
[ "def", "parse_rest_response", "(", "self", ",", "records", ",", "rowcount", ",", "row_type", "=", "list", ")", ":", "if", "self", ".", "is_plain_count", ":", "# result of \"SELECT COUNT() FROM ... WHERE ...\"", "assert", "list", "(", "records", ")", "==", "[", "]", "yield", "rowcount", "# originally [resp.json()['totalSize']]", "else", ":", "while", "True", ":", "for", "row_deep", "in", "records", ":", "assert", "self", ".", "is_aggregation", "==", "(", "row_deep", "[", "'attributes'", "]", "[", "'type'", "]", "==", "'AggregateResult'", ")", "row_flat", "=", "self", ".", "_make_flat", "(", "row_deep", ",", "path", "=", "(", ")", ",", "subroots", "=", "self", ".", "subroots", ")", "# TODO Will be the expression \"or x['done']\" really correct also for long subrequests?", "assert", "all", "(", "not", "isinstance", "(", "x", ",", "dict", ")", "or", "x", "[", "'done'", "]", "for", "x", "in", "row_flat", ")", "if", "issubclass", "(", "row_type", ",", "dict", ")", ":", "yield", "{", "k", ":", "fix_data_type", "(", "row_flat", "[", "k", ".", "lower", "(", ")", "]", ")", "for", "k", "in", "self", ".", "aliases", "}", "else", ":", "yield", "[", "fix_data_type", "(", "row_flat", "[", "k", ".", "lower", "(", ")", "]", ")", "for", "k", "in", "self", ".", "aliases", "]", "# if not resp['done']:", "# if not cursor:", "# raise ProgrammingError(\"Must get a cursor\")", "# resp = cursor.query_more(resp['nextRecordsUrl']).json()", "# else:", "# break", "break" ]
Parse the REST API response to DB API cursor flat response
[ "Parse", "the", "REST", "API", "response", "to", "DB", "API", "cursor", "flat", "response" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/subselect.py#L151-L174
7,937
django-salesforce/django-salesforce
salesforce/models.py
make_dynamic_fields
def make_dynamic_fields(pattern_module, dynamic_field_patterns, attrs): """Add some Salesforce fields from a pattern_module models.py Parameters: pattern_module: Module where to search additional fields settings. It is an imported module created by introspection (inspectdb), usually named `models_template.py`. (You will probably not add it to version control for you because the diffs are frequent and huge.) dynamic_field_patterns: List of regular expression for Salesforce field names that should be included automatically into the model. attrs: Input/Output dictionary of model attributes. (no need to worry, added automatically) The patterns are applied sequentionally. If the pattern starts with "-" then the matched names are excluded. The search stops after the first match. A normal field that exists directly in a class is never rewritten by a dynamic field.. All ForeingKey fields should be created explicitely. (For now to prevent possible issues and also for better readibility of the model. The automatic "dynamic" fields are intended especially for "maybe can be useful" fields and will work with ForeignKey in simple cases, e.g. without Proxy models etc. Works good for me.) This is useful for development: Many fields or all fields can be easily accessed by the model without a huge code. Finally all wildcard fields except the explicit names can be removed when the development is ready or . If you create migrations, you probably want to disable "dynamic_field_patterns" by setting them empty. Example: Meta: db_table = 'Contact' dynamic_patterns = exported.models, ['Last', '.*Date$'] """ # pylint:disable=invalid-name,too-many-branches,too-many-locals import re attr_meta = attrs['Meta'] db_table = getattr(attr_meta, 'db_table', None) if not db_table: raise RuntimeError('The "db_table" must be set in Meta if "dynamic_field_patterns" is used.') is_custom_model = getattr(attr_meta, 'custom', False) patterns = [] for pat in dynamic_field_patterns: enabled = True if pat.startswith('-'): enabled = False pat = pat[1:] patterns.append((enabled, re.compile(r'^(?:{})$'.format(pat), re.I))) used_columns = [] for name, attr in attrs.items(): if isinstance(attr, SfField): field = attr if field.sf_custom is None and is_custom_model: field.sf_custom = True if not field.name: field.name = name attname, column = field.get_attname_column() # pylint:disable=unused-variable used_columns.append(column) if not pattern_module: raise RuntimeError("a pattern_module is required for dynamic fields.") for name, obj in vars(pattern_module).items(): if not name.startswith('_') and isclass(obj) and issubclass(obj, ModelTemplate): default_table = obj.__name__ if getattr(getattr(obj, 'Meta', None), 'db_table', default_table) == db_table: cls = obj break else: # not found db_table model, but decide between warning or exception if any(not x.startswith('__') for x in dir(pattern_module)): raise RuntimeError("No Model for table '%s' found in the module '%s'" % (db_table, pattern_module.__name__)) warnings.warn("The module '%s' is empty. (It is OK if you are " "rewriting new Models by pipe from inspectdb command.)" % pattern_module.__name__) return lazy_fields = [(name, obj) for name, obj in vars(cls).items() if isinstance(obj, LazyField) and issubclass(obj.klass, SfField) ] for name, obj in sorted(lazy_fields, key=lambda name_obj: name_obj[1].counter): for enabled, pat in patterns: if pat.match(name): break else: enabled = False if enabled: if issubclass(obj.klass, ForeignKey): to = obj.kw['to'] if isclass(to) and issubclass(to, ModelTemplate): obj.kw['to'] = to.__name__ field = obj.create() attrs[name] = field assert pattern_module
python
def make_dynamic_fields(pattern_module, dynamic_field_patterns, attrs): """Add some Salesforce fields from a pattern_module models.py Parameters: pattern_module: Module where to search additional fields settings. It is an imported module created by introspection (inspectdb), usually named `models_template.py`. (You will probably not add it to version control for you because the diffs are frequent and huge.) dynamic_field_patterns: List of regular expression for Salesforce field names that should be included automatically into the model. attrs: Input/Output dictionary of model attributes. (no need to worry, added automatically) The patterns are applied sequentionally. If the pattern starts with "-" then the matched names are excluded. The search stops after the first match. A normal field that exists directly in a class is never rewritten by a dynamic field.. All ForeingKey fields should be created explicitely. (For now to prevent possible issues and also for better readibility of the model. The automatic "dynamic" fields are intended especially for "maybe can be useful" fields and will work with ForeignKey in simple cases, e.g. without Proxy models etc. Works good for me.) This is useful for development: Many fields or all fields can be easily accessed by the model without a huge code. Finally all wildcard fields except the explicit names can be removed when the development is ready or . If you create migrations, you probably want to disable "dynamic_field_patterns" by setting them empty. Example: Meta: db_table = 'Contact' dynamic_patterns = exported.models, ['Last', '.*Date$'] """ # pylint:disable=invalid-name,too-many-branches,too-many-locals import re attr_meta = attrs['Meta'] db_table = getattr(attr_meta, 'db_table', None) if not db_table: raise RuntimeError('The "db_table" must be set in Meta if "dynamic_field_patterns" is used.') is_custom_model = getattr(attr_meta, 'custom', False) patterns = [] for pat in dynamic_field_patterns: enabled = True if pat.startswith('-'): enabled = False pat = pat[1:] patterns.append((enabled, re.compile(r'^(?:{})$'.format(pat), re.I))) used_columns = [] for name, attr in attrs.items(): if isinstance(attr, SfField): field = attr if field.sf_custom is None and is_custom_model: field.sf_custom = True if not field.name: field.name = name attname, column = field.get_attname_column() # pylint:disable=unused-variable used_columns.append(column) if not pattern_module: raise RuntimeError("a pattern_module is required for dynamic fields.") for name, obj in vars(pattern_module).items(): if not name.startswith('_') and isclass(obj) and issubclass(obj, ModelTemplate): default_table = obj.__name__ if getattr(getattr(obj, 'Meta', None), 'db_table', default_table) == db_table: cls = obj break else: # not found db_table model, but decide between warning or exception if any(not x.startswith('__') for x in dir(pattern_module)): raise RuntimeError("No Model for table '%s' found in the module '%s'" % (db_table, pattern_module.__name__)) warnings.warn("The module '%s' is empty. (It is OK if you are " "rewriting new Models by pipe from inspectdb command.)" % pattern_module.__name__) return lazy_fields = [(name, obj) for name, obj in vars(cls).items() if isinstance(obj, LazyField) and issubclass(obj.klass, SfField) ] for name, obj in sorted(lazy_fields, key=lambda name_obj: name_obj[1].counter): for enabled, pat in patterns: if pat.match(name): break else: enabled = False if enabled: if issubclass(obj.klass, ForeignKey): to = obj.kw['to'] if isclass(to) and issubclass(to, ModelTemplate): obj.kw['to'] = to.__name__ field = obj.create() attrs[name] = field assert pattern_module
[ "def", "make_dynamic_fields", "(", "pattern_module", ",", "dynamic_field_patterns", ",", "attrs", ")", ":", "# pylint:disable=invalid-name,too-many-branches,too-many-locals", "import", "re", "attr_meta", "=", "attrs", "[", "'Meta'", "]", "db_table", "=", "getattr", "(", "attr_meta", ",", "'db_table'", ",", "None", ")", "if", "not", "db_table", ":", "raise", "RuntimeError", "(", "'The \"db_table\" must be set in Meta if \"dynamic_field_patterns\" is used.'", ")", "is_custom_model", "=", "getattr", "(", "attr_meta", ",", "'custom'", ",", "False", ")", "patterns", "=", "[", "]", "for", "pat", "in", "dynamic_field_patterns", ":", "enabled", "=", "True", "if", "pat", ".", "startswith", "(", "'-'", ")", ":", "enabled", "=", "False", "pat", "=", "pat", "[", "1", ":", "]", "patterns", ".", "append", "(", "(", "enabled", ",", "re", ".", "compile", "(", "r'^(?:{})$'", ".", "format", "(", "pat", ")", ",", "re", ".", "I", ")", ")", ")", "used_columns", "=", "[", "]", "for", "name", ",", "attr", "in", "attrs", ".", "items", "(", ")", ":", "if", "isinstance", "(", "attr", ",", "SfField", ")", ":", "field", "=", "attr", "if", "field", ".", "sf_custom", "is", "None", "and", "is_custom_model", ":", "field", ".", "sf_custom", "=", "True", "if", "not", "field", ".", "name", ":", "field", ".", "name", "=", "name", "attname", ",", "column", "=", "field", ".", "get_attname_column", "(", ")", "# pylint:disable=unused-variable", "used_columns", ".", "append", "(", "column", ")", "if", "not", "pattern_module", ":", "raise", "RuntimeError", "(", "\"a pattern_module is required for dynamic fields.\"", ")", "for", "name", ",", "obj", "in", "vars", "(", "pattern_module", ")", ".", "items", "(", ")", ":", "if", "not", "name", ".", "startswith", "(", "'_'", ")", "and", "isclass", "(", "obj", ")", "and", "issubclass", "(", "obj", ",", "ModelTemplate", ")", ":", "default_table", "=", "obj", ".", "__name__", "if", "getattr", "(", "getattr", "(", "obj", ",", "'Meta'", ",", "None", ")", ",", "'db_table'", ",", "default_table", ")", "==", "db_table", ":", "cls", "=", "obj", "break", "else", ":", "# not found db_table model, but decide between warning or exception", "if", "any", "(", "not", "x", ".", "startswith", "(", "'__'", ")", "for", "x", "in", "dir", "(", "pattern_module", ")", ")", ":", "raise", "RuntimeError", "(", "\"No Model for table '%s' found in the module '%s'\"", "%", "(", "db_table", ",", "pattern_module", ".", "__name__", ")", ")", "warnings", ".", "warn", "(", "\"The module '%s' is empty. 
(It is OK if you are \"", "\"rewriting new Models by pipe from inspectdb command.)\"", "%", "pattern_module", ".", "__name__", ")", "return", "lazy_fields", "=", "[", "(", "name", ",", "obj", ")", "for", "name", ",", "obj", "in", "vars", "(", "cls", ")", ".", "items", "(", ")", "if", "isinstance", "(", "obj", ",", "LazyField", ")", "and", "issubclass", "(", "obj", ".", "klass", ",", "SfField", ")", "]", "for", "name", ",", "obj", "in", "sorted", "(", "lazy_fields", ",", "key", "=", "lambda", "name_obj", ":", "name_obj", "[", "1", "]", ".", "counter", ")", ":", "for", "enabled", ",", "pat", "in", "patterns", ":", "if", "pat", ".", "match", "(", "name", ")", ":", "break", "else", ":", "enabled", "=", "False", "if", "enabled", ":", "if", "issubclass", "(", "obj", ".", "klass", ",", "ForeignKey", ")", ":", "to", "=", "obj", ".", "kw", "[", "'to'", "]", "if", "isclass", "(", "to", ")", "and", "issubclass", "(", "to", ",", "ModelTemplate", ")", ":", "obj", ".", "kw", "[", "'to'", "]", "=", "to", ".", "__name__", "field", "=", "obj", ".", "create", "(", ")", "attrs", "[", "name", "]", "=", "field", "assert", "pattern_module" ]
Add some Salesforce fields from a pattern_module models.py Parameters: pattern_module: Module where to search additional fields settings. It is an imported module created by introspection (inspectdb), usually named `models_template.py`. (You will probably not add it to version control for you because the diffs are frequent and huge.) dynamic_field_patterns: List of regular expression for Salesforce field names that should be included automatically into the model. attrs: Input/Output dictionary of model attributes. (no need to worry, added automatically) The patterns are applied sequentionally. If the pattern starts with "-" then the matched names are excluded. The search stops after the first match. A normal field that exists directly in a class is never rewritten by a dynamic field.. All ForeingKey fields should be created explicitely. (For now to prevent possible issues and also for better readibility of the model. The automatic "dynamic" fields are intended especially for "maybe can be useful" fields and will work with ForeignKey in simple cases, e.g. without Proxy models etc. Works good for me.) This is useful for development: Many fields or all fields can be easily accessed by the model without a huge code. Finally all wildcard fields except the explicit names can be removed when the development is ready or . If you create migrations, you probably want to disable "dynamic_field_patterns" by setting them empty. Example: Meta: db_table = 'Contact' dynamic_patterns = exported.models, ['Last', '.*Date$']
[ "Add", "some", "Salesforce", "fields", "from", "a", "pattern_module", "models", ".", "py" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/models.py#L103-L199
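The pattern selection itself (patterns are tried in order, the first match wins, and a leading '-' excludes) can be shown with a small standalone sketch; the field names and patterns are made up:

import re

patterns = []
for pat in ['-LastModifiedDate', '.*Date$', 'Last.*']:
    enabled = not pat.startswith('-')
    patterns.append((enabled, re.compile(r'^(?:{})$'.format(pat.lstrip('-')), re.I)))

def is_selected(field_name):
    for enabled, pat in patterns:
        if pat.match(field_name):
            return enabled
    return False  # no pattern matched

print([f for f in ['LastName', 'CloseDate', 'LastModifiedDate', 'Phone'] if is_selected(f)])
# -> ['LastName', 'CloseDate']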
7,938
django-salesforce/django-salesforce
salesforce/dbapi/exceptions.py
prepare_exception
def prepare_exception(obj, messages=None, response=None, verbs=None): """Prepare excetion params or only an exception message parameters: messages: list of strings, that will be separated by new line response: response from a request to SFDC REST API verbs: list of options about verbosity """ # pylint:disable=too-many-branches verbs = set(verbs or []) known_options = ['method+url'] if messages is None: messages = [] if isinstance(messages, (text_type, str)): messages = [messages] assert isinstance(messages, list) assert not verbs.difference(known_options) data = None # a boolean from a failed response is False, though error messages in json should be decoded if response is not None and 'json' in response.headers.get('Content-Type', '') and response.text: data = json.loads(response.text) if data: data_0 = data[0] if 'errorCode' in data_0: subreq = '' if 'referenceId' in data_0: subreq = " (in subrequest {!r})".format(data_0['referenceId']) messages = [data_0['errorCode'] + subreq] + messages if data_0.get('fields'): messages.append('FIELDS: {}'.format(data_0['fields'])) if len(data) > 1: messages.append('MORE_ERRORS ({})'.format(len(data))) if 'method+url' in verbs: method = response.request.method url = response.request.url if len(url) > 100: url = url[:100] + '...' data_info = '' if (method in ('POST', 'PATCH') and (not response.request.body or 'json' not in response.request.headers['content-type'])): data_info = ' (without json request data)' messages.append('in {} "{}"{}'.format(method, url, data_info)) separ = '\n ' if not PY3: messages = [x if isinstance(x, str) else x.encode('utf-8') for x in messages] messages = [x.replace('\n', separ) for x in messages] message = separ.join(messages) if obj: obj.data = data obj.response = response obj.verbs = verbs return message
python
def prepare_exception(obj, messages=None, response=None, verbs=None): """Prepare excetion params or only an exception message parameters: messages: list of strings, that will be separated by new line response: response from a request to SFDC REST API verbs: list of options about verbosity """ # pylint:disable=too-many-branches verbs = set(verbs or []) known_options = ['method+url'] if messages is None: messages = [] if isinstance(messages, (text_type, str)): messages = [messages] assert isinstance(messages, list) assert not verbs.difference(known_options) data = None # a boolean from a failed response is False, though error messages in json should be decoded if response is not None and 'json' in response.headers.get('Content-Type', '') and response.text: data = json.loads(response.text) if data: data_0 = data[0] if 'errorCode' in data_0: subreq = '' if 'referenceId' in data_0: subreq = " (in subrequest {!r})".format(data_0['referenceId']) messages = [data_0['errorCode'] + subreq] + messages if data_0.get('fields'): messages.append('FIELDS: {}'.format(data_0['fields'])) if len(data) > 1: messages.append('MORE_ERRORS ({})'.format(len(data))) if 'method+url' in verbs: method = response.request.method url = response.request.url if len(url) > 100: url = url[:100] + '...' data_info = '' if (method in ('POST', 'PATCH') and (not response.request.body or 'json' not in response.request.headers['content-type'])): data_info = ' (without json request data)' messages.append('in {} "{}"{}'.format(method, url, data_info)) separ = '\n ' if not PY3: messages = [x if isinstance(x, str) else x.encode('utf-8') for x in messages] messages = [x.replace('\n', separ) for x in messages] message = separ.join(messages) if obj: obj.data = data obj.response = response obj.verbs = verbs return message
[ "def", "prepare_exception", "(", "obj", ",", "messages", "=", "None", ",", "response", "=", "None", ",", "verbs", "=", "None", ")", ":", "# pylint:disable=too-many-branches", "verbs", "=", "set", "(", "verbs", "or", "[", "]", ")", "known_options", "=", "[", "'method+url'", "]", "if", "messages", "is", "None", ":", "messages", "=", "[", "]", "if", "isinstance", "(", "messages", ",", "(", "text_type", ",", "str", ")", ")", ":", "messages", "=", "[", "messages", "]", "assert", "isinstance", "(", "messages", ",", "list", ")", "assert", "not", "verbs", ".", "difference", "(", "known_options", ")", "data", "=", "None", "# a boolean from a failed response is False, though error messages in json should be decoded", "if", "response", "is", "not", "None", "and", "'json'", "in", "response", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "''", ")", "and", "response", ".", "text", ":", "data", "=", "json", ".", "loads", "(", "response", ".", "text", ")", "if", "data", ":", "data_0", "=", "data", "[", "0", "]", "if", "'errorCode'", "in", "data_0", ":", "subreq", "=", "''", "if", "'referenceId'", "in", "data_0", ":", "subreq", "=", "\" (in subrequest {!r})\"", ".", "format", "(", "data_0", "[", "'referenceId'", "]", ")", "messages", "=", "[", "data_0", "[", "'errorCode'", "]", "+", "subreq", "]", "+", "messages", "if", "data_0", ".", "get", "(", "'fields'", ")", ":", "messages", ".", "append", "(", "'FIELDS: {}'", ".", "format", "(", "data_0", "[", "'fields'", "]", ")", ")", "if", "len", "(", "data", ")", ">", "1", ":", "messages", ".", "append", "(", "'MORE_ERRORS ({})'", ".", "format", "(", "len", "(", "data", ")", ")", ")", "if", "'method+url'", "in", "verbs", ":", "method", "=", "response", ".", "request", ".", "method", "url", "=", "response", ".", "request", ".", "url", "if", "len", "(", "url", ")", ">", "100", ":", "url", "=", "url", "[", ":", "100", "]", "+", "'...'", "data_info", "=", "''", "if", "(", "method", "in", "(", "'POST'", ",", "'PATCH'", ")", "and", "(", "not", "response", ".", "request", ".", "body", "or", "'json'", "not", "in", "response", ".", "request", ".", "headers", "[", "'content-type'", "]", ")", ")", ":", "data_info", "=", "' (without json request data)'", "messages", ".", "append", "(", "'in {} \"{}\"{}'", ".", "format", "(", "method", ",", "url", ",", "data_info", ")", ")", "separ", "=", "'\\n '", "if", "not", "PY3", ":", "messages", "=", "[", "x", "if", "isinstance", "(", "x", ",", "str", ")", "else", "x", ".", "encode", "(", "'utf-8'", ")", "for", "x", "in", "messages", "]", "messages", "=", "[", "x", ".", "replace", "(", "'\\n'", ",", "separ", ")", "for", "x", "in", "messages", "]", "message", "=", "separ", ".", "join", "(", "messages", ")", "if", "obj", ":", "obj", ".", "data", "=", "data", "obj", ".", "response", "=", "response", "obj", ".", "verbs", "=", "verbs", "return", "message" ]
Prepare excetion params or only an exception message parameters: messages: list of strings, that will be separated by new line response: response from a request to SFDC REST API verbs: list of options about verbosity
[ "Prepare", "excetion", "params", "or", "only", "an", "exception", "message" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/exceptions.py#L67-L119
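A small indicative example of the message building without a response object (the exact indent in the joined string comes from the separator above):

msg = prepare_exception(None, messages=['INVALID_FIELD', 'problem in the query'])
# msg -> the two strings joined by a newline plus a short indent, roughly
#        'INVALID_FIELD\n  problem in the query'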
7,939
django-salesforce/django-salesforce
salesforce/dbapi/exceptions.py
warn_sf
def warn_sf(messages, response, verbs=None, klass=SalesforceWarning): """Issue a warning SalesforceWarning, with message combined from message and data from SFDC response""" warnings.warn(klass(messages, response, verbs), stacklevel=2)
python
def warn_sf(messages, response, verbs=None, klass=SalesforceWarning): """Issue a warning SalesforceWarning, with message combined from message and data from SFDC response""" warnings.warn(klass(messages, response, verbs), stacklevel=2)
[ "def", "warn_sf", "(", "messages", ",", "response", ",", "verbs", "=", "None", ",", "klass", "=", "SalesforceWarning", ")", ":", "warnings", ".", "warn", "(", "klass", "(", "messages", ",", "response", ",", "verbs", ")", ",", "stacklevel", "=", "2", ")" ]
Issue a warning SalesforceWarning, with message combined from message and data from SFDC response
[ "Issue", "a", "warning", "SalesforceWarning", "with", "message", "combined", "from", "message", "and", "data", "from", "SFDC", "response" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/exceptions.py#L122-L124
7,940
django-salesforce/django-salesforce
salesforce/backend/compiler.py
SQLCompiler.get_from_clause
def get_from_clause(self): """ Return the FROM clause, converted the SOQL dialect. It should be only the name of base object, even in parent-to-child and child-to-parent relationships queries. """ self.query_topology() root_table = self.soql_trans[self.root_alias] return [root_table], []
python
def get_from_clause(self): """ Return the FROM clause, converted the SOQL dialect. It should be only the name of base object, even in parent-to-child and child-to-parent relationships queries. """ self.query_topology() root_table = self.soql_trans[self.root_alias] return [root_table], []
[ "def", "get_from_clause", "(", "self", ")", ":", "self", ".", "query_topology", "(", ")", "root_table", "=", "self", ".", "soql_trans", "[", "self", ".", "root_alias", "]", "return", "[", "root_table", "]", ",", "[", "]" ]
Return the FROM clause, converted the SOQL dialect. It should be only the name of base object, even in parent-to-child and child-to-parent relationships queries.
[ "Return", "the", "FROM", "clause", "converted", "the", "SOQL", "dialect", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/backend/compiler.py#L34-L43
7,941
django-salesforce/django-salesforce
salesforce/backend/compiler.py
SQLCompiler.quote_name_unless_alias
def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. Mostly used during the ORDER BY clause. """ r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r
python
def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. Mostly used during the ORDER BY clause. """ r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r
[ "def", "quote_name_unless_alias", "(", "self", ",", "name", ")", ":", "r", "=", "self", ".", "connection", ".", "ops", ".", "quote_name", "(", "name", ")", "self", ".", "quote_cache", "[", "name", "]", "=", "r", "return", "r" ]
A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. Mostly used during the ORDER BY clause.
[ "A", "wrapper", "around", "connection", ".", "ops", ".", "quote_name", "that", "doesn", "t", "quote", "aliases", "for", "table", "names", ".", "Mostly", "used", "during", "the", "ORDER", "BY", "clause", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/backend/compiler.py#L45-L52
7,942
django-salesforce/django-salesforce
salesforce/utils.py
get_soap_client
def get_soap_client(db_alias, client_class=None): """ Create the SOAP client for the current user logged in the db_alias The default created client is "beatbox.PythonClient", but an alternative client is possible. (i.e. other subtype of beatbox.XMLClient) """ if not beatbox: raise InterfaceError("To use SOAP API, you'll need to install the Beatbox package.") if client_class is None: client_class = beatbox.PythonClient soap_client = client_class() # authenticate connection = connections[db_alias] # verify the authenticated connection, because Beatbox can not refresh the token cursor = connection.cursor() cursor.urls_request() auth_info = connections[db_alias].sf_session.auth access_token = auth_info.get_auth()['access_token'] assert access_token[15] == '!' org_id = access_token[:15] url = '/services/Soap/u/{version}/{org_id}'.format(version=salesforce.API_VERSION, org_id=org_id) soap_client.useSession(access_token, auth_info.instance_url + url) return soap_client
python
def get_soap_client(db_alias, client_class=None): """ Create the SOAP client for the current user logged in the db_alias The default created client is "beatbox.PythonClient", but an alternative client is possible. (i.e. other subtype of beatbox.XMLClient) """ if not beatbox: raise InterfaceError("To use SOAP API, you'll need to install the Beatbox package.") if client_class is None: client_class = beatbox.PythonClient soap_client = client_class() # authenticate connection = connections[db_alias] # verify the authenticated connection, because Beatbox can not refresh the token cursor = connection.cursor() cursor.urls_request() auth_info = connections[db_alias].sf_session.auth access_token = auth_info.get_auth()['access_token'] assert access_token[15] == '!' org_id = access_token[:15] url = '/services/Soap/u/{version}/{org_id}'.format(version=salesforce.API_VERSION, org_id=org_id) soap_client.useSession(access_token, auth_info.instance_url + url) return soap_client
[ "def", "get_soap_client", "(", "db_alias", ",", "client_class", "=", "None", ")", ":", "if", "not", "beatbox", ":", "raise", "InterfaceError", "(", "\"To use SOAP API, you'll need to install the Beatbox package.\"", ")", "if", "client_class", "is", "None", ":", "client_class", "=", "beatbox", ".", "PythonClient", "soap_client", "=", "client_class", "(", ")", "# authenticate", "connection", "=", "connections", "[", "db_alias", "]", "# verify the authenticated connection, because Beatbox can not refresh the token", "cursor", "=", "connection", ".", "cursor", "(", ")", "cursor", ".", "urls_request", "(", ")", "auth_info", "=", "connections", "[", "db_alias", "]", ".", "sf_session", ".", "auth", "access_token", "=", "auth_info", ".", "get_auth", "(", ")", "[", "'access_token'", "]", "assert", "access_token", "[", "15", "]", "==", "'!'", "org_id", "=", "access_token", "[", ":", "15", "]", "url", "=", "'/services/Soap/u/{version}/{org_id}'", ".", "format", "(", "version", "=", "salesforce", ".", "API_VERSION", ",", "org_id", "=", "org_id", ")", "soap_client", ".", "useSession", "(", "access_token", ",", "auth_info", ".", "instance_url", "+", "url", ")", "return", "soap_client" ]
Create the SOAP client for the current user logged in the db_alias The default created client is "beatbox.PythonClient", but an alternative client is possible. (i.e. other subtype of beatbox.XMLClient)
[ "Create", "the", "SOAP", "client", "for", "the", "current", "user", "logged", "in", "the", "db_alias" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/utils.py#L20-L46
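A hedged usage sketch, assuming the optional beatbox package is installed and a 'salesforce' database alias is configured; describeGlobal() is only an assumed example of a call the returned beatbox client may offer:

svc = get_soap_client('salesforce')
print(svc.describeGlobal())  # any subsequent SOAP call reuses the session set above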
7,943
django-salesforce/django-salesforce
salesforce/dbapi/driver.py
signalize_extensions
def signalize_extensions(): """DB API 2.0 extension are reported by warnings at run-time.""" warnings.warn("DB-API extension cursor.rownumber used", SalesforceWarning) warnings.warn("DB-API extension connection.<exception> used", SalesforceWarning) # TODO warnings.warn("DB-API extension cursor.connection used", SalesforceWarning) # not implemented DB-API extension cursor.scroll(, SalesforceWarning) warnings.warn("DB-API extension cursor.messages used", SalesforceWarning) warnings.warn("DB-API extension connection.messages used", SalesforceWarning) warnings.warn("DB-API extension cursor.next(, SalesforceWarning) used") warnings.warn("DB-API extension cursor.__iter__(, SalesforceWarning) used") warnings.warn("DB-API extension cursor.lastrowid used", SalesforceWarning) warnings.warn("DB-API extension .errorhandler used", SalesforceWarning)
python
def signalize_extensions(): """DB API 2.0 extension are reported by warnings at run-time.""" warnings.warn("DB-API extension cursor.rownumber used", SalesforceWarning) warnings.warn("DB-API extension connection.<exception> used", SalesforceWarning) # TODO warnings.warn("DB-API extension cursor.connection used", SalesforceWarning) # not implemented DB-API extension cursor.scroll(, SalesforceWarning) warnings.warn("DB-API extension cursor.messages used", SalesforceWarning) warnings.warn("DB-API extension connection.messages used", SalesforceWarning) warnings.warn("DB-API extension cursor.next(, SalesforceWarning) used") warnings.warn("DB-API extension cursor.__iter__(, SalesforceWarning) used") warnings.warn("DB-API extension cursor.lastrowid used", SalesforceWarning) warnings.warn("DB-API extension .errorhandler used", SalesforceWarning)
[ "def", "signalize_extensions", "(", ")", ":", "warnings", ".", "warn", "(", "\"DB-API extension cursor.rownumber used\"", ",", "SalesforceWarning", ")", "warnings", ".", "warn", "(", "\"DB-API extension connection.<exception> used\"", ",", "SalesforceWarning", ")", "# TODO", "warnings", ".", "warn", "(", "\"DB-API extension cursor.connection used\"", ",", "SalesforceWarning", ")", "# not implemented DB-API extension cursor.scroll(, SalesforceWarning)", "warnings", ".", "warn", "(", "\"DB-API extension cursor.messages used\"", ",", "SalesforceWarning", ")", "warnings", ".", "warn", "(", "\"DB-API extension connection.messages used\"", ",", "SalesforceWarning", ")", "warnings", ".", "warn", "(", "\"DB-API extension cursor.next(, SalesforceWarning) used\"", ")", "warnings", ".", "warn", "(", "\"DB-API extension cursor.__iter__(, SalesforceWarning) used\"", ")", "warnings", ".", "warn", "(", "\"DB-API extension cursor.lastrowid used\"", ",", "SalesforceWarning", ")", "warnings", ".", "warn", "(", "\"DB-API extension .errorhandler used\"", ",", "SalesforceWarning", ")" ]
DB API 2.0 extension are reported by warnings at run-time.
[ "DB", "API", "2", ".", "0", "extension", "are", "reported", "by", "warnings", "at", "run", "-", "time", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L670-L681
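The warnings emitted by signalize_extensions() can be observed with the standard warnings machinery; the sketch below assumes the import path salesforce.dbapi.driver matches the file path shown in the record.

```python
import warnings

from salesforce.dbapi import driver  # assumed module path for driver.py

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")   # record every warning, even repeats
    driver.signalize_extensions()     # emits one SalesforceWarning per extension

for w in caught:
    print(w.category.__name__, "-", w.message)
```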
7,944
django-salesforce/django-salesforce
salesforce/dbapi/driver.py
arg_to_soql
def arg_to_soql(arg): """ Perform necessary SOQL quoting on the arg. """ conversion = sql_conversions.get(type(arg)) if conversion: return conversion(arg) for type_ in subclass_conversions: if isinstance(arg, type_): return sql_conversions[type_](arg) return sql_conversions[str](arg)
python
def arg_to_soql(arg): """ Perform necessary SOQL quoting on the arg. """ conversion = sql_conversions.get(type(arg)) if conversion: return conversion(arg) for type_ in subclass_conversions: if isinstance(arg, type_): return sql_conversions[type_](arg) return sql_conversions[str](arg)
[ "def", "arg_to_soql", "(", "arg", ")", ":", "conversion", "=", "sql_conversions", ".", "get", "(", "type", "(", "arg", ")", ")", "if", "conversion", ":", "return", "conversion", "(", "arg", ")", "for", "type_", "in", "subclass_conversions", ":", "if", "isinstance", "(", "arg", ",", "type_", ")", ":", "return", "sql_conversions", "[", "type_", "]", "(", "arg", ")", "return", "sql_conversions", "[", "str", "]", "(", "arg", ")" ]
Perform necessary SOQL quoting on the arg.
[ "Perform", "necessary", "SOQL", "quoting", "on", "the", "arg", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L735-L745
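A small sketch of how arg_to_soql() is called: each Python value is mapped to a SOQL literal via the module-level sql_conversions table, with a fallback to the str conversion for unknown types. The exact rendering of each type depends on that table, so the example only shows the call pattern.

```python
import datetime

from salesforce.dbapi.driver import arg_to_soql  # assumed module path

# Unknown types never raise: the function falls back to the str conversion.
for value in ["O'Hara", 42, True, None, datetime.date(2019, 1, 31)]:
    print(repr(value), "->", arg_to_soql(value))
```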
7,945
django-salesforce/django-salesforce
salesforce/dbapi/driver.py
arg_to_json
def arg_to_json(arg): """ Perform necessary JSON conversion on the arg. """ conversion = json_conversions.get(type(arg)) if conversion: return conversion(arg) for type_ in subclass_conversions: if isinstance(arg, type_): return json_conversions[type_](arg) return json_conversions[str](arg)
python
def arg_to_json(arg): """ Perform necessary JSON conversion on the arg. """ conversion = json_conversions.get(type(arg)) if conversion: return conversion(arg) for type_ in subclass_conversions: if isinstance(arg, type_): return json_conversions[type_](arg) return json_conversions[str](arg)
[ "def", "arg_to_json", "(", "arg", ")", ":", "conversion", "=", "json_conversions", ".", "get", "(", "type", "(", "arg", ")", ")", "if", "conversion", ":", "return", "conversion", "(", "arg", ")", "for", "type_", "in", "subclass_conversions", ":", "if", "isinstance", "(", "arg", ",", "type_", ")", ":", "return", "json_conversions", "[", "type_", "]", "(", "arg", ")", "return", "json_conversions", "[", "str", "]", "(", "arg", ")" ]
Perform necessary JSON conversion on the arg.
[ "Perform", "necessary", "JSON", "conversion", "on", "the", "arg", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L748-L758
7,946
django-salesforce/django-salesforce
salesforce/dbapi/driver.py
merge_dict
def merge_dict(dict_1, *other, **kw): """Merge two or more dict including kw into result dict.""" tmp = dict_1.copy() for x in other: tmp.update(x) tmp.update(kw) return tmp
python
def merge_dict(dict_1, *other, **kw): """Merge two or more dict including kw into result dict.""" tmp = dict_1.copy() for x in other: tmp.update(x) tmp.update(kw) return tmp
[ "def", "merge_dict", "(", "dict_1", ",", "*", "other", ",", "*", "*", "kw", ")", ":", "tmp", "=", "dict_1", ".", "copy", "(", ")", "for", "x", "in", "other", ":", "tmp", ".", "update", "(", "x", ")", "tmp", ".", "update", "(", "kw", ")", "return", "tmp" ]
Merge two or more dict including kw into result dict.
[ "Merge", "two", "or", "more", "dict", "including", "kw", "into", "result", "dict", "." ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L792-L798
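merge_dict() is a plain left-to-right dictionary merge; the sketch below illustrates the override order and the fact that the first argument is copied rather than mutated.

```python
from salesforce.dbapi.driver import merge_dict  # assumed module path

defaults = {"timeout": 30, "retries": 2}
overrides = {"retries": 5}

merged = merge_dict(defaults, overrides, verbose=True)

assert merged == {"timeout": 30, "retries": 5, "verbose": True}
assert defaults == {"timeout": 30, "retries": 2}  # the input dict is untouched
```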
7,947
django-salesforce/django-salesforce
salesforce/dbapi/driver.py
RawConnection.make_session
def make_session(self): """Authenticate and get the name of assigned SFDC data server""" with connect_lock: if self._sf_session is None: sf_session = requests.Session() # TODO configurable class Salesforce***Auth sf_session.auth = SalesforcePasswordAuth(db_alias=self.alias, settings_dict=self.settings_dict) sf_instance_url = sf_session.auth.instance_url sf_requests_adapter = HTTPAdapter(max_retries=get_max_retries()) sf_session.mount(sf_instance_url, sf_requests_adapter) # Additional headers work, but the same are added automatically by "requests' package. # sf_session.header = {'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive'} # TODO self._sf_session = sf_session
python
def make_session(self): """Authenticate and get the name of assigned SFDC data server""" with connect_lock: if self._sf_session is None: sf_session = requests.Session() # TODO configurable class Salesforce***Auth sf_session.auth = SalesforcePasswordAuth(db_alias=self.alias, settings_dict=self.settings_dict) sf_instance_url = sf_session.auth.instance_url sf_requests_adapter = HTTPAdapter(max_retries=get_max_retries()) sf_session.mount(sf_instance_url, sf_requests_adapter) # Additional headers work, but the same are added automatically by "requests' package. # sf_session.header = {'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive'} # TODO self._sf_session = sf_session
[ "def", "make_session", "(", "self", ")", ":", "with", "connect_lock", ":", "if", "self", ".", "_sf_session", "is", "None", ":", "sf_session", "=", "requests", ".", "Session", "(", ")", "# TODO configurable class Salesforce***Auth", "sf_session", ".", "auth", "=", "SalesforcePasswordAuth", "(", "db_alias", "=", "self", ".", "alias", ",", "settings_dict", "=", "self", ".", "settings_dict", ")", "sf_instance_url", "=", "sf_session", ".", "auth", ".", "instance_url", "sf_requests_adapter", "=", "HTTPAdapter", "(", "max_retries", "=", "get_max_retries", "(", ")", ")", "sf_session", ".", "mount", "(", "sf_instance_url", ",", "sf_requests_adapter", ")", "# Additional headers work, but the same are added automatically by \"requests' package.", "# sf_session.header = {'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive'} # TODO", "self", ".", "_sf_session", "=", "sf_session" ]
Authenticate and get the name of assigned SFDC data server
[ "Authenticate", "and", "get", "the", "name", "of", "assigned", "SFDC", "data", "server" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L159-L172
7,948
django-salesforce/django-salesforce
salesforce/dbapi/driver.py
RawConnection.rest_api_url
def rest_api_url(self, *url_parts, **kwargs): """Join the URL of REST_API parameters: upl_parts: strings that are joined to the url by "/". a REST url like https://na1.salesforce.com/services/data/v44.0/ is usually added, but not if the first string starts with https:// api_ver: API version that should be used instead of connection.api_ver default. A special api_ver="" can be used to omit api version (for request to ask for available api versions) relative: If `relative` is true then the url is without domain Examples: self.rest_api_url("query?q=select+id+from+Organization") self.rest_api_url("sobject", "Contact", id, api_ver="45.0") self.rest_api_url(api_ver="") # versions request self.rest_api_url("sobject", relative=True) self.rest_api_url("/services/data/v45.0") Output: https://na1.salesforce.com/services/data/v44.0/query?q=select+id+from+Organization https://na1.salesforce.com/services/data/v45.0/sobject/Contact/003DD00000000XYAAA https://na1.salesforce.com/services/data /services/data/v45.0 https://na1.salesforce.com/services/data/44.0 """ url_parts = list(url_parts) if url_parts and re.match(r'^(?:https|mock)://', url_parts[0]): return '/'.join(url_parts) relative = kwargs.pop('relative', False) api_ver = kwargs.pop('api_ver', None) api_ver = api_ver if api_ver is not None else self.api_ver assert not kwargs if not relative: base = [self.sf_session.auth.instance_url] else: base = [''] if url_parts and url_parts[0].startswith('/'): prefix = [] url_parts[0] = url_parts[0][1:] else: prefix = ['services/data'] if api_ver: prefix += ['v{api_ver}'.format(api_ver=api_ver)] return '/'.join(base + prefix + url_parts)
python
def rest_api_url(self, *url_parts, **kwargs): """Join the URL of REST_API parameters: upl_parts: strings that are joined to the url by "/". a REST url like https://na1.salesforce.com/services/data/v44.0/ is usually added, but not if the first string starts with https:// api_ver: API version that should be used instead of connection.api_ver default. A special api_ver="" can be used to omit api version (for request to ask for available api versions) relative: If `relative` is true then the url is without domain Examples: self.rest_api_url("query?q=select+id+from+Organization") self.rest_api_url("sobject", "Contact", id, api_ver="45.0") self.rest_api_url(api_ver="") # versions request self.rest_api_url("sobject", relative=True) self.rest_api_url("/services/data/v45.0") Output: https://na1.salesforce.com/services/data/v44.0/query?q=select+id+from+Organization https://na1.salesforce.com/services/data/v45.0/sobject/Contact/003DD00000000XYAAA https://na1.salesforce.com/services/data /services/data/v45.0 https://na1.salesforce.com/services/data/44.0 """ url_parts = list(url_parts) if url_parts and re.match(r'^(?:https|mock)://', url_parts[0]): return '/'.join(url_parts) relative = kwargs.pop('relative', False) api_ver = kwargs.pop('api_ver', None) api_ver = api_ver if api_ver is not None else self.api_ver assert not kwargs if not relative: base = [self.sf_session.auth.instance_url] else: base = [''] if url_parts and url_parts[0].startswith('/'): prefix = [] url_parts[0] = url_parts[0][1:] else: prefix = ['services/data'] if api_ver: prefix += ['v{api_ver}'.format(api_ver=api_ver)] return '/'.join(base + prefix + url_parts)
[ "def", "rest_api_url", "(", "self", ",", "*", "url_parts", ",", "*", "*", "kwargs", ")", ":", "url_parts", "=", "list", "(", "url_parts", ")", "if", "url_parts", "and", "re", ".", "match", "(", "r'^(?:https|mock)://'", ",", "url_parts", "[", "0", "]", ")", ":", "return", "'/'", ".", "join", "(", "url_parts", ")", "relative", "=", "kwargs", ".", "pop", "(", "'relative'", ",", "False", ")", "api_ver", "=", "kwargs", ".", "pop", "(", "'api_ver'", ",", "None", ")", "api_ver", "=", "api_ver", "if", "api_ver", "is", "not", "None", "else", "self", ".", "api_ver", "assert", "not", "kwargs", "if", "not", "relative", ":", "base", "=", "[", "self", ".", "sf_session", ".", "auth", ".", "instance_url", "]", "else", ":", "base", "=", "[", "''", "]", "if", "url_parts", "and", "url_parts", "[", "0", "]", ".", "startswith", "(", "'/'", ")", ":", "prefix", "=", "[", "]", "url_parts", "[", "0", "]", "=", "url_parts", "[", "0", "]", "[", "1", ":", "]", "else", ":", "prefix", "=", "[", "'services/data'", "]", "if", "api_ver", ":", "prefix", "+=", "[", "'v{api_ver}'", ".", "format", "(", "api_ver", "=", "api_ver", ")", "]", "return", "'/'", ".", "join", "(", "base", "+", "prefix", "+", "url_parts", ")" ]
Join the URL of REST_API parameters: url_parts: strings that are joined to the url by "/". a REST url like https://na1.salesforce.com/services/data/v44.0/ is usually added, but not if the first string starts with https:// api_ver: API version that should be used instead of connection.api_ver default. A special api_ver="" can be used to omit api version (for request to ask for available api versions) relative: If `relative` is true then the url is without domain Examples: self.rest_api_url("query?q=select+id+from+Organization") self.rest_api_url("sobject", "Contact", id, api_ver="45.0") self.rest_api_url(api_ver="") # versions request self.rest_api_url("sobject", relative=True) self.rest_api_url("/services/data/v45.0") Output: https://na1.salesforce.com/services/data/v44.0/query?q=select+id+from+Organization https://na1.salesforce.com/services/data/v45.0/sobject/Contact/003DD00000000XYAAA https://na1.salesforce.com/services/data /services/data/v45.0 https://na1.salesforce.com/services/data/44.0
[ "Join", "the", "URL", "of", "REST_API" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L174-L216
7,949
django-salesforce/django-salesforce
salesforce/dbapi/driver.py
RawConnection.raise_errors
def raise_errors(self, response): """The innermost part - report errors by exceptions""" # Errors: 400, 403 permissions or REQUEST_LIMIT_EXCEEDED, 404, 405, 415, 500) # TODO extract a case ID for Salesforce support from code 500 messages # TODO disabled 'debug_verbs' temporarily, after writing better default messages verb = self.debug_verbs # NOQA pylint:disable=unused-variable method = response.request.method data = None is_json = 'json' in response.headers.get('Content-Type', '') and response.text if is_json: data = json.loads(response.text) if not (isinstance(data, list) and data and 'errorCode' in data[0]): messages = [response.text] if is_json else [] raise OperationalError( ['HTTP error "%d %s":' % (response.status_code, response.reason)] + messages, response, ['method+url']) # Other Errors are reported in the json body err_msg = data[0]['message'] err_code = data[0]['errorCode'] if response.status_code == 404: # ResourceNotFound if method == 'DELETE' and err_code in ('ENTITY_IS_DELETED', 'INVALID_CROSS_REFERENCE_KEY'): # It was a delete command and the object is in trash bin or it is # completely deleted or it could be a valid Id for this sobject type. # Then we accept it with a warning, similarly to delete by a classic database query: # DELETE FROM xy WHERE id = 'something_deleted_yet' warn_sf([err_msg, "Object is deleted before delete or update"], response, ['method+url']) # TODO add a warning and add it to messages return None if err_code in ('NOT_FOUND', # 404 e.g. invalid object type in url path or url query?q=select ... 'METHOD_NOT_ALLOWED', # 405 e.g. patch instead of post ): # both need to report the url raise SalesforceError([err_msg], response, ['method+url']) # it is good e.g for these errorCode: ('INVALID_FIELD', 'MALFORMED_QUERY', 'INVALID_FIELD_FOR_INSERT_UPDATE') raise SalesforceError([err_msg], response)
python
def raise_errors(self, response): """The innermost part - report errors by exceptions""" # Errors: 400, 403 permissions or REQUEST_LIMIT_EXCEEDED, 404, 405, 415, 500) # TODO extract a case ID for Salesforce support from code 500 messages # TODO disabled 'debug_verbs' temporarily, after writing better default messages verb = self.debug_verbs # NOQA pylint:disable=unused-variable method = response.request.method data = None is_json = 'json' in response.headers.get('Content-Type', '') and response.text if is_json: data = json.loads(response.text) if not (isinstance(data, list) and data and 'errorCode' in data[0]): messages = [response.text] if is_json else [] raise OperationalError( ['HTTP error "%d %s":' % (response.status_code, response.reason)] + messages, response, ['method+url']) # Other Errors are reported in the json body err_msg = data[0]['message'] err_code = data[0]['errorCode'] if response.status_code == 404: # ResourceNotFound if method == 'DELETE' and err_code in ('ENTITY_IS_DELETED', 'INVALID_CROSS_REFERENCE_KEY'): # It was a delete command and the object is in trash bin or it is # completely deleted or it could be a valid Id for this sobject type. # Then we accept it with a warning, similarly to delete by a classic database query: # DELETE FROM xy WHERE id = 'something_deleted_yet' warn_sf([err_msg, "Object is deleted before delete or update"], response, ['method+url']) # TODO add a warning and add it to messages return None if err_code in ('NOT_FOUND', # 404 e.g. invalid object type in url path or url query?q=select ... 'METHOD_NOT_ALLOWED', # 405 e.g. patch instead of post ): # both need to report the url raise SalesforceError([err_msg], response, ['method+url']) # it is good e.g for these errorCode: ('INVALID_FIELD', 'MALFORMED_QUERY', 'INVALID_FIELD_FOR_INSERT_UPDATE') raise SalesforceError([err_msg], response)
[ "def", "raise_errors", "(", "self", ",", "response", ")", ":", "# Errors: 400, 403 permissions or REQUEST_LIMIT_EXCEEDED, 404, 405, 415, 500)", "# TODO extract a case ID for Salesforce support from code 500 messages", "# TODO disabled 'debug_verbs' temporarily, after writing better default messages", "verb", "=", "self", ".", "debug_verbs", "# NOQA pylint:disable=unused-variable", "method", "=", "response", ".", "request", ".", "method", "data", "=", "None", "is_json", "=", "'json'", "in", "response", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "''", ")", "and", "response", ".", "text", "if", "is_json", ":", "data", "=", "json", ".", "loads", "(", "response", ".", "text", ")", "if", "not", "(", "isinstance", "(", "data", ",", "list", ")", "and", "data", "and", "'errorCode'", "in", "data", "[", "0", "]", ")", ":", "messages", "=", "[", "response", ".", "text", "]", "if", "is_json", "else", "[", "]", "raise", "OperationalError", "(", "[", "'HTTP error \"%d %s\":'", "%", "(", "response", ".", "status_code", ",", "response", ".", "reason", ")", "]", "+", "messages", ",", "response", ",", "[", "'method+url'", "]", ")", "# Other Errors are reported in the json body", "err_msg", "=", "data", "[", "0", "]", "[", "'message'", "]", "err_code", "=", "data", "[", "0", "]", "[", "'errorCode'", "]", "if", "response", ".", "status_code", "==", "404", ":", "# ResourceNotFound", "if", "method", "==", "'DELETE'", "and", "err_code", "in", "(", "'ENTITY_IS_DELETED'", ",", "'INVALID_CROSS_REFERENCE_KEY'", ")", ":", "# It was a delete command and the object is in trash bin or it is", "# completely deleted or it could be a valid Id for this sobject type.", "# Then we accept it with a warning, similarly to delete by a classic database query:", "# DELETE FROM xy WHERE id = 'something_deleted_yet'", "warn_sf", "(", "[", "err_msg", ",", "\"Object is deleted before delete or update\"", "]", ",", "response", ",", "[", "'method+url'", "]", ")", "# TODO add a warning and add it to messages", "return", "None", "if", "err_code", "in", "(", "'NOT_FOUND'", ",", "# 404 e.g. invalid object type in url path or url query?q=select ...", "'METHOD_NOT_ALLOWED'", ",", "# 405 e.g. patch instead of post", ")", ":", "# both need to report the url", "raise", "SalesforceError", "(", "[", "err_msg", "]", ",", "response", ",", "[", "'method+url'", "]", ")", "# it is good e.g for these errorCode: ('INVALID_FIELD', 'MALFORMED_QUERY', 'INVALID_FIELD_FOR_INSERT_UPDATE')", "raise", "SalesforceError", "(", "[", "err_msg", "]", ",", "response", ")" ]
The innermost part - report errors by exceptions
[ "The", "innermost", "part", "-", "report", "errors", "by", "exceptions" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L287-L322
7,950
django-salesforce/django-salesforce
salesforce/dbapi/driver.py
RawConnection.composite_request
def composite_request(self, data): """Call a 'composite' request with subrequests, error handling A fake object for request/response is created for a subrequest in case of error, to be possible to use the same error hanler with a clear message as with an individual request. """ post_data = {'compositeRequest': data, 'allOrNone': True} resp = self.handle_api_exceptions('POST', 'composite', json=post_data) comp_resp = resp.json()['compositeResponse'] is_ok = all(x['httpStatusCode'] < 400 for x in comp_resp) if is_ok: return resp # construct an equivalent of individual bad request/response bad_responses = { i: x for i, x in enumerate(comp_resp) if not (x['httpStatusCode'] == 400 and x['body'][0]['errorCode'] in ('PROCESSING_HALTED', 'ALL_OR_NONE_OPERATION_ROLLED_BACK')) } if len(bad_responses) != 1: raise InternalError("Too much or too many subrequests with an individual error") bad_i, bad_response = bad_responses.popitem() bad_request = data[bad_i] bad_req = FakeReq(bad_request['method'], bad_request['url'], bad_request.get('body'), bad_request.get('httpHeaders', {}), context={bad_i: bad_request['referenceId']}) body = [merge_dict(x, referenceId=bad_response['referenceId']) for x in bad_response['body']] bad_resp_headers = bad_response['httpHeaders'].copy() bad_resp_headers.update({'Content-Type': resp.headers['Content-Type']}) bad_resp = FakeResp(bad_response['httpStatusCode'], json.dumps(body), bad_req, bad_resp_headers) self.raise_errors(bad_resp)
python
def composite_request(self, data): """Call a 'composite' request with subrequests, error handling A fake object for request/response is created for a subrequest in case of error, to be possible to use the same error hanler with a clear message as with an individual request. """ post_data = {'compositeRequest': data, 'allOrNone': True} resp = self.handle_api_exceptions('POST', 'composite', json=post_data) comp_resp = resp.json()['compositeResponse'] is_ok = all(x['httpStatusCode'] < 400 for x in comp_resp) if is_ok: return resp # construct an equivalent of individual bad request/response bad_responses = { i: x for i, x in enumerate(comp_resp) if not (x['httpStatusCode'] == 400 and x['body'][0]['errorCode'] in ('PROCESSING_HALTED', 'ALL_OR_NONE_OPERATION_ROLLED_BACK')) } if len(bad_responses) != 1: raise InternalError("Too much or too many subrequests with an individual error") bad_i, bad_response = bad_responses.popitem() bad_request = data[bad_i] bad_req = FakeReq(bad_request['method'], bad_request['url'], bad_request.get('body'), bad_request.get('httpHeaders', {}), context={bad_i: bad_request['referenceId']}) body = [merge_dict(x, referenceId=bad_response['referenceId']) for x in bad_response['body']] bad_resp_headers = bad_response['httpHeaders'].copy() bad_resp_headers.update({'Content-Type': resp.headers['Content-Type']}) bad_resp = FakeResp(bad_response['httpStatusCode'], json.dumps(body), bad_req, bad_resp_headers) self.raise_errors(bad_resp)
[ "def", "composite_request", "(", "self", ",", "data", ")", ":", "post_data", "=", "{", "'compositeRequest'", ":", "data", ",", "'allOrNone'", ":", "True", "}", "resp", "=", "self", ".", "handle_api_exceptions", "(", "'POST'", ",", "'composite'", ",", "json", "=", "post_data", ")", "comp_resp", "=", "resp", ".", "json", "(", ")", "[", "'compositeResponse'", "]", "is_ok", "=", "all", "(", "x", "[", "'httpStatusCode'", "]", "<", "400", "for", "x", "in", "comp_resp", ")", "if", "is_ok", ":", "return", "resp", "# construct an equivalent of individual bad request/response", "bad_responses", "=", "{", "i", ":", "x", "for", "i", ",", "x", "in", "enumerate", "(", "comp_resp", ")", "if", "not", "(", "x", "[", "'httpStatusCode'", "]", "==", "400", "and", "x", "[", "'body'", "]", "[", "0", "]", "[", "'errorCode'", "]", "in", "(", "'PROCESSING_HALTED'", ",", "'ALL_OR_NONE_OPERATION_ROLLED_BACK'", ")", ")", "}", "if", "len", "(", "bad_responses", ")", "!=", "1", ":", "raise", "InternalError", "(", "\"Too much or too many subrequests with an individual error\"", ")", "bad_i", ",", "bad_response", "=", "bad_responses", ".", "popitem", "(", ")", "bad_request", "=", "data", "[", "bad_i", "]", "bad_req", "=", "FakeReq", "(", "bad_request", "[", "'method'", "]", ",", "bad_request", "[", "'url'", "]", ",", "bad_request", ".", "get", "(", "'body'", ")", ",", "bad_request", ".", "get", "(", "'httpHeaders'", ",", "{", "}", ")", ",", "context", "=", "{", "bad_i", ":", "bad_request", "[", "'referenceId'", "]", "}", ")", "body", "=", "[", "merge_dict", "(", "x", ",", "referenceId", "=", "bad_response", "[", "'referenceId'", "]", ")", "for", "x", "in", "bad_response", "[", "'body'", "]", "]", "bad_resp_headers", "=", "bad_response", "[", "'httpHeaders'", "]", ".", "copy", "(", ")", "bad_resp_headers", ".", "update", "(", "{", "'Content-Type'", ":", "resp", ".", "headers", "[", "'Content-Type'", "]", "}", ")", "bad_resp", "=", "FakeResp", "(", "bad_response", "[", "'httpStatusCode'", "]", ",", "json", ".", "dumps", "(", "body", ")", ",", "bad_req", ",", "bad_resp_headers", ")", "self", ".", "raise_errors", "(", "bad_resp", ")" ]
Call a 'composite' request with subrequests, error handling. A fake request/response object is created for a subrequest in case of error, so that the same error handler can be used with a clear message as with an individual request.
[ "Call", "a", "composite", "request", "with", "subrequests", "error", "handling" ]
6fd5643dba69d49c5881de50875cf90204a8f808
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L324-L359
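composite_request() expects a list of subrequest dicts and wraps it in a {'compositeRequest': ..., 'allOrNone': True} payload, as the code above shows. The sketch below illustrates that input shape; the URLs, Ids and field values are made up for the example.

```python
import json

# Illustrative subrequests: each entry carries method, url, referenceId and an
# optional body, matching the fields composite_request() reads back on error.
subrequests = [
    {"method": "POST",
     "url": "/services/data/v44.0/sobjects/Account",
     "referenceId": "NewAccount",
     "body": {"Name": "Acme"}},
    {"method": "PATCH",
     "url": "/services/data/v44.0/sobjects/Contact/003000000000001AAA",
     "referenceId": "UpdateContact",
     "body": {"LastName": "Updated"}},
]

# This is the envelope composite_request() builds before POSTing to 'composite'.
print(json.dumps({"compositeRequest": subrequests, "allOrNone": True}, indent=2))
```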
7,951
crs4/pydoop
pydoop/avrolib.py
SeekableDataFileReader.align_after
def align_after(self, offset): """ Search for a sync point after offset and align just after that. """ f = self.reader if offset <= 0: # FIXME what is a negative offset?? f.seek(0) self._block_count = 0 self._read_header() # FIXME we can't estimate how big it is... return sm = self.sync_marker sml = len(sm) pos = offset while pos < self.file_length - sml: f.seek(pos) data = f.read(self.FORWARD_WINDOW_SIZE) sync_offset = data.find(sm) if sync_offset > -1: f.seek(pos + sync_offset) self._block_count = 0 return pos += len(data)
python
def align_after(self, offset): """ Search for a sync point after offset and align just after that. """ f = self.reader if offset <= 0: # FIXME what is a negative offset?? f.seek(0) self._block_count = 0 self._read_header() # FIXME we can't estimate how big it is... return sm = self.sync_marker sml = len(sm) pos = offset while pos < self.file_length - sml: f.seek(pos) data = f.read(self.FORWARD_WINDOW_SIZE) sync_offset = data.find(sm) if sync_offset > -1: f.seek(pos + sync_offset) self._block_count = 0 return pos += len(data)
[ "def", "align_after", "(", "self", ",", "offset", ")", ":", "f", "=", "self", ".", "reader", "if", "offset", "<=", "0", ":", "# FIXME what is a negative offset??", "f", ".", "seek", "(", "0", ")", "self", ".", "_block_count", "=", "0", "self", ".", "_read_header", "(", ")", "# FIXME we can't estimate how big it is...", "return", "sm", "=", "self", ".", "sync_marker", "sml", "=", "len", "(", "sm", ")", "pos", "=", "offset", "while", "pos", "<", "self", ".", "file_length", "-", "sml", ":", "f", ".", "seek", "(", "pos", ")", "data", "=", "f", ".", "read", "(", "self", ".", "FORWARD_WINDOW_SIZE", ")", "sync_offset", "=", "data", ".", "find", "(", "sm", ")", "if", "sync_offset", ">", "-", "1", ":", "f", ".", "seek", "(", "pos", "+", "sync_offset", ")", "self", ".", "_block_count", "=", "0", "return", "pos", "+=", "len", "(", "data", ")" ]
Search for a sync point after offset and align just after that.
[ "Search", "for", "a", "sync", "point", "after", "offset", "and", "align", "just", "after", "that", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/avrolib.py#L77-L98
7,952
crs4/pydoop
pydoop/avrolib.py
AvroReader.get_progress
def get_progress(self): """ Give a rough estimate of the progress done. """ pos = self.reader.reader.tell() return min((pos - self.region_start) / float(self.region_end - self.region_start), 1.0)
python
def get_progress(self): """ Give a rough estimate of the progress done. """ pos = self.reader.reader.tell() return min((pos - self.region_start) / float(self.region_end - self.region_start), 1.0)
[ "def", "get_progress", "(", "self", ")", ":", "pos", "=", "self", ".", "reader", ".", "reader", ".", "tell", "(", ")", "return", "min", "(", "(", "pos", "-", "self", ".", "region_start", ")", "/", "float", "(", "self", ".", "region_end", "-", "self", ".", "region_start", ")", ",", "1.0", ")" ]
Give a rough estimate of the progress done.
[ "Give", "a", "rough", "estimate", "of", "the", "progress", "done", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/avrolib.py#L124-L131
7,953
crs4/pydoop
pydoop/hadoop_utils.py
is_exe
def is_exe(fpath): """ Path references an executable file. """ return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
python
def is_exe(fpath): """ Path references an executable file. """ return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
[ "def", "is_exe", "(", "fpath", ")", ":", "return", "os", ".", "path", ".", "isfile", "(", "fpath", ")", "and", "os", ".", "access", "(", "fpath", ",", "os", ".", "X_OK", ")" ]
Path references an executable file.
[ "Path", "references", "an", "executable", "file", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadoop_utils.py#L265-L269
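is_exe() is a one-line predicate built on os.path and os.access; a typical use, sketched below, is scanning PATH for an executable such as the hadoop launcher (the import path follows the record's file path pydoop/hadoop_utils.py).

```python
import os

from pydoop.hadoop_utils import is_exe


def find_executable(name):
    """Return the first entry on PATH for which is_exe() holds, or None."""
    for directory in os.environ.get("PATH", "").split(os.pathsep):
        candidate = os.path.join(directory, name)
        if is_exe(candidate):
            return candidate
    return None


print(find_executable("hadoop"))
```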
7,954
crs4/pydoop
pydoop/hadoop_utils.py
is_readable
def is_readable(fpath): """ Path references a readable file. """ return os.path.isfile(fpath) and os.access(fpath, os.R_OK)
python
def is_readable(fpath): """ Path references a readable file. """ return os.path.isfile(fpath) and os.access(fpath, os.R_OK)
[ "def", "is_readable", "(", "fpath", ")", ":", "return", "os", ".", "path", ".", "isfile", "(", "fpath", ")", "and", "os", ".", "access", "(", "fpath", ",", "os", ".", "R_OK", ")" ]
Path references a readable file.
[ "Path", "references", "a", "readable", "file", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadoop_utils.py#L272-L276
7,955
crs4/pydoop
pydoop/hadoop_utils.py
PathFinder.is_local
def is_local(self, hadoop_conf=None, hadoop_home=None): """\ Is Hadoop configured to run in local mode? By default, it is. [pseudo-]distributed mode must be explicitly configured. """ conf = self.hadoop_params(hadoop_conf, hadoop_home) keys = ('mapreduce.framework.name', 'mapreduce.jobtracker.address', 'mapred.job.tracker') for k in keys: if conf.get(k, 'local').lower() != 'local': return False return True
python
def is_local(self, hadoop_conf=None, hadoop_home=None): """\ Is Hadoop configured to run in local mode? By default, it is. [pseudo-]distributed mode must be explicitly configured. """ conf = self.hadoop_params(hadoop_conf, hadoop_home) keys = ('mapreduce.framework.name', 'mapreduce.jobtracker.address', 'mapred.job.tracker') for k in keys: if conf.get(k, 'local').lower() != 'local': return False return True
[ "def", "is_local", "(", "self", ",", "hadoop_conf", "=", "None", ",", "hadoop_home", "=", "None", ")", ":", "conf", "=", "self", ".", "hadoop_params", "(", "hadoop_conf", ",", "hadoop_home", ")", "keys", "=", "(", "'mapreduce.framework.name'", ",", "'mapreduce.jobtracker.address'", ",", "'mapred.job.tracker'", ")", "for", "k", "in", "keys", ":", "if", "conf", ".", "get", "(", "k", ",", "'local'", ")", ".", "lower", "(", ")", "!=", "'local'", ":", "return", "False", "return", "True" ]
\ Is Hadoop configured to run in local mode? By default, it is. [pseudo-]distributed mode must be explicitly configured.
[ "\\", "Is", "Hadoop", "configured", "to", "run", "in", "local", "mode?" ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadoop_utils.py#L562-L576
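A sketch of using PathFinder.is_local() to branch on the configured Hadoop mode; it assumes PathFinder can be instantiated without arguments, which is suggested but not proven by the default parameters above.

```python
from pydoop.hadoop_utils import PathFinder

finder = PathFinder()  # assumption: no constructor arguments required

if finder.is_local():
    print("Hadoop runs in local (standalone) mode; no cluster configured.")
else:
    print("mapreduce.framework.name / job tracker settings point at a cluster.")
```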
7,956
crs4/pydoop
pydoop/hdfs/path.py
abspath
def abspath(hdfs_path, user=None, local=False): """ Return an absolute path for ``hdfs_path``. The ``user`` arg is passed to :func:`split`. The ``local`` argument forces ``hdfs_path`` to be interpreted as an ordinary local path: .. code-block:: python >>> import os >>> os.chdir('/tmp') >>> import pydoop.hdfs.path as hpath >>> hpath.abspath('file:/tmp') 'file:/tmp' >>> hpath.abspath('file:/tmp', local=True) 'file:/tmp/file:/tmp' Note that this function always return a full URI: .. code-block:: python >>> import pydoop.hdfs.path as hpath >>> hpath.abspath('/tmp') 'hdfs://localhost:9000/tmp' """ if local: return 'file:%s' % os.path.abspath(hdfs_path) if isfull(hdfs_path): return hdfs_path hostname, port, path = split(hdfs_path, user=user) if hostname: fs = hdfs_fs.hdfs(hostname, port) apath = join("hdfs://%s:%s" % (fs.host, fs.port), path) fs.close() else: apath = "file:%s" % os.path.abspath(path) return apath
python
def abspath(hdfs_path, user=None, local=False): """ Return an absolute path for ``hdfs_path``. The ``user`` arg is passed to :func:`split`. The ``local`` argument forces ``hdfs_path`` to be interpreted as an ordinary local path: .. code-block:: python >>> import os >>> os.chdir('/tmp') >>> import pydoop.hdfs.path as hpath >>> hpath.abspath('file:/tmp') 'file:/tmp' >>> hpath.abspath('file:/tmp', local=True) 'file:/tmp/file:/tmp' Note that this function always return a full URI: .. code-block:: python >>> import pydoop.hdfs.path as hpath >>> hpath.abspath('/tmp') 'hdfs://localhost:9000/tmp' """ if local: return 'file:%s' % os.path.abspath(hdfs_path) if isfull(hdfs_path): return hdfs_path hostname, port, path = split(hdfs_path, user=user) if hostname: fs = hdfs_fs.hdfs(hostname, port) apath = join("hdfs://%s:%s" % (fs.host, fs.port), path) fs.close() else: apath = "file:%s" % os.path.abspath(path) return apath
[ "def", "abspath", "(", "hdfs_path", ",", "user", "=", "None", ",", "local", "=", "False", ")", ":", "if", "local", ":", "return", "'file:%s'", "%", "os", ".", "path", ".", "abspath", "(", "hdfs_path", ")", "if", "isfull", "(", "hdfs_path", ")", ":", "return", "hdfs_path", "hostname", ",", "port", ",", "path", "=", "split", "(", "hdfs_path", ",", "user", "=", "user", ")", "if", "hostname", ":", "fs", "=", "hdfs_fs", ".", "hdfs", "(", "hostname", ",", "port", ")", "apath", "=", "join", "(", "\"hdfs://%s:%s\"", "%", "(", "fs", ".", "host", ",", "fs", ".", "port", ")", ",", "path", ")", "fs", ".", "close", "(", ")", "else", ":", "apath", "=", "\"file:%s\"", "%", "os", ".", "path", ".", "abspath", "(", "path", ")", "return", "apath" ]
Return an absolute path for ``hdfs_path``. The ``user`` arg is passed to :func:`split`. The ``local`` argument forces ``hdfs_path`` to be interpreted as an ordinary local path: .. code-block:: python >>> import os >>> os.chdir('/tmp') >>> import pydoop.hdfs.path as hpath >>> hpath.abspath('file:/tmp') 'file:/tmp' >>> hpath.abspath('file:/tmp', local=True) 'file:/tmp/file:/tmp' Note that this function always returns a full URI: .. code-block:: python >>> import pydoop.hdfs.path as hpath >>> hpath.abspath('/tmp') 'hdfs://localhost:9000/tmp'
[ "Return", "an", "absolute", "path", "for", "hdfs_path", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/path.py#L242-L278
7,957
crs4/pydoop
pydoop/hdfs/path.py
dirname
def dirname(hdfs_path): """ Return the directory component of ``hdfs_path``. """ scheme, netloc, path = parse(hdfs_path) return unparse(scheme, netloc, os.path.dirname(path))
python
def dirname(hdfs_path): """ Return the directory component of ``hdfs_path``. """ scheme, netloc, path = parse(hdfs_path) return unparse(scheme, netloc, os.path.dirname(path))
[ "def", "dirname", "(", "hdfs_path", ")", ":", "scheme", ",", "netloc", ",", "path", "=", "parse", "(", "hdfs_path", ")", "return", "unparse", "(", "scheme", ",", "netloc", ",", "os", ".", "path", ".", "dirname", "(", "path", ")", ")" ]
Return the directory component of ``hdfs_path``.
[ "Return", "the", "directory", "component", "of", "hdfs_path", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/path.py#L296-L301
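dirname() applies os.path.dirname to the path component only, keeping scheme and host; a short sketch follows, with expected outputs shown as comments (the exact rendering comes from the parse/unparse helpers, so treat them as indicative).

```python
import pydoop.hdfs.path as hpath  # the same import used in the abspath doctest

print(hpath.dirname("hdfs://namenode:9000/user/amy/data/part-00000"))
# expected: hdfs://namenode:9000/user/amy/data
print(hpath.dirname("file:/tmp/report.csv"))
# expected: file:/tmp
```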
7,958
crs4/pydoop
pydoop/hdfs/path.py
expanduser
def expanduser(path): """ Replace initial ``~`` or ``~user`` with the user's home directory. **NOTE:** if the default file system is HDFS, the ``~user`` form is expanded regardless of the user's existence. """ if hdfs_fs.default_is_local(): return os.path.expanduser(path) m = re.match(r'^~([^/]*)', path) if m is None: return path user = m.groups()[0] or common.DEFAULT_USER return '/user/%s%s' % (user, path[m.end(1):])
python
def expanduser(path): """ Replace initial ``~`` or ``~user`` with the user's home directory. **NOTE:** if the default file system is HDFS, the ``~user`` form is expanded regardless of the user's existence. """ if hdfs_fs.default_is_local(): return os.path.expanduser(path) m = re.match(r'^~([^/]*)', path) if m is None: return path user = m.groups()[0] or common.DEFAULT_USER return '/user/%s%s' % (user, path[m.end(1):])
[ "def", "expanduser", "(", "path", ")", ":", "if", "hdfs_fs", ".", "default_is_local", "(", ")", ":", "return", "os", ".", "path", ".", "expanduser", "(", "path", ")", "m", "=", "re", ".", "match", "(", "r'^~([^/]*)'", ",", "path", ")", "if", "m", "is", "None", ":", "return", "path", "user", "=", "m", ".", "groups", "(", ")", "[", "0", "]", "or", "common", ".", "DEFAULT_USER", "return", "'/user/%s%s'", "%", "(", "user", ",", "path", "[", "m", ".", "end", "(", "1", ")", ":", "]", ")" ]
Replace initial ``~`` or ``~user`` with the user's home directory. **NOTE:** if the default file system is HDFS, the ``~user`` form is expanded regardless of the user's existence.
[ "Replace", "initial", "~", "or", "~user", "with", "the", "user", "s", "home", "directory", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/path.py#L355-L368
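expanduser() rewrites ~ and ~user to /user/<name> when the default file system is HDFS, without checking that the user exists; the sketch below assumes an HDFS default, otherwise the call simply delegates to os.path.expanduser.

```python
import pydoop.hdfs.path as hpath

print(hpath.expanduser("~amy/logs"))   # expected: /user/amy/logs
print(hpath.expanduser("~/logs"))      # expected: /user/<default user>/logs
print(hpath.expanduser("/tmp/logs"))   # no leading '~': returned unchanged
```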
7,959
crs4/pydoop
pydoop/hdfs/path.py
normpath
def normpath(path): """ Normalize ``path``, collapsing redundant separators and up-level refs. """ scheme, netloc, path_ = parse(path) return unparse(scheme, netloc, os.path.normpath(path_))
python
def normpath(path): """ Normalize ``path``, collapsing redundant separators and up-level refs. """ scheme, netloc, path_ = parse(path) return unparse(scheme, netloc, os.path.normpath(path_))
[ "def", "normpath", "(", "path", ")", ":", "scheme", ",", "netloc", ",", "path_", "=", "parse", "(", "path", ")", "return", "unparse", "(", "scheme", ",", "netloc", ",", "os", ".", "path", ".", "normpath", "(", "path_", ")", ")" ]
Normalize ``path``, collapsing redundant separators and up-level refs.
[ "Normalize", "path", "collapsing", "redundant", "separators", "and", "up", "-", "level", "refs", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/path.py#L480-L485
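normpath() collapses redundant separators and up-level references on the path component only; a short sketch with the expected result as a comment.

```python
import pydoop.hdfs.path as hpath

print(hpath.normpath("hdfs://namenode:9000/user//amy/../bob/./data"))
# expected: hdfs://namenode:9000/user/bob/data
```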
7,960
crs4/pydoop
pydoop/hdfs/path.py
realpath
def realpath(path): """ Return ``path`` with symlinks resolved. Currently this function returns non-local paths unchanged. """ scheme, netloc, path_ = parse(path) if scheme == 'file' or hdfs_fs.default_is_local(): return unparse(scheme, netloc, os.path.realpath(path_)) return path
python
def realpath(path): """ Return ``path`` with symlinks resolved. Currently this function returns non-local paths unchanged. """ scheme, netloc, path_ = parse(path) if scheme == 'file' or hdfs_fs.default_is_local(): return unparse(scheme, netloc, os.path.realpath(path_)) return path
[ "def", "realpath", "(", "path", ")", ":", "scheme", ",", "netloc", ",", "path_", "=", "parse", "(", "path", ")", "if", "scheme", "==", "'file'", "or", "hdfs_fs", ".", "default_is_local", "(", ")", ":", "return", "unparse", "(", "scheme", ",", "netloc", ",", "os", ".", "path", ".", "realpath", "(", "path_", ")", ")", "return", "path" ]
Return ``path`` with symlinks resolved. Currently this function returns non-local paths unchanged.
[ "Return", "path", "with", "symlinks", "resolved", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/path.py#L488-L497
7,961
crs4/pydoop
pydoop/hdfs/fs.py
default_is_local
def default_is_local(hadoop_conf=None, hadoop_home=None): """\ Is Hadoop configured to use the local file system? By default, it is. A DFS must be explicitly configured. """ params = pydoop.hadoop_params(hadoop_conf, hadoop_home) for k in 'fs.defaultFS', 'fs.default.name': if not params.get(k, 'file:').startswith('file:'): return False return True
python
def default_is_local(hadoop_conf=None, hadoop_home=None): """\ Is Hadoop configured to use the local file system? By default, it is. A DFS must be explicitly configured. """ params = pydoop.hadoop_params(hadoop_conf, hadoop_home) for k in 'fs.defaultFS', 'fs.default.name': if not params.get(k, 'file:').startswith('file:'): return False return True
[ "def", "default_is_local", "(", "hadoop_conf", "=", "None", ",", "hadoop_home", "=", "None", ")", ":", "params", "=", "pydoop", ".", "hadoop_params", "(", "hadoop_conf", ",", "hadoop_home", ")", "for", "k", "in", "'fs.defaultFS'", ",", "'fs.default.name'", ":", "if", "not", "params", ".", "get", "(", "k", ",", "'file:'", ")", ".", "startswith", "(", "'file:'", ")", ":", "return", "False", "return", "True" ]
\ Is Hadoop configured to use the local file system? By default, it is. A DFS must be explicitly configured.
[ "\\", "Is", "Hadoop", "configured", "to", "use", "the", "local", "file", "system?" ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L93-L103
7,962
crs4/pydoop
pydoop/hdfs/fs.py
hdfs.open_file
def open_file(self, path, mode="r", buff_size=0, replication=0, blocksize=0, encoding=None, errors=None): """ Open an HDFS file. Supported opening modes are "r", "w", "a". In addition, a trailing "t" can be added to specify text mode (e.g., "rt" = open for reading text). Pass 0 as ``buff_size``, ``replication`` or ``blocksize`` if you want to use the "configured" values, i.e., the ones set in the Hadoop configuration files. :type path: str :param path: the full path to the file :type mode: str :param mode: opening mode :type buff_size: int :param buff_size: read/write buffer size in bytes :type replication: int :param replication: HDFS block replication :type blocksize: int :param blocksize: HDFS block size :rtpye: :class:`~.file.hdfs_file` :return: handle to the open file """ _complain_ifclosed(self.closed) if not path: raise ValueError("Empty path") m, is_text = common.parse_mode(mode) if not self.host: fret = local_file(self, path, m) if is_text: cls = io.BufferedReader if m == "r" else io.BufferedWriter fret = TextIOWrapper(cls(fret), encoding, errors) return fret f = self.fs.open_file(path, m, buff_size, replication, blocksize) cls = FileIO if is_text else hdfs_file fret = cls(f, self, mode) return fret
python
def open_file(self, path, mode="r", buff_size=0, replication=0, blocksize=0, encoding=None, errors=None): """ Open an HDFS file. Supported opening modes are "r", "w", "a". In addition, a trailing "t" can be added to specify text mode (e.g., "rt" = open for reading text). Pass 0 as ``buff_size``, ``replication`` or ``blocksize`` if you want to use the "configured" values, i.e., the ones set in the Hadoop configuration files. :type path: str :param path: the full path to the file :type mode: str :param mode: opening mode :type buff_size: int :param buff_size: read/write buffer size in bytes :type replication: int :param replication: HDFS block replication :type blocksize: int :param blocksize: HDFS block size :rtpye: :class:`~.file.hdfs_file` :return: handle to the open file """ _complain_ifclosed(self.closed) if not path: raise ValueError("Empty path") m, is_text = common.parse_mode(mode) if not self.host: fret = local_file(self, path, m) if is_text: cls = io.BufferedReader if m == "r" else io.BufferedWriter fret = TextIOWrapper(cls(fret), encoding, errors) return fret f = self.fs.open_file(path, m, buff_size, replication, blocksize) cls = FileIO if is_text else hdfs_file fret = cls(f, self, mode) return fret
[ "def", "open_file", "(", "self", ",", "path", ",", "mode", "=", "\"r\"", ",", "buff_size", "=", "0", ",", "replication", "=", "0", ",", "blocksize", "=", "0", ",", "encoding", "=", "None", ",", "errors", "=", "None", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "if", "not", "path", ":", "raise", "ValueError", "(", "\"Empty path\"", ")", "m", ",", "is_text", "=", "common", ".", "parse_mode", "(", "mode", ")", "if", "not", "self", ".", "host", ":", "fret", "=", "local_file", "(", "self", ",", "path", ",", "m", ")", "if", "is_text", ":", "cls", "=", "io", ".", "BufferedReader", "if", "m", "==", "\"r\"", "else", "io", ".", "BufferedWriter", "fret", "=", "TextIOWrapper", "(", "cls", "(", "fret", ")", ",", "encoding", ",", "errors", ")", "return", "fret", "f", "=", "self", ".", "fs", ".", "open_file", "(", "path", ",", "m", ",", "buff_size", ",", "replication", ",", "blocksize", ")", "cls", "=", "FileIO", "if", "is_text", "else", "hdfs_file", "fret", "=", "cls", "(", "f", ",", "self", ",", "mode", ")", "return", "fret" ]
Open an HDFS file. Supported opening modes are "r", "w", "a". In addition, a trailing "t" can be added to specify text mode (e.g., "rt" = open for reading text). Pass 0 as ``buff_size``, ``replication`` or ``blocksize`` if you want to use the "configured" values, i.e., the ones set in the Hadoop configuration files. :type path: str :param path: the full path to the file :type mode: str :param mode: opening mode :type buff_size: int :param buff_size: read/write buffer size in bytes :type replication: int :param replication: HDFS block replication :type blocksize: int :param blocksize: HDFS block size :rtype: :class:`~.file.hdfs_file` :return: handle to the open file
[ "Open", "an", "HDFS", "file", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L235-L280
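A hedged write/read round trip through hdfs.open_file(); the constructor arguments ("default", 0) assume the usual libhdfs convention of "use the configured default filesystem", and the file path is illustrative.

```python
from pydoop.hdfs.fs import hdfs

fs = hdfs("default", 0)  # assumed: connect to the configured default filesystem

out = fs.open_file("/tmp/pydoop_example.txt", "wt")
out.write("hello from pydoop\n")
out.close()

inp = fs.open_file("/tmp/pydoop_example.txt", "rt")
print(inp.read())
inp.close()

fs.close()
```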
7,963
crs4/pydoop
pydoop/hdfs/fs.py
hdfs.capacity
def capacity(self): """ Return the raw capacity of the filesystem. :rtype: int :return: filesystem capacity """ _complain_ifclosed(self.closed) if not self.__status.host: raise RuntimeError('Capacity is not defined for a local fs') return self.fs.get_capacity()
python
def capacity(self): """ Return the raw capacity of the filesystem. :rtype: int :return: filesystem capacity """ _complain_ifclosed(self.closed) if not self.__status.host: raise RuntimeError('Capacity is not defined for a local fs') return self.fs.get_capacity()
[ "def", "capacity", "(", "self", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "if", "not", "self", ".", "__status", ".", "host", ":", "raise", "RuntimeError", "(", "'Capacity is not defined for a local fs'", ")", "return", "self", ".", "fs", ".", "get_capacity", "(", ")" ]
Return the raw capacity of the filesystem. :rtype: int :return: filesystem capacity
[ "Return", "the", "raw", "capacity", "of", "the", "filesystem", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L282-L292
7,964
crs4/pydoop
pydoop/hdfs/fs.py
hdfs.copy
def copy(self, from_path, to_hdfs, to_path): """ Copy file from one filesystem to another. :type from_path: str :param from_path: the path of the source file :type to_hdfs: :class:`hdfs` :param to_hdfs: destination filesystem :type to_path: str :param to_path: the path of the destination file :raises: :exc:`~exceptions.IOError` """ _complain_ifclosed(self.closed) if isinstance(to_hdfs, self.__class__): to_hdfs = to_hdfs.fs return self.fs.copy(from_path, to_hdfs, to_path)
python
def copy(self, from_path, to_hdfs, to_path): """ Copy file from one filesystem to another. :type from_path: str :param from_path: the path of the source file :type to_hdfs: :class:`hdfs` :param to_hdfs: destination filesystem :type to_path: str :param to_path: the path of the destination file :raises: :exc:`~exceptions.IOError` """ _complain_ifclosed(self.closed) if isinstance(to_hdfs, self.__class__): to_hdfs = to_hdfs.fs return self.fs.copy(from_path, to_hdfs, to_path)
[ "def", "copy", "(", "self", ",", "from_path", ",", "to_hdfs", ",", "to_path", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "if", "isinstance", "(", "to_hdfs", ",", "self", ".", "__class__", ")", ":", "to_hdfs", "=", "to_hdfs", ".", "fs", "return", "self", ".", "fs", ".", "copy", "(", "from_path", ",", "to_hdfs", ",", "to_path", ")" ]
Copy file from one filesystem to another. :type from_path: str :param from_path: the path of the source file :type to_hdfs: :class:`hdfs` :param to_hdfs: destination filesystem :type to_path: str :param to_path: the path of the destination file :raises: :exc:`~exceptions.IOError`
[ "Copy", "file", "from", "one", "filesystem", "to", "another", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L294-L309
7,965
crs4/pydoop
pydoop/hdfs/fs.py
hdfs.delete
def delete(self, path, recursive=True): """ Delete ``path``. :type path: str :param path: the path of the file or directory :type recursive: bool :param recursive: if ``path`` is a directory, delete it recursively when :obj:`True` :raises: :exc:`~exceptions.IOError` when ``recursive`` is :obj:`False` and directory is non-empty """ _complain_ifclosed(self.closed) return self.fs.delete(path, recursive)
python
def delete(self, path, recursive=True): """ Delete ``path``. :type path: str :param path: the path of the file or directory :type recursive: bool :param recursive: if ``path`` is a directory, delete it recursively when :obj:`True` :raises: :exc:`~exceptions.IOError` when ``recursive`` is :obj:`False` and directory is non-empty """ _complain_ifclosed(self.closed) return self.fs.delete(path, recursive)
[ "def", "delete", "(", "self", ",", "path", ",", "recursive", "=", "True", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "return", "self", ".", "fs", ".", "delete", "(", "path", ",", "recursive", ")" ]
Delete ``path``. :type path: str :param path: the path of the file or directory :type recursive: bool :param recursive: if ``path`` is a directory, delete it recursively when :obj:`True` :raises: :exc:`~exceptions.IOError` when ``recursive`` is :obj:`False` and directory is non-empty
[ "Delete", "path", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L333-L346
7,966
crs4/pydoop
pydoop/hdfs/fs.py
hdfs.exists
def exists(self, path): """ Check if a given path exists on the filesystem. :type path: str :param path: the path to look for :rtype: bool :return: :obj:`True` if ``path`` exists """ _complain_ifclosed(self.closed) return self.fs.exists(path)
python
def exists(self, path): """ Check if a given path exists on the filesystem. :type path: str :param path: the path to look for :rtype: bool :return: :obj:`True` if ``path`` exists """ _complain_ifclosed(self.closed) return self.fs.exists(path)
[ "def", "exists", "(", "self", ",", "path", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "return", "self", ".", "fs", ".", "exists", "(", "path", ")" ]
Check if a given path exists on the filesystem. :type path: str :param path: the path to look for :rtype: bool :return: :obj:`True` if ``path`` exists
[ "Check", "if", "a", "given", "path", "exists", "on", "the", "filesystem", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L348-L358
7,967
crs4/pydoop
pydoop/hdfs/fs.py
hdfs.get_path_info
def get_path_info(self, path): """ Get information about ``path`` as a dict of properties. The return value, based upon ``fs.FileStatus`` from the Java API, has the following fields: * ``block_size``: HDFS block size of ``path`` * ``group``: group associated with ``path`` * ``kind``: ``'file'`` or ``'directory'`` * ``last_access``: last access time of ``path`` * ``last_mod``: last modification time of ``path`` * ``name``: fully qualified path name * ``owner``: owner of ``path`` * ``permissions``: file system permissions associated with ``path`` * ``replication``: replication factor of ``path`` * ``size``: size in bytes of ``path`` :type path: str :param path: a path in the filesystem :rtype: dict :return: path information :raises: :exc:`~exceptions.IOError` """ _complain_ifclosed(self.closed) return self.fs.get_path_info(path)
python
def get_path_info(self, path): """ Get information about ``path`` as a dict of properties. The return value, based upon ``fs.FileStatus`` from the Java API, has the following fields: * ``block_size``: HDFS block size of ``path`` * ``group``: group associated with ``path`` * ``kind``: ``'file'`` or ``'directory'`` * ``last_access``: last access time of ``path`` * ``last_mod``: last modification time of ``path`` * ``name``: fully qualified path name * ``owner``: owner of ``path`` * ``permissions``: file system permissions associated with ``path`` * ``replication``: replication factor of ``path`` * ``size``: size in bytes of ``path`` :type path: str :param path: a path in the filesystem :rtype: dict :return: path information :raises: :exc:`~exceptions.IOError` """ _complain_ifclosed(self.closed) return self.fs.get_path_info(path)
[ "def", "get_path_info", "(", "self", ",", "path", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "return", "self", ".", "fs", ".", "get_path_info", "(", "path", ")" ]
Get information about ``path`` as a dict of properties. The return value, based upon ``fs.FileStatus`` from the Java API, has the following fields: * ``block_size``: HDFS block size of ``path`` * ``group``: group associated with ``path`` * ``kind``: ``'file'`` or ``'directory'`` * ``last_access``: last access time of ``path`` * ``last_mod``: last modification time of ``path`` * ``name``: fully qualified path name * ``owner``: owner of ``path`` * ``permissions``: file system permissions associated with ``path`` * ``replication``: replication factor of ``path`` * ``size``: size in bytes of ``path`` :type path: str :param path: a path in the filesystem :rtype: dict :return: path information :raises: :exc:`~exceptions.IOError`
[ "Get", "information", "about", "path", "as", "a", "dict", "of", "properties", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L378-L403
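get_path_info() returns a plain dict whose keys are listed in the docstring above; the sketch prints a few of them for an illustrative path, reusing the assumed default-filesystem connection from the open_file example.

```python
from pydoop.hdfs.fs import hdfs

fs = hdfs("default", 0)  # assumed default-filesystem connection, as above

info = fs.get_path_info("/tmp/pydoop_example.txt")
print(info["kind"], info["size"], info["replication"], info["owner"])

fs.close()
```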
7,968
crs4/pydoop
pydoop/hdfs/fs.py
hdfs.list_directory
def list_directory(self, path): r""" Get list of files and directories for ``path``\ . :type path: str :param path: the path of the directory :rtype: list :return: list of files and directories in ``path`` :raises: :exc:`~exceptions.IOError` """ _complain_ifclosed(self.closed) return self.fs.list_directory(path)
python
def list_directory(self, path): r""" Get list of files and directories for ``path``\ . :type path: str :param path: the path of the directory :rtype: list :return: list of files and directories in ``path`` :raises: :exc:`~exceptions.IOError` """ _complain_ifclosed(self.closed) return self.fs.list_directory(path)
[ "def", "list_directory", "(", "self", ",", "path", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "return", "self", ".", "fs", ".", "list_directory", "(", "path", ")" ]
r""" Get list of files and directories for ``path``\ . :type path: str :param path: the path of the directory :rtype: list :return: list of files and directories in ``path`` :raises: :exc:`~exceptions.IOError`
[ "r", "Get", "list", "of", "files", "and", "directories", "for", "path", "\\", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L405-L416
7,969
crs4/pydoop
pydoop/hdfs/fs.py
hdfs.rename
def rename(self, from_path, to_path): """ Rename file. :type from_path: str :param from_path: the path of the source file :type to_path: str :param to_path: the path of the destination file :raises: :exc:`~exceptions.IOError` """ _complain_ifclosed(self.closed) return self.fs.rename(from_path, to_path)
python
def rename(self, from_path, to_path): """ Rename file. :type from_path: str :param from_path: the path of the source file :type to_path: str :param to_path: the path of the destination file :raises: :exc:`~exceptions.IOError` """ _complain_ifclosed(self.closed) return self.fs.rename(from_path, to_path)
[ "def", "rename", "(", "self", ",", "from_path", ",", "to_path", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "return", "self", ".", "fs", ".", "rename", "(", "from_path", ",", "to_path", ")" ]
Rename file. :type from_path: str :param from_path: the path of the source file :type to_path: str :param to_path: the path of the destination file :raises: :exc:`~exceptions.IOError`
[ "Rename", "file", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L435-L446
7,970
crs4/pydoop
pydoop/hdfs/fs.py
hdfs.set_replication
def set_replication(self, path, replication): r""" Set the replication of ``path`` to ``replication``\ . :type path: str :param path: the path of the file :type replication: int :param replication: the replication value :raises: :exc:`~exceptions.IOError` """ _complain_ifclosed(self.closed) return self.fs.set_replication(path, replication)
python
def set_replication(self, path, replication): r""" Set the replication of ``path`` to ``replication``\ . :type path: str :param path: the path of the file :type replication: int :param replication: the replication value :raises: :exc:`~exceptions.IOError` """ _complain_ifclosed(self.closed) return self.fs.set_replication(path, replication)
[ "def", "set_replication", "(", "self", ",", "path", ",", "replication", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "return", "self", ".", "fs", ".", "set_replication", "(", "path", ",", "replication", ")" ]
r""" Set the replication of ``path`` to ``replication``\ . :type path: str :param path: the path of the file :type replication: int :param replication: the replication value :raises: :exc:`~exceptions.IOError`
[ "r", "Set", "the", "replication", "of", "path", "to", "replication", "\\", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L448-L459
7,971
crs4/pydoop
pydoop/hdfs/fs.py
hdfs.set_working_directory
def set_working_directory(self, path): r""" Set the working directory to ``path``\ . All relative paths will be resolved relative to it. :type path: str :param path: the path of the directory :raises: :exc:`~exceptions.IOError` """ _complain_ifclosed(self.closed) return self.fs.set_working_directory(path)
python
def set_working_directory(self, path): r""" Set the working directory to ``path``\ . All relative paths will be resolved relative to it. :type path: str :param path: the path of the directory :raises: :exc:`~exceptions.IOError` """ _complain_ifclosed(self.closed) return self.fs.set_working_directory(path)
[ "def", "set_working_directory", "(", "self", ",", "path", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "return", "self", ".", "fs", ".", "set_working_directory", "(", "path", ")" ]
r""" Set the working directory to ``path``\ . All relative paths will be resolved relative to it. :type path: str :param path: the path of the directory :raises: :exc:`~exceptions.IOError`
[ "r", "Set", "the", "working", "directory", "to", "path", "\\", ".", "All", "relative", "paths", "will", "be", "resolved", "relative", "to", "it", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L461-L471
7,972
crs4/pydoop
pydoop/hdfs/fs.py
hdfs.working_directory
def working_directory(self): """ Get the current working directory. :rtype: str :return: current working directory """ _complain_ifclosed(self.closed) wd = self.fs.get_working_directory() return wd
python
def working_directory(self): """ Get the current working directory. :rtype: str :return: current working directory """ _complain_ifclosed(self.closed) wd = self.fs.get_working_directory() return wd
[ "def", "working_directory", "(", "self", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "wd", "=", "self", ".", "fs", ".", "get_working_directory", "(", ")", "return", "wd" ]
Get the current working directory. :rtype: str :return: current working directory
[ "Get", "the", "current", "working", "directory", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L483-L492
7,973
crs4/pydoop
pydoop/hdfs/fs.py
hdfs.__compute_mode_from_string
def __compute_mode_from_string(self, path, mode_string): """ Scan a unix-style mode string and apply it to ``path``. :type mode_string: str :param mode_string: see ``man chmod`` for details. ``X``, ``s`` and ``t`` modes are not supported. The string should match the following regular expression: ``[ugoa]*[-+=]([rwx]*)``. :rtype: int :return: a new mode integer resulting from applying ``mode_string`` to ``path``. :raises: :exc:`~exceptions.ValueError` if ``mode_string`` is invalid. """ Char_to_perm_byte = {'r': 4, 'w': 2, 'x': 1} Fields = (('u', 6), ('g', 3), ('o', 0)) # -- m = re.match(r"\s*([ugoa]*)([-+=])([rwx]*)\s*", mode_string) if not m: raise ValueError("Invalid mode string %s" % mode_string) who = m.group(1) what_op = m.group(2) which_perm = m.group(3) # -- old_mode = self.fs.get_path_info(path)['permissions'] # The mode to be applied by the operation, repeated three # times in a list, for user, group, and other respectively. # Initially these are identical, but some may change if we # have to respect the umask setting. op_perm = [ reduce(ops.ior, [Char_to_perm_byte[c] for c in which_perm]) ] * 3 if 'a' in who: who = 'ugo' elif who == '': who = 'ugo' # erase the umask bits inverted_umask = ~self.__get_umask() for i, field in enumerate(Fields): op_perm[i] &= (inverted_umask >> field[1]) & 0x7 # for each user, compute the permission bit and set it in the mode new_mode = 0 for i, tpl in enumerate(Fields): field, shift = tpl # shift by the bits specified for the field; keep only the # 3 lowest bits old = (old_mode >> shift) & 0x7 if field in who: if what_op == '-': new = old & ~op_perm[i] elif what_op == '=': new = op_perm[i] elif what_op == '+': new = old | op_perm[i] else: raise RuntimeError( "unexpected permission operation %s" % what_op ) else: # copy the previous permissions new = old new_mode |= new << shift return new_mode
python
def __compute_mode_from_string(self, path, mode_string): """ Scan a unix-style mode string and apply it to ``path``. :type mode_string: str :param mode_string: see ``man chmod`` for details. ``X``, ``s`` and ``t`` modes are not supported. The string should match the following regular expression: ``[ugoa]*[-+=]([rwx]*)``. :rtype: int :return: a new mode integer resulting from applying ``mode_string`` to ``path``. :raises: :exc:`~exceptions.ValueError` if ``mode_string`` is invalid. """ Char_to_perm_byte = {'r': 4, 'w': 2, 'x': 1} Fields = (('u', 6), ('g', 3), ('o', 0)) # -- m = re.match(r"\s*([ugoa]*)([-+=])([rwx]*)\s*", mode_string) if not m: raise ValueError("Invalid mode string %s" % mode_string) who = m.group(1) what_op = m.group(2) which_perm = m.group(3) # -- old_mode = self.fs.get_path_info(path)['permissions'] # The mode to be applied by the operation, repeated three # times in a list, for user, group, and other respectively. # Initially these are identical, but some may change if we # have to respect the umask setting. op_perm = [ reduce(ops.ior, [Char_to_perm_byte[c] for c in which_perm]) ] * 3 if 'a' in who: who = 'ugo' elif who == '': who = 'ugo' # erase the umask bits inverted_umask = ~self.__get_umask() for i, field in enumerate(Fields): op_perm[i] &= (inverted_umask >> field[1]) & 0x7 # for each user, compute the permission bit and set it in the mode new_mode = 0 for i, tpl in enumerate(Fields): field, shift = tpl # shift by the bits specified for the field; keep only the # 3 lowest bits old = (old_mode >> shift) & 0x7 if field in who: if what_op == '-': new = old & ~op_perm[i] elif what_op == '=': new = op_perm[i] elif what_op == '+': new = old | op_perm[i] else: raise RuntimeError( "unexpected permission operation %s" % what_op ) else: # copy the previous permissions new = old new_mode |= new << shift return new_mode
[ "def", "__compute_mode_from_string", "(", "self", ",", "path", ",", "mode_string", ")", ":", "Char_to_perm_byte", "=", "{", "'r'", ":", "4", ",", "'w'", ":", "2", ",", "'x'", ":", "1", "}", "Fields", "=", "(", "(", "'u'", ",", "6", ")", ",", "(", "'g'", ",", "3", ")", ",", "(", "'o'", ",", "0", ")", ")", "# --", "m", "=", "re", ".", "match", "(", "r\"\\s*([ugoa]*)([-+=])([rwx]*)\\s*\"", ",", "mode_string", ")", "if", "not", "m", ":", "raise", "ValueError", "(", "\"Invalid mode string %s\"", "%", "mode_string", ")", "who", "=", "m", ".", "group", "(", "1", ")", "what_op", "=", "m", ".", "group", "(", "2", ")", "which_perm", "=", "m", ".", "group", "(", "3", ")", "# --", "old_mode", "=", "self", ".", "fs", ".", "get_path_info", "(", "path", ")", "[", "'permissions'", "]", "# The mode to be applied by the operation, repeated three", "# times in a list, for user, group, and other respectively.", "# Initially these are identical, but some may change if we", "# have to respect the umask setting.", "op_perm", "=", "[", "reduce", "(", "ops", ".", "ior", ",", "[", "Char_to_perm_byte", "[", "c", "]", "for", "c", "in", "which_perm", "]", ")", "]", "*", "3", "if", "'a'", "in", "who", ":", "who", "=", "'ugo'", "elif", "who", "==", "''", ":", "who", "=", "'ugo'", "# erase the umask bits", "inverted_umask", "=", "~", "self", ".", "__get_umask", "(", ")", "for", "i", ",", "field", "in", "enumerate", "(", "Fields", ")", ":", "op_perm", "[", "i", "]", "&=", "(", "inverted_umask", ">>", "field", "[", "1", "]", ")", "&", "0x7", "# for each user, compute the permission bit and set it in the mode", "new_mode", "=", "0", "for", "i", ",", "tpl", "in", "enumerate", "(", "Fields", ")", ":", "field", ",", "shift", "=", "tpl", "# shift by the bits specified for the field; keep only the", "# 3 lowest bits", "old", "=", "(", "old_mode", ">>", "shift", ")", "&", "0x7", "if", "field", "in", "who", ":", "if", "what_op", "==", "'-'", ":", "new", "=", "old", "&", "~", "op_perm", "[", "i", "]", "elif", "what_op", "==", "'='", ":", "new", "=", "op_perm", "[", "i", "]", "elif", "what_op", "==", "'+'", ":", "new", "=", "old", "|", "op_perm", "[", "i", "]", "else", ":", "raise", "RuntimeError", "(", "\"unexpected permission operation %s\"", "%", "what_op", ")", "else", ":", "# copy the previous permissions", "new", "=", "old", "new_mode", "|=", "new", "<<", "shift", "return", "new_mode" ]
Scan a unix-style mode string and apply it to ``path``. :type mode_string: str :param mode_string: see ``man chmod`` for details. ``X``, ``s`` and ``t`` modes are not supported. The string should match the following regular expression: ``[ugoa]*[-+=]([rwx]*)``. :rtype: int :return: a new mode integer resulting from applying ``mode_string`` to ``path``. :raises: :exc:`~exceptions.ValueError` if ``mode_string`` is invalid.
[ "Scan", "a", "unix", "-", "style", "mode", "string", "and", "apply", "it", "to", "path", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L515-L576
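The private helper above turns a chmod-style mode string into an integer mode. Below is a simplified, self-contained sketch of the same rwx bit arithmetic; unlike the original it ignores the umask, always applies the change to user, group and other when no "who" is given, and is not tied to any HDFS path.

import re

CHAR_TO_BIT = {"r": 4, "w": 2, "x": 1}
FIELDS = (("u", 6), ("g", 3), ("o", 0))  # field name and its bit shift

def apply_mode_string(old_mode, mode_string):
    # parse strings such as "g+w", "o-rwx", "a=rx"
    m = re.match(r"\s*([ugoa]*)([-+=])([rwx]*)\s*$", mode_string)
    if not m:
        raise ValueError("invalid mode string %r" % mode_string)
    who, op, perms = m.groups()
    who = "ugo" if (not who or "a" in who) else who
    bits = sum(CHAR_TO_BIT[c] for c in perms)
    new_mode = 0
    for field, shift in FIELDS:
        old = (old_mode >> shift) & 0x7          # current 3-bit field
        if field in who:
            new = old & ~bits if op == "-" else bits if op == "=" else old | bits
        else:
            new = old                            # field untouched
        new_mode |= new << shift
    return new_mode

print(oct(apply_mode_string(0o644, "g+w")))      # 0o664
print(oct(apply_mode_string(0o777, "o-rwx")))    # 0o770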
7,974
crs4/pydoop
pydoop/hdfs/fs.py
hdfs.utime
def utime(self, path, mtime, atime): """ Change file last access and modification times. :type path: str :param path: the path to the file or directory :type mtime: int :param mtime: new modification time in seconds :type atime: int :param atime: new access time in seconds :raises: :exc:`~exceptions.IOError` """ _complain_ifclosed(self.closed) return self.fs.utime(path, int(mtime), int(atime))
python
def utime(self, path, mtime, atime): """ Change file last access and modification times. :type path: str :param path: the path to the file or directory :type mtime: int :param mtime: new modification time in seconds :type atime: int :param atime: new access time in seconds :raises: :exc:`~exceptions.IOError` """ _complain_ifclosed(self.closed) return self.fs.utime(path, int(mtime), int(atime))
[ "def", "utime", "(", "self", ",", "path", ",", "mtime", ",", "atime", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "return", "self", ".", "fs", ".", "utime", "(", "path", ",", "int", "(", "mtime", ")", ",", "int", "(", "atime", ")", ")" ]
Change file last access and modification times. :type path: str :param path: the path to the file or directory :type mtime: int :param mtime: new modification time in seconds :type atime: int :param atime: new access time in seconds :raises: :exc:`~exceptions.IOError`
[ "Change", "file", "last", "access", "and", "modification", "times", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L595-L608
7,975
crs4/pydoop
setup.py
rm_rf
def rm_rf(path, dry_run=False): """ Remove a file or directory tree. Won't throw an exception, even if the removal fails. """ log.info("removing %s" % path) if dry_run: return try: if os.path.isdir(path) and not os.path.islink(path): shutil.rmtree(path) else: os.remove(path) except OSError: pass
python
def rm_rf(path, dry_run=False): """ Remove a file or directory tree. Won't throw an exception, even if the removal fails. """ log.info("removing %s" % path) if dry_run: return try: if os.path.isdir(path) and not os.path.islink(path): shutil.rmtree(path) else: os.remove(path) except OSError: pass
[ "def", "rm_rf", "(", "path", ",", "dry_run", "=", "False", ")", ":", "log", ".", "info", "(", "\"removing %s\"", "%", "path", ")", "if", "dry_run", ":", "return", "try", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", "and", "not", "os", ".", "path", ".", "islink", "(", "path", ")", ":", "shutil", ".", "rmtree", "(", "path", ")", "else", ":", "os", ".", "remove", "(", "path", ")", "except", "OSError", ":", "pass" ]
Remove a file or directory tree. Won't throw an exception, even if the removal fails.
[ "Remove", "a", "file", "or", "directory", "tree", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/setup.py#L93-L108
7,976
crs4/pydoop
setup.py
BuildPydoopExt.__finalize_hdfs
def __finalize_hdfs(self, ext): """\ Adds a few bits that depend on the specific environment. Delaying this until the build_ext phase allows non-build commands (e.g., sdist) to be run without java. """ java_home = jvm.get_java_home() jvm_lib_path, _ = jvm.get_jvm_lib_path_and_name(java_home) ext.include_dirs = jvm.get_include_dirs() + ext.include_dirs ext.libraries = jvm.get_libraries() ext.library_dirs = [os.path.join(java_home, "Libraries"), jvm_lib_path] ext.define_macros = jvm.get_macros() ext.extra_link_args = ['-Wl,-rpath,%s' % jvm_lib_path] if self.__have_better_tls(): ext.define_macros.append(("HAVE_BETTER_TLS", None)) try: # too many warnings in libhdfs self.compiler.compiler_so.remove("-Wsign-compare") except (AttributeError, ValueError): pass
python
def __finalize_hdfs(self, ext): """\ Adds a few bits that depend on the specific environment. Delaying this until the build_ext phase allows non-build commands (e.g., sdist) to be run without java. """ java_home = jvm.get_java_home() jvm_lib_path, _ = jvm.get_jvm_lib_path_and_name(java_home) ext.include_dirs = jvm.get_include_dirs() + ext.include_dirs ext.libraries = jvm.get_libraries() ext.library_dirs = [os.path.join(java_home, "Libraries"), jvm_lib_path] ext.define_macros = jvm.get_macros() ext.extra_link_args = ['-Wl,-rpath,%s' % jvm_lib_path] if self.__have_better_tls(): ext.define_macros.append(("HAVE_BETTER_TLS", None)) try: # too many warnings in libhdfs self.compiler.compiler_so.remove("-Wsign-compare") except (AttributeError, ValueError): pass
[ "def", "__finalize_hdfs", "(", "self", ",", "ext", ")", ":", "java_home", "=", "jvm", ".", "get_java_home", "(", ")", "jvm_lib_path", ",", "_", "=", "jvm", ".", "get_jvm_lib_path_and_name", "(", "java_home", ")", "ext", ".", "include_dirs", "=", "jvm", ".", "get_include_dirs", "(", ")", "+", "ext", ".", "include_dirs", "ext", ".", "libraries", "=", "jvm", ".", "get_libraries", "(", ")", "ext", ".", "library_dirs", "=", "[", "os", ".", "path", ".", "join", "(", "java_home", ",", "\"Libraries\"", ")", ",", "jvm_lib_path", "]", "ext", ".", "define_macros", "=", "jvm", ".", "get_macros", "(", ")", "ext", ".", "extra_link_args", "=", "[", "'-Wl,-rpath,%s'", "%", "jvm_lib_path", "]", "if", "self", ".", "__have_better_tls", "(", ")", ":", "ext", ".", "define_macros", ".", "append", "(", "(", "\"HAVE_BETTER_TLS\"", ",", "None", ")", ")", "try", ":", "# too many warnings in libhdfs", "self", ".", "compiler", ".", "compiler_so", ".", "remove", "(", "\"-Wsign-compare\"", ")", "except", "(", "AttributeError", ",", "ValueError", ")", ":", "pass" ]
\ Adds a few bits that depend on the specific environment. Delaying this until the build_ext phase allows non-build commands (e.g., sdist) to be run without java.
[ "\\", "Adds", "a", "few", "bits", "that", "depend", "on", "the", "specific", "environment", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/setup.py#L286-L306
7,977
crs4/pydoop
pydoop/hadut.py
run_tool_cmd
def run_tool_cmd(tool, cmd, args=None, properties=None, hadoop_conf_dir=None, logger=None, keep_streams=True): """ Run a Hadoop command. If ``keep_streams`` is set to :obj:`True` (the default), the stdout and stderr of the command will be buffered in memory. If the command succeeds, the former will be returned; if it fails, a ``RunCmdError`` will be raised with the latter as the message. This mode is appropriate for short-running commands whose "result" is represented by their standard output (e.g., ``"dfsadmin", ["-safemode", "get"]``). If ``keep_streams`` is set to :obj:`False`, the command will write directly to the stdout and stderr of the calling process, and the return value will be empty. This mode is appropriate for long running commands that do not write their "real" output to stdout (such as pipes). .. code-block:: python >>> hadoop_classpath = run_cmd('classpath') """ if logger is None: logger = utils.NullLogger() _args = [tool] if hadoop_conf_dir: _args.extend(["--config", hadoop_conf_dir]) _args.append(cmd) if properties: _args.extend(_construct_property_args(properties)) if args: if isinstance(args, basestring): args = shlex.split(args) _merge_csv_args(args) gargs = _pop_generic_args(args) for seq in gargs, args: _args.extend(map(str, seq)) logger.debug('final args: %r', (_args,)) if keep_streams: p = subprocess.Popen( _args, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) error = "" stderr_iterator = iter(p.stderr.readline, b"") for line in stderr_iterator: error += line logger.info("cmd stderr line: %s", line.strip()) output, _ = p.communicate() else: p = subprocess.Popen(_args, stdout=None, stderr=None, bufsize=1) ret = p.wait() error = 'command exited with %d status' % ret if ret else '' output = '' if p.returncode: raise RunCmdError(p.returncode, ' '.join(_args), error) return output
python
def run_tool_cmd(tool, cmd, args=None, properties=None, hadoop_conf_dir=None, logger=None, keep_streams=True): """ Run a Hadoop command. If ``keep_streams`` is set to :obj:`True` (the default), the stdout and stderr of the command will be buffered in memory. If the command succeeds, the former will be returned; if it fails, a ``RunCmdError`` will be raised with the latter as the message. This mode is appropriate for short-running commands whose "result" is represented by their standard output (e.g., ``"dfsadmin", ["-safemode", "get"]``). If ``keep_streams`` is set to :obj:`False`, the command will write directly to the stdout and stderr of the calling process, and the return value will be empty. This mode is appropriate for long running commands that do not write their "real" output to stdout (such as pipes). .. code-block:: python >>> hadoop_classpath = run_cmd('classpath') """ if logger is None: logger = utils.NullLogger() _args = [tool] if hadoop_conf_dir: _args.extend(["--config", hadoop_conf_dir]) _args.append(cmd) if properties: _args.extend(_construct_property_args(properties)) if args: if isinstance(args, basestring): args = shlex.split(args) _merge_csv_args(args) gargs = _pop_generic_args(args) for seq in gargs, args: _args.extend(map(str, seq)) logger.debug('final args: %r', (_args,)) if keep_streams: p = subprocess.Popen( _args, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) error = "" stderr_iterator = iter(p.stderr.readline, b"") for line in stderr_iterator: error += line logger.info("cmd stderr line: %s", line.strip()) output, _ = p.communicate() else: p = subprocess.Popen(_args, stdout=None, stderr=None, bufsize=1) ret = p.wait() error = 'command exited with %d status' % ret if ret else '' output = '' if p.returncode: raise RunCmdError(p.returncode, ' '.join(_args), error) return output
[ "def", "run_tool_cmd", "(", "tool", ",", "cmd", ",", "args", "=", "None", ",", "properties", "=", "None", ",", "hadoop_conf_dir", "=", "None", ",", "logger", "=", "None", ",", "keep_streams", "=", "True", ")", ":", "if", "logger", "is", "None", ":", "logger", "=", "utils", ".", "NullLogger", "(", ")", "_args", "=", "[", "tool", "]", "if", "hadoop_conf_dir", ":", "_args", ".", "extend", "(", "[", "\"--config\"", ",", "hadoop_conf_dir", "]", ")", "_args", ".", "append", "(", "cmd", ")", "if", "properties", ":", "_args", ".", "extend", "(", "_construct_property_args", "(", "properties", ")", ")", "if", "args", ":", "if", "isinstance", "(", "args", ",", "basestring", ")", ":", "args", "=", "shlex", ".", "split", "(", "args", ")", "_merge_csv_args", "(", "args", ")", "gargs", "=", "_pop_generic_args", "(", "args", ")", "for", "seq", "in", "gargs", ",", "args", ":", "_args", ".", "extend", "(", "map", "(", "str", ",", "seq", ")", ")", "logger", ".", "debug", "(", "'final args: %r'", ",", "(", "_args", ",", ")", ")", "if", "keep_streams", ":", "p", "=", "subprocess", ".", "Popen", "(", "_args", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "error", "=", "\"\"", "stderr_iterator", "=", "iter", "(", "p", ".", "stderr", ".", "readline", ",", "b\"\"", ")", "for", "line", "in", "stderr_iterator", ":", "error", "+=", "line", "logger", ".", "info", "(", "\"cmd stderr line: %s\"", ",", "line", ".", "strip", "(", ")", ")", "output", ",", "_", "=", "p", ".", "communicate", "(", ")", "else", ":", "p", "=", "subprocess", ".", "Popen", "(", "_args", ",", "stdout", "=", "None", ",", "stderr", "=", "None", ",", "bufsize", "=", "1", ")", "ret", "=", "p", ".", "wait", "(", ")", "error", "=", "'command exited with %d status'", "%", "ret", "if", "ret", "else", "''", "output", "=", "''", "if", "p", ".", "returncode", ":", "raise", "RunCmdError", "(", "p", ".", "returncode", ",", "' '", ".", "join", "(", "_args", ")", ",", "error", ")", "return", "output" ]
Run a Hadoop command. If ``keep_streams`` is set to :obj:`True` (the default), the stdout and stderr of the command will be buffered in memory. If the command succeeds, the former will be returned; if it fails, a ``RunCmdError`` will be raised with the latter as the message. This mode is appropriate for short-running commands whose "result" is represented by their standard output (e.g., ``"dfsadmin", ["-safemode", "get"]``). If ``keep_streams`` is set to :obj:`False`, the command will write directly to the stdout and stderr of the calling process, and the return value will be empty. This mode is appropriate for long running commands that do not write their "real" output to stdout (such as pipes). .. code-block:: python >>> hadoop_classpath = run_cmd('classpath')
[ "Run", "a", "Hadoop", "command", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L118-L175
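A hypothetical call to run_tool_cmd, based only on the signature and docstring in the record above: run "hdfs dfsadmin -safemode get" and capture its stdout. The launcher name "hdfs" is an assumption about the local installation; a configured Hadoop setup and pydoop are required for this to actually run.

import pydoop.hadut as hadut

out = hadut.run_tool_cmd(
    "hdfs",                      # launcher script to call (assumed to be on PATH)
    "dfsadmin",
    args=["-safemode", "get"],
    keep_streams=True,           # buffer stdout and return it as the result
)
print(out)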
7,978
crs4/pydoop
pydoop/hadut.py
get_task_trackers
def get_task_trackers(properties=None, hadoop_conf_dir=None, offline=False): """ Get the list of task trackers in the Hadoop cluster. Each element in the returned list is in the ``(host, port)`` format. All arguments are passed to :func:`run_class`. If ``offline`` is :obj:`True`, try getting the list of task trackers from the ``slaves`` file in Hadoop's configuration directory (no attempt is made to contact the Hadoop daemons). In this case, ports are set to 0. """ if offline: if not hadoop_conf_dir: hadoop_conf_dir = pydoop.hadoop_conf() slaves = os.path.join(hadoop_conf_dir, "slaves") try: with open(slaves) as f: task_trackers = [(l.strip(), 0) for l in f] except IOError: task_trackers = [] else: # run JobClient directly (avoids "hadoop job" deprecation) stdout = run_class( "org.apache.hadoop.mapred.JobClient", ["-list-active-trackers"], properties=properties, hadoop_conf_dir=hadoop_conf_dir, keep_streams=True ) task_trackers = [] for line in stdout.splitlines(): if not line: continue line = line.split(":") task_trackers.append((line[0].split("_")[1], int(line[-1]))) return task_trackers
python
def get_task_trackers(properties=None, hadoop_conf_dir=None, offline=False): """ Get the list of task trackers in the Hadoop cluster. Each element in the returned list is in the ``(host, port)`` format. All arguments are passed to :func:`run_class`. If ``offline`` is :obj:`True`, try getting the list of task trackers from the ``slaves`` file in Hadoop's configuration directory (no attempt is made to contact the Hadoop daemons). In this case, ports are set to 0. """ if offline: if not hadoop_conf_dir: hadoop_conf_dir = pydoop.hadoop_conf() slaves = os.path.join(hadoop_conf_dir, "slaves") try: with open(slaves) as f: task_trackers = [(l.strip(), 0) for l in f] except IOError: task_trackers = [] else: # run JobClient directly (avoids "hadoop job" deprecation) stdout = run_class( "org.apache.hadoop.mapred.JobClient", ["-list-active-trackers"], properties=properties, hadoop_conf_dir=hadoop_conf_dir, keep_streams=True ) task_trackers = [] for line in stdout.splitlines(): if not line: continue line = line.split(":") task_trackers.append((line[0].split("_")[1], int(line[-1]))) return task_trackers
[ "def", "get_task_trackers", "(", "properties", "=", "None", ",", "hadoop_conf_dir", "=", "None", ",", "offline", "=", "False", ")", ":", "if", "offline", ":", "if", "not", "hadoop_conf_dir", ":", "hadoop_conf_dir", "=", "pydoop", ".", "hadoop_conf", "(", ")", "slaves", "=", "os", ".", "path", ".", "join", "(", "hadoop_conf_dir", ",", "\"slaves\"", ")", "try", ":", "with", "open", "(", "slaves", ")", "as", "f", ":", "task_trackers", "=", "[", "(", "l", ".", "strip", "(", ")", ",", "0", ")", "for", "l", "in", "f", "]", "except", "IOError", ":", "task_trackers", "=", "[", "]", "else", ":", "# run JobClient directly (avoids \"hadoop job\" deprecation)", "stdout", "=", "run_class", "(", "\"org.apache.hadoop.mapred.JobClient\"", ",", "[", "\"-list-active-trackers\"", "]", ",", "properties", "=", "properties", ",", "hadoop_conf_dir", "=", "hadoop_conf_dir", ",", "keep_streams", "=", "True", ")", "task_trackers", "=", "[", "]", "for", "line", "in", "stdout", ".", "splitlines", "(", ")", ":", "if", "not", "line", ":", "continue", "line", "=", "line", ".", "split", "(", "\":\"", ")", "task_trackers", ".", "append", "(", "(", "line", "[", "0", "]", ".", "split", "(", "\"_\"", ")", "[", "1", "]", ",", "int", "(", "line", "[", "-", "1", "]", ")", ")", ")", "return", "task_trackers" ]
Get the list of task trackers in the Hadoop cluster. Each element in the returned list is in the ``(host, port)`` format. All arguments are passed to :func:`run_class`. If ``offline`` is :obj:`True`, try getting the list of task trackers from the ``slaves`` file in Hadoop's configuration directory (no attempt is made to contact the Hadoop daemons). In this case, ports are set to 0.
[ "Get", "the", "list", "of", "task", "trackers", "in", "the", "Hadoop", "cluster", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L194-L227
7,979
crs4/pydoop
pydoop/hadut.py
get_num_nodes
def get_num_nodes(properties=None, hadoop_conf_dir=None, offline=False): """ Get the number of task trackers in the Hadoop cluster. All arguments are passed to :func:`get_task_trackers`. """ return len(get_task_trackers(properties, hadoop_conf_dir, offline))
python
def get_num_nodes(properties=None, hadoop_conf_dir=None, offline=False): """ Get the number of task trackers in the Hadoop cluster. All arguments are passed to :func:`get_task_trackers`. """ return len(get_task_trackers(properties, hadoop_conf_dir, offline))
[ "def", "get_num_nodes", "(", "properties", "=", "None", ",", "hadoop_conf_dir", "=", "None", ",", "offline", "=", "False", ")", ":", "return", "len", "(", "get_task_trackers", "(", "properties", ",", "hadoop_conf_dir", ",", "offline", ")", ")" ]
Get the number of task trackers in the Hadoop cluster. All arguments are passed to :func:`get_task_trackers`.
[ "Get", "the", "number", "of", "task", "trackers", "in", "the", "Hadoop", "cluster", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L230-L236
7,980
crs4/pydoop
pydoop/hadut.py
dfs
def dfs(args=None, properties=None, hadoop_conf_dir=None): """ Run the Hadoop file system shell. All arguments are passed to :func:`run_class`. """ # run FsShell directly (avoids "hadoop dfs" deprecation) return run_class( "org.apache.hadoop.fs.FsShell", args, properties, hadoop_conf_dir=hadoop_conf_dir, keep_streams=True )
python
def dfs(args=None, properties=None, hadoop_conf_dir=None): """ Run the Hadoop file system shell. All arguments are passed to :func:`run_class`. """ # run FsShell directly (avoids "hadoop dfs" deprecation) return run_class( "org.apache.hadoop.fs.FsShell", args, properties, hadoop_conf_dir=hadoop_conf_dir, keep_streams=True )
[ "def", "dfs", "(", "args", "=", "None", ",", "properties", "=", "None", ",", "hadoop_conf_dir", "=", "None", ")", ":", "# run FsShell directly (avoids \"hadoop dfs\" deprecation)", "return", "run_class", "(", "\"org.apache.hadoop.fs.FsShell\"", ",", "args", ",", "properties", ",", "hadoop_conf_dir", "=", "hadoop_conf_dir", ",", "keep_streams", "=", "True", ")" ]
Run the Hadoop file system shell. All arguments are passed to :func:`run_class`.
[ "Run", "the", "Hadoop", "file", "system", "shell", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L239-L249
7,981
crs4/pydoop
pydoop/hadut.py
run_pipes
def run_pipes(executable, input_path, output_path, more_args=None, properties=None, force_pydoop_submitter=False, hadoop_conf_dir=None, logger=None, keep_streams=False): """ Run a pipes command. ``more_args`` (after setting input/output path) and ``properties`` are passed to :func:`run_cmd`. If not specified otherwise, this function sets the properties ``mapreduce.pipes.isjavarecordreader`` and ``mapreduce.pipes.isjavarecordwriter`` to ``"true"``. This function works around a bug in Hadoop pipes that affects versions of Hadoop with security when the local file system is used as the default FS (no HDFS); see https://issues.apache.org/jira/browse/MAPREDUCE-4000. In those set-ups, the function uses Pydoop's own pipes submitter application. You can force the use of Pydoop's submitter by passing the argument force_pydoop_submitter=True. """ if logger is None: logger = utils.NullLogger() if not hdfs.path.exists(executable): raise IOError("executable %s not found" % executable) if not hdfs.path.exists(input_path) and not (set(input_path) & GLOB_CHARS): raise IOError("input path %s not found" % input_path) if properties is None: properties = {} properties.setdefault('mapreduce.pipes.isjavarecordreader', 'true') properties.setdefault('mapreduce.pipes.isjavarecordwriter', 'true') if force_pydoop_submitter: use_pydoop_submit = True else: use_pydoop_submit = False ver = pydoop.hadoop_version_info() if ver.has_security(): if ver.is_cdh_mrv2() and hdfs.default_is_local(): raise RuntimeError("mrv2 on local fs not supported yet") use_pydoop_submit = hdfs.default_is_local() args = [ "-program", executable, "-input", input_path, "-output", output_path, ] if more_args is not None: args.extend(more_args) if use_pydoop_submit: submitter = "it.crs4.pydoop.pipes.Submitter" pydoop_jar = pydoop.jar_path() args.extend(("-libjars", pydoop_jar)) return run_class(submitter, args, properties, classpath=pydoop_jar, logger=logger, keep_streams=keep_streams) else: return run_mapred_cmd("pipes", args=args, properties=properties, hadoop_conf_dir=hadoop_conf_dir, logger=logger, keep_streams=keep_streams)
python
def run_pipes(executable, input_path, output_path, more_args=None, properties=None, force_pydoop_submitter=False, hadoop_conf_dir=None, logger=None, keep_streams=False): """ Run a pipes command. ``more_args`` (after setting input/output path) and ``properties`` are passed to :func:`run_cmd`. If not specified otherwise, this function sets the properties ``mapreduce.pipes.isjavarecordreader`` and ``mapreduce.pipes.isjavarecordwriter`` to ``"true"``. This function works around a bug in Hadoop pipes that affects versions of Hadoop with security when the local file system is used as the default FS (no HDFS); see https://issues.apache.org/jira/browse/MAPREDUCE-4000. In those set-ups, the function uses Pydoop's own pipes submitter application. You can force the use of Pydoop's submitter by passing the argument force_pydoop_submitter=True. """ if logger is None: logger = utils.NullLogger() if not hdfs.path.exists(executable): raise IOError("executable %s not found" % executable) if not hdfs.path.exists(input_path) and not (set(input_path) & GLOB_CHARS): raise IOError("input path %s not found" % input_path) if properties is None: properties = {} properties.setdefault('mapreduce.pipes.isjavarecordreader', 'true') properties.setdefault('mapreduce.pipes.isjavarecordwriter', 'true') if force_pydoop_submitter: use_pydoop_submit = True else: use_pydoop_submit = False ver = pydoop.hadoop_version_info() if ver.has_security(): if ver.is_cdh_mrv2() and hdfs.default_is_local(): raise RuntimeError("mrv2 on local fs not supported yet") use_pydoop_submit = hdfs.default_is_local() args = [ "-program", executable, "-input", input_path, "-output", output_path, ] if more_args is not None: args.extend(more_args) if use_pydoop_submit: submitter = "it.crs4.pydoop.pipes.Submitter" pydoop_jar = pydoop.jar_path() args.extend(("-libjars", pydoop_jar)) return run_class(submitter, args, properties, classpath=pydoop_jar, logger=logger, keep_streams=keep_streams) else: return run_mapred_cmd("pipes", args=args, properties=properties, hadoop_conf_dir=hadoop_conf_dir, logger=logger, keep_streams=keep_streams)
[ "def", "run_pipes", "(", "executable", ",", "input_path", ",", "output_path", ",", "more_args", "=", "None", ",", "properties", "=", "None", ",", "force_pydoop_submitter", "=", "False", ",", "hadoop_conf_dir", "=", "None", ",", "logger", "=", "None", ",", "keep_streams", "=", "False", ")", ":", "if", "logger", "is", "None", ":", "logger", "=", "utils", ".", "NullLogger", "(", ")", "if", "not", "hdfs", ".", "path", ".", "exists", "(", "executable", ")", ":", "raise", "IOError", "(", "\"executable %s not found\"", "%", "executable", ")", "if", "not", "hdfs", ".", "path", ".", "exists", "(", "input_path", ")", "and", "not", "(", "set", "(", "input_path", ")", "&", "GLOB_CHARS", ")", ":", "raise", "IOError", "(", "\"input path %s not found\"", "%", "input_path", ")", "if", "properties", "is", "None", ":", "properties", "=", "{", "}", "properties", ".", "setdefault", "(", "'mapreduce.pipes.isjavarecordreader'", ",", "'true'", ")", "properties", ".", "setdefault", "(", "'mapreduce.pipes.isjavarecordwriter'", ",", "'true'", ")", "if", "force_pydoop_submitter", ":", "use_pydoop_submit", "=", "True", "else", ":", "use_pydoop_submit", "=", "False", "ver", "=", "pydoop", ".", "hadoop_version_info", "(", ")", "if", "ver", ".", "has_security", "(", ")", ":", "if", "ver", ".", "is_cdh_mrv2", "(", ")", "and", "hdfs", ".", "default_is_local", "(", ")", ":", "raise", "RuntimeError", "(", "\"mrv2 on local fs not supported yet\"", ")", "use_pydoop_submit", "=", "hdfs", ".", "default_is_local", "(", ")", "args", "=", "[", "\"-program\"", ",", "executable", ",", "\"-input\"", ",", "input_path", ",", "\"-output\"", ",", "output_path", ",", "]", "if", "more_args", "is", "not", "None", ":", "args", ".", "extend", "(", "more_args", ")", "if", "use_pydoop_submit", ":", "submitter", "=", "\"it.crs4.pydoop.pipes.Submitter\"", "pydoop_jar", "=", "pydoop", ".", "jar_path", "(", ")", "args", ".", "extend", "(", "(", "\"-libjars\"", ",", "pydoop_jar", ")", ")", "return", "run_class", "(", "submitter", ",", "args", ",", "properties", ",", "classpath", "=", "pydoop_jar", ",", "logger", "=", "logger", ",", "keep_streams", "=", "keep_streams", ")", "else", ":", "return", "run_mapred_cmd", "(", "\"pipes\"", ",", "args", "=", "args", ",", "properties", "=", "properties", ",", "hadoop_conf_dir", "=", "hadoop_conf_dir", ",", "logger", "=", "logger", ",", "keep_streams", "=", "keep_streams", ")" ]
Run a pipes command. ``more_args`` (after setting input/output path) and ``properties`` are passed to :func:`run_cmd`. If not specified otherwise, this function sets the properties ``mapreduce.pipes.isjavarecordreader`` and ``mapreduce.pipes.isjavarecordwriter`` to ``"true"``. This function works around a bug in Hadoop pipes that affects versions of Hadoop with security when the local file system is used as the default FS (no HDFS); see https://issues.apache.org/jira/browse/MAPREDUCE-4000. In those set-ups, the function uses Pydoop's own pipes submitter application. You can force the use of Pydoop's submitter by passing the argument force_pydoop_submitter=True.
[ "Run", "a", "pipes", "command", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L338-L395
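A hypothetical submission via run_pipes, following the signature documented above. The executable and the input/output HDFS paths are placeholders, and the property shown is a standard Hadoop setting rather than anything taken from the record; a working Hadoop/HDFS installation is assumed.

import pydoop.hadut as hadut

hadut.run_pipes(
    "/user/someuser/bin/wordcount",   # pipes executable already stored on HDFS
    "/user/someuser/input",           # existing input directory
    "/user/someuser/output",          # output directory, must not exist yet
    properties={"mapreduce.job.reduces": "2"},  # example extra job property
)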
7,982
crs4/pydoop
pydoop/hadut.py
collect_output
def collect_output(mr_out_dir, out_file=None): """ Return all mapreduce output in ``mr_out_dir``. Append the output to ``out_file`` if provided. Otherwise, return the result as a single string (it is the caller's responsibility to ensure that the amount of data retrieved fits into memory). """ if out_file is None: output = [] for fn in iter_mr_out_files(mr_out_dir): with hdfs.open(fn, "rt") as f: output.append(f.read()) return "".join(output) else: block_size = 16777216 with open(out_file, 'a') as o: for fn in iter_mr_out_files(mr_out_dir): with hdfs.open(fn) as f: data = f.read(block_size) while len(data) > 0: o.write(data) data = f.read(block_size)
python
def collect_output(mr_out_dir, out_file=None): """ Return all mapreduce output in ``mr_out_dir``. Append the output to ``out_file`` if provided. Otherwise, return the result as a single string (it is the caller's responsibility to ensure that the amount of data retrieved fits into memory). """ if out_file is None: output = [] for fn in iter_mr_out_files(mr_out_dir): with hdfs.open(fn, "rt") as f: output.append(f.read()) return "".join(output) else: block_size = 16777216 with open(out_file, 'a') as o: for fn in iter_mr_out_files(mr_out_dir): with hdfs.open(fn) as f: data = f.read(block_size) while len(data) > 0: o.write(data) data = f.read(block_size)
[ "def", "collect_output", "(", "mr_out_dir", ",", "out_file", "=", "None", ")", ":", "if", "out_file", "is", "None", ":", "output", "=", "[", "]", "for", "fn", "in", "iter_mr_out_files", "(", "mr_out_dir", ")", ":", "with", "hdfs", ".", "open", "(", "fn", ",", "\"rt\"", ")", "as", "f", ":", "output", ".", "append", "(", "f", ".", "read", "(", ")", ")", "return", "\"\"", ".", "join", "(", "output", ")", "else", ":", "block_size", "=", "16777216", "with", "open", "(", "out_file", ",", "'a'", ")", "as", "o", ":", "for", "fn", "in", "iter_mr_out_files", "(", "mr_out_dir", ")", ":", "with", "hdfs", ".", "open", "(", "fn", ")", "as", "f", ":", "data", "=", "f", ".", "read", "(", "block_size", ")", "while", "len", "(", "data", ")", ">", "0", ":", "o", ".", "write", "(", "data", ")", "data", "=", "f", ".", "read", "(", "block_size", ")" ]
Return all mapreduce output in ``mr_out_dir``. Append the output to ``out_file`` if provided. Otherwise, return the result as a single string (it is the caller's responsibility to ensure that the amount of data retrieved fits into memory).
[ "Return", "all", "mapreduce", "output", "in", "mr_out_dir", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L425-L447
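A hypothetical use of collect_output following the docstring above: either gather all the part files of a job into one in-memory string, or append them to a local file when the result may be too large to hold in memory. The paths are placeholders.

import pydoop.hadut as hadut

text = hadut.collect_output("/user/someuser/output")                    # one string
hadut.collect_output("/user/someuser/output", out_file="results.txt")   # append to file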
7,983
crs4/pydoop
pydoop/hadut.py
PipesRunner.set_output
def set_output(self, output): """ Set the output path for the job. Optional if the runner has been instantiated with a prefix. """ self.output = output self.logger.info("assigning output to %s", self.output)
python
def set_output(self, output): """ Set the output path for the job. Optional if the runner has been instantiated with a prefix. """ self.output = output self.logger.info("assigning output to %s", self.output)
[ "def", "set_output", "(", "self", ",", "output", ")", ":", "self", ".", "output", "=", "output", "self", ".", "logger", ".", "info", "(", "\"assigning output to %s\"", ",", "self", ".", "output", ")" ]
Set the output path for the job. Optional if the runner has been instantiated with a prefix.
[ "Set", "the", "output", "path", "for", "the", "job", ".", "Optional", "if", "the", "runner", "has", "been", "instantiated", "with", "a", "prefix", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L504-L510
7,984
crs4/pydoop
pydoop/hadut.py
PipesRunner.set_exe
def set_exe(self, pipes_code): """ Dump launcher code to the distributed file system. """ if not self.output: raise RuntimeError("no output directory, can't create launcher") parent = hdfs.path.dirname(hdfs.path.abspath(self.output.rstrip("/"))) self.exe = hdfs.path.join(parent, utils.make_random_str()) hdfs.dump(pipes_code, self.exe)
python
def set_exe(self, pipes_code): """ Dump launcher code to the distributed file system. """ if not self.output: raise RuntimeError("no output directory, can't create launcher") parent = hdfs.path.dirname(hdfs.path.abspath(self.output.rstrip("/"))) self.exe = hdfs.path.join(parent, utils.make_random_str()) hdfs.dump(pipes_code, self.exe)
[ "def", "set_exe", "(", "self", ",", "pipes_code", ")", ":", "if", "not", "self", ".", "output", ":", "raise", "RuntimeError", "(", "\"no output directory, can't create launcher\"", ")", "parent", "=", "hdfs", ".", "path", ".", "dirname", "(", "hdfs", ".", "path", ".", "abspath", "(", "self", ".", "output", ".", "rstrip", "(", "\"/\"", ")", ")", ")", "self", ".", "exe", "=", "hdfs", ".", "path", ".", "join", "(", "parent", ",", "utils", ".", "make_random_str", "(", ")", ")", "hdfs", ".", "dump", "(", "pipes_code", ",", "self", ".", "exe", ")" ]
Dump launcher code to the distributed file system.
[ "Dump", "launcher", "code", "to", "the", "distributed", "file", "system", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L512-L520
7,985
crs4/pydoop
pydoop/hdfs/__init__.py
dump
def dump(data, hdfs_path, **kwargs): """\ Write ``data`` to ``hdfs_path``. Keyword arguments are passed to :func:`open`, except for ``mode``, which is forced to ``"w"`` (or ``"wt"`` for text data). """ kwargs["mode"] = "w" if isinstance(data, bintype) else "wt" with open(hdfs_path, **kwargs) as fo: i = 0 bufsize = common.BUFSIZE while i < len(data): fo.write(data[i: i + bufsize]) i += bufsize fo.fs.close()
python
def dump(data, hdfs_path, **kwargs): """\ Write ``data`` to ``hdfs_path``. Keyword arguments are passed to :func:`open`, except for ``mode``, which is forced to ``"w"`` (or ``"wt"`` for text data). """ kwargs["mode"] = "w" if isinstance(data, bintype) else "wt" with open(hdfs_path, **kwargs) as fo: i = 0 bufsize = common.BUFSIZE while i < len(data): fo.write(data[i: i + bufsize]) i += bufsize fo.fs.close()
[ "def", "dump", "(", "data", ",", "hdfs_path", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "\"mode\"", "]", "=", "\"w\"", "if", "isinstance", "(", "data", ",", "bintype", ")", "else", "\"wt\"", "with", "open", "(", "hdfs_path", ",", "*", "*", "kwargs", ")", "as", "fo", ":", "i", "=", "0", "bufsize", "=", "common", ".", "BUFSIZE", "while", "i", "<", "len", "(", "data", ")", ":", "fo", ".", "write", "(", "data", "[", "i", ":", "i", "+", "bufsize", "]", ")", "i", "+=", "bufsize", "fo", ".", "fs", ".", "close", "(", ")" ]
\ Write ``data`` to ``hdfs_path``. Keyword arguments are passed to :func:`open`, except for ``mode``, which is forced to ``"w"`` (or ``"wt"`` for text data).
[ "\\", "Write", "data", "to", "hdfs_path", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L129-L143
7,986
crs4/pydoop
pydoop/hdfs/__init__.py
load
def load(hdfs_path, **kwargs): """\ Read the content of ``hdfs_path`` and return it. Keyword arguments are passed to :func:`open`. The `"mode"` kwarg must be readonly. """ m, _ = common.parse_mode(kwargs.get("mode", "r")) if m != "r": raise ValueError("opening mode must be readonly") with open(hdfs_path, **kwargs) as fi: data = fi.read() fi.fs.close() return data
python
def load(hdfs_path, **kwargs): """\ Read the content of ``hdfs_path`` and return it. Keyword arguments are passed to :func:`open`. The `"mode"` kwarg must be readonly. """ m, _ = common.parse_mode(kwargs.get("mode", "r")) if m != "r": raise ValueError("opening mode must be readonly") with open(hdfs_path, **kwargs) as fi: data = fi.read() fi.fs.close() return data
[ "def", "load", "(", "hdfs_path", ",", "*", "*", "kwargs", ")", ":", "m", ",", "_", "=", "common", ".", "parse_mode", "(", "kwargs", ".", "get", "(", "\"mode\"", ",", "\"r\"", ")", ")", "if", "m", "!=", "\"r\"", ":", "raise", "ValueError", "(", "\"opening mode must be readonly\"", ")", "with", "open", "(", "hdfs_path", ",", "*", "*", "kwargs", ")", "as", "fi", ":", "data", "=", "fi", ".", "read", "(", ")", "fi", ".", "fs", ".", "close", "(", ")", "return", "data" ]
\ Read the content of ``hdfs_path`` and return it. Keyword arguments are passed to :func:`open`. The `"mode"` kwarg must be readonly.
[ "\\", "Read", "the", "content", "of", "hdfs_path", "and", "return", "it", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L146-L159
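dump and load (the two records above) pair naturally; a minimal round-trip sketch follows, assuming a reachable HDFS and using a placeholder path. Text data is written in "wt" mode by dump, so it is read back with mode="rt" here.

import pydoop.hdfs as hdfs

hdfs.dump("hello, hdfs\n", "/user/someuser/hello.txt")      # text -> written in "wt" mode
content = hdfs.load("/user/someuser/hello.txt", mode="rt")  # read it back as text
assert content == "hello, hdfs\n"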
7,987
crs4/pydoop
pydoop/hdfs/__init__.py
cp
def cp(src_hdfs_path, dest_hdfs_path, **kwargs): """\ Copy the contents of ``src_hdfs_path`` to ``dest_hdfs_path``. If ``src_hdfs_path`` is a directory, its contents will be copied recursively. Source file(s) are opened for reading and copies are opened for writing. Additional keyword arguments, if any, are handled like in :func:`open`. """ src, dest = {}, {} try: for d, p in ((src, src_hdfs_path), (dest, dest_hdfs_path)): d["host"], d["port"], d["path"] = path.split(p) d["fs"] = hdfs(d["host"], d["port"]) # --- does src exist? --- try: src["info"] = src["fs"].get_path_info(src["path"]) except IOError: raise IOError("no such file or directory: %r" % (src["path"])) # --- src exists. Does dest exist? --- try: dest["info"] = dest["fs"].get_path_info(dest["path"]) except IOError: if src["info"]["kind"] == "file": _cp_file(src["fs"], src["path"], dest["fs"], dest["path"], **kwargs) return else: dest["fs"].create_directory(dest["path"]) dest_hdfs_path = dest["fs"].get_path_info(dest["path"])["name"] for item in src["fs"].list_directory(src["path"]): cp(item["name"], dest_hdfs_path, **kwargs) return # --- dest exists. Is it a file? --- if dest["info"]["kind"] == "file": raise IOError("%r already exists" % (dest["path"])) # --- dest is a directory --- dest["path"] = path.join(dest["path"], path.basename(src["path"])) if dest["fs"].exists(dest["path"]): raise IOError("%r already exists" % (dest["path"])) if src["info"]["kind"] == "file": _cp_file(src["fs"], src["path"], dest["fs"], dest["path"], **kwargs) else: dest["fs"].create_directory(dest["path"]) dest_hdfs_path = dest["fs"].get_path_info(dest["path"])["name"] for item in src["fs"].list_directory(src["path"]): cp(item["name"], dest_hdfs_path, **kwargs) finally: for d in src, dest: try: d["fs"].close() except KeyError: pass
python
def cp(src_hdfs_path, dest_hdfs_path, **kwargs): """\ Copy the contents of ``src_hdfs_path`` to ``dest_hdfs_path``. If ``src_hdfs_path`` is a directory, its contents will be copied recursively. Source file(s) are opened for reading and copies are opened for writing. Additional keyword arguments, if any, are handled like in :func:`open`. """ src, dest = {}, {} try: for d, p in ((src, src_hdfs_path), (dest, dest_hdfs_path)): d["host"], d["port"], d["path"] = path.split(p) d["fs"] = hdfs(d["host"], d["port"]) # --- does src exist? --- try: src["info"] = src["fs"].get_path_info(src["path"]) except IOError: raise IOError("no such file or directory: %r" % (src["path"])) # --- src exists. Does dest exist? --- try: dest["info"] = dest["fs"].get_path_info(dest["path"]) except IOError: if src["info"]["kind"] == "file": _cp_file(src["fs"], src["path"], dest["fs"], dest["path"], **kwargs) return else: dest["fs"].create_directory(dest["path"]) dest_hdfs_path = dest["fs"].get_path_info(dest["path"])["name"] for item in src["fs"].list_directory(src["path"]): cp(item["name"], dest_hdfs_path, **kwargs) return # --- dest exists. Is it a file? --- if dest["info"]["kind"] == "file": raise IOError("%r already exists" % (dest["path"])) # --- dest is a directory --- dest["path"] = path.join(dest["path"], path.basename(src["path"])) if dest["fs"].exists(dest["path"]): raise IOError("%r already exists" % (dest["path"])) if src["info"]["kind"] == "file": _cp_file(src["fs"], src["path"], dest["fs"], dest["path"], **kwargs) else: dest["fs"].create_directory(dest["path"]) dest_hdfs_path = dest["fs"].get_path_info(dest["path"])["name"] for item in src["fs"].list_directory(src["path"]): cp(item["name"], dest_hdfs_path, **kwargs) finally: for d in src, dest: try: d["fs"].close() except KeyError: pass
[ "def", "cp", "(", "src_hdfs_path", ",", "dest_hdfs_path", ",", "*", "*", "kwargs", ")", ":", "src", ",", "dest", "=", "{", "}", ",", "{", "}", "try", ":", "for", "d", ",", "p", "in", "(", "(", "src", ",", "src_hdfs_path", ")", ",", "(", "dest", ",", "dest_hdfs_path", ")", ")", ":", "d", "[", "\"host\"", "]", ",", "d", "[", "\"port\"", "]", ",", "d", "[", "\"path\"", "]", "=", "path", ".", "split", "(", "p", ")", "d", "[", "\"fs\"", "]", "=", "hdfs", "(", "d", "[", "\"host\"", "]", ",", "d", "[", "\"port\"", "]", ")", "# --- does src exist? ---", "try", ":", "src", "[", "\"info\"", "]", "=", "src", "[", "\"fs\"", "]", ".", "get_path_info", "(", "src", "[", "\"path\"", "]", ")", "except", "IOError", ":", "raise", "IOError", "(", "\"no such file or directory: %r\"", "%", "(", "src", "[", "\"path\"", "]", ")", ")", "# --- src exists. Does dest exist? ---", "try", ":", "dest", "[", "\"info\"", "]", "=", "dest", "[", "\"fs\"", "]", ".", "get_path_info", "(", "dest", "[", "\"path\"", "]", ")", "except", "IOError", ":", "if", "src", "[", "\"info\"", "]", "[", "\"kind\"", "]", "==", "\"file\"", ":", "_cp_file", "(", "src", "[", "\"fs\"", "]", ",", "src", "[", "\"path\"", "]", ",", "dest", "[", "\"fs\"", "]", ",", "dest", "[", "\"path\"", "]", ",", "*", "*", "kwargs", ")", "return", "else", ":", "dest", "[", "\"fs\"", "]", ".", "create_directory", "(", "dest", "[", "\"path\"", "]", ")", "dest_hdfs_path", "=", "dest", "[", "\"fs\"", "]", ".", "get_path_info", "(", "dest", "[", "\"path\"", "]", ")", "[", "\"name\"", "]", "for", "item", "in", "src", "[", "\"fs\"", "]", ".", "list_directory", "(", "src", "[", "\"path\"", "]", ")", ":", "cp", "(", "item", "[", "\"name\"", "]", ",", "dest_hdfs_path", ",", "*", "*", "kwargs", ")", "return", "# --- dest exists. Is it a file? ---", "if", "dest", "[", "\"info\"", "]", "[", "\"kind\"", "]", "==", "\"file\"", ":", "raise", "IOError", "(", "\"%r already exists\"", "%", "(", "dest", "[", "\"path\"", "]", ")", ")", "# --- dest is a directory ---", "dest", "[", "\"path\"", "]", "=", "path", ".", "join", "(", "dest", "[", "\"path\"", "]", ",", "path", ".", "basename", "(", "src", "[", "\"path\"", "]", ")", ")", "if", "dest", "[", "\"fs\"", "]", ".", "exists", "(", "dest", "[", "\"path\"", "]", ")", ":", "raise", "IOError", "(", "\"%r already exists\"", "%", "(", "dest", "[", "\"path\"", "]", ")", ")", "if", "src", "[", "\"info\"", "]", "[", "\"kind\"", "]", "==", "\"file\"", ":", "_cp_file", "(", "src", "[", "\"fs\"", "]", ",", "src", "[", "\"path\"", "]", ",", "dest", "[", "\"fs\"", "]", ",", "dest", "[", "\"path\"", "]", ",", "*", "*", "kwargs", ")", "else", ":", "dest", "[", "\"fs\"", "]", ".", "create_directory", "(", "dest", "[", "\"path\"", "]", ")", "dest_hdfs_path", "=", "dest", "[", "\"fs\"", "]", ".", "get_path_info", "(", "dest", "[", "\"path\"", "]", ")", "[", "\"name\"", "]", "for", "item", "in", "src", "[", "\"fs\"", "]", ".", "list_directory", "(", "src", "[", "\"path\"", "]", ")", ":", "cp", "(", "item", "[", "\"name\"", "]", ",", "dest_hdfs_path", ",", "*", "*", "kwargs", ")", "finally", ":", "for", "d", "in", "src", ",", "dest", ":", "try", ":", "d", "[", "\"fs\"", "]", ".", "close", "(", ")", "except", "KeyError", ":", "pass" ]
\ Copy the contents of ``src_hdfs_path`` to ``dest_hdfs_path``. If ``src_hdfs_path`` is a directory, its contents will be copied recursively. Source file(s) are opened for reading and copies are opened for writing. Additional keyword arguments, if any, are handled like in :func:`open`.
[ "\\", "Copy", "the", "contents", "of", "src_hdfs_path", "to", "dest_hdfs_path", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L177-L230
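A hypothetical recursive copy with cp as documented above; both paths are placeholders and a reachable HDFS is assumed. Because the source is a directory, its contents are copied recursively into the destination.

import pydoop.hdfs as hdfs

hdfs.cp("/user/someuser/dataset", "/user/someuser/dataset_backup")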
7,988
crs4/pydoop
pydoop/hdfs/__init__.py
put
def put(src_path, dest_hdfs_path, **kwargs): """\ Copy the contents of ``src_path`` to ``dest_hdfs_path``. ``src_path`` is forced to be interpreted as an ordinary local path (see :func:`~path.abspath`). The source file is opened for reading and the copy is opened for writing. Additional keyword arguments, if any, are handled like in :func:`open`. """ cp(path.abspath(src_path, local=True), dest_hdfs_path, **kwargs)
python
def put(src_path, dest_hdfs_path, **kwargs): """\ Copy the contents of ``src_path`` to ``dest_hdfs_path``. ``src_path`` is forced to be interpreted as an ordinary local path (see :func:`~path.abspath`). The source file is opened for reading and the copy is opened for writing. Additional keyword arguments, if any, are handled like in :func:`open`. """ cp(path.abspath(src_path, local=True), dest_hdfs_path, **kwargs)
[ "def", "put", "(", "src_path", ",", "dest_hdfs_path", ",", "*", "*", "kwargs", ")", ":", "cp", "(", "path", ".", "abspath", "(", "src_path", ",", "local", "=", "True", ")", ",", "dest_hdfs_path", ",", "*", "*", "kwargs", ")" ]
\ Copy the contents of ``src_path`` to ``dest_hdfs_path``. ``src_path`` is forced to be interpreted as an ordinary local path (see :func:`~path.abspath`). The source file is opened for reading and the copy is opened for writing. Additional keyword arguments, if any, are handled like in :func:`open`.
[ "\\", "Copy", "the", "contents", "of", "src_path", "to", "dest_hdfs_path", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L233-L242
7,989
crs4/pydoop
pydoop/hdfs/__init__.py
get
def get(src_hdfs_path, dest_path, **kwargs): """\ Copy the contents of ``src_hdfs_path`` to ``dest_path``. ``dest_path`` is forced to be interpreted as an ordinary local path (see :func:`~path.abspath`). The source file is opened for reading and the copy is opened for writing. Additional keyword arguments, if any, are handled like in :func:`open`. """ cp(src_hdfs_path, path.abspath(dest_path, local=True), **kwargs)
python
def get(src_hdfs_path, dest_path, **kwargs): """\ Copy the contents of ``src_hdfs_path`` to ``dest_path``. ``dest_path`` is forced to be interpreted as an ordinary local path (see :func:`~path.abspath`). The source file is opened for reading and the copy is opened for writing. Additional keyword arguments, if any, are handled like in :func:`open`. """ cp(src_hdfs_path, path.abspath(dest_path, local=True), **kwargs)
[ "def", "get", "(", "src_hdfs_path", ",", "dest_path", ",", "*", "*", "kwargs", ")", ":", "cp", "(", "src_hdfs_path", ",", "path", ".", "abspath", "(", "dest_path", ",", "local", "=", "True", ")", ",", "*", "*", "kwargs", ")" ]
\ Copy the contents of ``src_hdfs_path`` to ``dest_path``. ``dest_path`` is forced to be interpreted as an ordinary local path (see :func:`~path.abspath`). The source file is opened for reading and the copy is opened for writing. Additional keyword arguments, if any, are handled like in :func:`open`.
[ "\\", "Copy", "the", "contents", "of", "src_hdfs_path", "to", "dest_path", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L245-L254
7,990
crs4/pydoop
pydoop/hdfs/__init__.py
mkdir
def mkdir(hdfs_path, user=None):
    """
    Create a directory and its parents as needed.
    """
    host, port, path_ = path.split(hdfs_path, user)
    fs = hdfs(host, port, user)
    retval = fs.create_directory(path_)
    fs.close()
    return retval
python
def mkdir(hdfs_path, user=None):
    """
    Create a directory and its parents as needed.
    """
    host, port, path_ = path.split(hdfs_path, user)
    fs = hdfs(host, port, user)
    retval = fs.create_directory(path_)
    fs.close()
    return retval
[ "def", "mkdir", "(", "hdfs_path", ",", "user", "=", "None", ")", ":", "host", ",", "port", ",", "path_", "=", "path", ".", "split", "(", "hdfs_path", ",", "user", ")", "fs", "=", "hdfs", "(", "host", ",", "port", ",", "user", ")", "retval", "=", "fs", ".", "create_directory", "(", "path_", ")", "fs", ".", "close", "(", ")", "return", "retval" ]
Create a directory and its parents as needed.
[ "Create", "a", "directory", "and", "its", "parents", "as", "needed", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L257-L265
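A minimal usage sketch for ``mkdir``; the path is hypothetical.

import pydoop.hdfs as hdfs

# Creates the whole chain of missing parents, similar to ``mkdir -p``.
hdfs.mkdir("/user/alice/projects/2019/logs")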
7,991
crs4/pydoop
pydoop/hdfs/__init__.py
lsl
def lsl(hdfs_path, user=None, recursive=False):
    """
    Return a list of dictionaries of file properties.

    If ``hdfs_path`` is a file, there is only one item corresponding to
    the file itself; if it is a directory and ``recursive`` is
    :obj:`False`, each list item corresponds to a file or directory
    contained by it; if it is a directory and ``recursive`` is
    :obj:`True`, the list contains one item for every file or directory
    in the tree rooted at ``hdfs_path``.
    """
    host, port, path_ = path.split(hdfs_path, user)
    fs = hdfs(host, port, user)
    if not recursive:
        dir_list = fs.list_directory(path_)
    else:
        treewalk = fs.walk(path_)
        top = next(treewalk)
        if top['kind'] == 'directory':
            dir_list = list(treewalk)
        else:
            dir_list = [top]
    fs.close()
    return dir_list
python
def lsl(hdfs_path, user=None, recursive=False):
    """
    Return a list of dictionaries of file properties.

    If ``hdfs_path`` is a file, there is only one item corresponding to
    the file itself; if it is a directory and ``recursive`` is
    :obj:`False`, each list item corresponds to a file or directory
    contained by it; if it is a directory and ``recursive`` is
    :obj:`True`, the list contains one item for every file or directory
    in the tree rooted at ``hdfs_path``.
    """
    host, port, path_ = path.split(hdfs_path, user)
    fs = hdfs(host, port, user)
    if not recursive:
        dir_list = fs.list_directory(path_)
    else:
        treewalk = fs.walk(path_)
        top = next(treewalk)
        if top['kind'] == 'directory':
            dir_list = list(treewalk)
        else:
            dir_list = [top]
    fs.close()
    return dir_list
[ "def", "lsl", "(", "hdfs_path", ",", "user", "=", "None", ",", "recursive", "=", "False", ")", ":", "host", ",", "port", ",", "path_", "=", "path", ".", "split", "(", "hdfs_path", ",", "user", ")", "fs", "=", "hdfs", "(", "host", ",", "port", ",", "user", ")", "if", "not", "recursive", ":", "dir_list", "=", "fs", ".", "list_directory", "(", "path_", ")", "else", ":", "treewalk", "=", "fs", ".", "walk", "(", "path_", ")", "top", "=", "next", "(", "treewalk", ")", "if", "top", "[", "'kind'", "]", "==", "'directory'", ":", "dir_list", "=", "list", "(", "treewalk", ")", "else", ":", "dir_list", "=", "[", "top", "]", "fs", ".", "close", "(", ")", "return", "dir_list" ]
Return a list of dictionaries of file properties. If ``hdfs_path`` is a file, there is only one item corresponding to the file itself; if it is a directory and ``recursive`` is :obj:`False`, each list item corresponds to a file or directory contained by it; if it is a directory and ``recursive`` is :obj:`True`, the list contains one item for every file or directory in the tree rooted at ``hdfs_path``.
[ "Return", "a", "list", "of", "dictionaries", "of", "file", "properties", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L287-L310
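A minimal usage sketch for ``lsl``. The directory path is hypothetical; the ``"name"`` and ``"kind"`` keys appear in the code above, while ``"size"`` is an assumed additional property key.

import pydoop.hdfs as hdfs

# Walk a hypothetical directory tree and report a few properties per entry.
for info in hdfs.lsl("/user/alice/projects", recursive=True):
    print(info["name"], info["kind"], info["size"])  # "size" key is an assumption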
7,992
crs4/pydoop
pydoop/hdfs/__init__.py
ls
def ls(hdfs_path, user=None, recursive=False):
    """
    Return a list of hdfs paths.

    Works in the same way as :func:`lsl`, except for the fact that list
    items are hdfs paths instead of dictionaries of properties.
    """
    dir_list = lsl(hdfs_path, user, recursive)
    return [d["name"] for d in dir_list]
python
def ls(hdfs_path, user=None, recursive=False):
    """
    Return a list of hdfs paths.

    Works in the same way as :func:`lsl`, except for the fact that list
    items are hdfs paths instead of dictionaries of properties.
    """
    dir_list = lsl(hdfs_path, user, recursive)
    return [d["name"] for d in dir_list]
[ "def", "ls", "(", "hdfs_path", ",", "user", "=", "None", ",", "recursive", "=", "False", ")", ":", "dir_list", "=", "lsl", "(", "hdfs_path", ",", "user", ",", "recursive", ")", "return", "[", "d", "[", "\"name\"", "]", "for", "d", "in", "dir_list", "]" ]
Return a list of hdfs paths. Works in the same way as :func:`lsl`, except for the fact that list items are hdfs paths instead of dictionaries of properties.
[ "Return", "a", "list", "of", "hdfs", "paths", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L313-L321
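A minimal usage sketch for ``ls``; the directory path is hypothetical.

import pydoop.hdfs as hdfs

# Non-recursive listing of a hypothetical directory: plain path strings.
for p in hdfs.ls("/user/alice/projects"):
    print(p)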
7,993
crs4/pydoop
pydoop/hdfs/__init__.py
move
def move(src, dest, user=None):
    """
    Move or rename src to dest.
    """
    src_host, src_port, src_path = path.split(src, user)
    dest_host, dest_port, dest_path = path.split(dest, user)
    src_fs = hdfs(src_host, src_port, user)
    dest_fs = hdfs(dest_host, dest_port, user)
    try:
        retval = src_fs.move(src_path, dest_fs, dest_path)
        return retval
    finally:
        src_fs.close()
        dest_fs.close()
python
def move(src, dest, user=None):
    """
    Move or rename src to dest.
    """
    src_host, src_port, src_path = path.split(src, user)
    dest_host, dest_port, dest_path = path.split(dest, user)
    src_fs = hdfs(src_host, src_port, user)
    dest_fs = hdfs(dest_host, dest_port, user)
    try:
        retval = src_fs.move(src_path, dest_fs, dest_path)
        return retval
    finally:
        src_fs.close()
        dest_fs.close()
[ "def", "move", "(", "src", ",", "dest", ",", "user", "=", "None", ")", ":", "src_host", ",", "src_port", ",", "src_path", "=", "path", ".", "split", "(", "src", ",", "user", ")", "dest_host", ",", "dest_port", ",", "dest_path", "=", "path", ".", "split", "(", "dest", ",", "user", ")", "src_fs", "=", "hdfs", "(", "src_host", ",", "src_port", ",", "user", ")", "dest_fs", "=", "hdfs", "(", "dest_host", ",", "dest_port", ",", "user", ")", "try", ":", "retval", "=", "src_fs", ".", "move", "(", "src_path", ",", "dest_fs", ",", "dest_path", ")", "return", "retval", "finally", ":", "src_fs", ".", "close", "(", ")", "dest_fs", ".", "close", "(", ")" ]
Move or rename src to dest.
[ "Move", "or", "rename", "src", "to", "dest", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L340-L353
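A minimal usage sketch for ``move``; both paths are hypothetical and refer to the same HDFS instance, in which case the call amounts to a rename.

import pydoop.hdfs as hdfs

# Hypothetical rename within one HDFS instance.
hdfs.move("/user/alice/staging/report.csv", "/user/alice/archive/report.csv")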
7,994
crs4/pydoop
pydoop/hdfs/__init__.py
renames
def renames(from_path, to_path, user=None):
    """
    Rename ``from_path`` to ``to_path``, creating parents as needed.
    """
    to_dir = path.dirname(to_path)
    if to_dir:
        mkdir(to_dir, user=user)
    rename(from_path, to_path, user=user)
python
def renames(from_path, to_path, user=None):
    """
    Rename ``from_path`` to ``to_path``, creating parents as needed.
    """
    to_dir = path.dirname(to_path)
    if to_dir:
        mkdir(to_dir, user=user)
    rename(from_path, to_path, user=user)
[ "def", "renames", "(", "from_path", ",", "to_path", ",", "user", "=", "None", ")", ":", "to_dir", "=", "path", ".", "dirname", "(", "to_path", ")", "if", "to_dir", ":", "mkdir", "(", "to_dir", ",", "user", "=", "user", ")", "rename", "(", "from_path", ",", "to_path", ",", "user", "=", "user", ")" ]
Rename ``from_path`` to ``to_path``, creating parents as needed.
[ "Rename", "from_path", "to", "to_path", "creating", "parents", "as", "needed", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L381-L388
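A minimal usage sketch for ``renames``; both paths are hypothetical.

import pydoop.hdfs as hdfs

# The target's parent directories do not need to exist beforehand.
hdfs.renames("/user/alice/tmp/run.log", "/user/alice/logs/2019/run.log")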
7,995
crs4/pydoop
pydoop/hdfs/file.py
FileIO.readline
def readline(self):
    """
    Read and return a line of text.

    :rtype: str
    :return: the next line of text in the file, including the
      newline character
    """
    _complain_ifclosed(self.closed)
    line = self.f.readline()
    if self.__encoding:
        return line.decode(self.__encoding, self.__errors)
    else:
        return line
python
def readline(self):
    """
    Read and return a line of text.

    :rtype: str
    :return: the next line of text in the file, including the
      newline character
    """
    _complain_ifclosed(self.closed)
    line = self.f.readline()
    if self.__encoding:
        return line.decode(self.__encoding, self.__errors)
    else:
        return line
[ "def", "readline", "(", "self", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "line", "=", "self", ".", "f", ".", "readline", "(", ")", "if", "self", ".", "__encoding", ":", "return", "line", ".", "decode", "(", "self", ".", "__encoding", ",", "self", ".", "__errors", ")", "else", ":", "return", "line" ]
Read and return a line of text.

:rtype: str
:return: the next line of text in the file, including the newline character
[ "Read", "and", "return", "a", "line", "of", "text", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/file.py#L107-L120
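A minimal usage sketch for ``FileIO.readline``. The handle is assumed to come from ``pydoop.hdfs.open``; the path, the ``"rt"`` mode string and the ``encoding`` keyword are assumptions not shown in this excerpt.

import pydoop.hdfs as hdfs

f = hdfs.open("/user/alice/notes.txt", "rt", encoding="utf-8")  # hypothetical call
try:
    first_line = f.readline()  # str (newline included) because an encoding was requested
finally:
    f.close()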
7,996
crs4/pydoop
pydoop/hdfs/file.py
FileIO.pread
def pread(self, position, length):
    r"""
    Read ``length`` bytes of data from the file, starting from
    ``position``\ .

    :type position: int
    :param position: position from which to read
    :type length: int
    :param length: the number of bytes to read
    :rtype: string
    :return: the chunk of data read from the file
    """
    _complain_ifclosed(self.closed)
    if position > self.size:
        raise IOError("position cannot be past EOF")
    if length < 0:
        length = self.size - position
    data = self.f.raw.pread(position, length)
    if self.__encoding:
        return data.decode(self.__encoding, self.__errors)
    else:
        return data
python
def pread(self, position, length):
    r"""
    Read ``length`` bytes of data from the file, starting from
    ``position``\ .

    :type position: int
    :param position: position from which to read
    :type length: int
    :param length: the number of bytes to read
    :rtype: string
    :return: the chunk of data read from the file
    """
    _complain_ifclosed(self.closed)
    if position > self.size:
        raise IOError("position cannot be past EOF")
    if length < 0:
        length = self.size - position
    data = self.f.raw.pread(position, length)
    if self.__encoding:
        return data.decode(self.__encoding, self.__errors)
    else:
        return data
[ "def", "pread", "(", "self", ",", "position", ",", "length", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "if", "position", ">", "self", ".", "size", ":", "raise", "IOError", "(", "\"position cannot be past EOF\"", ")", "if", "length", "<", "0", ":", "length", "=", "self", ".", "size", "-", "position", "data", "=", "self", ".", "f", ".", "raw", ".", "pread", "(", "position", ",", "length", ")", "if", "self", ".", "__encoding", ":", "return", "data", ".", "decode", "(", "self", ".", "__encoding", ",", "self", ".", "__errors", ")", "else", ":", "return", "data" ]
r""" Read ``length`` bytes of data from the file, starting from ``position``\ . :type position: int :param position: position from which to read :type length: int :param length: the number of bytes to read :rtype: string :return: the chunk of data read from the file
[ "r", "Read", "length", "bytes", "of", "data", "from", "the", "file", "starting", "from", "position", "\\", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/file.py#L165-L186
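A minimal usage sketch for ``FileIO.pread``, which reads at an absolute offset without moving the file position. The path and the ``"rb"`` mode passed to ``pydoop.hdfs.open`` are assumptions.

import pydoop.hdfs as hdfs

f = hdfs.open("/user/alice/big.bin", "rb")  # hypothetical binary file
try:
    header = f.pread(0, 16)    # first 16 bytes, independent of the current position
    tail = f.pread(1024, -1)   # negative length: from offset 1024 up to EOF
finally:
    f.close()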
7,997
crs4/pydoop
pydoop/hdfs/file.py
FileIO.read
def read(self, length=-1):
    """
    Read ``length`` bytes from the file.  If ``length`` is negative or
    omitted, read all data until EOF.

    :type length: int
    :param length: the number of bytes to read
    :rtype: string
    :return: the chunk of data read from the file
    """
    _complain_ifclosed(self.closed)
    # NOTE: libhdfs read stops at block boundaries: it is *essential*
    # to ensure that we actually read the required number of bytes.
    if length < 0:
        length = self.size
    chunks = []
    while 1:
        if length <= 0:
            break
        c = self.f.read(min(self.buff_size, length))
        if c == b"":
            break
        chunks.append(c)
        length -= len(c)
    data = b"".join(chunks)
    if self.__encoding:
        return data.decode(self.__encoding, self.__errors)
    else:
        return data
python
def read(self, length=-1):
    """
    Read ``length`` bytes from the file.  If ``length`` is negative or
    omitted, read all data until EOF.

    :type length: int
    :param length: the number of bytes to read
    :rtype: string
    :return: the chunk of data read from the file
    """
    _complain_ifclosed(self.closed)
    # NOTE: libhdfs read stops at block boundaries: it is *essential*
    # to ensure that we actually read the required number of bytes.
    if length < 0:
        length = self.size
    chunks = []
    while 1:
        if length <= 0:
            break
        c = self.f.read(min(self.buff_size, length))
        if c == b"":
            break
        chunks.append(c)
        length -= len(c)
    data = b"".join(chunks)
    if self.__encoding:
        return data.decode(self.__encoding, self.__errors)
    else:
        return data
[ "def", "read", "(", "self", ",", "length", "=", "-", "1", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "# NOTE: libhdfs read stops at block boundaries: it is *essential*", "# to ensure that we actually read the required number of bytes.", "if", "length", "<", "0", ":", "length", "=", "self", ".", "size", "chunks", "=", "[", "]", "while", "1", ":", "if", "length", "<=", "0", ":", "break", "c", "=", "self", ".", "f", ".", "read", "(", "min", "(", "self", ".", "buff_size", ",", "length", ")", ")", "if", "c", "==", "b\"\"", ":", "break", "chunks", ".", "append", "(", "c", ")", "length", "-=", "len", "(", "c", ")", "data", "=", "b\"\"", ".", "join", "(", "chunks", ")", "if", "self", ".", "__encoding", ":", "return", "data", ".", "decode", "(", "self", ".", "__encoding", ",", "self", ".", "__errors", ")", "else", ":", "return", "data" ]
Read ``length`` bytes from the file.  If ``length`` is negative or omitted, read all data until EOF.

:type length: int
:param length: the number of bytes to read
:rtype: string
:return: the chunk of data read from the file
[ "Read", "length", "bytes", "from", "the", "file", ".", "If", "length", "is", "negative", "or", "omitted", "read", "all", "data", "until", "EOF", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/file.py#L188-L216
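A minimal usage sketch for ``FileIO.read``; note how the loop above guarantees the requested byte count despite libhdfs stopping at block boundaries. The path and the ``"rb"`` open mode are assumptions.

import pydoop.hdfs as hdfs

f = hdfs.open("/user/alice/big.bin", "rb")  # hypothetical binary file
try:
    chunk = f.read(4096)   # exactly 4096 bytes unless EOF is reached first
    rest = f.read()        # everything remaining up to EOF
finally:
    f.close()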
7,998
crs4/pydoop
pydoop/hdfs/file.py
FileIO.seek
def seek(self, position, whence=os.SEEK_SET):
    """
    Seek to ``position`` in file.

    :type position: int
    :param position: offset in bytes to seek to
    :type whence: int
    :param whence: defaults to ``os.SEEK_SET`` (absolute); other
      values are ``os.SEEK_CUR`` (relative to the current position)
      and ``os.SEEK_END`` (relative to the file's end).
    """
    _complain_ifclosed(self.closed)
    return self.f.seek(position, whence)
python
def seek(self, position, whence=os.SEEK_SET):
    """
    Seek to ``position`` in file.

    :type position: int
    :param position: offset in bytes to seek to
    :type whence: int
    :param whence: defaults to ``os.SEEK_SET`` (absolute); other
      values are ``os.SEEK_CUR`` (relative to the current position)
      and ``os.SEEK_END`` (relative to the file's end).
    """
    _complain_ifclosed(self.closed)
    return self.f.seek(position, whence)
[ "def", "seek", "(", "self", ",", "position", ",", "whence", "=", "os", ".", "SEEK_SET", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "return", "self", ".", "f", ".", "seek", "(", "position", ",", "whence", ")" ]
Seek to ``position`` in file.

:type position: int
:param position: offset in bytes to seek to
:type whence: int
:param whence: defaults to ``os.SEEK_SET`` (absolute); other values are ``os.SEEK_CUR`` (relative to the current position) and ``os.SEEK_END`` (relative to the file's end).
[ "Seek", "to", "position", "in", "file", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/file.py#L218-L230
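A minimal usage sketch for ``FileIO.seek`` combining absolute and relative positioning; the path and the ``"rb"`` open mode are assumptions.

import os
import pydoop.hdfs as hdfs

f = hdfs.open("/user/alice/big.bin", "rb")  # hypothetical binary file
try:
    f.seek(128)               # absolute offset from the start of the file
    f.seek(32, os.SEEK_CUR)   # skip a further 32 bytes from the current position
    data = f.read(16)
finally:
    f.close()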
7,999
crs4/pydoop
pydoop/hdfs/file.py
FileIO.write
def write(self, data):
    """
    Write ``data`` to the file.

    :type data: bytes
    :param data: the data to be written to the file
    :rtype: int
    :return: the number of bytes written
    """
    _complain_ifclosed(self.closed)
    if self.__encoding:
        self.f.write(data.encode(self.__encoding, self.__errors))
        return len(data)
    else:
        return self.f.write(data)
python
def write(self, data):
    """
    Write ``data`` to the file.

    :type data: bytes
    :param data: the data to be written to the file
    :rtype: int
    :return: the number of bytes written
    """
    _complain_ifclosed(self.closed)
    if self.__encoding:
        self.f.write(data.encode(self.__encoding, self.__errors))
        return len(data)
    else:
        return self.f.write(data)
[ "def", "write", "(", "self", ",", "data", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "if", "self", ".", "__encoding", ":", "self", ".", "f", ".", "write", "(", "data", ".", "encode", "(", "self", ".", "__encoding", ",", "self", ".", "__errors", ")", ")", "return", "len", "(", "data", ")", "else", ":", "return", "self", ".", "f", ".", "write", "(", "data", ")" ]
Write ``data`` to the file.

:type data: bytes
:param data: the data to be written to the file
:rtype: int
:return: the number of bytes written
[ "Write", "data", "to", "the", "file", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/file.py#L242-L256
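A minimal usage sketch for ``FileIO.write``, exercising the encoding branch shown above; the path, the ``"wt"`` mode string and the ``encoding`` keyword passed to ``pydoop.hdfs.open`` are assumptions not confirmed by this excerpt.

import pydoop.hdfs as hdfs

f = hdfs.open("/user/alice/out.txt", "wt", encoding="utf-8")  # hypothetical call
try:
    n = f.write(u"first line\n")  # with an encoding set, returns len(data) for the str passed in
finally:
    f.close()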