Dataset columns: signature (8-3.44k chars) · body (0-1.41M chars) · docstring (1-122k chars) · id (5-17 chars)
def _register_magics(ipython):
ipython.register_magic_function(
    _start_magic,
    magic_kind="<STR_LIT>",
    magic_name="<STR_LIT>",
)
Register IPython line/cell magics.

Args:
  ipython: An `InteractiveShell` instance.
f8120:m3
def _start_magic(line):
return start(line)
Implementation of the `%tensorboard` line magic.
f8120:m4
def start(args_string):
context = _get_context()
try:
    import IPython
    import IPython.display
except ImportError:
    IPython = None
if context == _CONTEXT_NONE:
    handle = None
    print("<STR_LIT>")
else:
    handle = IPython.display.display(
        IPython.display.Pretty("<STR_LIT>"),
        display_id=True,
    )

def print_or_update(message):
    if handle is None:
        print(message)
    else:
        handle.update(IPython.display.Pretty(message))

parsed_args = shlex.split(args_string, comments=True, posix=True)
start_result = manager.start(parsed_args)
if isinstance(start_result, manager.StartLaunched):
    _display(
        port=start_result.info.port,
        print_message=False,
        display_handle=handle,
    )
elif isinstance(start_result, manager.StartReused):
    template = (
        "<STR_LIT>"
        "<STR_LIT>"
    )
    message = template.format(
        port=start_result.info.port,
        pid=start_result.info.pid,
        delta=_time_delta_from_info(start_result.info),
    )
    print_or_update(message)
    _display(
        port=start_result.info.port,
        print_message=False,
        display_handle=None,
    )
elif isinstance(start_result, manager.StartFailed):

    def format_stream(name, value):
        if value == "<STR_LIT>":
            return "<STR_LIT>"
        elif value is None:
            return "<STR_LIT>" % name
        else:
            return "<STR_LIT>" % (name, value.strip())

    message = (
        "<STR_LIT>" %
        (
            start_result.exit_code,
            format_stream("<STR_LIT>", start_result.stderr),
            format_stream("<STR_LIT>", start_result.stdout),
        )
    )
    print_or_update(message)
elif isinstance(start_result, manager.StartTimedOut):
    message = (
        "<STR_LIT>"
        "<STR_LIT>"
        % start_result.pid
    )
    print_or_update(message)
else:
    raise TypeError(
        "<STR_LIT>"
        "<STR_LIT>"
        % start_result
    )
Launch and display a TensorBoard instance as if at the command line.

Args:
  args_string: Command-line arguments to TensorBoard, to be interpreted by
    `shlex.split`: e.g., "--logdir ./logs --port 0". Shell metacharacters
    are not supported: e.g., "--logdir 2>&1" will point the logdir at the
    literal directory named "2>&1".
f8120:m5
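A minimal invocation sketch mirroring the docstring's example above; in a notebook, the `%tensorboard` line magic defined earlier delegates to exactly this call (the argument string is illustrative, not from the dataset):

start("--logdir ./logs --port 0")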
def _time_delta_from_info(info):
delta_seconds = int(time.time()) - info.start_time
return str(datetime.timedelta(seconds=delta_seconds))
Format the elapsed time for the given TensorBoardInfo.

Args:
  info: A TensorBoardInfo value.

Returns:
  A human-readable string describing the time since the server described
  by `info` started: e.g., "2 days, 0:48:58".
f8120:m6
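The formatting relies on `str(datetime.timedelta(...))` producing strings like "2 days, 0:48:58". A self-contained sketch of the same computation, with a hypothetical start time:

import datetime
import time

start_time = time.time() - 175738  # hypothetical: a bit over two days ago
delta_seconds = int(time.time()) - int(start_time)
print(str(datetime.timedelta(seconds=delta_seconds)))  # "2 days, 0:48:58"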
def display(port=None, height=None):
_display(port=port, height=height, print_message=True, display_handle=None)
Display a TensorBoard instance already running on this machine.

Args:
  port: The port on which the TensorBoard server is listening, as an
    `int`, or `None` to automatically select the most recently launched
    TensorBoard.
  height: The height of the frame into which to render the TensorBoard UI,
    as an `int` number of pixels, or `None` to use a default value
    (currently 800).
f8120:m7
def _display(port=None, height=None, print_message=False, display_handle=None):
if height is None:
    height = <NUM_LIT>
if port is None:
    infos = manager.get_all()
    if not infos:
        raise ValueError("<STR_LIT>")
    else:
        info = max(manager.get_all(), key=lambda x: x.start_time)
        port = info.port
else:
    infos = [i for i in manager.get_all() if i.port == port]
    info = (
        max(infos, key=lambda x: x.start_time)
        if infos
        else None
    )
if print_message and info is not None:
    message = (
        "<STR_LIT>"
        "<STR_LIT>"
    ).format(
        data_source=manager.data_source_from_info(info),
        delta=_time_delta_from_info(info),
        port=info.port,
        pid=info.pid,
    )
    print(message)
fn = {
    _CONTEXT_COLAB: _display_colab,
    _CONTEXT_IPYTHON: _display_ipython,
    _CONTEXT_NONE: _display_cli,
}[_get_context()]
return fn(port=port, height=height, display_handle=display_handle)
Internal version of `display`.

Args:
  port: As with `display`.
  height: As with `display`.
  print_message: True to print which TensorBoard instance was selected for
    display (if applicable), or False otherwise.
  display_handle: If not None, an IPython display handle into which to
    render TensorBoard.
f8120:m8
def _display_colab(port, height, display_handle):
import IPython.display
shell =
Display a TensorBoard instance in a Colab output frame.

The Colab VM is not directly exposed to the network, so the Colab runtime
provides a service worker tunnel to proxy requests from the end user's
browser through to servers running on the Colab VM: the output frame may
issue requests to https://localhost:<port> (HTTPS only), which will be
forwarded to the specified port on the VM.

It does not suffice to create an `iframe` and let the service worker
redirect its traffic (`<iframe src="https://localhost:6006">`), because for
security reasons service workers cannot intercept iframe traffic. Instead,
we manually fetch the TensorBoard index page with an XHR in the output
frame, and inject the raw HTML into `document.body`.

By default, the TensorBoard web app requests resources against relative
paths, like `./data/logdir`. Within the output frame, these requests must
instead hit `https://localhost:<port>/data/logdir`. To redirect them, we
change the document base URI, which transparently affects all requests
(XHRs and resources alike).
f8120:m9
def list():
infos = manager.get_all()
if not infos:
    print("<STR_LIT>")
    return
print("<STR_LIT>")
for info in infos:
    template = "<STR_LIT>"
    print(template.format(
        port=info.port,
        data_source=manager.data_source_from_info(info),
        delta=_time_delta_from_info(info),
        pid=info.pid,
    ))
Print a listing of known running TensorBoard instances.

TensorBoard instances that were killed uncleanly (e.g., with SIGKILL or
SIGQUIT) may appear in this list even if they are no longer running.
Conversely, this list may be missing some entries if your operating
system's temporary directory has been cleared since a still-running
TensorBoard instance started.
f8120:m12
def _ensure_tensorboard_on_path(self, expected_binary_dir):
command = "<STR_LIT>" if os.name == "<STR_LIT>" else "<STR_LIT>"
binary = subprocess.check_output([command, "<STR_LIT>"])
self.assertTrue(
    binary.startswith(expected_binary_dir.encode("<STR_LIT:utf-8>")),
    "<STR_LIT>" % (binary, expected_binary_dir),
)
Ensure that `tensorboard(1)` refers to our own binary.

Raises:
  subprocess.CalledProcessError: If there is no `tensorboard` on the path.
  AssertionError: If the `tensorboard` on the path is not under the
    provided directory.
f8122:c0:m2
def _stub_tensorboard(self, name, program):
tempdir = tempfile.mkdtemp(prefix="<STR_LIT>" % name)
filepath = os.path.join(tempdir, "<STR_LIT>")
with open(filepath, "<STR_LIT:w>") as outfile:
    outfile.write(program)
os.chmod(filepath, <NUM_LIT>)
environ = {
    "<STR_LIT>": os.pathsep.join((tempdir, os.environ["<STR_LIT>"])),
}
environ_patcher = mock.patch.dict(os.environ, environ)
environ_patcher.start()
self.addCleanup(environ_patcher.stop)
self._ensure_tensorboard_on_path(expected_binary_dir=tempdir)
Install a stub version of TensorBoard.

Args:
  name: A short description of the stub's behavior. This will appear in
    the file path, which may appear in error messages.
  program: The contents of the stub: this should probably be a string that
    starts with "#!/bin/sh" and then contains a POSIX shell script.
f8122:c0:m3
def markdown_to_safe_html(markdown_string):
warning = '<STR_LIT>'
if isinstance(markdown_string, six.binary_type):
    markdown_string_decoded = markdown_string.decode('<STR_LIT:utf-8>')
    markdown_string = markdown_string_decoded.replace(u'<STR_LIT:\x00>', u'<STR_LIT>')
    num_null_bytes = len(markdown_string_decoded) - len(markdown_string)
    if num_null_bytes:
        warning = ('<STR_LIT>'
                   '<STR_LIT>') % num_null_bytes
string_html = markdown.markdown(
    markdown_string, extensions=['<STR_LIT>'])
string_sanitized = bleach.clean(
    string_html, tags=_ALLOWED_TAGS, attributes=_ALLOWED_ATTRIBUTES)
return warning + string_sanitized
Convert Markdown to HTML that's safe to splice into the DOM.

Arguments:
  markdown_string: A Unicode string or UTF-8--encoded bytestring
    containing Markdown source. Markdown tables are supported.

Returns:
  A string containing safe HTML.
f8124:m0
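A minimal standalone sketch of the render-then-sanitize pattern above, with a hypothetical tag whitelist standing in for the masked _ALLOWED_TAGS and _ALLOWED_ATTRIBUTES constants:

import bleach
import markdown

ALLOWED_TAGS = ["p", "em", "strong", "code", "table", "thead", "tbody", "tr", "th", "td"]

def to_safe_html(markdown_string):
    # Render Markdown (with table support), then whitelist-sanitize the HTML.
    html = markdown.markdown(markdown_string, extensions=["markdown.extensions.tables"])
    return bleach.clean(html, tags=ALLOWED_TAGS)

print(to_safe_html("**bold** <script>alert(1)</script>"))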
def _make_info(i=<NUM_LIT:0>):
return manager.TensorBoardInfo(
    version=version.VERSION,
    start_time=<NUM_LIT> + i,
    port=<NUM_LIT> + i,
    pid=<NUM_LIT> + i,
    path_prefix="<STR_LIT>",
    logdir="<STR_LIT>",
    db="<STR_LIT>",
    cache_key="<STR_LIT>",
)
Make a sample TensorBoardInfo object.

Args:
  i: Seed; vary this value to produce slightly different outputs.

Returns:
  A type-correct `TensorBoardInfo` object.
f8125:m0
def assertMode(self, path, expected):
stat_result = os.stat(path)
format_mode = lambda m: "<STR_LIT>" % m
self.assertEqual(
    format_mode(stat_result.st_mode & <NUM_LIT>),
    format_mode(expected),
)
Assert that the permission bits of a file are as expected.

Args:
  path: File to stat.
  expected: `int`; a subset of 0o777.

Raises:
  AssertionError: If the permission bits of `path` do not match
    `expected`.
f8125:c2:m2
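The masked format string is presumably an octal formatter; a hypothetical, runnable equivalent of the check:

import os
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)
os.chmod(path, 0o644)
# Compare octal renderings so assertion failures read naturally, as above.
assert "%03o" % (os.stat(path).st_mode & 0o777) == "%03o" % 0o644
os.remove(path)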
def _MakeHistogram(values):
limits = _MakeHistogramBuckets()
counts = [<NUM_LIT:0>] * len(limits)
for v in values:
    idx = bisect.bisect_left(limits, v)
    counts[idx] += <NUM_LIT:1>
limit_counts = [(limits[i], counts[i]) for i in xrange(len(limits))
                if counts[i]]
bucket_limit = [lc[<NUM_LIT:0>] for lc in limit_counts]
bucket = [lc[<NUM_LIT:1>] for lc in limit_counts]
sum_sq = sum(v * v for v in values)
return tf.compat.v1.HistogramProto(
    min=min(values),
    max=max(values),
    num=len(values),
    sum=sum(values),
    sum_squares=sum_sq,
    bucket_limit=bucket_limit,
    bucket=bucket)
Convert values into a histogram proto using logic from histogram.cc.
f8126:m1
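For context, a sketch of the companion _MakeHistogramBuckets() this function depends on, assuming it follows the geometric-bucket scheme from TensorFlow's histogram.cc (bucket edges growing by a factor of 1.1 in both directions from zero):

def _MakeHistogramBuckets():
    v = 1e-12
    buckets = []
    neg_buckets = []
    while v < 1e20:
        buckets.append(v)
        neg_buckets.append(-v)
        v *= 1.1
    # Negative edges (ascending), zero, then positive edges.
    return neg_buckets[::-1] + [0] + buckets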
def WriteScalarSeries(writer, tag, f, n=<NUM_LIT:5>):
step = <NUM_LIT:0>
wall_time = _start_time
for i in xrange(n):
    v = f(i)
    value = tf.Summary.Value(tag=tag, simple_value=v)
    summary = tf.Summary(value=[value])
    event = tf.Event(wall_time=wall_time, step=step, summary=summary)
    writer.add_event(event)
    step += <NUM_LIT:1>
    wall_time += <NUM_LIT:10>
Write a series of scalar events to writer, using f to create values.
f8126:m2
def WriteHistogramSeries(writer, tag, mu_sigma_tuples, n=<NUM_LIT:20>):
step = <NUM_LIT:0>
wall_time = _start_time
for [mean, stddev] in mu_sigma_tuples:
    data = [random.normalvariate(mean, stddev) for _ in xrange(n)]
    histo = _MakeHistogram(data)
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=histo)])
    event = tf.Event(wall_time=wall_time, step=step, summary=summary)
    writer.add_event(event)
    step += <NUM_LIT:10>
    wall_time += <NUM_LIT:100>
Write a sequence of normally distributed histograms to writer.
f8126:m3
def WriteImageSeries(writer, tag, n_images=<NUM_LIT:1>):
step = <NUM_LIT:0>
session = tf.compat.v1.Session()
p = tf.compat.v1.placeholder("<STR_LIT>", (<NUM_LIT:1>, <NUM_LIT:4>, <NUM_LIT:4>, <NUM_LIT:3>))
s = tf.compat.v1.summary.image(tag, p)
for _ in xrange(n_images):
    im = np.random.random_integers(<NUM_LIT:0>, <NUM_LIT:255>, (<NUM_LIT:1>, <NUM_LIT:4>, <NUM_LIT:4>, <NUM_LIT:3>))
    summ = session.run(s, feed_dict={p: im})
    writer.add_summary(summ, step)
    step += <NUM_LIT:20>
session.close()
Write a few dummy images to writer.
f8126:m4
def WriteAudioSeries(writer, tag, n_audio=<NUM_LIT:1>):
step = <NUM_LIT:0>
session = tf.compat.v1.Session()
min_frequency_hz = <NUM_LIT>
max_frequency_hz = <NUM_LIT>
sample_rate = <NUM_LIT>
duration_frames = sample_rate // <NUM_LIT:2>
frequencies_per_run = <NUM_LIT:1>
num_channels = <NUM_LIT:2>
p = tf.compat.v1.placeholder("<STR_LIT>", (frequencies_per_run, duration_frames,
                                           num_channels))
s = tf.compat.v1.summary.audio(tag, p, sample_rate)
for _ in xrange(n_audio):
    frequencies = np.random.random_integers(
        min_frequency_hz,
        max_frequency_hz,
        size=(frequencies_per_run, num_channels))
    tiled_frequencies = np.tile(frequencies, (<NUM_LIT:1>, duration_frames))
    tiled_increments = np.tile(
        np.arange(<NUM_LIT:0>, duration_frames),
        (num_channels, <NUM_LIT:1>)).T.reshape(<NUM_LIT:1>, duration_frames * num_channels)
    tones = np.sin(<NUM_LIT> * np.pi * tiled_frequencies * tiled_increments /
                   sample_rate)
    tones = tones.reshape(frequencies_per_run, duration_frames, num_channels)
    summ = session.run(s, feed_dict={p: tones})
    writer.add_summary(summ, step)
    step += <NUM_LIT:20>
session.close()
Write a few dummy audio clips to writer.
f8126:m5
def GenerateTestData(path):
run1_path = os.path.join(path, "<STR_LIT>")
os.makedirs(run1_path)
writer1 = tf.summary.FileWriter(run1_path)
WriteScalarSeries(writer1, "<STR_LIT>", lambda x: x * x)
WriteScalarSeries(writer1, "<STR_LIT>", lambda x: x * x)
WriteScalarSeries(writer1, "<STR_LIT>", math.sin)
WriteScalarSeries(writer1, "<STR_LIT>", math.cos)
WriteHistogramSeries(writer1, "<STR_LIT>",
                     [[<NUM_LIT:0>, <NUM_LIT:1>], [<NUM_LIT>, <NUM_LIT:1>], [<NUM_LIT:0.5>, <NUM_LIT:1>], [<NUM_LIT>, <NUM_LIT:1>], [<NUM_LIT:1>, <NUM_LIT:1>]])
WriteImageSeries(writer1, "<STR_LIT>")
WriteImageSeries(writer1, "<STR_LIT>")
WriteAudioSeries(writer1, "<STR_LIT>")
run2_path = os.path.join(path, "<STR_LIT>")
os.makedirs(run2_path)
writer2 = tf.summary.FileWriter(run2_path)
WriteScalarSeries(writer2, "<STR_LIT>", lambda x: x * x * <NUM_LIT:2>)
WriteScalarSeries(writer2, "<STR_LIT>", lambda x: x * x * <NUM_LIT:3>)
WriteScalarSeries(writer2, "<STR_LIT>", lambda x: math.cos(x) * <NUM_LIT:2>)
WriteHistogramSeries(writer2, "<STR_LIT>",
                     [[<NUM_LIT:0>, <NUM_LIT:2>], [<NUM_LIT>, <NUM_LIT:2>], [<NUM_LIT:0.5>, <NUM_LIT:2>], [<NUM_LIT>, <NUM_LIT:2>], [<NUM_LIT:1>, <NUM_LIT:2>]])
WriteHistogramSeries(writer2, "<STR_LIT>",
                     [[<NUM_LIT:0>, <NUM_LIT:1>], [<NUM_LIT>, <NUM_LIT:1>], [<NUM_LIT:0.5>, <NUM_LIT:1>], [<NUM_LIT>, <NUM_LIT:1>], [<NUM_LIT:1>, <NUM_LIT:1>]])
WriteImageSeries(writer2, "<STR_LIT>")
WriteAudioSeries(writer2, "<STR_LIT>")
graph_def = tf.compat.v1.GraphDef()
node1 = graph_def.node.add()
node1.name = "<STR_LIT:a>"
node1.op = "<STR_LIT>"
node2 = graph_def.node.add()
node2.name = "<STR_LIT:b>"
node2.op = "<STR_LIT>"
node2.input.extend(["<STR_LIT>"])
writer1.add_graph(graph_def)
node3 = graph_def.node.add()
node3.name = "<STR_LIT:c>"
node3.op = "<STR_LIT>"
node3.input.extend(["<STR_LIT>", "<STR_LIT>"])
writer2.add_graph(graph_def)
writer1.close()
writer2.close()
Generates the test data directory.
f8126:m6
def run(inputs, program, outputs):
root = tempfile.mkdtemp()
try:
    cwd = os.getcwd()
    for fake, real in inputs:
        parent = os.path.join(root, os.path.dirname(fake))
        if not os.path.exists(parent):
            os.makedirs(parent)
        if hasattr(os, '<STR_LIT>') and not os.name == '<STR_LIT>':
            os.symlink(os.path.join(cwd, real), os.path.join(root, fake))
        else:
            shutil.copyfile(os.path.join(cwd, real), os.path.join(root, fake))
    if subprocess.call(program + [root]) != <NUM_LIT:0>:
        return <NUM_LIT:1>
    for fake, real in outputs:
        shutil.copyfile(os.path.join(root, fake), real)
    return <NUM_LIT:0>
finally:
    try:
        shutil.rmtree(root)
    except EnvironmentError:
        pass
Creates temp symlink tree, runs program, and copies back outputs.

Args:
  inputs: List of fake paths to real paths, which are used for the symlink
    tree.
  program: List containing real path of program and its arguments. The
    execroot directory will be appended as the last argument.
  outputs: List of fake outputted paths to copy back to real paths.

Returns:
  0 if succeeded or nonzero if failed.
f8127:m0
def main(args):
if not args:
    raise Exception('<STR_LIT>')
inputs = []
program = []
outputs = []
for arg in args:
    with open(arg) as fd:
        config = json.load(fd)
    inputs.extend(config.get('<STR_LIT>', []))
    program.extend(config.get('<STR_LIT>', []))
    outputs.extend(config.get('<STR_LIT>', []))
if not program:
    raise Exception('<STR_LIT>')
return run(inputs, program, outputs)
Invokes run function using a JSON file config.

Args:
  args: CLI args, which can be a JSON file containing an object whose
    attributes are the parameters to the run function. If multiple JSON
    files are passed, their contents are concatenated.

Returns:
  0 if succeeded or nonzero if failed.

Raises:
  Exception: If input data is missing.
f8127:m1
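A hypothetical config file for main() above; per the docstring, its attributes mirror run()'s parameters (all paths and the tool name here are illustrative):

import json

example_config = {
    "inputs": [["execroot/data/in.txt", "real/in.txt"]],
    "program": ["/usr/bin/my-tool", "--flag"],
    "outputs": [["execroot/out/result.txt", "result.txt"]],
}
print(json.dumps(example_config, indent=2))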
def load_config(configfile):
try:
    with open(configfile, '<STR_LIT:r>') as ymlfile:
        try:
            # safe_load: yaml.load without an explicit Loader is unsafe and
            # deprecated for untrusted input.
            config = yaml.safe_load(ymlfile)
            return config
        except yaml.parser.ParserError:
            raise PyYAMLConfigError(
                '<STR_LIT>'.format(configfile),
            )
except IOError:
    raise PyYAMLConfigError(
        '<STR_LIT>'.format(configfile),
    )
Return a dict with configuration from the supplied YAML file.
f8129:m0
def write_config(configfile, content):
with open(configfile, '<STR_LIT>') as ymlfile:
    yaml.dump(
        content,
        ymlfile,
        default_flow_style=False,
    )
Write a dict to a file in YAML format.
f8129:m1
def colorize(bg, base, fg, *text):
rtext = [str(f) for f in text]
return COLORIZE_FORMAT.format(
    _to_int(bg), _to_int(base), _to_int(fg), '<STR_LIT>'.join(rtext)
)
colorize(bg, base, fg, *text)
f8135:m1
def uncolorize(text):
match = re.match('<STR_LIT>', text)
try:
    return '<STR_LIT>'.join(match.groups())
except AttributeError:
    # No match: re.match returned None, so return the text unchanged.
    return text
uncolorize(text)
f8135:m2
def tr(text, kword, color):
return re.sub(kword, colorize(BgColor.Null, Base.Null, color, kword), text)
tr(text, keyword, color)
f8135:m3
def tr_iter(text, kword, color):
s = '<STR_LIT>'
for _t in text:
    if _t in kword:
        s += colorize(BgColor.Null, Base.Null, color, _t)
    else:
        s += _t
return s
tr_iter(text, kword, color)
f8135:m4
def cprint(*text, **kwargs):
print(
    colorize(
        kwargs.get('<STR_LIT>') or BgColor.Null,
        kwargs.get('<STR_LIT>') or Base.Null,
        kwargs.get('<STR_LIT>') or FgColor.Null,
        *text
    ),
    file=kwargs.get('<STR_LIT:file>') or sys.stdout
)
cprint(*text, **keywordargs)
f8135:m5
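The masked COLORIZE_FORMAT constant is presumably an ANSI SGR escape template; a plausible definition and usage (an assumption, not the module's actual string):

COLORIZE_FORMAT = '\033[{};{};{}m{}\033[0m'  # hypothetical: bg;style;fg, then reset
print(COLORIZE_FORMAT.format(40, 1, 32, 'hello'))  # bold green on a black background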
def __init__(self,
             positions=None,
             scaled_positions=None,
             masses=None,
             cell=None,
             force_sets=None,
             force_constants=None,
             atomic_numbers=None,
             atomic_elements=None,
             atom_type_index=None,
             primitive_matrix=None):
self._cell = np.array(cell, dtype='<STR_LIT>')
self._masses = np.array(masses, dtype='<STR_LIT>')
self._atomic_numbers = np.array(atomic_numbers, dtype='<STR_LIT>')
self._force_constants = force_constants
self._force_sets = force_sets
self._atomic_elements = atomic_elements
self._atom_type_index = atom_type_index
self._scaled_positions = scaled_positions
self._positions = positions
self._primitive_matrix = primitive_matrix
self._primitive_cell = None
self._supercell_matrix = None
self._supercell_phonon = None
self._supercell_phonon_renormalized = None
self._number_of_cell_atoms = None
self._number_of_atoms = None
self._number_of_atom_types = None
self._number_of_primitive_atoms = None
if atomic_elements is None and masses is not None:
    self._atomic_elements = []
    for i in masses:
        for j in atom_data:
            if "<STR_LIT>".format(i) == "<STR_LIT>".format(j[<NUM_LIT:3>]):
                self._atomic_elements.append(j[<NUM_LIT:1>])
if atomic_numbers is None and self._atomic_elements is not None:
    self._atomic_numbers = np.array([symbol_map[i] for i in self._atomic_elements])
else:
    self._atomic_numbers = np.array(atomic_numbers)
if masses is None and self._atomic_numbers is not None:
    self._masses = np.array([atom_data[i][<NUM_LIT:3>] for i in self._atomic_numbers])
else:
    self._masses = masses
:param positions: atoms' Cartesian positions (array Ndim x Natoms)
:param scaled_positions: atom positions scaled to 1 (array Ndim x Natoms)
:param masses: masses of the atoms (vector Natoms)
:param cell: Numpy array containing the unit cell (lattice vectors in rows)
:param force_sets: force sets
:param force_constants: harmonic force constants
:param atomic_numbers: atomic numbers vector (1 x Natoms)
:param atomic_elements: atomic names of each element (ex: H, Be, Si, ...) (vector Natoms)
:param atom_type_index: index vector that contains the number of different types of atoms in the crystal (vector NdifferentAtoms)
:param primitive_matrix: matrix that defines the primitive cell with respect to the unit cell
f8142:c0:m0
def diff_matrix(array_1, array_2, cell_size):
array_1_norm = np.array(array_1) / np.array(cell_size, dtype=float)[None, :]
array_2_norm = np.array(array_2) / np.array(cell_size, dtype=float)[None, :]
return array_2_norm - array_1_norm
:param array_1: supercell scaled positions with respect to the unit cell
:param array_2: supercell scaled positions with respect to the unit cell
:param cell_size: size of the unit cell
:return: difference between the arrays, accounting for periodicity
f8147:m0
def _function(self, x, a, b, c, d):
return c / (np.pi * b * (<NUM_LIT:1.0> + ((x - a) / b) ** <NUM_LIT:2>)) + d
Lorentzian function.

x: frequency coordinate
a: peak position
b: half width
c: area proportional parameter
d: base line
f8157:c0:m1
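In standard notation, the body above is the Lorentzian

    L(x) = \frac{c}{\pi\, b \left[ 1 + \left( \frac{x - a}{b} \right)^{2} \right]} + d

and \int_{-\infty}^{\infty} (L(x) - d)\, dx = c, which is why c is described as an area-proportional parameter.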
def _g_a (self, x, a, b, s):
return <NUM_LIT:2> * b / (<NUM_LIT:1.0> + np.exp(s * (x - a)))
Asymmetric width term.

x: frequency coordinate
a: peak position
b: half width
s: asymmetry parameter
f8157:c1:m1
def _function(self, x, a, b, c, d, s):
return c / (np.pi * self._g_a(x, a, b, s) * (<NUM_LIT:1.0> + ((x - a) / (self._g_a(x, a, b, s))) ** <NUM_LIT:2>)) + d
Lorentzian asymmetric function.

x: frequency coordinate
a: peak position
b: half width
c: area proportional parameter
d: base line
s: asymmetry parameter
f8157:c1:m2
def _function(self, x, a, b, c, d):
return c / ((a ** <NUM_LIT:2> - x ** <NUM_LIT:2>) ** <NUM_LIT:2> + (b * x) ** <NUM_LIT:2>) + d
Damped harmonic oscillator PS function.

x: frequency coordinate
a: peak position
b: half width
c: area proportional parameter
d: base line
f8157:c2:m1
def _function(self, x, a, b, c, d):
# Standard Gaussian PDF normalization: divide by b * sqrt(2*pi) so that c
# scales the area (the original multiplied by sqrt(2*pi) instead).
return c / (b * np.sqrt(<NUM_LIT:2> * np.pi)) * np.exp(-(x - a) ** <NUM_LIT:2> / (<NUM_LIT:2> * b ** <NUM_LIT:2>)) + d
Gaussian PDF function.

x: coordinate
a: peak position
b: deviation (sigma)
c: area proportional parameter
d: base line
f8157:c3:m1
def get_variables_substitution_dictionaries(self, lhs_graph, rhs_graph):
if not rhs_graph:
    return {}, {}, {}
self.matching_code_container.add_graph_to_namespace(lhs_graph)
self.matching_code_container.add_graph_to_namespace(rhs_graph)
return self.__collect_variables_that_match_graph(lhs_graph, rhs_graph)
Looks for sub-isomorphisms of rhs into lhs.

:param lhs_graph: The graph to look for sub-isomorphisms in (the bigger graph)
:param rhs_graph: The smaller graph
:return: The list of matching names
f8166:c1:m1
def add_line(self, string):
self.code_strings.append(string)
code = '<STR_LIT>'
if len(self.code_strings) == <NUM_LIT:1>:
    code = '<STR_LIT>' + self.code_strings[<NUM_LIT:0>] + '<STR_LIT:)>'
if len(self.code_strings) > <NUM_LIT:1>:
    code = '<STR_LIT>' + '<STR_LIT:U+0020>'.join(self.code_strings) + '<STR_LIT>'
self._compiled_ast_and_expr = self.__compile_code(code_string=code)
Adds a line to the LISP code to execute.

:param string: The line to add
:return: None
f8167:c0:m1
def add_graph_to_namespace(self, graph):
for node in graph.vs:
    attributes = node.attributes()
    self.namespace[node['<STR_LIT:name>']] = attributes
for edge in graph.es:
    attributes = edge.attributes()
    self.namespace[edge['<STR_LIT:name>']] = attributes
Adds the variable names to the namespace of the local LISP code.

:param graph: the graph to add to the namespace
:return: None
f8167:c0:m2
def execute(self, vertices_substitution_dict={}):
if not self.code_strings:
    return True
if vertices_substitution_dict:
    namespace = self.__substitute_names_in_namespace(self.namespace, vertices_substitution_dict)
else:
    namespace = self.namespace
try:
    self.__execute_code(self._compiled_ast_and_expr, namespace)
except Exception:
    pass
return namespace['<STR_LIT:result>']
Executes the code.

:param vertices_substitution_dict: aliases of the variables in the code
:return: True/False, depending on the result of the code (default is True)
f8167:c0:m3
def substitute_namespace_into_graph(self, graph):
for key, value in self.namespace.items():
    try:
        nodes = graph.vs.select(name=key)
        for node in nodes:
            for k, v in value.items():
                node[k] = v
    except Exception:
        pass
    try:
        edges = graph.es.select(name=key)
        for edge in edges:
            for k, v in value.items():
                edge[k] = v
    except Exception:
        pass
return graph
Copies the local namespace of the code into a graph (to be used after the execution of the code).

:param graph: The graph to use as a recipient of the namespace
:return: the updated graph
f8167:c0:m4
def __init__(self, g, node_matcher=StringNodeMatcher(), code_container_factory=CodeContainerFactory()):
self.g = g
self.node_matcher = node_matcher
self.action_list = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']
self.action_dict = {'<STR_LIT>': self.__match,
                    '<STR_LIT>': self.__create,
                    '<STR_LIT>': self.__delete,
                    '<STR_LIT>': self.__set,
                    '<STR_LIT>': self.__where,
                    }
self.code_container_factory = code_container_factory
This class interprets the commands and translates them into operations on a graph by calling GraphBuilder(). It accepts a graph as an argument and performs operations onto it.

:param g: The graph to perform operations onto
:param node_matcher: The class that decides if two nodes match
:param code_container_factory: the class that creates the object that executes the LISP code
f8168:c0:m0
def query(self, string, repeat_n_times=None):
if not repeat_n_times:
    repeat_n_times = self.__determine_how_many_times_to_repeat_query(string)
lines = self.__get_command_lines(string)
return_list = []
for line in lines:
    lst = self.__query_n_times(line, repeat_n_times)
    if lst and lst[<NUM_LIT:0>]:
        return_list = lst
return return_list
This method performs the operations onto self.g.

:param string: The list of operations to perform. Sequences of commands should be separated by a semicolon. An example might be:

  CREATE {'tag': 'PERSON', 'text': 'joseph'}(v1), {'relation': 'LIVES_AT'}(v1,v2), {'tag': 'PLACE', 'text': 'London'}(v2)
  MATCH {}(_a), {'relation': 'LIVES_AT'}(_a,_b), {}(_b)
  WHERE (= (get _a "text") "joseph")
  RETURN _a,_b;

:param repeat_n_times: The maximum number of times the graph is queried. It sets the maximum length of the return list. If None, the value is set by self.__determine_how_many_times_to_repeat_query(string).
:return: If the RETURN command is called with a list of variable names, a list of JSON with the corresponding properties is returned. If the RETURN command is used alone, a list with the entire graph is returned. Otherwise it returns an empty list.
f8168:c0:m1
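A hypothetical end-to-end usage of this interpreter, reusing the query syntax from the docstring above and assuming the class is exposed under an illustrative name (GraphDatabase) wrapping an igraph.Graph:

import igraph

g = igraph.Graph(directed=True)
db = GraphDatabase(g)  # hypothetical name for the interpreter class above
db.query("CREATE {'tag': 'PERSON', 'text': 'joseph'}(v1), "
         "{'relation': 'LIVES_AT'}(v1,v2), "
         "{'tag': 'PLACE', 'text': 'London'}(v2);")
rows = db.query("MATCH {}(_a), {'relation': 'LIVES_AT'}(_a,_b), {}(_b) "
                'WHERE (= (get _a "text") "joseph") RETURN _a,_b;')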
def __query_with_builder(self, string, builder):
action_graph_pairs = self.__get_action_graph_pairs_from_query(string)
for action, graph_str in action_graph_pairs:
    if action == '<STR_LIT>' or action == '<STR_LIT>':
        return self.__return(graph_str, builder)
    try:
        self.action_dict[action](graph_str, builder)
    except MatchException:
        break
return {}
Uses the builder in the argument to modify the graph, according to the commands in the string.

:param string: The single query to the database
:return: The result of the RETURN operation
f8168:c0:m4
def __get_action_graph_pairs_from_query(self, query):
import re
query = convert_special_characters_to_spaces(query)
graph_list = re.split('<STR_LIT:|>'.join(self.action_list), query)
query_list_positions = [query.find(graph) for graph in graph_list]
action_list = [query[query_list_positions[i] + len(graph_list[i]):query_list_positions[i + <NUM_LIT:1>]].strip()
               for i in range(len(graph_list) - <NUM_LIT:1>)]
graph_list = graph_list[<NUM_LIT:1>:]
return zip(action_list, graph_list)
Splits the query into command/argument pairs, for example [("MATCH", "{}(_a)"), ("RETURN", "_a")].

:param query: The string with the list of commands
:return: the command/argument pairs
f8168:c0:m5
def __init__(self, g, node_matcher, code_container_factory):
self.g = g
self.vertices_substitution_dict = {}
self.edges_substitution_dict = {}
self.matching_graph = None
self.matching_code_container = code_container_factory.create()
self.match = Match(self.matching_code_container, node_matcher)
self.update = True
self.match_info = {}
This class performs the operations on the graph g.

:param g: The graph to modify
f8170:c0:m0
def add_graph(self, rhs_graph):
rhs_graph = self.__substitute_names_in_graph(rhs_graph)
self.g = self.__merge_graphs(self.g, rhs_graph)
return self
Adds a graph to self.g.

:param rhs_graph: the graph to add
:return: itself
f8170:c0:m1
def set(self, code):
if self.update:
    self.vertices_substitution_dict, self.edges_substitution_dict, self.match_info = \
        self.match.get_variables_substitution_dictionaries(self.g, self.matching_graph)
try:
    self.matching_graph = self.__apply_code_to_graph(code, self.matching_graph)
except Exception:
    pass
try:
    code = self.__substitute_names_in_code(code)
    self.g = self.__apply_code_to_graph(code, self.g)
except Exception:
    pass
return True
Executes the code and applies it to self.g.

:param code: the LISP code to execute
:return: True/False, depending on the result of the LISP code
f8170:c0:m2
def match_graph(self, rhs_graph):
self.matching_graph = rhs_graph
Sets the graph to match against self.g.

:param rhs_graph: The graph to match
:return: None
f8170:c0:m3
def where(self, code_string):
self.matching_code_container.add_line(code_string)
Sets the LISP code to execute upon matching graphs.

:param code_string: The code to execute
:return: None
f8170:c0:m4
def delete_list(self, variables):
variables = set(self.__substitute_names_in_list(variables))
self.update = False
self.g.delete_vertices(self.g.vs.select(name_in=variables))
self.g.delete_edges(self.g.es.select(name_in=variables))
Deletes a list of vertices/edges from self.g.

:param variables: the names of the variables to delete
:return: None
f8170:c0:m5
def build(self):
return self.g
Returns the graph.

:return: self.g
f8170:c0:m6
def get_match_dict(self):
:return: A dict with the information on how the match went. The keys are:
  * __RESULT__: True/False
f8170:c0:m7
def build_variables(self, variable_placeholders):
variables = self.__substitute_names_in_list(variable_placeholders)
attributes = {}
for i, variable in enumerate(variables):
    placeholder_name = variable_placeholders[i]
    try:
        vertices = self.g.vs.select(name=variable)
        attributes[placeholder_name] = vertices[<NUM_LIT:0>].attributes()
    except Exception:
        pass
for i, variable in enumerate(variables):
    placeholder_name = variable_placeholders[i]
    try:
        edges = self.g.es.select(name=variable)
        edge_attr = edges[<NUM_LIT:0>].attributes()
        attributes[placeholder_name] = edge_attr
    except Exception:
        pass
for i, variable in enumerate(variables):
    placeholder_name = variable_placeholders[i]
    try:
        attributes[placeholder_name] = self.match_info[placeholder_name]
    except Exception:
        pass
return attributes
:param variable_placeholders: The list of vertices/edges to return
:return: a dict where the keys are the names of the variables to return and the values are the JSON of the properties of these variables
f8170:c0:m8
@jinja2.contextfunction
def url_for(context, __route_name, **parts):
app = context['<STR_LIT>']
query = None
if '<STR_LIT>' in parts:
    query = parts.pop('<STR_LIT>')
for key in parts:
    val = parts[key]
    if isinstance(val, str):
        val = str(val)
    elif type(val) is int:
        val = str(val)
    else:
        raise TypeError("<STR_LIT>"
                        "<STR_LIT>".format(key, type(val), val))
    parts[key] = val
url = app.router[__route_name].url_for(**parts)
if query:
    url = url.with_query(query)
return url
Filter for generating urls. Usage: {{ url('the-view-name') }} might become "/path/to/view" or {{ url('item-details', id=123, query={'active': 'true'}) }} might become "/items/1?active=true".
f8179:m0
@jinja2.contextfunction
def static_url(context, static_file_path):
app = context['<STR_LIT>']
try:
    static_url = app['<STR_LIT>']
except KeyError:
    raise RuntimeError(
        "<STR_LIT>"
        "<STR_LIT>"
        "<STR_LIT>") from None
return '<STR_LIT>'.format(static_url.rstrip('<STR_LIT:/>'), static_file_path.lstrip('<STR_LIT:/>'))
Filter for generating urls for static files.

NOTE: you'll need to set app['static_root_url'] to be used as the root for
the urls returned.

Usage: {{ static('styles.css') }} might become "/static/styles.css" or
"http://mycdn.example.com/styles.css".
f8179:m1
def _prepare_uri(self, service_name, **parameters):
query_parameters = []
for key, value in parameters.items():
    if isinstance(value, (list, tuple)):
        value = "<STR_LIT:U+002C>".join([str(member) for member in value])
    if isinstance(value, bool):
        value = "<STR_LIT:true>" if value else "<STR_LIT:false>"
    query_parameters.append("<STR_LIT>".format(key, value))
if query_parameters:
    uri = "<STR_LIT>".format(self.base_url, service_name,
                             "<STR_LIT:&>".join(query_parameters))
else:
    uri = "<STR_LIT>".format(self.base_url, service_name)
return uri
Prepare the URI for a request.

:param service_name: The target service
:type service_name: str
:param parameters: query parameters
:return: The uri of the request
f8191:c0:m1
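A standalone mirror of the logic above, with the masked format strings filled in with plausible values (an assumption, not the library's exact strings), to make the serialization rules concrete:

def prepare_uri(base_url, service_name, **parameters):
    query = []
    for key, value in parameters.items():
        if isinstance(value, (list, tuple)):
            # Lists collapse to comma-separated values.
            value = ",".join(str(member) for member in value)
        if isinstance(value, bool):
            # Booleans serialize as lowercase strings.
            value = "true" if value else "false"
        query.append("{}={}".format(key, value))
    if query:
        return "{}/{}?{}".format(base_url, service_name, "&".join(query))
    return "{}/{}".format(base_url, service_name)

print(prepare_uri("https://api.example.com", "campaign", id=[1, 2], active=True))
# -> https://api.example.com/campaign?id=1,2&active=true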
def _handle_rate_exceeded(self, response):
waiting_time = int(response.headers.get("<STR_LIT>", <NUM_LIT:10>))
time.sleep(waiting_time)
Handles rate exceeded errors
f8191:c0:m2
def _send(self, send_method, service_name, data=None, **kwargs):
valid_response = False
raw = kwargs.pop("<STR_LIT>", False)
while not valid_response:
    headers = dict(Authorization=self.token)
    uri = self._prepare_uri(service_name, **kwargs)
    logger.debug('<STR_LIT:U+0020>'.join(map(str, (headers, uri, data))))
    response = send_method(uri, headers=headers, json=data)
    content_type = response.headers["<STR_LIT:Content-Type>"].split("<STR_LIT:;>")[<NUM_LIT:0>]
    if response.content and content_type == "<STR_LIT:application/json>":
        response_data = response.json()
        if "<STR_LIT>" in response_data:
            response_data = response_data["<STR_LIT>"]
    elif response.content:
        return response.content
    else:
        return None
    try:
        self.check_errors(response, response_data)
    except RateExceeded:
        self._handle_rate_exceeded(response)
    except NoAuth:
        self.update_token()
    else:
        valid_response = True
if raw:
    return response.json()
return response_data
Send a request to the AppNexus API (used for internal routing).

:param send_method: The method sending the request (usually requests.*)
:type send_method: function
:param service_name: The target service
:param data: The payload of the request (optional)
:type data: anything JSON-serializable
f8191:c0:m3
def update_token(self):
logger.info('<STR_LIT>')
if None in self.credentials.values():
    raise RuntimeError("<STR_LIT>")
credentials = dict(auth=self.credentials)
url = self.test_url if self.test else self.url
response = requests.post(url + "<STR_LIT>",
                         json=credentials)
data = response.json()["<STR_LIT>"]
if "<STR_LIT>" in data and data["<STR_LIT>"] == "<STR_LIT>":
    raise BadCredentials()
if "<STR_LIT>" in data and data["<STR_LIT>"] == "<STR_LIT>":
    time.sleep(<NUM_LIT>)
    return
if "<STR_LIT>" in data or "<STR_LIT>" in data:
    raise AppNexusException(response)
self.token = data["<STR_LIT>"]
self.save_token()
return self.token
Request a new token and store it for future use
f8191:c0:m4
def check_errors(self, response, data):
if "<STR_LIT>" in data:<EOL><INDENT>error_id = data["<STR_LIT>"]<EOL>if error_id in self.error_ids:<EOL><INDENT>raise self.error_ids[error_id](response)<EOL><DEDENT><DEDENT>if "<STR_LIT>" in data:<EOL><INDENT>error_code = data["<STR_LIT>"]<EOL>if error_code in self.error_codes:<EOL><INDENT>raise self.error_codes[error_code](response)<EOL><DEDENT><DEDENT>if "<STR_LIT>" in data or "<STR_LIT>" in data:<EOL><INDENT>raise AppNexusException(response)<EOL><DEDENT>
Check for errors and raise an appropriate error if needed
f8191:c0:m5
def get(self, service_name, **kwargs):
return self._send(requests.get, service_name, **kwargs)
Retrieve data from AppNexus API
f8191:c0:m6
def modify(self, service_name, json, **kwargs):
return self._send(requests.put, service_name, json, **kwargs)
Modify an AppNexus object
f8191:c0:m7
def create(self, service_name, json, **kwargs):
return self._send(requests.post, service_name, json, **kwargs)
Create a new AppNexus object
f8191:c0:m8
def delete(self, service_name, *ids, **kwargs):
return self._send(requests.delete, service_name, id=ids, **kwargs)
Delete an AppNexus object
f8191:c0:m9
def meta(self, service_name):
return self.get(service_name + "<STR_LIT>")
Retrieve meta-informations about a service
f8191:c0:m11
def __init__(self, client, service_name, representation, **specs):
if client is None or service_name is None:
    raise RuntimeError("<STR_LIT>")
if representation is None or not callable(representation):
    raise TypeError("<STR_LIT>")
self.client = client
self.service_name = service_name
self.representation = representation
self.specs = specs
self.retrieved = <NUM_LIT:0>
self._skip = <NUM_LIT:0>
self._limit = float('<STR_LIT>')
Initialize the object.

:param client: an AppNexusClient instance
:param service_name: the service to which the request was made
:param representation: a callable used to wrap each returned element (it receives the client, the service name, and the raw element)
:param specs: The specifications sent to AppNexus with the request
f8193:c0:m0
def __len__(self):
return self.count()
Returns the number of elements matching the specifications
f8193:c0:m1
def __getitem__(self, idx):
page = self.get_page(num_elements=<NUM_LIT:1>, start_element=idx)
data = self.extract_data(page)
return data[<NUM_LIT:0>]
Returns the nth element matching the specifications
f8193:c0:m2
def __iter__(self):
for page in self.iter_pages():
    data = self.extract_data(page)
    if self._skip >= len(data):
        self._skip -= len(data)
        continue
    elif self._skip:
        # Slice off the skipped elements before resetting the counter (the
        # original reset self._skip to 0 first, which made the slice a no-op).
        data = data[self._skip:]
        self._skip = <NUM_LIT:0>
    lasting = self._limit - self.retrieved
    if not lasting:
        break
    elif lasting < len(data):
        data = data[:lasting]
    for entity in data:
        self.retrieved += <NUM_LIT:1>
        yield entity
Iterate over all AppNexus objects matching the specifications
f8193:c0:m3
def extract_data(self, page):
response_keys = set(page.keys())
uncommon_keys = response_keys - self.common_keys
for possible_data_key in uncommon_keys:
    element = page[possible_data_key]
    if isinstance(element, dict):
        return [self.representation(self.client, self.service_name,
                                    element)]
    if isinstance(element, list):
        return [self.representation(self.client, self.service_name, x)
                for x in element]
Extract the AppNexus object or list of objects from the response
f8193:c0:m4
@property
def first(self):
page = self.get_page(num_elements=<NUM_LIT:1>)
data = self.extract_data(page)
if data:
    return data[<NUM_LIT:0>]
Extract the first AppNexus object present in the response
f8193:c0:m5
def get_page(self, start_element=<NUM_LIT:0>, num_elements=None):
if num_elements is None:
    num_elements = self.batch_size
specs = self.specs.copy()
specs.update(start_element=start_element, num_elements=num_elements)
return self.client.get(self.service_name, **specs)
Get a page (100 elements) starting from `start_element`
f8193:c0:m6
def count(self):
return self.get_page(num_elements=<NUM_LIT:1>)["<STR_LIT:count>"]
Returns the number of elements matching the specifications
f8193:c0:m8
def limit(self, number):
self._limit = number
return self
Limit the cursor to retrieve at most `number` elements
f8193:c0:m10
def skip(self, number):
self._skip = number
return self
Skip the first `number` elements of the cursor
f8193:c0:m11
def size(self):
initial_count = self.count()
count_with_skip = max(<NUM_LIT:0>, initial_count - self._skip)
size = min(count_with_skip, self._limit)
return size
Return the number of elements of the cursor with skip and limit
f8193:c0:m12
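A hypothetical usage of the cursor above, chaining skip and limit before iteration (the cursor variable is assumed to come from a prior service call):

for campaign in cursor.skip(100).limit(50):
    print(campaign)
print(cursor.size())  # at most 50, after accounting for the skip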
@staticmethod
def read(text):
return EmailMessage(text).read()
Factory method that splits email into list of fragments.

text - A string email body

Returns an EmailMessage instance
f8199:c0:m0
@staticmethod
def parse_reply(text):
return EmailReplyParser.read(text).reply
Provides the reply portion of email.

text - A string email body

Returns reply body message
f8199:c0:m1
def read(self):
self.found_visible = False
is_multi_quote_header = self.MULTI_QUOTE_HDR_REGEX_MULTILINE.search(self.text)
if is_multi_quote_header:
    self.text = self.MULTI_QUOTE_HDR_REGEX.sub(
        is_multi_quote_header.groups()[<NUM_LIT:0>].replace('<STR_LIT:\n>', '<STR_LIT>'), self.text)
# Pass MULTILINE as flags=; the original passed it positionally, where
# re.sub interprets the fourth argument as `count`, not `flags`.
self.text = re.sub('<STR_LIT>', '<STR_LIT>', self.text, flags=re.MULTILINE)
self.lines = self.text.split('<STR_LIT:\n>')
self.lines.reverse()
for line in self.lines:
    self._scan_line(line)
self._finish_fragment()
self.fragments.reverse()
return self
Creates new fragment for each line and labels it as a signature, quote, or hidden.

Returns EmailMessage instance
f8199:c1:m1
@property
def reply(self):
reply = []
for f in self.fragments:
    if not (f.hidden or f.quoted):
        reply.append(f.content)
return '<STR_LIT:\n>'.join(reply)
Captures reply message within email
f8199:c1:m2
def _scan_line(self, line):
is_quote_header = self.QUOTE_HDR_REGEX.match(line) is not None
is_quoted = self.QUOTED_REGEX.match(line) is not None
is_header = is_quote_header or self.HEADER_REGEX.match(line) is not None
if self.fragment and len(line.strip()) == <NUM_LIT:0>:
    if self.SIG_REGEX.match(self.fragment.lines[-<NUM_LIT:1>].strip()):
        self.fragment.signature = True
        self._finish_fragment()
if self.fragment and (
        (self.fragment.headers == is_header and self.fragment.quoted == is_quoted) or
        (self.fragment.quoted and (is_quote_header or len(line.strip()) == <NUM_LIT:0>))):
    self.fragment.lines.append(line)
else:
    self._finish_fragment()
    self.fragment = Fragment(is_quoted, line, headers=is_header)
Reviews each line in email message and determines fragment type.

line - a row of text from an email message
f8199:c1:m3
def quote_header(self, line):
return self.QUOTE_HDR_REGEX.match(line[::-<NUM_LIT:1>]) is not None
Determines whether line is part of a quoted area.

line - a row of the email message

Returns True or False
f8199:c1:m4
def _finish_fragment(self):
if self.fragment:
    self.fragment.finish()
    if self.fragment.headers:
        self.found_visible = False
        for f in self.fragments:
            f.hidden = True
    if not self.found_visible:
        if (self.fragment.quoted
                or self.fragment.headers
                or self.fragment.signature
                or (len(self.fragment.content.strip()) == <NUM_LIT:0>)):
            self.fragment.hidden = True
        else:
            self.found_visible = True
    self.fragments.append(self.fragment)
self.fragment = None
Creates fragment
f8199:c1:m5
def finish(self):
self.lines.reverse()
self._content = '<STR_LIT:\n>'.join(self.lines)
self.lines = None
Creates block of content with lines belonging to fragment.
f8199:c2:m1
def get_email(self, name):
with open('<STR_LIT>' % name) as f:
    text = f.read()
return EmailReplyParser.read(text)
Return EmailMessage instance
f8200:c0:m21
def register_compressor(ext, callback):
if not (ext and ext[<NUM_LIT:0>] == '<STR_LIT:.>'):
    raise ValueError('<STR_LIT>' % ext)
if ext in _COMPRESSOR_REGISTRY:
    logger.warning('<STR_LIT>', ext)
_COMPRESSOR_REGISTRY[ext] = callback
Register a callback for transparently decompressing files with a specific extension.

Parameters
----------
ext: str
    The extension.
callback: callable
    The callback. It must accept two positional arguments, file_obj and mode.

Examples
--------
Instruct smart_open to use the identity function whenever opening a file
with a .xz extension (see README.rst for the complete example showing I/O):

>>> def _handle_xz(file_obj, mode):
...     import lzma
...     return lzma.LZMAFile(filename=file_obj, mode=mode, format=lzma.FORMAT_XZ)
>>>
>>> register_compressor('.xz', _handle_xz)
f8202:m0
def _check_kwargs(kallable, kwargs):
supported_keywords = sorted(_inspect_kwargs(kallable))
unsupported_keywords = [k for k in sorted(kwargs) if k not in supported_keywords]
supported_kwargs = {k: v for (k, v) in kwargs.items() if k in supported_keywords}
if unsupported_keywords:
    logger.warning('<STR_LIT>', unsupported_keywords)
return supported_kwargs
Check which keyword arguments the callable supports.

Parameters
----------
kallable: callable
    A function or method to test.
kwargs: dict
    The keyword arguments to check. If the callable doesn't support any of
    these, a warning message will get printed.

Returns
-------
dict
    A dictionary of argument names and values supported by the callable.
f8202:m4
def open(
    uri,
    mode='<STR_LIT:r>',
    buffering=-<NUM_LIT:1>,
    encoding=None,
    errors=None,
    newline=None,
    closefd=True,
    opener=None,
    ignore_ext=False,
    transport_params=None,
):
logger.debug('<STR_LIT>', locals())
if not isinstance(mode, six.string_types):
    raise TypeError('<STR_LIT>')
if transport_params is None:
    transport_params = {}
fobj = _shortcut_open(
    uri,
    mode,
    ignore_ext=ignore_ext,
    buffering=buffering,
    encoding=encoding,
    errors=errors,
)
if fobj is not None:
    return fobj
if encoding is not None and '<STR_LIT:b>' in mode:
    mode = mode.replace('<STR_LIT:b>', '<STR_LIT>')
if PATHLIB_SUPPORT and isinstance(uri, pathlib.Path):
    uri = str(uri)
explicit_encoding = encoding
encoding = explicit_encoding if explicit_encoding else SYSTEM_ENCODING
try:
    binary_mode = {'<STR_LIT:r>': '<STR_LIT:rb>', '<STR_LIT>': '<STR_LIT>',
                   '<STR_LIT:w>': '<STR_LIT:wb>', '<STR_LIT>': '<STR_LIT>',
                   '<STR_LIT:a>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>'}[mode]
except KeyError:
    binary_mode = mode
binary, filename = _open_binary_stream(uri, binary_mode, transport_params)
if ignore_ext:
    decompressed = binary
else:
    decompressed = _compression_wrapper(binary, filename, mode)
if '<STR_LIT:b>' not in mode or explicit_encoding is not None:
    decoded = _encoding_wrapper(decompressed, mode, encoding=encoding, errors=errors)
else:
    decoded = decompressed
return decoded
r"""Open the URI object, returning a file-like object.

The URI is usually a string in a variety of formats:

1. a URI for the local filesystem: `./lines.txt`, `/home/joe/lines.txt.gz`,
   `file:///home/joe/lines.txt.bz2`
2. a URI for HDFS: `hdfs:///some/path/lines.txt`
3. a URI for Amazon's S3 (can also supply credentials inside the URI):
   `s3://my_bucket/lines.txt`, `s3://my_aws_key_id:key_secret@my_bucket/lines.txt`

The URI may also be one of:

- an instance of the pathlib.Path class
- a stream (anything that implements io.IOBase-like functionality)

This function supports transparent compression and decompression using the
following codecs:

- ``.gz``
- ``.bz2``

The function depends on the file extension to determine the appropriate codec.

Parameters
----------
uri: str or object
    The object to open.
mode: str, optional
    Mimicks built-in open parameter of the same name.
buffering: int, optional
    Mimicks built-in open parameter of the same name.
encoding: str, optional
    Mimicks built-in open parameter of the same name.
errors: str, optional
    Mimicks built-in open parameter of the same name.
newline: str, optional
    Mimicks built-in open parameter of the same name.
closefd: boolean, optional
    Mimicks built-in open parameter of the same name. Ignored.
opener: object, optional
    Mimicks built-in open parameter of the same name. Ignored.
ignore_ext: boolean, optional
    Disable transparent compression/decompression based on the file extension.
transport_params: dict, optional
    Additional parameters for the transport layer (see notes below).

Returns
-------
A file-like object.

Notes
-----
smart_open has several implementations for its transport layer (e.g. S3,
HTTP). Each transport layer has a different set of keyword arguments for
overriding default behavior. If you specify a keyword argument that is
*not* supported by the transport layer being used, smart_open will ignore
that argument and log a warning message.

S3 (for details, see :mod:`smart_open.s3` and :func:`smart_open.s3.open`):
%(s3)s
HTTP (for details, see :mod:`smart_open.http` and :func:`smart_open.http.open`):
%(http)s
WebHDFS (for details, see :mod:`smart_open.webhdfs` and :func:`smart_open.webhdfs.open`):
%(webhdfs)s
SSH (for details, see :mod:`smart_open.ssh` and :func:`smart_open.ssh.open`):
%(ssh)s

Examples
--------
%(examples)s

See Also
--------
- `Standard library reference <https://docs.python.org/3.7/library/functions.html#open>`__
- `smart_open README.rst <https://github.com/RaRe-Technologies/smart_open/blob/master/README.rst>`__
f8202:m5
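Typical calls, following the URI formats enumerated above (the bucket and file names are placeholders):

from smart_open import open

# Transparent gzip decompression, selected by the .gz extension.
with open('/home/joe/lines.txt.gz') as fin:
    for line in fin:
        print(line)

# Reading from S3; credentials may also be embedded in the URI.
with open('s3://my_bucket/lines.txt', 'rb') as fin:
    data = fin.read()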
def smart_open(uri, mode="<STR_LIT:rb>", **kw):
logger.warning('<STR_LIT>')
ignore_extension = kw.pop('<STR_LIT>', False)
expected_kwargs = _inspect_kwargs(open)
scrubbed_kwargs = {}
transport_params = {}
if '<STR_LIT:host>' in kw or '<STR_LIT>' in kw:
    transport_params['<STR_LIT>'] = {}
    transport_params['<STR_LIT>'] = {}
if '<STR_LIT:host>' in kw:
    url = kw.pop('<STR_LIT:host>')
    if not url.startswith('<STR_LIT:http>'):
        url = '<STR_LIT>' + url
    transport_params['<STR_LIT>'].update(endpoint_url=url)
if '<STR_LIT>' in kw and kw['<STR_LIT>']:
    transport_params['<STR_LIT>'].update(**kw.pop('<STR_LIT>'))
if '<STR_LIT>' in kw and '<STR_LIT>' in kw:
    logger.error('<STR_LIT>')
if '<STR_LIT>' in kw:
    transport_params['<STR_LIT>'] = boto3.Session(profile_name=kw.pop('<STR_LIT>'))
if '<STR_LIT>' in kw:
    transport_params['<STR_LIT>'] = kw.pop('<STR_LIT>')
for key, value in kw.items():
    if key in expected_kwargs:
        scrubbed_kwargs[key] = value
    else:
        transport_params[key] = value
return open(uri, mode, ignore_ext=ignore_extension,
            transport_params=transport_params, **scrubbed_kwargs)
Deprecated, use smart_open.open instead.
f8202:m6
def _shortcut_open(
    uri,
    mode,
    ignore_ext=False,
    buffering=-<NUM_LIT:1>,
    encoding=None,
    errors=None,
):
if not isinstance(uri, six.string_types):
    return None
parsed_uri = _parse_uri(uri)
if parsed_uri.scheme != '<STR_LIT:file>':
    return None
_, extension = P.splitext(parsed_uri.uri_path)
if extension in _COMPRESSOR_REGISTRY and not ignore_ext:
    return None
open_kwargs = {}
if encoding is not None:
    open_kwargs['<STR_LIT>'] = encoding
    mode = mode.replace('<STR_LIT:b>', '<STR_LIT>')
if errors and '<STR_LIT:b>' not in mode:
    open_kwargs['<STR_LIT>'] = errors
if six.PY3:
    return _builtin_open(parsed_uri.uri_path, mode, buffering=buffering, **open_kwargs)
elif not open_kwargs:
    return _builtin_open(parsed_uri.uri_path, mode, buffering=buffering)
return io.open(parsed_uri.uri_path, mode, buffering=buffering, **open_kwargs)
Try to open the URI using the standard library io.open function.

This can be much faster than the alternative of opening in binary mode and
then decoding. This is only possible under the following conditions:

1. Opening a local file
2. Ignore extension is set to True

If it is not possible to use the built-in open for the specified URI,
returns None.

:param str uri: A string indicating what to open.
:param str mode: The mode to pass to the open function.
:returns: The opened file
:rtype: file
f8202:m7
def _open_binary_stream(uri, mode, transport_params):
if mode not in ('<STR_LIT:rb>', '<STR_LIT>', '<STR_LIT:wb>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>raise NotImplementedError('<STR_LIT>' % mode)<EOL><DEDENT>if isinstance(uri, six.string_types):<EOL><INDENT>filename = uri.split('<STR_LIT:/>')[-<NUM_LIT:1>]<EOL>parsed_uri = _parse_uri(uri)<EOL>unsupported = "<STR_LIT>" % (mode, parsed_uri.scheme)<EOL>if parsed_uri.scheme == "<STR_LIT:file>":<EOL><INDENT>fobj = io.open(parsed_uri.uri_path, mode)<EOL>return fobj, filename<EOL><DEDENT>elif parsed_uri.scheme in smart_open_ssh.SCHEMES:<EOL><INDENT>fobj = smart_open_ssh.open(<EOL>parsed_uri.uri_path,<EOL>mode,<EOL>host=parsed_uri.host,<EOL>user=parsed_uri.user,<EOL>port=parsed_uri.port,<EOL>)<EOL>return fobj, filename<EOL><DEDENT>elif parsed_uri.scheme in smart_open_s3.SUPPORTED_SCHEMES:<EOL><INDENT>return _s3_open_uri(parsed_uri, mode, transport_params), filename<EOL><DEDENT>elif parsed_uri.scheme == "<STR_LIT>":<EOL><INDENT>_check_kwargs(smart_open_hdfs.open, transport_params)<EOL>return smart_open_hdfs.open(parsed_uri.uri_path, mode), filename<EOL><DEDENT>elif parsed_uri.scheme == "<STR_LIT>":<EOL><INDENT>kw = _check_kwargs(smart_open_webhdfs.open, transport_params)<EOL>return smart_open_webhdfs.open(parsed_uri.uri_path, mode, **kw), filename<EOL><DEDENT>elif parsed_uri.scheme.startswith('<STR_LIT:http>'):<EOL><INDENT>filename = P.basename(urlparse.urlparse(uri).path)<EOL>kw = _check_kwargs(smart_open_http.open, transport_params)<EOL>return smart_open_http.open(uri, mode, **kw), filename<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError("<STR_LIT>", parsed_uri.scheme)<EOL><DEDENT><DEDENT>elif hasattr(uri, '<STR_LIT>'):<EOL><INDENT>filename = getattr(uri, '<STR_LIT:name>', '<STR_LIT>')<EOL>return uri, filename<EOL><DEDENT>else:<EOL><INDENT>raise TypeError("<STR_LIT>" % uri)<EOL><DEDENT>
Open an arbitrary URI in the specified binary mode.

Not all modes are supported for all protocols.

:arg uri: The URI to open. May be a string, or an already-open file-like object.
:arg str mode: The mode to open with. Must be rb, wb, or ab, optionally with a '+' suffix.
:arg transport_params: Keyword arguments for the transport layer.
:returns: A file object and the filename
:rtype: tuple
f8202:m8
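The final branch accepts already-open file objects via duck typing; a minimal sketch, assuming the probed attribute is 'read':

import io

def binary_stream_and_name(obj):
    # Anything with a read attribute passes straight through;
    # the filename is recovered on a best-effort basis.
    if hasattr(obj, 'read'):
        return obj, getattr(obj, 'name', 'unknown')
    raise TypeError("don't know how to handle uri %r" % obj)

fobj, name = binary_stream_and_name(io.BytesIO(b'hello'))
print(name)  # 'unknown' -- io.BytesIO carries no .name attribute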
def _my_urlsplit(url):
if '<STR_LIT:?>' not in url:<EOL><INDENT>return urlsplit(url, allow_fragments=False)<EOL><DEDENT>sr = urlsplit(url.replace('<STR_LIT:?>', '<STR_LIT:\n>'), allow_fragments=False)<EOL>SplitResult = collections.namedtuple('<STR_LIT>', '<STR_LIT>')<EOL>return SplitResult(sr.scheme, sr.netloc, sr.path.replace('<STR_LIT:\n>', '<STR_LIT:?>'), '<STR_LIT>', '<STR_LIT>')<EOL>
This is a hack to prevent the regular urlsplit from splitting around question marks.

A question mark (?) in a URL typically indicates the start of a querystring, and the standard library's urlparse function handles the querystring separately. Unfortunately, question marks can also appear _inside_ the actual URL for some schemes, like S3.

Replaces question marks with newlines prior to splitting. This is safe because:

1. The standard library's urlsplit completely ignores newlines
2. Raw newlines will never occur in innocuous URLs. They are always URL-encoded.

See Also
--------
https://github.com/python/cpython/blob/3.7/Lib/urllib/parse.py
https://github.com/RaRe-Technologies/smart_open/issues/285
f8202:m11
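To see why the newline trick is safe, compare the stock urlsplit with a standalone re-implementation of the function above (the scrubbed literals filled with the obvious values) on an S3 key that contains question marks:

import collections
from six.moves.urllib.parse import urlsplit

def my_urlsplit(url):
    if '?' not in url:
        return urlsplit(url, allow_fragments=False)
    # Newlines never appear raw in valid URLs and are not split on,
    # so they make a safe temporary stand-in for the question mark.
    sr = urlsplit(url.replace('?', '\n'), allow_fragments=False)
    SplitResult = collections.namedtuple('SplitResult', 'scheme netloc path query fragment')
    return SplitResult(sr.scheme, sr.netloc, sr.path.replace('\n', '?'), '', '')

url = 's3://bucket/key?with?questions'
print(urlsplit(url).path)     # '/key' -- everything after '?' became the query
print(my_urlsplit(url).path)  # '/key?with?questions' -- key preserved intact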
def _parse_uri(uri_as_string):
if os.name == '<STR_LIT>':<EOL><INDENT>if '<STR_LIT>' not in uri_as_string:<EOL><INDENT>uri_as_string = '<STR_LIT>' + uri_as_string<EOL><DEDENT><DEDENT>parsed_uri = _my_urlsplit(uri_as_string)<EOL>if parsed_uri.scheme == "<STR_LIT>":<EOL><INDENT>return _parse_uri_hdfs(parsed_uri)<EOL><DEDENT>elif parsed_uri.scheme == "<STR_LIT>":<EOL><INDENT>return _parse_uri_webhdfs(parsed_uri)<EOL><DEDENT>elif parsed_uri.scheme in smart_open_s3.SUPPORTED_SCHEMES:<EOL><INDENT>return _parse_uri_s3x(parsed_uri)<EOL><DEDENT>elif parsed_uri.scheme == '<STR_LIT:file>':<EOL><INDENT>return _parse_uri_file(parsed_uri.netloc + parsed_uri.path)<EOL><DEDENT>elif parsed_uri.scheme in ('<STR_LIT>', None):<EOL><INDENT>return _parse_uri_file(uri_as_string)<EOL><DEDENT>elif parsed_uri.scheme.startswith('<STR_LIT:http>'):<EOL><INDENT>return Uri(scheme=parsed_uri.scheme, uri_path=uri_as_string)<EOL><DEDENT>elif parsed_uri.scheme in smart_open_ssh.SCHEMES:<EOL><INDENT>return _parse_uri_ssh(parsed_uri)<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError(<EOL>"<STR_LIT>" % (parsed_uri.scheme, uri_as_string)<EOL>)<EOL><DEDENT>
Parse the given URI from a string.

Supported URI schemes are:

* file
* hdfs
* http
* https
* s3
* s3a
* s3n
* s3u
* webhdfs

s3, s3a and s3n are treated the same way. s3u is s3 but without SSL.

Valid URI examples::

* s3://my_bucket/my_key
* s3://my_key:my_secret@my_bucket/my_key
* s3://my_key:my_secret@my_server:my_port@my_bucket/my_key
* hdfs:///path/file
* hdfs://path/file
* webhdfs://host:port/path/file
* ./local/path/file
* ~/local/path/file
* local/path/file
* ./local/path/file.gz
* file:///home/user/file
* file:///home/user/file.bz2
* [ssh|scp|sftp]://username@host//path/file
* [ssh|scp|sftp]://username@host/path/file
f8202:m12
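The guard at the top of the function exists because urlsplit parses a bare Windows drive letter as a URI scheme; prefixing a file scheme (presumably what the scrubbed literals do) sidesteps that:

from six.moves.urllib.parse import urlsplit

print(urlsplit('C:/Users/me/data.txt').scheme)         # 'c' -- drive letter mistaken for a scheme
print(urlsplit('file://C:/Users/me/data.txt').scheme)  # 'file'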
def _parse_uri_ssh(unt):
if '<STR_LIT:@>' in unt.netloc:<EOL><INDENT>user, host_port = unt.netloc.split('<STR_LIT:@>', <NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>user, host_port = None, unt.netloc<EOL><DEDENT>if '<STR_LIT::>' in host_port:<EOL><INDENT>host, port = host_port.split('<STR_LIT::>', <NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>host, port = host_port, None<EOL><DEDENT>if not user:<EOL><INDENT>user = None<EOL><DEDENT>if not port:<EOL><INDENT>port = smart_open_ssh.DEFAULT_PORT<EOL><DEDENT>else:<EOL><INDENT>port = int(port)<EOL><DEDENT>return Uri(scheme=unt.scheme, uri_path=unt.path, user=user, host=host, port=port)<EOL>
Parse an ssh/scp/sftp Uri from a urllib SplitResult namedtuple, extracting the optional user and port from the netloc.
f8202:m17
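A minimal standalone sketch of the netloc splitting, with an assumed default port of 22:

def split_ssh_netloc(netloc, default_port=22):  # 22 is an assumed default
    # user@host:port, where both user and port are optional.
    user, host_port = netloc.split('@', 1) if '@' in netloc else (None, netloc)
    host, port = host_port.split(':', 1) if ':' in host_port else (host_port, None)
    return user or None, host, int(port) if port else default_port

print(split_ssh_netloc('alice@example.com:2222'))  # ('alice', 'example.com', 2222)
print(split_ssh_netloc('example.com'))             # (None, 'example.com', 22)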
def _need_to_buffer(file_obj, mode, ext):
try:<EOL><INDENT>is_seekable = file_obj.seekable()<EOL><DEDENT>except AttributeError:<EOL><INDENT>is_seekable = hasattr(file_obj, '<STR_LIT>')<EOL><DEDENT>return six.PY2 and mode.startswith('<STR_LIT:r>') and ext in _COMPRESSOR_REGISTRY and not is_seekable<EOL>
Returns True if we need to buffer the whole file in memory in order to proceed: under Python 2, reading through a [de]compression wrapper requires a seekable stream, so a non-seekable file object with a registered compressed extension must be buffered first.
f8202:m18
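The seekability probe degrades gracefully for older file objects; a sketch:

import io

def is_seekable(fileobj):
    # Pre-io file-like objects may not implement seekable();
    # fall back to probing for a seek attribute instead.
    try:
        return fileobj.seekable()
    except AttributeError:
        return hasattr(fileobj, 'seek')

class StreamingBody(object):  # stand-in for a non-seekable network stream
    def read(self, size=-1):
        return b''

print(is_seekable(io.BytesIO(b'')))  # True
print(is_seekable(StreamingBody()))  # False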
def _compression_wrapper(file_obj, filename, mode):
_, ext = os.path.splitext(filename)<EOL>if _need_to_buffer(file_obj, mode, ext):<EOL><INDENT>warnings.warn('<STR_LIT>' % _ISSUE_189_URL)<EOL>file_obj = io.BytesIO(file_obj.read())<EOL><DEDENT>if ext in _COMPRESSOR_REGISTRY and mode.endswith('<STR_LIT:+>'):<EOL><INDENT>raise ValueError('<STR_LIT>' % mode)<EOL><DEDENT>try:<EOL><INDENT>callback = _COMPRESSOR_REGISTRY[ext]<EOL><DEDENT>except KeyError:<EOL><INDENT>return file_obj<EOL><DEDENT>else:<EOL><INDENT>return callback(file_obj, mode)<EOL><DEDENT>
Wrap file_obj with an appropriate [de]compression mechanism based on the extension of the filename. file_obj must be a filehandle object, or an object which behaves like one. If the filename extension isn't recognized, simply returns the original file_obj unchanged.
f8202:m19
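A sketch of the underlying registry pattern, assuming a gzip handler registered under a '.gz' key (the real registry contents are scrubbed in this listing):

import gzip
import os.path as P

_COMPRESSOR_REGISTRY = {}

def register_compressor(ext, callback):
    # Map a filename extension to a stream-wrapping callback.
    _COMPRESSOR_REGISTRY[ext] = callback

register_compressor('.gz', lambda fobj, mode: gzip.GzipFile(fileobj=fobj, mode=mode))

def compression_wrapper(file_obj, filename, mode):
    _, ext = P.splitext(filename)
    callback = _COMPRESSOR_REGISTRY.get(ext)
    return callback(file_obj, mode) if callback else file_obj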
def _encoding_wrapper(fileobj, mode, encoding=None, errors=None):
logger.debug('<STR_LIT>', locals())<EOL>if '<STR_LIT:b>' in mode and encoding is None:<EOL><INDENT>return fileobj<EOL><DEDENT>if encoding is None:<EOL><INDENT>encoding = SYSTEM_ENCODING<EOL><DEDENT>kw = {'<STR_LIT>': errors} if errors else {}<EOL>if mode[<NUM_LIT:0>] == '<STR_LIT:r>' or mode.endswith('<STR_LIT:+>'):<EOL><INDENT>fileobj = codecs.getreader(encoding)(fileobj, **kw)<EOL><DEDENT>if mode[<NUM_LIT:0>] in ('<STR_LIT:w>', '<STR_LIT:a>') or mode.endswith('<STR_LIT:+>'):<EOL><INDENT>fileobj = codecs.getwriter(encoding)(fileobj, **kw)<EOL><DEDENT>return fileobj<EOL>
Decode bytes into text, if necessary.

If mode specifies binary access, does nothing unless an encoding is also specified: a non-null encoding implies text mode.

:arg fileobj: Must quack like a filehandle object.
:arg str mode: The mode originally requested by the user.
:arg str encoding: The text encoding to use. If set, overrides a binary mode.
:arg str errors: The method to use when handling encoding/decoding errors.
:returns: A file object
f8202:m20
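The wrapping itself relies on the standard codecs module; a minimal demonstration, assuming UTF-8:

import codecs
import io

raw = io.BytesIO(u'd\u00fcsseldorf\n'.encode('utf-8'))
reader = codecs.getreader('utf-8')(raw, errors='replace')
print(reader.read())  # unicode text rather than bytes

Note that an update mode ('r+', 'w+') receives both a reader and a writer wrapper in the function above.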
def extract_kwargs(docstring):
lines = inspect.cleandoc(docstring).split('<STR_LIT:\n>')<EOL>retval = []<EOL>while lines[<NUM_LIT:0>] != '<STR_LIT>':<EOL><INDENT>lines.pop(<NUM_LIT:0>)<EOL><DEDENT>lines.pop(<NUM_LIT:0>)<EOL>lines.pop(<NUM_LIT:0>)<EOL>while lines and lines[<NUM_LIT:0>]:<EOL><INDENT>name, type_ = lines.pop(<NUM_LIT:0>).split('<STR_LIT::>', <NUM_LIT:1>)<EOL>description = []<EOL>while lines and lines[<NUM_LIT:0>].startswith('<STR_LIT:U+0020>'):<EOL><INDENT>description.append(lines.pop(<NUM_LIT:0>).strip())<EOL><DEDENT>if '<STR_LIT>' in type_:<EOL><INDENT>retval.append((name.strip(), type_.strip(), description))<EOL><DEDENT><DEDENT>return retval<EOL>
Extract keyword argument documentation from a function's docstring.

Parameters
----------
docstring: str
    The docstring to extract keyword arguments from.

Returns
-------
list of (str, str, list str)

    str
        The name of the keyword argument.
    str
        Its type.
    str
        Its documentation as a list of lines.

Notes
-----
The implementation is rather fragile. It expects the following:

1. The parameters are under an underlined Parameters section
2. Keyword parameters have the literal ", optional" after the type
3. Names and types are not indented
4. Descriptions are indented with 4 spaces
5. The Parameters section ends with an empty line.

Examples
--------

>>> docstring = '''The foo function.
... Parameters
... ----------
... bar: str, optional
...     This parameter is the bar.
... baz: int, optional
...     This parameter is the baz.
...
... '''
>>> kwargs = extract_kwargs(docstring)
>>> kwargs[0]
('bar', 'str, optional', ['This parameter is the bar.'])
f8203:m0
def to_docstring(kwargs, lpad='<STR_LIT>'):
buf = io.StringIO()<EOL>for name, type_, description in kwargs:<EOL><INDENT>buf.write('<STR_LIT>' % (lpad, name, type_))<EOL>for line in description:<EOL><INDENT>buf.write('<STR_LIT>' % (lpad, line))<EOL><DEDENT><DEDENT>return buf.getvalue()<EOL>
Reconstruct a docstring from keyword argument info.

Basically reverses :func:`extract_kwargs`.

Parameters
----------
kwargs: list
    Output from the extract_kwargs function
lpad: str, optional
    Padding string (from the left).

Returns
-------
str
    The docstring snippet documenting the keyword arguments.

Examples
--------

>>> kwargs = [
...     ('bar', 'str, optional', ['This parameter is the bar.']),
...     ('baz', 'int, optional', ['This parameter is the baz.']),
... ]
>>> print(to_docstring(kwargs), end='')
bar: str, optional
    This parameter is the bar.
baz: int, optional
    This parameter is the baz.
f8203:m1
def extract_examples_from_readme_rst(indent='<STR_LIT:U+0020>'):
curr_dir = os.path.dirname(os.path.abspath(__file__))<EOL>readme_path = os.path.join(curr_dir, '<STR_LIT:..>', '<STR_LIT>')<EOL>try:<EOL><INDENT>with open(readme_path) as fin:<EOL><INDENT>lines = list(fin)<EOL><DEDENT>start = lines.index('<STR_LIT>')<EOL>end = lines.index("<STR_LIT>")<EOL>lines = lines[start+<NUM_LIT:4>:end-<NUM_LIT:2>]<EOL>return '<STR_LIT>'.join([indent + re.sub('<STR_LIT>', '<STR_LIT>', l) for l in lines])<EOL><DEDENT>except Exception:<EOL><INDENT>return indent + '<STR_LIT>'<EOL><DEDENT>
Extract examples from this project's README.rst file.

Parameters
----------
indent: str
    Prepend each line with this string. Should contain some number of spaces.

Returns
-------
str
    The examples.

Notes
-----
Quite fragile, depends on named labels inside the README.rst file.
f8203:m2
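A toy reproduction of the label-based slicing, with hypothetical label names standing in for the scrubbed literals (the indent and regex post-processing are omitted):

lines = ['.. _start:\n', '\n', 'Examples\n', '--------\n',
         '>>> from smart_open import open\n', '\n', '\n', '.. _end:\n']
start = lines.index('.. _start:\n')   # position of the opening label
end = lines.index('.. _end:\n')       # position of the closing label
print(''.join(lines[start + 4:end - 2]))  # keeps only the example body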