INSTRUCTION
stringlengths
1
8.43k
RESPONSE
stringlengths
75
104k
Create event in calendar with sms reminder.
def create_event(options, config, credentials):
    """Create a Google Calendar event with an SMS reminder.

    Parameters
    ----------
    options: parsed CLI options — provides `message`, `calendar`, `quiet`
    config: mapping — `config["message"]` is the reminder lead time in minutes
    credentials: OAuth2 credentials used to authorize the HTTP client

    Exits the process with status -1 on any failure.
    """
    try:
        http = credentials.authorize(httplib2.Http())
        service = build("calendar", "v3", http=http)
        event = {
            "summary": options.message,
            "location": "",
            "reminders": {
                "useDefault": False,
                "overrides": [
                    {
                        "method": "sms",
                        # minutes before event start; taken from config — TODO confirm key name
                        "minutes": config["message"],
                    },
                ],
            }
        }
        event.update(create_event_datetimes(options, config))
        service.events().insert(calendarId=options.calendar,
                                sendNotifications=True,
                                body=event).execute()
    # BUG FIX: 'except Exception, err' is Python 2-only syntax; 'as' works on 2.6+ and 3.x
    except Exception as err:
        if not options.quiet:
            sys.stderr.write("ERROR: Creating google calendar event error. {err}\n".format(err=err))
        # NOTE(review): assumed to exit even in quiet mode — confirm intended placement
        sys.exit(-1)
Processing notification call main function.
def main():
    """Entry point: parse options/config, obtain credentials, then create the event."""
    opts = parse_options()
    cfg = parse_config(opts)
    creds = get_google_credentials(opts, cfg)
    # when the user only asked to fetch credentials, stop here
    if opts.get_google_credentials:
        return
    create_event(opts, cfg, creds)
Use docstamp to create documents from the content of a CSV file or a Google Spreadsheet.
def create(input, template, field, outdir, prefix, otype, command, index, dpi, verbose, unicode_support):
    """Use docstamp to create documents from the content of a CSV file or
    a Google Spreadsheet.

    Examples: \n
    docstamp create -i badge.csv -t badge_template.svg -o badges
    docstamp create -i badge.csv -t badge_template.svg -o ./badges -d pdf
    """
    logging.basicConfig(level=LOGGING_LVL)
    log = logging.getLogger(__name__)

    # setup verbose mode
    verbose_switch(verbose)

    input_file = input
    fields = field

    # init set of template contents
    log.debug('Reading CSV elements from {}.'.format(input_file))
    items, fieldnames = get_items_from_csv(input_file)

    # check if got any item
    if len(items) == 0:
        click.echo('Quiting because found 0 items.')
        exit(-1)

    if not fields:
        # set the number of zeros that the files will have
        n_zeros = int(math.floor(math.log10(len(items))) + 1)
    else:
        # check that fields has all valid fields
        for field_name in fields:
            if field_name not in fieldnames:
                raise ValueError('Field name {} not found in input file '
                                 ' header.'.format(field_name))

    # filter the items if index
    if index:
        myitems = {int(idx): items[int(idx)] for idx in index}
        items = myitems
        log.debug('Using the elements with index {} of the input '
                  'file.'.format(index))

    # make output folder
    if not os.path.exists(outdir):
        os.mkdir(outdir)

    # create template document model
    log.debug('Creating the template object using the file {}.'.format(template))
    template_doc = TextDocument.from_template_file(template, command)
    log.debug('Created an object of type {}.'.format(type(template_doc)))

    # let's stamp them!
    for idx in items:
        item = items[idx]

        if not len(fields):
            file_name = str(idx).zfill(n_zeros)
        else:
            field_values = []
            # BUG FIX: bare 'except:' also swallowed SystemExit/KeyboardInterrupt
            try:
                for field_name in fields:
                    field_values.append(item[field_name].replace(' ', ''))
            except Exception:
                log.exception('Could not get field {} value from'
                              ' {}'.format(field_name, item))
                exit(-1)
            else:
                file_name = '_'.join(field_values)

        log.debug('Filling template {} with values of item {}.'.format(file_name, idx))
        try:
            template_doc.fill(item)
        except Exception:
            log.exception('Error filling document for {}th item'.format(idx))
            continue

        # set output file path
        file_extension = get_extension(template)
        if prefix is None:
            basename = os.path.basename(template).replace(file_extension, '')
            file_name = basename + '_' + file_name

        file_path = os.path.join(outdir, file_name + '.' + otype)
        kwargs = {'file_type': otype, 'dpi': dpi, 'support_unicode': unicode_support}

        log.debug('Rendering file {}.'.format(file_path))
        try:
            template_doc.render(file_path, **kwargs)
        except Exception:
            log.exception('Error creating {} for {}.'.format(file_path, item))
            exit(-1)
        else:
            log.debug('Successfully rendered {}.'.format(file_path))
Return the extension of fpath.
def get_extension(filepath, check_if_exists=False):
    """Return the extension of `filepath`.

    Parameters
    ----------
    filepath: string
        File name or path
    check_if_exists: bool
        If True, raise IOError when the file does not exist.

    Returns
    -------
    str
        The extension of the file name or path (including the dot), or ''.
    """
    if check_if_exists and not os.path.exists(filepath):
        err = 'File not found: ' + filepath
        log.error(err)
        raise IOError(err)
    # BUG FIX: dropped a try/except that only re-raised; os.path.splitext
    # does not raise for string input
    return os.path.splitext(filepath)[1]
Add the extension ext to fpath if it doesn't have it.
def add_extension_if_needed(filepath, ext, check_if_exists=False):
    """Append `ext` to `filepath` unless it already ends with it.

    Parameters
    ----------
    filepath: str
        File name or path
    ext: str
        File extension
    check_if_exists: bool
        If True, raise IOError when the resulting path does not exist.

    Returns
    -------
    File name or path with extension added, if needed.
    """
    result = filepath if filepath.endswith(ext) else filepath + ext
    if check_if_exists and not os.path.exists(result):
        err = 'File not found: ' + result
        log.error(err)
        raise IOError(err)
    return result
Return a temporary file with the given suffix within dirpath. If dirpath is None will look for a temporary folder in your system.
def get_tempfile(suffix='.txt', dirpath=None):
    """Create and return a named temporary file with `suffix` inside `dirpath`.

    Parameters
    ----------
    suffix: str
        Temporary file name suffix
    dirpath: str
        Folder where the temporary file is created; when None, a system
        temporary folder is used.

    Returns
    -------
    An open NamedTemporaryFile object.
    """
    target_dir = get_temp_dir() if dirpath is None else dirpath
    return tempfile.NamedTemporaryFile(suffix=suffix, dir=target_dir)
Remove the files in workdir that have the given extension.
def cleanup(workdir, extension):
    """Remove the files in `workdir` that have the given extension.

    Parameters
    ----------
    workdir: str
        Folder path from where to clean the files.
    extension: str
        File extension without the dot, e.g., 'txt'
    """
    # a plain loop instead of a side-effect-only list comprehension
    for filepath in glob(os.path.join(workdir, '*.' + extension)):
        os.remove(filepath)
Convert a CSV file in csv_filepath into a JSON file in json_filepath.
def csv_to_json(csv_filepath, json_filepath, fieldnames, ignore_first_line=True):
    """Convert a CSV file in `csv_filepath` into a JSON file in `json_filepath`.

    Parameters
    ----------
    csv_filepath: str
        Path to the input CSV file.
    json_filepath: str
        Path to the output JSON file. Will be overwritten if exists.
    fieldnames: List[str]
        Names of the fields in the CSV file.
    ignore_first_line: bool
        Skip the header row when True.
    """
    import csv
    import json
    # BUG FIX: context managers guarantee both handles are closed even if
    # reading or dumping raises (the originals leaked on error)
    with open(csv_filepath, 'r') as csvfile, open(json_filepath, 'w') as jsonfile:
        reader = csv.DictReader(csvfile, fieldnames)
        if ignore_first_line:
            next(reader)
        json.dump(list(reader), jsonfile)
Write content inside the file in file_path with the given encoding. Parameters ---------- file_path: str Path to the output file. Will be overwritten if exists.
def write_to_file(file_path, content, encoding=None):
    """Write `content` inside the file in `file_path` with the given encoding.

    Parameters
    ----------
    file_path: str
        Path to the output file. Will be overwritten if exists.
    content: str
        The content you want in the file.
    encoding: str
        The name of the encoding; defaults to UTF-8 when None.
    """
    # BUG FIX: str.encode(None) raises TypeError, so every call using the
    # default encoding failed; fall back to UTF-8 explicitly
    if encoding is None:
        encoding = 'utf-8'
    try:
        with open(file_path, "wb") as f:
            f.write(content.encode(encoding))
    # narrowed from a bare 'except:' which also caught SystemExit
    except Exception:
        log.exception('Error writing to file in {}'.format(file_path))
        raise
Modify the content of filepath replacing old for new.
def replace_file_content(filepath, old, new, max=1):
    """Rewrite `filepath` replacing occurrences of `old` with `new`.

    Parameters
    ----------
    filepath: str
        Path to the file to be modified. It will be overwritten.
    old: str
        Substring to be replaced.
    new: str
        Replacement substring.
    max: int
        Maximum number of occurrences replaced (passed to str.replace).
    """
    with open(filepath, 'r') as fobj:
        text = fobj.read()
    with open(filepath, 'w') as fobj:
        fobj.write(text.replace(old, new, max))
Remove the 'tmp*.aux', 'tmp*.out' and 'tmp*.log' files in output_dir.
def cleanup_docstamp_output(output_dir=''):
    """Remove the 'tmp*.aux', 'tmp*.out' and 'tmp*.log' files in `output_dir`.

    Parameters
    ----------
    output_dir: str
        Folder to clean; defaults to the current directory.
    """
    # plain loops instead of side-effect-only list comprehensions
    for suf in ('aux', 'out', 'log'):
        for filepath in glob(os.path.join(output_dir, 'tmp*.{}'.format(suf))):
            os.remove(filepath)
Run all parsing functions.
def parse(self):
    """Run every parsing pass over the soup: span styling, anchor merging,
    then per-tag cleanup of the body."""
    span_passes = (self.create_italic, self.create_strong,
                   self.create_underline, self.unwrap_span)
    for span in self.soup.findAll('span'):
        for apply_pass in span_passes:
            apply_pass(span)
    for anchor in self.soup.findAll('a'):
        self.remove_comments(anchor)
        self.check_next(anchor)
    if self.soup.body:
        for body_tag in self.soup.body.findAll():
            self.remove_empty(body_tag)
            self.remove_inline_comment(body_tag)
            self.parse_attrs(body_tag)
            for token, target in self.tokens:
                self.find_token(body_tag, token, target)
            self.remove_blacklisted_tags(body_tag)
If next tag is link with same href combine them.
def check_next(self, tag):
    """If the next sibling is an <a> pointing at the same resolved href,
    absorb its text into this tag and blacklist the sibling."""
    sibling = tag.next_sibling
    if not (type(sibling) == element.Tag and sibling.name == 'a'):
        return
    if not (tag.get('href') and sibling.get('href')):
        return
    own_href = self._parse_href(tag.get('href'))
    sib_href = self._parse_href(sibling.get('href'))
    if own_href == sib_href:
        tag.append(sibling.get_text())
        self.tags_blacklist.append(sibling)
See if span tag has italic style and wrap with em tag.
def create_italic(self, tag):
    """Wrap the span in <em> when its inline style declares italics."""
    css = tag.get('style')
    if not css or 'font-style:italic' not in css:
        return
    tag.wrap(self.soup.new_tag('em'))
See if span tag has bold style and wrap with strong tag.
def create_strong(self, tag):
    """Wrap the span in <strong> when its inline style declares bold
    (either the keyword or weight 700)."""
    css = tag.get('style')
    if not css:
        return
    if 'font-weight:bold' in css or 'font-weight:700' in css:
        tag.wrap(self.soup.new_tag('strong'))
See if span tag has underline style and wrap with u tag.
def create_underline(self, tag):
    """Wrap the span in <u> when its inline style declares underline."""
    css = tag.get('style')
    if not css or 'text-decoration:underline' not in css:
        return
    tag.wrap(self.soup.new_tag('u'))
Reject attributes not defined in ATTR_WHITELIST.
def parse_attrs(self, tag):
    """Reject attributes not defined in ATTR_WHITELIST.

    Tags absent from the whitelist lose all attributes; whitelisted tags
    keep only their whitelisted attributes (values passed through
    self._parse_attr).
    """
    # membership test directly on the dict instead of '.keys()'
    if tag.name not in ATTR_WHITELIST:
        tag.attrs = {}
        return
    # iterate a copy because we delete from tag.attrs while looping
    attrs = copy(tag.attrs)
    for attr, value in attrs.items():
        if attr in ATTR_WHITELIST[tag.name]:
            tag.attrs[attr] = self._parse_attr(tag.name, attr, value)
        else:
            del tag.attrs[attr]
Remove non-self-closing tags with no children *and* no content.
def remove_empty(self, tag):
    """Remove non-self-closing tags that have neither children nor text."""
    if tag.is_empty_element:
        return
    if not tag.contents and not list(tag.stripped_strings):
        tag.extract()
Get a unicode string without any other content transformation, and clean extra spaces.
def clean_linebreaks(self, tag):
    """Return the tag decoded to a unicode string, with no other content
    transformation, and runs of whitespace collapsed to single spaces."""
    stripped = tag.decode(formatter=None)
    # BUG FIX: raw strings — '\s' in a plain literal is an invalid escape
    # (DeprecationWarning / SyntaxWarning on modern Python)
    stripped = re.sub(r'\s+', ' ', stripped)
    # NOTE(review): no '\n' can survive the pass above; kept for parity
    stripped = re.sub(r'\n', '', stripped)
    return stripped
Extract real URL from Google redirected url by getting q querystring parameter.
def _parse_href(self, href): """ Extract "real" URL from Google redirected url by getting `q` querystring parameter. """ params = parse_qs(urlsplit(href).query) return params.get('q')
Parse attribute. Delegate to href parser for hrefs otherwise return value.
def _parse_attr(self, tagname, attr, value): """ Parse attribute. Delegate to href parser for hrefs, otherwise return value. """ if tagname == 'a' and attr == 'href': return self._parse_href(value) else: return value
Modify the keys in adict to the ones in translations. Be careful this will modify your input dictionary. The keys not present in translations will be left intact.
def translate_key_values(adict, translations, default=''):
    """Rename keys of `adict` in place according to `translations`.

    Be careful: this mutates the input dictionary. Keys not listed in
    `translations` are left intact; missing source keys map to `default`.

    Parameters
    ----------
    adict: a dictionary
    translations: iterable of 2-tuples
        (<existing key>, <new key name>)

    Returns
    -------
    The same (mutated) dictionary.
    """
    for old_key, new_key in translations:
        adict[new_key] = adict.pop(old_key, default)
    return adict
Convert data to json string representation.
def to_json_str(self):
    """Convert the instance's attributes to a JSON string representation.

    Returns:
        JSON representation as a string, with keys sorted and a 'type'
        entry holding the class name.
    """
    adict = dict(vars(self))
    adict['type'] = self.__class__.__name__
    # BUG FIX: 'sort_keys' was passed to dict(), injecting a bogus
    # 'sort_keys': True entry into the payload; it belongs to json.dumps
    return json.dumps(adict, sort_keys=True)
Returns absolute paths of files that match the regex within folder_path and all its children folders.
def find_file_match(folder_path, regex=''):
    """Return absolute paths of files under `folder_path` (recursively)
    whose *file name* matches `regex` via re.match.

    Parameters
    ----------
    folder_path: string
    regex: string

    Returns
    -------
    A list of strings.
    """
    matches = []
    for root, _dirs, files in os.walk(folder_path):
        for fname in files:
            if re.match(regex, fname):
                matches.append(os.path.join(root, fname))
    return matches
Quick query. Convenience for using the MicroXPath engine: give it some XML and an expression and it will yield the results. No fuss. xml_thing - bytes or string, or amara3.uxml.tree node; xpath_thing - string or parsed XPath expression; vars - optional mapping of variable names to values; funcs - optional mapping of function names to function objects. Example: `from amara3.uxml.uxpath import qquery; results = qquery(b'<a>1<b>2</b>3</a>', 'a/text()')`; successive `next(results).xml_value` calls yield '1' then '3'.
def qquery(xml_thing, xpath_thing, vars=None, funcs=None):
    '''
    Quick query. Convenience for using the MicroXPath engine.
    Give it some XML and an expression and it will yield the results. No fuss.

    xml_thing - bytes or string, or amara3.xml.tree node
    xpath_thing - string or parsed XPath expression
    vars - optional mapping of variables, name to value
    funcs - optional mapping of functions, name to function object

    >>> from amara3.uxml.uxpath import qquery
    >>> results = qquery(b'<a>1<b>2</b>3</a>', 'a/text()')
    >>> next(results).xml_value
    '1'
    >>> next(results).xml_value
    '3'
    '''
    root = None
    if isinstance(xml_thing, nodetype):
        root = xml_thing
    elif isinstance(xml_thing, str):
        root = tree.treebuilder().parse(xml_thing)
    elif isinstance(xml_thing, bytes):
        # Force UTF-8
        root = tree.treebuilder().parse(xml_thing.decode('utf-8'))
    if not root:
        return
    # BUG FIX: a pre-parsed expression (documented as supported) used to
    # leave parsed_expr unbound, raising NameError
    parsed_expr = parse(xpath_thing) if isinstance(xpath_thing, str) else xpath_thing
    ctx = context(root, variables=vars, functions=funcs)
    yield from parsed_expr.compute(ctx)
FunctionCall: NAME FormalArguments
def p_function_call(p):
    """ FunctionCall : NAME FormalArguments """
    # NOTE: the docstring above is the ply grammar production — do not edit.
    # 'node' and 'text' are node-type tests, which are grammatically
    # ambiguous with function calls; special-case them here.
    name_tok = p[1]
    if name_tok in ('node', 'text'):
        p[0] = ast.NodeType(name_tok)
    else:
        p[0] = ast.FunctionCall(name_tok, p[2])
Handles LiteralObjects as well as computable arguments
def boolean_arg(ctx, obj):
    '''Coerce an argument — literal object or computable expression — to boolean.'''
    value = next(obj.compute(ctx), False) if hasattr(obj, 'compute') else obj
    return to_boolean(value)
Handles LiteralObjects as well as computable arguments
def number_arg(ctx, obj):
    '''Coerce an argument — literal object or computable expression — to a number.'''
    value = next(obj.compute(ctx), False) if hasattr(obj, 'compute') else obj
    return to_number(value)
Handles LiteralObjects as well as computable arguments
def string_arg(ctx, obj):
    '''Coerce an argument — literal object or computable expression — to a string.'''
    value = next(obj.compute(ctx), False) if hasattr(obj, 'compute') else obj
    return to_string(value)
Yields one string: a node name or the empty string, operating on the first item in the provided obj, or the current item if obj is omitted. If this item is a node, yield its node name (generic identifier), otherwise yield ''. If obj is provided but empty, yield ''.
def name(ctx, obj=None):
    '''Yield one string: the node name of the first item of `obj` (or of the
    current context item when `obj` is omitted), or the empty string when
    that item is not a node or the sequence is empty.'''
    if obj is None:
        item = ctx.item
    elif hasattr(obj, 'compute'):
        item = next(obj.compute(ctx), None)
    else:
        item = obj
    yield item.xml_name if isinstance(item, node) else ''
Yields one string, derived from the argument literal (or the first item in the argument sequence, unless empty, in which case yield '') as follows:
def string_(ctx, seq=None):
    '''Yield exactly one string for the argument (or the current context item
    when omitted), using XPath string() conversion rules:

    * nodes yield their string-value
    * NaN yields 'NaN'; +/-0 yields '0'; infinities yield 'Infinity'/'-Infinity'
    * integers have no decimal point or leading zeros; other numbers keep at
      least one digit on each side of the point
    * booleans yield 'true' or 'false'
    '''
    if seq is None:
        target = ctx.item
    else:
        computable = hasattr(seq, 'compute')
        target = next(seq.compute(ctx), '') if computable else seq
    yield next(to_string(target), '')
Yields one string concatenation of argument strings
def concat(ctx, *strings):
    '''Yield one string: the concatenation of all argument strings.'''
    computed = [s.compute(ctx) if callable(s) else s for s in strings]
    pieces = (next(string_arg(ctx, s), '') for s in flatten(computed))
    yield ''.join(pieces)
Yields one boolean whether the first string starts with the second
def starts_with(ctx, full, part):
    '''Yield one boolean: whether the first string starts with the second.'''
    whole = next(string_arg(ctx, full), '')
    prefix = next(string_arg(ctx, part), '')
    yield whole.startswith(prefix)
Yields one boolean whether the first string contains the second
def contains(ctx, full, part):
    '''Yield one boolean: whether the first string contains the second.'''
    haystack = next(string_arg(ctx, full), '')
    needle = next(string_arg(ctx, part), '')
    yield needle in haystack
Yields one string
def substring_before(ctx, full, part):
    '''Yield one string: the part of the first string before the first
    occurrence of the second ('' when absent).'''
    whole = next(string_arg(ctx, full), '')
    sep = next(string_arg(ctx, part), '')
    yield whole.partition(sep)[0]
Yields one string
def substring_after(ctx, full, part):
    '''Yield one string: the part of the first string after the first
    occurrence of the second ('' when absent... note partition returns the
    original tail).'''
    whole = next(string_arg(ctx, full), '')
    sep = next(string_arg(ctx, part), '')
    yield whole.partition(sep)[-1]
Yields one string
def substring(ctx, full, start, length):
    '''Yield one string: the XPath-style (1-based) substring of `full`.'''
    whole = next(string_arg(ctx, full), '')
    begin = int(next(to_number(start))) - 1
    count = int(next(to_number(length)))
    yield whole[begin:begin + count]
Yields one number
def string_length(ctx, s=None):
    '''Yield one number: the length of the argument string.'''
    if s is None:
        # NOTE(review): falls back to ctx.node while sibling functions use
        # ctx.item — confirm this is intended
        s = ctx.node
    elif callable(s):
        s = next(s.compute(ctx), '')
    yield len(s)
Yields one boolean false if the argument sequence is empty otherwise
def boolean(ctx, obj):
    '''
    Yields one boolean, false if the argument sequence is empty, otherwise
    * false if the first item is a boolean and false
    * false if the first item is a number and positive or negative zero or NaN
    * false if the first item is a string and ''
    * true in all other cases
    '''
    # BUG FIX: the body referenced an undefined name `seq`; the parameter is
    # `obj`, so every call raised NameError
    if hasattr(obj, 'compute'):
        obj = next(obj.compute(ctx), '')
    yield next(to_boolean(obj), '')
Yields one float derived from the first item in the argument sequence ( unless empty in which case yield NaN ) as follows:
def number(ctx, seq=None):
    '''
    Yields one float, derived from the first item in the argument sequence
    (unless empty in which case yield NaN) as follows:
    * If string with optional whitespace followed by an optional minus sign
      followed by a Number followed by whitespace, convert to the IEEE 754
      number that is nearest (round-to-nearest) to the mathematical value
      represented by the string; any other string yields NaN
    * If boolean true yield 1; if boolean false yield 0
    * If a node, convert to string as if by string() and yield the value
      number() would give for that string
    '''
    # BUG FIX: the body referenced an undefined name `obj`; the parameter is
    # `seq`, so every call raised NameError. None now falls back to the
    # current context item, matching string_().
    if seq is None:
        item = ctx.item
    elif hasattr(seq, 'compute'):
        item = next(seq.compute(ctx), '')
    else:
        item = seq
    yield next(to_number(item), '')
Yields the result of applying an expression to each item in the input sequence.
def foreach_(ctx, seq, expr):
    '''Yield the result of applying an expression to each item of a sequence.

    * seq: input sequence
    * expr: expression converted to string, then dynamically evaluated for
      each item to produce the results
    '''
    from . import context, parse as uxpathparse
    if hasattr(seq, 'compute'):
        seq = seq.compute(ctx)
    compiled = uxpathparse(next(string_arg(ctx, expr), ''))
    for element_ in seq:
        yield from compiled.compute(ctx.copy(item=element_))
Yields a sequence of a single value the result of looking up a value from the tables provided in the context or an empty sequence if lookup is unsuccessful
def lookup_(ctx, tableid, key):
    '''
    Yields a sequence of a single value, the result of looking up a value
    from the tables provided in the context, or an empty sequence if lookup
    is unsuccessful

    * tableid: id of the lookup table to use
    * key: key to look up in that table
    '''
    tableid = next(string_arg(ctx, tableid), '')
    key = next(string_arg(ctx, key), '')
    # FIXME(review): this body looks unfinished — the truncated '#value = ctx.'
    # and the loop below (copied from foreach_?) reference undefined names
    # `seq` and `pexpr`, so calling this raises NameError. Kept as-is pending
    # clarification of the context lookup-table API.
    #value = ctx.
    for item in seq:
        innerctx = ctx.copy(item=item)
        yield from pexpr.compute(innerctx)
Replace known special characters to SVG code.
def replace_chars_for_svg_code(svg_content):
    """Escape known special characters in SVG content.

    Parameters
    ----------
    svg_content: str

    Returns
    -------
    corrected_svg: str
        Corrected SVG content
    """
    # '&' is escaped first so that entities introduced by the later
    # replacements are not double-escaped
    escapes = (
        ('&', '&amp;'),
        ('>', '&gt;'),
        ('<', '&lt;'),
        ('"', '&quot;'),
    )
    escaped = svg_content
    for char, entity in escapes:
        escaped = escaped.replace(char, entity)
    return escaped
Try to read a SVG file if svg_file is a string. Raise an exception in case of error or return the svg object.
def _check_svg_file(svg_file):
    """Return a svgutils SVG object for `svg_file`.

    If `svg_file` is a string it is read as a '.svg' file path; if it is
    already a svgutils svg object it is returned unchanged.

    Parameters
    ----------
    svg_file: str or svgutils.transform.SVGFigure object

    Returns
    -------
    svgutils svg object

    Raises
    ------
    Exception on read errors, ValueError for unsupported input types.
    """
    if isinstance(svg_file, sg.SVGFigure):
        return svg_file
    if isinstance(svg_file, str):
        try:
            return sg.fromfile(svg_file)
        except Exception as exc:
            raise Exception('Error reading svg file {}.'.format(svg_file)) from exc
    raise ValueError('Expected `svg_file` to be `str` or `svgutils.SVG`, got {}.'.format(type(svg_file)))
Merge svg_file2 in svg_file1 in the given positions x_coord y_coord and scale.
def merge_svg_files(svg_file1, svg_file2, x_coord, y_coord, scale=1):
    """Overlay the content of `svg_file2` onto `svg_file1` at position
    (`x_coord`, `y_coord`) with the given `scale`.

    Parameters
    ----------
    svg_file1: str or svgutils svg document object
    svg_file2: str or svgutils svg document object
    x_coord: float
        Horizontal position for the `svg_file2` content.
    y_coord: float
        Vertical position for the `svg_file2` content.
    scale: float
        Scale applied to the `svg_file2` content.

    Returns
    -------
    svgutils object for svg_file1 with the content of svg_file2 appended.
    """
    base = _check_svg_file(svg_file1)
    overlay_root = _check_svg_file(svg_file2).getroot()
    base.append([overlay_root])
    # moveto after append matches the original ordering; the same root object
    # is shared so the transform still takes effect — presumably intentional
    overlay_root.moveto(x_coord, y_coord, scale=scale)
    return base
Calls the rsvg - convert command to convert a svg to a PDF ( with unicode ).
def rsvg_export(input_file, output_file, dpi=90, rsvg_binpath=None):
    """Convert an SVG to PDF (with unicode support) via the `rsvg-convert` command.

    Parameters
    ----------
    input_file: str
        Path to the input file
    output_file: str
        Path to the output file
    dpi: int
        Output resolution for both axes.
    rsvg_binpath: str
        Path to `rsvg-convert`; looked up on PATH when None.

    Returns
    -------
    return_value
        Command call return value
    """
    if not os.path.exists(input_file):
        log.error('File {} not found.'.format(input_file))
        raise IOError((0, 'File not found.', input_file))
    if rsvg_binpath is None:
        rsvg_binpath = which('rsvg-convert')
    check_command(rsvg_binpath)
    args_strings = [
        "-f pdf",
        "-o {}".format(output_file),
        "--dpi-x {}".format(dpi),
        "--dpi-y {}".format(dpi),
        input_file,
    ]
    return call_command(rsvg_binpath, args_strings)
Merge all the PDF files in pdf_filepaths in a new PDF file out_filepath.
def merge_pdfs(pdf_filepaths, out_filepath):
    """Merge all the PDF files in `pdf_filepaths` into `out_filepath`.

    Parameters
    ----------
    pdf_filepaths: list of str
        Paths to PDF files.
    out_filepath: str
        Path to the result PDF file.

    Returns
    -------
    path: str
        The output file path.
    """
    merger = PdfFileMerger()
    # BUG FIX: the source handles were never closed (resource leak). They
    # must stay open until write() because the merger reads streams lazily.
    open_files = []
    try:
        for pdf in pdf_filepaths:
            fobj = open(pdf, 'rb')
            open_files.append(fobj)
            merger.append(PdfFileReader(fobj))
        merger.write(out_filepath)
    finally:
        for fobj in open_files:
            fobj.close()
    return out_filepath
Return the ElementTree of the SVG content in filepath with the font content embedded.
def _embed_font_to_svg(filepath, font_files):
    """Return the ElementTree of the SVG content in `filepath` with the
    font content embedded."""
    with open(filepath, 'r') as svgf:
        tree = etree.parse(svgf)
    if not font_files:
        return tree
    fontfaces = FontFaceGroup()
    for font_file in font_files:
        fontfaces.append(FontFace(font_file))
    # locate the <svg> root element; tags look like '{namespace}svg'
    # (NOTE(review): assumes every tag is namespaced — an un-namespaced tag
    # would make the split()[1] below raise IndexError)
    for element in tree.iter():
        if element.tag.split("}")[1] == 'svg':
            break
    # `element` is the tag found above (loop-variable leak is load-bearing)
    element.insert(0, fontfaces.xml_elem)
    return tree
Write ttf and otf font content from font_files in the svg file in filepath and write the result in outfile.
def embed_font_to_svg(filepath, outfile, font_files):
    """Embed ttf/otf font content from `font_files` into the SVG at
    `filepath` and write the result to `outfile`.

    Parameters
    ----------
    filepath: str
        The SVG file whose content must be modified.
    outfile: str
        The file path where the result will be written.
    font_files: iterable of str
        List of paths to .ttf or .otf files.
    """
    doc = _embed_font_to_svg(filepath, font_files)
    doc.write(outfile, encoding='utf-8', pretty_print=True)
make some basic checks on the inputs to make sure they are valid
def _check_inputs(self): ''' make some basic checks on the inputs to make sure they are valid''' try: _ = self._inputs[0] except TypeError: raise RuntimeError( "inputs should be iterable but found type='{0}', value=" "'{1}'".format(type(self._inputs), str(self._inputs))) from melody.inputs import Input for check_input in self._inputs: if not isinstance(check_input, Input): raise RuntimeError( "input should be a subclass of the Input class but " "found type='{0}', value='{1}'".format(type(check_input), str(check_input)))
make some basic checks on the function to make sure it is valid
def _check_function(self): ''' make some basic checks on the function to make sure it is valid''' # note, callable is valid for Python 2 and Python 3.2 onwards but # not inbetween if not callable(self._function): raise RuntimeError( "provided function '{0}' is not callable". format(str(self._function))) from inspect import getargspec arg_info = getargspec(self._function) if len(arg_info.args) != 1: print str(arg_info) raise RuntimeError( "provided function should have one argument but found " "{0}".format(len(arg_info.args)))
internal recursion routine called by the run method that generates all input combinations
def _recurse(self, inputs, output): '''internal recursion routine called by the run method that generates all input combinations''' if inputs: my_input = inputs[0] name = my_input.name if my_input.state: my_options = my_input.options(self.state) else: my_options = my_input.options for option in my_options: my_output = list(output) my_output.append({name: option}) self._recurse(inputs[1:], my_output) else: try: valid, result = self._function(output) except ValueError: raise RuntimeError("function must return 2 values") print output, valid, result
create an input file using jinja2 by filling a template with the values from the option variable passed in.
def create_input(option, template_name, template_location="template"):
    '''Create an input file using jinja2, filling a template with the values
    from the `option` list of dictionaries passed in. Returns the rendered
    input file as a string.'''
    # restructure option list into jinja2 input format
    jinja2_input = {}
    for item in option:
        try:
            jinja2_input.update(item)
        except ValueError:
            raise RuntimeError(
                ("inputs.py, create_input : format of item '{0}' is not "
                 "supported. Expecting a dictionary.".format(str(item))))
    # load the template and fill it with the option variable contents
    import jinja2
    try:
        loader = jinja2.FileSystemLoader(searchpath=template_location)
        env = jinja2.Environment(loader=loader)
        output_text = env.get_template(template_name).render(jinja2_input)
    except jinja2.TemplateNotFound:
        raise RuntimeError("template '{0}' not found".format(template_name))
    return output_text
We work out all combinations using this internal recursion method
def _recurse(self, inputs, output, depth, max_depth): '''We work out all combinations using this internal recursion method''' if depth < max_depth: for index, option in enumerate(inputs): my_output = list(output) my_output.append(option) self._recurse(inputs[index + 1:], my_output, depth + 1, max_depth) else: self._options.append(output)
Cast an arbitrary object or sequence to a string type
def to_string(obj):
    '''
    Cast an arbitrary object or sequence to a string type.
    Yields the string-value of the first item (or of the object itself).
    '''
    if isinstance(obj, LiteralWrapper):
        val = obj.obj
    elif isinstance(obj, Iterable) and not isinstance(obj, str):
        val = next(obj, None)
    else:
        val = obj
    if val is None:
        yield ''
    elif isinstance(val, str):
        yield val
    elif isinstance(val, node):
        yield strval(val)
    # BUG FIX: bool must be tested before int (bool is an int subclass, so the
    # old branch was unreachable) and the branch referenced an undefined
    # name `item`
    elif isinstance(val, bool):
        yield 'true' if val else 'false'
    elif isinstance(val, int) or isinstance(val, float):
        yield str(val)
    else:
        raise RuntimeError('Unknown type for string conversion: {}'.format(val))
Cast an arbitrary object or sequence to a number type
def to_number(obj):
    '''Cast an arbitrary object or sequence to a number type.'''
    if isinstance(obj, LiteralWrapper):
        val = obj.obj
    elif isinstance(obj, Iterable) and not isinstance(obj, str):
        val = next(obj, None)
    else:
        val = obj
    if val is None:
        # FIXME: Should be NaN, not 0 (kept to preserve existing behavior)
        yield 0
    elif isinstance(val, str):
        yield float(val)
    elif isinstance(val, node):
        yield float(strval(val))
    elif isinstance(val, (int, float)):
        yield val
    else:
        raise RuntimeError('Unknown type for number conversion: {}'.format(val))
Cast an arbitrary sequence to a boolean type
def to_boolean(obj):
    '''Cast an arbitrary sequence to a boolean type.'''
    if isinstance(obj, LiteralWrapper):
        val = obj.obj
    elif isinstance(obj, Iterable) and not isinstance(obj, str):
        val = next(obj, None)
    else:
        val = obj
    if val is None:
        yield False
    elif isinstance(val, bool):
        yield val
    elif isinstance(val, str):
        # BUG FIX: original evaluated bool(str) — the *type*, always truthy —
        # so the empty string wrongly mapped to True
        yield bool(val)
    elif isinstance(val, node):
        yield True
    elif isinstance(val, (float, int)):
        yield bool(val)
    else:
        raise RuntimeError('Unknown type for boolean conversion: {}'.format(val))
Generate token strings which when joined together form a valid XPath serialization of the AST.
def _serialize(xp_ast): '''Generate token strings which, when joined together, form a valid XPath serialization of the AST.''' if hasattr(xp_ast, '_serialize'): for tok in xp_ast._serialize(): yield(tok) elif isinstance(xp_ast, str): yield(repr(xp_ast))
Modify the encoding entry in the XML file.
def change_xml_encoding(filepath, src_enc, dst_enc='utf-8'):
    """ Rewrite the encoding attribute of the XML declaration in a file.

    Parameters
    ----------
    filepath: str
        Path to the file to be modified in place.

    src_enc: str
        Encoding currently declared in the file.

    dst_enc: str
        Encoding to declare instead. Default: 'utf-8'.
    """
    old_attr = "encoding='{}'".format(src_enc)
    new_attr = "encoding='{}'".format(dst_enc)
    # Replace only the first occurrence: the XML declaration appears once,
    # at the top of the document.
    replace_file_content(filepath, old_attr, new_attr, 1)
Save text in a qrcode svg image file.
def save_into_qrcode(text, out_filepath, color='', box_size=10, pixel_size=1850):
    """ Save `text` in a QR-code SVG image file.

    Parameters
    ----------
    text: str
        The string to be codified in the QR image.

    out_filepath: str
        Path to the output file.

    color: str
        An RGB color expressed as 6 hexadecimal digits (no leading '#').
        If empty, the default black fill is kept.

    box_size: scalar
        Size of the QR code boxes.

    pixel_size: scalar
        NOTE(review): currently unused by this function — confirm whether
        it should drive the output size or be removed.

    Raises
    ------
    Exception if the QR code cannot be generated from `text`.
    """
    try:
        qr = qrcode.QRCode(version=1,
                           error_correction=qrcode.constants.ERROR_CORRECT_L,
                           box_size=box_size,
                           border=0,
                           )
        qr.add_data(text)
        # fit=True lets the library pick the smallest version that holds the data
        qr.make(fit=True)
    except Exception as exc:
        raise Exception('Error trying to generate QR code '
                        ' from `vcard_string`: {}'.format(text)) from exc
    else:
        img = qr.make_image(image_factory=qrcode.image.svg.SvgPathImage)
        _ = _qrcode_to_file(img, out_filepath)
        if color:
            # Recolor by rewriting the default black fill in the saved SVG text.
            replace_file_content(out_filepath, 'fill:#000000', 'fill:#{}'.format(color))
Save a qrcode object into out_filepath. Parameters ---------- qrcode: qrcode object
def _qrcode_to_file(qrcode, out_filepath): """ Save a `qrcode` object into `out_filepath`. Parameters ---------- qrcode: qrcode object out_filepath: str Path to the output file. """ try: qrcode.save(out_filepath) except Exception as exc: raise IOError('Error trying to save QR code file {}.'.format(out_filepath)) from exc else: return qrcode
Return a (result, new_position) tuple. The result is the cdata string if possible, and None if more input is needed; bad syntax can, of course, raise a RuntimeError.
def handle_cdata(pos, window, charpat, stopchars):
    '''
    Scan character data (with character/entity references) in `window`
    starting at `pos`.

    Parameters
    ----------
    pos: int
        Index in `window` where scanning starts.
    window: str
        Buffer of input text being parsed.
    charpat: compiled regex
        Single-character pattern matching ordinary cdata characters.
    stopchars: str/container
        Characters that terminate the cdata run (e.g. '<' or a quote).

    Returns
    -------
    (result, new_position) tuple. `result` is the accumulated cdata
    string with references resolved, or None if the buffer ran out
    before a stop character was seen (caller should supply more input).

    Raises
    ------
    RuntimeError on malformed or unknown character references.
    '''
    cdata = ''
    cursor = start = pos
    try:
        while True:
            # Consume a run of ordinary characters.
            while charpat.match(window[cursor]):
                cursor += 1
            addchars = window[start:cursor]
            cdata += addchars
            #if window[pos] != openattr:
            #    raise RuntimeError('Mismatch in attribute quotes')
            if window[cursor] in stopchars:
                return cdata, cursor
            #Check for charref
            elif window[cursor] == '&':
                start = cursor = cursor + 1
                if window[cursor] == '#' and window[cursor + 1] == 'x':
                    #Numerical charref
                    start = cursor = cursor + 2
                    while True:
                        if HEXCHARENTOK.match(window[cursor]):
                            cursor += 1
                        elif window[cursor] == ';':
                            c = chr(int(window[start:cursor], 16))
                            if not CHARACTER.match(c):
                                raise RuntimeError('Character reference gives an illegal character: {0}'.format('&' + window[start:cursor] + ';'))
                            cdata += c
                            break
                        else:
                            raise RuntimeError('Illegal in character entity: {0}'.format(window[cursor]))
                else:
                    #Named charref
                    while True:
                        if NAMEDCHARENTOK.match(window[cursor]):
                            cursor += 1
                        elif window[cursor] == ';':
                            for cn, c in CHARNAMES:
                                if window[start:cursor] == cn:
                                    cdata += c
                                    #cursor += 1 #Skip ;
                                    break
                            else:
                                raise RuntimeError('Unknown named character reference: {0}'.format(repr(window[start:cursor])))
                            break
                        else:
                            # BUG FIX: the original closed .format() after one
                            # argument, so the '{1}' placeholder raised
                            # IndexError and error_context() was passed as a
                            # stray second RuntimeError argument.
                            raise RuntimeError('Illegal in character reference: {0} (around {1})'.format(window[cursor], error_context(window, start, cursor)))
                #print(start, cursor, cdata, window[cursor])
                # Skip the terminating ';' and resume ordinary scanning.
                cursor += 1
                start = cursor
    except IndexError:
        # Ran off the end of the buffer mid-scan: more input is needed.
        return None, cursor
Set the gromacs input data using the supplied input options, run gromacs, and extract and return the required outputs.
def launch(option):
    '''Set the gromacs input data using the supplied input options, run
    gromacs and extract and return the required outputs.

    Parameters
    ----------
    option:
        Input options passed through to melody.inputs.create_input.

    Returns
    -------
    (success, results) tuple: `success` is a bool and `results` is a dict
    of outputs (or None on failure).

    NOTE(review): this is currently a stub — `success` is hard-coded True
    and the 35 ns/day rate is a placeholder; the actual gromacs launch is
    still TODO (see comments below).
    '''
    from melody.inputs import create_input
    _ = create_input(option, template_name="input.mdp")
    # save the input file in the appropriate place and launch gromacs using
    # longbow ...
    # determine if the run was successful
    success = True
    results = None
    if success:
        results = {"rate": {"value": 35, "units": "ns/day"},
                   }
    return success, results
Call CLI command with arguments and returns its return value.
def call_command(cmd_name, args_strings):
    """Call CLI command with arguments and returns its return value.

    Parameters
    ----------
    cmd_name: str
        Command name or full path to the binary file.

    args_strings: list of str
        Argument strings list.

    Returns
    -------
    return_value
        Command return value (exit status of the shell command).
    """
    if not os.path.isabs(cmd_name):
        # `which` resolves the binary on PATH — presumably shutil.which or a
        # project helper; confirm against the module imports.
        cmd_fullpath = which(cmd_name)
    else:
        cmd_fullpath = cmd_name
    try:
        cmd_line = [cmd_fullpath] + args_strings
        log.debug('Calling: `{}`.'.format(' '.join(cmd_line)))
        # retval = subprocess.check_call(cmd_line)
        # NOTE(review): shell=True with a joined string is shell-injection
        # prone if any argument is attacker-controlled — confirm all callers
        # pass trusted arguments.
        retval = subprocess.call(' '.join(cmd_line), shell=True)
    except CalledProcessError as ce:
        # NOTE(review): subprocess.call never raises CalledProcessError
        # (only check_call does — see the commented line above), so this
        # handler is effectively dead code with the current call.
        log.exception(
            "Error calling command with arguments: "
            "{} \n With return code: {}".format(cmd_line, ce.returncode)
        )
        raise
    else:
        return retval
Returns ------- filename: str
def getCSV(self):
    """Interactively download a Google Spreadsheet worksheet to a CSV file.

    Prompts on stdin for Google credentials, the spreadsheet name and an
    optional worksheet name (the first sheet is used when left blank),
    then writes all cell values to '<name>-worksheet_<sheet>.csv'.

    Returns
    -------
    filename: str
        Path of the CSV file that was written.

    NOTE(review): Python 2 only (raw_input, binary csv writing); also
    gspread.login() is the legacy username/password flow — confirm it is
    still supported by the gspread version in use.
    """
    import getpass
    import gspread
    user = raw_input("Insert Google username:")
    password = getpass.getpass(prompt="Insert password:")
    name = raw_input("SpreadSheet filename on Drive:")
    sheet = raw_input("Sheet name (first sheet is default):")
    cl = gspread.login(user, password)
    sh = cl.open(name)
    if not (sheet.strip()):
        # Blank answer: fall back to the first worksheet and label it "1"
        # in the output filename.
        ws = sh.sheet1
        sheet = "1"
    else:
        ws = sh.worksheet(sheet)
    filename = name + '-worksheet_' + sheet + '.csv'
    with open(filename, 'wb') as f:
        writer = UnicodeWriter(f)
        writer.writerows(ws.get_all_values())
    return filename
Write a MicroXML element node (yes, even one representing a whole document). elem - Amara MicroXML element node to be written out; writer - instance of amara3.uxml.writer to implement the writing process.
def write(elem, a_writer):
    '''
    Serialize a MicroXML element node (including one representing a whole
    document) through a writer object.

    elem - Amara MicroXML element node to be written out
    a_writer - instance of amara3.uxml.writer implementing the writing
    process (start_element / text / end_element)

    Element children are written recursively; text children are passed to
    the writer; any other child node types are skipped.
    '''
    a_writer.start_element(elem.xml_name, attribs=elem.xml_attributes)
    for child in elem.xml_children:
        if isinstance(child, tree.element):
            write(child, a_writer)
        elif isinstance(child, tree.text):
            a_writer.text(child)
    a_writer.end_element(elem.xml_name)
Call PDFLatex to convert TeX files to PDF.
def tex2pdf(tex_file, output_file=None, output_format='pdf'):
    """ Call PDFLatex to convert TeX files to PDF.

    Parameters
    ----------
    tex_file: str
        Path to the input LateX file.

    output_file: str
        Path to the output PDF file. If None, will use the same output
        directory as the tex_file.
        NOTE(review): with output_file=None the shutil.move() below is
        called with a None destination and will fail — confirm callers
        always pass output_file.

    output_format: str
        Output file format. Choices: 'pdf' or 'dvi'. Default: 'pdf'

    Returns
    -------
    return_value
        PDFLatex command call return value.

    Raises
    ------
    IOError if tex_file does not exist or the result file is not produced;
    ValueError for an invalid output_format.
    """
    if not os.path.exists(tex_file):
        raise IOError('Could not find file {}.'.format(tex_file))
    if output_format != 'pdf' and output_format != 'dvi':
        raise ValueError("Invalid output format given {}. Can only accept 'pdf' or 'dvi'.".format(output_format))
    cmd_name = 'pdflatex'
    # check_command / simple_call / remove_ext / cleanup are project helpers.
    check_command(cmd_name)
    args_strings = [cmd_name]
    if output_file is not None:
        # Quoted because the arguments are later joined into a shell string.
        args_strings += ['-output-directory="{}" '.format(os.path.abspath(os.path.dirname(output_file)))]
    result_dir = os.path.dirname(output_file) if output_file else os.path.dirname(tex_file)
    args_strings += ['-output-format="{}"'.format(output_format)]
    args_strings += ['"' + tex_file + '"']
    log.debug('Calling command {} with args: {}.'.format(cmd_name, args_strings))
    ret = simple_call(args_strings)
    # pdflatex names its output after the .tex basename; move it to the
    # requested destination.
    result_file = os.path.join(result_dir, remove_ext(os.path.basename(tex_file)) + '.' + output_format)
    if os.path.exists(result_file):
        shutil.move(result_file, output_file)
    else:
        raise IOError('Could not find PDFLatex result file.')
    log.debug('Cleaning *.aux and *.log files from folder {}.'.format(result_dir))
    cleanup(result_dir, 'aux')
    cleanup(result_dir, 'log')
    return ret
Returns all potential loop fusion options for the psy object provided
def options(self, my_psy): '''Returns all potential loop fusion options for the psy object provided''' # compute options dynamically here as they may depend on previous # changes to the psy tree my_options = [] invokes = my_psy.invokes.invoke_list #print "there are {0} invokes".format(len(invokes)) if self._dependent_invokes: raise RuntimeError( "dependent invokes assumes fusion in one invoke might " "affect fusion in another invoke. This is not yet " "implemented") else: # treat each invoke separately for idx, invoke in enumerate(invokes): print "invoke {0}".format(idx) # iterate through each outer loop for loop in invoke.schedule.loops(): if loop.loop_type == "outer": siblings = loop.parent.children my_index = siblings.index(loop) option = [] self._recurse(siblings, my_index, option, my_options, invoke) return my_options
Returns a transformed Geometry.
def transform(geom, to_sref): """Returns a transformed Geometry. Arguments: geom -- any coercible Geometry value or Envelope to_sref -- SpatialReference or EPSG ID as int """ # If we have an envelope, assume it's in the target sref. try: geom = getattr(geom, 'polygon', Envelope(geom).polygon) except (TypeError, ValueError): pass else: geom.AssignSpatialReference(to_sref) try: geom_sref = geom.GetSpatialReference() except AttributeError: return transform(Geometry(geom), to_sref) if geom_sref is None: raise Exception('Cannot transform from unknown spatial reference') # Reproject geom if necessary if not geom_sref.IsSame(to_sref): geom = geom.Clone() geom.TransformTo(to_sref) return geom
Returns an ogr. Geometry instance optionally created from a geojson str or dict. The spatial reference may also be provided.
def Geometry(*args, **kwargs): """Returns an ogr.Geometry instance optionally created from a geojson str or dict. The spatial reference may also be provided. """ # Look for geojson as a positional or keyword arg. arg = kwargs.pop('geojson', None) or len(args) and args[0] try: srs = kwargs.pop('srs', None) or arg.srs.wkt except AttributeError: srs = SpatialReference(4326) if hasattr(arg, 'keys'): geom = ogr.CreateGeometryFromJson(json.dumps(arg)) elif hasattr(arg, 'startswith'): # WKB as hexadecimal string. char = arg[0] if arg else ' ' i = char if isinstance(char, int) else ord(char) if i in (0, 1): geom = ogr.CreateGeometryFromWkb(arg) elif arg.startswith('{'): geom = ogr.CreateGeometryFromJson(arg) elif arg.startswith('<gml'): geom = ogr.CreateGeometryFromGML(arg) else: raise ValueError('Invalid geometry value: %s' % arg) elif hasattr(arg, 'wkb'): geom = ogr.CreateGeometryFromWkb(bytes(arg.wkb)) else: geom = ogr.Geometry(*args, **kwargs) if geom: if not isinstance(srs, SpatialReference): srs = SpatialReference(srs) geom.AssignSpatialReference(srs) return geom
Returns the envelope centroid as a ( x y ) tuple.
def centroid(self):
    """Return the midpoint of the envelope as an (x, y) tuple."""
    center_x = self.min_x + self.width * 0.5
    center_y = self.min_y + self.height * 0.5
    return center_x, center_y
Expands this envelope by the given Envelope or tuple.
def expand(self, other):
    """Expands this envelope by the given Envelope or tuple.

    Arguments:
    other -- Envelope, two-tuple, or four-tuple

    A two-tuple (a point) is doubled into a degenerate four-tuple so the
    same min/max logic applies.
    NOTE(review): `other += other` rebinds tuples but mutates a caller's
    *list* in place — confirm list inputs are not expected.
    NOTE(review): under Python 3, map() is lazy; presumably the ll/ur
    setters materialize the iterators — confirm.
    """
    if len(other) == 2:
        other += other
    mid = len(other) // 2
    # Lower-left takes the componentwise minimum, upper-right the maximum.
    self.ll = map(min, self.ll, other[:mid])
    self.ur = map(max, self.ur, other[mid:])
Returns the intersection of this and another Envelope.
def intersect(self, other):
    """Returns the intersection of this and another Envelope.

    Arguments:
    other -- Envelope or four-tuple (minX, minY, maxX, maxY)

    Returns a zero envelope (ll and ur at the origin) when the two do not
    intersect.
    NOTE(review): under Python 3, map() is lazy; presumably the ll/ur
    setters materialize the iterators — confirm.
    """
    # Work on a copy so self is never modified.
    inter = Envelope(tuple(self))
    if inter.intersects(other):
        mid = len(other) // 2
        # Intersection: maximum of the lower-left corners, minimum of the
        # upper-right corners.
        inter.ll = map(max, inter.ll, other[:mid])
        inter.ur = map(min, inter.ur, other[mid:])
    else:
        inter.ll = (0, 0)
        inter.ur = (0, 0)
    return inter
Returns true if this envelope intersects another.
def intersects(self, other):
    """Returns true if this envelope intersects another.

    Arguments:
    other -- Envelope or tuple of (minX, minY, maxX, maxY)

    Tuples (anything without min/max attributes) are promoted to an
    Envelope and the test retried.
    """
    try:
        # Two axis-aligned boxes are disjoint iff they are separated along
        # the X or the Y axis.
        disjoint = (other.max_x < self.min_x or
                    other.min_x > self.max_x or
                    other.max_y < self.min_y or
                    other.min_y > self.max_y)
    except AttributeError:
        return self.intersects(Envelope(other))
    return not disjoint
Returns a new envelope rescaled from center by the given factor ( s ).
def scale(self, xfactor, yfactor=None):
    """Returns a new envelope rescaled from center by the given factor(s).

    Arguments:
    xfactor -- int or float X scaling factor
    yfactor -- int or float Y scaling factor (defaults to xfactor)
    """
    if yfactor is None:
        yfactor = xfactor
    center_x, center_y = self.centroid
    half_w = self.width * xfactor * 0.5
    half_h = self.height * yfactor * 0.5
    return Envelope(center_x - half_w, center_y - half_h,
                    center_x + half_w, center_y + half_h)
Returns an OGR Geometry for this envelope.
def polygon(self):
    """Returns an OGR Geometry (polygon) for this envelope."""
    ring = ogr.Geometry(ogr.wkbLinearRing)
    # Wind ll -> lr -> ur -> ul and close the ring by repeating ll.
    for coord in self.ll, self.lr, self.ur, self.ul, self.ll:
        ring.AddPoint_2D(*coord)
    polyg = ogr.Geometry(ogr.wkbPolygon)
    # AddGeometryDirectly transfers ownership of the ring to the polygon.
    polyg.AddGeometryDirectly(ring)
    return polyg
Imports a mass table from a file
def from_name(cls, name):
    """Import a mass table distributed with the package, looked up by name
    under the package's data directory."""
    path = os.path.join(package_dir, 'data', name + '.txt')
    return cls.from_file(path, name)
Imports a mass table from a file
def from_file(cls, filename, name=''):
    """Import a mass table from a whitespace-delimited file.

    The file must carry a header row with Z, N and M columns; the first
    two columns become the (Z, N) index and M holds the mass values.

    Parameters
    ----------
    filename : str, path of the file to read.
    name : str, optional name for the resulting table.
    """
    # sep=r'\s+' is the documented equivalent of the deprecated (and since
    # removed) delim_whitespace=True flag, and works on old pandas too.
    df = pd.read_csv(filename, header=0, sep=r'\s+', index_col=[0, 1])['M']
    df.name = name
    return cls(df=df, name=name)
Creates a table from arrays Z N and M
def from_ZNM(cls, Z, N, M, name=''):
    """
    Create a table from separate arrays Z, N and M.

    Example:
    ________
    >>> Z = [82, 82, 83]
    >>> N = [126, 127, 130]
    >>> M = [-21.34, -18.0, -14.45]
    >>> Table.from_ZNM(Z, N, M, name='Custom Table')
    Z   N
    82  126   -21.34
        127   -18.00
    83  130   -14.45
    Name: Custom Table, dtype: float64
    """
    frame = pd.DataFrame.from_dict({'Z': Z, 'N': N, 'M': M})
    df = frame.set_index(['Z', 'N'])['M']
    df.name = name
    return cls(df=df, name=name)
Export the contents to a file as comma separated values.
def to_file(self, path):
    """Export the contents to a file as tab separated values (a 'Z N M'
    header line followed by tab-delimited rows).

    Parameters
    ----------
    path : string
        File path where the data should be saved to

    Example
    -------
    Export the last ten elements of AME2012 to a new file:

    >>> Table('AME2012').tail(10).to_file('last_ten.txt')

    NOTE(review): the header uses spaces while the appended rows use
    tabs; from_file() tolerates both (whitespace-delimited read), but
    confirm other consumers do too.
    """
    with open(path, 'w') as f:
        f.write('Z N M\n')
    # Append the data below the header just written.
    self.df.to_csv(path, sep='\t', mode='a')
Selects nuclei according to a condition on Z N or M
def select(self, condition, name=''):
    """
    Selects nuclei according to a condition on Z, N or M.

    Parameters
    ----------
    condition : function
        Can have one of the signatures f(M), f(Z, N) or f(Z, N, M);
        must return a boolean value. Which signature applies is decided
        by inspecting the function's argument count.
    name : string, optional
        Name for the resulting Table.

    Example
    -------
    Select all nuclei with A > 160:

    >>> A_gt_160 = lambda Z, N: Z + N > 160
    >>> Table('AME2003').select(A_gt_160)

    NOTE: Python 2 only — `func_code` is `__code__` on Python 3.
    NOTE(review): a condition with 0 or >3 arguments leaves `idx`
    undefined and raises NameError below — confirm intended.
    """
    if condition.func_code.co_argcount == 1:
        idx = [(Z, N) for (Z, N), M in self if condition(M)]
    if condition.func_code.co_argcount == 2:
        idx = [(Z, N) for (Z, N) in self.index if condition(Z, N)]
    if condition.func_code.co_argcount == 3:
        idx = [(Z, N) for (Z, N), M in self if condition(Z, N, M)]
    index = pd.MultiIndex.from_tuples(idx, names=['Z', 'N'])
    # NOTE: .ix is deprecated/removed in modern pandas (use .loc).
    return Table(df=self.df.ix[index], name=name)
Return a selection of the Table at positions given by nuclei
def at(self, nuclei):
    """Return a selection of the Table at positions given by ``nuclei``.

    Parameters
    ----------
    nuclei : list of tuples
        A list where each element is a tuple of the form (Z, N)

    Example
    -------
    Return binding energies at magic nuclei:

    >>> magic_nuclei = [(20, 28), (50, 50), (50, 82), (82, 126)]
    >>> Table('AME2012').binding_energy.at(magic_nuclei)
    Z   N
    20  28      416.014215
    50  50      825.325172
        82     1102.876416
    82  126    1636.486450
    """
    index = pd.MultiIndex.from_tuples(nuclei, names=['Z', 'N'])
    # NOTE: .ix is deprecated/removed in modern pandas (use .loc).
    return Table(df=self.df.ix[index], name=self.name)
Select nuclei which also belong to table
def intersection(self, table):
    """
    Select nuclei which also belong to ``table``.

    Parameters
    ----------
    table : Table
        Table object to intersect with.

    Example
    -------
    >>> Table('AME2003').intersection(Table('AME1995'))
    """
    # NOTE: `&` between indexes is deprecated in modern pandas
    # (use Index.intersection).
    idx = self.df.index & table.df.index
    return Table(df=self.df[idx], name=self.name)
Select nuclei not in table
def not_in(self, table):
    """
    Select nuclei not in ``table``.

    Parameters
    ----------
    table : Table
        Table object from where nuclei should be removed.

    Example
    -------
    Find the new nuclei in AME2003 with Z, N >= 8:

    >>> Table('AME2003').not_in(Table('AME1995'))[8:, 8:].count
    389
    """
    # NOTE: `-` between indexes is deprecated/removed in modern pandas
    # (use Index.difference).
    idx = self.df.index - table.df.index
    return Table(df=self.df[idx], name=self.name)
Selects odd - odd nuclei from the table:
def odd_odd(self):
    """Select the nuclei having both odd Z and odd N:

    >>> Table('FRDM95').odd_odd
    Z   N
    9   9     1.21
        11    0.10
        13    3.08
        15    9.32
    ...
    """
    return self.select(lambda Z, N: Z % 2 == 1 and N % 2 == 1, name=self.name)
Selects odd - even nuclei from the table
def odd_even(self):
    """Select the nuclei having odd Z and even N."""
    return self.select(lambda Z, N: Z % 2 == 1 and N % 2 == 0, name=self.name)
Selects even - odd nuclei from the table
def even_odd(self):
    """Select the nuclei having even Z and odd N."""
    return self.select(lambda Z, N: Z % 2 == 0 and N % 2 == 1, name=self.name)
Selects even - even nuclei from the table
def even_even(self):
    """Select the nuclei having both even Z and even N."""
    return self.select(lambda Z, N: Z % 2 == 0 and N % 2 == 0, name=self.name)
Calculate error difference
def error(self, relative_to='AME2003'):
    """
    Return the elementwise difference against another mass table.

    Parameters
    ----------
    relative_to : string, a valid mass table name.

    Example
    -------
    >>> Table('DUZU').error(relative_to='AME2003')
    """
    reference = Table(relative_to)
    return Table(df=self.df - reference.df)
Calculate root mean squared error
def rmse(self, relative_to='AME2003'):
    """Return the root mean squared error against another mass table.

    Parameters
    ----------
    relative_to : string, a valid mass table name.

    Example
    -------
    Compare a model against the evaluated tables:

    >>> Table('DUZU').rmse(relative_to='AME2003')
    """
    squared_errors = self.error(relative_to=relative_to).df ** 2
    return math.sqrt(squared_errors.mean())
Return binding energies instead of mass excesses
def binding_energy(self):
    """Return a table of binding energies derived from the mass excesses."""
    M_P = 938.2723      # proton mass, MeV
    M_E = 0.5110        # electron mass, MeV
    M_N = 939.5656      # neutron mass, MeV
    AMU = 931.494028    # atomic mass unit, MeV
    # B = Z*(m_p + m_e) + N*m_n - atomic mass, with N = A - Z.
    protons = self.Z * (M_P + M_E)
    neutrons = (self.A - self.Z) * M_N
    df = protons + neutrons - (self.df + self.A * AMU)
    return Table(df=df, name='BE' + '(' + self.name + ')')
Return Q_alpha
def q_alpha(self):
    """Return the Q-value for alpha decay."""
    M_ALPHA = 2.4249156  # He4 mass excess in MeV
    def energy(parent, daugther):
        return parent - daugther - M_ALPHA
    return self.derived('Q_alpha', (-2, -2), energy)
Return Q_beta
def q_beta(self):
    """Return the Q-value for beta decay."""
    def energy(parent, daugther):
        return parent - daugther
    return self.derived('Q_beta', (1, -1), energy)
Return 2 neutron separation energy
def s2n(self):
    """Return the two-neutron separation energy."""
    M_N = 8.0713171  # neutron mass excess in MeV
    def energy(parent, daugther):
        return -parent + daugther + 2 * M_N
    return self.derived('s2n', (0, -2), energy)
Return 1 neutron separation energy
def s1n(self):
    """Return the one-neutron separation energy."""
    M_N = 8.0713171  # neutron mass excess in MeV
    def energy(parent, daugther):
        return -parent + daugther + M_N
    return self.derived('s1n', (0, -1), energy)
Return 2 proton separation energy
def s2p(self):
    """Return the two-proton separation energy."""
    M_P = 7.28897050  # proton mass excess in MeV
    def energy(parent, daugther):
        return -parent + daugther + 2 * M_P
    return self.derived('s2p', (-2, 0), energy)
Return 1 proton separation energy
def s1p(self):
    """Return the one-proton separation energy."""
    M_P = 7.28897050  # proton mass excess in MeV
    def energy(parent, daugther):
        return -parent + daugther + M_P
    return self.derived('s1p', (-1, 0), energy)
Helper function for derived quantities
def derived(self, name, relative_coords, formula):
    """Helper for derived quantities.

    Evaluates ``formula(parent_masses, daughter_masses)`` where each
    daughter index is the parent's (Z, N) shifted by ``relative_coords``.

    Parameters
    ----------
    name : string, label for the resulting table.
    relative_coords : (dZ, dN) tuple, offset applied to every (Z, N).
    formula : callable taking (parent values, daughter values) arrays.
    """
    dZ, dN = relative_coords
    daughters = [(Z + dZ, N + dN) for Z, N in self.df.index]
    values = formula(self.df.values, self.df.loc[daughters].values)
    series = pd.Series(values, index=self.df.index,
                       name=name + '(' + self.name + ')')
    return Table(df=series)
Calculates the derivative of the neutron separation energies:
def ds2n(self):
    """Calculates the derivative of the two-neutron separation energies:

    ds2n(Z,A) = s2n(Z,A) - s2n(Z,A+2)
    """
    shifted = [(Z, N + 2) for Z, N in self.df.index]
    values = self.s2n.values - self.s2n.loc[shifted].values
    series = pd.Series(values, index=self.df.index,
                       name='ds2n' + '(' + self.name + ')')
    return Table(df=series)
Calculates the derivative of the neutron separation energies:
def ds2p(self):
    """Calculates the derivative of the two-proton separation energies:

    ds2p(Z,A) = s2p(Z,A) - s2p(Z+2,A)

    (The index below shifts Z by +2 and differences s2p values.)
    """
    idx = [(x[0] + 2, x[1]) for x in self.df.index]
    values = self.s2p.values - self.s2p.loc[idx].values
    return Table(df=pd.Series(values, index=self.df.index, name='ds2p' + '(' + self.name + ')'))