def name(self):
    """ The instruction name/mnemonic """
    if self._name == 'PUSH':
        return 'PUSH%d' % self.operand_size
    elif self._name == 'DUP':
        return 'DUP%d' % self.pops
    elif self._name == 'SWAP':
        return 'SWAP%d' % (self.pops - 1)
    elif self._name == 'LOG':
        return 'LOG%d' % (self.pops - 2)
    return self._name

def parse_operand(self, buf):
    """ Parses an operand from buf

    :param buf: a buffer
    :type buf: iterator/generator/string
    """
    buf = iter(buf)
    try:
        operand = 0
        for _ in range(self.operand_size):
            operand <<= 8
            operand |= next(buf)
        self._operand = operand
    except StopIteration:
        raise ParseError("Not enough data for decoding")

def bytes(self):
    """ Encoded instruction """
    b = [bytes([self._opcode])]
    for offset in reversed(range(self.operand_size)):
        b.append(bytes([(self.operand >> offset * 8) & 0xff]))
    return b''.join(b)
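# A minimal sanity sketch of the big-endian operand encoding used by bytes()
# above, independent of the (assumed) instruction class. The opcode/operand
# values are hypothetical PUSH2 values, not taken from the source.
opcode, operand, operand_size = 0x61, 0x0102, 2
encoded = bytes([opcode]) + operand.to_bytes(operand_size, byteorder='big')
assert encoded == b'\x61\x01\x02'  # opcode first, then operand bytes, most significant first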
def reverse(rev_inputs=REV_INPUTS):
    """
    reverse the key, value in dictionary file

    :param rev_inputs: the files to be reversed
    :return: None
    """
    dirname = os.path.dirname(__file__)
    for in_file in rev_inputs:
        reversed_dict = {}
        input_file = in_file + '.txt'
        output_file = in_file + 'Rev.txt'
        input_file = os.path.join(dirname, DICT_DIRECTORY, input_file)
        output_file = os.path.join(dirname, DICT_DIRECTORY, output_file)
        with open(input_file, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                split = line.split('\t')
                if len(split) < 2:
                    continue
                term1 = split[0]
                term2 = split[1]
                for char in term2.split(' '):
                    if char in reversed_dict:
                        reversed_dict[char].append(term1)
                    else:
                        reversed_dict[char] = [term1]
        with open(output_file, 'w', encoding='utf-8') as f:
            for key in reversed_dict:
                line = key + '\t' + ' '.join(reversed_dict[key]) + '\n'
                f.write(line)
def merge(mer_inputs=MER_INPUTS, mer_output=MER_OUTPUT):
    """
    merge the phrase files into one file

    :param mer_inputs: the phrase files
    :param mer_output: the output file
    :return: None
    """
    dirname = os.path.dirname(__file__)
    output_file = os.path.join(dirname, DICT_DIRECTORY, mer_output)
    lines = []
    # iterate over the parameter, not the module-level MER_INPUTS constant,
    # which would silently ignore any caller-supplied input list
    for in_file in mer_inputs:
        input_file = os.path.join(dirname, DICT_DIRECTORY, in_file)
        with open(input_file, encoding='utf-8') as f:
            for line in f:
                lines.append(line)
    with open(output_file, 'w', encoding='utf-8') as f:
        for line in lines:
            f.write(line)
def _adjust_delay(self, slot, response):
    """Define delay adjustment policy"""
    if response.status in self.retry_http_codes:
        # Back off multiplicatively, clamped to [mindelay, maxdelay]
        new_delay = max(slot.delay, 1) * 4
        new_delay = max(new_delay, self.mindelay)
        new_delay = min(new_delay, self.maxdelay)
        slot.delay = new_delay
        self.stats.inc_value('delay_count')
    elif response.status == 200:
        # Recover by halving the delay; drop to 0 below a small threshold
        new_delay = max(slot.delay / 2, self.mindelay)
        if new_delay < 0.01:
            new_delay = 0
        slot.delay = new_delay
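# An illustration of the backoff policy above, under hypothetical bounds
# mindelay=1 and maxdelay=60: a retryable response turns a delay of 5 into
# max(5, 1) * 4 = 20, a second one into min(80, 60) = 60 (clamped), and a
# subsequent 200 response halves it back to 30.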
def memberness(context):
    '''The likelihood that the context is a "member".'''
    if context:
        texts = context.xpath('.//*[local-name()="explicitMember"]/text()').extract()
        text = str(texts).lower()
        if len(texts) > 1:
            return 2
        elif 'country' in text:
            return 2
        elif 'member' not in text:
            return 0
        elif 'successor' in text:
            # 'SuccessorMember' is a rare case that shouldn't be treated as member
            return 1
        elif 'parent' in text:
            return 2
    return 3

def parse_10qk(self, response):
    '''Parse 10-Q or 10-K XML report.'''
    loader = ReportItemLoader(response=response)
    item = loader.load_item()
    if 'doc_type' in item:
        doc_type = item['doc_type']
        if doc_type in ('10-Q', '10-K'):
            return item
    return None
def camelcase(string):
    """Convert string into camel case.

    Args:
        string: String to convert.

    Returns:
        string: Camel case string.
    """
    string = re.sub(r"^[\-_\.]", '', str(string))
    if not string:
        return string
    return lowercase(string[0]) + re.sub(
        r"[\-_\.\s]([a-z])",
        lambda matched: uppercase(matched.group(1)),
        string[1:])

def capitalcase(string):
    """Convert string into capital case.
    First letters will be uppercase.

    Args:
        string: String to convert.

    Returns:
        string: Capital case string.
    """
    string = str(string)
    if not string:
        return string
    return uppercase(string[0]) + string[1:]

def pathcase(string):
    """Convert string into path case.
    Join punctuation with slash.

    Args:
        string: String to convert.

    Returns:
        string: Path cased string.
    """
    string = snakecase(string)
    if not string:
        return string
    return re.sub(r"_", "/", string)
def backslashcase(string):
    """Convert string into backslash case.
    Join punctuation with backslash.

    Args:
        string: String to convert.

    Returns:
        string: Backslash cased string.
    """
    str1 = re.sub(r"_", r"\\", snakecase(string))
    return str1
def sentencecase(string):
    """Convert string into sentence case.
    First letter is capped and punctuation is joined with spaces.

    Args:
        string: String to convert.

    Returns:
        string: Sentence cased string.
    """
    joiner = ' '
    string = re.sub(r"[\-_\.\s]", joiner, str(string))
    if not string:
        return string
    return capitalcase(trimcase(
        re.sub(r"[A-Z]", lambda matched: joiner + lowercase(matched.group(0)), string)
    ))

def snakecase(string):
    """Convert string into snake case.
    Join punctuation with underscore.

    Args:
        string: String to convert.

    Returns:
        string: Snake cased string.
    """
    string = re.sub(r"[\-\.\s]", '_', str(string))
    if not string:
        return string
    return lowercase(string[0]) + re.sub(
        r"[A-Z]", lambda matched: '_' + lowercase(matched.group(0)), string[1:])
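# A minimal usage sketch of the case converters above (assumes the helpers
# lowercase/uppercase/trimcase defined elsewhere in this module behave like
# str.lower/str.upper/str.strip):
# camelcase('foo_bar')    -> 'fooBar'
# snakecase('fooBar')     -> 'foo_bar'
# pathcase('foo_bar_baz') -> 'foo/bar/baz'
# sentencecase('foo_bar') -> 'Foo bar'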
def _check_input(self, input):
    """Checks the validity of the input.

    In case of an invalid input throws ValueError.
    """
    if isinstance(input, str):
        return 'st'
    elif isinstance(input, list):
        if all(isinstance(item, str) for item in input):
            return 'gst'

    raise ValueError("String argument should be of type String or"
                     " a list of strings")

def build(self, x):
    """Builds the Suffix tree on the given input.
    If the input is of type List of Strings:
    Generalized Suffix Tree is built.

    :param x: String or List of Strings
    """
    type = self._check_input(x)

    if type == 'st':
        x += next(self._terminalSymbolsGenerator())
        self._build(x)
    if type == 'gst':
        self._build_generalized(x)

def _build_McCreight(self, x):
    """Builds a Suffix tree using McCreight O(n) algorithm.

    Algorithm based on:
    McCreight, Edward M. "A space-economical suffix tree construction algorithm." - ACM, 1976.
    Implementation based on:
    UH CS - 58093 String Processing Algorithms Lecture Notes
    """
    u = self.root
    d = 0
    for i in range(len(x)):
        while u.depth == d and u._has_transition(x[d + i]):
            u = u._get_transition_link(x[d + i])
            d = d + 1
            while d < u.depth and x[u.idx + d] == x[i + d]:
                d = d + 1
        if d < u.depth:
            u = self._create_node(x, u, d)
        self._create_leaf(x, i, u, d)
        if not u._get_suffix_link():
            self._compute_slink(x, u)
        u = u._get_suffix_link()
        d = d - 1
        if d < 0:
            d = 0

def _build_generalized(self, xs):
    """Builds a Generalized Suffix Tree (GST) from the array of strings provided."""
    terminal_gen = self._terminalSymbolsGenerator()

    _xs = ''.join([x + next(terminal_gen) for x in xs])
    self.word = _xs
    self._generalized_word_starts(xs)
    self._build(_xs)
    self.root._traverse(self._label_generalized)

def _label_generalized(self, node):
    """Helper method that labels the nodes of GST with indexes of strings
    found in their descendants.
    """
    if node.is_leaf():
        x = {self._get_word_start_index(node.idx)}
    else:
        x = {n for ns in node.transition_links for n in ns[0].generalized_idxs}
    node.generalized_idxs = x

def _get_word_start_index(self, idx):
    """Helper method that returns the index of the string based on node's
    starting index."""
    i = 0
    for _idx in self.word_starts[1:]:
        if idx < _idx:
            return i
        else:
            i += 1
    return i

def lcs(self, stringIdxs=-1):
    """Returns the Largest Common Substring of Strings provided in stringIdxs.
    If stringIdxs is not provided, the LCS of all strings is returned.

    :param stringIdxs: Optional: List of indexes of strings.
    """
    if stringIdxs == -1 or not isinstance(stringIdxs, list):
        stringIdxs = set(range(len(self.word_starts)))
    else:
        stringIdxs = set(stringIdxs)

    deepestNode = self._find_lcs(self.root, stringIdxs)
    start = deepestNode.idx
    end = deepestNode.idx + deepestNode.depth
    return self.word[start:end]
def _find_lcs(self, node, stringIdxs):
    """Helper method that finds LCS by traversing the labeled GST."""
    nodes = [self._find_lcs(n, stringIdxs)
             for (n, _) in node.transition_links
             if n.generalized_idxs.issuperset(stringIdxs)]

    if nodes == []:
        return node

    deepestNode = max(nodes, key=lambda n: n.depth)
    return deepestNode
def _generalized_word_starts(self, xs):
    """Helper method returns the starting indexes of strings in GST."""
    self.word_starts = []
    i = 0
    for n in range(len(xs)):
        self.word_starts.append(i)
        i += len(xs[n]) + 1

def find(self, y):
    """Returns starting position of the substring y in the string used for
    building the Suffix tree.

    :param y: String
    :return: Index of the starting position of string y in the string used for building the Suffix tree
             -1 if y is not a substring.
    """
    node = self.root
    while True:
        edge = self._edgeLabel(node, node.parent)
        if edge.startswith(y):
            return node.idx

        i = 0
        while i < len(edge) and edge[i] == y[0]:
            y = y[1:]
            i += 1

        if i != 0:
            if i == len(edge) and y != '':
                pass
            else:
                return -1

        node = node._get_transition_link(y[0])
        if not node:
            return -1
def _edgeLabel(self, node, parent):
    """Helper method, returns the edge label between a node and its parent."""
    return self.word[node.idx + parent.depth: node.idx + node.depth]
def _terminalSymbolsGenerator(self):
    """Generator of unique terminal symbols used for building the Generalized Suffix Tree.
    Unicode Private Use Area U+E000..U+F8FF is used to ensure that terminal symbols
    are not part of the input string.
    """
    py2 = sys.version[0] < '3'
    UPPAs = list(list(range(0xE000, 0xF8FF + 1)) +
                 list(range(0xF0000, 0xFFFFD + 1)) +
                 list(range(0x100000, 0x10FFFD + 1)))
    for i in UPPAs:
        if py2:
            yield (unichr(i))
        else:
            yield (chr(i))
    raise ValueError("Too many input strings.")
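# A minimal usage sketch, assuming these methods belong to a suffix tree class
# (hypothetically named STree) whose constructor calls build() on its argument:
# st = STree(["abeceda", "abecednik"])  # a list builds a Generalized Suffix Tree
# st.lcs()                              # -> 'abeced' (longest common substring)
# st = STree("abcdefghab")              # a single string builds a plain Suffix tree
# st.find("abc")                        # -> 0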
def _dist(self, x, y, A):
    "(x - y)^T A (x - y)"
    return scipy.spatial.distance.mahalanobis(x, y, A) ** 2

def query(self, i, j):
    "Query the oracle to find out whether i and j should be must-linked"
    if self.queries_cnt < self.max_queries_cnt:
        self.queries_cnt += 1
        return self.labels[i] == self.labels[j]
    else:
        raise MaximumQueriesExceeded

def preprocess_constraints(ml, cl, n):
    "Create a graph of constraints for both must- and cannot-links"

    # Represent the graphs using adjacency-lists
    ml_graph, cl_graph = {}, {}
    for i in range(n):
        ml_graph[i] = set()
        cl_graph[i] = set()

    def add_both(d, i, j):
        d[i].add(j)
        d[j].add(i)

    for (i, j) in ml:
        ml_graph[i].add(j)
        ml_graph[j].add(i)

    for (i, j) in cl:
        cl_graph[i].add(j)
        cl_graph[j].add(i)

    def dfs(i, graph, visited, component):
        visited[i] = True
        for j in graph[i]:
            if not visited[j]:
                dfs(j, graph, visited, component)
        component.append(i)

    # Run DFS from each node to get all the graph's components
    # and add an edge for each pair of nodes in the component (create a complete graph)
    # See http://www.techiedelight.com/transitive-closure-graph/ for more details
    visited = [False] * n
    neighborhoods = []
    for i in range(n):
        if not visited[i] and ml_graph[i]:
            component = []
            dfs(i, ml_graph, visited, component)
            for x1 in component:
                for x2 in component:
                    if x1 != x2:
                        ml_graph[x1].add(x2)
            neighborhoods.append(component)

    for (i, j) in cl:
        for x in ml_graph[i]:
            add_both(cl_graph, x, j)
        for y in ml_graph[j]:
            add_both(cl_graph, i, y)
        for x in ml_graph[i]:
            for y in ml_graph[j]:
                add_both(cl_graph, x, y)

    for i in ml_graph:
        for j in ml_graph[i]:
            if j != i and j in cl_graph[i]:
                raise InconsistentConstraintsException(
                    'Inconsistent constraints between {} and {}'.format(i, j))

    return ml_graph, cl_graph, neighborhoods
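# A minimal sketch of the transitive-closure behaviour of preprocess_constraints()
# above, on a tiny hypothetical constraint set:
# ml_graph, cl_graph, hoods = preprocess_constraints(ml=[(0, 1), (1, 2)], cl=[(2, 3)], n=4)
# ml_graph[0] == {1, 2}                        # must-links 0-1 and 1-2 imply 0-2
# cl_graph[3] == {0, 1, 2}                     # cannot-link 2-3 propagates to 2's component
# len(hoods) == 1 and set(hoods[0]) == {0, 1, 2}  # one must-link neighborhood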
def make_pmml_pipeline(obj, active_fields = None, target_fields = None):
    """Translates a regular Scikit-Learn estimator or pipeline to a PMML pipeline.

    Parameters:
    ----------
    obj: BaseEstimator
        The object.

    active_fields: list of strings, optional
        Feature names. If missing, "x1", "x2", .., "xn" are assumed.

    target_fields: list of strings, optional
        Label name(s). If missing, "y" is assumed.
    """
    steps = _filter_steps(_get_steps(obj))
    pipeline = PMMLPipeline(steps)
    if active_fields is not None:
        pipeline.active_fields = numpy.asarray(active_fields)
    if target_fields is not None:
        pipeline.target_fields = numpy.asarray(target_fields)
    return pipeline

def sklearn2pmml(pipeline, pmml, user_classpath = [], with_repr = False, debug = False, java_encoding = "UTF-8"):
    """Converts a fitted Scikit-Learn pipeline to PMML.

    Parameters:
    ----------
    pipeline: PMMLPipeline
        The pipeline.

    pmml: string
        The path to where the PMML document should be stored.

    user_classpath: list of strings, optional
        The paths to JAR files that provide custom Transformer, Selector and/or Estimator converter classes.
        The JPMML-SkLearn classpath is constructed by appending user JAR files to package JAR files.

    with_repr: boolean, optional
        If true, insert the string representation of pipeline into the PMML document.

    debug: boolean, optional
        If true, print information about the conversion process.

    java_encoding: string, optional
        The character encoding to use for decoding Java output and error byte streams.
    """
    if debug:
        java_version = _java_version(java_encoding)
        if java_version is None:
            java_version = ("java", "N/A")
        print("python: {0}".format(platform.python_version()))
        print("sklearn: {0}".format(sklearn.__version__))
        print("sklearn.externals.joblib: {0}".format(joblib.__version__))
        print("pandas: {0}".format(pandas.__version__))
        print("sklearn_pandas: {0}".format(sklearn_pandas.__version__))
        print("sklearn2pmml: {0}".format(__version__))
        print("{0}: {1}".format(java_version[0], java_version[1]))
    if not isinstance(pipeline, PMMLPipeline):
        raise TypeError("The pipeline object is not an instance of " + PMMLPipeline.__name__ + ". Use the 'sklearn2pmml.make_pmml_pipeline(obj)' utility function to translate a regular Scikit-Learn estimator or pipeline to a PMML pipeline")
    estimator = pipeline._final_estimator
    cmd = ["java", "-cp", os.pathsep.join(_classpath(user_classpath)), "org.jpmml.sklearn.Main"]
    dumps = []
    try:
        if with_repr:
            pipeline.repr_ = repr(pipeline)
        # if isinstance(estimator, H2OEstimator):
        if hasattr(estimator, "download_mojo"):
            estimator_mojo = estimator.download_mojo()
            dumps.append(estimator_mojo)
            estimator._mojo_path = estimator_mojo
        pipeline_pkl = _dump(pipeline, "pipeline")
        cmd.extend(["--pkl-pipeline-input", pipeline_pkl])
        dumps.append(pipeline_pkl)
        cmd.extend(["--pmml-output", pmml])
        if debug:
            print("Executing command:\n{0}".format(" ".join(cmd)))
        try:
            process = Popen(cmd, stdout = PIPE, stderr = PIPE, bufsize = 1)
        except OSError:
            raise RuntimeError("Java is not installed, or the Java executable is not on system path")
        output, error = process.communicate()
        retcode = process.poll()
        if debug or retcode:
            if len(output) > 0:
                print("Standard output:\n{0}".format(_decode(output, java_encoding)))
            else:
                print("Standard output is empty")
            if len(error) > 0:
                print("Standard error:\n{0}".format(_decode(error, java_encoding)))
            else:
                print("Standard error is empty")
        if retcode:
            raise RuntimeError("The JPMML-SkLearn conversion application has failed. The Java executable should have printed more information about the failure into its standard output and/or standard error streams")
    finally:
        if debug:
            print("Preserved joblib dump file(s): {0}".format(" ".join(dumps)))
        else:
            for dump in dumps:
                os.remove(dump)
def make_tpot_pmml_config(config, user_classpath = []):
    """Translates a regular TPOT configuration to a PMML-compatible TPOT configuration.

    Parameters:
    ----------
    config: dict
        The configuration dictionary.

    user_classpath: list of strings, optional
        The paths to JAR files that provide custom Transformer, Selector and/or Estimator converter classes.
        The JPMML-SkLearn classpath is constructed by appending user JAR files to package JAR files.
    """
    tpot_keys = set(config.keys())
    classes = _supported_classes(user_classpath)
    pmml_keys = (set(classes)).union(set([_strip_module(class_) for class_ in classes]))
    return {key: config[key] for key in (tpot_keys).intersection(pmml_keys)}
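# A minimal usage sketch of the two entry points above (the estimator and the
# output path are illustrative; conversion requires a fitted model and a local
# Java installation):
# from sklearn.tree import DecisionTreeClassifier
# clf = DecisionTreeClassifier().fit(X, y)
# pipeline = make_pmml_pipeline(clf, active_fields=["x1", "x2"], target_fields=["y"])
# sklearn2pmml(pipeline, "DecisionTree.pmml", with_repr=True)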
def construct_formset(self):
    """
    Returns an instance of the formset
    """
    formset_class = self.get_formset()

    if hasattr(self, 'get_extra_form_kwargs'):
        klass = type(self).__name__
        raise DeprecationWarning(
            'Calling {0}.get_extra_form_kwargs is no longer supported. '
            'Set `form_kwargs` in {0}.formset_kwargs or override '
            '{0}.get_formset_kwargs() directly.'.format(klass),
        )
    return formset_class(**self.get_formset_kwargs())

def get_formset_kwargs(self):
    """
    Returns the keyword arguments for instantiating the formset.
    """
    kwargs = self.formset_kwargs.copy()
    kwargs.update({
        'initial': self.get_initial(),
        'prefix': self.get_prefix(),
    })

    if self.request.method in ('POST', 'PUT'):
        kwargs.update({
            'data': self.request.POST.copy(),
            'files': self.request.FILES,
        })
    return kwargs

def get_factory_kwargs(self):
    """
    Returns the keyword arguments for calling the formset factory
    """
    # Perform deprecation check
    for attr in ['extra', 'max_num', 'can_order', 'can_delete', 'ct_field',
                 'formfield_callback', 'fk_name', 'widgets', 'ct_fk_field']:
        if hasattr(self, attr):
            klass = type(self).__name__
            raise DeprecationWarning(
                'Setting `{0}.{1}` at the class level is now deprecated. '
                'Set `{0}.factory_kwargs` instead.'.format(klass, attr)
            )

    kwargs = self.factory_kwargs.copy()
    if self.get_formset_class():
        kwargs['formset'] = self.get_formset_class()
    return kwargs

def get_success_url(self):
    """
    Returns the supplied URL.
    """
    if self.success_url:
        url = self.success_url
    else:
        # Default to returning to the same page
        url = self.request.get_full_path()
    return url

def get_formset_kwargs(self):
    """
    Returns the keyword arguments for instantiating the formset.
    """
    kwargs = super(ModelFormSetMixin, self).get_formset_kwargs()
    kwargs['queryset'] = self.get_queryset()
    return kwargs

def formset_valid(self, formset):
    """
    If the formset is valid, save the associated models.
    """
    self.object_list = formset.save()
    return super(ModelFormSetMixin, self).formset_valid(formset)

def get_formset_kwargs(self):
    """
    Returns the keyword arguments for instantiating the formset.
    """
    # Perform deprecation check
    if hasattr(self, 'save_as_new'):
        klass = type(self).__name__
        raise DeprecationWarning(
            'Setting `{0}.save_as_new` at the class level is now '
            'deprecated. Set `{0}.formset_kwargs` instead.'.format(klass)
        )

    kwargs = super(BaseInlineFormSetFactory, self).get_formset_kwargs()
    kwargs['instance'] = self.object
    return kwargs

def get_factory_kwargs(self):
    """
    Returns the keyword arguments for calling the formset factory
    """
    kwargs = super(BaseInlineFormSetFactory, self).get_factory_kwargs()
    kwargs.setdefault('fields', self.fields)
    kwargs.setdefault('exclude', self.exclude)
    if self.get_form_class():
        kwargs['form'] = self.get_form_class()
    return kwargs

def get(self, request, *args, **kwargs):
    """
    Handles GET requests and instantiates a blank version of the formset.
    """
    formset = self.construct_formset()
    return self.render_to_response(self.get_context_data(formset=formset))
def post(self, request, *args, **kwargs):
    """
    Handles POST requests, instantiating a formset instance with the passed
    POST variables and then checking it for validity.
    """
    formset = self.construct_formset()
    if formset.is_valid():
        return self.formset_valid(formset)
    else:
        return self.formset_invalid(formset)
def construct_formset(self):
    """
    Overrides construct_formset to attach the model class as
    an attribute of the returned formset instance.
    """
    formset = super(InlineFormSetFactory, self).construct_formset()
    formset.model = self.inline_model
    return formset

def forms_valid(self, form, inlines):
    """
    If the form and formsets are valid, save the associated models.
    """
    response = self.form_valid(form)
    for formset in inlines:
        formset.save()
    return response

def forms_invalid(self, form, inlines):
    """
    If the form or formsets are invalid, re-render the context data with the
    data-filled form and formsets and errors.
    """
    return self.render_to_response(self.get_context_data(form=form, inlines=inlines))

def construct_inlines(self):
    """
    Returns the inline formset instances
    """
    inline_formsets = []
    for inline_class in self.get_inlines():
        inline_instance = inline_class(self.model, self.request, self.object, self.kwargs, self)
        inline_formset = inline_instance.construct_formset()
        inline_formsets.append(inline_formset)
    return inline_formsets

def get(self, request, *args, **kwargs):
    """
    Handles GET requests and instantiates a blank version of the form and formsets.
    """
    form_class = self.get_form_class()
    form = self.get_form(form_class)
    inlines = self.construct_inlines()
    return self.render_to_response(self.get_context_data(form=form, inlines=inlines, **kwargs))
def post(self, request, *args, **kwargs):
    """
    Handles POST requests, instantiating a form and formset instances with the
    passed POST variables and then checking them for validity.
    """
    form_class = self.get_form_class()
    form = self.get_form(form_class)

    if form.is_valid():
        self.object = form.save(commit=False)
        form_validated = True
    else:
        form_validated = False

    inlines = self.construct_inlines()

    if all_valid(inlines) and form_validated:
        return self.forms_valid(form, inlines)
    return self.forms_invalid(form, inlines)
def get_context_data(self, **kwargs):
    """
    If `inlines_names` has been defined, add each formset to the context under
    its corresponding entry in `inlines_names`
    """
    context = {}
    inlines_names = self.get_inlines_names()

    if inlines_names:
        # We have formset or inlines in context, but never both
        context.update(zip(inlines_names, kwargs.get('inlines', [])))
        if 'formset' in kwargs:
            context[inlines_names[0]] = kwargs['formset']
    context.update(kwargs)
    return super(NamedFormsetsMixin, self).get_context_data(**context)
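# A minimal usage sketch in the style of django-extra-views, showing how the
# view machinery above is typically wired up (Order/Item models and their
# fields are illustrative, not from the source):
# class ItemInline(InlineFormSetFactory):
#     model = Item
#     fields = ['sku', 'price']
#
# class CreateOrderView(CreateWithInlinesView):
#     model = Order
#     inlines = [ItemInline]
#     fields = ['customer']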
def try_convert_to_date(self, word):
    """
    Tries to convert word to a date (datetime) using search_date_formats.
    Returns None if the word fits none of the formats.
    """
    for frm in self.search_date_formats:
        try:
            return datetime.datetime.strptime(word, frm).date()
        except ValueError:
            pass
    return None

def get_params_for_field(self, field_name, sort_type=None):
    """
    If sort_type is None, invert the current sort for the field;
    if the field is not sorted yet, use 'asc'.
    """
    if not sort_type:
        if self.initial_sort == field_name:
            sort_type = 'desc' if self.initial_sort_type == 'asc' else 'asc'
        else:
            sort_type = 'asc'
    self.initial_params[self.sort_param_name] = self.sort_fields[field_name]
    self.initial_params[self.sort_type_param_name] = sort_type
    return '?%s' % self.initial_params.urlencode()
def get_start_date(self, obj):
    """
    Returns the start date for a model instance
    """
    obj_date = getattr(obj, self.get_date_field())
    try:
        obj_date = obj_date.date()
    except AttributeError:
        # It's a date rather than datetime, so we use it as is
        pass
    return obj_date

def get_end_date(self, obj):
    """
    Returns the end date for a model instance
    """
    obj_date = getattr(obj, self.get_end_date_field())
    try:
        obj_date = obj_date.date()
    except AttributeError:
        # It's a date rather than datetime, so we use it as is
        pass
    return obj_date

def get_first_of_week(self):
    """
    Returns an integer representing the first day of the week.

    0 represents Monday, 6 represents Sunday.
    """
    if self.first_of_week is None:
        raise ImproperlyConfigured("%s.first_of_week is required." % self.__class__.__name__)
    if self.first_of_week not in range(7):
        raise ImproperlyConfigured("%s.first_of_week must be an integer between 0 and 6." % self.__class__.__name__)
    return self.first_of_week

def get_queryset(self):
    """
    Returns a queryset of models for the month requested
    """
    qs = super(BaseCalendarMonthView, self).get_queryset()

    year = self.get_year()
    month = self.get_month()

    date_field = self.get_date_field()
    end_date_field = self.get_end_date_field()

    date = _date_from_string(year, self.get_year_format(),
                             month, self.get_month_format())

    since = date
    until = self.get_next_month(date)

    # Adjust our start and end dates to allow for next and previous
    # month edges
    if since.weekday() != self.get_first_of_week():
        diff = math.fabs(since.weekday() - self.get_first_of_week())
        since = since - datetime.timedelta(days=diff)

    if until.weekday() != ((self.get_first_of_week() + 6) % 7):
        diff = math.fabs(((self.get_first_of_week() + 6) % 7) - until.weekday())
        until = until + datetime.timedelta(days=diff)

    if end_date_field:
        # 5 possible conditions for showing an event:
        # 1) Single day event, starts after 'since'
        # 2) Multi-day event, starts after 'since' and ends before 'until'
        # 3) Starts before 'since' and ends after 'since' and before 'until'
        # 4) Starts after 'since' but before 'until' and ends after 'until'
        # 5) Starts before 'since' and ends after 'until'
        predicate1 = Q(**{
            '%s__gte' % date_field: since,
            end_date_field: None
        })
        predicate2 = Q(**{
            '%s__gte' % date_field: since,
            '%s__lt' % end_date_field: until
        })
        predicate3 = Q(**{
            '%s__lt' % date_field: since,
            '%s__gte' % end_date_field: since,
            '%s__lt' % end_date_field: until
        })
        predicate4 = Q(**{
            '%s__gte' % date_field: since,
            '%s__lt' % date_field: until,
            '%s__gte' % end_date_field: until
        })
        predicate5 = Q(**{
            '%s__lt' % date_field: since,
            '%s__gte' % end_date_field: until
        })
        return qs.filter(predicate1 | predicate2 | predicate3 | predicate4 | predicate5)
    return qs.filter(**{
        '%s__gte' % date_field: since
    })

def get_context_data(self, **kwargs):
    """
    Injects variables necessary for rendering the calendar into the context.

    Variables added are: `calendar`, `weekdays`, `month`, `next_month` and
    `previous_month`.
    """
    data = super(BaseCalendarMonthView, self).get_context_data(**kwargs)

    year = self.get_year()
    month = self.get_month()

    date = _date_from_string(year, self.get_year_format(),
                             month, self.get_month_format())

    cal = Calendar(self.get_first_of_week())

    month_calendar = []
    now = datetime.datetime.utcnow()

    date_lists = defaultdict(list)
    multidate_objs = []

    for obj in data['object_list']:
        obj_date = self.get_start_date(obj)
        end_date_field = self.get_end_date_field()

        if end_date_field:
            end_date = self.get_end_date(obj)
            if end_date and end_date != obj_date:
                multidate_objs.append({
                    'obj': obj,
                    'range': [x for x in daterange(obj_date, end_date)]
                })
                continue  # We don't put multi-day events in date_lists
        date_lists[obj_date].append(obj)

    for week in cal.monthdatescalendar(date.year, date.month):
        week_range = set(daterange(week[0], week[6]))
        week_events = []

        for val in multidate_objs:
            intersect_length = len(week_range.intersection(val['range']))

            if intersect_length:
                # Event happens during this week
                slot = 1
                width = intersect_length  # How many days is the event during this week?
                nowrap_previous = True    # Does the event continue from the previous week?
                nowrap_next = True        # Does the event continue to the next week?

                if val['range'][0] >= week[0]:
                    slot = 1 + (val['range'][0] - week[0]).days
                else:
                    nowrap_previous = False
                if val['range'][-1] > week[6]:
                    nowrap_next = False

                week_events.append({
                    'event': val['obj'],
                    'slot': slot,
                    'width': width,
                    'nowrap_previous': nowrap_previous,
                    'nowrap_next': nowrap_next,
                })

        week_calendar = {
            'events': week_events,
            'date_list': [],
        }
        for day in week:
            week_calendar['date_list'].append({
                'day': day,
                'events': date_lists[day],
                'today': day == now.date(),
                'is_current_month': day.month == date.month,
            })
        month_calendar.append(week_calendar)

    data['calendar'] = month_calendar
    data['weekdays'] = [DAYS[x] for x in cal.iterweekdays()]
    data['month'] = date
    data['next_month'] = self.get_next_month(date)
    data['previous_month'] = self.get_previous_month(date)

    return data
def read_version():
    """Read version from __init__.py without loading any files"""
    finder = VersionFinder()
    path = os.path.join(PROJECT_ROOT, 'colorful', '__init__.py')
    with codecs.open(path, 'r', encoding='utf-8') as fp:
        file_data = fp.read().encode('utf-8')
        finder.visit(ast.parse(file_data))
    return finder.version

def with_setup(self, colormode=None, colorpalette=None, extend_colors=False):
    """
    Return a new Colorful object with the given color config.
    """
    colorful = Colorful(
        colormode=self.colorful.colormode,
        colorpalette=copy.copy(self.colorful.colorpalette)
    )
    colorful.setup(
        colormode=colormode, colorpalette=colorpalette, extend_colors=extend_colors
    )
    yield colorful

def parse_colors(path):
    """Parse the given color files.

    Supported are:
        * .txt for X11 colors
        * .json for colornames
    """
    if path.endswith(".txt"):
        return parse_rgb_txt_file(path)
    elif path.endswith(".json"):
        return parse_json_color_file(path)

    raise TypeError("colorful only supports .txt and .json files for colors")

def parse_rgb_txt_file(path):
    """
    Parse the given rgb.txt file into a Python dict.

    See https://en.wikipedia.org/wiki/X11_color_names for more information

    :param str path: the path to the X11 rgb.txt file
    """
    #: Holds the generated color dict
    color_dict = {}

    with open(path, 'r') as rgb_txt:
        for line in rgb_txt:
            line = line.strip()
            if not line or line.startswith('!'):
                continue  # skip comments

            parts = line.split()
            color_dict[" ".join(parts[3:])] = (int(parts[0]), int(parts[1]), int(parts[2]))

    return color_dict

def parse_json_color_file(path):
    """Parse a JSON color file.

    The JSON has to be in the following format:

    .. code:: json

        [{"name": "COLOR_NAME", "hex": "#HEX"}, ...]

    :param str path: the path to the JSON color file
    """
    with open(path, "r") as color_file:
        color_list = json.load(color_file)

    # transform raw color list into color dict
    color_dict = {c["name"]: c["hex"] for c in color_list}
    return color_dict
def sanitize_color_palette(colorpalette):
    """
    Sanitize the given color palette so it can
    be safely used by Colorful.

    It will convert colors specified in hex RGB to
    a RGB channel triplet.
    """
    new_palette = {}

    def __make_valid_color_name(name):
        """
        Convert the given name into a valid colorname
        """
        if len(name) == 1:
            name = name[0]
            return name[:1].lower() + name[1:]

        return name[0].lower() + ''.join(word.capitalize() for word in name[1:])

    for key, value in colorpalette.items():
        if isinstance(value, str):
            # we assume it's a hex RGB value
            value = utils.hex_to_rgb(value)
        new_palette[__make_valid_color_name(key.split())] = value

    return new_palette
def show():
    """
    Show the modifiers and colors
    """
    # modifiers
    sys.stdout.write(colorful.bold('bold') + ' ')
    sys.stdout.write(colorful.dimmed('dimmed') + ' ')
    sys.stdout.write(colorful.italic('italic') + ' ')
    sys.stdout.write(colorful.underlined('underlined') + ' ')
    sys.stdout.write(colorful.inversed('inversed') + ' ')
    sys.stdout.write(colorful.concealed('concealed') + ' ')
    sys.stdout.write(colorful.struckthrough('struckthrough') + '\n')

    # foreground colors
    sys.stdout.write(colorful.red('red') + ' ')
    sys.stdout.write(colorful.green('green') + ' ')
    sys.stdout.write(colorful.yellow('yellow') + ' ')
    sys.stdout.write(colorful.blue('blue') + ' ')
    sys.stdout.write(colorful.magenta('magenta') + ' ')
    sys.stdout.write(colorful.cyan('cyan') + ' ')
    sys.stdout.write(colorful.white('white') + '\n')

    # background colors
    sys.stdout.write(colorful.on_red('red') + ' ')
    sys.stdout.write(colorful.on_green('green') + ' ')
    sys.stdout.write(colorful.on_yellow('yellow') + ' ')
    sys.stdout.write(colorful.on_blue('blue') + ' ')
    sys.stdout.write(colorful.on_magenta('magenta') + ' ')
    sys.stdout.write(colorful.on_cyan('cyan') + ' ')
    sys.stdout.write(colorful.on_white('white') + '\n')

def show():
    """
    Show the modifiers and colors
    """
    with colorful.with_style('monokai') as c:
        # modifiers
        sys.stdout.write(c.bold('bold') + ' ')
        sys.stdout.write(c.dimmed('dimmed') + ' ')
        sys.stdout.write(c.italic('italic') + ' ')
        sys.stdout.write(c.underlined('underlined') + ' ')
        sys.stdout.write(c.inversed('inversed') + ' ')
        sys.stdout.write(c.concealed('concealed') + ' ')
        sys.stdout.write(c.struckthrough('struckthrough') + '\n')

        # foreground colors
        sys.stdout.write(c.orange('orange') + ' ')
        sys.stdout.write(c.magenta('magenta') + ' ')
        sys.stdout.write(c.purple('purple') + ' ')
        sys.stdout.write(c.blue('blue') + ' ')
        sys.stdout.write(c.seaGreen('sea green') + ' ')
        sys.stdout.write(c.green('green') + ' ')
        sys.stdout.write(c.yellow('yellow') + '\n')

        # background colors
        sys.stdout.write(c.on_orange('orange') + ' ')
        sys.stdout.write(c.on_magenta('magenta') + ' ')
        sys.stdout.write(c.on_purple('purple') + ' ')
        sys.stdout.write(c.on_blue('blue') + ' ')
        sys.stdout.write(c.on_seaGreen('sea green') + ' ')
        sys.stdout.write(c.gray_on_green('green') + ' ')
        sys.stdout.write(c.gray_on_yellow('yellow') + '\n')
def detect_color_support(env):  # noqa
    """
    Detect what color palettes are supported.

    It'll return a valid color mode to use
    with colorful.

    :param dict env: the environment dict as returned by ``os.environ``
    """
    if env.get('COLORFUL_DISABLE', '0') == '1':
        return NO_COLORS

    if env.get('COLORFUL_FORCE_8_COLORS', '0') == '1':
        return ANSI_8_COLORS

    if env.get('COLORFUL_FORCE_16_COLORS', '0') == '1':
        return ANSI_16_COLORS

    if env.get('COLORFUL_FORCE_256_COLORS', '0') == '1':
        return ANSI_256_COLORS

    if env.get('COLORFUL_FORCE_TRUE_COLORS', '0') == '1':
        return TRUE_COLORS

    # if we are not a tty
    if not sys.stdout.isatty():
        return NO_COLORS

    colorterm_env = env.get('COLORTERM')
    if colorterm_env:
        if colorterm_env in {'truecolor', '24bit'}:
            return TRUE_COLORS

        if colorterm_env in {'8bit'}:
            return ANSI_256_COLORS

    termprog_env = env.get('TERM_PROGRAM')
    if termprog_env:
        if termprog_env in {'iTerm.app', 'Hyper'}:
            return TRUE_COLORS

        if termprog_env in {'Apple_Terminal'}:
            return ANSI_256_COLORS

    term_env = env.get('TERM')
    if term_env:
        if term_env in {'screen-256', 'screen-256color', 'xterm-256', 'xterm-256color'}:
            return ANSI_256_COLORS

        if term_env in {'screen', 'xterm', 'vt100', 'color', 'ansi', 'cygwin', 'linux'}:
            return ANSI_16_COLORS

    if colorterm_env:
        # if there was no match with $TERM either but we
        # had one with $COLORTERM, we use it!
        return ANSI_16_COLORS

    return ANSI_8_COLORS
def rgb_to_ansi256(r, g, b):
    """
    Convert RGB to ANSI 256 color
    """
    if r == g and g == b:
        # gray-scale ramp
        if r < 8:
            return 16
        if r > 248:
            return 231

        return round(((r - 8) / 247.0) * 24) + 232

    # 6x6x6 color cube
    ansi_r = 36 * round(r / 255.0 * 5.0)
    ansi_g = 6 * round(g / 255.0 * 5.0)
    ansi_b = round(b / 255.0 * 5.0)
    ansi = 16 + ansi_r + ansi_g + ansi_b
    return ansi

def rgb_to_ansi16(r, g, b, use_bright=False):
    """
    Convert RGB to ANSI 16 color
    """
    ansi_b = round(b / 255.0) << 2
    ansi_g = round(g / 255.0) << 1
    ansi_r = round(r / 255.0)
    ansi = (90 if use_bright else 30) + (ansi_b | ansi_g | ansi_r)

    return ansi

def hex_to_rgb(value):
    """
    Convert the given hex string to a valid RGB channel triplet.
    """
    value = value.lstrip('#')
    check_hex(value)

    length = len(value)
    step = int(length / 3)
    return tuple(int(value[i:i + step], 16) for i in range(0, length, step))
def check_hex(value):
    """
    Check if the given hex value is a valid RGB color

    It should match the format: [0-9a-fA-F]
    and be of length 3 or 6.
    """
    length = len(value)
    if length not in (3, 6):
        raise ValueError('Hex string #{} must be of length 3 or 6'.format(value))

    regex = r'[0-9a-f]{{{length}}}'.format(length=length)
    if not re.search(regex, value, re.I):
        raise ValueError('Invalid Hex String: #{}'.format(value))
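# A quick sanity sketch of the converters above:
# rgb_to_ansi256(255, 0, 0) -> 196 (the brightest pure red in the 6x6x6 cube)
# rgb_to_ansi16(255, 0, 0)  -> 31  (ANSI foreground red)
# hex_to_rgb('#c0ffee')     -> (192, 255, 238)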
def show():
    """
    Show the modifiers and colors
    """
    with colorful.with_style('solarized') as c:
        # modifiers
        sys.stdout.write(c.bold('bold') + ' ')
        sys.stdout.write(c.dimmed('dimmed') + ' ')
        sys.stdout.write(c.italic('italic') + ' ')
        sys.stdout.write(c.underlined('underlined') + ' ')
        sys.stdout.write(c.inversed('inversed') + ' ')
        sys.stdout.write(c.concealed('concealed') + ' ')
        sys.stdout.write(c.struckthrough('struckthrough') + '\n')

        # foreground colors
        sys.stdout.write(c.yellow('yellow') + ' ')
        sys.stdout.write(c.red('orange') + ' ')
        sys.stdout.write(c.red('red') + ' ')
        sys.stdout.write(c.magenta('magenta') + ' ')
        sys.stdout.write(c.magenta('violet') + ' ')
        sys.stdout.write(c.blue('blue') + ' ')
        sys.stdout.write(c.cyan('cyan') + ' ')
        sys.stdout.write(c.green('green') + '\n')

        # background colors
        sys.stdout.write(c.on_yellow('yellow') + ' ')
        sys.stdout.write(c.on_red('orange') + ' ')
        sys.stdout.write(c.on_red('red') + ' ')
        sys.stdout.write(c.on_magenta('magenta') + ' ')
        sys.stdout.write(c.on_magenta('violet') + ' ')
        sys.stdout.write(c.on_blue('blue') + ' ')
        sys.stdout.write(c.on_cyan('cyan') + ' ')
        sys.stdout.write(c.on_green('green') + '\n')

def translate_rgb_to_ansi_code(red, green, blue, offset, colormode):
    """
    Translate the given RGB color into the appropriate ANSI escape code
    for the given color mode. The offset is used for the base color
    which is used.

    The ``colormode`` has to be one of:
        * 0: no colors / disabled
        * 8: use ANSI 8 colors
        * 16: use ANSI 16 colors (same as 8 but with brightness)
        * 256: use ANSI 256 colors
        * 0xFFFFFF / 16777215: use 16 Million true colors

    :param int red: the red channel value
    :param int green: the green channel value
    :param int blue: the blue channel value
    :param int offset: the offset to use for the base color
    :param int colormode: the color mode to use. See explanation above
    """
    if colormode == terminal.NO_COLORS:  # colors are disabled, thus return empty string
        return '', ''

    if colormode == terminal.ANSI_8_COLORS or colormode == terminal.ANSI_16_COLORS:
        color_code = ansi.rgb_to_ansi16(red, green, blue)
        start_code = ansi.ANSI_ESCAPE_CODE.format(
            code=color_code + offset - ansi.FOREGROUND_COLOR_OFFSET)
        end_code = ansi.ANSI_ESCAPE_CODE.format(code=offset + ansi.COLOR_CLOSE_OFFSET)
        return start_code, end_code

    if colormode == terminal.ANSI_256_COLORS:
        color_code = ansi.rgb_to_ansi256(red, green, blue)
        start_code = ansi.ANSI_ESCAPE_CODE.format(code='{base};5;{code}'.format(
            base=8 + offset, code=color_code))
        end_code = ansi.ANSI_ESCAPE_CODE.format(code=offset + ansi.COLOR_CLOSE_OFFSET)
        return start_code, end_code

    if colormode == terminal.TRUE_COLORS:
        start_code = ansi.ANSI_ESCAPE_CODE.format(code='{base};2;{red};{green};{blue}'.format(
            base=8 + offset, red=red, green=green, blue=blue))
        end_code = ansi.ANSI_ESCAPE_CODE.format(code=offset + ansi.COLOR_CLOSE_OFFSET)
        return start_code, end_code

    raise ColorfulError('invalid color mode "{0}"'.format(colormode))
def translate_colorname_to_ansi_code(colorname, offset, colormode, colorpalette):
    """
    Translate the given color name to a valid
    ANSI escape code.

    :param str colorname: the name of the color to resolve
    :param str offset: the offset for the color code
    :param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``
    :param dict colorpalette: the color palette to use for the color name mapping

    :returns str: the color as ANSI escape code

    :raises ColorfulError: if the given color name is invalid
    """
    try:
        red, green, blue = colorpalette[colorname]
    except KeyError:
        raise ColorfulError('the color "{0}" is unknown. Use a color in your color palette (by default: X11 rgb.txt)'.format(  # noqa
            colorname))
    else:
        return translate_rgb_to_ansi_code(red, green, blue, offset, colormode)
def resolve_modifier_to_ansi_code(modifiername, colormode):
    """
    Resolve the given modifier name to a valid
    ANSI escape code.

    :param str modifiername: the name of the modifier to resolve
    :param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``

    :returns str: the ANSI escape code for the modifier

    :raises ColorfulError: if the given modifier name is invalid
    """
    if colormode == terminal.NO_COLORS:  # return empty string if colors are disabled
        return '', ''

    try:
        start_code, end_code = ansi.MODIFIERS[modifiername]
    except KeyError:
        raise ColorfulError('the modifier "{0}" is unknown. Use one of: {1}'.format(
            modifiername, ansi.MODIFIERS.keys()))
    else:
        return ansi.ANSI_ESCAPE_CODE.format(
            code=start_code), ansi.ANSI_ESCAPE_CODE.format(
                code=end_code)
def translate_style(style, colormode, colorpalette):
    """
    Translate the given style to an ANSI escape code
    sequence.

    ``style`` examples are:

    * green
    * bold
    * red_on_black
    * bold_green
    * italic_yellow_on_cyan

    :param str style: the style to translate
    :param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``
    :param dict colorpalette: the color palette to use for the color name mapping
    """
    style_parts = iter(style.split('_'))

    ansi_start_sequence = []
    ansi_end_sequence = []

    try:
        # consume all modifiers
        part = None
        for mod_part in style_parts:
            part = mod_part
            if part not in ansi.MODIFIERS:
                break  # all modifiers have been consumed

            mod_start_code, mod_end_code = resolve_modifier_to_ansi_code(part, colormode)
            ansi_start_sequence.append(mod_start_code)
            ansi_end_sequence.append(mod_end_code)
        else:  # we've consumed all parts, thus we can exit
            raise StopIteration()

        # next part has to be a foreground color or the 'on' keyword
        # which means we have to consume background colors
        if part != 'on':
            ansi_start_code, ansi_end_code = translate_colorname_to_ansi_code(
                part, ansi.FOREGROUND_COLOR_OFFSET, colormode, colorpalette)
            ansi_start_sequence.append(ansi_start_code)
            ansi_end_sequence.append(ansi_end_code)
            # consume the required 'on' keyword after the foreground color
            next(style_parts)

        # next part has to be the background color
        part = next(style_parts)
        ansi_start_code, ansi_end_code = translate_colorname_to_ansi_code(
            part, ansi.BACKGROUND_COLOR_OFFSET, colormode, colorpalette)
        ansi_start_sequence.append(ansi_start_code)
        ansi_end_sequence.append(ansi_end_code)
    except StopIteration:  # we've consumed all parts of the styling string
        pass

    # construct and return ANSI escape code sequence
    return ''.join(ansi_start_sequence), ''.join(ansi_end_sequence)
def style_string(string, ansi_style, colormode, nested=False):
    """
    Style the given string according to the given
    ANSI style string.

    :param str string: the string to style
    :param tuple ansi_style: the styling string returned by ``translate_style``
    :param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``

    :returns: a string containing proper ANSI sequence
    """
    ansi_start_code, ansi_end_code = ansi_style

    # replace nest placeholders with the current begin style
    if PY2:
        if isinstance(string, str):
            string = string.decode(DEFAULT_ENCODING)
    string = UNICODE(string).replace(ansi.NEST_PLACEHOLDER, ansi_start_code)

    return '{start_code}{string}{end_code}{nest_ph}'.format(
        start_code=ansi_start_code,
        string=string,
        end_code=ansi_end_code,
        nest_ph=ansi.NEST_PLACEHOLDER if nested else '')

def colorpalette(self, colorpalette):
    """
    Set the colorpalette which should be used
    """
    if isinstance(colorpalette, str):  # we assume it's a path to a color file
        colorpalette = colors.parse_colors(colorpalette)

    self._colorpalette = colors.sanitize_color_palette(colorpalette)
def setup(self, colormode=None, colorpalette=None, extend_colors=False):
    """
    Setup this colorful object by setting a ``colormode`` and
    the ``colorpalette``. The ``extend_colors`` flag is used
    to extend the currently active color palette
    instead of replacing it.

    :param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``
    :param dict colorpalette: the colorpalette to use. This ``dict`` should map
                              color names to their corresponding RGB values
    :param bool extend_colors: extend the active color palette instead of replacing it
    """
    if colormode:
        self.colormode = colormode

    if colorpalette:
        if extend_colors:
            self.update_palette(colorpalette)
        else:
            self.colorpalette = colorpalette
def use_style(self, style_name):
    """
    Use a predefined style as color palette

    :param str style_name: the name of the style
    """
    try:
        style = getattr(styles, style_name.upper())
    except AttributeError:
        raise ColorfulError('the style "{0}" is undefined'.format(
            style_name))
    else:
        self.colorpalette = style

def format(self, string, *args, **kwargs):
    """
    Format the given string with the given ``args`` and ``kwargs``.
    The string can contain references to ``c`` which is provided by
    this colorful object.

    :param str string: the string to format
    """
    return string.format(c=self, *args, **kwargs)
def print(self, *objects, **options):
    """
    Print the given objects to the given file stream.
    See https://docs.python.org/3/library/functions.html#print

    The only difference to the ``print()`` built-in is that
    ``Colorful.print()`` formats the string with ``c=self``.
    With that stylings are possible.

    :param str sep: the separator between the objects
    :param str end: the ending delimiter after all objects
    :param file: the file stream to write to
    :param bool flush: if the stream should be flushed
    """
    # NOTE: change signature to same as print() built-in function as
    #       soon as Python 2.7 is not supported anymore.
    #       It causes problems because of the keyword args after *args.
    allowed_options = {'sep', 'end', 'file', 'flush'}
    given_options = set(options.keys())
    if not given_options.issubset(allowed_options):
        raise TypeError('Colorful.print() got unexpected keyword arguments: {0}'.format(
            ', '.join(given_options.difference(allowed_options))))

    sep = options.get('sep', ' ')
    end = options.get('end', '\n')
    file = options.get('file', sys.stdout)
    flush = options.get('flush', False)

    styled_objects = [self.format(o) for o in objects]
    print(*styled_objects, sep=sep, end=end, file=file)

    # NOTE: if Python 2.7 support is dropped we can directly forward the
    #       flush keyword argument to the print() function.
    if flush:
        file.flush()
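# A minimal usage sketch of the style-string grammar handled by
# translate_style() above, using the examples from its own docstring
# (assumes the top-level `colorful` module object):
# import colorful
# print(colorful.bold_green('ok'))               # modifier + foreground color
# print(colorful.red_on_black('alert'))          # foreground + background color
# print(colorful.italic_yellow_on_cyan('fancy')) # modifier + fg + bg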
def readattr(path, name):
    """
    Read attribute from sysfs and return as string
    """
    try:
        f = open(USB_SYS_PREFIX + path + "/" + name)
        return f.readline().rstrip("\n")
    except IOError:
        return None

def find_ports(device):
    """
    Find the port chain a device is plugged on.

    This is done by searching sysfs for a device that matches the device
    bus/address combination.

    Useful when the underlying usb lib does not return device.port_number for
    whatever reason.
    """
    bus_id = device.bus
    dev_id = device.address
    for dirent in os.listdir(USB_SYS_PREFIX):
        matches = re.match(USB_PORTS_STR + '$', dirent)
        if matches:
            bus_str = readattr(dirent, 'busnum')
            if bus_str:
                busnum = float(bus_str)
            else:
                busnum = None
            dev_str = readattr(dirent, 'devnum')
            if dev_str:
                devnum = float(dev_str)
            else:
                devnum = None
            if busnum == bus_id and devnum == dev_id:
                return str(matches.groups()[1])

def set_calibration_data(self, scale=None, offset=None):
    """
    Set device calibration data based on settings in /etc/temper.conf.
    """
    if scale is not None and offset is not None:
        self._scale = scale
        self._offset = offset
    elif scale is None and offset is None:
        self._scale = 1.0
        self._offset = 0.0
        try:
            f = open('/etc/temper.conf', 'r')
        except IOError:
            f = None
        if f:
            lines = f.read().split('\n')
            f.close()
            for line in lines:
                matches = re.match(CALIB_LINE_STR, line)
                if matches:
                    bus = int(matches.groups()[0])
                    ports = matches.groups()[1]
                    scale = float(matches.groups()[2])
                    offset = float(matches.groups()[3])
                    if (str(ports) == str(self._ports)) and (str(bus) == str(self._bus)):
                        self._scale = scale
                        self._offset = offset
    else:
        raise RuntimeError("Must set both scale and offset, or neither")
def get_data(self, reset_device=False):
    """
    Get data from the USB device.
    """
    try:
        if reset_device:
            self._device.reset()

        # detach kernel driver from both interfaces if attached, so we can set_configuration()
        for interface in [0, 1]:
            if self._device.is_kernel_driver_active(interface):
                LOGGER.debug('Detaching kernel driver for interface %d '
                             'of %r on ports %r', interface, self._device, self._ports)
                self._device.detach_kernel_driver(interface)

        self._device.set_configuration()

        # Prevent kernel message:
        # "usbfs: process <PID> (python) did not claim interface x before use"
        # This will become unnecessary once pull-request #124 for
        # PyUSB has been accepted and we depend on a fixed release
        # of PyUSB. Until then, and even with the fix applied, it
        # does not hurt to explicitly claim the interface.
        usb.util.claim_interface(self._device, INTERFACE)

        # Turns out we don't actually need that ctrl_transfer.
        # Disabling this reduces number of USBErrors from ~7/30 to 0!
        # self._device.ctrl_transfer(bmRequestType=0x21, bRequest=0x09,
        #                            wValue=0x0201, wIndex=0x00,
        #                            data_or_wLength='\x01\x01', timeout=TIMEOUT)

        # Magic: Our TEMPerV1.4 likes to be asked twice. When
        # only asked once, it gets stuck on the next access and
        # requires a reset.
        self._control_transfer(COMMANDS['temp'])
        self._interrupt_read()

        # Turns out a whole lot of that magic seems unnecessary.
        # self._control_transfer(COMMANDS['ini1'])
        # self._interrupt_read()
        # self._control_transfer(COMMANDS['ini2'])
        # self._interrupt_read()
        # self._interrupt_read()

        # Get temperature
        self._control_transfer(COMMANDS['temp'])
        temp_data = self._interrupt_read()

        # Get humidity
        if self._device.product == 'TEMPer1F_H1_V1.4':
            humidity_data = temp_data
        else:
            humidity_data = None

        # Combine temperature and humidity data
        data = {'temp_data': temp_data, 'humidity_data': humidity_data}

        # Be a nice citizen and undo potential interface claiming.
        # Also see: https://github.com/walac/pyusb/blob/master/docs/tutorial.rst#dont-be-selfish
        usb.util.dispose_resources(self._device)
        return data
    except usb.USBError as err:
        if not reset_device:
            LOGGER.warning("Encountered %s, resetting %r and trying again.",
                           err, self._device)
            return self.get_data(True)

        # Catch the permissions exception and add our message
        if "not permitted" in str(err):
            raise Exception(
                "Permission problem accessing USB. "
                "Maybe I need to run as root?")
        else:
            LOGGER.error(err)
            raise
def get_temperature(self, format='celsius', sensor=0):
    """
    Get device temperature reading.
    """
    results = self.get_temperatures(sensors=[sensor, ])

    if format == 'celsius':
        return results[sensor]['temperature_c']
    elif format == 'fahrenheit':
        return results[sensor]['temperature_f']
    elif format == 'millicelsius':
        return results[sensor]['temperature_mc']
    else:
        raise ValueError("Unknown format")

def get_temperatures(self, sensors=None):
    """
    Get device temperature reading.

    Params:
    - sensors: optional list of sensors to get a reading for, examples:
      [0,]    - get reading for sensor 0
      [0, 1,] - get reading for sensors 0 and 1
      None    - get readings for all sensors
    """
    _sensors = sensors

    if _sensors is None:
        _sensors = list(range(0, self._sensor_count))

    if not set(_sensors).issubset(list(range(0, self._sensor_count))):
        raise ValueError(
            'Some or all of the sensors in the list %s are out of range '
            'given a sensor_count of %d. Valid range: %s' % (
                _sensors,
                self._sensor_count,
                list(range(0, self._sensor_count)),
            )
        )

    data = self.get_data()
    data = data['temp_data']

    results = {}

    # Interpret device response
    for sensor in _sensors:
        offset = self.lookup_offset(sensor)
        celsius = struct.unpack_from('>h', data, offset)[0] / 256.0
        # Apply scaling and offset (if any)
        celsius = celsius * self._scale + self._offset
        results[sensor] = {
            'ports': self.get_ports(),
            'bus': self.get_bus(),
            'sensor': sensor,
            'temperature_f': celsius * 1.8 + 32.0,
            'temperature_c': celsius,
            'temperature_mc': celsius * 1000,
            'temperature_k': celsius + 273.15,
        }
    return results

def get_humidity(self, sensors=None):
    """
    Get device humidity reading.

    Params:
    - sensors: optional list of sensors to get a reading for, examples:
      [0,]    - get reading for sensor 0
      [0, 1,] - get reading for sensors 0 and 1
      None    - get readings for all sensors
    """
    _sensors = sensors

    if _sensors is None:
        _sensors = list(range(0, self._sensor_count))

    if not set(_sensors).issubset(list(range(0, self._sensor_count))):
        raise ValueError(
            'Some or all of the sensors in the list %s are out of range '
            'given a sensor_count of %d. Valid range: %s' % (
                _sensors,
                self._sensor_count,
                list(range(0, self._sensor_count)),
            )
        )

    data = self.get_data()
    data = data['humidity_data']

    results = {}

    # Interpret device response
    for sensor in _sensors:
        offset = self.lookup_humidity_offset(sensor)
        if offset is None:
            continue
        humidity = (struct.unpack_from('>H', data, offset)[0] * 32) / 1000.0
        results[sensor] = {
            'ports': self.get_ports(),
            'bus': self.get_bus(),
            'sensor': sensor,
            'humidity_pc': humidity,
        }
    return results
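# A minimal sketch of the sensor decoding used in get_temperatures() above:
# a big-endian signed 16-bit value divided by 256 yields degrees Celsius.
# The raw reply bytes below are hypothetical, not captured from a device.
import struct

raw = b'\x80\x02\x19\x80'                      # hypothetical device reply
celsius = struct.unpack_from('>h', raw, 2)[0] / 256.0
assert celsius == 25.5                         # 0x1980 / 256 = 25.5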
def _control_transfer(self, data):
    """
    Send device a control request with standard parameters and <data> as
    payload.
    """
    LOGGER.debug('Ctrl transfer: %r', data)
    self._device.ctrl_transfer(bmRequestType=0x21, bRequest=0x09,
                               wValue=0x0200, wIndex=0x01,
                               data_or_wLength=data, timeout=TIMEOUT)

def _interrupt_read(self):
    """
    Read data from device.
    """
    data = self._device.read(ENDPOINT, REQ_INT_LEN, timeout=TIMEOUT)
    LOGGER.debug('Read data: %r', data)
    return data
def __check_looks_like_uri(self, uri):
    """Checks the URI looks like a RAW uri in github:

    - 'https://raw.githubusercontent.com/github/hubot/master/README.md'
    - 'https://github.com/github/hubot/raw/master/README.md'

    :param uri: uri of the file
    """
    if uri.split('/')[2] == 'raw.githubusercontent.com':
        return True
    elif uri.split('/')[2] == 'github.com':
        if uri.split('/')[5] == 'raw':
            return True
    else:
        raise GithubFileNotFound('URI %s is not a valid link to a raw file in Github' % uri)

def read_file_from_uri(self, uri):
    """Reads the file from Github

    :param uri: URI of the Github raw File

    :returns: UTF-8 text with the content
    """
    logger.debug("Reading %s" % (uri))

    self.__check_looks_like_uri(uri)

    try:
        req = urllib.request.Request(uri)
        req.add_header('Authorization', 'token %s' % self.token)
        r = urllib.request.urlopen(req)
    except urllib.error.HTTPError as err:
        if err.code == 404:
            raise GithubFileNotFound('File %s is not available. Check the URL to ensure it really exists' % uri)
        else:
            raise

    return r.read().decode("utf-8")

def measure_memory(cls, obj, seen=None):
    """Recursively finds size of objects"""
    size = sys.getsizeof(obj)
    if seen is None:
        seen = set()

    obj_id = id(obj)
    if obj_id in seen:
        return 0

    # Important: mark as seen *before* entering recursion to gracefully handle
    # self-referential objects
    seen.add(obj_id)

    if isinstance(obj, dict):
        size += sum([cls.measure_memory(v, seen) for v in obj.values()])
        size += sum([cls.measure_memory(k, seen) for k in obj.keys()])
    elif hasattr(obj, '__dict__'):
        size += cls.measure_memory(obj.__dict__, seen)
    elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
        size += sum([cls.measure_memory(i, seen) for i in obj])

    return size
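# A quick sanity sketch of measure_memory() above. Exact byte counts vary by
# Python version, but thanks to the `seen` set a shared sub-object is only
# counted once (the owning class name is hypothetical):
# shared = [1, 2, 3]
# SomeBackend.measure_memory({'a': shared, 'b': shared})
# == sys.getsizeof-based total in which `shared` contributes a single time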
def __feed_arthur(self):
    """ Feed Ocean with backend data collected from arthur redis queue"""

    with self.ARTHUR_FEED_LOCK:

        # This is an expensive operation so don't do it always
        if (time.time() - self.ARTHUR_LAST_MEMORY_CHECK) > 5 * self.ARTHUR_LAST_MEMORY_CHECK_TIME:
            self.ARTHUR_LAST_MEMORY_CHECK = time.time()
            logger.debug("Measuring the memory used by the raw items dict ...")
            try:
                memory_size = self.measure_memory(self.arthur_items) / (1024 * 1024)
            except RuntimeError as ex:
                # During memory usage measure, other thread could change the dict
                logger.warning("Can't get the memory used by the raw items dict: %s", ex)
                memory_size = self.ARTHUR_LAST_MEMORY_SIZE
            self.ARTHUR_LAST_MEMORY_CHECK_TIME = time.time() - self.ARTHUR_LAST_MEMORY_CHECK
            logger.debug("Arthur items memory size: %0.2f MB (%is to check)",
                         memory_size, self.ARTHUR_LAST_MEMORY_CHECK_TIME)
            self.ARTHUR_LAST_MEMORY_SIZE = memory_size

        # Don't feed items from redis if the current python dict is
        # larger than ARTHUR_MAX_MEMORY_SIZE
        if self.ARTHUR_LAST_MEMORY_SIZE > self.ARTHUR_MAX_MEMORY_SIZE:
            logger.debug("Items queue full. Not collecting items from redis queue.")
            return

        logger.info("Collecting items from redis queue")

        db_url = self.config.get_conf()['es_collection']['redis_url']

        conn = redis.StrictRedis.from_url(db_url)
        logger.debug("Redis connection established with %s.", db_url)

        # Get and remove queued items in an atomic transaction
        pipe = conn.pipeline()
        # pipe.lrange(Q_STORAGE_ITEMS, 0, -1)
        pipe.lrange(Q_STORAGE_ITEMS, 0, self.ARTHUR_REDIS_ITEMS - 1)
        pipe.ltrim(Q_STORAGE_ITEMS, self.ARTHUR_REDIS_ITEMS, -1)
        items = pipe.execute()[0]

        for item in items:
            arthur_item = pickle.loads(item)
            if arthur_item['tag'] not in self.arthur_items:
                self.arthur_items[arthur_item['tag']] = []
            self.arthur_items[arthur_item['tag']].append(arthur_item)

        for tag in self.arthur_items:
            if self.arthur_items[tag]:
                logger.debug("Arthur items for %s: %i", tag, len(self.arthur_items[tag]))
def __feed_backend_arthur(self, repo):
    """ Feed Ocean with backend data collected from arthur redis queue"""

    # Always get pending items from arthur for all data sources
    self.__feed_arthur()

    tag = self.backend_tag(repo)

    logger.debug("Arthur items available for %s", self.arthur_items.keys())

    logger.debug("Getting arthur items for %s.", tag)
    if tag in self.arthur_items:
        logger.debug("Found items for %s.", tag)
        while self.arthur_items[tag]:
            yield self.arthur_items[tag].pop()

def __create_arthur_json(self, repo, backend_args):
    """ Create the JSON for configuring arthur to collect data

    https://github.com/grimoirelab/arthur#adding-tasks
    Sample for git:

    {
    "tasks": [
        {
            "task_id": "arthur.git",
            "backend": "git",
            "backend_args": {
                "gitpath": "/tmp/arthur_git/",
                "uri": "https://github.com/grimoirelab/arthur.git"
            },
            "category": "commit",
            "archive_args": {
                "archive_path": '/tmp/test_archives',
                "fetch_from_archive": false,
                "archive_after": None
            },
            "scheduler_args": {
                "delay": 10
            }
        }
    ]
    }
    """

    backend_args = self._compose_arthur_params(self.backend_section, repo)
    if self.backend_section == 'git':
        backend_args['gitpath'] = os.path.join(self.REPOSITORY_DIR, repo)
    backend_args['tag'] = self.backend_tag(repo)

    ajson = {"tasks": [{}]}
    # This is the perceval tag
    ajson["tasks"][0]['task_id'] = self.backend_tag(repo)
    ajson["tasks"][0]['backend'] = self.backend_section.split(":")[0]
    ajson["tasks"][0]['backend_args'] = backend_args
    ajson["tasks"][0]['category'] = backend_args['category']
    ajson["tasks"][0]['archive'] = {}
    ajson["tasks"][0]['scheduler'] = {"delay": self.ARTHUR_TASK_DELAY}

    # from-date or offset param must be added
    es_col_url = self._get_collection_url()
    es_index = self.conf[self.backend_section]['raw_index']
    # Get the last activity for the data source
    es = ElasticSearch(es_col_url, es_index)
    connector = get_connector_from_name(self.backend_section)

    klass = connector[0]  # Backend for the connector
    signature = inspect.signature(klass.fetch)

    last_activity = None
    filter_ = {"name": "tag", "value": backend_args['tag']}
    if 'from_date' in signature.parameters:
        last_activity = es.get_last_item_field('metadata__updated_on', [filter_])
        if last_activity:
            ajson["tasks"][0]['backend_args']['from_date'] = last_activity.isoformat()
    elif 'offset' in signature.parameters:
        last_activity = es.get_last_item_field('offset', [filter_])
        if last_activity:
            ajson["tasks"][0]['backend_args']['offset'] = last_activity

    if last_activity:
        logging.info("Getting raw item with arthur since %s", last_activity)

    return ajson
def sha_github_file(cls, config, repo_file, repository_api, repository_branch):
    """ Return the GitHub SHA for a file in the repository """

    repo_file_sha = None

    cfg = config.get_conf()
    github_token = cfg['sortinghat']['identities_api_token']
    headers = {"Authorization": "token " + github_token}

    url_dir = repository_api + "/git/trees/" + repository_branch
    logger.debug("Getting sha data from tree: %s", url_dir)
    raw_repo_file_info = requests.get(url_dir, headers=headers)
    raw_repo_file_info.raise_for_status()
    for rfile in raw_repo_file_info.json()['tree']:
        if rfile['path'] == repo_file:
            logger.debug("SHA found: %s, ", rfile["sha"])
            repo_file_sha = rfile["sha"]
            break

    return repo_file_sha
def __get_uuids_from_profile_name(self, profile_name):
    """ Get the uuid for a profile name """
    uuids = []

    with self.db.connect() as session:
        query = session.query(Profile).\
            filter(Profile.name == profile_name)
        profiles = query.all()
        if profiles:
            for p in profiles:
                uuids.append(p.uuid)
    return uuids

def micro_mordred(cfg_path, backend_sections, raw, arthur, identities, enrich, panels):
    """Execute the raw and/or the enrich phases of a given backend section defined
    in a Mordred configuration file.

    :param cfg_path: the path of a Mordred configuration file
    :param backend_sections: the backend sections where the raw and/or enrich phases will be executed
    :param raw: if true, it activates the collection of raw data
    :param arthur: if true, it enables Arthur to collect the raw data
    :param identities: if true, it activates the identities merge in SortingHat
    :param enrich: if true, it activates the collection of enrich data
    :param panels: if true, it activates the upload of panels
    """
    config = Config(cfg_path)

    if raw:
        for backend in backend_sections:
            get_raw(config, backend, arthur)

    if identities:
        get_identities(config)

    if enrich:
        for backend in backend_sections:
            get_enrich(config, backend)

    if panels:
        get_panels(config)
def get_raw(config, backend_section, arthur):
    """Execute the raw phase for a given backend section, optionally using Arthur

    :param config: a Mordred config object
    :param backend_section: the backend section where the raw phase is executed
    :param arthur: if true, it enables Arthur to collect the raw data
    """
    if arthur:
        task = TaskRawDataArthurCollection(config, backend_section=backend_section)
    else:
        task = TaskRawDataCollection(config, backend_section=backend_section)

    TaskProjects(config).execute()
    try:
        task.execute()
        logging.info("Loading raw data finished!")
    except Exception as e:
        logging.error(str(e))
        sys.exit(-1)

def get_identities(config):
    """Execute the merge identities phase

    :param config: a Mordred config object
    """
    TaskProjects(config).execute()
    task = TaskIdentitiesMerge(config)
    task.execute()
    logging.info("Merging identities finished!")
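# A minimal usage sketch of the phases above (the configuration path and
# backend section names are illustrative, not from the source):
# micro_mordred('./setup.cfg', ['git'], raw=True, arthur=False,
#               identities=True, enrich=True, panels=False)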