Dataset schema (one line per column: name, dtype, and value or length range):

    id                 int32    values 0 to 252k
    repo               string   lengths 7 to 55
    path               string   lengths 4 to 127
    func_name          string   lengths 1 to 88
    original_string    string   lengths 75 to 19.8k
    language           string   1 class (python)
    code               string   lengths 75 to 19.8k
    code_tokens        list
    docstring          string   lengths 3 to 17.3k
    docstring_tokens   list
    sha                string   lengths 40 to 40
    url                string   lengths 87 to 242
241,300
fictorial/filesysdb
filesysdb/__init__.py
_update_indexes_for_mutated_object
def _update_indexes_for_mutated_object(collection, obj): """If an object is updated, this will simply remove it and re-add it to the indexes defined on the collection.""" for index in _db[collection].indexes.values(): _remove_from_index(index, obj) _add_to_index(index, obj)
python
def _update_indexes_for_mutated_object(collection, obj): """If an object is updated, this will simply remove it and re-add it to the indexes defined on the collection.""" for index in _db[collection].indexes.values(): _remove_from_index(index, obj) _add_to_index(index, obj)
[ "def", "_update_indexes_for_mutated_object", "(", "collection", ",", "obj", ")", ":", "for", "index", "in", "_db", "[", "collection", "]", ".", "indexes", ".", "values", "(", ")", ":", "_remove_from_index", "(", "index", ",", "obj", ")", "_add_to_index", "(", "index", ",", "obj", ")" ]
If an object is updated, this will simply remove it and re-add it to the indexes defined on the collection.
[ "If", "an", "object", "is", "updated", "this", "will", "simply", "remove", "it", "and", "re", "-", "add", "it", "to", "the", "indexes", "defined", "on", "the", "collection", "." ]
bbf1e32218b71c7c15c33ada660433fffc6fa6ab
https://github.com/fictorial/filesysdb/blob/bbf1e32218b71c7c15c33ada660433fffc6fa6ab/filesysdb/__init__.py#L276-L282
241,301
fictorial/filesysdb
filesysdb/__init__.py
_update_indexes_for_deleted_object
def _update_indexes_for_deleted_object(collection, obj): """If an object is deleted, it should no longer be indexed so this removes the object from all indexes on the given collection.""" for index in _db[collection].indexes.values(): _remove_from_index(index, obj)
python
def _update_indexes_for_deleted_object(collection, obj): """If an object is deleted, it should no longer be indexed so this removes the object from all indexes on the given collection.""" for index in _db[collection].indexes.values(): _remove_from_index(index, obj)
[ "def", "_update_indexes_for_deleted_object", "(", "collection", ",", "obj", ")", ":", "for", "index", "in", "_db", "[", "collection", "]", ".", "indexes", ".", "values", "(", ")", ":", "_remove_from_index", "(", "index", ",", "obj", ")" ]
If an object is deleted, it should no longer be indexed so this removes the object from all indexes on the given collection.
[ "If", "an", "object", "is", "deleted", "it", "should", "no", "longer", "be", "indexed", "so", "this", "removes", "the", "object", "from", "all", "indexes", "on", "the", "given", "collection", "." ]
bbf1e32218b71c7c15c33ada660433fffc6fa6ab
https://github.com/fictorial/filesysdb/blob/bbf1e32218b71c7c15c33ada660433fffc6fa6ab/filesysdb/__init__.py#L285-L290
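Taken together, the two filesysdb helpers above implement a remove-then-re-add index maintenance pattern. A minimal self-contained sketch of that pattern with a toy dict-of-sets index (the field name 'color', the object shape, and the bucket-scanning removal are illustrative stand-ins, not filesysdb's actual internals):

    from collections import defaultdict

    index = defaultdict(set)   # toy index: field value -> set of object ids

    def _add_to_index(index, obj):
        index[obj['color']].add(obj['id'])

    def _remove_from_index(index, obj):
        # scan every bucket so a stale entry is found even if the field changed
        for bucket in index.values():
            bucket.discard(obj['id'])

    def update_index_for_mutated_object(index, obj):
        _remove_from_index(index, obj)   # drop the stale entry
        _add_to_index(index, obj)        # re-index under the current value

    obj = {'id': 1, 'color': 'red'}
    _add_to_index(index, obj)
    obj['color'] = 'blue'
    update_index_for_mutated_object(index, obj)
    assert index['blue'] == {1} and index['red'] == set()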
241,302
JNRowe/jnrbase
jnrbase/iso_8601.py
parse_delta
def parse_delta(__string: str) -> datetime.timedelta: """Parse ISO-8601 duration string. Args: __string: Duration string to parse Returns: Parsed delta object """ if not __string: return datetime.timedelta(0) match = re.fullmatch(r""" P ((?P<days>\d+)D)? T? ((?P<hours>\d{1,2})H)? ((?P<minutes>\d{1,2})M)? ((?P<seconds>\d{1,2})?((?:\.(?P<microseconds>\d+))?S)?) """, __string, re.VERBOSE) if not match: raise ValueError('Unable to parse delta {!r}'.format(__string)) match_dict = {k: int(v) if v else 0 for k, v in match.groupdict().items()} return datetime.timedelta(**match_dict)
python
def parse_delta(__string: str) -> datetime.timedelta: """Parse ISO-8601 duration string. Args: __string: Duration string to parse Returns: Parsed delta object """ if not __string: return datetime.timedelta(0) match = re.fullmatch(r""" P ((?P<days>\d+)D)? T? ((?P<hours>\d{1,2})H)? ((?P<minutes>\d{1,2})M)? ((?P<seconds>\d{1,2})?((?:\.(?P<microseconds>\d+))?S)?) """, __string, re.VERBOSE) if not match: raise ValueError('Unable to parse delta {!r}'.format(__string)) match_dict = {k: int(v) if v else 0 for k, v in match.groupdict().items()} return datetime.timedelta(**match_dict)
[ "def", "parse_delta", "(", "__string", ":", "str", ")", "->", "datetime", ".", "timedelta", ":", "if", "not", "__string", ":", "return", "datetime", ".", "timedelta", "(", "0", ")", "match", "=", "re", ".", "fullmatch", "(", "r\"\"\"\n P\n ((?P<days>\\d+)D)?\n T?\n ((?P<hours>\\d{1,2})H)?\n ((?P<minutes>\\d{1,2})M)?\n ((?P<seconds>\\d{1,2})?((?:\\.(?P<microseconds>\\d+))?S)?)\n \"\"\"", ",", "__string", ",", "re", ".", "VERBOSE", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "'Unable to parse delta {!r}'", ".", "format", "(", "__string", ")", ")", "match_dict", "=", "{", "k", ":", "int", "(", "v", ")", "if", "v", "else", "0", "for", "k", ",", "v", "in", "match", ".", "groupdict", "(", ")", ".", "items", "(", ")", "}", "return", "datetime", ".", "timedelta", "(", "*", "*", "match_dict", ")" ]
Parse ISO-8601 duration string. Args: __string: Duration string to parse Returns: Parsed delta object
[ "Parse", "ISO", "-", "8601", "duration", "string", "." ]
ae505ef69a9feb739b5f4e62c5a8e6533104d3ea
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/iso_8601.py#L27-L48
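A short usage sketch for parse_delta, assuming jnrbase is installed (the double underscore in __string just marks the parameter as positional-only by convention):

    import datetime
    from jnrbase.iso_8601 import parse_delta

    assert parse_delta('P1DT2H30M') == datetime.timedelta(days=1, hours=2,
                                                          minutes=30)
    assert parse_delta('') == datetime.timedelta(0)  # empty input -> zero delta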
241,303
JNRowe/jnrbase
jnrbase/iso_8601.py
format_delta
def format_delta(__timedelta: datetime.timedelta) -> str: """Format ISO-8601 duration string. Args: __timedelta: Duration to process Returns: ISO-8601 representation of duration """ if __timedelta == datetime.timedelta(0): return '' days_s = '{}D'.format(__timedelta.days) if __timedelta.days else '' hours, minutes = divmod(__timedelta.seconds, 3600) minutes, seconds = divmod(minutes, 60) hours_s = '{:02d}H'.format(hours) if hours else '' minutes_s = '{:02d}M'.format(minutes) if minutes else '' seconds_s = '{:02d}S'.format(seconds) if seconds else '' return 'P{}{}{}{}{}'.format(days_s, 'T' if hours or minutes or seconds else '', hours_s, minutes_s, seconds_s)
python
def format_delta(__timedelta: datetime.timedelta) -> str: """Format ISO-8601 duration string. Args: __timedelta: Duration to process Returns: ISO-8601 representation of duration """ if __timedelta == datetime.timedelta(0): return '' days_s = '{}D'.format(__timedelta.days) if __timedelta.days else '' hours, minutes = divmod(__timedelta.seconds, 3600) minutes, seconds = divmod(minutes, 60) hours_s = '{:02d}H'.format(hours) if hours else '' minutes_s = '{:02d}M'.format(minutes) if minutes else '' seconds_s = '{:02d}S'.format(seconds) if seconds else '' return 'P{}{}{}{}{}'.format(days_s, 'T' if hours or minutes or seconds else '', hours_s, minutes_s, seconds_s)
[ "def", "format_delta", "(", "__timedelta", ":", "datetime", ".", "timedelta", ")", "->", "str", ":", "if", "__timedelta", "==", "datetime", ".", "timedelta", "(", "0", ")", ":", "return", "''", "days_s", "=", "'{}D'", ".", "format", "(", "__timedelta", ".", "days", ")", "if", "__timedelta", ".", "days", "else", "''", "hours", ",", "minutes", "=", "divmod", "(", "__timedelta", ".", "seconds", ",", "3600", ")", "minutes", ",", "seconds", "=", "divmod", "(", "minutes", ",", "60", ")", "hours_s", "=", "'{:02d}H'", ".", "format", "(", "hours", ")", "if", "hours", "else", "''", "minutes_s", "=", "'{:02d}M'", ".", "format", "(", "minutes", ")", "if", "minutes", "else", "''", "seconds_s", "=", "'{:02d}S'", ".", "format", "(", "seconds", ")", "if", "seconds", "else", "''", "return", "'P{}{}{}{}{}'", ".", "format", "(", "days_s", ",", "'T'", "if", "hours", "or", "minutes", "or", "seconds", "else", "''", ",", "hours_s", ",", "minutes_s", ",", "seconds_s", ")" ]
Format ISO-8601 duration string. Args: __timedelta: Duration to process Returns: ISO-8601 representation of duration
[ "Format", "ISO", "-", "8601", "duration", "string", "." ]
ae505ef69a9feb739b5f4e62c5a8e6533104d3ea
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/iso_8601.py#L51-L69
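And the inverse direction, a hedged round-trip check under the same installation assumption (hour and minute components are zero-padded by the format strings above):

    import datetime
    from jnrbase.iso_8601 import format_delta

    delta = datetime.timedelta(days=1, hours=2, minutes=30)
    assert format_delta(delta) == 'P1DT02H30M'
    assert format_delta(datetime.timedelta(0)) == ''  # zero duration -> ''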
241,304
JNRowe/jnrbase
jnrbase/iso_8601.py
parse_datetime
def parse_datetime(__string: str) -> datetime.datetime: """Parse ISO-8601 datetime string. Args: __string: Datetime string to parse Returns: Parsed datetime object """ if not __string: datetime_ = datetime.datetime.now(datetime.timezone.utc) else: # pylint: disable=no-member datetime_ = ciso8601.parse_datetime(__string) if datetime_.tzinfo is None: datetime_ = datetime_.replace(tzinfo=datetime.timezone.utc) return datetime_
python
def parse_datetime(__string: str) -> datetime.datetime: """Parse ISO-8601 datetime string. Args: __string: Datetime string to parse Returns: Parsed datetime object """ if not __string: datetime_ = datetime.datetime.now(datetime.timezone.utc) else: # pylint: disable=no-member datetime_ = ciso8601.parse_datetime(__string) if datetime_.tzinfo is None: datetime_ = datetime_.replace(tzinfo=datetime.timezone.utc) return datetime_
[ "def", "parse_datetime", "(", "__string", ":", "str", ")", "->", "datetime", ".", "datetime", ":", "if", "not", "__string", ":", "datetime_", "=", "datetime", ".", "datetime", ".", "now", "(", "datetime", ".", "timezone", ".", "utc", ")", "else", ":", "# pylint: disable=no-member", "datetime_", "=", "ciso8601", ".", "parse_datetime", "(", "__string", ")", "if", "datetime_", ".", "tzinfo", "is", "None", ":", "datetime_", "=", "datetime_", ".", "replace", "(", "tzinfo", "=", "datetime", ".", "timezone", ".", "utc", ")", "return", "datetime_" ]
Parse ISO-8601 datetime string. Args: __string: Datetime string to parse Returns: Parsed datetime object
[ "Parse", "ISO", "-", "8601", "datetime", "string", "." ]
ae505ef69a9feb739b5f4e62c5a8e6533104d3ea
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/iso_8601.py#L72-L87
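A usage sketch for parse_datetime; it delegates to ciso8601, so that package must also be importable. Naive inputs are coerced to UTC, and an empty string returns the current time in UTC:

    import datetime
    from jnrbase.iso_8601 import parse_datetime

    dt = parse_datetime('2018-02-03T09:41:00')   # no offset in the input...
    assert dt.tzinfo == datetime.timezone.utc    # ...so UTC is assumed
    assert parse_datetime('').tzinfo == datetime.timezone.utc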
241,305
shreyaspotnis/rampage
rampage/daq/daq.py
print_device_info
def print_device_info(dev_name): """Prints information about the given device. Usage: print_device_info("Dev1") """ string_buffer = ctypes.create_string_buffer(1024) attributes = [pydaq.DAQmx_Dev_ProductType, pydaq.DAQmx_Dev_SerialNum, pydaq.DAQmx_Dev_AO_PhysicalChans, pydaq.DAQmx_Dev_CI_PhysicalChans, pydaq.DAQmx_Dev_CO_PhysicalChans, pydaq.DAQmx_Dev_DO_Lines] attribute_names = ['DAQmx_Dev_ProductType', 'DAQmx_Dev_SerialNum', 'DAQmx_Dev_AO_PhysicalChans', 'DAQmx_Dev_CI_PhysicalChans', 'DAQmx_Dev_CO_PhysicalChans', 'DAQmx_Dev_DO_Lines'] ret_values = [] for a in attributes: pydaq.DAQmxGetDeviceAttribute(dev_name, a, string_buffer) ret_values.append(str(string_buffer.value)) print('Device Name:\t' + dev_name) for n, v in zip(attribute_names, ret_values): print('\t' + n + ':\t' + v)
python
def print_device_info(dev_name): """Prints information about the given device. Usage: print_device_info("Dev1") """ string_buffer = ctypes.create_string_buffer(1024) attributes = [pydaq.DAQmx_Dev_ProductType, pydaq.DAQmx_Dev_SerialNum, pydaq.DAQmx_Dev_AO_PhysicalChans, pydaq.DAQmx_Dev_CI_PhysicalChans, pydaq.DAQmx_Dev_CO_PhysicalChans, pydaq.DAQmx_Dev_DO_Lines] attribute_names = ['DAQmx_Dev_ProductType', 'DAQmx_Dev_SerialNum', 'DAQmx_Dev_AO_PhysicalChans', 'DAQmx_Dev_CI_PhysicalChans', 'DAQmx_Dev_CO_PhysicalChans', 'DAQmx_Dev_DO_Lines'] ret_values = [] for a in attributes: pydaq.DAQmxGetDeviceAttribute(dev_name, a, string_buffer) ret_values.append(str(string_buffer.value)) print('Device Name:\t' + dev_name) for n, v in zip(attribute_names, ret_values): print('\t' + n + ':\t' + v)
[ "def", "print_device_info", "(", "dev_name", ")", ":", "string_buffer", "=", "ctypes", ".", "create_string_buffer", "(", "1024", ")", "attributes", "=", "[", "pydaq", ".", "DAQmx_Dev_ProductType", ",", "pydaq", ".", "DAQmx_Dev_SerialNum", ",", "pydaq", ".", "DAQmx_Dev_AO_PhysicalChans", ",", "pydaq", ".", "DAQmx_Dev_CI_PhysicalChans", ",", "pydaq", ".", "DAQmx_Dev_CO_PhysicalChans", ",", "pydaq", ".", "DAQmx_Dev_DO_Lines", "]", "attribute_names", "=", "[", "'DAQmx_Dev_ProductType'", ",", "'DAQmx_Dev_SerialNum'", ",", "'DAQmx_Dev_AO_PhysicalChans'", ",", "'DAQmx_Dev_CI_PhysicalChans'", ",", "'DAQmx_Dev_CO_PhysicalChans'", ",", "'DAQmx_Dev_DO_Lines'", "]", "ret_values", "=", "[", "]", "for", "a", "in", "attributes", ":", "pydaq", ".", "DAQmxGetDeviceAttribute", "(", "dev_name", ",", "a", ",", "string_buffer", ")", "ret_values", ".", "append", "(", "str", "(", "string_buffer", ".", "value", ")", ")", "print", "(", "'Device Name:\\t'", "+", "dev_name", ")", "for", "n", ",", "v", "in", "zip", "(", "attribute_names", ",", "ret_values", ")", ":", "print", "'\\t'", "+", "n", "+", "':\\t'", "+", "v" ]
Prints information about the given device. Usage: print_device_info("Dev1")
[ "Prints", "information", "about", "the", "given", "device", "." ]
e2565aef7ee16ee06523de975e8aa41aca14e3b2
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/daq/daq.py#L28-L53
241,306
shreyaspotnis/rampage
rampage/daq/daq.py
get_device_name_list
def get_device_name_list(): """Returns a list of device names installed.""" dev_names = ctypes.create_string_buffer(1024) pydaq.DAQmxGetSysDevNames(dev_names, len(dev_names)) return dev_names.value.split(', ')
python
def get_device_name_list(): """Returns a list of device names installed.""" dev_names = ctypes.create_string_buffer(1024) pydaq.DAQmxGetSysDevNames(dev_names, len(dev_names)) return dev_names.value.split(', ')
[ "def", "get_device_name_list", "(", ")", ":", "dev_names", "=", "ctypes", ".", "create_string_buffer", "(", "1024", ")", "pydaq", ".", "DAQmxGetSysDevNames", "(", "dev_names", ",", "len", "(", "dev_names", ")", ")", "return", "dev_names", ".", "value", ".", "split", "(", "', '", ")" ]
Returns a list of device names installed.
[ "Returns", "a", "list", "of", "device", "names", "installed", "." ]
e2565aef7ee16ee06523de975e8aa41aca14e3b2
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/daq/daq.py#L56-L60
241,307
shreyaspotnis/rampage
rampage/daq/daq.py
reset_analog_sample_clock
def reset_analog_sample_clock(state=False): """Reset the clock line. Use this just before starting a run to avoid timing issues. """ set_digital_line_state(expt_settings.dev1_clock_out_name, state) set_digital_line_state(expt_settings.dev2_clock_out_name, state) set_digital_line_state(expt_settings.dev3_clock_out_name, state) set_digital_line_state(expt_settings.dev4_clock_out_name, state)
python
def reset_analog_sample_clock(state=False): """Reset the clock line. Use this just before starting a run to avoid timing issues. """ set_digital_line_state(expt_settings.dev1_clock_out_name, state) set_digital_line_state(expt_settings.dev2_clock_out_name, state) set_digital_line_state(expt_settings.dev3_clock_out_name, state) set_digital_line_state(expt_settings.dev4_clock_out_name, state)
[ "def", "reset_analog_sample_clock", "(", "state", "=", "False", ")", ":", "set_digital_line_state", "(", "expt_settings", ".", "dev1_clock_out_name", ",", "state", ")", "set_digital_line_state", "(", "expt_settings", ".", "dev2_clock_out_name", ",", "state", ")", "set_digital_line_state", "(", "expt_settings", ".", "dev3_clock_out_name", ",", "state", ")", "set_digital_line_state", "(", "expt_settings", ".", "dev4_clock_out_name", ",", "state", ")" ]
Reset the clock line. Use this just before starting a run to avoid timing issues.
[ "Reset", "the", "clock", "line", "." ]
e2565aef7ee16ee06523de975e8aa41aca14e3b2
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/daq/daq.py#L69-L77
241,308
shreyaspotnis/rampage
rampage/daq/daq.py
set_digital_line_state
def set_digital_line_state(line_name, state): """Set the state of a single digital line. line_name (str) - The physical name of the line. e.g. line_name="Dev1/port0/line3" This should be a single digital line. Specifying more than one would result in unexpected behaviour. For example "Dev1/port0/line0:5" is not allowed. see http://zone.ni.com/reference/en-XX/help/370466W-01/mxcncpts/physchannames/ for details of naming lines. state (bool) - state=True sets the line to high, state=False sets to low. """ # get the line number from the line name. That's the number of bits to shift bits_to_shift = int(line_name.split('line')[-1]) dig_data = np.ones(2, dtype="uint32")*bool(state)*(2**bits_to_shift) # Note here that the number of samples written here is 2, which is the # minimum required for a buffered write. If we configure a timing for the # write, it is considered buffered. # see http://zone.ni.com/reference/en-XX/help/370471Y-01/daqmxcfunc/daqmxwritedigitalu32/ DigitalOutputTask(line_name, dig_data).StartAndWait()
python
def set_digital_line_state(line_name, state): """Set the state of a single digital line. line_name (str) - The physical name of the line. e.g. line_name="Dev1/port0/line3" This should be a single digital line. Specifying more than one would result in unexpected behaviour. For example "Dev1/port0/line0:5" is not allowed. see http://zone.ni.com/reference/en-XX/help/370466W-01/mxcncpts/physchannames/ for details of naming lines. state (bool) - state=True sets the line to high, state=False sets to low. """ # get the line number from the line name. That's the number of bits to shift bits_to_shift = int(line_name.split('line')[-1]) dig_data = np.ones(2, dtype="uint32")*bool(state)*(2**bits_to_shift) # Note here that the number of samples written here is 2, which is the # minimum required for a buffered write. If we configure a timing for the # write, it is considered buffered. # see http://zone.ni.com/reference/en-XX/help/370471Y-01/daqmxcfunc/daqmxwritedigitalu32/ DigitalOutputTask(line_name, dig_data).StartAndWait()
[ "def", "set_digital_line_state", "(", "line_name", ",", "state", ")", ":", "# get the line number from the line name. Thats the number of bits to shift", "bits_to_shift", "=", "int", "(", "line_name", ".", "split", "(", "'line'", ")", "[", "-", "1", "]", ")", "dig_data", "=", "np", ".", "ones", "(", "2", ",", "dtype", "=", "\"uint32\"", ")", "*", "bool", "(", "state", ")", "*", "(", "2", "**", "bits_to_shift", ")", "# Note here that the number of samples written here are 2, which is the", "# minimum required for a buffered write. If we configure a timing for the", "# write, it is considered buffered.", "# see http://zone.ni.com/reference/en-XX/help/370471Y-01/daqmxcfunc/daqmxwritedigitalu32/", "DigitalOutputTask", "(", "line_name", ",", "dig_data", ")", ".", "StartAndWait", "(", ")" ]
Set the state of a single digital line. line_name (str) - The physical name of the line. e.g. line_name="Dev1/port0/line3" This should be a single digital line. Specifying more than one would result in unexpected behaviour. For example "Dev1/port0/line0:5" is not allowed. see http://zone.ni.com/reference/en-XX/help/370466W-01/mxcncpts/physchannames/ for details of naming lines. state (bool) - state=True sets the line to high, state=False sets to low.
[ "Set", "the", "state", "of", "a", "single", "digital", "line", "." ]
e2565aef7ee16ee06523de975e8aa41aca14e3b2
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/daq/daq.py#L80-L100
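The line-number-to-bitmask arithmetic in set_digital_line_state is easy to check in isolation; a standalone sketch of just that step, with no DAQmx hardware or pydaq import needed:

    import numpy as np

    line_name = 'Dev1/port0/line3'
    bits_to_shift = int(line_name.split('line')[-1])          # -> 3
    dig_data = np.ones(2, dtype='uint32') * True * (2 ** bits_to_shift)
    assert list(dig_data) == [8, 8]   # bit 3 set in both buffered samples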
241,309
shreyaspotnis/rampage
rampage/daq/daq.py
DigitalOutputTask.StartAndWait
def StartAndWait(self): """Starts the task and waits until it is done.""" self.StartTask() self.WaitUntilTaskDone(pydaq.DAQmx_Val_WaitInfinitely) self.ClearTask()
python
def StartAndWait(self): """Starts the task and waits until it is done.""" self.StartTask() self.WaitUntilTaskDone(pydaq.DAQmx_Val_WaitInfinitely) self.ClearTask()
[ "def", "StartAndWait", "(", "self", ")", ":", "self", ".", "StartTask", "(", ")", "self", ".", "WaitUntilTaskDone", "(", "pydaq", ".", "DAQmx_Val_WaitInfinitely", ")", "self", ".", "ClearTask", "(", ")" ]
Starts the task and waits until it is done.
[ "Starts", "the", "task", "and", "waits", "until", "it", "is", "done", "." ]
e2565aef7ee16ee06523de975e8aa41aca14e3b2
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/daq/daq.py#L164-L168
241,310
shreyaspotnis/rampage
rampage/daq/daq.py
DigitalOutputTask.isDone
def isDone(self): """Returns true if task is done.""" done = pydaq.bool32() self.IsTaskDone(ctypes.byref(done)) return done.value
python
def isDone(self): """Returns true if task is done.""" done = pydaq.bool32() self.IsTaskDone(ctypes.byref(done)) return done.value
[ "def", "isDone", "(", "self", ")", ":", "done", "=", "pydaq", ".", "bool32", "(", ")", "self", ".", "IsTaskDone", "(", "ctypes", ".", "byref", "(", "done", ")", ")", "return", "done", ".", "value" ]
Returns true if task is done.
[ "Returns", "true", "if", "task", "is", "done", "." ]
e2565aef7ee16ee06523de975e8aa41aca14e3b2
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/daq/daq.py#L170-L174
241,311
shreyaspotnis/rampage
rampage/daq/daq.py
DigitalOutputTaskWithCallbacks.padDigitalData
def padDigitalData(self, dig_data, n): """Pad dig_data with its last element so that the new array is a multiple of n. """ n = int(n) l0 = len(dig_data) if l0 % n == 0: return dig_data # no need of padding else: ladd = n - (l0 % n) dig_data_add = np.zeros(ladd, dtype="uint32") dig_data_add.fill(dig_data[-1]) return np.concatenate((dig_data, dig_data_add))
python
def padDigitalData(self, dig_data, n): """Pad dig_data with its last element so that the new array is a multiple of n. """ n = int(n) l0 = len(dig_data) if l0 % n == 0: return dig_data # no need of padding else: ladd = n - (l0 % n) dig_data_add = np.zeros(ladd, dtype="uint32") dig_data_add.fill(dig_data[-1]) return np.concatenate((dig_data, dig_data_add))
[ "def", "padDigitalData", "(", "self", ",", "dig_data", ",", "n", ")", ":", "n", "=", "int", "(", "n", ")", "l0", "=", "len", "(", "dig_data", ")", "if", "l0", "%", "n", "==", "0", ":", "return", "dig_data", "# no need of padding", "else", ":", "ladd", "=", "n", "-", "(", "l0", "%", "n", ")", "dig_data_add", "=", "np", ".", "zeros", "(", "ladd", ",", "dtype", "=", "\"uint32\"", ")", "dig_data_add", ".", "fill", "(", "dig_data", "[", "-", "1", "]", ")", "return", "np", ".", "concatenate", "(", "(", "dig_data", ",", "dig_data_add", ")", ")" ]
Pad dig_data with its last element so that the new array is a multiple of n.
[ "Pad", "dig_data", "with", "its", "last", "element", "so", "that", "the", "new", "array", "is", "a", "multiple", "of", "n", "." ]
e2565aef7ee16ee06523de975e8aa41aca14e3b2
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/daq/daq.py#L255-L267
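The padding logic above is self-contained enough to exercise directly; a minimal sketch of the same behaviour as a free function (the function name is ours, not the class method's):

    import numpy as np

    def pad_digital_data(dig_data, n):
        # repeat the last element until the length is a multiple of n
        n = int(n)
        rem = len(dig_data) % n
        if rem == 0:
            return dig_data
        pad = np.full(n - rem, dig_data[-1], dtype='uint32')
        return np.concatenate((dig_data, pad))

    data = np.array([1, 2, 3], dtype='uint32')
    assert list(pad_digital_data(data, 4)) == [1, 2, 3, 3]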
241,312
shreyaspotnis/rampage
rampage/daq/daq.py
DigitalOutputTaskWithCallbacks.EveryNCallback
def EveryNCallback(self): """Called by PyDAQmx whenever a callback event occurs.""" # print('ncall ', self.n_callbacks) if self.do_callbacks: if self.n_callbacks >= self.callback_step: # print('n_callbacks', self.n_callbacks) for func, func_dict in self.callback_funcs: func(func_dict) print('func:::', func) self.latest_callback_index +=1 if self.latest_callback_index >= len(self.callback_function_list): # print('done with callbacks') self.do_callbacks = False else: out = self.callback_function_list[self.latest_callback_index] callback_time = out[0] self.callback_step = int(callback_time/expt_settings.callback_resolution) # print('updatin callback step', self.callback_step) self.callback_funcs = out[1] self.n_callbacks += 1 #print('n_callbacks', self.n_callbacks) return 0
python
def EveryNCallback(self): """Called by PyDAQmx whenever a callback event occurs.""" # print('ncall ', self.n_callbacks) if self.do_callbacks: if self.n_callbacks >= self.callback_step: # print('n_callbacks', self.n_callbacks) for func, func_dict in self.callback_funcs: func(func_dict) print('func:::', func) self.latest_callback_index +=1 if self.latest_callback_index >= len(self.callback_function_list): # print('done with callbacks') self.do_callbacks = False else: out = self.callback_function_list[self.latest_callback_index] callback_time = out[0] self.callback_step = int(callback_time/expt_settings.callback_resolution) # print('updatin callback step', self.callback_step) self.callback_funcs = out[1] self.n_callbacks += 1 #print('n_callbacks', self.n_callbacks) return 0
[ "def", "EveryNCallback", "(", "self", ")", ":", "# print('ncall ', self.n_callbacks)", "if", "self", ".", "do_callbacks", ":", "if", "self", ".", "n_callbacks", ">=", "self", ".", "callback_step", ":", "# print('n_callbacks', self.n_callbacks)", "for", "func", ",", "func_dict", "in", "self", ".", "callback_funcs", ":", "func", "(", "func_dict", ")", "print", "(", "'func:::'", ",", "func", ")", "self", ".", "latest_callback_index", "+=", "1", "if", "self", ".", "latest_callback_index", ">=", "len", "(", "self", ".", "callback_function_list", ")", ":", "# print('done with callbacks')", "self", ".", "do_callbacks", "=", "False", "else", ":", "out", "=", "self", ".", "callback_function_list", "[", "self", ".", "latest_callback_index", "]", "callback_time", "=", "out", "[", "0", "]", "self", ".", "callback_step", "=", "int", "(", "callback_time", "/", "expt_settings", ".", "callback_resolution", ")", "# print('updatin callback step', self.callback_step)", "self", ".", "callback_funcs", "=", "out", "[", "1", "]", "self", ".", "n_callbacks", "+=", "1", "#print('n_callbacks', self.n_callbacks)", "return", "0" ]
Called by PyDAQmx whenever a callback event occurs.
[ "Called", "by", "PyDAQmx", "whenever", "a", "callback", "event", "occurs", "." ]
e2565aef7ee16ee06523de975e8aa41aca14e3b2
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/daq/daq.py#L269-L292
241,313
bsvetchine/django-payzen
django_payzen/tools.py
get_vads_trans_id
def get_vads_trans_id(vads_site_id, vads_trans_date): """ Returns a default value for vads_trans_id field. vads_trans_id field is mandatory. It is composed of 6 numeric characters that identify the transaction. There is a uniqueness constraint between vads_site_id and vads_trans_date (the first 8 characters representing the transaction date). We consider the probability of generating 2 identical vads_trans_id values in the same day to be negligible.""" vads_trans_id = "" for i in range(0, 6): vads_trans_id += str(random.randint(0, 9)) return vads_trans_id
python
def get_vads_trans_id(vads_site_id, vads_trans_date): """ Returns a default value for vads_trans_id field. vads_trans_id field is mandatory. It is composed of 6 numeric characters that identify the transaction. There is a uniqueness constraint between vads_site_id and vads_trans_date (the first 8 characters representing the transaction date). We consider the probability of generating 2 identical vads_trans_id values in the same day to be negligible.""" vads_trans_id = "" for i in range(0, 6): vads_trans_id += str(random.randint(0, 9)) return vads_trans_id
[ "def", "get_vads_trans_id", "(", "vads_site_id", ",", "vads_trans_date", ")", ":", "vads_trans_id", "=", "\"\"", "for", "i", "in", "range", "(", "0", ",", "6", ")", ":", "vads_trans_id", "+=", "str", "(", "random", ".", "randint", "(", "0", ",", "9", ")", ")", "return", "vads_trans_id" ]
Returns a default value for vads_trans_id field. vads_trans_id field is mandatory. It is composed of 6 numeric characters that identify the transaction. There is a uniqueness constraint between vads_site_id and vads_trans_date (the first 8 characters representing the transaction date). We consider the probability of generating 2 identical vads_trans_id values in the same day to be negligible.
[ "Returns", "a", "default", "value", "for", "vads_trans_id", "field", "." ]
944c3026120151495310cb1eb3c6370dc2db3db9
https://github.com/bsvetchine/django-payzen/blob/944c3026120151495310cb1eb3c6370dc2db3db9/django_payzen/tools.py#L12-L26
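Since each of the 6 digits is drawn independently and uniformly, the loop above is distributionally equivalent to zero-padding one uniform draw from 0 to 999999; a one-line restatement:

    import random

    vads_trans_id = '{:06d}'.format(random.randint(0, 999999))
    assert len(vads_trans_id) == 6 and vads_trans_id.isdigit()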
241,314
bsvetchine/django-payzen
django_payzen/tools.py
get_signature
def get_signature(payment_request): """ Returns the signature for the transaction. To compute the signature, first you have to get the value of all the fields that start with 'vads_', ordering them alphabetically. All the values are separated by the '+' character. Then you add the value of the payzen certificate. Finally you hash the string using sha1.""" vads_args = {} for field in payment_request._meta.fields: if field.name[:5] == 'vads_': field_value = field.value_from_object(payment_request) if field_value: vads_args.update({ field.name: field_value }) base_str = '' for key in sorted(vads_args): base_str += str(vads_args[key]) + '+' base_str += app_settings.VADS_CERTIFICATE return hashlib.sha1(base_str.encode("utf-8")).hexdigest()
python
def get_signature(payment_request): """ Returns the signature for the transaction. To compute the signature, first you have to get the value of all the fields that start with 'vads_', ordering them alphabetically. All the values are separated by the '+' character. Then you add the value of the payzen certificate. Finally you hash the string using sha1.""" vads_args = {} for field in payment_request._meta.fields: if field.name[:5] == 'vads_': field_value = field.value_from_object(payment_request) if field_value: vads_args.update({ field.name: field_value }) base_str = '' for key in sorted(vads_args): base_str += str(vads_args[key]) + '+' base_str += app_settings.VADS_CERTIFICATE return hashlib.sha1(base_str.encode("utf-8")).hexdigest()
[ "def", "get_signature", "(", "payment_request", ")", ":", "vads_args", "=", "{", "}", "for", "field", "in", "payment_request", ".", "_meta", ".", "fields", ":", "if", "field", ".", "name", "[", ":", "5", "]", "==", "'vads_'", ":", "field_value", "=", "field", ".", "value_from_object", "(", "payment_request", ")", "if", "field_value", ":", "vads_args", ".", "update", "(", "{", "field", ".", "name", ":", "field_value", "}", ")", "base_str", "=", "''", "for", "key", "in", "sorted", "(", "vads_args", ")", ":", "base_str", "+=", "str", "(", "vads_args", "[", "key", "]", ")", "+", "'+'", "base_str", "+=", "app_settings", ".", "VADS_CERTIFICATE", "return", "hashlib", ".", "sha1", "(", "base_str", ".", "encode", "(", "\"utf-8\"", ")", ")", ".", "hexdigest", "(", ")" ]
Returns the signature for the transaction. To compute the signature, first you have to get the value of all the fields that start with 'vads_', ordering them alphabetically. All the values are separated by the '+' character. Then you add the value of the payzen certificate. Finally you hash the string using sha1.
[ "Returns", "the", "signature", "for", "the", "transaction", "." ]
944c3026120151495310cb1eb3c6370dc2db3db9
https://github.com/bsvetchine/django-payzen/blob/944c3026120151495310cb1eb3c6370dc2db3db9/django_payzen/tools.py#L60-L82
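The signature scheme itself does not depend on Django; a self-contained sketch over a plain dict (the helper name, sample field values, and 'CERT' placeholder are ours):

    import hashlib

    def payzen_signature(fields, certificate):
        # alphabetical vads_* values joined by '+', certificate appended, SHA-1
        vads = {k: v for k, v in fields.items()
                if k.startswith('vads_') and v}
        base = ''.join(str(vads[k]) + '+' for k in sorted(vads)) + certificate
        return hashlib.sha1(base.encode('utf-8')).hexdigest()

    sig = payzen_signature({'vads_site_id': '12345', 'vads_amount': '1000'},
                           'CERT')
    assert len(sig) == 40   # hex-encoded SHA-1 digest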
241,315
bsvetchine/django-payzen
django_payzen/tools.py
process_response
def process_response(data): """Process a payment response.""" # We check if the signature is valid. If not return if not is_signature_valid(data): logger.warning( "Django-Payzen : Response signature detected as invalid", extra={"stack": True} ) return None from . import forms from . import models # The signature is valid vads_trans_id = data.get("vads_trans_id") vads_trans_date = data.get("vads_trans_date") vads_site_id = data.get("vads_site_id") try: instance = models.PaymentResponse.objects.get( vads_trans_id=vads_trans_id, vads_trans_date=vads_trans_date, vads_site_id=vads_site_id) form = forms.PaymentResponseForm(data, instance=instance) except models.PaymentResponse.DoesNotExist: form = forms.PaymentResponseForm(data) if form.is_valid(): response = form.save() logger.info("Django-Payzen : Transaction {} response received !" .format(response.vads_trans_id)) else: logger.error("Django-Payzen : Response could not be saved - {} {}" .format(form.errors, data), extra={"stack": True}) response = None return response
python
def process_response(data): """Process a payment response.""" # We check if the signature is valid. If not return if not is_signature_valid(data): logger.warning( "Django-Payzen : Response signature detected as invalid", extra={"stack": True} ) return None from . import forms from . import models # The signature is valid vads_trans_id = data.get("vads_trans_id") vads_trans_date = data.get("vads_trans_date") vads_site_id = data.get("vads_site_id") try: instance = models.PaymentResponse.objects.get( vads_trans_id=vads_trans_id, vads_trans_date=vads_trans_date, vads_site_id=vads_site_id) form = forms.PaymentResponseForm(data, instance=instance) except models.PaymentResponse.DoesNotExist: form = forms.PaymentResponseForm(data) if form.is_valid(): response = form.save() logger.info("Django-Payzen : Transaction {} response received !" .format(response.vads_trans_id)) else: logger.error("Django-Payzen : Response could not be saved - {} {}" .format(form.errors, data), extra={"stack": True}) response = None return response
[ "def", "process_response", "(", "data", ")", ":", "# We check if the signature is valid. If not return", "if", "not", "is_signature_valid", "(", "data", ")", ":", "logger", ".", "warning", "(", "\"Django-Payzen : Response signature detected as invalid\"", ",", "extra", "=", "{", "\"stack\"", ":", "True", "}", ")", "return", "None", "from", ".", "import", "forms", "from", ".", "import", "models", "# The signature is valid", "vads_trans_id", "=", "data", ".", "get", "(", "\"vads_trans_id\"", ")", "vads_trans_date", "=", "data", ".", "get", "(", "\"vads_trans_date\"", ")", "vads_site_id", "=", "data", ".", "get", "(", "\"vads_site_id\"", ")", "try", ":", "instance", "=", "models", ".", "PaymentResponse", ".", "objects", ".", "get", "(", "vads_trans_id", "=", "vads_trans_id", ",", "vads_trans_date", "=", "vads_trans_date", ",", "vads_site_id", "=", "vads_site_id", ")", "form", "=", "forms", ".", "PaymentResponseForm", "(", "data", ",", "instance", "=", "instance", ")", "except", "models", ".", "PaymentResponse", ".", "DoesNotExist", ":", "form", "=", "forms", ".", "PaymentResponseForm", "(", "data", ")", "if", "form", ".", "is_valid", "(", ")", ":", "response", "=", "form", ".", "save", "(", ")", "logger", ".", "info", "(", "\"Django-Payzen : Transaction {} response received !\"", ".", "format", "(", "response", ".", "vads_trans_id", ")", ")", "else", ":", "logger", ".", "error", "(", "\"Django-Payzen : Response could not be saved - {} {}\"", ".", "format", "(", "form", ".", "errors", ",", "data", ")", ",", "extra", "=", "{", "\"stack\"", ":", "True", "}", ")", "response", "=", "None", "return", "response" ]
Process a payment response.
[ "Process", "a", "payment", "response", "." ]
944c3026120151495310cb1eb3c6370dc2db3db9
https://github.com/bsvetchine/django-payzen/blob/944c3026120151495310cb1eb3c6370dc2db3db9/django_payzen/tools.py#L95-L129
241,316
fpoirotte/sphinxcontrib-varlinks
sphinxcontrib/varlinks.py
LinkSubstitutionPhase1.apply
def apply(self): """Create substitution nodes for hyperlinks""" # In this phase, we look for hyperlinks (references nodes) # that contain substitutions (of the form "|foo|"). # We then add actual "substitution"s nodes to those references, # so that they can be replaced by the substitution processor. subst_re = re.compile(self.subst_pattern) for link in self.document.traverse(self._maybe_hyperlink): if 'refuri' not in link: continue # Note: "target" nodes do not have a "name" attribute. if '|' not in link['refuri'] and '|' not in link.get('name', ''): continue # This list acts as a cache so that only one substitution node # is added as a child for each substitution name. substitutions = [] matches = subst_re.findall(link['refuri']) + \ subst_re.findall(link.get('name', '')) for subref_text in matches: if subref_text in substitutions: continue substitutions.append(subref_text) subref_node = nodes.substitution_reference(subref_text) link.append(subref_node) self.document.note_substitution_ref(subref_node, subref_text) # Build a map of substitutions names to child indices # (minus one since the actual link label is in link[0]). link['varlinks'] = \ dict(zip(substitutions, range(len(substitutions))))
python
def apply(self): """Create substitution nodes for hyperlinks""" # In this phase, we look for hyperlinks (references nodes) # that contain substitutions (of the form "|foo|"). # We then add actual "substitution"s nodes to those references, # so that they can be replaced by the substitution processor. subst_re = re.compile(self.subst_pattern) for link in self.document.traverse(self._maybe_hyperlink): if 'refuri' not in link: continue # Note: "target" nodes do not have a "name" attribute. if '|' not in link['refuri'] and '|' not in link.get('name', ''): continue # This list acts as a cache so that only one substitution node # is added as a child for each substitution name. substitutions = [] matches = subst_re.findall(link['refuri']) + \ subst_re.findall(link.get('name', '')) for subref_text in matches: if subref_text in substitutions: continue substitutions.append(subref_text) subref_node = nodes.substitution_reference(subref_text) link.append(subref_node) self.document.note_substitution_ref(subref_node, subref_text) # Build a map of substitutions names to child indices # (minus one since the actual link label is in link[0]). link['varlinks'] = \ dict(zip(substitutions, range(len(substitutions))))
[ "def", "apply", "(", "self", ")", ":", "# In this phase, we look for hyperlinks (references nodes)", "# that contain substitutions (of the form \"|foo|\").", "# We then add actual \"substitution\"s nodes to those references,", "# so that they can be replaced by the substitution processor.", "subst_re", "=", "re", ".", "compile", "(", "self", ".", "subst_pattern", ")", "for", "link", "in", "self", ".", "document", ".", "traverse", "(", "self", ".", "_maybe_hyperlink", ")", ":", "if", "'refuri'", "not", "in", "link", ":", "continue", "# Note: \"target\" nodes do not have a \"name\" attribute.", "if", "'|'", "not", "in", "link", "[", "'refuri'", "]", "and", "'|'", "not", "in", "link", ".", "get", "(", "'name'", ",", "''", ")", ":", "continue", "# This list acts as a cache so that only one substitution node", "# is added as a child for each substitution name.", "substitutions", "=", "[", "]", "matches", "=", "subst_re", ".", "findall", "(", "link", "[", "'refuri'", "]", ")", "+", "subst_re", ".", "findall", "(", "link", ".", "get", "(", "'name'", ",", "''", ")", ")", "for", "subref_text", "in", "matches", ":", "if", "subref_text", "in", "substitutions", ":", "continue", "substitutions", ".", "append", "(", "subref_text", ")", "subref_node", "=", "nodes", ".", "substitution_reference", "(", "subref_text", ")", "link", ".", "append", "(", "subref_node", ")", "self", ".", "document", ".", "note_substitution_ref", "(", "subref_node", ",", "subref_text", ")", "# Build a map of substitutions names to child indices", "# (minus one since the actual link label is in link[0]).", "link", "[", "'varlinks'", "]", "=", "dict", "(", "zip", "(", "substitutions", ",", "range", "(", "len", "(", "substitutions", ")", ")", ")", ")" ]
Create substitution nodes for hyperlinks
[ "Create", "substitution", "nodes", "for", "hyperlinks" ]
836899486e841fee4bac32a9d57da2786b2045c6
https://github.com/fpoirotte/sphinxcontrib-varlinks/blob/836899486e841fee4bac32a9d57da2786b2045c6/sphinxcontrib/varlinks.py#L36-L70
241,317
fpoirotte/sphinxcontrib-varlinks
sphinxcontrib/varlinks.py
LinkSubstitutionPhase2.apply
def apply(self): """Replace substitutions in hyperlinks with their contents""" # In this phase, we replace the substitutions in hyperlinks # with the contents of the sub-nodes introduced during phase 1. # We also remove those temporary nodes from the tree. subst_re = re.compile(self.subst_pattern) # Apply the substitutions to hyperlink references. for link in self.document.traverse(nodes.reference): substitutions = link.get('varlinks') if not substitutions: continue replacer = self._replace(substitutions, link.children, 1) link['refuri'] = subst_re.sub(replacer, link['refuri']) content = subst_re.sub(replacer, link[0]) # Cleanup the temporary nodes and recreate the node's content. link.clear() del link['varlinks'] link.append(nodes.Text(content)) # Do the same with hyperlink targets. for link in self.document.traverse(nodes.target): substitutions = link.get('varlinks') if not substitutions: continue replacer = self._replace(substitutions, link.children, 0) link['refuri'] = subst_re.sub(replacer, link['refuri']) # Cleanup the temporary nodes. link.clear() del link['varlinks']
python
def apply(self): """Replace substitutions in hyperlinks with their contents""" # In this phase, we replace the substitutions in hyperlinks # with the contents of the sub-nodes introduced during phase 1. # We also remove those temporary nodes from the tree. subst_re = re.compile(self.subst_pattern) # Apply the substitutions to hyperlink references. for link in self.document.traverse(nodes.reference): substitutions = link.get('varlinks') if not substitutions: continue replacer = self._replace(substitutions, link.children, 1) link['refuri'] = subst_re.sub(replacer, link['refuri']) content = subst_re.sub(replacer, link[0]) # Cleanup the temporary nodes and recreate the node's content. link.clear() del link['varlinks'] link.append(nodes.Text(content)) # Do the same with hyperlink targets. for link in self.document.traverse(nodes.target): substitutions = link.get('varlinks') if not substitutions: continue replacer = self._replace(substitutions, link.children, 0) link['refuri'] = subst_re.sub(replacer, link['refuri']) # Cleanup the temporary nodes. link.clear() del link['varlinks']
[ "def", "apply", "(", "self", ")", ":", "# In this phase, we replace the substitutions in hyperlinks", "# with the contents of the sub-nodes introduced during phase 1.", "# We also remove those temporary nodes from the tree.", "subst_re", "=", "re", ".", "compile", "(", "self", ".", "subst_pattern", ")", "# Apply the substitutions to hyperlink references.", "for", "link", "in", "self", ".", "document", ".", "traverse", "(", "nodes", ".", "reference", ")", ":", "substitutions", "=", "link", ".", "get", "(", "'varlinks'", ")", "if", "not", "substitutions", ":", "continue", "replacer", "=", "self", ".", "_replace", "(", "substitutions", ",", "link", ".", "children", ",", "1", ")", "link", "[", "'refuri'", "]", "=", "subst_re", ".", "sub", "(", "replacer", ",", "link", "[", "'refuri'", "]", ")", "content", "=", "subst_re", ".", "sub", "(", "replacer", ",", "link", "[", "0", "]", ")", "# Cleanup the temporary nodes and recreate the node's content.", "link", ".", "clear", "(", ")", "del", "link", "[", "'varlinks'", "]", "link", ".", "append", "(", "nodes", ".", "Text", "(", "content", ")", ")", "# Do the same with hyperlink targets.", "for", "link", "in", "self", ".", "document", ".", "traverse", "(", "nodes", ".", "target", ")", ":", "substitutions", "=", "link", ".", "get", "(", "'varlinks'", ")", "if", "not", "substitutions", ":", "continue", "replacer", "=", "self", ".", "_replace", "(", "substitutions", ",", "link", ".", "children", ",", "0", ")", "link", "[", "'refuri'", "]", "=", "subst_re", ".", "sub", "(", "replacer", ",", "link", "[", "'refuri'", "]", ")", "# Cleanup the temporary nodes.", "link", ".", "clear", "(", ")", "del", "link", "[", "'varlinks'", "]" ]
Replace substitutions in hyperlinks with their contents
[ "Replace", "substitutions", "in", "hyperlinks", "with", "their", "contents" ]
836899486e841fee4bac32a9d57da2786b2045c6
https://github.com/fpoirotte/sphinxcontrib-varlinks/blob/836899486e841fee4bac32a9d57da2786b2045c6/sphinxcontrib/varlinks.py#L92-L123
241,318
diffeo/yakonfig
yakonfig/yakonfig.py
set_global_config
def set_global_config(path_dict_or_stream): '''Set the global configuration. Call this from `main()` with a file system path, stream object, or a dict. Calling it repeatedly with the same path is safe. Calling it with a different path or repeatedly with a stream or dict requires an explicit call to :func:`clear_global_config`. :param path_dict_or_stream: source of configuration ''' path = None mapping = None stream = None global _config_file_path global _config_cache if isinstance(path_dict_or_stream, string_types): path = path_dict_or_stream if _config_file_path and _config_file_path != path: raise Exception('set_global_config(%r) differs from %r, ' 'consider calling clear_global_config first' % (path, _config_file_path)) _config_file_path = path stream = open(path) elif isinstance(path_dict_or_stream, collections.Mapping): mapping = path_dict_or_stream elif hasattr(path_dict_or_stream, 'read'): stream = path_dict_or_stream else: raise Exception('set_global_config(%r) instead of a path, ' 'mapping object, or stream open for reading' % path_dict_or_stream) if stream is not None: mapping = yaml.load(stream, Loader) _config_cache = mapping # TODO: convert to frozen dict? return _config_cache
python
def set_global_config(path_dict_or_stream): '''Set the global configuration. Call this from `main()` with a file system path, stream object, or a dict. Calling it repeatedly with the same path is safe. Calling it with a different path or repeatedly with a stream or dict requires an explicit call to :func:`clear_global_config`. :param path_dict_or_stream: source of configuration ''' path = None mapping = None stream = None global _config_file_path global _config_cache if isinstance(path_dict_or_stream, string_types): path = path_dict_or_stream if _config_file_path and _config_file_path != path: raise Exception('set_global_config(%r) differs from %r, ' 'consider calling clear_global_config first' % (path, _config_file_path)) _config_file_path = path stream = open(path) elif isinstance(path_dict_or_stream, collections.Mapping): mapping = path_dict_or_stream elif hasattr(path_dict_or_stream, 'read'): stream = path_dict_or_stream else: raise Exception('set_global_config(%r) instead of a path, ' 'mapping object, or stream open for reading' % path_dict_or_stream) if stream is not None: mapping = yaml.load(stream, Loader) _config_cache = mapping # TODO: convert to frozen dict? return _config_cache
[ "def", "set_global_config", "(", "path_dict_or_stream", ")", ":", "path", "=", "None", "mapping", "=", "None", "stream", "=", "None", "global", "_config_file_path", "global", "_config_cache", "if", "isinstance", "(", "path_dict_or_stream", ",", "string_types", ")", ":", "path", "=", "path_dict_or_stream", "if", "_config_file_path", "and", "_config_file_path", "!=", "path", ":", "raise", "Exception", "(", "'set_global_config(%r) differs from %r, '", "'consider calling clear_global_config first'", "%", "(", "path", ",", "_config_file_path", ")", ")", "_config_file_path", "=", "path", "stream", "=", "open", "(", "path", ")", "elif", "isinstance", "(", "path_dict_or_stream", ",", "collections", ".", "Mapping", ")", ":", "mapping", "=", "path_dict_or_stream", "elif", "hasattr", "(", "path_dict_or_stream", ",", "'read'", ")", ":", "stream", "=", "path_dict_or_stream", "else", ":", "raise", "Exception", "(", "'set_global_config(%r) instead of a path, '", "'mapping object, or stream open for reading'", "%", "path_dict_or_stream", ")", "if", "stream", "is", "not", "None", ":", "mapping", "=", "yaml", ".", "load", "(", "stream", ",", "Loader", ")", "_config_cache", "=", "mapping", "# TODO: convert to frozen dict?", "return", "_config_cache" ]
Set the global configuration. Call this from `main()` with a file system path, stream object, or a dict. Calling it repeatedly with the same path is safe. Calling it with a different path or repeatedly with a stream or dict requires an explicit call to :func:`clear_global_config`. :param path_dict_or_stream: source of configuration
[ "Set", "the", "global", "configuration", "." ]
412e195da29b4f4fc7b72967c192714a6f5eaeb5
https://github.com/diffeo/yakonfig/blob/412e195da29b4f4fc7b72967c192714a6f5eaeb5/yakonfig/yakonfig.py#L70-L114
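A usage sketch mirroring the call that appears in _temporary_config's doctest below (the dict branch; a file system path or an open stream would follow the other branches of set_global_config):

    import yakonfig.yakonfig as yk

    cfg = yk.set_global_config({'a': 'b'})   # dict sources are cached as-is
    assert cfg == {'a': 'b'}
    yk.clear_global_config()   # needed before switching to a different source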
241,319
diffeo/yakonfig
yakonfig/yakonfig.py
_temporary_config
def _temporary_config(): '''Temporarily replace the global configuration. Use this in a 'with' statement. The inner block may freely manipulate the global configuration; the original global configuration is restored at exit. >>> with yakonfig.yakonfig._temporary_config(): ... yakonfig.yakonfig.set_global_config({'a': 'b'}) ... print yakonfig.yakonfig.get_global_config('a') b ''' global _config_cache, _config_file_path old_cc = _config_cache old_cfp = _config_file_path clear_global_config() yield _config_cache = old_cc _config_file_path = old_cfp
python
def _temporary_config(): '''Temporarily replace the global configuration. Use this in a 'with' statement. The inner block may freely manipulate the global configuration; the original global configuration is restored at exit. >>> with yakonfig.yakonfig._temporary_config(): ... yakonfig.yakonfig.set_global_config({'a': 'b'}) ... print yakonfig.yakonfig.get_global_config('a') b ''' global _config_cache, _config_file_path old_cc = _config_cache old_cfp = _config_file_path clear_global_config() yield _config_cache = old_cc _config_file_path = old_cfp
[ "def", "_temporary_config", "(", ")", ":", "global", "_config_cache", ",", "_config_file_path", "old_cc", "=", "_config_cache", "old_cfp", "=", "_config_file_path", "clear_global_config", "(", ")", "yield", "_config_cache", "=", "old_cc", "_config_file_path", "=", "old_cfp" ]
Temporarily replace the global configuration. Use this in a 'with' statement. The inner block may freely manipulate the global configuration; the original global configuration is restored at exit. >>> with yakonfig.yakonfig._temporary_config(): ... yakonfig.yakonfig.set_global_config({'a': 'b'}) ... print yakonfig.yakonfig.get_global_config('a') b
[ "Temporarily", "replace", "the", "global", "configuration", "." ]
412e195da29b4f4fc7b72967c192714a6f5eaeb5
https://github.com/diffeo/yakonfig/blob/412e195da29b4f4fc7b72967c192714a6f5eaeb5/yakonfig/yakonfig.py#L142-L161
241,320
diffeo/yakonfig
yakonfig/yakonfig.py
Loader.include_yaml
def include_yaml(self, node): ''' load another yaml file from the path specified by node's value ''' filename = self.construct_scalar(node) if not filename.startswith('/'): if self._root is None: raise Exception('!include_yaml %s is a relative path, ' 'but stream lacks path' % filename) filename = os.path.join(self._root, self.construct_scalar(node)) with self.open(filename, 'r') as fin: return yaml.load(fin, Loader)
python
def include_yaml(self, node): ''' load another yaml file from the path specified by node's value ''' filename = self.construct_scalar(node) if not filename.startswith('/'): if self._root is None: raise Exception('!include_yaml %s is a relative path, ' 'but stream lacks path' % filename) filename = os.path.join(self._root, self.construct_scalar(node)) with self.open(filename, 'r') as fin: return yaml.load(fin, Loader)
[ "def", "include_yaml", "(", "self", ",", "node", ")", ":", "filename", "=", "self", ".", "construct_scalar", "(", "node", ")", "if", "not", "filename", ".", "startswith", "(", "'/'", ")", ":", "if", "self", ".", "_root", "is", "None", ":", "raise", "Exception", "(", "'!include_yaml %s is a relative path, '", "'but stream lacks path'", "%", "filename", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_root", ",", "self", ".", "construct_scalar", "(", "node", ")", ")", "with", "self", ".", "open", "(", "filename", ",", "'r'", ")", "as", "fin", ":", "return", "yaml", ".", "load", "(", "fin", ",", "Loader", ")" ]
load another yaml file from the path specified by node's value
[ "load", "another", "yaml", "file", "from", "the", "path", "specified", "by", "node", "s", "value" ]
412e195da29b4f4fc7b72967c192714a6f5eaeb5
https://github.com/diffeo/yakonfig/blob/412e195da29b4f4fc7b72967c192714a6f5eaeb5/yakonfig/yakonfig.py#L39-L50
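A sketch of how the include tag would be consumed; the !include_yaml tag name comes from the error message above, the yaml.load(stream, Loader) call is the one the module itself uses, and the Loader import path is an assumption based on this file's location:

    import yaml
    from yakonfig.yakonfig import Loader   # assumed import location

    # main.yaml might contain a line such as:
    #     database: !include_yaml db.yaml
    with open('main.yaml') as stream:
        config = yaml.load(stream, Loader)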
241,321
maxweisspoker/simplebitcoinfuncs
simplebitcoinfuncs/stealth.py
paystealth
def paystealth(stealthaddr,ephempriv=None,_doctest_nonce=-1): ''' Input a stealth address, and optionally an ephemeral private key, and generate a payment pubkey and stealth OP_RETURN data. (The OP_RETURN data is just a nonce and the ephemeral public key.) Works with standard single spend key stealth addresses, which begin with the '2a00' version bytes, and have 00-08 prefix bits and any 1-byte prefix. Prefix ff with 08 prefix bits and nonce starts at 0: >>> paystealth("vJmvinTgWP1phdFnACjc64U5iMExyv7JcQJVZjMA15MRf2KzmqjSpgDjmj8NxaFfiMBUEjaydmNfLBCcXstVDfkjwRoFQw7rLHWdFk", \ '824dc0ed612deca8664b3d421eaed28827eeb364ae76abc9a5924242ddca290a', 0) ('03e05931191100fa6cd072b1eda63079736464b950d2875e67f2ab2c8af9b07b8d', \ '0600000124025c6fb169b0ff1c95426fa073fadc62f50a6e98482ec8b3f26fb73006009d1c00') ''' if ephempriv is None: ephempriv = genkeyhex() addrhex = b58d(stealthaddr) assert len(addrhex) == 142 assert int(addrhex[-4:-2],16) < 9 # Assume one spend key, and 1-byte prefix and prefix-bits assert addrhex[:4] == '2a00' assert addrhex[70:72] == '01' scanpub = addrhex[4:70] spendpub = addrhex[72:-4] ephempub = privtopub(ephempriv,True) secret = sha256(multiplypub(scanpub,ephempriv,True)) paykey = addpubs(spendpub,privtopub(secret,False),True) if _doctest_nonce == -1: nonce = int(genkeyhex(),16) % (2**32) else: nonce = _doctest_nonce assert nonce < 4294967296 and nonce >= 0 startingnonce = nonce while True: if nonce > 4294967295: nonce = 0 noncehex = dechex(nonce,4) hashprefix = unhexlify(hash256('6a2606' + noncehex + ephempub))[::-1][:4] prebits = int(addrhex[-4:-2],16) if prebits == 0: break prefix = unhexlify(addrhex[-2:]) # Location of prefix should be explicit if it's ever more than 1 byte bytepos = 0 cont = False while prebits > 8: # Not necessary with asserted 1-byte prefix if hexstrlify(prefix)[2*bytepos:(2*bytepos)+2] != \ hexstrlify(hashprefix)[2*bytepos:(2*bytepos)+2]: cont = True break prebits = prebits - 8 bytepos = bytepos + 1 if cont: continue prefixhex = hexstrlify(prefix)[2*bytepos:(2*bytepos)+2] if prefixhex == "": prefixhex = hexstrlify(b"00") hashprefixhex = hexstrlify(hashprefix)[2*bytepos:(2*bytepos)+2] if hashprefixhex == "": hashprefixhex = hexstrlify(b"00") prefixbits = (((1 << (8 - prebits)) - 1) ^ 0xff) & int(prefixhex, 16) hashbits = (((1 << (8 - prebits)) - 1) ^ 0xff) & int(hashprefixhex, 16) if prefixbits == hashbits: cont = False else: cont = True if not cont: break nonce += 1 if nonce == startingnonce: raise Exception("No valid nonce was found. A different ephemeral key must be used.") return paykey, '06' + noncehex + ephempub
python
def paystealth(stealthaddr,ephempriv=None,_doctest_nonce=-1): ''' Input a stealth address, and optionally an ephemeral private key, and generate a payment pubkey and stealth OP_RETURN data. (The OP_RETURN data is just a nonce and the ephemeral public key.) Works with standard single spend key stealth addresses, which begin with the '2a00' version bytes, and have 00-08 prefix bits and any 1-byte prefix. Prefix ff with 08 prefix bits and nonce starts at 0: >>> paystealth("vJmvinTgWP1phdFnACjc64U5iMExyv7JcQJVZjMA15MRf2KzmqjSpgDjmj8NxaFfiMBUEjaydmNfLBCcXstVDfkjwRoFQw7rLHWdFk", \ '824dc0ed612deca8664b3d421eaed28827eeb364ae76abc9a5924242ddca290a', 0) ('03e05931191100fa6cd072b1eda63079736464b950d2875e67f2ab2c8af9b07b8d', \ '0600000124025c6fb169b0ff1c95426fa073fadc62f50a6e98482ec8b3f26fb73006009d1c00') ''' if ephempriv is None: ephempriv = genkeyhex() addrhex = b58d(stealthaddr) assert len(addrhex) == 142 assert int(addrhex[-4:-2],16) < 9 # Assume one spend key, and 1-byte prefix and prefix-bits assert addrhex[:4] == '2a00' assert addrhex[70:72] == '01' scanpub = addrhex[4:70] spendpub = addrhex[72:-4] ephempub = privtopub(ephempriv,True) secret = sha256(multiplypub(scanpub,ephempriv,True)) paykey = addpubs(spendpub,privtopub(secret,False),True) if _doctest_nonce == -1: nonce = int(genkeyhex(),16) % (2**32) else: nonce = _doctest_nonce assert nonce < 4294967296 and nonce >= 0 startingnonce = nonce while True: if nonce > 4294967295: nonce = 0 noncehex = dechex(nonce,4) hashprefix = unhexlify(hash256('6a2606' + noncehex + ephempub))[::-1][:4] prebits = int(addrhex[-4:-2],16) if prebits == 0: break prefix = unhexlify(addrhex[-2:]) # Location of prefix should be explicit if it's ever more than 1 byte bytepos = 0 cont = False while prebits > 8: # Not necessary with asserted 1-byte prefix if hexstrlify(prefix)[2*bytepos:(2*bytepos)+2] != \ hexstrlify(hashprefix)[2*bytepos:(2*bytepos)+2]: cont = True break prebits = prebits - 8 bytepos = bytepos + 1 if cont: continue prefixhex = hexstrlify(prefix)[2*bytepos:(2*bytepos)+2] if prefixhex == "": prefixhex = hexstrlify(b"00") hashprefixhex = hexstrlify(hashprefix)[2*bytepos:(2*bytepos)+2] if hashprefixhex == "": hashprefixhex = hexstrlify(b"00") prefixbits = (((1 << (8 - prebits)) - 1) ^ 0xff) & int(prefixhex, 16) hashbits = (((1 << (8 - prebits)) - 1) ^ 0xff) & int(hashprefixhex, 16) if prefixbits == hashbits: cont = False else: cont = True if not cont: break nonce += 1 if nonce == startingnonce: raise Exception("No valid nonce was found. A different ephemeral key must be used.") return paykey, '06' + noncehex + ephempub
[ "def", "paystealth", "(", "stealthaddr", ",", "ephempriv", "=", "None", ",", "_doctest_nonce", "=", "-", "1", ")", ":", "if", "ephempriv", "is", "None", ":", "ephempriv", "=", "genkeyhex", "(", ")", "addrhex", "=", "b58d", "(", "stealthaddr", ")", "assert", "len", "(", "addrhex", ")", "==", "142", "assert", "int", "(", "addrhex", "[", "-", "4", ":", "-", "2", "]", ",", "16", ")", "<", "9", "# Assume one spend key, and 1-byte prefix and prefix-bits", "assert", "addrhex", "[", ":", "4", "]", "==", "'2a00'", "assert", "addrhex", "[", "70", ":", "72", "]", "==", "'01'", "scanpub", "=", "addrhex", "[", "4", ":", "70", "]", "spendpub", "=", "addrhex", "[", "72", ":", "-", "4", "]", "ephempub", "=", "privtopub", "(", "ephempriv", ",", "True", ")", "secret", "=", "sha256", "(", "multiplypub", "(", "scanpub", ",", "ephempriv", ",", "True", ")", ")", "paykey", "=", "addpubs", "(", "spendpub", ",", "privtopub", "(", "secret", ",", "False", ")", ",", "True", ")", "if", "_doctest_nonce", "==", "-", "1", ":", "nonce", "=", "int", "(", "genkeyhex", "(", ")", ",", "16", ")", "%", "(", "2", "**", "32", ")", "else", ":", "nonce", "=", "_doctest_nonce", "assert", "nonce", "<", "4294967296", "and", "nonce", ">=", "0", "startingnonce", "=", "nonce", "while", "True", ":", "if", "nonce", ">", "4294967295", ":", "nonce", "=", "0", "noncehex", "=", "dechex", "(", "nonce", ",", "4", ")", "hashprefix", "=", "unhexlify", "(", "hash256", "(", "'6a2606'", "+", "noncehex", "+", "ephempub", ")", ")", "[", ":", ":", "-", "1", "]", "[", ":", "4", "]", "prebits", "=", "int", "(", "addrhex", "[", "-", "4", ":", "-", "2", "]", ",", "16", ")", "if", "prebits", "==", "0", ":", "break", "prefix", "=", "unhexlify", "(", "addrhex", "[", "-", "2", ":", "]", ")", "# Location of prefix should be explicit if it's ever more than 1 byte", "bytepos", "=", "0", "cont", "=", "False", "while", "prebits", ">", "8", ":", "# Not necessary with asserted 1-byte prefix", "if", "hexstrlify", "(", "prefix", ")", "[", "2", "*", "bytepos", ":", "(", "2", "*", "bytepos", ")", "+", "2", "]", "!=", "hexstrlify", "(", "hashprefix", ")", "[", "2", "*", "bytepos", ":", "(", "2", "*", "bytepos", ")", "+", "2", "]", ":", "cont", "=", "True", "break", "prebits", "=", "prebits", "-", "8", "bytepos", "=", "bytepos", "+", "1", "if", "cont", ":", "continue", "prefixhex", "=", "hexstrlify", "(", "prefix", ")", "[", "2", "*", "bytepos", ":", "(", "2", "*", "bytepos", ")", "+", "2", "]", "if", "prefixhex", "==", "\"\"", ":", "prefixhex", "=", "hexstrlify", "(", "b\"00\"", ")", "hashprefixhex", "=", "hexstrlify", "(", "hashprefix", ")", "[", "2", "*", "bytepos", ":", "(", "2", "*", "bytepos", ")", "+", "2", "]", "if", "hashprefixhex", "==", "\"\"", ":", "hashprefixhex", "=", "hexstrlify", "(", "b\"00\"", ")", "prefixbits", "=", "(", "(", "(", "1", "<<", "(", "8", "-", "prebits", ")", ")", "-", "1", ")", "^", "0xff", ")", "&", "int", "(", "prefixhex", ",", "16", ")", "hashbits", "=", "(", "(", "(", "1", "<<", "(", "8", "-", "prebits", ")", ")", "-", "1", ")", "^", "0xff", ")", "&", "int", "(", "hashprefixhex", ",", "16", ")", "if", "prefixbits", "==", "hashbits", ":", "cont", "=", "False", "else", ":", "cont", "=", "True", "if", "not", "cont", ":", "break", "nonce", "+=", "1", "if", "nonce", "==", "startingnonce", ":", "raise", "Exception", "(", "\"No valid nonce was found. A different ephemeral key must be used.\"", ")", "return", "paykey", ",", "'06'", "+", "noncehex", "+", "ephempub" ]
Input a stealth address, and optionally an ephemeral private key, and generate a payment pubkey and stealth OP_RETURN data. (The OP_RETURN data is just a nonce and the ephemeral public key.) Works with standard single spend key stealth addresses, which begin with the '2a00' version bytes, and have 00-08 prefix bits and any 1-byte prefix. Prefix ff with 08 prefix bits and nonce starts at 0: >>> paystealth("vJmvinTgWP1phdFnACjc64U5iMExyv7JcQJVZjMA15MRf2KzmqjSpgDjmj8NxaFfiMBUEjaydmNfLBCcXstVDfkjwRoFQw7rLHWdFk", \ '824dc0ed612deca8664b3d421eaed28827eeb364ae76abc9a5924242ddca290a', 0) ('03e05931191100fa6cd072b1eda63079736464b950d2875e67f2ab2c8af9b07b8d', \ '0600000124025c6fb169b0ff1c95426fa073fadc62f50a6e98482ec8b3f26fb73006009d1c00')
[ "Input", "a", "stealth", "address", "and", "optionally", "an", "ephemeral", "private", "key", "and", "generate", "a", "payment", "pubkey", "and", "stealth", "OP_RETURN", "data", "." ]
ad332433dfcc067e86d2e77fa0c8f1a27daffb63
https://github.com/maxweisspoker/simplebitcoinfuncs/blob/ad332433dfcc067e86d2e77fa0c8f1a27daffb63/simplebitcoinfuncs/stealth.py#L36-L117
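A hedged usage sketch, grounded entirely in the doctest above: with the ephemeral private key and nonce pinned, the outputs are deterministic. The import path follows this record's path/url; nothing else is assumed.

# Sketch only; all values are copied from the record's doctest.
from simplebitcoinfuncs.stealth import paystealth

addr = ('vJmvinTgWP1phdFnACjc64U5iMExyv7JcQJVZjMA15MRf2KzmqjSpgDjmj8NxaFfi'
        'MBUEjaydmNfLBCcXstVDfkjwRoFQw7rLHWdFk')
ephem = '824dc0ed612deca8664b3d421eaed28827eeb364ae76abc9a5924242ddca290a'
paykey, payload = paystealth(addr, ephem, 0)
assert paykey == '03e05931191100fa6cd072b1eda63079736464b950d2875e67f2ab2c8af9b07b8d'
# OP_RETURN data is '06' + 4-byte nonce + compressed ephemeral pubkey:
assert payload == '0600000124025c6fb169b0ff1c95426fa073fadc62f50a6e98482ec8b3f26fb73006009d1c00'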
241,322
maxweisspoker/simplebitcoinfuncs
simplebitcoinfuncs/stealth.py
receivestealth
def receivestealth(scanpriv,spendpriv,ephempub): ''' Derive the private key for a stealth payment, using the scan and spend private keys, and the ephemeral public key. Input private keys should be 64-char hex strings, and ephemeral public key should be a 66-char hex compressed public key. >>> receivestealth('af4afaeb40810e5f8abdbb177c31a2d310913f91cf556f5350bca10cbfe8b9ec', \ 'd39758028e201e8edf6d6eec6910ae4038f9b1db3f2d4e2d109ed833be94a026', \ '03b8a715c9432b2b52af9d58aaaf0ccbdefe36d45e158589ecc21ba2f064ebb315') '6134396c3bc9a56ccaf80cd38728e6d3a7751524246e7924b21b08b0bfcc3cc4' ''' return addprivkeys(sha256(multiplypub(ephempub,scanpriv,True)),spendpriv)
python
def receivestealth(scanpriv,spendpriv,ephempub): ''' Derive the private key for a stealth payment, using the scan and spend private keys, and the ephemeral public key. Input private keys should be 64-char hex strings, and ephemeral public key should be a 66-char hex compressed public key. >>> receivestealth('af4afaeb40810e5f8abdbb177c31a2d310913f91cf556f5350bca10cbfe8b9ec', \ 'd39758028e201e8edf6d6eec6910ae4038f9b1db3f2d4e2d109ed833be94a026', \ '03b8a715c9432b2b52af9d58aaaf0ccbdefe36d45e158589ecc21ba2f064ebb315') '6134396c3bc9a56ccaf80cd38728e6d3a7751524246e7924b21b08b0bfcc3cc4' ''' return addprivkeys(sha256(multiplypub(ephempub,scanpriv,True)),spendpriv)
[ "def", "receivestealth", "(", "scanpriv", ",", "spendpriv", ",", "ephempub", ")", ":", "return", "addprivkeys", "(", "sha256", "(", "multiplypub", "(", "ephempub", ",", "scanpriv", ",", "True", ")", ")", ",", "spendpriv", ")" ]
Derive the private key for a stealth payment, using the scan and spend private keys, and the ephemeral public key. Input private keys should be 64-char hex strings, and ephemeral public key should be a 66-char hex compressed public key. >>> receivestealth('af4afaeb40810e5f8abdbb177c31a2d310913f91cf556f5350bca10cbfe8b9ec', \ 'd39758028e201e8edf6d6eec6910ae4038f9b1db3f2d4e2d109ed833be94a026', \ '03b8a715c9432b2b52af9d58aaaf0ccbdefe36d45e158589ecc21ba2f064ebb315') '6134396c3bc9a56ccaf80cd38728e6d3a7751524246e7924b21b08b0bfcc3cc4'
[ "Derive", "the", "private", "key", "for", "a", "stealth", "payment", "using", "the", "scan", "and", "spend", "private", "keys", "and", "the", "ephemeral", "public", "key", "." ]
ad332433dfcc067e86d2e77fa0c8f1a27daffb63
https://github.com/maxweisspoker/simplebitcoinfuncs/blob/ad332433dfcc067e86d2e77fa0c8f1a27daffb63/simplebitcoinfuncs/stealth.py#L120-L134
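Receiving is the mirror image; this sketch just replays the record's doctest values (import path per the url above).

from simplebitcoinfuncs.stealth import receivestealth

scan = 'af4afaeb40810e5f8abdbb177c31a2d310913f91cf556f5350bca10cbfe8b9ec'
spend = 'd39758028e201e8edf6d6eec6910ae4038f9b1db3f2d4e2d109ed833be94a026'
ephem_pub = '03b8a715c9432b2b52af9d58aaaf0ccbdefe36d45e158589ecc21ba2f064ebb315'
# Shared secret = sha256(ECDH(ephemeral pub, scan priv)), then added to the spend key.
priv = receivestealth(scan, spend, ephem_pub)
assert priv == '6134396c3bc9a56ccaf80cd38728e6d3a7751524246e7924b21b08b0bfcc3cc4'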
241,323
ironfroggy/django-better-cache
bettercache/proxy.py
proxy
def proxy(request): """Pass an HTTP request on to another server.""" # TODO: don't hardcode http uri = "http://" + HOST + request.META['PATH_INFO'] if request.META['QUERY_STRING']: uri += '?' + request.META['QUERY_STRING'] headers = {} for name, val in six.iteritems(request.environ): if name.startswith('HTTP_'): name = header_name(name) headers[name] = val # TODO: try/except http = Http() http.follow_redirects = False logger.debug("GET for: %s" % uri) info, content = http.request(uri, 'GET', headers=headers) response = HttpResponse(content, status=info.pop('status')) for name, val in info.items(): if not is_hop_by_hop(name): response[name] = val logger.info("PROXY to: %s" % uri) return response
python
def proxy(request): """Pass an HTTP request on to another server.""" # TODO: don't hardcode http uri = "http://" + HOST + request.META['PATH_INFO'] if request.META['QUERY_STRING']: uri += '?' + request.META['QUERY_STRING'] headers = {} for name, val in six.iteritems(request.environ): if name.startswith('HTTP_'): name = header_name(name) headers[name] = val # TODO: try/except http = Http() http.follow_redirects = False logger.debug("GET for: %s" % uri) info, content = http.request(uri, 'GET', headers=headers) response = HttpResponse(content, status=info.pop('status')) for name, val in info.items(): if not is_hop_by_hop(name): response[name] = val logger.info("PROXY to: %s" % uri) return response
[ "def", "proxy", "(", "request", ")", ":", "# TODO: don't hardcode http", "uri", "=", "\"http://\"", "+", "HOST", "+", "request", ".", "META", "[", "'PATH_INFO'", "]", "if", "request", ".", "META", "[", "'QUERY_STRING'", "]", ":", "uri", "+=", "'?'", "+", "request", ".", "META", "[", "'QUERY_STRING'", "]", "headers", "=", "{", "}", "for", "name", ",", "val", "in", "six", ".", "iteritems", "(", "request", ".", "environ", ")", ":", "if", "name", ".", "startswith", "(", "'HTTP_'", ")", ":", "name", "=", "header_name", "(", "name", ")", "headers", "[", "name", "]", "=", "val", "# TODO: try/except", "http", "=", "Http", "(", ")", "http", ".", "follow_redirects", "=", "False", "logger", ".", "debug", "(", "\"GET for: %s\"", "%", "uri", ")", "info", ",", "content", "=", "http", ".", "request", "(", "uri", ",", "'GET'", ",", "headers", "=", "headers", ")", "response", "=", "HttpResponse", "(", "content", ",", "status", "=", "info", ".", "pop", "(", "'status'", ")", ")", "for", "name", ",", "val", "in", "info", ".", "items", "(", ")", ":", "if", "not", "is_hop_by_hop", "(", "name", ")", ":", "response", "[", "name", "]", "=", "val", "logger", ".", "info", "(", "\"PROXY to: %s\"", "%", "uri", ")", "return", "response" ]
Pass an HTTP request on to another server.
[ "Pass", "an", "HTTP", "request", "on", "to", "another", "server", "." ]
5350e8c646cef1c1ca74eab176f856ddd9eaf5c3
https://github.com/ironfroggy/django-better-cache/blob/5350e8c646cef1c1ca74eab176f856ddd9eaf5c3/bettercache/proxy.py#L37-L62
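One plausible way to mount proxy() is a catch-all URL pattern. The wiring below is an assumption for illustration (the record doesn't show it), and HOST must already be defined in bettercache.proxy.

# Hypothetical urls.py wiring; not part of the record.
from django.urls import re_path
from bettercache.proxy import proxy

urlpatterns = [
    re_path(r'^.*$', proxy),  # proxy() rebuilds the upstream URI from request.META
]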
241,324
mayfield/shellish
shellish/layout/table.py
tabulate
def tabulate(data, header=True, headers=None, accessors=None, **table_options): """ Shortcut function to produce tabular output of data without the need to create and configure a Table instance directly. The function does, however, return a table instance when it's done for any further use by the user. """ if header and not headers: data = iter(data) try: headers = next(data) except StopIteration: pass if headers and hasattr(headers, 'items') and accessors is None: # Dict mode; Build accessors and headers from keys of data. data = itertools.chain([headers], data) accessors = list(headers) headers = [' '.join(map(str.capitalize, x.replace('_', ' ').split())) for x in accessors] t = Table(headers=headers, accessors=accessors, **table_options) try: t.print(data) except RowsNotFound: pass return t
python
def tabulate(data, header=True, headers=None, accessors=None, **table_options): """ Shortcut function to produce tabular output of data without the need to create and configure a Table instance directly. The function does, however, return a table instance when it's done for any further use by the user. """ if header and not headers: data = iter(data) try: headers = next(data) except StopIteration: pass if headers and hasattr(headers, 'items') and accessors is None: # Dict mode; Build accessors and headers from keys of data. data = itertools.chain([headers], data) accessors = list(headers) headers = [' '.join(map(str.capitalize, x.replace('_', ' ').split())) for x in accessors] t = Table(headers=headers, accessors=accessors, **table_options) try: t.print(data) except RowsNotFound: pass return t
[ "def", "tabulate", "(", "data", ",", "header", "=", "True", ",", "headers", "=", "None", ",", "accessors", "=", "None", ",", "*", "*", "table_options", ")", ":", "if", "header", "and", "not", "headers", ":", "data", "=", "iter", "(", "data", ")", "try", ":", "headers", "=", "next", "(", "data", ")", "except", "StopIteration", ":", "pass", "if", "headers", "and", "hasattr", "(", "headers", ",", "'items'", ")", "and", "accessors", "is", "None", ":", "# Dict mode; Build accessors and headers from keys of data.", "data", "=", "itertools", ".", "chain", "(", "[", "headers", "]", ",", "data", ")", "accessors", "=", "list", "(", "headers", ")", "headers", "=", "[", "' '", ".", "join", "(", "map", "(", "str", ".", "capitalize", ",", "x", ".", "replace", "(", "'_'", ",", "' '", ")", ".", "split", "(", ")", ")", ")", "for", "x", "in", "accessors", "]", "t", "=", "Table", "(", "headers", "=", "headers", ",", "accessors", "=", "accessors", ",", "*", "*", "table_options", ")", "try", ":", "t", ".", "print", "(", "data", ")", "except", "RowsNotFound", ":", "pass", "return", "t" ]
Shortcut function to produce tabular output of data without the need to create and configure a Table instance directly. The function does, however, return a table instance when it's done for any further use by the user.
[ "Shortcut", "function", "to", "produce", "tabular", "output", "of", "data", "without", "the", "need", "to", "create", "and", "configure", "a", "Table", "instance", "directly", ".", "The", "function", "does", "however", "return", "a", "table", "instance", "when", "it", "s", "done", "for", "any", "further", "use", "by", "the", "user", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L1018-L1041
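As the code shows, passing a list of dicts triggers dict mode: accessors come from the keys and headers are derived by splitting on underscores and capitalizing. A minimal sketch (import path per this record's url):

from shellish.layout.table import tabulate

rows = [
    {'first_name': 'Ada', 'last_login': '2015-01-01'},
    {'first_name': 'Alan', 'last_login': '2015-02-02'},
]
table = tabulate(rows)  # renders columns headed 'First Name' and 'Last Login'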
241,325
mayfield/shellish
shellish/layout/table.py
TableRenderer.render_filter
def render_filter(self, next_filter): """ Produce formatted output from the raw data stream. """ next(next_filter) while True: data = (yield) res = [self.cell_format(access(data)) for access in self.accessors] next_filter.send(res)
python
def render_filter(self, next_filter): """ Produce formatted output from the raw data stream. """ next(next_filter) while True: data = (yield) res = [self.cell_format(access(data)) for access in self.accessors] next_filter.send(res)
[ "def", "render_filter", "(", "self", ",", "next_filter", ")", ":", "next", "(", "next_filter", ")", "while", "True", ":", "data", "=", "(", "yield", ")", "res", "=", "[", "self", ".", "cell_format", "(", "access", "(", "data", ")", ")", "for", "access", "in", "self", ".", "accessors", "]", "next_filter", ".", "send", "(", "res", ")" ]
Produce formatted output from the raw data stream.
[ "Produce", "formatted", "output", "from", "the", "raw", "data", "stream", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L472-L478
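render_filter follows a push-style coroutine protocol: prime the downstream once with next(), then forward each transformed row with send(). A standalone sketch of the same pattern, independent of the Table machinery:

def upper_filter(next_filter):
    # Same shape as render_filter: prime downstream, transform, forward.
    next(next_filter)
    while True:
        row = (yield)
        next_filter.send([str(cell).upper() for cell in row])

def printer():
    while True:
        print((yield))

pipe = upper_filter(printer())
next(pipe)             # prime the outermost coroutine
pipe.send(['a', 'b'])  # prints ['A', 'B']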
241,326
mayfield/shellish
shellish/layout/table.py
VisualTableRenderer.viewable_width
def viewable_width(self): """ The available combined character width when all padding is removed. """ return sum(self.widths) + sum(x['padding'] for x in self.colspec)
python
def viewable_width(self): """ The available combined character width when all padding is removed. """ return sum(self.widths) + sum(x['padding'] for x in self.colspec)
[ "def", "viewable_width", "(", "self", ")", ":", "return", "sum", "(", "self", ".", "widths", ")", "+", "sum", "(", "x", "[", "'padding'", "]", "for", "x", "in", "self", ".", "colspec", ")" ]
The available combined character width when all padding is removed.
[ "The", "available", "combined", "character", "width", "when", "all", "padding", "is", "removed", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L543-L546
241,327
mayfield/shellish
shellish/layout/table.py
VisualTableRenderer.print_row
def print_row(self, row, rstrip=True): """ Format and print the pre-rendered data to the output device. """ line = ''.join(map(str, row)) print(line.rstrip() if rstrip else line, file=self.table.file)
python
def print_row(self, row, rstrip=True): """ Format and print the pre-rendered data to the output device. """ line = ''.join(map(str, row)) print(line.rstrip() if rstrip else line, file=self.table.file)
[ "def", "print_row", "(", "self", ",", "row", ",", "rstrip", "=", "True", ")", ":", "line", "=", "''", ".", "join", "(", "map", "(", "str", ",", "row", ")", ")", "print", "(", "line", ".", "rstrip", "(", ")", "if", "rstrip", "else", "line", ",", "file", "=", "self", ".", "table", ".", "file", ")" ]
Format and print the pre-rendered data to the output device.
[ "Format", "and", "print", "the", "pre", "-", "rendered", "data", "to", "the", "output", "device", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L567-L570
241,328
mayfield/shellish
shellish/layout/table.py
VisualTableRenderer.format_fullwidth
def format_fullwidth(self, value): """ Return a full width column. Note that the padding is inherited from the first cell which inherits from column_padding. """ assert isinstance(value, VTMLBuffer) pad = self.colspec[0]['padding'] fmt = self.make_formatter(self.width - pad, pad, self.table.title_align) return VTMLBuffer('\n').join(fmt(value))
python
def format_fullwidth(self, value): """ Return a full width column. Note that the padding is inherited from the first cell which inherits from column_padding. """ assert isinstance(value, VTMLBuffer) pad = self.colspec[0]['padding'] fmt = self.make_formatter(self.width - pad, pad, self.table.title_align) return VTMLBuffer('\n').join(fmt(value))
[ "def", "format_fullwidth", "(", "self", ",", "value", ")", ":", "assert", "isinstance", "(", "value", ",", "VTMLBuffer", ")", "pad", "=", "self", ".", "colspec", "[", "0", "]", "[", "'padding'", "]", "fmt", "=", "self", ".", "make_formatter", "(", "self", ".", "width", "-", "pad", ",", "pad", ",", "self", ".", "table", ".", "title_align", ")", "return", "VTMLBuffer", "(", "'\\n'", ")", ".", "join", "(", "fmt", "(", "value", ")", ")" ]
Return a full width column. Note that the padding is inherited from the first cell which inherits from column_padding.
[ "Return", "a", "full", "width", "column", ".", "Note", "that", "the", "padding", "is", "inherited", "from", "the", "first", "cell", "which", "inherits", "from", "column_padding", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L595-L602
241,329
mayfield/shellish
shellish/layout/table.py
VisualTableRenderer.make_formatter
def make_formatter(self, width, padding, alignment, overflow=None): """ Create formatter function that factors the width and alignment settings. """ if overflow is None: overflow = self.overflow_default if overflow == 'clip': overflower = lambda x: [x.clip(width, self.table.cliptext)] elif overflow == 'wrap': overflower = lambda x: x.wrap(width) elif overflow == 'preformatted': overflower = lambda x: x.split('\n') else: raise RuntimeError("Unexpected overflow mode: %r" % overflow) align = self.get_aligner(alignment, width) pad = self.get_aligner('center', width + padding) return lambda value: [pad(align(x)) for x in overflower(value)]
python
def make_formatter(self, width, padding, alignment, overflow=None): """ Create formatter function that factors the width and alignment settings. """ if overflow is None: overflow = self.overflow_default if overflow == 'clip': overflower = lambda x: [x.clip(width, self.table.cliptext)] elif overflow == 'wrap': overflower = lambda x: x.wrap(width) elif overflow == 'preformatted': overflower = lambda x: x.split('\n') else: raise RuntimeError("Unexpected overflow mode: %r" % overflow) align = self.get_aligner(alignment, width) pad = self.get_aligner('center', width + padding) return lambda value: [pad(align(x)) for x in overflower(value)]
[ "def", "make_formatter", "(", "self", ",", "width", ",", "padding", ",", "alignment", ",", "overflow", "=", "None", ")", ":", "if", "overflow", "is", "None", ":", "overflow", "=", "self", ".", "overflow_default", "if", "overflow", "==", "'clip'", ":", "overflower", "=", "lambda", "x", ":", "[", "x", ".", "clip", "(", "width", ",", "self", ".", "table", ".", "cliptext", ")", "]", "elif", "overflow", "==", "'wrap'", ":", "overflower", "=", "lambda", "x", ":", "x", ".", "wrap", "(", "width", ")", "elif", "overflow", "==", "'preformatted'", ":", "overflower", "=", "lambda", "x", ":", "x", ".", "split", "(", "'\\n'", ")", "else", ":", "raise", "RuntimeError", "(", "\"Unexpected overflow mode: %r\"", "%", "overflow", ")", "align", "=", "self", ".", "get_aligner", "(", "alignment", ",", "width", ")", "pad", "=", "self", ".", "get_aligner", "(", "'center'", ",", "width", "+", "padding", ")", "return", "lambda", "value", ":", "[", "pad", "(", "align", "(", "x", ")", ")", "for", "x", "in", "overflower", "(", "value", ")", "]" ]
Create formatter function that factors the width and alignment settings.
[ "Create", "formatter", "function", "that", "factors", "the", "width", "and", "alignment", "settings", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L604-L619
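A rough string-based sketch of the three overflow modes; the real formatter operates on VTMLBuffer and composes alignment and padding, which this deliberately omits.

import textwrap

def sketch_formatter(width, overflow='wrap', cliptext='...'):
    # Illustration only; not the library's API.
    if overflow == 'clip':
        return lambda x: [x if len(x) <= width else x[:width - len(cliptext)] + cliptext]
    if overflow == 'wrap':
        return lambda x: textwrap.wrap(x, width) or ['']
    if overflow == 'preformatted':
        return lambda x: x.split('\n')
    raise RuntimeError('Unexpected overflow mode: %r' % overflow)

assert sketch_formatter(8, 'clip')('overflowing text') == ['overf...']
assert sketch_formatter(8, 'wrap')('overflowing text') == ['overflow', 'ing text']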
241,330
mayfield/shellish
shellish/layout/table.py
VisualTableRenderer.make_formatters
def make_formatters(self): """ Create a list of formatter functions for each column. They can then be stored in the render spec for faster justification processing. """ return [self.make_formatter(inner_w, spec['padding'], spec['align'], spec['overflow']) for spec, inner_w in zip(self.colspec, self.widths)]
python
def make_formatters(self): """ Create a list of formatter functions for each column. They can then be stored in the render spec for faster justification processing. """ return [self.make_formatter(inner_w, spec['padding'], spec['align'], spec['overflow']) for spec, inner_w in zip(self.colspec, self.widths)]
[ "def", "make_formatters", "(", "self", ")", ":", "return", "[", "self", ".", "make_formatter", "(", "inner_w", ",", "spec", "[", "'padding'", "]", ",", "spec", "[", "'align'", "]", ",", "spec", "[", "'overflow'", "]", ")", "for", "spec", ",", "inner_w", "in", "zip", "(", "self", ".", "colspec", ",", "self", ".", "widths", ")", "]" ]
Create a list of formatter functions for each column. They can then be stored in the render spec for faster justification processing.
[ "Create", "a", "list", "of", "formatter", "functions", "for", "each", "column", ".", "They", "can", "then", "be", "stored", "in", "the", "render", "spec", "for", "faster", "justification", "processing", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L621-L626
241,331
mayfield/shellish
shellish/layout/table.py
VisualTableRenderer._uniform_dist
def _uniform_dist(self, spread, total): """ Produce a uniform distribution of `total` across a list of `spread` size. The result is non-random and uniform. """ fraction, fixed_increment = math.modf(total / spread) fixed_increment = int(fixed_increment) balance = 0 dist = [] for _ in range(spread): balance += fraction withdrawl = 1 if balance > 0.5 else 0 if withdrawl: balance -= withdrawl dist.append(fixed_increment + withdrawl) return dist
python
def _uniform_dist(self, spread, total): """ Produce a uniform distribution of `total` across a list of `spread` size. The result is non-random and uniform. """ fraction, fixed_increment = math.modf(total / spread) fixed_increment = int(fixed_increment) balance = 0 dist = [] for _ in range(spread): balance += fraction withdrawl = 1 if balance > 0.5 else 0 if withdrawl: balance -= withdrawl dist.append(fixed_increment + withdrawl) return dist
[ "def", "_uniform_dist", "(", "self", ",", "spread", ",", "total", ")", ":", "fraction", ",", "fixed_increment", "=", "math", ".", "modf", "(", "total", "/", "spread", ")", "fixed_increment", "=", "int", "(", "fixed_increment", ")", "balance", "=", "0", "dist", "=", "[", "]", "for", "_", "in", "range", "(", "spread", ")", ":", "balance", "+=", "fraction", "withdrawl", "=", "1", "if", "balance", ">", "0.5", "else", "0", "if", "withdrawl", ":", "balance", "-=", "withdrawl", "dist", ".", "append", "(", "fixed_increment", "+", "withdrawl", ")", "return", "dist" ]
Produce a uniform distribution of `total` across a list of `spread` size. The result is non-random and uniform.
[ "Produce", "a", "uniform", "distribution", "of", "total", "across", "a", "list", "of", "spread", "size", ".", "The", "result", "is", "non", "-", "random", "and", "uniform", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L628-L641
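The method never touches self, so it can be exercised standalone. A verbatim copy of the body (keeping the original's 'withdrawl' spelling) shows the deterministic spread:

import math

def uniform_dist(spread, total):
    # Verbatim copy of _uniform_dist's body, for illustration.
    fraction, fixed_increment = math.modf(total / spread)
    fixed_increment = int(fixed_increment)
    balance = 0
    dist = []
    for _ in range(spread):
        balance += fraction
        withdrawl = 1 if balance > 0.5 else 0
        if withdrawl:
            balance -= withdrawl
        dist.append(fixed_increment + withdrawl)
    return dist

assert uniform_dist(3, 10) == [3, 4, 3]   # non-random, sums to the total
assert sum(uniform_dist(7, 100)) == 100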
241,332
mayfield/shellish
shellish/layout/table.py
VisualTableRenderer.get_filters
def get_filters(self): """ Coroutine based filters for render pipeline. """ return [ self.compute_style_filter, self.render_filter, self.calc_widths_filter, self.format_row_filter, self.align_rows_filter, ]
python
def get_filters(self): """ Coroutine based filters for render pipeline. """ return [ self.compute_style_filter, self.render_filter, self.calc_widths_filter, self.format_row_filter, self.align_rows_filter, ]
[ "def", "get_filters", "(", "self", ")", ":", "return", "[", "self", ".", "compute_style_filter", ",", "self", ".", "render_filter", ",", "self", ".", "calc_widths_filter", ",", "self", ".", "format_row_filter", ",", "self", ".", "align_rows_filter", ",", "]" ]
Coroutine based filters for render pipeline.
[ "Coroutine", "based", "filters", "for", "render", "pipeline", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L643-L651
241,333
mayfield/shellish
shellish/layout/table.py
VisualTableRenderer.format_row_filter
def format_row_filter(self, next_filter): """ Apply overflow, justification, padding and expansion to a row. """ next(next_filter) while True: items = (yield) assert all(isinstance(x, VTMLBuffer) for x in items) raw = (fn(x) for x, fn in zip(items, self.formatters)) for x in itertools.zip_longest(*raw): next_filter.send(x)
python
def format_row_filter(self, next_filter): """ Apply overflow, justification, padding and expansion to a row. """ next(next_filter) while True: items = (yield) assert all(isinstance(x, VTMLBuffer) for x in items) raw = (fn(x) for x, fn in zip(items, self.formatters)) for x in itertools.zip_longest(*raw): next_filter.send(x)
[ "def", "format_row_filter", "(", "self", ",", "next_filter", ")", ":", "next", "(", "next_filter", ")", "while", "True", ":", "items", "=", "(", "yield", ")", "assert", "all", "(", "isinstance", "(", "x", ",", "VTMLBuffer", ")", "for", "x", "in", "items", ")", "raw", "=", "(", "fn", "(", "x", ")", "for", "x", ",", "fn", "in", "zip", "(", "items", ",", "self", ".", "formatters", ")", ")", "for", "x", "in", "itertools", ".", "zip_longest", "(", "*", "raw", ")", ":", "next_filter", ".", "send", "(", "x", ")" ]
Apply overflow, justification, padding and expansion to a row.
[ "Apply", "overflow", "justification", "padding", "and", "expansion", "to", "a", "row", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L653-L661
241,334
mayfield/shellish
shellish/layout/table.py
VisualTableRenderer.width_normalize
def width_normalize(self, width): """ Handle a width style, which can be a fractional number representing a percentage of available width or a positive integer which indicates a fixed width. """ if width is not None: if width > 0 and width < 1: return int(width * self.usable_width) else: return int(width)
python
def width_normalize(self, width): """ Handle a width style, which can be a fractional number representing a percentage of available width or a positive integer which indicates a fixed width. """ if width is not None: if width > 0 and width < 1: return int(width * self.usable_width) else: return int(width)
[ "def", "width_normalize", "(", "self", ",", "width", ")", ":", "if", "width", "is", "not", "None", ":", "if", "width", ">", "0", "and", "width", "<", "1", ":", "return", "int", "(", "width", "*", "self", ".", "usable_width", ")", "else", ":", "return", "int", "(", "width", ")" ]
Handle a width style, which can be a fractional number representing a percentage of available width or a positive integer which indicates a fixed width.
[ "Handle", "a", "width", "style", "which", "can", "be", "a", "fractional", "number", "representing", "a", "percentage", "of", "available", "width", "or", "a", "positive", "integer", "which", "indicates", "a", "fixed", "width", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L733-L741
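In short: widths strictly between 0 and 1 are fractions of the usable width, anything else is a fixed character count. A standalone sketch with usable_width pinned to 80 (the real value is a renderer property):

def width_normalize(width, usable_width=80):
    # usable_width stands in for the renderer property of the same name.
    if width is not None:
        if 0 < width < 1:
            return int(width * usable_width)
        return int(width)

assert width_normalize(0.25) == 20    # fractional: 25% of 80
assert width_normalize(30) == 30      # fixed width passes through
assert width_normalize(None) is None  # unspecified stays unspecified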
241,335
mayfield/shellish
shellish/layout/table.py
VisualTableRenderer.calc_widths_filter
def calc_widths_filter(self, next_filter): """ Coroutine to analyze the incoming data stream for creating optimal column width choices. This may buffer some of the incoming stream if there isn't enough information to make good choices about column widths. Also it may resize widths if certain conditions are met such as the terminal width resize event being detected. """ window_sent = not not self.data_window next_primed = False genexit = None if not self.data_window: start = time.monotonic() while len(self.data_window) < self.min_render_prefill or \ (len(self.data_window) < self.max_render_prefill and (time.monotonic() - start) < self.max_render_delay): try: self.data_window.append((yield)) except GeneratorExit as e: genexit = e break while True: if self.width != self.desired_width: self.headers_drawn = False # TODO: make optional self.width = self.desired_width remaining = self.usable_width widths = [x['width'] for x in self.colspec] preformatted = [i for i, x in enumerate(self.colspec) if x['overflow'] == 'preformatted'] unspec = [] for i, width in enumerate(widths): fixed_width = self.width_normalize(width) if fixed_width is None: unspec.append(i) else: widths[i] = fixed_width remaining -= fixed_width if unspec: if self.table.flex and self.data_window: for i, w in self.calc_flex(self.data_window, remaining, unspec, preformatted): widths[i] = w else: dist = self._uniform_dist(len(unspec), remaining) for i, width in zip(unspec, dist): widths[i] = width self.widths = widths self.formatters = self.make_formatters() if not next_primed: next(next_filter) next_primed = True if not window_sent: for x in self.data_window: next_filter.send(x) window_sent = True if genexit: raise genexit data = (yield) self.data_window.append(data) next_filter.send(data)
python
def calc_widths_filter(self, next_filter): """ Coroutine to analyze the incoming data stream for creating optimal column width choices. This may buffer some of the incoming stream if there isn't enough information to make good choices about column widths. Also it may resize widths if certain conditions are met such as the terminal width resize event being detected. """ window_sent = not not self.data_window next_primed = False genexit = None if not self.data_window: start = time.monotonic() while len(self.data_window) < self.min_render_prefill or \ (len(self.data_window) < self.max_render_prefill and (time.monotonic() - start) < self.max_render_delay): try: self.data_window.append((yield)) except GeneratorExit as e: genexit = e break while True: if self.width != self.desired_width: self.headers_drawn = False # TODO: make optional self.width = self.desired_width remaining = self.usable_width widths = [x['width'] for x in self.colspec] preformatted = [i for i, x in enumerate(self.colspec) if x['overflow'] == 'preformatted'] unspec = [] for i, width in enumerate(widths): fixed_width = self.width_normalize(width) if fixed_width is None: unspec.append(i) else: widths[i] = fixed_width remaining -= fixed_width if unspec: if self.table.flex and self.data_window: for i, w in self.calc_flex(self.data_window, remaining, unspec, preformatted): widths[i] = w else: dist = self._uniform_dist(len(unspec), remaining) for i, width in zip(unspec, dist): widths[i] = width self.widths = widths self.formatters = self.make_formatters() if not next_primed: next(next_filter) next_primed = True if not window_sent: for x in self.data_window: next_filter.send(x) window_sent = True if genexit: raise genexit data = (yield) self.data_window.append(data) next_filter.send(data)
[ "def", "calc_widths_filter", "(", "self", ",", "next_filter", ")", ":", "window_sent", "=", "not", "not", "self", ".", "data_window", "next_primed", "=", "False", "genexit", "=", "None", "if", "not", "self", ".", "data_window", ":", "start", "=", "time", ".", "monotonic", "(", ")", "while", "len", "(", "self", ".", "data_window", ")", "<", "self", ".", "min_render_prefill", "or", "(", "len", "(", "self", ".", "data_window", ")", "<", "self", ".", "max_render_prefill", "and", "(", "time", ".", "monotonic", "(", ")", "-", "start", ")", "<", "self", ".", "max_render_delay", ")", ":", "try", ":", "self", ".", "data_window", ".", "append", "(", "(", "yield", ")", ")", "except", "GeneratorExit", "as", "e", ":", "genexit", "=", "e", "break", "while", "True", ":", "if", "self", ".", "width", "!=", "self", ".", "desired_width", ":", "self", ".", "headers_drawn", "=", "False", "# TODO: make optional", "self", ".", "width", "=", "self", ".", "desired_width", "remaining", "=", "self", ".", "usable_width", "widths", "=", "[", "x", "[", "'width'", "]", "for", "x", "in", "self", ".", "colspec", "]", "preformatted", "=", "[", "i", "for", "i", ",", "x", "in", "enumerate", "(", "self", ".", "colspec", ")", "if", "x", "[", "'overflow'", "]", "==", "'preformatted'", "]", "unspec", "=", "[", "]", "for", "i", ",", "width", "in", "enumerate", "(", "widths", ")", ":", "fixed_width", "=", "self", ".", "width_normalize", "(", "width", ")", "if", "fixed_width", "is", "None", ":", "unspec", ".", "append", "(", "i", ")", "else", ":", "widths", "[", "i", "]", "=", "fixed_width", "remaining", "-=", "fixed_width", "if", "unspec", ":", "if", "self", ".", "table", ".", "flex", "and", "self", ".", "data_window", ":", "for", "i", ",", "w", "in", "self", ".", "calc_flex", "(", "self", ".", "data_window", ",", "remaining", ",", "unspec", ",", "preformatted", ")", ":", "widths", "[", "i", "]", "=", "w", "else", ":", "dist", "=", "self", ".", "_uniform_dist", "(", "len", "(", "unspec", ")", ",", "remaining", ")", "for", "i", ",", "width", "in", "zip", "(", "unspec", ",", "dist", ")", ":", "widths", "[", "i", "]", "=", "width", "self", ".", "widths", "=", "widths", "self", ".", "formatters", "=", "self", ".", "make_formatters", "(", ")", "if", "not", "next_primed", ":", "next", "(", "next_filter", ")", "next_primed", "=", "True", "if", "not", "window_sent", ":", "for", "x", "in", "self", ".", "data_window", ":", "next_filter", ".", "send", "(", "x", ")", "window_sent", "=", "True", "if", "genexit", ":", "raise", "genexit", "data", "=", "(", "yield", ")", "self", ".", "data_window", ".", "append", "(", "data", ")", "next_filter", ".", "send", "(", "data", ")" ]
Coroutine to analyze the incoming data stream for creating optimal column width choices. This may buffer some of the incoming stream if there isn't enough information to make good choices about column widths. Also it may resize widths if certain conditions are met such as the terminal width resize event being detected.
[ "Coroutine", "to", "analyze", "the", "incoming", "data", "stream", "for", "creating", "optimal", "column", "width", "choices", ".", "This", "may", "buffer", "some", "of", "the", "incoming", "stream", "if", "there", "isn", "t", "enough", "information", "to", "make", "good", "choices", "about", "column", "widths", ".", "Also", "it", "may", "resize", "widths", "if", "certain", "conditions", "are", "met", "such", "as", "the", "terminal", "width", "resize", "event", "being", "detected", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L743-L800
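The prefill gate at the top reads as: always buffer at least min_render_prefill rows, then keep buffering up to max_render_prefill rows while max_render_delay hasn't elapsed. A toy sketch of just that gate (threshold values are illustrative, not the class defaults):

import time

def prefill(rows, min_prefill=5, max_prefill=100, max_delay=0.25):
    # Buffer enough rows to size columns well without stalling output.
    window = []
    start = time.monotonic()
    for row in rows:
        window.append(row)
        have_minimum = len(window) >= min_prefill
        out_of_budget = (len(window) >= max_prefill or
                         time.monotonic() - start >= max_delay)
        if have_minimum and out_of_budget:
            break
    return window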
241,336
mayfield/shellish
shellish/layout/table.py
VisualTableRenderer.calc_flex
def calc_flex(self, data, max_width, cols, preformatted=None): """ Scan data returning the best width for each column given the max_width constraint. If some columns will overflow we calculate the best concession widths. """ if preformatted is None: preformatted = [] colstats = [] for i in cols: lengths = [len(xx) for x in data for xx in x[i].text().splitlines()] if self.headers: lengths.append(len(self.headers[i])) lengths.append(self.width_normalize(self.colspec[i]['minwidth'])) counts = collections.Counter(lengths) colstats.append({ "column": i, "preformatted": i in preformatted, "counts": counts, "offt": max(lengths), "chop_mass": 0, "chop_count": 0, "total_mass": sum(a * b for a, b in counts.items()) }) self.adjust_widths(max_width, colstats) required = sum(x['offt'] for x in colstats) justify = self.table.justify if self.table.justify is not None else \ self.justify_default if required < max_width and justify: # Fill remaining space proportionately. remaining = max_width for x in colstats: x['offt'] = int((x['offt'] / required) * max_width) remaining -= x['offt'] if remaining: dist = self._uniform_dist(len(cols), remaining) for adj, col in zip(dist, colstats): col['offt'] += adj return [(x['column'], x['offt']) for x in colstats]
python
def calc_flex(self, data, max_width, cols, preformatted=None): """ Scan data returning the best width for each column given the max_width constraint. If some columns will overflow we calculate the best concession widths. """ if preformatted is None: preformatted = [] colstats = [] for i in cols: lengths = [len(xx) for x in data for xx in x[i].text().splitlines()] if self.headers: lengths.append(len(self.headers[i])) lengths.append(self.width_normalize(self.colspec[i]['minwidth'])) counts = collections.Counter(lengths) colstats.append({ "column": i, "preformatted": i in preformatted, "counts": counts, "offt": max(lengths), "chop_mass": 0, "chop_count": 0, "total_mass": sum(a * b for a, b in counts.items()) }) self.adjust_widths(max_width, colstats) required = sum(x['offt'] for x in colstats) justify = self.table.justify if self.table.justify is not None else \ self.justify_default if required < max_width and justify: # Fill remaining space proportionately. remaining = max_width for x in colstats: x['offt'] = int((x['offt'] / required) * max_width) remaining -= x['offt'] if remaining: dist = self._uniform_dist(len(cols), remaining) for adj, col in zip(dist, colstats): col['offt'] += adj return [(x['column'], x['offt']) for x in colstats]
[ "def", "calc_flex", "(", "self", ",", "data", ",", "max_width", ",", "cols", ",", "preformatted", "=", "None", ")", ":", "if", "preformatted", "is", "None", ":", "preformatted", "=", "[", "]", "colstats", "=", "[", "]", "for", "i", "in", "cols", ":", "lengths", "=", "[", "len", "(", "xx", ")", "for", "x", "in", "data", "for", "xx", "in", "x", "[", "i", "]", ".", "text", "(", ")", ".", "splitlines", "(", ")", "]", "if", "self", ".", "headers", ":", "lengths", ".", "append", "(", "len", "(", "self", ".", "headers", "[", "i", "]", ")", ")", "lengths", ".", "append", "(", "self", ".", "width_normalize", "(", "self", ".", "colspec", "[", "i", "]", "[", "'minwidth'", "]", ")", ")", "counts", "=", "collections", ".", "Counter", "(", "lengths", ")", "colstats", ".", "append", "(", "{", "\"column\"", ":", "i", ",", "\"preformatted\"", ":", "i", "in", "preformatted", ",", "\"counts\"", ":", "counts", ",", "\"offt\"", ":", "max", "(", "lengths", ")", ",", "\"chop_mass\"", ":", "0", ",", "\"chop_count\"", ":", "0", ",", "\"total_mass\"", ":", "sum", "(", "a", "*", "b", "for", "a", ",", "b", "in", "counts", ".", "items", "(", ")", ")", "}", ")", "self", ".", "adjust_widths", "(", "max_width", ",", "colstats", ")", "required", "=", "sum", "(", "x", "[", "'offt'", "]", "for", "x", "in", "colstats", ")", "justify", "=", "self", ".", "table", ".", "justify", "if", "self", ".", "table", ".", "justify", "is", "not", "None", "else", "self", ".", "justify_default", "if", "required", "<", "max_width", "and", "justify", ":", "# Fill remaining space proportionately.", "remaining", "=", "max_width", "for", "x", "in", "colstats", ":", "x", "[", "'offt'", "]", "=", "int", "(", "(", "x", "[", "'offt'", "]", "/", "required", ")", "*", "max_width", ")", "remaining", "-=", "x", "[", "'offt'", "]", "if", "remaining", ":", "dist", "=", "self", ".", "_uniform_dist", "(", "len", "(", "cols", ")", ",", "remaining", ")", "for", "adj", ",", "col", "in", "zip", "(", "dist", ",", "colstats", ")", ":", "col", "[", "'offt'", "]", "+=", "adj", "return", "[", "(", "x", "[", "'column'", "]", ",", "x", "[", "'offt'", "]", ")", "for", "x", "in", "colstats", "]" ]
Scan data returning the best width for each column given the max_width constraint. If some columns will overflow we calculate the best concession widths.
[ "Scan", "data", "returning", "the", "best", "width", "for", "each", "column", "given", "the", "max_width", "constraint", ".", "If", "some", "columns", "will", "overflow", "we", "calculate", "the", "best", "concession", "widths", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L802-L839
241,337
mayfield/shellish
shellish/layout/table.py
VisualTableRenderer.adjust_widths
def adjust_widths(self, max_width, colstats): """ Adjust column widths based on the least negative effect it will have on the viewing experience. We take note of the total character mass that will be clipped when each column should be narrowed. The actual score for clipping is based on percentage of total character mass, which is the total number of characters in the column. """ adj_colstats = [] for x in colstats: if not x['preformatted']: adj_colstats.append(x) else: max_width -= x['offt'] next_score = lambda x: (x['counts'][x['offt']] + x['chop_mass'] + x['chop_count']) / x['total_mass'] cur_width = lambda: sum(x['offt'] for x in adj_colstats) min_width = lambda x: self.width_normalize( self.colspec[x['column']]['minwidth']) while cur_width() > max_width: nextaffects = [(next_score(x), i) for i, x in enumerate(adj_colstats) if x['offt'] > min_width(x)] if not nextaffects: break # All columns are as small as they can get. nextaffects.sort() chop = adj_colstats[nextaffects[0][1]] chop['chop_count'] += chop['counts'][chop['offt']] chop['chop_mass'] += chop['chop_count'] chop['offt'] -= 1
python
def adjust_widths(self, max_width, colstats): """ Adjust column widths based on the least negative effect it will have on the viewing experience. We take note of the total character mass that will be clipped when each column should be narrowed. The actual score for clipping is based on percentage of total character mass, which is the total number of characters in the column. """ adj_colstats = [] for x in colstats: if not x['preformatted']: adj_colstats.append(x) else: max_width -= x['offt'] next_score = lambda x: (x['counts'][x['offt']] + x['chop_mass'] + x['chop_count']) / x['total_mass'] cur_width = lambda: sum(x['offt'] for x in adj_colstats) min_width = lambda x: self.width_normalize( self.colspec[x['column']]['minwidth']) while cur_width() > max_width: nextaffects = [(next_score(x), i) for i, x in enumerate(adj_colstats) if x['offt'] > min_width(x)] if not nextaffects: break # All columns are as small as they can get. nextaffects.sort() chop = adj_colstats[nextaffects[0][1]] chop['chop_count'] += chop['counts'][chop['offt']] chop['chop_mass'] += chop['chop_count'] chop['offt'] -= 1
[ "def", "adjust_widths", "(", "self", ",", "max_width", ",", "colstats", ")", ":", "adj_colstats", "=", "[", "]", "for", "x", "in", "colstats", ":", "if", "not", "x", "[", "'preformatted'", "]", ":", "adj_colstats", ".", "append", "(", "x", ")", "else", ":", "max_width", "-=", "x", "[", "'offt'", "]", "next_score", "=", "lambda", "x", ":", "(", "x", "[", "'counts'", "]", "[", "x", "[", "'offt'", "]", "]", "+", "x", "[", "'chop_mass'", "]", "+", "x", "[", "'chop_count'", "]", ")", "/", "x", "[", "'total_mass'", "]", "cur_width", "=", "lambda", ":", "sum", "(", "x", "[", "'offt'", "]", "for", "x", "in", "adj_colstats", ")", "min_width", "=", "lambda", "x", ":", "self", ".", "width_normalize", "(", "self", ".", "colspec", "[", "x", "[", "'column'", "]", "]", "[", "'minwidth'", "]", ")", "while", "cur_width", "(", ")", ">", "max_width", ":", "nextaffects", "=", "[", "(", "next_score", "(", "x", ")", ",", "i", ")", "for", "i", ",", "x", "in", "enumerate", "(", "adj_colstats", ")", "if", "x", "[", "'offt'", "]", ">", "min_width", "(", "x", ")", "]", "if", "not", "nextaffects", ":", "break", "# All columns are as small as they can get.", "nextaffects", ".", "sort", "(", ")", "chop", "=", "adj_colstats", "[", "nextaffects", "[", "0", "]", "[", "1", "]", "]", "chop", "[", "'chop_count'", "]", "+=", "chop", "[", "'counts'", "]", "[", "chop", "[", "'offt'", "]", "]", "chop", "[", "'chop_mass'", "]", "+=", "chop", "[", "'chop_count'", "]", "chop", "[", "'offt'", "]", "-=", "1" ]
Adjust column widths based on the least negative effect it will have on the viewing experience. We take note of the total character mass that will be clipped when each column should be narrowed. The actual score for clipping is based on percentage of total character mass, which is the total number of characters in the column.
[ "Adjust", "column", "widths", "based", "on", "the", "least", "negative", "effect", "it", "will", "have", "on", "the", "viewing", "experience", ".", "We", "take", "note", "of", "the", "total", "character", "mass", "that", "will", "be", "clipped", "when", "each", "column", "should", "be", "narrowed", ".", "The", "actual", "score", "for", "clipping", "is", "based", "on", "percentage", "of", "total", "character", "mass", "which", "is", "the", "total", "number", "of", "characters", "in", "the", "column", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L841-L868
241,338
mayfield/shellish
shellish/layout/table.py
JSONTableRenderer.make_key
def make_key(self, value): """ Make camelCase variant of value. """ if value: parts = [self.key_filter.sub('', x) for x in self.key_split.split(value.lower())] key = parts[0] + ''.join(map(str.capitalize, parts[1:])) else: key = '' if key in self.seen_keys: i = 1 while '%s%d' % (key, i) in self.seen_keys: i += 1 key = '%s%d' % (key, i) self.seen_keys.add(key) return key
python
def make_key(self, value): """ Make camelCase variant of value. """ if value: parts = [self.key_filter.sub('', x) for x in self.key_split.split(value.lower())] key = parts[0] + ''.join(map(str.capitalize, parts[1:])) else: key = '' if key in self.seen_keys: i = 1 while '%s%d' % (key, i) in self.seen_keys: i += 1 key = '%s%d' % (key, i) self.seen_keys.add(key) return key
[ "def", "make_key", "(", "self", ",", "value", ")", ":", "if", "value", ":", "parts", "=", "[", "self", ".", "key_filter", ".", "sub", "(", "''", ",", "x", ")", "for", "x", "in", "self", ".", "key_split", ".", "split", "(", "value", ".", "lower", "(", ")", ")", "]", "key", "=", "parts", "[", "0", "]", "+", "''", ".", "join", "(", "map", "(", "str", ".", "capitalize", ",", "parts", "[", "1", ":", "]", ")", ")", "else", ":", "key", "=", "''", "if", "key", "in", "self", ".", "seen_keys", ":", "i", "=", "1", "while", "'%s%d'", "%", "(", "key", ",", "i", ")", "in", "self", ".", "seen_keys", ":", "i", "+=", "1", "key", "=", "'%s%d'", "%", "(", "key", ",", "i", ")", "self", ".", "seen_keys", ".", "add", "(", "key", ")", "return", "key" ]
Make camelCase variant of value.
[ "Make", "camelCase", "variant", "of", "value", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L921-L935
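key_split and key_filter are class attributes not shown in this record; with plausible stand-ins (split on whitespace/underscores/hyphens, strip non-alphanumerics) the camelCase-plus-dedup behavior looks like:

import re

class KeyMaker:
    # key_split / key_filter are assumed definitions, not from the record.
    key_split = re.compile(r'[\s_-]+')
    key_filter = re.compile(r'[^a-z0-9]')

    def __init__(self):
        self.seen_keys = set()

    def make_key(self, value):
        # Body mirrors the record's make_key.
        if value:
            parts = [self.key_filter.sub('', x)
                     for x in self.key_split.split(value.lower())]
            key = parts[0] + ''.join(map(str.capitalize, parts[1:]))
        else:
            key = ''
        if key in self.seen_keys:
            i = 1
            while '%s%d' % (key, i) in self.seen_keys:
                i += 1
            key = '%s%d' % (key, i)
        self.seen_keys.add(key)
        return key

km = KeyMaker()
assert km.make_key('Total Count') == 'totalCount'
assert km.make_key('total_count') == 'totalCount1'  # duplicates get a numeric suffix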
241,339
thisfred/val
val/_val.py
_build_type_validator
def _build_type_validator(value_type): """Build a validator that only checks the type of a value.""" def type_validator(data): """Validate instances of a particular type.""" if isinstance(data, value_type): return data raise NotValid('%r is not of type %r' % (data, value_type)) return type_validator
python
def _build_type_validator(value_type): """Build a validator that only checks the type of a value.""" def type_validator(data): """Validate instances of a particular type.""" if isinstance(data, value_type): return data raise NotValid('%r is not of type %r' % (data, value_type)) return type_validator
[ "def", "_build_type_validator", "(", "value_type", ")", ":", "def", "type_validator", "(", "data", ")", ":", "\"\"\"Validate instances of a particular type.\"\"\"", "if", "isinstance", "(", "data", ",", "value_type", ")", ":", "return", "data", "raise", "NotValid", "(", "'%r is not of type %r'", "%", "(", "data", ",", "value_type", ")", ")", "return", "type_validator" ]
Build a validator that only checks the type of a value.
[ "Build", "a", "validator", "that", "only", "checks", "the", "type", "of", "a", "value", "." ]
ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L25-L35
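A quick sketch of the returned closure's contract; NotValid is stubbed here since its real definition lives elsewhere in val.

class NotValid(Exception):
    """Stand-in for val's NotValid."""

def _build_type_validator(value_type):
    # Copy of the record's function.
    def type_validator(data):
        if isinstance(data, value_type):
            return data
        raise NotValid('%r is not of type %r' % (data, value_type))
    return type_validator

is_int = _build_type_validator(int)
assert is_int(3) == 3  # valid data is returned unchanged
try:
    is_int('3')
except NotValid as exc:
    print(exc)         # '3' is not of type <class 'int'>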
241,340
thisfred/val
val/_val.py
_build_static_validator
def _build_static_validator(exact_value): """Build a validator that checks if the data is equal to an exact value.""" def static_validator(data): """Validate by equality.""" if data == exact_value: return data raise NotValid('%r is not equal to %r' % (data, exact_value)) return static_validator
python
def _build_static_validator(exact_value): """Build a validator that checks if the data is equal to an exact value.""" def static_validator(data): """Validate by equality.""" if data == exact_value: return data raise NotValid('%r is not equal to %r' % (data, exact_value)) return static_validator
[ "def", "_build_static_validator", "(", "exact_value", ")", ":", "def", "static_validator", "(", "data", ")", ":", "\"\"\"Validate by equality.\"\"\"", "if", "data", "==", "exact_value", ":", "return", "data", "raise", "NotValid", "(", "'%r is not equal to %r'", "%", "(", "data", ",", "exact_value", ")", ")", "return", "static_validator" ]
Build a validator that checks if the data is equal to an exact value.
[ "Build", "a", "validator", "that", "checks", "if", "the", "data", "is", "equal", "to", "an", "exact", "value", "." ]
ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L38-L48
241,341
thisfred/val
val/_val.py
_build_iterable_validator
def _build_iterable_validator(iterable): """Build a validator from an iterable.""" sub_schemas = [parse_schema(s) for s in iterable] def item_validator(value): """Validate items in an iterable.""" for sub in sub_schemas: try: return sub(value) except NotValid: pass raise NotValid('%r not validated by anything in %s.' % (value, iterable)) def iterable_validator(data): """Validate an iterable.""" if type(data) is not type(iterable): raise NotValid('%r is not of type %s' % (data, type(iterable))) return type(iterable)(item_validator(value) for value in data) return iterable_validator
python
def _build_iterable_validator(iterable): """Build a validator from an iterable.""" sub_schemas = [parse_schema(s) for s in iterable] def item_validator(value): """Validate items in an iterable.""" for sub in sub_schemas: try: return sub(value) except NotValid: pass raise NotValid('%r not validated by anything in %s.' % (value, iterable)) def iterable_validator(data): """Validate an iterable.""" if type(data) is not type(iterable): raise NotValid('%r is not of type %s' % (data, type(iterable))) return type(iterable)(item_validator(value) for value in data) return iterable_validator
[ "def", "_build_iterable_validator", "(", "iterable", ")", ":", "sub_schemas", "=", "[", "parse_schema", "(", "s", ")", "for", "s", "in", "iterable", "]", "def", "item_validator", "(", "value", ")", ":", "\"\"\"Validate items in an iterable.\"\"\"", "for", "sub", "in", "sub_schemas", ":", "try", ":", "return", "sub", "(", "value", ")", "except", "NotValid", ":", "pass", "raise", "NotValid", "(", "'%r invalidated by anything in %s.'", "%", "(", "value", ",", "iterable", ")", ")", "def", "iterable_validator", "(", "data", ")", ":", "\"\"\"Validate an iterable.\"\"\"", "if", "not", "type", "(", "data", ")", "is", "type", "(", "iterable", ")", ":", "raise", "NotValid", "(", "'%r is not of type %s'", "%", "(", "data", ",", "type", "(", "iterable", ")", ")", ")", "return", "type", "(", "iterable", ")", "(", "item_validator", "(", "value", ")", "for", "value", "in", "data", ")", "return", "iterable_validator" ]
Build a validator from an iterable.
[ "Build", "a", "validator", "from", "an", "iterable", "." ]
ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L68-L90
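parse_schema and NotValid are val internals not shown in this record; with minimal stand-ins that handle only types and exact values, list validation behaves like this:

class NotValid(Exception):
    pass  # stand-in for val's NotValid

def parse_schema(schema):
    # Minimal stand-in for val's parse_schema: types and exact values only.
    if isinstance(schema, type):
        def by_type(data):
            if isinstance(data, schema):
                return data
            raise NotValid('%r is not of type %r' % (data, schema))
        return by_type
    def by_value(data):
        if data == schema:
            return data
        raise NotValid('%r is not equal to %r' % (data, schema))
    return by_value

def _build_iterable_validator(iterable):
    # Copy of the record's function.
    sub_schemas = [parse_schema(s) for s in iterable]

    def item_validator(value):
        for sub in sub_schemas:
            try:
                return sub(value)
            except NotValid:
                pass
        raise NotValid('%r not validated by anything in %s.' % (value, iterable))

    def iterable_validator(data):
        if type(data) is not type(iterable):
            raise NotValid('%r is not of type %s' % (data, type(iterable)))
        return type(iterable)(item_validator(v) for v in data)

    return iterable_validator

validate = _build_iterable_validator([int, 'ok'])
assert validate([1, 'ok', 2]) == [1, 'ok', 2]  # each item matches some alternative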
241,342
thisfred/val
val/_val.py
_determine_keys
def _determine_keys(dictionary): """Determine the different kinds of keys.""" optional = {} defaults = {} mandatory = {} types = {} for key, value in dictionary.items(): if isinstance(key, Optional): optional[key.value] = parse_schema(value) if isinstance(value, BaseSchema) and\ value.default is not UNSPECIFIED: defaults[key.value] = (value.default, value.null_values) continue # pragma: nocover if type(key) is type: types[key] = parse_schema(value) continue mandatory[key] = parse_schema(value) return mandatory, optional, types, defaults
python
def _determine_keys(dictionary): """Determine the different kinds of keys.""" optional = {} defaults = {} mandatory = {} types = {} for key, value in dictionary.items(): if isinstance(key, Optional): optional[key.value] = parse_schema(value) if isinstance(value, BaseSchema) and\ value.default is not UNSPECIFIED: defaults[key.value] = (value.default, value.null_values) continue # pragma: nocover if type(key) is type: types[key] = parse_schema(value) continue mandatory[key] = parse_schema(value) return mandatory, optional, types, defaults
[ "def", "_determine_keys", "(", "dictionary", ")", ":", "optional", "=", "{", "}", "defaults", "=", "{", "}", "mandatory", "=", "{", "}", "types", "=", "{", "}", "for", "key", ",", "value", "in", "dictionary", ".", "items", "(", ")", ":", "if", "isinstance", "(", "key", ",", "Optional", ")", ":", "optional", "[", "key", ".", "value", "]", "=", "parse_schema", "(", "value", ")", "if", "isinstance", "(", "value", ",", "BaseSchema", ")", "and", "value", ".", "default", "is", "not", "UNSPECIFIED", ":", "defaults", "[", "key", ".", "value", "]", "=", "(", "value", ".", "default", ",", "value", ".", "null_values", ")", "continue", "# pragma: nocover", "if", "type", "(", "key", ")", "is", "type", ":", "types", "[", "key", "]", "=", "parse_schema", "(", "value", ")", "continue", "mandatory", "[", "key", "]", "=", "parse_schema", "(", "value", ")", "return", "mandatory", ",", "optional", ",", "types", ",", "defaults" ]
Determine the different kinds of keys.
[ "Determine", "the", "different", "kinds", "of", "keys", "." ]
ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L93-L112
241,343
thisfred/val
val/_val.py
_validate_mandatory_keys
def _validate_mandatory_keys(mandatory, validated, data, to_validate): """Validate the mandatory keys.""" errors = [] for key, sub_schema in mandatory.items(): if key not in data: errors.append('missing key: %r' % (key,)) continue try: validated[key] = sub_schema(data[key]) except NotValid as ex: errors.extend(['%r: %s' % (key, arg) for arg in ex.args]) to_validate.remove(key) return errors
python
def _validate_mandatory_keys(mandatory, validated, data, to_validate): """Validate the mandatory keys.""" errors = [] for key, sub_schema in mandatory.items(): if key not in data: errors.append('missing key: %r' % (key,)) continue try: validated[key] = sub_schema(data[key]) except NotValid as ex: errors.extend(['%r: %s' % (key, arg) for arg in ex.args]) to_validate.remove(key) return errors
[ "def", "_validate_mandatory_keys", "(", "mandatory", ",", "validated", ",", "data", ",", "to_validate", ")", ":", "errors", "=", "[", "]", "for", "key", ",", "sub_schema", "in", "mandatory", ".", "items", "(", ")", ":", "if", "key", "not", "in", "data", ":", "errors", ".", "append", "(", "'missing key: %r'", "%", "(", "key", ",", ")", ")", "continue", "try", ":", "validated", "[", "key", "]", "=", "sub_schema", "(", "data", "[", "key", "]", ")", "except", "NotValid", "as", "ex", ":", "errors", ".", "extend", "(", "[", "'%r: %s'", "%", "(", "key", ",", "arg", ")", "for", "arg", "in", "ex", ".", "args", "]", ")", "to_validate", ".", "remove", "(", "key", ")", "return", "errors" ]
Validate the mandatory keys.
[ "Validate", "the", "mandatory", "keys", "." ]
ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L115-L127
241,344
thisfred/val
val/_val.py
_validate_optional_key
def _validate_optional_key(key, missing, value, validated, optional): """Validate an optional key.""" try: validated[key] = optional[key](value) except NotValid as ex: return ['%r: %s' % (key, arg) for arg in ex.args] if key in missing: missing.remove(key) return []
python
def _validate_optional_key(key, missing, value, validated, optional): """Validate an optional key.""" try: validated[key] = optional[key](value) except NotValid as ex: return ['%r: %s' % (key, arg) for arg in ex.args] if key in missing: missing.remove(key) return []
[ "def", "_validate_optional_key", "(", "key", ",", "missing", ",", "value", ",", "validated", ",", "optional", ")", ":", "try", ":", "validated", "[", "key", "]", "=", "optional", "[", "key", "]", "(", "value", ")", "except", "NotValid", "as", "ex", ":", "return", "[", "'%r: %s'", "%", "(", "key", ",", "arg", ")", "for", "arg", "in", "ex", ".", "args", "]", "if", "key", "in", "missing", ":", "missing", ".", "remove", "(", "key", ")", "return", "[", "]" ]
Validate an optional key.
[ "Validate", "an", "optional", "key", "." ]
ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L130-L138
241,345
thisfred/val
val/_val.py
_validate_type_key
def _validate_type_key(key, value, types, validated): """Validate a key's value by type.""" for key_schema, value_schema in types.items(): if not isinstance(key, key_schema): continue try: validated[key] = value_schema(value) except NotValid: continue else: return [] return ['%r: %r not matched' % (key, value)]
python
def _validate_type_key(key, value, types, validated): """Validate a key's value by type.""" for key_schema, value_schema in types.items(): if not isinstance(key, key_schema): continue try: validated[key] = value_schema(value) except NotValid: continue else: return [] return ['%r: %r not matched' % (key, value)]
[ "def", "_validate_type_key", "(", "key", ",", "value", ",", "types", ",", "validated", ")", ":", "for", "key_schema", ",", "value_schema", "in", "types", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "key", ",", "key_schema", ")", ":", "continue", "try", ":", "validated", "[", "key", "]", "=", "value_schema", "(", "value", ")", "except", "NotValid", ":", "continue", "else", ":", "return", "[", "]", "return", "[", "'%r: %r not matched'", "%", "(", "key", ",", "value", ")", "]" ]
Validate a key's value by type.
[ "Validate", "a", "key", "s", "value", "by", "type", "." ]
ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L141-L153
241,346
thisfred/val
val/_val.py
_validate_other_keys
def _validate_other_keys(optional, types, missing, validated, data, to_validate): """Validate the rest of the keys present in the data.""" errors = [] for key in to_validate: value = data[key] if key in optional: errors.extend( _validate_optional_key( key, missing, value, validated, optional)) continue errors.extend(_validate_type_key(key, value, types, validated)) return errors
python
def _validate_other_keys(optional, types, missing, validated, data, to_validate): """Validate the rest of the keys present in the data.""" errors = [] for key in to_validate: value = data[key] if key in optional: errors.extend( _validate_optional_key( key, missing, value, validated, optional)) continue errors.extend(_validate_type_key(key, value, types, validated)) return errors
[ "def", "_validate_other_keys", "(", "optional", ",", "types", ",", "missing", ",", "validated", ",", "data", ",", "to_validate", ")", ":", "errors", "=", "[", "]", "for", "key", "in", "to_validate", ":", "value", "=", "data", "[", "key", "]", "if", "key", "in", "optional", ":", "errors", ".", "extend", "(", "_validate_optional_key", "(", "key", ",", "missing", ",", "value", ",", "validated", ",", "optional", ")", ")", "continue", "errors", ".", "extend", "(", "_validate_type_key", "(", "key", ",", "value", ",", "types", ",", "validated", ")", ")", "return", "errors" ]
Validate the rest of the keys present in the data.
[ "Validate", "the", "rest", "of", "the", "keys", "present", "in", "the", "data", "." ]
ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L156-L168
241,347
thisfred/val
val/_val.py
_build_dict_validator
def _build_dict_validator(dictionary): """Build a validator from a dictionary.""" mandatory, optional, types, defaults = _determine_keys(dictionary) def dict_validator(data): """Validate dictionaries.""" missing = list(defaults.keys()) if not isinstance(data, dict): raise NotValid('%r is not of type dict' % (data,)) validated = {} to_validate = list(data.keys()) errors = _validate_mandatory_keys( mandatory, validated, data, to_validate) errors.extend( _validate_other_keys( optional, types, missing, validated, data, to_validate)) if errors: raise NotValid(*errors) for key in missing: validated[key] = defaults[key][0] return validated return dict_validator
python
def _build_dict_validator(dictionary): """Build a validator from a dictionary.""" mandatory, optional, types, defaults = _determine_keys(dictionary) def dict_validator(data): """Validate dictionaries.""" missing = list(defaults.keys()) if not isinstance(data, dict): raise NotValid('%r is not of type dict' % (data,)) validated = {} to_validate = list(data.keys()) errors = _validate_mandatory_keys( mandatory, validated, data, to_validate) errors.extend( _validate_other_keys( optional, types, missing, validated, data, to_validate)) if errors: raise NotValid(*errors) for key in missing: validated[key] = defaults[key][0] return validated return dict_validator
[ "def", "_build_dict_validator", "(", "dictionary", ")", ":", "mandatory", ",", "optional", ",", "types", ",", "defaults", "=", "_determine_keys", "(", "dictionary", ")", "def", "dict_validator", "(", "data", ")", ":", "\"\"\"Validate dictionaries.\"\"\"", "missing", "=", "list", "(", "defaults", ".", "keys", "(", ")", ")", "if", "not", "isinstance", "(", "data", ",", "dict", ")", ":", "raise", "NotValid", "(", "'%r is not of type dict'", "%", "(", "data", ",", ")", ")", "validated", "=", "{", "}", "to_validate", "=", "list", "(", "data", ".", "keys", "(", ")", ")", "errors", "=", "_validate_mandatory_keys", "(", "mandatory", ",", "validated", ",", "data", ",", "to_validate", ")", "errors", ".", "extend", "(", "_validate_other_keys", "(", "optional", ",", "types", ",", "missing", ",", "validated", ",", "data", ",", "to_validate", ")", ")", "if", "errors", ":", "raise", "NotValid", "(", "*", "errors", ")", "for", "key", "in", "missing", ":", "validated", "[", "key", "]", "=", "defaults", "[", "key", "]", "[", "0", "]", "return", "validated", "return", "dict_validator" ]
Build a validator from a dictionary.
[ "Build", "a", "validator", "from", "a", "dictionary", "." ]
ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L171-L195
241,348
thisfred/val
val/_val.py
parse_schema
def parse_schema(schema): """Parse a val schema definition.""" if isinstance(schema, BaseSchema): return schema.validate if type(schema) is type: return _build_type_validator(schema) if isinstance(schema, dict): return _build_dict_validator(schema) if type(schema) in (list, tuple, set): return _build_iterable_validator(schema) if callable(schema): return _build_callable_validator(schema) return _build_static_validator(schema)
python
def parse_schema(schema): """Parse a val schema definition.""" if isinstance(schema, BaseSchema): return schema.validate if type(schema) is type: return _build_type_validator(schema) if isinstance(schema, dict): return _build_dict_validator(schema) if type(schema) in (list, tuple, set): return _build_iterable_validator(schema) if callable(schema): return _build_callable_validator(schema) return _build_static_validator(schema)
[ "def", "parse_schema", "(", "schema", ")", ":", "if", "isinstance", "(", "schema", ",", "BaseSchema", ")", ":", "return", "schema", ".", "validate", "if", "type", "(", "schema", ")", "is", "type", ":", "return", "_build_type_validator", "(", "schema", ")", "if", "isinstance", "(", "schema", ",", "dict", ")", ":", "return", "_build_dict_validator", "(", "schema", ")", "if", "type", "(", "schema", ")", "in", "(", "list", ",", "tuple", ",", "set", ")", ":", "return", "_build_iterable_validator", "(", "schema", ")", "if", "callable", "(", "schema", ")", ":", "return", "_build_callable_validator", "(", "schema", ")", "return", "_build_static_validator", "(", "schema", ")" ]
Parse a val schema definition.
[ "Parse", "a", "val", "schema", "definition", "." ]
ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L198-L216
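A minimal sketch of parse_schema's dispatch on types and callables; importing parse_schema and NotValid from the private val._val path shown in this record is an assumption, and the exact error wording is library-defined.

from val._val import NotValid, parse_schema  # private path taken from this record

v_int = parse_schema(int)              # type     -> isinstance check
v_pos = parse_schema(lambda n: n > 0)  # callable -> truthiness check
print(v_int(3), v_pos(2))              # -> 3 2
try:
    v_int('3')
except NotValid as exc:
    print('rejected:', exc.args)       # message wording depends on the library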
241,349
thisfred/val
val/_val.py
BaseSchema.validate
def validate(self, data): """Validate data. Raise NotValid error for invalid data.""" validated = self._validated(data) errors = [] for validator in self.additional_validators: if not validator(validated): errors.append( "%s invalidated by '%s'" % ( validated, _get_repr(validator))) if errors: raise NotValid(*errors) if self.default is UNSPECIFIED: return validated if self.null_values is not UNSPECIFIED\ and validated in self.null_values: return self.default if validated is None: return self.default return validated
python
def validate(self, data): """Validate data. Raise NotValid error for invalid data.""" validated = self._validated(data) errors = [] for validator in self.additional_validators: if not validator(validated): errors.append( "%s invalidated by '%s'" % ( validated, _get_repr(validator))) if errors: raise NotValid(*errors) if self.default is UNSPECIFIED: return validated if self.null_values is not UNSPECIFIED\ and validated in self.null_values: return self.default if validated is None: return self.default return validated
[ "def", "validate", "(", "self", ",", "data", ")", ":", "validated", "=", "self", ".", "_validated", "(", "data", ")", "errors", "=", "[", "]", "for", "validator", "in", "self", ".", "additional_validators", ":", "if", "not", "validator", "(", "validated", ")", ":", "errors", ".", "append", "(", "\"%s invalidated by '%s'\"", "%", "(", "validated", ",", "_get_repr", "(", "validator", ")", ")", ")", "if", "errors", ":", "raise", "NotValid", "(", "*", "errors", ")", "if", "self", ".", "default", "is", "UNSPECIFIED", ":", "return", "validated", "if", "self", ".", "null_values", "is", "not", "UNSPECIFIED", "and", "validated", "in", "self", ".", "null_values", ":", "return", "self", ".", "default", "if", "validated", "is", "None", ":", "return", "self", ".", "default", "return", "validated" ]
Validate data. Raise NotValid error for invalid data.
[ "Validate", "data", ".", "Raise", "NotValid", "error", "for", "invalid", "data", "." ]
ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L243-L265
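A sketch of how default and null_values are applied at the end of validate(); that Schema accepts them as constructor keyword arguments is an assumption inferred from the BaseSchema attributes used above.

from val import Optional, Schema  # names assumed, as above

limit = Schema(int, default=10, null_values=(0,))  # kwargs assumed from BaseSchema
schema = Schema({Optional('limit'): limit})
print(schema.validate({}))            # -> {'limit': 10}  missing key -> default
print(schema.validate({'limit': 0}))  # -> {'limit': 10}  null value  -> default
print(schema.validate({'limit': 5}))  # -> {'limit': 5}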
241,350
thisfred/val
val/_val.py
Or._validated
def _validated(self, data): """Validate data if any subschema validates it.""" errors = [] for sub in self.schemas: try: return sub(data) except NotValid as ex: errors.extend(ex.args) raise NotValid(' and '.join(errors))
python
def _validated(self, data): """Validate data if any subschema validates it.""" errors = [] for sub in self.schemas: try: return sub(data) except NotValid as ex: errors.extend(ex.args) raise NotValid(' and '.join(errors))
[ "def", "_validated", "(", "self", ",", "data", ")", ":", "errors", "=", "[", "]", "for", "sub", "in", "self", ".", "schemas", ":", "try", ":", "return", "sub", "(", "data", ")", "except", "NotValid", "as", "ex", ":", "errors", ".", "extend", "(", "ex", ".", "args", ")", "raise", "NotValid", "(", "' and '", ".", "join", "(", "errors", ")", ")" ]
Validate data if any subschema validates it.
[ "Validate", "data", "if", "any", "subschema", "validates", "it", "." ]
ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L310-L319
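Or in use, assuming it is re-exported from the val package: the first subschema that accepts the data wins.

from val import Or  # assumed re-export

int_or_str = Or(int, str)
print(int_or_str.validate(3))        # -> 3
print(int_or_str.validate('three'))  # -> 'three'
# A value no subschema accepts raises NotValid, with the collected
# error messages joined by ' and ', e.g. int_or_str.validate(3.5)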
241,351
thisfred/val
val/_val.py
And._validated
def _validated(self, data): """Validate data if all subschemas validate it.""" for sub in self.schemas: data = sub(data) return data
python
def _validated(self, data): """Validate data if all subschemas validate it.""" for sub in self.schemas: data = sub(data) return data
[ "def", "_validated", "(", "self", ",", "data", ")", ":", "for", "sub", "in", "self", ".", "schemas", ":", "data", "=", "sub", "(", "data", ")", "return", "data" ]
Validate data if all subschemas validate it.
[ "Validate", "data", "if", "all", "subschemas", "validate", "it", "." ]
ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L339-L343
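And in use, assuming the And and Convert re-exports: each subschema receives the previous one's output, which is what makes chaining a Convert with a range check useful.

from val import And, Convert  # assumed re-exports

to_count = And(Convert(int), lambda n: n >= 0)  # later schemas see earlier output
print(to_count.validate('42'))  # -> 42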
241,352
thisfred/val
val/_val.py
Convert._validated
def _validated(self, data): """Convert data or die trying.""" try: return self.convert(data) except (TypeError, ValueError) as ex: raise NotValid(*ex.args)
python
def _validated(self, data): """Convert data or die trying.""" try: return self.convert(data) except (TypeError, ValueError) as ex: raise NotValid(*ex.args)
[ "def", "_validated", "(", "self", ",", "data", ")", ":", "try", ":", "return", "self", ".", "convert", "(", "data", ")", "except", "(", "TypeError", ",", "ValueError", ")", "as", "ex", ":", "raise", "NotValid", "(", "*", "ex", ".", "args", ")" ]
Convert data or die trying.
[ "Convert", "data", "or", "die", "trying", "." ]
ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L358-L363
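Convert in use, assuming the re-exports: TypeError and ValueError raised by the conversion are re-raised as NotValid with the same args.

from val import Convert, NotValid  # assumed re-exports

as_int = Convert(int)
print(as_int.validate('7'))  # -> 7
try:
    as_int.validate('seven')
except NotValid as exc:
    print(exc.args)          # the underlying ValueError's args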
241,353
thisfred/val
val/_val.py
Ordered._validated
def _validated(self, values): """Validate if the values are validated one by one in order.""" if self.length != len(values): raise NotValid( "%r does not have exactly %d values. (Got %d.)" % ( values, self.length, len(values))) return type(self.schemas)( self.schemas[i].validate(v) for i, v in enumerate(values))
python
def _validated(self, values): """Validate if the values are validated one by one in order.""" if self.length != len(values): raise NotValid( "%r does not have exactly %d values. (Got %d.)" % ( values, self.length, len(values))) return type(self.schemas)( self.schemas[i].validate(v) for i, v in enumerate(values))
[ "def", "_validated", "(", "self", ",", "values", ")", ":", "if", "self", ".", "length", "!=", "len", "(", "values", ")", ":", "raise", "NotValid", "(", "\"%r does not have exactly %d values. (Got %d.)\"", "%", "(", "values", ",", "self", ".", "length", ",", "len", "(", "values", ")", ")", ")", "return", "type", "(", "self", ".", "schemas", ")", "(", "self", ".", "schemas", "[", "i", "]", ".", "validate", "(", "v", ")", "for", "i", ",", "v", "in", "enumerate", "(", "values", ")", ")" ]
Validate if the values are validated one by one in order.
[ "Validate", "if", "the", "values", "are", "validated", "one", "by", "one", "in", "order", "." ]
ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L380-L387
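Ordered in use, assuming the re-export and a list-of-schemas constructor: each value is validated by the schema at the same position, and the length must match exactly.

from val import Ordered  # assumed re-export

point = Ordered([int, int, str])
print(point.validate([3, 4, 'origin']))  # -> [3, 4, 'origin']
# A length mismatch raises NotValid, e.g. point.validate([3, 4])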
241,354
esterhui/pypu
pypu/service_facebook.py
service_facebook._connectToFB
def _connectToFB(self): """Establish the actual TCP connection to FB""" if self.connected_to_fb: logger.debug("Already connected to fb") return True logger.debug("Connecting to fb") token = facebook_login.get_fb_token() try: self.fb = facebook.GraphAPI(token) except: print("Couldn't connect to fb") return False self.connected_to_fb=True return True
python
def _connectToFB(self): """Establish the actual TCP connection to FB""" if self.connected_to_fb: logger.debug("Already connected to fb") return True logger.debug("Connecting to fb") token = facebook_login.get_fb_token() try: self.fb = facebook.GraphAPI(token) except: print("Couldn't connect to fb") return False self.connected_to_fb=True return True
[ "def", "_connectToFB", "(", "self", ")", ":", "if", "self", ".", "connected_to_fb", ":", "logger", ".", "debug", "(", "\"Already connected to fb\"", ")", "return", "True", "logger", ".", "debug", "(", "\"Connecting to fb\"", ")", "token", "=", "facebook_login", ".", "get_fb_token", "(", ")", "try", ":", "self", ".", "fb", "=", "facebook", ".", "GraphAPI", "(", "token", ")", "except", ":", "print", "(", "\"Couldn't connect to fb\"", ")", "return", "False", "self", ".", "connected_to_fb", "=", "True", "return", "True" ]
Establish the actual TCP connection to FB
[ "Establish", "the", "actual", "TCP", "connection", "to", "FB" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L45-L64
241,355
esterhui/pypu
pypu/service_facebook.py
service_facebook.KnowsFile
def KnowsFile(self,filename): """Looks at extension and decides if it knows how to manage this file""" if self._isMediaFile(filename) or self._isConfigFile(filename): return True return False
python
def KnowsFile(self,filename): """Looks at extension and decides if it knows how to manage this file""" if self._isMediaFile(filename) or self._isConfigFile(filename): return True return False
[ "def", "KnowsFile", "(", "self", ",", "filename", ")", ":", "if", "self", ".", "_isMediaFile", "(", "filename", ")", "or", "self", ".", "_isConfigFile", "(", "filename", ")", ":", "return", "True", "return", "False" ]
Looks at extension and decides if it knows how to manage this file
[ "Looks", "at", "extension", "and", "decides", "if", "it", "knows", "how", "to", "manage", "this", "file" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L86-L91
241,356
esterhui/pypu
pypu/service_facebook.py
service_facebook.Remove
def Remove(self,directory,filename): """Deletes files from fb""" if self._isMediaFile(filename): return self._remove_media(directory,filename) elif self._isConfigFile(filename): return True print("Not handled!") return False
python
def Remove(self,directory,filename): """Deletes files from fb""" if self._isMediaFile(filename): return self._remove_media(directory,filename) elif self._isConfigFile(filename): return True print("Not handled!") return False
[ "def", "Remove", "(", "self", ",", "directory", ",", "filename", ")", ":", "if", "self", ".", "_isMediaFile", "(", "filename", ")", ":", "return", "self", ".", "_remove_media", "(", "directory", ",", "filename", ")", "elif", "self", ".", "_isConfigFile", "(", "filename", ")", ":", "return", "True", "print", "(", "\"Not handled!\"", ")", "return", "False" ]
Deletes files from fb
[ "Deletes", "files", "from", "fb" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L103-L111
241,357
esterhui/pypu
pypu/service_facebook.py
service_facebook._update_config
def _update_config(self,directory,filename): """Manages FB config files""" basefilename=os.path.splitext(filename)[0] ext=os.path.splitext(filename)[1].lower() #if filename==LOCATION_FILE: #return self._update_config_location(directory) #FIXME #elif filename==TAG_FILE: #return self._update_config_tags(directory) if filename==SET_FILE: print("%s - Moving photos to album"%(filename)) return self._upload_media(directory,movealbum_request=True) elif filename==MEGAPIXEL_FILE: print("%s - Resizing photos"%(filename)) return self._upload_media(directory,resize_request=True) elif ext in self.FB_META_EXTENSIONS: print("%s - Changing photo title"%(basefilename)) return self._upload_media(directory,basefilename,changetitle_request=True) return False
python
def _update_config(self,directory,filename): """Manages FB config files""" basefilename=os.path.splitext(filename)[0] ext=os.path.splitext(filename)[1].lower() #if filename==LOCATION_FILE: #return self._update_config_location(directory) #FIXME #elif filename==TAG_FILE: #return self._update_config_tags(directory) if filename==SET_FILE: print("%s - Moving photos to album"%(filename)) return self._upload_media(directory,movealbum_request=True) elif filename==MEGAPIXEL_FILE: print("%s - Resizing photos"%(filename)) return self._upload_media(directory,resize_request=True) elif ext in self.FB_META_EXTENSIONS: print("%s - Changing photo title"%(basefilename)) return self._upload_media(directory,basefilename,changetitle_request=True) return False
[ "def", "_update_config", "(", "self", ",", "directory", ",", "filename", ")", ":", "basefilename", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "0", "]", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "1", "]", ".", "lower", "(", ")", "#if filename==LOCATION_FILE:", "#return self._update_config_location(directory)", "#FIXME", "#elif filename==TAG_FILE:", "#return self._update_config_tags(directory)", "if", "filename", "==", "SET_FILE", ":", "print", "(", "\"%s - Moving photos to album\"", "%", "(", "filename", ")", ")", "return", "self", ".", "_upload_media", "(", "directory", ",", "movealbum_request", "=", "True", ")", "elif", "filename", "==", "MEGAPIXEL_FILE", ":", "print", "(", "\"%s - Resizing photos\"", "%", "(", "filename", ")", ")", "return", "self", ".", "_upload_media", "(", "directory", ",", "resize_request", "=", "True", ")", "elif", "ext", "in", "self", ".", "FB_META_EXTENSIONS", ":", "print", "(", "\"%s - Changing photo title\"", "%", "(", "basefilename", ")", ")", "return", "self", ".", "_upload_media", "(", "directory", ",", "basefilename", ",", "changetitle_request", "=", "True", ")", "return", "False" ]
Manages FB config files
[ "Manages", "FB", "config", "files" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L113-L132
241,358
esterhui/pypu
pypu/service_facebook.py
service_facebook._get_title
def _get_title(self,directory,filename): """Loads image title if any""" # =========== LOAD TITLE ======== fullfile=os.path.join(directory,filename+'.title') try: logger.debug('trying to open [%s]'%(fullfile)) _title=(open(fullfile).readline().strip()) logger.debug("_updatemeta: %s - title is '%s'",filename,_title) except: _title='' return _title
python
def _get_title(self,directory,filename): """Loads image title if any""" # =========== LOAD TITLE ======== fullfile=os.path.join(directory,filename+'.title') try: logger.debug('trying to open [%s]'%(fullfile)) _title=(open(fullfile).readline().strip()) logger.debug("_updatemeta: %s - title is '%s'",filename,_title) except: _title='' return _title
[ "def", "_get_title", "(", "self", ",", "directory", ",", "filename", ")", ":", "# =========== LOAD TITLE ========", "fullfile", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "filename", "+", "'.title'", ")", "try", ":", "logger", ".", "debug", "(", "'trying to open [%s]'", "%", "(", "fullfile", ")", ")", "_title", "=", "(", "open", "(", "fullfile", ")", ".", "readline", "(", ")", ".", "strip", "(", ")", ")", "logger", ".", "debug", "(", "\"_updatemeta: %s - title is '%s'\"", ",", "filename", ",", "_title", ")", "except", ":", "_title", "=", "''", "return", "_title" ]
Loads image title if any
[ "Loads", "image", "title", "if", "any" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L134-L145
241,359
esterhui/pypu
pypu/service_facebook.py
service_facebook._load_megapixels
def _load_megapixels(self,directory): """Opens megapixel file, if it contains '3.5' for instance, will scale all uploaded photos in directory to this size, the original photo is untouched. Returns None if file not found """ #FIXME: should check if DB tracking file before using it fullfile=os.path.join(directory,MEGAPIXEL_FILE) try: mp=float(open(fullfile).readline()) logger.debug("_load_megapixel: MP from file is %f",mp) except: logger.warning("Couldn't open image size file in %s, not scaling images"\ %(directory)) return None return mp
python
def _load_megapixels(self,directory): """Opens megapixel file, if it contains '3.5' for instance, will scale all uploaded photos in directory to this size, the original photo is untouched. Returns None if file not found """ #FIXME: should check if DB tracking file before using it fullfile=os.path.join(directory,MEGAPIXEL_FILE) try: mp=float(open(fullfile).readline()) logger.debug("_load_megapixel: MP from file is %f",mp) except: logger.warning("Couldn't open image size file in %s, not scaling images"\ %(directory)) return None return mp
[ "def", "_load_megapixels", "(", "self", ",", "directory", ")", ":", "#FIXME: should check if DB tracking file before using it", "fullfile", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "MEGAPIXEL_FILE", ")", "try", ":", "mp", "=", "float", "(", "open", "(", "fullfile", ")", ".", "readline", "(", ")", ")", "logger", ".", "debug", "(", "\"_load_megapixel: MP from file is %f\"", ",", "mp", ")", "except", ":", "logger", ".", "warning", "(", "\"Couldn't open image size file in %s, not scaling images\"", "%", "(", "directory", ")", ")", "return", "None", "return", "mp" ]
Opens megapixel file, if it contains '3.5' for instance, will scale all uploaded photos in directory to this size, the original photo is untouched. Returns None if file not found
[ "Opens", "megapixel", "file", "if", "it", "contains", "3", ".", "5", "for", "instance", "will", "scale", "all", "uploaded", "photos", "in", "directory", "to", "this", "size", "the", "original", "photo", "is", "untouched", ".", "Returns", "None", "if", "file", "not", "found" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L147-L165
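A standalone sketch of the same read-a-float-or-None pattern with the bare except narrowed to the errors that can actually occur; the file name below is a placeholder, since MEGAPIXEL_FILE's value is not shown in this record.

import os

def load_megapixels(directory, filename='megapixels'):  # placeholder file name
    try:
        with open(os.path.join(directory, filename)) as handle:
            return float(handle.readline())
    except (OSError, ValueError):  # missing file or unparsable number
        return None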
241,360
esterhui/pypu
pypu/service_facebook.py
service_facebook._load_sets
def _load_sets(self,directory): """Loads sets from set file and returns them as a list of strings """ # --- Read sets out of file _sets=[] try: fullfile=os.path.join(directory,SET_FILE) lsets=open(fullfile).readline().split(',') for tag in lsets: _sets.append(tag.strip()) except: logger.error("No sets found in %s, FB needs an album name (%s)"\ %(directory,SET_FILE)) sys.exit(1) return _sets
python
def _load_sets(self,directory): """Loads sets from set file and returns them as a list of strings """ # --- Read sets out of file _sets=[] try: fullfile=os.path.join(directory,SET_FILE) lsets=open(fullfile).readline().split(',') for tag in lsets: _sets.append(tag.strip()) except: logger.error("No sets found in %s, FB needs an album name (%s)"\ %(directory,SET_FILE)) sys.exit(1) return _sets
[ "def", "_load_sets", "(", "self", ",", "directory", ")", ":", "# --- Read sets out of file", "_sets", "=", "[", "]", "try", ":", "fullfile", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "SET_FILE", ")", "lsets", "=", "open", "(", "fullfile", ")", ".", "readline", "(", ")", ".", "split", "(", "','", ")", "for", "tag", "in", "lsets", ":", "_sets", ".", "append", "(", "tag", ".", "strip", "(", ")", ")", "except", ":", "logger", ".", "error", "(", "\"No sets found in %s, FB needs an album name (%s)\"", "%", "(", "directory", ",", "SET_FILE", ")", ")", "sys", ".", "exit", "(", "1", ")", "return", "_sets" ]
Loads sets from set file and returns them as a list of strings
[ "Loads", "sets", "from", "set", "file", "and", "returns", "them", "as", "a", "list", "of", "strings" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L167-L183
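The SET_FILE this reads holds one comma-separated line; the parsing step on its own, without the fb dependencies:

line = 'Summer 2013, Hiking, Family'  # a SET_FILE holds one such line
sets = [part.strip() for part in line.split(',')]
print(sets)  # -> ['Summer 2013', 'Hiking', 'Family']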
241,361
esterhui/pypu
pypu/service_facebook.py
service_facebook._get_album
def _get_album(self,directory): """ Loads set name from SET_FILE, looks up album_id on fb, if it doesn't exist, creates album. Returns album id and album name """ if not self._connectToFB(): print("%s - Couldn't connect to fb"%(directory)) return None,None # Load sets from SET_FILE _sets=self._load_sets(directory) # Only grab the first set, FB supports only one set per photo myset=_sets[0] logger.debug("Getting album id for %s"%(myset)) # Connect to fb and get dictionary of photosets psets=self._getphotosets() # create if it doesn't exist if myset not in psets: logger.info('set [%s] not in fb sets, will create set'%(myset)) self._createphotoset(myset) # Now reload photosets from fb psets=self._getphotosets() # Return the album id, album name return psets[myset]['id'],myset
python
def _get_album(self,directory): """ Loads set name from SET_FILE, looks up album_id on fb, if it doesn't exist, creates album. Returns album id and album name """ if not self._connectToFB(): print("%s - Couldn't connect to fb"%(directory)) return None,None # Load sets from SET_FILE _sets=self._load_sets(directory) # Only grab the first set, FB supports only one set per photo myset=_sets[0] logger.debug("Getting album id for %s"%(myset)) # Connect to fb and get dictionary of photosets psets=self._getphotosets() # create if it doesn't exist if myset not in psets: logger.info('set [%s] not in fb sets, will create set'%(myset)) self._createphotoset(myset) # Now reload photosets from fb psets=self._getphotosets() # Return the album id, album name return psets[myset]['id'],myset
[ "def", "_get_album", "(", "self", ",", "directory", ")", ":", "if", "not", "self", ".", "_connectToFB", "(", ")", ":", "print", "(", "\"%s - Couldn't connect to fb\"", "%", "(", "directory", ")", ")", "return", "None", ",", "None", "# Load sets from SET_FILE", "_sets", "=", "self", ".", "_load_sets", "(", "directory", ")", "# Only grab the first set, FB supports only one set per photo", "myset", "=", "_sets", "[", "0", "]", "logger", ".", "debug", "(", "\"Getting album id for %s\"", "%", "(", "myset", ")", ")", "# Connect to fb and get dictionary of photosets", "psets", "=", "self", ".", "_getphotosets", "(", ")", "# create if it doesn't exist", "if", "myset", "not", "in", "psets", ":", "logger", ".", "info", "(", "'set [%s] not in fb sets, will create set'", "%", "(", "myset", ")", ")", "self", ".", "_createphotoset", "(", "myset", ")", "# Now reload photosets from fb", "psets", "=", "self", ".", "_getphotosets", "(", ")", "# Return the album id, album name", "return", "psets", "[", "myset", "]", "[", "'id'", "]", ",", "myset" ]
Loads set name from SET_FILE, looks up album_id on fb, if it doesn't exist, creates album. Returns album id and album name
[ "Loads", "set", "name", "from", "SET_FILE", "looks", "up", "album_id", "on", "fb", "if", "it", "doesn", "t", "exist", "creates", "album", ".", "Returns", "album", "id", "and", "album", "name" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L201-L230
241,362
esterhui/pypu
pypu/service_facebook.py
service_facebook._getphoto_originalsize
def _getphoto_originalsize(self,pid): """Asks fb for photo original size returns tuple with width,height """ logger.debug('%s - Getting original size from fb'%(pid)) i=self.fb.get_object(pid) width=i['images'][0]['width'] height=i['images'][0]['height'] return (width,height)
python
def _getphoto_originalsize(self,pid): """Asks fb for photo original size returns tuple with width,height """ logger.debug('%s - Getting original size from fb'%(pid)) i=self.fb.get_object(pid) width=i['images'][0]['width'] height=i['images'][0]['height'] return (width,height)
[ "def", "_getphoto_originalsize", "(", "self", ",", "pid", ")", ":", "logger", ".", "debug", "(", "'%s - Getting original size from fb'", "%", "(", "pid", ")", ")", "i", "=", "self", ".", "fb", ".", "get_object", "(", "pid", ")", "width", "=", "i", "[", "'images'", "]", "[", "0", "]", "[", "'width'", "]", "height", "=", "i", "[", "'images'", "]", "[", "0", "]", "[", "'height'", "]", "return", "(", "width", ",", "height", ")" ]
Asks fb for photo original size returns tuple with width,height
[ "Asks", "fb", "for", "photo", "original", "size", "returns", "tuple", "with", "width", "height" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L251-L259
241,363
esterhui/pypu
pypu/service_facebook.py
service_facebook._getphoto_location
def _getphoto_location(self,pid): """Asks fb for photo location information returns tuple with lat,lon,accuracy """ logger.debug('%s - Getting location from fb'%(pid)) lat=None lon=None accuracy=None resp=self.fb.photos_geo_getLocation(photo_id=pid) if resp.attrib['stat']!='ok': logger.error("%s - fb: photos_geo_getLocation failed with status: %s",\ pid,resp.attrib['stat']) return (None,None,None) for location in resp.find('photo'): lat=location.attrib['latitude'] lon=location.attrib['longitude'] accuracy=location.attrib['accuracy'] return (lat,lon,accuracy)
python
def _getphoto_location(self,pid): """Asks fb for photo location information returns tuple with lat,lon,accuracy """ logger.debug('%s - Getting location from fb'%(pid)) lat=None lon=None accuracy=None resp=self.fb.photos_geo_getLocation(photo_id=pid) if resp.attrib['stat']!='ok': logger.error("%s - fb: photos_geo_getLocation failed with status: %s",\ pid,resp.attrib['stat']) return (None,None,None) for location in resp.find('photo'): lat=location.attrib['latitude'] lon=location.attrib['longitude'] accuracy=location.attrib['accuracy'] return (lat,lon,accuracy)
[ "def", "_getphoto_location", "(", "self", ",", "pid", ")", ":", "logger", ".", "debug", "(", "'%s - Getting location from fb'", "%", "(", "pid", ")", ")", "lat", "=", "None", "lon", "=", "None", "accuracy", "=", "None", "resp", "=", "self", ".", "fb", ".", "photos_geo_getLocation", "(", "photo_id", "=", "pid", ")", "if", "resp", ".", "attrib", "[", "'stat'", "]", "!=", "'ok'", ":", "logger", ".", "error", "(", "\"%s - fb: photos_geo_getLocation failed with status: %s\"", ",", "pid", ",", "resp", ".", "attrib", "[", "'stat'", "]", ")", "return", "(", "None", ",", "None", ",", "None", ")", "for", "location", "in", "resp", ".", "find", "(", "'photo'", ")", ":", "lat", "=", "location", ".", "attrib", "[", "'latitude'", "]", "lon", "=", "location", ".", "attrib", "[", "'longitude'", "]", "accuracy", "=", "location", ".", "attrib", "[", "'accuracy'", "]", "return", "(", "lat", ",", "lon", ",", "accuracy", ")" ]
Asks fb for photo location information returns tuple with lat,lon,accuracy
[ "Asks", "fb", "for", "photo", "location", "information", "returns", "tuple", "with", "lat", "lon", "accuracy" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L262-L285
241,364
esterhui/pypu
pypu/service_facebook.py
service_facebook._remove_media
def _remove_media(self,directory,files=None): """Removes specified files from fb""" # Connect if we aren't already if not self._connectToFB(): logger.error("%s - Couldn't connect to fb") return False db=self._loadDB(directory) # If no files given, use files from DB in dir if not files: files=db.keys() #If only one file given, make it a list if isinstance(files,basestring): files=[files] for fn in files: print("%s - Deleting from fb [local copy intact]"%(fn)) try: pid=db[fn]['photoid'] except: logger.debug("%s - Was never in fb DB"%(fn)) continue try: self.fb.delete_object(pid) except facebook.GraphAPIError as e: print("%s - fb: delete failed with status: %s:%s"\ %(fn,e.type,e.message)) return False logger.debug('Removing %s from fb DB'%(fn)) del db[fn] self._saveDB(directory,db) return True
python
def _remove_media(self,directory,files=None): """Removes specified files from fb""" # Connect if we aren't already if not self._connectToFB(): logger.error("%s - Couldn't connect to fb") return False db=self._loadDB(directory) # If no files given, use files from DB in dir if not files: files=db.keys() #If only one file given, make it a list if isinstance(files,basestring): files=[files] for fn in files: print("%s - Deleting from fb [local copy intact]"%(fn)) try: pid=db[fn]['photoid'] except: logger.debug("%s - Was never in fb DB"%(fn)) continue try: self.fb.delete_object(pid) except facebook.GraphAPIError as e: print("%s - fb: delete failed with status: %s:%s"\ %(fn,e.type,e.message)) return False logger.debug('Removing %s from fb DB'%(fn)) del db[fn] self._saveDB(directory,db) return True
[ "def", "_remove_media", "(", "self", ",", "directory", ",", "files", "=", "None", ")", ":", "# Connect if we aren't already", "if", "not", "self", ".", "_connectToFB", "(", ")", ":", "logger", ".", "error", "(", "\"%s - Couldn't connect to fb\"", ")", "return", "False", "db", "=", "self", ".", "_loadDB", "(", "directory", ")", "# If no files given, use files from DB in dir", "if", "not", "files", ":", "files", "=", "db", ".", "keys", "(", ")", "#If only one file given, make it a list", "if", "isinstance", "(", "files", ",", "basestring", ")", ":", "files", "=", "[", "files", "]", "for", "fn", "in", "files", ":", "print", "(", "\"%s - Deleting from fb [local copy intact]\"", "%", "(", "fn", ")", ")", "try", ":", "pid", "=", "db", "[", "fn", "]", "[", "'photoid'", "]", "except", ":", "logger", ".", "debug", "(", "\"%s - Was never in fb DB\"", "%", "(", "fn", ")", ")", "continue", "try", ":", "self", ".", "fb", ".", "delete_object", "(", "pid", ")", "except", "facebook", ".", "GraphAPIError", "as", "e", ":", "print", "(", "\"%s - fb: delete failed with status: %s:%s\"", "%", "(", "fn", ",", "e", ".", "type", ",", "e", ".", "message", ")", ")", "return", "False", "logger", ".", "debug", "(", "'Removing %s from fb DB'", "%", "(", "fn", ")", ")", "del", "db", "[", "fn", "]", "self", ".", "_saveDB", "(", "directory", ",", "db", ")", "return", "True" ]
Removes specified files from fb
[ "Removes", "specified", "files", "from", "fb" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L287-L322
241,365
esterhui/pypu
pypu/service_facebook.py
service_facebook._upload_media
def _upload_media(self,directory,files=None,resize_request=None, \ movealbum_request=None,changetitle_request=None): """Uploads media file to FB, returns True if uploaded successfully, Will replace if already uploaded, If megapixels > 0, will scale photos before upload If no filename given, will go through all files in DB""" # Connect if we aren't already if not self._connectToFB(): logger.error("%s - Couldn't connect to fb") return False _megapixels=self._load_megapixels(directory) # Get an album ID (create album if not exists) _album_id,_album_name=self._get_album(directory) if not _megapixels: mpstring="original" else: mpstring=("%0.1f MP"%(_megapixels)) # If no files given, use files from DB in dir if not files: db=self._loadDB(directory) files=db.keys() #If only one file given, make it a list if isinstance(files,basestring): files=[files] files.sort() for filename in files: # Get title here if any title=self._get_title(directory,filename) if title: print("%s - Uploading to fb, album[%s] size=%s title=%s"\ %(filename,_album_name,mpstring,title)) else: print("%s - Uploading to fb, album[%s] size=%s"\ %(filename,_album_name,mpstring)) status=self._upload_or_replace_fb(directory,filename, \ _album_id, _megapixels,resize_request,movealbum_request,\ changetitle_request,title) if not status: return False return True
python
def _upload_media(self,directory,files=None,resize_request=None, \ movealbum_request=None,changetitle_request=None): """Uploads media file to FB, returns True if uploaded successfully, Will replace if already uploaded, If megapixels > 0, will scale photos before upload If no filename given, will go through all files in DB""" # Connect if we aren't already if not self._connectToFB(): logger.error("%s - Couldn't connect to fb") return False _megapixels=self._load_megapixels(directory) # Get an album ID (create album if not exists) _album_id,_album_name=self._get_album(directory) if not _megapixels: mpstring="original" else: mpstring=("%0.1f MP"%(_megapixels)) # If no files given, use files from DB in dir if not files: db=self._loadDB(directory) files=db.keys() #If only one file given, make it a list if isinstance(files,basestring): files=[files] files.sort() for filename in files: # Get title here if any title=self._get_title(directory,filename) if title: print("%s - Uploading to fb, album[%s] size=%s title=%s"\ %(filename,_album_name,mpstring,title)) else: print("%s - Uploading to fb, album[%s] size=%s"\ %(filename,_album_name,mpstring)) status=self._upload_or_replace_fb(directory,filename, \ _album_id, _megapixels,resize_request,movealbum_request,\ changetitle_request,title) if not status: return False return True
[ "def", "_upload_media", "(", "self", ",", "directory", ",", "files", "=", "None", ",", "resize_request", "=", "None", ",", "movealbum_request", "=", "None", ",", "changetitle_request", "=", "None", ")", ":", "# Connect if we aren't already", "if", "not", "self", ".", "_connectToFB", "(", ")", ":", "logger", ".", "error", "(", "\"%s - Couldn't connect to fb\"", ")", "return", "False", "_megapixels", "=", "self", ".", "_load_megapixels", "(", "directory", ")", "# Get an album ID (create album if not exists)", "_album_id", ",", "_album_name", "=", "self", ".", "_get_album", "(", "directory", ")", "if", "not", "_megapixels", ":", "mpstring", "=", "\"original\"", "else", ":", "mpstring", "=", "(", "\"%0.1f MP\"", "%", "(", "_megapixels", ")", ")", "# If no files given, use files from DB in dir", "if", "not", "files", ":", "db", "=", "self", ".", "_loadDB", "(", "directory", ")", "files", "=", "db", ".", "keys", "(", ")", "#If only one file given, make it a list", "if", "isinstance", "(", "files", ",", "basestring", ")", ":", "files", "=", "[", "files", "]", "files", ".", "sort", "(", ")", "for", "filename", "in", "files", ":", "# Get title here if any", "title", "=", "self", ".", "_get_title", "(", "directory", ",", "filename", ")", "if", "title", ":", "print", "(", "\"%s - Uploading to fb, album[%s] size=%s title=%s\"", "%", "(", "filename", ",", "_album_name", ",", "mpstring", ",", "title", ")", ")", "else", ":", "print", "(", "\"%s - Uploading to fb, album[%s] size=%s\"", "%", "(", "filename", ",", "_album_name", ",", "mpstring", ")", ")", "status", "=", "self", ".", "_upload_or_replace_fb", "(", "directory", ",", "filename", ",", "_album_id", ",", "_megapixels", ",", "resize_request", ",", "movealbum_request", ",", "changetitle_request", ",", "title", ")", "if", "not", "status", ":", "return", "False", "return", "True" ]
Uploads media file to FB, returns True if uploaded successfully, Will replace if already uploaded, If megapixels > 0, will scale photos before upload If no filename given, will go through all files in DB
[ "Uploads", "media", "file", "to", "FB", "returns", "True", "if", "uploaded", "successfully", "Will", "replace", "if", "already", "uploaded", "If", "megapixels", ">", "0", "will", "scale", "photos", "before", "upload", "If", "no", "filename", "given", "will", "go", "through", "all", "files", "in", "DB" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L324-L375
241,366
esterhui/pypu
pypu/service_facebook.py
service_facebook._title_uptodate
def _title_uptodate(self,fullfile,pid,_title): """Check fb photo title against provided title, returns true if they match""" i=self.fb.get_object(pid) if i.has_key('name'): if _title == i['name']: return True return False
python
def _title_uptodate(self,fullfile,pid,_title): """Check fb photo title against provided title, returns true if they match""" i=self.fb.get_object(pid) if i.has_key('name'): if _title == i['name']: return True return False
[ "def", "_title_uptodate", "(", "self", ",", "fullfile", ",", "pid", ",", "_title", ")", ":", "i", "=", "self", ".", "fb", ".", "get_object", "(", "pid", ")", "if", "i", ".", "has_key", "(", "'name'", ")", ":", "if", "_title", "==", "i", "[", "'name'", "]", ":", "return", "True", "return", "False" ]
Check fb photo title against provided title, returns true if they match
[ "Check", "fb", "photo", "title", "against", "provided", "title", "returns", "true", "if", "they", "match" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L468-L476
241,367
esterhui/pypu
pypu/service_facebook.py
service_facebook._already_in_album
def _already_in_album(self,fullfile,pid,album_id): """Check to see if photo with given pid is already in the album_id, returns true if this is the case """ logger.debug("fb: Checking if pid %s in album %s",pid,album_id) pid_in_album=[] # Get all photos in album photos = self.fb.get_connections(str(album_id),"photos")['data'] # Get all pids in fb album for photo in photos: pid_in_album.append(photo['id']) logger.debug("fb: album %d contains these photos: %s",album_id,pid_in_album) # Check if our pid matches if pid in pid_in_album: return True return False
python
def _already_in_album(self,fullfile,pid,album_id): """Check to see if photo with given pid is already in the album_id, returns true if this is the case """ logger.debug("fb: Checking if pid %s in album %s",pid,album_id) pid_in_album=[] # Get all photos in album photos = self.fb.get_connections(str(album_id),"photos")['data'] # Get all pids in fb album for photo in photos: pid_in_album.append(photo['id']) logger.debug("fb: album %d contains these photos: %s",album_id,pid_in_album) # Check if our pid matches if pid in pid_in_album: return True return False
[ "def", "_already_in_album", "(", "self", ",", "fullfile", ",", "pid", ",", "album_id", ")", ":", "logger", ".", "debug", "(", "\"fb: Checking if pid %s in album %s\"", ",", "pid", ",", "album_id", ")", "pid_in_album", "=", "[", "]", "# Get all photos in album", "photos", "=", "self", ".", "fb", ".", "get_connections", "(", "str", "(", "album_id", ")", ",", "\"photos\"", ")", "[", "'data'", "]", "# Get all pids in fb album", "for", "photo", "in", "photos", ":", "pid_in_album", ".", "append", "(", "photo", "[", "'id'", "]", ")", "logger", ".", "debug", "(", "\"fb: album %d contains these photos: %s\"", ",", "album_id", ",", "pid_in_album", ")", "# Check if our pid matches", "if", "pid", "in", "pid_in_album", ":", "return", "True", "return", "False" ]
Check to see if photo with given pid is already in the album_id, returns true if this is the case
[ "Check", "to", "see", "if", "photo", "with", "given", "pid", "is", "already", "in", "the", "album_id", "returns", "true", "if", "this", "is", "the", "case" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L478-L498
241,368
esterhui/pypu
pypu/service_facebook.py
service_facebook.PrintSets
def PrintSets(self): """Prints set name and number of photos in set""" sets=self._getphotosets() for setname in sets: print("%s [%d]"%(setname,sets[setname]['number_photos']))
python
def PrintSets(self): """Prints set name and number of photos in set""" sets=self._getphotosets() for setname in sets: print("%s [%d]"%(setname,sets[setname]['number_photos']))
[ "def", "PrintSets", "(", "self", ")", ":", "sets", "=", "self", ".", "_getphotosets", "(", ")", "for", "setname", "in", "sets", ":", "print", "(", "\"%s [%d]\"", "%", "(", "setname", ",", "sets", "[", "setname", "]", "[", "'number_photos'", "]", ")", ")" ]
Prints set name and number of photos in set
[ "Prints", "set", "name", "and", "number", "of", "photos", "in", "set" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L550-L554
241,369
JNRowe/jnrbase
jnrbase/context.py
chdir
def chdir(__path: str) -> ContextManager: """Context handler to temporarily switch directories. Args: __path: Directory to change to Yields: Execution context in ``path`` """ old = os.getcwd() try: os.chdir(__path) yield finally: os.chdir(old)
python
def chdir(__path: str) -> ContextManager: """Context handler to temporarily switch directories. Args: __path: Directory to change to Yields: Execution context in ``path`` """ old = os.getcwd() try: os.chdir(__path) yield finally: os.chdir(old)
[ "def", "chdir", "(", "__path", ":", "str", ")", "->", "ContextManager", ":", "old", "=", "os", ".", "getcwd", "(", ")", "try", ":", "os", ".", "chdir", "(", "__path", ")", "yield", "finally", ":", "os", ".", "chdir", "(", "old", ")" ]
Context handler to temporarily switch directories. Args: __path: Directory to change to Yields: Execution context in ``path``
[ "Context", "handler", "to", "temporarily", "switch", "directories", "." ]
ae505ef69a9feb739b5f4e62c5a8e6533104d3ea
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/context.py#L27-L41
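A usage sketch for chdir; the jnrbase.context import path follows the path field of this record.

import os
from jnrbase.context import chdir

start = os.getcwd()
with chdir(os.path.expanduser('~')):
    print(os.getcwd())       # the home directory while inside the block
print(os.getcwd() == start)  # -> True; restored even if the body raised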
241,370
JNRowe/jnrbase
jnrbase/context.py
env
def env(**kwargs: Union[Dict[str, str], None]) -> ContextManager: """Context handler to temporarily alter environment. If you supply a value of ``None``, then the associated key will be deleted from the environment. Args: kwargs: Environment variables to override Yields: Execution context with modified environment """ old = os.environ.copy() try: os.environ.clear() # This apparent duplication is because ``putenv`` doesn’t update # ``os.environ``, and ``os.environ`` changes aren’t propagated to # subprocesses. for key, value in old.items(): os.environ[key] = value # NOQA: B003 os.putenv(key, value) for key, value in kwargs.items(): if value is None: del os.environ[key] else: os.environ[key] = value # NOQA: B003 os.putenv(key, value) yield finally: os.environ.clear() for key, value in old.items(): os.environ[key] = value # NOQA: B003 os.putenv(key, value)
python
def env(**kwargs: Union[Dict[str, str], None]) -> ContextManager: """Context handler to temporarily alter environment. If you supply a value of ``None``, then the associated key will be deleted from the environment. Args: kwargs: Environment variables to override Yields: Execution context with modified environment """ old = os.environ.copy() try: os.environ.clear() # This apparent duplication is because ``putenv`` doesn’t update # ``os.environ``, and ``os.environ`` changes aren’t propagated to # subprocesses. for key, value in old.items(): os.environ[key] = value # NOQA: B003 os.putenv(key, value) for key, value in kwargs.items(): if value is None: del os.environ[key] else: os.environ[key] = value # NOQA: B003 os.putenv(key, value) yield finally: os.environ.clear() for key, value in old.items(): os.environ[key] = value # NOQA: B003 os.putenv(key, value)
[ "def", "env", "(", "*", "*", "kwargs", ":", "Union", "[", "Dict", "[", "str", ",", "str", "]", ",", "None", "]", ")", "->", "ContextManager", ":", "old", "=", "os", ".", "environ", ".", "copy", "(", ")", "try", ":", "os", ".", "environ", ".", "clear", "(", ")", "# This apparent duplication is because ``putenv`` doesn’t update", "# ``os.environ``, and ``os.environ`` changes aren’t propagated to", "# subprocesses.", "for", "key", ",", "value", "in", "old", ".", "items", "(", ")", ":", "os", ".", "environ", "[", "key", "]", "=", "value", "# NOQA: B003", "os", ".", "putenv", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "if", "value", "is", "None", ":", "del", "os", ".", "environ", "[", "key", "]", "else", ":", "os", ".", "environ", "[", "key", "]", "=", "value", "# NOQA: B003", "os", ".", "putenv", "(", "key", ",", "value", ")", "yield", "finally", ":", "os", ".", "environ", ".", "clear", "(", ")", "for", "key", ",", "value", "in", "old", ".", "items", "(", ")", ":", "os", ".", "environ", "[", "key", "]", "=", "value", "# NOQA: B003", "os", ".", "putenv", "(", "key", ",", "value", ")" ]
Context handler to temporarily alter environment. If you supply a value of ``None``, then the associated key will be deleted from the environment. Args: kwargs: Environment variables to override Yields: Execution context with modified environment
[ "Context", "handler", "to", "temporarily", "alter", "environment", "." ]
ae505ef69a9feb739b5f4e62c5a8e6533104d3ea
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/context.py#L45-L77
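A usage sketch for env, overriding one variable and deleting another for the duration of the block; the import path follows this record, and the variable names are arbitrary.

import os
from jnrbase.context import env

os.environ['DEMO'] = 'old'
os.environ['PAGER'] = 'less'
with env(DEMO='new', PAGER=None):  # None deletes the key inside the block
    print(os.environ['DEMO'])      # -> new
    print('PAGER' in os.environ)   # -> False
print(os.environ['DEMO'])          # -> old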
241,371
stevepeak/stuffed
stuffed/address.py
Address.distance_to
def distance_to(self, address, measure="Miles", httpclient=None): """Distance to another address """ if isinstance(address, Address) and self.latlng and address.latlng: lat1, lon1 = map(float, self.latlng) lat2, lon2 = map(float, address.latlng) elif self.latlng and type(address) is tuple: lat1, lon1 = map(float, self.latlng) lat2, lon2 = address else: raise ValueError(":address must be type tuple or Address") radius = 6371 # km dlat = math.radians(lat2 - lat1) dlon = math.radians(lon2 - lon1) a = math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1)) \ * math.cos(math.radians(lat2)) * math.sin(dlon / 2) * math.sin(dlon / 2) c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) d = radius * c # d is in kilometers if measure == self.KILOMETERS: return d elif measure == self.METERS: return d * 1000 elif measure == self.MILES: return d * .621371 else: return d
python
def distance_to(self, address, measure="Miles", httpclient=None): """Distance to another address """ if isinstance(address, Address) and self.latlng and address.latlng: lat1, lon1 = map(float, self.latlng) lat2, lon2 = map(float, address.latlng) elif self.latlng and type(address) is tuple: lat1, lon1 = map(float, self.latlng) lat2, lon2 = address else: raise ValueError(":address must be type tuple or Address") radius = 6371 # km dlat = math.radians(lat2 - lat1) dlon = math.radians(lon2 - lon1) a = math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1)) \ * math.cos(math.radians(lat2)) * math.sin(dlon / 2) * math.sin(dlon / 2) c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) d = radius * c # d is in kilometers if measure == self.KILOMETERS: return d elif measure == self.METERS: return d * 1000 elif measure == self.MILES: return d * .621371 else: return d
[ "def", "distance_to", "(", "self", ",", "address", ",", "measure", "=", "\"Miles\"", ",", "httpclient", "=", "None", ")", ":", "if", "isinstance", "(", "address", ",", "Address", ")", "and", "self", ".", "latlng", "and", "address", ".", "latlng", ":", "lat1", ",", "lon1", "=", "map", "(", "float", ",", "self", ".", "latlng", ")", "lat2", ",", "lon2", "=", "map", "(", "float", ",", "address", ".", "latlng", ")", "elif", "self", ".", "latlng", "and", "type", "(", "address", ")", "is", "tuple", ":", "lat1", ",", "lon1", "=", "map", "(", "float", ",", "self", ".", "latlng", ")", "lat2", ",", "lon2", "=", "address", "else", ":", "raise", "ValueError", "(", "\":address must be type tuple or Address\"", ")", "radius", "=", "6371", "# km ", "dlat", "=", "math", ".", "radians", "(", "lat2", "-", "lat1", ")", "dlon", "=", "math", ".", "radians", "(", "lon2", "-", "lon1", ")", "a", "=", "math", ".", "sin", "(", "dlat", "/", "2", ")", "*", "math", ".", "sin", "(", "dlat", "/", "2", ")", "+", "math", ".", "cos", "(", "math", ".", "radians", "(", "lat1", ")", ")", "*", "math", ".", "cos", "(", "math", ".", "radians", "(", "lat2", ")", ")", "*", "math", ".", "sin", "(", "dlon", "/", "2", ")", "*", "math", ".", "sin", "(", "dlon", "/", "2", ")", "c", "=", "2", "*", "math", ".", "atan2", "(", "math", ".", "sqrt", "(", "a", ")", ",", "math", ".", "sqrt", "(", "1", "-", "a", ")", ")", "d", "=", "radius", "*", "c", "# d is in kilometers", "if", "measure", "==", "self", ".", "KILOMETERS", ":", "return", "d", "elif", "measure", "==", "self", ".", "METERS", ":", "return", "d", "*", "1000", "elif", "measure", "==", "self", ".", "MILES", ":", "return", "d", "*", ".621371", "else", ":", "return", "d" ]
Distance to another address
[ "Distance", "to", "another", "address" ]
cc18d5d34b36225035d618d666275c913f5f66de
https://github.com/stevepeak/stuffed/blob/cc18d5d34b36225035d618d666275c913f5f66de/stuffed/address.py#L93-L120
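The same haversine formula as a standalone check, without the Address class; the Nashville-to-LAX coordinate pair is a common reference case for it.

import math

def haversine_km(lat1, lon1, lat2, lon2):
    radius = 6371  # mean Earth radius in km, as above
    dlat = math.radians(lat2 - lat1)
    dlon = math.radians(lon2 - lon1)
    a = (math.sin(dlat / 2) ** 2
         + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2))
         * math.sin(dlon / 2) ** 2)
    return radius * 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))

print(round(haversine_km(36.12, -86.67, 33.94, -118.40)))  # -> 2887 km, approx.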
241,372
Fuyukai/ConfigMaster
configmaster/JSONConfigFile.py
json_dump_hook
def json_dump_hook(cfg, text: bool=False): """ Dumps all the data into a JSON file, or returns it as a JSON string when text is True. """ data = cfg.config.dump() if not text: json.dump(data, cfg.fd) else: return json.dumps(data)
python
def json_dump_hook(cfg, text: bool=False): """ Dumps all the data into a JSON file, or returns it as a JSON string when text is True. """ data = cfg.config.dump() if not text: json.dump(data, cfg.fd) else: return json.dumps(data)
[ "def", "json_dump_hook", "(", "cfg", ",", "text", ":", "bool", "=", "False", ")", ":", "data", "=", "cfg", ".", "config", ".", "dump", "(", ")", "if", "not", "text", ":", "json", ".", "dump", "(", "data", ",", "cfg", ".", "fd", ")", "else", ":", "return", "json", ".", "dumps", "(", "data", ")" ]
Dumps all the data into a JSON file.
[ "Dumps", "all", "the", "data", "into", "a", "JSON", "file", "." ]
8018aa415da55c84edaa8a49664f674758a14edd
https://github.com/Fuyukai/ConfigMaster/blob/8018aa415da55c84edaa8a49664f674758a14edd/configmaster/JSONConfigFile.py#L49-L58
241,373
skitazaki/python-clitool
clitool/config.py
ConfigLoader.load
def load(self, env=None): """ Load a section values of given environment. If nothing to specified, use environmental variable. If unknown environment was specified, warn it on logger. :param env: environment key to load in a coercive manner :type env: string :rtype: dict """ self._load() e = env or \ os.environ.get(RUNNING_MODE_ENVKEY, DEFAULT_RUNNING_MODE) if e in self.config: return self.config[e] logging.warn("Environment '%s' was not found.", e)
python
def load(self, env=None): """ Load a section values of given environment. If nothing to specified, use environmental variable. If unknown environment was specified, warn it on logger. :param env: environment key to load in a coercive manner :type env: string :rtype: dict """ self._load() e = env or \ os.environ.get(RUNNING_MODE_ENVKEY, DEFAULT_RUNNING_MODE) if e in self.config: return self.config[e] logging.warn("Environment '%s' was not found.", e)
[ "def", "load", "(", "self", ",", "env", "=", "None", ")", ":", "self", ".", "_load", "(", ")", "e", "=", "env", "or", "os", ".", "environ", ".", "get", "(", "RUNNING_MODE_ENVKEY", ",", "DEFAULT_RUNNING_MODE", ")", "if", "e", "in", "self", ".", "config", ":", "return", "self", ".", "config", "[", "e", "]", "logging", ".", "warn", "(", "\"Environment '%s' was not found.\"", ",", "e", ")" ]
Load a section values of given environment. If nothing to specified, use environmental variable. If unknown environment was specified, warn it on logger. :param env: environment key to load in a coercive manner :type env: string :rtype: dict
[ "Load", "a", "section", "values", "of", "given", "environment", ".", "If", "nothing", "to", "specified", "use", "environmental", "variable", ".", "If", "unknown", "environment", "was", "specified", "warn", "it", "on", "logger", "." ]
4971f8d093d51c6fd0e6cc536bbb597f78b570ab
https://github.com/skitazaki/python-clitool/blob/4971f8d093d51c6fd0e6cc536bbb597f78b570ab/clitool/config.py#L78-L92
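A usage sketch for ConfigLoader.load above: with no argument, the section is picked from the RUNNING_MODE_ENVKEY environment variable (falling back to DEFAULT_RUNNING_MODE), and an unknown section only logs a warning and returns None. Note that logging.warn is a deprecated alias for logging.warning. The constructor argument and the 'CLITOOL_ENV' variable name below are assumptions for illustration:

import os

loader = ConfigLoader('settings.yml')     # hypothetical config path

dev = loader.load('development')          # explicit env wins over the variable

os.environ['CLITOOL_ENV'] = 'production'  # assumed value of RUNNING_MODE_ENVKEY
prod = loader.load()                      # falls back to the environment variable

if prod is None:
    prod = {}                             # unknown sections are warned about, not raised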
241,374
dusty-phillips/opterator
examples/ls.py
main
def main(show_details:['-l']=False, cols:['-w', '--width']='', *files): ''' List information about a particular file or set of files :param show_details: Whether to show detailed info about files :param cols: specify screen width ''' print(files) print(show_details) print(cols)
python
def main(show_details:['-l']=False, cols:['-w', '--width']='', *files): ''' List information about a particular file or set of files :param show_details: Whether to show detailed info about files :param cols: specify screen width ''' print(files) print(show_details) print(cols)
[ "def", "main", "(", "show_details", ":", "[", "'-l'", "]", "=", "False", ",", "cols", ":", "[", "'-w'", ",", "'--width'", "]", "=", "''", ",", "*", "files", ")", ":", "print", "(", "files", ")", "print", "(", "show_details", ")", "print", "(", "cols", ")" ]
List information about a particular file or set of files :param show_details: Whether to show detailed info about files :param cols: specify screen width
[ "List", "information", "about", "a", "particular", "file", "or", "set", "of", "files" ]
84fe31f22c73dc0a3666ed82c179461b1799c257
https://github.com/dusty-phillips/opterator/blob/84fe31f22c73dc0a3666ed82c179461b1799c257/examples/ls.py#L7-L16
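For the opterator example above, the annotation lists become option flags, the *files varargs collect positional arguments, and the :param: lines double as help text. A hedged sketch of how the script is wired up, assuming the library's opterate decorator:

from opterator import opterate

@opterate
def main(show_details:['-l']=False, cols:['-w', '--width']='', *files):
    '''List information about a particular file or set of files
    :param show_details: Whether to show detailed info about files
    :param cols: specify screen width
    '''
    print(files, show_details, cols)

if __name__ == '__main__':
    main()  # parses sys.argv, e.g.: python ls.py -l --width 80 a.txt b.txt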
241,375
jpablo128/simplystatic
simplystatic/util.py
random_date
def random_date(): '''Return a valid random date.''' d = datetime.datetime.now().date() d = d - datetime.timedelta(random.randint(20,2001)) return d
python
def random_date(): '''Return a valid random date.''' d = datetime.datetime.now().date() d = d - datetime.timedelta(random.randint(20,2001)) return d
[ "def", "random_date", "(", ")", ":", "d", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "date", "(", ")", "d", "=", "d", "-", "datetime", ".", "timedelta", "(", "random", ".", "randint", "(", "20", ",", "2001", ")", ")", "return", "d" ]
Return a valid random date.
[ "Return", "a", "valid", "random", "date", "." ]
91ac579c8f34fa240bef9b87adb0116c6b40b24d
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/util.py#L65-L69
241,376
jpablo128/simplystatic
simplystatic/util.py
random_md_page
def random_md_page(): '''Generate random markdown page content.. If the parameters are zero, instead of a fixed number of elements it uses a random number. ''' # headers #, ## # blockquote > # lists * # codeblock (indent 4 spaces) # hrule, 3 or more - in a line # emphasis: word surrounded by one * or _ lines = [] lines.append("\n# " + random_title(False) + "\n") # add title lines.append("\n" + random_text(1) + "\n") #and 1 paragraphs for h in range(1,random.randint(2,5)): lines.append("\n## " + random_title(False) + "\n") # add header lines.append("\n" + random_paragraphs(random.randint(1,5)) + "\n") #and some paragraphs for sh in range(1,random.randint(1,4)): lines.append("\n### " + random_title(False) +"\n") # add subheader lines.append("\n" + random_paragraphs(random.randint(4,13)) + "\n") #and some paragraphs txt = "\n".join(lines) return txt
python
def random_md_page(): '''Generate random markdown page content.. If the parameters are zero, instead of a fixed number of elements it uses a random number. ''' # headers #, ## # blockquote > # lists * # codeblock (indent 4 spaces) # hrule, 3 or more - in a line # emphasis: word surrounded by one * or _ lines = [] lines.append("\n# " + random_title(False) + "\n") # add title lines.append("\n" + random_text(1) + "\n") #and 1 paragraphs for h in range(1,random.randint(2,5)): lines.append("\n## " + random_title(False) + "\n") # add header lines.append("\n" + random_paragraphs(random.randint(1,5)) + "\n") #and some paragraphs for sh in range(1,random.randint(1,4)): lines.append("\n### " + random_title(False) +"\n") # add subheader lines.append("\n" + random_paragraphs(random.randint(4,13)) + "\n") #and some paragraphs txt = "\n".join(lines) return txt
[ "def", "random_md_page", "(", ")", ":", "# headers #, ##", "# blockquote >", "# lists *", "# codeblock (indent 4 spaces)", "# hrule, 3 or more - in a line", "# emphasis: word surrounded by one * or _", "lines", "=", "[", "]", "lines", ".", "append", "(", "\"\\n# \"", "+", "random_title", "(", "False", ")", "+", "\"\\n\"", ")", "# add title", "lines", ".", "append", "(", "\"\\n\"", "+", "random_text", "(", "1", ")", "+", "\"\\n\"", ")", "#and 1 paragraphs", "for", "h", "in", "range", "(", "1", ",", "random", ".", "randint", "(", "2", ",", "5", ")", ")", ":", "lines", ".", "append", "(", "\"\\n## \"", "+", "random_title", "(", "False", ")", "+", "\"\\n\"", ")", "# add header", "lines", ".", "append", "(", "\"\\n\"", "+", "random_paragraphs", "(", "random", ".", "randint", "(", "1", ",", "5", ")", ")", "+", "\"\\n\"", ")", "#and some paragraphs", "for", "sh", "in", "range", "(", "1", ",", "random", ".", "randint", "(", "1", ",", "4", ")", ")", ":", "lines", ".", "append", "(", "\"\\n### \"", "+", "random_title", "(", "False", ")", "+", "\"\\n\"", ")", "# add subheader", "lines", ".", "append", "(", "\"\\n\"", "+", "random_paragraphs", "(", "random", ".", "randint", "(", "4", ",", "13", ")", ")", "+", "\"\\n\"", ")", "#and some paragraphs", "txt", "=", "\"\\n\"", ".", "join", "(", "lines", ")", "return", "txt" ]
Generate random markdown page content.. If the parameters are zero, instead of a fixed number of elements it uses a random number.
[ "Generate", "random", "markdown", "page", "content", ".." ]
91ac579c8f34fa240bef9b87adb0116c6b40b24d
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/util.py#L83-L106
241,377
thomasbiddle/Kippt-for-Python
kippt/lists.py
Lists.all
def all(self, **args): """ Return all Lists. """ limit = args['limit'] if 'limit' in args else 20 offset = args['offset'] if 'offset' in args else 0 r = requests.get( "https://kippt.com/api/lists?limit=%s&offset=%s" % (limit, offset), headers=self.kippt.header ) return (r.json())
python
def all(self, **args): """ Return all Lists. """ limit = args['limit'] if 'limit' in args else 20 offset = args['offset'] if 'offset' in args else 0 r = requests.get( "https://kippt.com/api/lists?limit=%s&offset=%s" % (limit, offset), headers=self.kippt.header ) return (r.json())
[ "def", "all", "(", "self", ",", "*", "*", "args", ")", ":", "limit", "=", "args", "[", "'limit'", "]", "if", "'limit'", "in", "args", "else", "20", "offset", "=", "args", "[", "'offset'", "]", "if", "'offset'", "in", "args", "else", "0", "r", "=", "requests", ".", "get", "(", "\"https://kippt.com/api/lists?limit=%s&offset=%s\"", "%", "(", "limit", ",", "offset", ")", ",", "headers", "=", "self", ".", "kippt", ".", "header", ")", "return", "(", "r", ".", "json", "(", ")", ")" ]
Return all Lists.
[ "Return", "all", "Lists", "." ]
dddd0ff84d70ccf2d84e50e3cff7aad89f9c1267
https://github.com/thomasbiddle/Kippt-for-Python/blob/dddd0ff84d70ccf2d84e50e3cff7aad89f9c1267/kippt/lists.py#L19-L30
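A paging sketch for Lists.all above, which defaults to limit=20, offset=0. The kippt_client object and the 'objects' key in the response are assumptions based on typical Kippt API payloads:

lists_api = Lists(kippt_client)  # kippt_client: an authenticated Kippt instance (assumed)

offset = 0
while True:
    page = lists_api.all(limit=50, offset=offset)
    objects = page.get('objects', [])  # response shape assumed
    if not objects:
        break
    for lst in objects:
        print(lst.get('title'))
    offset += 50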
241,378
thomasbiddle/Kippt-for-Python
kippt/lists.py
Lists.create
def create(self, title, **args): """ Create a new Kippt List. Parameters: - title (Required) - args Dictionary of other fields Accepted fields can be found here: https://github.com/kippt/api-documentation/blob/master/objects/list.md """ # Merge our title as a parameter and JSONify it. data = json.dumps(dict({'title': title}, **args)) r = requests.post( "https://kippt.com/api/lists", headers=self.kippt.header, data=data ) return (r.json())
python
def create(self, title, **args): """ Create a new Kippt List. Parameters: - title (Required) - args Dictionary of other fields Accepted fields can be found here: https://github.com/kippt/api-documentation/blob/master/objects/list.md """ # Merge our title as a parameter and JSONify it. data = json.dumps(dict({'title': title}, **args)) r = requests.post( "https://kippt.com/api/lists", headers=self.kippt.header, data=data ) return (r.json())
[ "def", "create", "(", "self", ",", "title", ",", "*", "*", "args", ")", ":", "# Merge our title as a parameter and JSONify it.", "data", "=", "json", ".", "dumps", "(", "dict", "(", "{", "'title'", ":", "title", "}", ",", "*", "*", "args", ")", ")", "r", "=", "requests", ".", "post", "(", "\"https://kippt.com/api/lists\"", ",", "headers", "=", "self", ".", "kippt", ".", "header", ",", "data", "=", "data", ")", "return", "(", "r", ".", "json", "(", ")", ")" ]
Create a new Kippt List. Parameters: - title (Required) - args Dictionary of other fields Accepted fields can be found here: https://github.com/kippt/api-documentation/blob/master/objects/list.md
[ "Create", "a", "new", "Kippt", "List", "." ]
dddd0ff84d70ccf2d84e50e3cff7aad89f9c1267
https://github.com/thomasbiddle/Kippt-for-Python/blob/dddd0ff84d70ccf2d84e50e3cff7aad89f9c1267/kippt/lists.py#L48-L65
241,379
inveniosoftware-contrib/record-recommender
record_recommender/cli.py
cli
def cli(config_path, verbose): """Record-Recommender command line version.""" global config, store if not config_path: config_path = '/etc/record_recommender.yml' config = get_config(config_path) setup_logging(config) store = FileStore(config)
python
def cli(config_path, verbose): """Record-Recommender command line version.""" global config, store if not config_path: config_path = '/etc/record_recommender.yml' config = get_config(config_path) setup_logging(config) store = FileStore(config)
[ "def", "cli", "(", "config_path", ",", "verbose", ")", ":", "global", "config", ",", "store", "if", "not", "config_path", ":", "config_path", "=", "'/etc/record_recommender.yml'", "config", "=", "get_config", "(", "config_path", ")", "setup_logging", "(", "config", ")", "store", "=", "FileStore", "(", "config", ")" ]
Record-Recommender command line version.
[ "Record", "-", "Recommender", "command", "line", "version", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/cli.py#L49-L56
241,380
inveniosoftware-contrib/record-recommender
record_recommender/cli.py
fetch
def fetch(weeks, force): """Fetch newest PageViews and Downloads.""" weeks = get_last_weeks(weeks) print(weeks) recommender = RecordRecommender(config) recommender.fetch_weeks(weeks, overwrite=force)
python
def fetch(weeks, force): """Fetch newest PageViews and Downloads.""" weeks = get_last_weeks(weeks) print(weeks) recommender = RecordRecommender(config) recommender.fetch_weeks(weeks, overwrite=force)
[ "def", "fetch", "(", "weeks", ",", "force", ")", ":", "weeks", "=", "get_last_weeks", "(", "weeks", ")", "print", "(", "weeks", ")", "recommender", "=", "RecordRecommender", "(", "config", ")", "recommender", ".", "fetch_weeks", "(", "weeks", ",", "overwrite", "=", "force", ")" ]
Fetch newest PageViews and Downloads.
[ "Fetch", "newest", "PageViews", "and", "Downloads", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/cli.py#L75-L80
241,381
inveniosoftware-contrib/record-recommender
record_recommender/cli.py
update_recommender
def update_recommender(ctx, weeks, processes): """ Download and build the recommendations. - Fetch new statistics from the current week. - Generate recommendations. - Update the recommendations. """ weeks = get_last_weeks(weeks) recommender = RecordRecommender(config) # Redownload incomplete weeks first_weeks = weeks[:2] recommender.fetch_weeks(first_weeks, overwrite=True) # Download missing weeks recommender.fetch_weeks(weeks, overwrite=False) print("Build Profiles") ctx.invoke(profiles, weeks=weeks) print("Generate Recommendations") ctx.invoke(build, processes=processes)
python
def update_recommender(ctx, weeks, processes): """ Download and build the recommendations. - Fetch new statistics from the current week. - Generate recommendations. - Update the recommendations. """ weeks = get_last_weeks(weeks) recommender = RecordRecommender(config) # Redownload incomplete weeks first_weeks = weeks[:2] recommender.fetch_weeks(first_weeks, overwrite=True) # Download missing weeks recommender.fetch_weeks(weeks, overwrite=False) print("Build Profiles") ctx.invoke(profiles, weeks=weeks) print("Generate Recommendations") ctx.invoke(build, processes=processes)
[ "def", "update_recommender", "(", "ctx", ",", "weeks", ",", "processes", ")", ":", "weeks", "=", "get_last_weeks", "(", "weeks", ")", "recommender", "=", "RecordRecommender", "(", "config", ")", "# Redownload incomplete weeks", "first_weeks", "=", "weeks", "[", ":", "2", "]", "recommender", ".", "fetch_weeks", "(", "first_weeks", ",", "overwrite", "=", "True", ")", "# Download missing weeks", "recommender", ".", "fetch_weeks", "(", "weeks", ",", "overwrite", "=", "False", ")", "print", "(", "\"Build Profiles\"", ")", "ctx", ".", "invoke", "(", "profiles", ",", "weeks", "=", "weeks", ")", "print", "(", "\"Generate Recommendations\"", ")", "ctx", ".", "invoke", "(", "build", ",", "processes", "=", "processes", ")" ]
Download and build the recommendations. - Fetch new statistics from the current week. - Generate recommendations. - Update the recommendations.
[ "Download", "and", "build", "the", "recommendations", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/cli.py#L87-L107
241,382
inveniosoftware-contrib/record-recommender
record_recommender/cli.py
profiles
def profiles(weeks): """ Number of weeks to build. Starting with the current week. """ profiles = Profiles(store) weeks = get_last_weeks(weeks) if isinstance(weeks, int) else weeks print(weeks) profiles.create(weeks)
python
def profiles(weeks): """ Number of weeks to build. Starting with the current week. """ profiles = Profiles(store) weeks = get_last_weeks(weeks) if isinstance(weeks, int) else weeks print(weeks) profiles.create(weeks)
[ "def", "profiles", "(", "weeks", ")", ":", "profiles", "=", "Profiles", "(", "store", ")", "weeks", "=", "get_last_weeks", "(", "weeks", ")", "if", "isinstance", "(", "weeks", ",", "int", ")", "else", "weeks", "print", "(", "weeks", ")", "profiles", ".", "create", "(", "weeks", ")" ]
Number of weeks to build. Starting with the current week.
[ "Number", "of", "weeks", "to", "build", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/cli.py#L112-L121
241,383
inveniosoftware-contrib/record-recommender
record_recommender/cli.py
build
def build(processes): """ Calculate all recommendations using the number of specified processes. The recommendations are calculated from the generated Profiles file. """ recommender = RecordRecommender(config) recommender.create_all_recommendations(processes, ip_views=True)
python
def build(processes): """ Calculate all recommendations using the number of specified processes. The recommendations are calculated from the generated Profiles file. """ recommender = RecordRecommender(config) recommender.create_all_recommendations(processes, ip_views=True)
[ "def", "build", "(", "processes", ")", ":", "recommender", "=", "RecordRecommender", "(", "config", ")", "recommender", ".", "create_all_recommendations", "(", "processes", ",", "ip_views", "=", "True", ")" ]
Calculate all recommendations using the number of specified processes. The recommendations are calculated from the generated Profiles file.
[ "Calculate", "all", "recommendations", "using", "the", "number", "of", "specified", "processes", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/cli.py#L126-L133
241,384
klmitch/appathy
appathy/response.py
ResponseObject._bind
def _bind(self, _descriptor): """ Bind a ResponseObject to a given action descriptor. This updates the default HTTP response code and selects the appropriate content type and serializer for the response. """ # If the method has a default code, use it self._defcode = getattr(_descriptor.method, '_wsgi_code', 200) # Set up content type and serializer self.content_type, self.serializer = _descriptor.serializer(self.req)
python
def _bind(self, _descriptor): """ Bind a ResponseObject to a given action descriptor. This updates the default HTTP response code and selects the appropriate content type and serializer for the response. """ # If the method has a default code, use it self._defcode = getattr(_descriptor.method, '_wsgi_code', 200) # Set up content type and serializer self.content_type, self.serializer = _descriptor.serializer(self.req)
[ "def", "_bind", "(", "self", ",", "_descriptor", ")", ":", "# If the method has a default code, use it", "self", ".", "_defcode", "=", "getattr", "(", "_descriptor", ".", "method", ",", "'_wsgi_code'", ",", "200", ")", "# Set up content type and serializer", "self", ".", "content_type", ",", "self", ".", "serializer", "=", "_descriptor", ".", "serializer", "(", "self", ".", "req", ")" ]
Bind a ResponseObject to a given action descriptor. This updates the default HTTP response code and selects the appropriate content type and serializer for the response.
[ "Bind", "a", "ResponseObject", "to", "a", "given", "action", "descriptor", ".", "This", "updates", "the", "default", "HTTP", "response", "code", "and", "selects", "the", "appropriate", "content", "type", "and", "serializer", "for", "the", "response", "." ]
a10aa7d21d38622e984a8fe106ab37114af90dc2
https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/response.py#L122-L133
241,385
klmitch/appathy
appathy/response.py
ResponseObject._serialize
def _serialize(self): """ Serialize the ResponseObject. Returns a webob `Response` object. """ # Do something appropriate if the response object is unbound if self._defcode is None: raise exceptions.UnboundResponse() # Build the response resp = self.response_class(request=self.req, status=self.code, headerlist=self._headers.items()) # Do we have a body? if self.result: resp.content_type = self.content_type resp.body = self.serializer(self.result) # Return the response return resp
python
def _serialize(self): """ Serialize the ResponseObject. Returns a webob `Response` object. """ # Do something appropriate if the response object is unbound if self._defcode is None: raise exceptions.UnboundResponse() # Build the response resp = self.response_class(request=self.req, status=self.code, headerlist=self._headers.items()) # Do we have a body? if self.result: resp.content_type = self.content_type resp.body = self.serializer(self.result) # Return the response return resp
[ "def", "_serialize", "(", "self", ")", ":", "# Do something appropriate if the response object is unbound", "if", "self", ".", "_defcode", "is", "None", ":", "raise", "exceptions", ".", "UnboundResponse", "(", ")", "# Build the response", "resp", "=", "self", ".", "response_class", "(", "request", "=", "self", ".", "req", ",", "status", "=", "self", ".", "code", ",", "headerlist", "=", "self", ".", "_headers", ".", "items", "(", ")", ")", "# Do we have a body?", "if", "self", ".", "result", ":", "resp", ".", "content_type", "=", "self", ".", "content_type", "resp", ".", "body", "=", "self", ".", "serializer", "(", "self", ".", "result", ")", "# Return the response", "return", "resp" ]
Serialize the ResponseObject. Returns a webob `Response` object.
[ "Serialize", "the", "ResponseObject", ".", "Returns", "a", "webob", "Response", "object", "." ]
a10aa7d21d38622e984a8fe106ab37114af90dc2
https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/response.py#L135-L155
241,386
klmitch/appathy
appathy/response.py
ResponseObject.code
def code(self): """ The HTTP response code associated with this ResponseObject. If instantiated directly without overriding the code, returns 200 even if the default for the method is some other value. Can be set or deleted; in the latter case, the default will be restored. """ if self._code is not None: return self._code elif self._defcode is not None: return self._defcode return 200
python
def code(self): """ The HTTP response code associated with this ResponseObject. If instantiated directly without overriding the code, returns 200 even if the default for the method is some other value. Can be set or deleted; in the latter case, the default will be restored. """ if self._code is not None: return self._code elif self._defcode is not None: return self._defcode return 200
[ "def", "code", "(", "self", ")", ":", "if", "self", ".", "_code", "is", "not", "None", ":", "return", "self", ".", "_code", "elif", "self", ".", "_defcode", "is", "not", "None", ":", "return", "self", ".", "_defcode", "return", "200" ]
The HTTP response code associated with this ResponseObject. If instantiated directly without overriding the code, returns 200 even if the default for the method is some other value. Can be set or deleted; in the latter case, the default will be restored.
[ "The", "HTTP", "response", "code", "associated", "with", "this", "ResponseObject", ".", "If", "instantiated", "directly", "without", "overriding", "the", "code", "returns", "200", "even", "if", "the", "default", "for", "the", "method", "is", "some", "other", "value", ".", "Can", "be", "set", "or", "deleted", ";", "in", "the", "latter", "case", "the", "default", "will", "be", "restored", "." ]
a10aa7d21d38622e984a8fe106ab37114af90dc2
https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/response.py#L158-L171
241,387
deviantony/valigator
valigator/notify.py
report_message
def report_message(report): """Report message.""" body = 'Error: return code != 0\n\n' body += 'Archive: {}\n\n'.format(report['archive']) body += 'Docker image: {}\n\n'.format(report['image']) body += 'Docker container: {}\n\n'.format(report['container_id']) return body
python
def report_message(report): """Report message.""" body = 'Error: return code != 0\n\n' body += 'Archive: {}\n\n'.format(report['archive']) body += 'Docker image: {}\n\n'.format(report['image']) body += 'Docker container: {}\n\n'.format(report['container_id']) return body
[ "def", "report_message", "(", "report", ")", ":", "body", "=", "'Error: return code != 0\\n\\n'", "body", "+=", "'Archive: {}\\n\\n'", ".", "format", "(", "report", "[", "'archive'", "]", ")", "body", "+=", "'Docker image: {}\\n\\n'", ".", "format", "(", "report", "[", "'image'", "]", ")", "body", "+=", "'Docker container: {}\\n\\n'", ".", "format", "(", "report", "[", "'container_id'", "]", ")", "return", "body" ]
Report message.
[ "Report", "message", "." ]
0557029bc58ea1270e358c14ca382d3807ed5b6f
https://github.com/deviantony/valigator/blob/0557029bc58ea1270e358c14ca382d3807ed5b6f/valigator/notify.py#L53-L59
241,388
deviantony/valigator
valigator/notify.py
task_failure_message
def task_failure_message(task_report): """Task failure message.""" trace_list = traceback.format_tb(task_report['traceback']) body = 'Error: task failure\n\n' body += 'Task ID: {}\n\n'.format(task_report['task_id']) body += 'Archive: {}\n\n'.format(task_report['archive']) body += 'Docker image: {}\n\n'.format(task_report['image']) body += 'Exception: {}\n\n'.format(task_report['exception']) body += 'Traceback:\n {} {}'.format( string.join(trace_list[:-1], ''), trace_list[-1]) return body
python
def task_failure_message(task_report): """Task failure message.""" trace_list = traceback.format_tb(task_report['traceback']) body = 'Error: task failure\n\n' body += 'Task ID: {}\n\n'.format(task_report['task_id']) body += 'Archive: {}\n\n'.format(task_report['archive']) body += 'Docker image: {}\n\n'.format(task_report['image']) body += 'Exception: {}\n\n'.format(task_report['exception']) body += 'Traceback:\n {} {}'.format( string.join(trace_list[:-1], ''), trace_list[-1]) return body
[ "def", "task_failure_message", "(", "task_report", ")", ":", "trace_list", "=", "traceback", ".", "format_tb", "(", "task_report", "[", "'traceback'", "]", ")", "body", "=", "'Error: task failure\\n\\n'", "body", "+=", "'Task ID: {}\\n\\n'", ".", "format", "(", "task_report", "[", "'task_id'", "]", ")", "body", "+=", "'Archive: {}\\n\\n'", ".", "format", "(", "task_report", "[", "'archive'", "]", ")", "body", "+=", "'Docker image: {}\\n\\n'", ".", "format", "(", "task_report", "[", "'image'", "]", ")", "body", "+=", "'Exception: {}\\n\\n'", ".", "format", "(", "task_report", "[", "'exception'", "]", ")", "body", "+=", "'Traceback:\\n {} {}'", ".", "format", "(", "string", ".", "join", "(", "trace_list", "[", ":", "-", "1", "]", ",", "''", ")", ",", "trace_list", "[", "-", "1", "]", ")", "return", "body" ]
Task failure message.
[ "Task", "failure", "message", "." ]
0557029bc58ea1270e358c14ca382d3807ed5b6f
https://github.com/deviantony/valigator/blob/0557029bc58ea1270e358c14ca382d3807ed5b6f/valigator/notify.py#L62-L72
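Portability note for task_failure_message above: string.join(seq, sep) exists only in Python 2; under Python 3 the string module has no join and the call raises AttributeError. The equivalent str.join spelling works on both:

body += 'Traceback:\n {} {}'.format(
    ''.join(trace_list[:-1]),  # replaces string.join(trace_list[:-1], '')
    trace_list[-1])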
241,389
deviantony/valigator
valigator/notify.py
MailNotifier.send_email
def send_email(self, message): """Initiate a SMTP session and send an email.""" msg = MIMEMultipart() msg['From'] = self.from_address msg['To'] = self.to_address msg['Subject'] = self.title msg.attach(MIMEText('<pre>' + cgi.escape(message) + '</pre>', 'html')) smtp = smtplib.SMTP(self.server, self.port, timeout=self.timeout) if self.tls_auth: smtp.starttls() smtp.login(self.user, self.password) smtp.sendmail(self.from_address, self.to_address, msg.as_string()) smtp.quit()
python
def send_email(self, message): """Initiate a SMTP session and send an email.""" msg = MIMEMultipart() msg['From'] = self.from_address msg['To'] = self.to_address msg['Subject'] = self.title msg.attach(MIMEText('<pre>' + cgi.escape(message) + '</pre>', 'html')) smtp = smtplib.SMTP(self.server, self.port, timeout=self.timeout) if self.tls_auth: smtp.starttls() smtp.login(self.user, self.password) smtp.sendmail(self.from_address, self.to_address, msg.as_string()) smtp.quit()
[ "def", "send_email", "(", "self", ",", "message", ")", ":", "msg", "=", "MIMEMultipart", "(", ")", "msg", "[", "'From'", "]", "=", "self", ".", "from_address", "msg", "[", "'To'", "]", "=", "self", ".", "to_address", "msg", "[", "'Subject'", "]", "=", "self", ".", "title", "msg", ".", "attach", "(", "MIMEText", "(", "'<pre>'", "+", "cgi", ".", "escape", "(", "message", ")", "+", "'</pre>'", ",", "'html'", ")", ")", "smtp", "=", "smtplib", ".", "SMTP", "(", "self", ".", "server", ",", "self", ".", "port", ",", "timeout", "=", "self", ".", "timeout", ")", "if", "self", ".", "tls_auth", ":", "smtp", ".", "starttls", "(", ")", "smtp", ".", "login", "(", "self", ".", "user", ",", "self", ".", "password", ")", "smtp", ".", "sendmail", "(", "self", ".", "from_address", ",", "self", ".", "to_address", ",", "msg", ".", "as_string", "(", ")", ")", "smtp", ".", "quit", "(", ")" ]
Initiate a SMTP session and send an email.
[ "Initiate", "a", "SMTP", "session", "and", "send", "an", "email", "." ]
0557029bc58ea1270e358c14ca382d3807ed5b6f
https://github.com/deviantony/valigator/blob/0557029bc58ea1270e358c14ca382d3807ed5b6f/valigator/notify.py#L27-L40
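Portability note for send_email above: cgi.escape was deprecated in Python 3.2 and removed in 3.8; html.escape is the replacement (it additionally escapes quotes by default, which is harmless inside a <pre> block):

import html

msg.attach(MIMEText('<pre>' + html.escape(message) + '</pre>', 'html'))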
241,390
honzamach/pynspect
pynspect/jpath.py
jpath_parse
def jpath_parse(jpath): """ Parse given JPath into chunks. Returns list of dictionaries describing all of the JPath chunks. :param str jpath: JPath to be parsed into chunks :return: JPath chunks as list of dicts :rtype: :py:class:`list` :raises JPathException: in case of invalid JPath syntax """ result = [] breadcrumbs = [] # Split JPath into chunks based on '.' character. chunks = jpath.split('.') for chnk in chunks: match = RE_JPATH_CHUNK.match(chnk) if match: res = {} # Record whole match. res['m'] = chnk # Record breadcrumb path. breadcrumbs.append(chnk) res['p'] = '.'.join(breadcrumbs) # Handle node name. res['n'] = match.group(1) # Handle node index (optional, may be omitted). if match.group(2): res['i'] = match.group(3) if str(res['i']) == '#': res['i'] = -1 elif str(res['i']) == '*': pass else: res['i'] = int(res['i']) - 1 result.append(res) else: raise JPathException("Invalid JPath chunk '{}'".format(chnk)) return result
python
def jpath_parse(jpath): """ Parse given JPath into chunks. Returns list of dictionaries describing all of the JPath chunks. :param str jpath: JPath to be parsed into chunks :return: JPath chunks as list of dicts :rtype: :py:class:`list` :raises JPathException: in case of invalid JPath syntax """ result = [] breadcrumbs = [] # Split JPath into chunks based on '.' character. chunks = jpath.split('.') for chnk in chunks: match = RE_JPATH_CHUNK.match(chnk) if match: res = {} # Record whole match. res['m'] = chnk # Record breadcrumb path. breadcrumbs.append(chnk) res['p'] = '.'.join(breadcrumbs) # Handle node name. res['n'] = match.group(1) # Handle node index (optional, may be omitted). if match.group(2): res['i'] = match.group(3) if str(res['i']) == '#': res['i'] = -1 elif str(res['i']) == '*': pass else: res['i'] = int(res['i']) - 1 result.append(res) else: raise JPathException("Invalid JPath chunk '{}'".format(chnk)) return result
[ "def", "jpath_parse", "(", "jpath", ")", ":", "result", "=", "[", "]", "breadcrumbs", "=", "[", "]", "# Split JPath into chunks based on '.' character.", "chunks", "=", "jpath", ".", "split", "(", "'.'", ")", "for", "chnk", "in", "chunks", ":", "match", "=", "RE_JPATH_CHUNK", ".", "match", "(", "chnk", ")", "if", "match", ":", "res", "=", "{", "}", "# Record whole match.", "res", "[", "'m'", "]", "=", "chnk", "# Record breadcrumb path.", "breadcrumbs", ".", "append", "(", "chnk", ")", "res", "[", "'p'", "]", "=", "'.'", ".", "join", "(", "breadcrumbs", ")", "# Handle node name.", "res", "[", "'n'", "]", "=", "match", ".", "group", "(", "1", ")", "# Handle node index (optional, may be omitted).", "if", "match", ".", "group", "(", "2", ")", ":", "res", "[", "'i'", "]", "=", "match", ".", "group", "(", "3", ")", "if", "str", "(", "res", "[", "'i'", "]", ")", "==", "'#'", ":", "res", "[", "'i'", "]", "=", "-", "1", "elif", "str", "(", "res", "[", "'i'", "]", ")", "==", "'*'", ":", "pass", "else", ":", "res", "[", "'i'", "]", "=", "int", "(", "res", "[", "'i'", "]", ")", "-", "1", "result", ".", "append", "(", "res", ")", "else", ":", "raise", "JPathException", "(", "\"Invalid JPath chunk '{}'\"", ".", "format", "(", "chnk", ")", ")", "return", "result" ]
Parse given JPath into chunks. Returns list of dictionaries describing all of the JPath chunks. :param str jpath: JPath to be parsed into chunks :return: JPath chunks as list of dicts :rtype: :py:class:`list` :raises JPathException: in case of invalid JPath syntax
[ "Parse", "given", "JPath", "into", "chunks", "." ]
0582dcc1f7aafe50e25a21c792ea1b3367ea5881
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/jpath.py#L191-L235
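A worked example of the chunk dicts produced by jpath_parse above — 'm' is the raw chunk, 'p' the breadcrumb path, 'n' the node name, and 'i' the optional index, where JPath indices are 1-based ('[1]' becomes 0), '[#]' becomes -1, and '[*]' is kept as the string '*':

jpath_parse('Targets[1].IP4[*]')
# [{'m': 'Targets[1]', 'p': 'Targets[1]', 'n': 'Targets', 'i': 0},
#  {'m': 'IP4[*]', 'p': 'Targets[1].IP4[*]', 'n': 'IP4', 'i': '*'}]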
241,391
honzamach/pynspect
pynspect/jpath.py
jpath_values
def jpath_values(structure, jpath): """ Return all values at given JPath within given data structure. For performance reasons this method is intentionally not written as recursive. :param str structure: data structure to be searched :param str jpath: JPath to be evaluated :return: found values as a list :rtype: :py:class:`list` """ # Current working node set. nodes_a = [structure] # Next iteration working node set. nodes_b = [] # Process sequentially all JPath chunks. chunks = jpath_parse_c(jpath) for chnk in chunks: # Process all currently active nodes. for node in nodes_a: key = chnk['n'] if not isinstance(node, dict) and not isinstance(node, collections.Mapping): continue # Process indexed nodes. if 'i' in chnk: idx = chnk['i'] # Skip the node, if the key does not exist, the value is not # a list-like object or the list is empty. if not key in node or not (isinstance(node[key], (list, collections.MutableSequence))) or not node[key]: continue try: # Handle '*' special index - append all nodes. if str(idx) == '*': nodes_b.extend(node[key]) # Append only node at particular index. else: nodes_b.append(node[key][idx]) except: pass # Process unindexed nodes. else: # Skip the node, if the key does not exist. if not key in node: continue # Handle list values - expand them. if isinstance(node[key], (list, collections.MutableSequence)): for i in node[key]: nodes_b.append(i) # Handle scalar values. else: nodes_b.append(node[key]) nodes_a = nodes_b nodes_b = [] return nodes_a
python
def jpath_values(structure, jpath): """ Return all values at given JPath within given data structure. For performance reasons this method is intentionally not written as recursive. :param str structure: data structure to be searched :param str jpath: JPath to be evaluated :return: found values as a list :rtype: :py:class:`list` """ # Current working node set. nodes_a = [structure] # Next iteration working node set. nodes_b = [] # Process sequentially all JPath chunks. chunks = jpath_parse_c(jpath) for chnk in chunks: # Process all currently active nodes. for node in nodes_a: key = chnk['n'] if not isinstance(node, dict) and not isinstance(node, collections.Mapping): continue # Process indexed nodes. if 'i' in chnk: idx = chnk['i'] # Skip the node, if the key does not exist, the value is not # a list-like object or the list is empty. if not key in node or not (isinstance(node[key], (list, collections.MutableSequence))) or not node[key]: continue try: # Handle '*' special index - append all nodes. if str(idx) == '*': nodes_b.extend(node[key]) # Append only node at particular index. else: nodes_b.append(node[key][idx]) except: pass # Process unindexed nodes. else: # Skip the node, if the key does not exist. if not key in node: continue # Handle list values - expand them. if isinstance(node[key], (list, collections.MutableSequence)): for i in node[key]: nodes_b.append(i) # Handle scalar values. else: nodes_b.append(node[key]) nodes_a = nodes_b nodes_b = [] return nodes_a
[ "def", "jpath_values", "(", "structure", ",", "jpath", ")", ":", "# Current working node set.", "nodes_a", "=", "[", "structure", "]", "# Next iteration working node set.", "nodes_b", "=", "[", "]", "# Process sequentially all JPath chunks.", "chunks", "=", "jpath_parse_c", "(", "jpath", ")", "for", "chnk", "in", "chunks", ":", "# Process all currently active nodes.", "for", "node", "in", "nodes_a", ":", "key", "=", "chnk", "[", "'n'", "]", "if", "not", "isinstance", "(", "node", ",", "dict", ")", "and", "not", "isinstance", "(", "node", ",", "collections", ".", "Mapping", ")", ":", "continue", "# Process indexed nodes.", "if", "'i'", "in", "chnk", ":", "idx", "=", "chnk", "[", "'i'", "]", "# Skip the node, if the key does not exist, the value is not", "# a list-like object or the list is empty.", "if", "not", "key", "in", "node", "or", "not", "(", "isinstance", "(", "node", "[", "key", "]", ",", "(", "list", ",", "collections", ".", "MutableSequence", ")", ")", ")", "or", "not", "node", "[", "key", "]", ":", "continue", "try", ":", "# Handle '*' special index - append all nodes.", "if", "str", "(", "idx", ")", "==", "'*'", ":", "nodes_b", ".", "extend", "(", "node", "[", "key", "]", ")", "# Append only node at particular index.", "else", ":", "nodes_b", ".", "append", "(", "node", "[", "key", "]", "[", "idx", "]", ")", "except", ":", "pass", "# Process unindexed nodes.", "else", ":", "# Skip the node, if the key does not exist.", "if", "not", "key", "in", "node", ":", "continue", "# Handle list values - expand them.", "if", "isinstance", "(", "node", "[", "key", "]", ",", "(", "list", ",", "collections", ".", "MutableSequence", ")", ")", ":", "for", "i", "in", "node", "[", "key", "]", ":", "nodes_b", ".", "append", "(", "i", ")", "# Handle scalar values.", "else", ":", "nodes_b", ".", "append", "(", "node", "[", "key", "]", ")", "nodes_a", "=", "nodes_b", "nodes_b", "=", "[", "]", "return", "nodes_a" ]
Return all values at given JPath within given data structure. For performance reasons this method is intentionally not written as recursive. :param str structure: data structure to be searched :param str jpath: JPath to be evaluated :return: found values as a list :rtype: :py:class:`list`
[ "Return", "all", "values", "at", "given", "JPath", "within", "given", "data", "structure", "." ]
0582dcc1f7aafe50e25a21c792ea1b3367ea5881
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/jpath.py#L252-L313
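A small worked example for jpath_values above, which always returns a list because every chunk may fan out (list values are expanded, '[*]' keeps all elements). Note the code relies on collections.Mapping / collections.MutableSequence, aliases that moved to collections.abc and were removed from the top-level module in Python 3.10:

msg = {
    'Source': [
        {'IP4': ['192.0.2.1', '192.0.2.2']},
        {'IP4': ['198.51.100.7']},
    ]
}

jpath_values(msg, 'Source.IP4')      # ['192.0.2.1', '192.0.2.2', '198.51.100.7']
jpath_values(msg, 'Source[1].IP4')   # ['192.0.2.1', '192.0.2.2'] (indices are 1-based)
jpath_values(msg, 'Source.Missing')  # []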
241,392
honzamach/pynspect
pynspect/jpath.py
jpath_exists
def jpath_exists(structure, jpath): """ Check if node at given JPath within given data structure does exist. :param str structure: data structure to be searched :param str jpath: JPath to be evaluated :return: True or False :rtype: bool """ result = jpath_value(structure, jpath) if not result is None: return True return False
python
def jpath_exists(structure, jpath): """ Check if node at given JPath within given data structure does exist. :param str structure: data structure to be searched :param str jpath: JPath to be evaluated :return: True or False :rtype: bool """ result = jpath_value(structure, jpath) if not result is None: return True return False
[ "def", "jpath_exists", "(", "structure", ",", "jpath", ")", ":", "result", "=", "jpath_value", "(", "structure", ",", "jpath", ")", "if", "not", "result", "is", "None", ":", "return", "True", "return", "False" ]
Check if node at given JPath within given data structure does exist. :param str structure: data structure to be searched :param str jpath: JPath to be evaluated :return: True or False :rtype: bool
[ "Check", "if", "node", "at", "given", "JPath", "within", "given", "data", "structure", "does", "exist", "." ]
0582dcc1f7aafe50e25a21c792ea1b3367ea5881
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/jpath.py#L333-L345
241,393
honzamach/pynspect
pynspect/jpath.py
jpath_set
def jpath_set(structure, jpath, value, overwrite = True, unique = False): """ Set given JPath to given value within given structure. For performance reasons this method is intentionally not written as recursive. :param str structure: data structure to be searched :param str jpath: JPath to be evaluated :param any value: value of any type to be set at given path :param bool overwrite: enable/disable overwriting of already existing value :param bool unique: ensure uniqueness of value, works only for lists :return: numerical return code, one of the (:py:data:`RC_VALUE_SET`, :py:data:`RC_VALUE_EXISTS`, :py:data:`RC_VALUE_DUPLICATE`) :rtype: int """ chunks = jpath_parse_c(jpath) size = len(chunks) - 1 current = structure # Process chunks in order, enumeration is used for detection of the last JPath chunk. for i, chnk in enumerate(chunks): key = chnk['n'] if not isinstance(current, dict) and not isinstance(current, collections.Mapping): raise JPathException("Expected dict-like structure to attach node '{}'".format(chnk['p'])) # Process indexed nodes. if 'i' in chnk: idx = chnk['i'] # Automatically create nodes for non-existent keys. if not key in current: current[key] = [] if not isinstance(current[key], list) and not isinstance(current[key], collections.MutableSequence): raise JPathException("Expected list-like object under structure key '{}'".format(key)) # Detection of the last JPath chunk - node somewhere in the middle. if i != size: # Attempt to access node at given index. try: current = current[key][idx] # IndexError: list index out of range # Node at given index does not exist, append new one. Using insert() # does not work, item is appended to the end of the list anyway. # TypeError: list indices must be integers or slices, not str # In the case list index was '*', we are appending to the end of # list. except (IndexError, TypeError): current[key].append({}) current = current[key][-1] # Detection of the last JPath chunk - node at the end. else: # Attempt to insert value at given index. try: if overwrite or not current[key][idx]: current[key][idx] = value else: return RC_VALUE_EXISTS # IndexError: list index out of range # Node at given index does not exist, append new one. Using insert() # does not work, item is appended to the end of the list anyway. # TypeError: list indices must be integers or slices, not str # In the case list index was '*', we are appending to the end of # list. except (IndexError, TypeError): # At this point only deal with unique, overwrite does not make # sense, because we would not be here otherwise. if not unique or not value in current[key]: current[key].append(value) else: return RC_VALUE_DUPLICATE # Process unindexed nodes. else: # Detection of the last JPath chunk - node somewhere in the middle. if i != size: # Automatically create nodes for non-existent keys. if not key in current: current[key] = {} if not isinstance(current[key], dict) and not isinstance(current[key], collections.Mapping): raise JPathException("Expected dict-like object under structure key '{}'".format(key)) current = current[key] # Detection of the last JPath chunk - node at the end. else: if overwrite or not key in current: current[key] = value else: return RC_VALUE_EXISTS return RC_VALUE_SET
python
def jpath_set(structure, jpath, value, overwrite = True, unique = False): """ Set given JPath to given value within given structure. For performance reasons this method is intentionally not written as recursive. :param str structure: data structure to be searched :param str jpath: JPath to be evaluated :param any value: value of any type to be set at given path :param bool overwrite: enable/disable overwriting of already existing value :param bool unique: ensure uniqueness of value, works only for lists :return: numerical return code, one of the (:py:data:`RC_VALUE_SET`, :py:data:`RC_VALUE_EXISTS`, :py:data:`RC_VALUE_DUPLICATE`) :rtype: int """ chunks = jpath_parse_c(jpath) size = len(chunks) - 1 current = structure # Process chunks in order, enumeration is used for detection of the last JPath chunk. for i, chnk in enumerate(chunks): key = chnk['n'] if not isinstance(current, dict) and not isinstance(current, collections.Mapping): raise JPathException("Expected dict-like structure to attach node '{}'".format(chnk['p'])) # Process indexed nodes. if 'i' in chnk: idx = chnk['i'] # Automatically create nodes for non-existent keys. if not key in current: current[key] = [] if not isinstance(current[key], list) and not isinstance(current[key], collections.MutableSequence): raise JPathException("Expected list-like object under structure key '{}'".format(key)) # Detection of the last JPath chunk - node somewhere in the middle. if i != size: # Attempt to access node at given index. try: current = current[key][idx] # IndexError: list index out of range # Node at given index does not exist, append new one. Using insert() # does not work, item is appended to the end of the list anyway. # TypeError: list indices must be integers or slices, not str # In the case list index was '*', we are appending to the end of # list. except (IndexError, TypeError): current[key].append({}) current = current[key][-1] # Detection of the last JPath chunk - node at the end. else: # Attempt to insert value at given index. try: if overwrite or not current[key][idx]: current[key][idx] = value else: return RC_VALUE_EXISTS # IndexError: list index out of range # Node at given index does not exist, append new one. Using insert() # does not work, item is appended to the end of the list anyway. # TypeError: list indices must be integers or slices, not str # In the case list index was '*', we are appending to the end of # list. except (IndexError, TypeError): # At this point only deal with unique, overwrite does not make # sense, because we would not be here otherwise. if not unique or not value in current[key]: current[key].append(value) else: return RC_VALUE_DUPLICATE # Process unindexed nodes. else: # Detection of the last JPath chunk - node somewhere in the middle. if i != size: # Automatically create nodes for non-existent keys. if not key in current: current[key] = {} if not isinstance(current[key], dict) and not isinstance(current[key], collections.Mapping): raise JPathException("Expected dict-like object under structure key '{}'".format(key)) current = current[key] # Detection of the last JPath chunk - node at the end. else: if overwrite or not key in current: current[key] = value else: return RC_VALUE_EXISTS return RC_VALUE_SET
[ "def", "jpath_set", "(", "structure", ",", "jpath", ",", "value", ",", "overwrite", "=", "True", ",", "unique", "=", "False", ")", ":", "chunks", "=", "jpath_parse_c", "(", "jpath", ")", "size", "=", "len", "(", "chunks", ")", "-", "1", "current", "=", "structure", "# Process chunks in order, enumeration is used for detection of the last JPath chunk.", "for", "i", ",", "chnk", "in", "enumerate", "(", "chunks", ")", ":", "key", "=", "chnk", "[", "'n'", "]", "if", "not", "isinstance", "(", "current", ",", "dict", ")", "and", "not", "isinstance", "(", "current", ",", "collections", ".", "Mapping", ")", ":", "raise", "JPathException", "(", "\"Expected dict-like structure to attach node '{}'\"", ".", "format", "(", "chnk", "[", "'p'", "]", ")", ")", "# Process indexed nodes.", "if", "'i'", "in", "chnk", ":", "idx", "=", "chnk", "[", "'i'", "]", "# Automatically create nodes for non-existent keys.", "if", "not", "key", "in", "current", ":", "current", "[", "key", "]", "=", "[", "]", "if", "not", "isinstance", "(", "current", "[", "key", "]", ",", "list", ")", "and", "not", "isinstance", "(", "current", "[", "key", "]", ",", "collections", ".", "MutableSequence", ")", ":", "raise", "JPathException", "(", "\"Expected list-like object under structure key '{}'\"", ".", "format", "(", "key", ")", ")", "# Detection of the last JPath chunk - node somewhere in the middle.", "if", "i", "!=", "size", ":", "# Attempt to access node at given index.", "try", ":", "current", "=", "current", "[", "key", "]", "[", "idx", "]", "# IndexError: list index out of range", "# Node at given index does not exist, append new one. Using insert()", "# does not work, item is appended to the end of the list anyway.", "# TypeError: list indices must be integers or slices, not str", "# In the case list index was '*', we are appending to the end of", "# list.", "except", "(", "IndexError", ",", "TypeError", ")", ":", "current", "[", "key", "]", ".", "append", "(", "{", "}", ")", "current", "=", "current", "[", "key", "]", "[", "-", "1", "]", "# Detection of the last JPath chunk - node at the end.", "else", ":", "# Attempt to insert value at given index.", "try", ":", "if", "overwrite", "or", "not", "current", "[", "key", "]", "[", "idx", "]", ":", "current", "[", "key", "]", "[", "idx", "]", "=", "value", "else", ":", "return", "RC_VALUE_EXISTS", "# IndexError: list index out of range", "# Node at given index does not exist, append new one. 
Using insert()", "# does not work, item is appended to the end of the list anyway.", "# TypeError: list indices must be integers or slices, not str", "# In the case list index was '*', we are appending to the end of", "# list.", "except", "(", "IndexError", ",", "TypeError", ")", ":", "# At this point only deal with unique, overwrite does not make", "# sense, because we would not be here otherwise.", "if", "not", "unique", "or", "not", "value", "in", "current", "[", "key", "]", ":", "current", "[", "key", "]", ".", "append", "(", "value", ")", "else", ":", "return", "RC_VALUE_DUPLICATE", "# Process unindexed nodes.", "else", ":", "# Detection of the last JPath chunk - node somewhere in the middle.", "if", "i", "!=", "size", ":", "# Automatically create nodes for non-existent keys.", "if", "not", "key", "in", "current", ":", "current", "[", "key", "]", "=", "{", "}", "if", "not", "isinstance", "(", "current", "[", "key", "]", ",", "dict", ")", "and", "not", "isinstance", "(", "current", "[", "key", "]", ",", "collections", ".", "Mapping", ")", ":", "raise", "JPathException", "(", "\"Expected dict-like object under structure key '{}'\"", ".", "format", "(", "key", ")", ")", "current", "=", "current", "[", "key", "]", "# Detection of the last JPath chunk - node at the end.", "else", ":", "if", "overwrite", "or", "not", "key", "in", "current", ":", "current", "[", "key", "]", "=", "value", "else", ":", "return", "RC_VALUE_EXISTS", "return", "RC_VALUE_SET" ]
Set given JPath to given value within given structure. For performance reasons this method is intentionally not written as recursive. :param str structure: data structure to be searched :param str jpath: JPath to be evaluated :param any value: value of any type to be set at given path :param bool overwrite: enable/disable overwriting of already existing value :param bool unique: ensure uniqueness of value, works only for lists :return: numerical return code, one of the (:py:data:`RC_VALUE_SET`, :py:data:`RC_VALUE_EXISTS`, :py:data:`RC_VALUE_DUPLICATE`) :rtype: int
[ "Set", "given", "JPath", "to", "given", "value", "within", "given", "structure", "." ]
0582dcc1f7aafe50e25a21c792ea1b3367ea5881
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/jpath.py#L348-L440
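A behavior sketch for jpath_set above: intermediate dicts and lists are auto-created, '[*]' appends to a list, and the overwrite/unique flags select between the three return codes:

msg = {}

jpath_set(msg, 'Source[1].IP4[*]', '192.0.2.1')               # RC_VALUE_SET
jpath_set(msg, 'Source[1].IP4[*]', '192.0.2.1', unique=True)  # RC_VALUE_DUPLICATE
jpath_set(msg, 'Format', 'IDEA0')                             # RC_VALUE_SET
jpath_set(msg, 'Format', 'IDEA1', overwrite=False)            # RC_VALUE_EXISTS

# msg == {'Source': [{'IP4': ['192.0.2.1']}], 'Format': 'IDEA0'}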
241,394
shreyaspotnis/rampage
rampage/queuer.py
queue_ramp_dicts
def queue_ramp_dicts(ramp_dict_list, server_ip_and_port): """Simple utility function to queue up a list of dictionaries.""" client = server.ClientForServer(server.BECServer, server_ip_and_port) for dct in ramp_dict_list: client.queue_ramp(dct) client.start({})
python
def queue_ramp_dicts(ramp_dict_list, server_ip_and_port): """Simple utility function to queue up a list of dictionaries.""" client = server.ClientForServer(server.BECServer, server_ip_and_port) for dct in ramp_dict_list: client.queue_ramp(dct) client.start({})
[ "def", "queue_ramp_dicts", "(", "ramp_dict_list", ",", "server_ip_and_port", ")", ":", "client", "=", "server", ".", "ClientForServer", "(", "server", ".", "BECServer", ",", "server_ip_and_port", ")", "for", "dct", "in", "ramp_dict_list", ":", "client", ".", "queue_ramp", "(", "dct", ")", "client", ".", "start", "(", "{", "}", ")" ]
Simple utility function to queue up a list of dictionaries.
[ "Simple", "utility", "function", "to", "queue", "up", "a", "list", "of", "dictionaries", "." ]
e2565aef7ee16ee06523de975e8aa41aca14e3b2
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/queuer.py#L48-L53
241,395
shreyaspotnis/rampage
rampage/queuer.py
flatten_dict
def flatten_dict(dct, separator='-->', allowed_types=[int, float, bool]): """Returns a list of string identifiers for each element in dct. Recursively scans through dct and finds every element whose type is in allowed_types and adds a string indentifier for it. eg: dct = { 'a': 'a string', 'b': { 'c': 1.0, 'd': True } } flatten_dict(dct) would return ['a', 'b-->c', 'b-->d'] """ flat_list = [] for key in sorted(dct): if key[:2] == '__': continue key_type = type(dct[key]) if key_type in allowed_types: flat_list.append(str(key)) elif key_type is dict: sub_list = flatten_dict(dct[key]) sub_list = [str(key) + separator + sl for sl in sub_list] flat_list += sub_list return flat_list
python
def flatten_dict(dct, separator='-->', allowed_types=[int, float, bool]): """Returns a list of string identifiers for each element in dct. Recursively scans through dct and finds every element whose type is in allowed_types and adds a string indentifier for it. eg: dct = { 'a': 'a string', 'b': { 'c': 1.0, 'd': True } } flatten_dict(dct) would return ['a', 'b-->c', 'b-->d'] """ flat_list = [] for key in sorted(dct): if key[:2] == '__': continue key_type = type(dct[key]) if key_type in allowed_types: flat_list.append(str(key)) elif key_type is dict: sub_list = flatten_dict(dct[key]) sub_list = [str(key) + separator + sl for sl in sub_list] flat_list += sub_list return flat_list
[ "def", "flatten_dict", "(", "dct", ",", "separator", "=", "'-->'", ",", "allowed_types", "=", "[", "int", ",", "float", ",", "bool", "]", ")", ":", "flat_list", "=", "[", "]", "for", "key", "in", "sorted", "(", "dct", ")", ":", "if", "key", "[", ":", "2", "]", "==", "'__'", ":", "continue", "key_type", "=", "type", "(", "dct", "[", "key", "]", ")", "if", "key_type", "in", "allowed_types", ":", "flat_list", ".", "append", "(", "str", "(", "key", ")", ")", "elif", "key_type", "is", "dict", ":", "sub_list", "=", "flatten_dict", "(", "dct", "[", "key", "]", ")", "sub_list", "=", "[", "str", "(", "key", ")", "+", "separator", "+", "sl", "for", "sl", "in", "sub_list", "]", "flat_list", "+=", "sub_list", "return", "flat_list" ]
Returns a list of string identifiers for each element in dct. Recursively scans through dct and finds every element whose type is in allowed_types and adds a string indentifier for it. eg: dct = { 'a': 'a string', 'b': { 'c': 1.0, 'd': True } } flatten_dict(dct) would return ['a', 'b-->c', 'b-->d']
[ "Returns", "a", "list", "of", "string", "identifiers", "for", "each", "element", "in", "dct", "." ]
e2565aef7ee16ee06523de975e8aa41aca14e3b2
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/queuer.py#L57-L86
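Note a mismatch between the docstring and the defaults in flatten_dict above: 'a' holds a str, which is not in the default allowed_types of [int, float, bool], so the documented output ['a', 'b-->c', 'b-->d'] only appears when str is allowed explicitly (and, since the recursive call does not forward allowed_types, only for top-level keys):

dct = {'a': 'a string', 'b': {'c': 1.0, 'd': True}, '__meta': 42}

flatten_dict(dct)                                          # ['b-->c', 'b-->d']
flatten_dict(dct, allowed_types=[int, float, bool, str])   # ['a', 'b-->c', 'b-->d']
# keys starting with '__' are always skipped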
241,396
shreyaspotnis/rampage
rampage/queuer.py
set_dict_item
def set_dict_item(dct, name_string, set_to): """Sets dictionary item identified by name_string to set_to. name_string is the indentifier generated using flatten_dict. Maintains the type of the orginal object in dct and tries to convert set_to to that type. """ key_strings = str(name_string).split('-->') d = dct for ks in key_strings[:-1]: d = d[ks] item_type = type(d[key_strings[-1]]) d[key_strings[-1]] = item_type(set_to)
python
def set_dict_item(dct, name_string, set_to): """Sets dictionary item identified by name_string to set_to. name_string is the indentifier generated using flatten_dict. Maintains the type of the orginal object in dct and tries to convert set_to to that type. """ key_strings = str(name_string).split('-->') d = dct for ks in key_strings[:-1]: d = d[ks] item_type = type(d[key_strings[-1]]) d[key_strings[-1]] = item_type(set_to)
[ "def", "set_dict_item", "(", "dct", ",", "name_string", ",", "set_to", ")", ":", "key_strings", "=", "str", "(", "name_string", ")", ".", "split", "(", "'-->'", ")", "d", "=", "dct", "for", "ks", "in", "key_strings", "[", ":", "-", "1", "]", ":", "d", "=", "d", "[", "ks", "]", "item_type", "=", "type", "(", "d", "[", "key_strings", "[", "-", "1", "]", "]", ")", "d", "[", "key_strings", "[", "-", "1", "]", "]", "=", "item_type", "(", "set_to", ")" ]
Sets dictionary item identified by name_string to set_to. name_string is the indentifier generated using flatten_dict. Maintains the type of the orginal object in dct and tries to convert set_to to that type.
[ "Sets", "dictionary", "item", "identified", "by", "name_string", "to", "set_to", "." ]
e2565aef7ee16ee06523de975e8aa41aca14e3b2
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/queuer.py#L89-L102
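A round-trip sketch pairing set_dict_item with flatten_dict, under the same assumption that rampage is importable. Note the type-preserving coercion, and that bool('False') is True in Python, so boolean values need care when set from strings.

# Usage sketch (assumes rampage from the linked repo is importable).
from rampage.queuer import set_dict_item

settings = {'b': {'c': 1.0, 'd': True}}

# 'b-->c' currently holds a float, so the string '2' is coerced via float().
set_dict_item(settings, 'b-->c', '2')
print(settings['b']['c'])  # 2.0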
241,397
bitlabstudio/django-localized-names
localized_names/templatetags/localized_names_tags.py
get_name
def get_name(obj, setting_name='LONG_NAME_FORMAT'):
    """
    Returns the correct order of the name according to the current language.

    """
    nickname = obj.get_nickname()
    romanized_first_name = obj.get_romanized_first_name()
    romanized_last_name = obj.get_romanized_last_name()
    non_romanized_first_name = obj.get_non_romanized_first_name()
    non_romanized_last_name = obj.get_non_romanized_last_name()
    non_translated_title = obj.get_title()
    non_translated_gender = obj.get_gender()

    # when the title is blank, gettext returns weird header text. So if this
    # occurs, we will pass it on blank without gettext
    if non_translated_title:
        title = gettext(non_translated_title)
    else:
        title = non_translated_title
    if non_translated_gender:
        gender = gettext(non_translated_gender)
    else:
        gender = non_translated_gender

    format_string = u'{}'.format(get_format(setting_name))
    format_kwargs = {}
    if '{n}' in format_string:
        format_kwargs.update({'n': nickname})
    if '{N}' in format_string:
        format_kwargs.update({'N': nickname.upper()})
    if '{f}' in format_string:
        format_kwargs.update({'f': romanized_first_name})
    if '{F}' in format_string:
        format_kwargs.update({'F': romanized_first_name.upper()})
    if '{l}' in format_string:
        format_kwargs.update({'l': romanized_last_name})
    if '{L}' in format_string:
        format_kwargs.update({'L': romanized_last_name.upper()})
    if '{a}' in format_string:
        format_kwargs.update({'a': non_romanized_first_name})
    if '{A}' in format_string:
        format_kwargs.update({'A': non_romanized_first_name.upper()})
    if '{x}' in format_string:
        format_kwargs.update({'x': non_romanized_last_name})
    if '{X}' in format_string:
        format_kwargs.update({'X': non_romanized_last_name.upper()})
    if '{t}' in format_string:
        format_kwargs.update({'t': title})
    if '{T}' in format_string:
        format_kwargs.update({'T': title.upper()})
    if '{g}' in format_string:
        format_kwargs.update({'g': gender})
    if '{G}' in format_string:
        format_kwargs.update({'G': gender.upper()})
    return format_string.format(**format_kwargs)
python
def get_name(obj, setting_name='LONG_NAME_FORMAT'):
    """
    Returns the correct order of the name according to the current language.

    """
    nickname = obj.get_nickname()
    romanized_first_name = obj.get_romanized_first_name()
    romanized_last_name = obj.get_romanized_last_name()
    non_romanized_first_name = obj.get_non_romanized_first_name()
    non_romanized_last_name = obj.get_non_romanized_last_name()
    non_translated_title = obj.get_title()
    non_translated_gender = obj.get_gender()

    # when the title is blank, gettext returns weird header text. So if this
    # occurs, we will pass it on blank without gettext
    if non_translated_title:
        title = gettext(non_translated_title)
    else:
        title = non_translated_title
    if non_translated_gender:
        gender = gettext(non_translated_gender)
    else:
        gender = non_translated_gender

    format_string = u'{}'.format(get_format(setting_name))
    format_kwargs = {}
    if '{n}' in format_string:
        format_kwargs.update({'n': nickname})
    if '{N}' in format_string:
        format_kwargs.update({'N': nickname.upper()})
    if '{f}' in format_string:
        format_kwargs.update({'f': romanized_first_name})
    if '{F}' in format_string:
        format_kwargs.update({'F': romanized_first_name.upper()})
    if '{l}' in format_string:
        format_kwargs.update({'l': romanized_last_name})
    if '{L}' in format_string:
        format_kwargs.update({'L': romanized_last_name.upper()})
    if '{a}' in format_string:
        format_kwargs.update({'a': non_romanized_first_name})
    if '{A}' in format_string:
        format_kwargs.update({'A': non_romanized_first_name.upper()})
    if '{x}' in format_string:
        format_kwargs.update({'x': non_romanized_last_name})
    if '{X}' in format_string:
        format_kwargs.update({'X': non_romanized_last_name.upper()})
    if '{t}' in format_string:
        format_kwargs.update({'t': title})
    if '{T}' in format_string:
        format_kwargs.update({'T': title.upper()})
    if '{g}' in format_string:
        format_kwargs.update({'g': gender})
    if '{G}' in format_string:
        format_kwargs.update({'G': gender.upper()})
    return format_string.format(**format_kwargs)
[ "def", "get_name", "(", "obj", ",", "setting_name", "=", "'LONG_NAME_FORMAT'", ")", ":", "nickname", "=", "obj", ".", "get_nickname", "(", ")", "romanized_first_name", "=", "obj", ".", "get_romanized_first_name", "(", ")", "romanized_last_name", "=", "obj", ".", "get_romanized_last_name", "(", ")", "non_romanized_first_name", "=", "obj", ".", "get_non_romanized_first_name", "(", ")", "non_romanized_last_name", "=", "obj", ".", "get_non_romanized_last_name", "(", ")", "non_translated_title", "=", "obj", ".", "get_title", "(", ")", "non_translated_gender", "=", "obj", ".", "get_gender", "(", ")", "# when the title is blank, gettext returns weird header text. So if this", "# occurs, we will pass it on blank without gettext", "if", "non_translated_title", ":", "title", "=", "gettext", "(", "non_translated_title", ")", "else", ":", "title", "=", "non_translated_title", "if", "non_translated_gender", ":", "gender", "=", "gettext", "(", "non_translated_gender", ")", "else", ":", "gender", "=", "non_translated_gender", "format_string", "=", "u'{}'", ".", "format", "(", "get_format", "(", "setting_name", ")", ")", "format_kwargs", "=", "{", "}", "if", "'{n}'", "in", "format_string", ":", "format_kwargs", ".", "update", "(", "{", "'n'", ":", "nickname", "}", ")", "if", "'{N}'", "in", "format_string", ":", "format_kwargs", ".", "update", "(", "{", "'N'", ":", "nickname", ".", "upper", "(", ")", "}", ")", "if", "'{f}'", "in", "format_string", ":", "format_kwargs", ".", "update", "(", "{", "'f'", ":", "romanized_first_name", "}", ")", "if", "'{F}'", "in", "format_string", ":", "format_kwargs", ".", "update", "(", "{", "'F'", ":", "romanized_first_name", ".", "upper", "(", ")", "}", ")", "if", "'{l}'", "in", "format_string", ":", "format_kwargs", ".", "update", "(", "{", "'l'", ":", "romanized_last_name", "}", ")", "if", "'{L}'", "in", "format_string", ":", "format_kwargs", ".", "update", "(", "{", "'L'", ":", "romanized_last_name", ".", "upper", "(", ")", "}", ")", "if", "'{a}'", "in", "format_string", ":", "format_kwargs", ".", "update", "(", "{", "'a'", ":", "non_romanized_first_name", "}", ")", "if", "'{A}'", "in", "format_string", ":", "format_kwargs", ".", "update", "(", "{", "'A'", ":", "non_romanized_first_name", ".", "upper", "(", ")", "}", ")", "if", "'{x}'", "in", "format_string", ":", "format_kwargs", ".", "update", "(", "{", "'x'", ":", "non_romanized_last_name", "}", ")", "if", "'{X}'", "in", "format_string", ":", "format_kwargs", ".", "update", "(", "{", "'X'", ":", "non_romanized_last_name", ".", "upper", "(", ")", "}", ")", "if", "'{t}'", "in", "format_string", ":", "format_kwargs", ".", "update", "(", "{", "'t'", ":", "title", "}", ")", "if", "'{T}'", "in", "format_string", ":", "format_kwargs", ".", "update", "(", "{", "'T'", ":", "title", ".", "upper", "(", ")", "}", ")", "if", "'{g}'", "in", "format_string", ":", "format_kwargs", ".", "update", "(", "{", "'g'", ":", "gender", "}", ")", "if", "'{G}'", "in", "format_string", ":", "format_kwargs", ".", "update", "(", "{", "'G'", ":", "gender", ".", "upper", "(", ")", "}", ")", "return", "format_string", ".", "format", "(", "*", "*", "format_kwargs", ")" ]
Returns the correct order of the name according to the current language.
[ "Returns", "the", "correct", "order", "of", "the", "name", "according", "to", "the", "current", "language", "." ]
d2a67fd256f6bf61f0e17b5d77aac1ec7ccfa0cb
https://github.com/bitlabstudio/django-localized-names/blob/d2a67fd256f6bf61f0e17b5d77aac1ec7ccfa0cb/localized_names/templatetags/localized_names_tags.py#L11-L65
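get_name leans on Django's get_format and gettext; below is a standalone sketch of just the placeholder mechanic, with a hypothetical format string and name parts, and the Django lookups replaced by literals.

# Standalone sketch of get_name's placeholder scheme; the format string
# and name parts are hypothetical, and Django's get_format()/gettext()
# lookups are replaced by hard-coded values.
format_string = u'{t} {f} {L}'  # stand-in for a LONG_NAME_FORMAT value

title, first, last = 'Dr.', 'Ada', 'Lovelace'
format_kwargs = {}
if '{t}' in format_string:
    format_kwargs['t'] = title
if '{f}' in format_string:
    format_kwargs['f'] = first
if '{L}' in format_string:
    format_kwargs['L'] = last.upper()

print(format_string.format(**format_kwargs))  # Dr. Ada LOVELACE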
241,398
Vito2015/pyextend
pyextend/core/wrappers/timethis.py
timethis
def timethis(func):
    """A wrapper used for timeit."""
    func_module, func_name = func.__module__, func.__name__

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = _time_perf_counter()
        r = func(*args, **kwargs)
        end = _time_perf_counter()
        print('timethis : <{}.{}> : {}'.format(func_module, func_name, end - start))
        return r
    return wrapper
python
def timethis(func):
    """A wrapper used for timeit."""
    func_module, func_name = func.__module__, func.__name__

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = _time_perf_counter()
        r = func(*args, **kwargs)
        end = _time_perf_counter()
        print('timethis : <{}.{}> : {}'.format(func_module, func_name, end - start))
        return r
    return wrapper
[ "def", "timethis", "(", "func", ")", ":", "func_module", ",", "func_name", "=", "func", ".", "__module__", ",", "func", ".", "__name__", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "start", "=", "_time_perf_counter", "(", ")", "r", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "end", "=", "_time_perf_counter", "(", ")", "print", "(", "'timethis : <{}.{}> : {}'", ".", "format", "(", "func_module", ",", "func_name", ",", "end", "-", "start", ")", ")", "return", "r", "return", "wrapper" ]
A wrapper used for timeit.
[ "A", "wrapper", "used", "for", "timeit", "." ]
36861dfe1087e437ffe9b5a1da9345c85b4fa4a1
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/wrappers/timethis.py#L23-L34
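A usage sketch for the timethis decorator, assuming the pyextend package from the linked repo is importable; the elapsed time shown in the comment is illustrative.

# Usage sketch (assumes pyextend from the linked repo is importable).
from pyextend.core.wrappers.timethis import timethis

@timethis
def busy_loop(n):
    return sum(i * i for i in range(n))

busy_loop(10 ** 6)
# Prints something like:
# timethis : <__main__.busy_loop> : 0.0812...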
241,399
biocore/mustached-octo-ironman
moi/group.py
create_info
def create_info(name, info_type, url=None, parent=None, id=None,
                context=ctx_default, store=False):
    """Return a group object"""
    id = str(uuid4()) if id is None else id
    pubsub = _pubsub_key(id)

    info = {'id': id,
            'type': info_type,
            'pubsub': pubsub,
            'url': url,
            'parent': parent,
            'context': context,
            'name': name,
            'status': 'Queued' if info_type == 'job' else None,
            'date_start': None,
            'date_end': None,
            'date_created': str(datetime.now()),
            'result': None}

    if store:
        r_client.set(id, json_encode(info))

        if parent is not None:
            r_client.sadd(_children_key(parent), id)

    return info
python
def create_info(name, info_type, url=None, parent=None, id=None,
                context=ctx_default, store=False):
    """Return a group object"""
    id = str(uuid4()) if id is None else id
    pubsub = _pubsub_key(id)

    info = {'id': id,
            'type': info_type,
            'pubsub': pubsub,
            'url': url,
            'parent': parent,
            'context': context,
            'name': name,
            'status': 'Queued' if info_type == 'job' else None,
            'date_start': None,
            'date_end': None,
            'date_created': str(datetime.now()),
            'result': None}

    if store:
        r_client.set(id, json_encode(info))

        if parent is not None:
            r_client.sadd(_children_key(parent), id)

    return info
[ "def", "create_info", "(", "name", ",", "info_type", ",", "url", "=", "None", ",", "parent", "=", "None", ",", "id", "=", "None", ",", "context", "=", "ctx_default", ",", "store", "=", "False", ")", ":", "id", "=", "str", "(", "uuid4", "(", ")", ")", "if", "id", "is", "None", "else", "id", "pubsub", "=", "_pubsub_key", "(", "id", ")", "info", "=", "{", "'id'", ":", "id", ",", "'type'", ":", "info_type", ",", "'pubsub'", ":", "pubsub", ",", "'url'", ":", "url", ",", "'parent'", ":", "parent", ",", "'context'", ":", "context", ",", "'name'", ":", "name", ",", "'status'", ":", "'Queued'", "if", "info_type", "==", "'job'", "else", "None", ",", "'date_start'", ":", "None", ",", "'date_end'", ":", "None", ",", "'date_created'", ":", "str", "(", "datetime", ".", "now", "(", ")", ")", ",", "'result'", ":", "None", "}", "if", "store", ":", "r_client", ".", "set", "(", "id", ",", "json_encode", "(", "info", ")", ")", "if", "parent", "is", "not", "None", ":", "r_client", ".", "sadd", "(", "_children_key", "(", "parent", ")", ",", "id", ")", "return", "info" ]
Return a group object
[ "Return", "a", "group", "object" ]
54128d8fdff327e1b7ffd9bb77bf38c3df9526d7
https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/group.py#L351-L376
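A usage sketch for create_info, with the caveat that importing moi.group may require a configured Redis client; with the default store=False the call itself performs no Redis writes and simply builds the info dict.

# Usage sketch (assumes moi from the linked repo is installed and its
# Redis client is configured at import time).
from moi.group import create_info

job = create_info('demo-job', 'job')  # id defaults to a fresh uuid4
print(job['status'])                  # 'Queued' because info_type == 'job'
print(job['pubsub'])                  # pubsub channel key derived from the id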