_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q6100
interpolated_strings.types
train
def types(self):
    """
    Tuple of the constant types this transformer rewrites.

    Contains ``bytes`` and/or ``str`` depending on which transform
    flags are enabled on the instance.
    """
    flag_map = (
        (bytes, self._transform_bytes),
        (str, self._transform_str),
    )
    return tuple(typ for typ, enabled in flag_map if enabled)
python
{ "resource": "" }
q6101
interpolated_strings._transform_constant_sequence
train
def _transform_constant_sequence(self, seq):
    """
    Transform a frozenset or tuple constant.

    Yields bytecode instructions that rebuild ``seq`` at runtime with
    every transformable string element replaced by its transformed
    form.  Sequences containing no transformable strings are emitted
    as a single LOAD_CONST of the original object.
    """
    should_transform = is_a(self.types)
    if not any(filter(should_transform, flatten(seq))):
        # Tuple doesn't contain any transformable strings. Ignore.
        yield LOAD_CONST(seq)
        return
    # Emit each element; nested tuples/frozensets are handled recursively.
    for const in seq:
        if should_transform(const):
            yield from self.transform_stringlike(const)
        elif isinstance(const, (tuple, frozenset)):
            yield from self._transform_constant_sequence(const)
        else:
            yield LOAD_CONST(const)
    if isinstance(seq, tuple):
        yield BUILD_TUPLE(len(seq))
    else:
        assert isinstance(seq, frozenset)
        # There is no BUILD_FROZENSET opcode: build a tuple, then call
        # frozenset() on it (ROT_TWO puts the callable under the tuple).
        yield BUILD_TUPLE(len(seq))
        yield LOAD_CONST(frozenset)
        yield ROT_TWO()
        yield CALL_FUNCTION(1)
python
{ "resource": "" }
q6102
interpolated_strings.transform_stringlike
train
def transform_stringlike(self, const):
    """
    Yield the instructions that load and transform one str or bytes
    constant: a LOAD_CONST of the original value followed by the
    type-specific transform instruction sequence.
    """
    yield LOAD_CONST(const)
    # str and bytes are disjoint types, so branch order is irrelevant.
    if isinstance(const, str):
        yield from self.str_instrs
    elif isinstance(const, bytes):
        yield from self.bytes_instrs
python
{ "resource": "" }
q6103
Instruction.steal
train
def steal(self, instr):
    """Steal the jump index off of `instr`.

    This makes anything that would have jumped to `instr` jump to
    this Instruction instead.

    Parameters
    ----------
    instr : Instruction
        The instruction to steal the jump sources from.

    Returns
    -------
    self : Instruction
        The instruction that owns this method.

    Notes
    -----
    This mutates self and ``instr`` inplace.
    """
    instr._stolen_by = self
    sources = instr._target_of
    # retarget every jump that pointed at ``instr``
    for jump in sources:
        jump.arg = self
    self._target_of = sources
    instr._target_of = set()
    return self
python
{ "resource": "" }
q6104
Instruction.from_opcode
train
def from_opcode(cls, opcode, arg=_no_arg):
    """
    Create an instruction from an opcode and raw argument.

    Parameters
    ----------
    opcode : int
        Opcode for the instruction to create.
    arg : int, optional
        The argument for the instruction.

    Returns
    -------
    instr : Instruction
        An instance of the instruction named by ``opcode``.
    """
    # ``type(cls)`` is the Instruction metaclass: calling it creates a new
    # Instruction subclass named after the opcode (``opname[opcode]``),
    # and the trailing call ``(arg)`` instantiates that subclass.
    return type(cls)(opname[opcode], (cls,), {}, opcode=opcode)(arg)
python
{ "resource": "" }
q6105
Instruction.stack_effect
train
def stack_effect(self):
    """
    The net effect of executing this instruction on the interpreter stack.

    Instructions that pop values off the stack have negative stack effect
    equal to the number of popped values.

    Instructions that push values onto the stack have positive stack
    effect equal to the number of popped values.

    Examples
    --------
    - LOAD_{FAST,NAME,GLOBAL,DEREF} push one value onto the stack.
      They have a stack_effect of 1.
    - POP_JUMP_IF_{TRUE,FALSE} always pop one value off the stack.
      They have a stack effect of -1.
    - BINARY_* instructions pop two instructions off the stack, apply a
      binary operator, and push the resulting value onto the stack. They
      have a stack effect of -1 (-2 values consumed + 1 value pushed).
    """
    if self.opcode == NOP.opcode:  # noqa
        # dis.stack_effect is broken here
        return 0
    # dis.stack_effect only accepts an oparg for instructions that take
    # one; non-int args (e.g. resolved jump targets) are passed as 0.
    return stack_effect(
        self.opcode,
        *((self.arg if isinstance(self.arg, int) else 0,)
          if self.have_arg else ())
    )
python
{ "resource": "" }
q6106
Instruction.equiv
train
def equiv(self, instr):
    """Check equivalence of instructions.

    This checks against the types and the arguments of the instructions.

    Parameters
    ----------
    instr : Instruction
        The instruction to check against.

    Returns
    -------
    is_equiv : bool
        If the instructions are equivalent.

    Notes
    -----
    This is a separate concept from instruction identity.  Two separate
    instructions can be equivalent without being the same exact instance,
    so they may sit at different bytecode offsets or be targeted by
    different jumps.
    """
    same_kind = type(self) == type(instr)
    return same_kind and self.arg == instr.arg
python
{ "resource": "" }
q6107
EnvSubprocess._get_python_cmd
train
def _get_python_cmd(self): """ return the python executable in the virtualenv. Try first sys.executable but use fallbacks. """ file_names = ["pypy.exe", "python.exe", "python"] executable = sys.executable if executable is not None: executable = os.path.split(executable)[1] file_names.insert(0, executable) return self._get_bin_file(*file_names)
python
{ "resource": "" }
q6108
convert
train
def convert(source_file, destination_file, cfg):
    """
    Convert between the supported file formats (.wav, .cas, .bas) in
    every direction, selected by the file extensions.

    Parameters
    ----------
    source_file, destination_file : str
        Paths whose extensions select the input/output format.
    cfg : object
        Configuration passed to Cassette.

    Raises
    ------
    AssertionError
        If the source or destination extension is unsupported.
    """
    source_ext = os.path.splitext(source_file)[1].lower()
    dest_ext = os.path.splitext(destination_file)[1].lower()

    supported = (".wav", ".cas", ".bas")
    # BUGFIX: the original used ``"%r" % repr(ext)`` which rendered the
    # extension double-quoted (e.g. "'.txt'" inside extra quotes).
    if source_ext not in supported:
        raise AssertionError(
            "Source file type %r not supported." % source_ext
        )
    if dest_ext not in supported:
        raise AssertionError(
            "Destination file type %r not supported." % dest_ext
        )

    print("Convert %s -> %s" % (source_ext, dest_ext))

    c = Cassette(cfg)

    # load source into the cassette model
    if source_ext == ".wav":
        c.add_from_wav(source_file)
    elif source_ext == ".cas":
        c.add_from_cas(source_file)
    elif source_ext == ".bas":
        c.add_from_bas(source_file)
    else:
        raise RuntimeError  # Should never happen

    c.print_debug_info()

    # write out in the requested format
    if dest_ext == ".wav":
        c.write_wave(destination_file)
    elif dest_ext == ".cas":
        c.write_cas(destination_file)
    elif dest_ext == ".bas":
        c.write_bas(destination_file)
    else:
        raise RuntimeError
python
{ "resource": "" }
q6109
ScrolledText.save_position
train
def save_position(self):
    """
    Remember the current text-cursor position and vertical scroll
    position so they can be restored later via restore_position().
    """
    # text cursor (insertion mark) position:
    self.old_text_pos = self.index(tkinter.INSERT)
    # vertical scrollbar fractions (top, bottom):
    self.old_first, self.old_last = self.yview()
python
{ "resource": "" }
q6110
ScrolledText.restore_position
train
def restore_position(self):
    """
    Move the text cursor and vertical scroll position back to the
    values captured by save_position().
    """
    # put the insertion mark back:
    self.mark_set(tkinter.INSERT, self.old_text_pos)
    # scroll back to the saved top fraction:
    self.yview_moveto(self.old_first)
python
{ "resource": "" }
q6111
ROMFile.download
train
def download(self):
    """
    Request url and return his content

    The Requested content will be cached into the default temp directory.

    Raises
    ------
    AssertionError
        If the SHA1 of the (cached or downloaded) content does not match
        ``self.DOWNLOAD_SHA1``.
    """
    if os.path.isfile(self.archive_path):
        # cache hit: reuse the previously downloaded archive
        print("Use %r" % self.archive_path)
        with open(self.archive_path, "rb") as f:
            content = f.read()
    else:
        print("Request: %r..." % self.URL)
        # Warning: HTTPS requests do not do any verification of the server's certificate.
        f = urlopen(self.URL)
        content = f.read()
        with open(self.archive_path, "wb") as out_file:
            out_file.write(content)
    # Check SHA hash:
    # NOTE(review): assert is stripped under ``python -O`` — the integrity
    # check silently disappears in optimized mode.
    current_sha1 = hashlib.sha1(content).hexdigest()
    assert current_sha1 == self.DOWNLOAD_SHA1, "Download sha1 value is wrong! SHA1 is: %r" % current_sha1
    print("Download SHA1: %r, ok." % current_sha1)
python
{ "resource": "" }
q6112
BaseTkinterGUI.paste_clipboard
train
def paste_clipboard(self, event):
    """
    Send the clipboard content as user input to the CPU.

    Each clipboard line is fed to the input queue with a trailing
    carriage return.
    """
    log.critical("paste clipboard")
    content = self.root.clipboard_get()
    for row in content.splitlines():
        log.critical("paste line: %s", repr(row))
        self.add_user_input(row + "\r")
python
{ "resource": "" }
q6113
DragonTkinterGUI.display_callback
train
def display_callback(self, cpu_cycles, op_address, address, value):
    """
    Memory ``write_byte_middleware`` hook: forward the written byte to
    the display and pass the value through unchanged.
    """
    forward = self.display.write_byte
    forward(cpu_cycles, op_address, address, value)
    return value
python
{ "resource": "" }
q6114
PIA.read_PIA0_A_data
train
def read_PIA0_A_data(self, cpu_cycles, op_address, address):
    """
    read from 0xff00 -> PIA 0 A side Data reg.

    bit 7 | PA7 | joystick comparison input
    bit 6 | PA6 | keyboard matrix row 7
    bit 5 | PA5 | keyboard matrix row 6
    bit 4 | PA4 | keyboard matrix row 5
    bit 3 | PA3 | keyboard matrix row 4 & left  joystick switch 2
    bit 2 | PA2 | keyboard matrix row 3 & right joystick switch 2
    bit 1 | PA1 | keyboard matrix row 2 & left  joystick switch 1
    bit 0 | PA0 | keyboard matrix row 1 & right joystick switch 1

    Returns 0xff ("no key pressed") or the keyboard-matrix result for
    the currently pending input character.
    """
    pia0b = self.pia_0_B_data.value  # $ff02
    # FIXME: Find a way to handle CoCo and Dragon in the same way!
    if self.cfg.CONFIG_NAME == COCO2B:
        # CoCo: pace key delivery with a read counter so the ROM scan
        # routine sees each key for several scans, then a release.
        # log.critical("\t count: %i", self.input_repead)
        if self.input_repead == 7:
            try:
                self.current_input_char = self.user_input_queue.get_nowait()
            except queue.Empty:
                self.current_input_char = None
            else:
                log.critical("\tget new key from queue: %s", repr(self.current_input_char))
        elif self.input_repead == 18:
            # log.critical("\tForce send 'no key pressed'")
            self.current_input_char = None
        elif self.input_repead > 20:
            self.input_repead = 0
        self.input_repead += 1
    else:  # Dragon
        if pia0b == self.cfg.PIA0B_KEYBOARD_START:  # FIXME
            if self.empty_key_toggle:
                # Work-a-round for "poor" dragon keyboard scan routine:
                # The scan routine in ROM ignores key pressed directly behind
                # one another if they are in the same row!
                # See "Inside the Dragon" book, page 203 ;)
                #
                # Here with the empty_key_toggle, we always send a "no key pressed"
                # after every key press back and then we send the next key from
                # the self.user_input_queue
                #
                # TODO: We can check the row of the previous key press and only
                # force a 'no key pressed' if the row is the same
                self.empty_key_toggle = False
                self.current_input_char = None
                # log.critical("\tForce send 'no key pressed'")
            else:
                try:
                    self.current_input_char = self.user_input_queue.get_nowait()
                except queue.Empty:
                    # log.critical("\tinput_queue is empty"))
                    self.current_input_char = None
                else:
                    # log.critical("\tget new key from queue: %s", repr(self.current_input_char))
                    self.empty_key_toggle = True
    if self.current_input_char is None:
        # log.critical("\tno key pressed")
        result = 0xff
        self.empty_key_toggle = False
    else:
        # log.critical("\tsend %s", repr(self.current_input_char))
        result = self.cfg.pia_keymatrix_result(self.current_input_char, pia0b)
    # if not is_bit_set(pia0b, bit=7):
    #     # bit 7 | PA7 | joystick comparison input
    #     result = clear_bit(result, bit=7)
    # if self.current_input_char is not None:
    #     log.critical(
    #         "%04x| read $%04x ($ff02 is $%02x %s) send $%02x %s back\t|%s",
    #         op_address, address,
    #         pia0b, '{0:08b}'.format(pia0b),
    #         result, '{0:08b}'.format(result),
    #         self.cfg.mem_info.get_shortest(op_address)
    #     )
    return result
python
{ "resource": "" }
q6115
PIA.write_PIA0_A_data
train
def write_PIA0_A_data(self, cpu_cycles, op_address, address, value):
    """
    Handle a CPU write to 0xff00 (PIA 0 A side Data register).
    The written byte is stored in the A-side register.
    """
    bits = byte2bit_string(value)
    location = self.cfg.mem_info.get_shortest(op_address)
    log.error(
        "%04x| write $%02x (%s) to $%04x -> PIA 0 A side Data reg.\t|%s",
        op_address, value, bits, address, location
    )
    self.pia_0_A_register.set(value)
python
{ "resource": "" }
q6116
PIA.read_PIA0_A_control
train
def read_PIA0_A_control(self, cpu_cycles, op_address, address):
    """
    Handle a CPU read of 0xff01 (PIA 0 A side control register).
    Always answers with the fixed value 0xb3.
    """
    value = 0xb3  # hard-coded response
    bits = byte2bit_string(value)
    location = self.cfg.mem_info.get_shortest(op_address)
    log.error(
        "%04x| read $%04x (PIA 0 A side Control reg.) send $%02x (%s) back.\t|%s",
        op_address, address, value, bits, location
    )
    return value
python
{ "resource": "" }
q6117
PIA.write_PIA0_A_control
train
def write_PIA0_A_control(self, cpu_cycles, op_address, address, value):
    """
    Handle a CPU write to 0xff01 (PIA 0 A side control register).

    TODO: Handle IRQ

    bit 7 | IRQ 1 (HSYNC) flag
    bit 6 | IRQ 2 flag(not used)
    bit 5 | Control line 2 (CA2) is an output = 1
    bit 4 | Control line 2 (CA2) set by bit 3 = 1
    bit 3 | select line LSB of analog multiplexor (MUX): 0 = control line 2 LO / 1 = control line 2 HI
    bit 2 | set data direction: 0 = $FF00 is DDR / 1 = $FF00 is normal data lines
    bit 1 | control line 1 (CA1): IRQ polarity 0 = IRQ on HI to LO / 1 = IRQ on LO to HI
    bit 0 | HSYNC IRQ: 0 = disabled IRQ / 1 = enabled IRQ
    """
    log.error(
        "%04x| write $%02x (%s) to $%04x -> PIA 0 A side Control reg.\t|%s",
        op_address, value, byte2bit_string(value), address,
        self.cfg.mem_info.get_shortest(op_address)
    )
    # bit 2 chooses whether $FF00 acts as data-direction register or as
    # the normal peripheral data register.
    if is_bit_set(value, bit=2):
        self.pia_0_A_register.deselect_pdr()
    else:
        self.pia_0_A_register.select_pdr()
python
{ "resource": "" }
q6118
PIA.read_PIA0_B_data
train
def read_PIA0_B_data(self, cpu_cycles, op_address, address):
    """
    Handle a CPU read of 0xff02 (PIA 0 B side Data register).

    bit 7 | PB7 | keyboard matrix column 8
    bit 6 | PB6 | keyboard matrix column 7 / ram size output
    bit 5 | PB5 | keyboard matrix column 6
    bit 4 | PB4 | keyboard matrix column 5
    bit 3 | PB3 | keyboard matrix column 4
    bit 2 | PB2 | keyboard matrix column 3
    bit 1 | PB1 | keyboard matrix column 2
    bit 0 | PB0 | keyboard matrix column 1
    bits 0-7 also printer data lines
    """
    value = self.pia_0_B_data.value  # $ff02
    bits = byte2bit_string(value)
    location = self.cfg.mem_info.get_shortest(op_address)
    log.debug(
        "%04x| read $%04x (PIA 0 B side Data reg.) send $%02x (%s) back.\t|%s",
        op_address, address, value, bits, location
    )
    return value
python
{ "resource": "" }
q6119
PIA.read_PIA0_B_control
train
def read_PIA0_B_control(self, cpu_cycles, op_address, address):
    """
    Handle a CPU read of 0xff03 (PIA 0 B side Control register).
    Returns the stored control register value.
    """
    value = self.pia_0_B_control.value
    bits = byte2bit_string(value)
    location = self.cfg.mem_info.get_shortest(op_address)
    log.error(
        "%04x| read $%04x (PIA 0 B side Control reg.) send $%02x (%s) back.\t|%s",
        op_address, address, value, bits, location
    )
    return value
python
{ "resource": "" }
q6120
PIA.write_PIA0_B_control
train
def write_PIA0_B_control(self, cpu_cycles, op_address, address, value):
    """
    write to 0xff03 -> PIA 0 B side Control reg.

    TODO: Handle IRQ

    bit 7 | IRQ 1 (VSYNC) flag
    bit 6 | IRQ 2 flag(not used)
    bit 5 | Control line 2 (CB2) is an output = 1
    bit 4 | Control line 2 (CB2) set by bit 3 = 1
    bit 3 | select line MSB of analog multiplexor (MUX): 0 = control line 2 LO / 1 = control line 2 HI
    bit 2 | set data direction: 0 = $FF02 is DDR / 1 = $FF02 is normal data lines
    bit 1 | control line 1 (CB1): IRQ polarity 0 = IRQ on HI to LO / 1 = IRQ on LO to HI
    bit 0 | VSYNC IRQ: 0 = disable IRQ / 1 = enable IRQ
    """
    log.critical(
        "%04x| write $%02x (%s) to $%04x -> PIA 0 B side Control reg.\t|%s",
        op_address, value, byte2bit_string(value), address,
        self.cfg.mem_info.get_shortest(op_address)
    )
    # bit 0 toggles the VSYNC IRQ; enabling also sets the IRQ-1 flag
    # (bit 7) in the value that gets stored below.
    if is_bit_set(value, bit=0):
        log.critical(
            "%04x| write $%02x (%s) to $%04x -> VSYNC IRQ: enable\t|%s",
            op_address, value, byte2bit_string(value), address,
            self.cfg.mem_info.get_shortest(op_address)
        )
        self.cpu.irq_enabled = True
        value = set_bit(value, bit=7)
    else:
        log.critical(
            "%04x| write $%02x (%s) to $%04x -> VSYNC IRQ: disable\t|%s",
            op_address, value, byte2bit_string(value), address,
            self.cfg.mem_info.get_shortest(op_address)
        )
        self.cpu.irq_enabled = False
    # bit 2: data-direction select for $FF02 (cleared -> DDR mode)
    if not is_bit_set(value, bit=2):
        self.pia_0_B_control.select_pdr()
    else:
        self.pia_0_B_control.deselect_pdr()
    self.pia_0_B_control.set(value)
python
{ "resource": "" }
q6121
Machine.inject_basic_program
train
def inject_basic_program(self, ascii_listing):
    """
    Save the given ASCII BASIC program listing into the emulator RAM
    and update the interpreter's program/variable pointers to match.
    """
    start = self.cpu.memory.read_word(self.machine_api.PROGRAM_START_ADDR)
    dump = self.machine_api.ascii_listing2program_dump(ascii_listing)
    self.cpu.memory.load(start, dump)
    log.critical("BASIC program injected into Memory.")

    # Update the BASIC addresses: variables/arrays/free space all begin
    # directly after the injected program.
    end = start + len(dump)
    for addr_attr in ("VARIABLES_START_ADDR", "ARRAY_START_ADDR", "FREE_SPACE_START_ADDR"):
        self.cpu.memory.write_word(getattr(self.machine_api, addr_attr), end)
    log.critical("BASIC addresses updated.")
python
{ "resource": "" }
q6122
Wave2Bitstream.sync
train
def sync(self, length): """ synchronized weave sync trigger """ # go in wave stream to the first bit try: self.next() except StopIteration: print "Error: no bits identified!" sys.exit(-1) log.info("First bit is at: %s" % self.pformat_pos()) log.debug("enable half sinus scan") self.half_sinus = True # Toggle sync test by consuming one half sinus sample # self.iter_trigger_generator.next() # Test sync # get "half sinus cycle" test data test_durations = itertools.islice(self.iter_duration_generator, length) # It's a tuple like: [(frame_no, duration)...] test_durations = list(test_durations) diff1, diff2 = diff_info(test_durations) log.debug("sync diff info: %i vs. %i" % (diff1, diff2)) if diff1 > diff2: log.info("\nbit-sync one step.") self.iter_trigger_generator.next() log.debug("Synced.") else: log.info("\nNo bit-sync needed.") self.half_sinus = False log.debug("disable half sinus scan")
python
{ "resource": "" }
q6123
Wave2Bitstream.iter_duration
train
def iter_duration(self, iter_trigger):
    """
    yield the duration of two frames in a row.

    Also prints a periodic progress/status line while iterating.
    """
    print
    process_info = ProcessInfo(self.frame_count, use_last_rates=4)
    start_time = time.time()
    next_status = start_time + 0.25
    old_pos = next(iter_trigger)
    for pos in iter_trigger:
        # duration = frame distance between two consecutive triggers
        duration = pos - old_pos
        # log.log(5, "Duration: %s" % duration)
        yield duration
        old_pos = pos
        if time.time() > next_status:
            next_status = time.time() + 1
            self._print_status(process_info)
    self._print_status(process_info)
    print
python
{ "resource": "" }
q6124
Wave2Bitstream.iter_trigger
train
def iter_trigger(self, iter_wave_values):
    """
    trigger middle crossing of the wave sinus curve

    Slides a window over ``(frame_no, value)`` samples and yields the
    frame number at each detected zero crossing (negative->positive
    always; positive->negative only while ``self.half_sinus`` is set).
    """
    # window = END_COUNT samples before + MID_COUNT middle + END_COUNT after
    window_size = (2 * self.cfg.END_COUNT) + self.cfg.MID_COUNT
    # sinus curve goes from negative into positive:
    pos_null_transit = [(0, self.cfg.END_COUNT), (self.cfg.END_COUNT, 0)]
    # sinus curve goes from positive into negative:
    neg_null_transit = [(self.cfg.END_COUNT, 0), (0, self.cfg.END_COUNT)]
    if self.cfg.MID_COUNT > 3:
        mid_index = int(round(self.cfg.MID_COUNT / 2.0))
    else:
        mid_index = 0
    in_pos = False
    for values in iter_window(iter_wave_values, window_size):
        # Split the window
        previous_values = values[:self.cfg.END_COUNT]  # e.g.: 123-----
        mid_values = values[self.cfg.END_COUNT:self.cfg.END_COUNT + self.cfg.MID_COUNT]  # e.g.: ---45---
        next_values = values[-self.cfg.END_COUNT:]  # e.g.: -----678
        # get only the value and strip the frame_no
        # e.g.: (frame_no, value) tuple -> value list
        previous_values = [i[1] for i in previous_values]
        next_values = [i[1] for i in next_values]
        # Count sign from previous and next values
        sign_info = [
            count_sign(previous_values, 0),
            count_sign(next_values, 0)
        ]
        # log.log(5, "sign info: %s" % repr(sign_info))
        # yield the mid crossing
        if in_pos == False and sign_info == pos_null_transit:
            log.log(5, "sinus curve goes from negative into positive")
            # log.debug(" %s | %s | %s" % (previous_values, mid_values, next_values))
            yield mid_values[mid_index][0]
            in_pos = True
        elif in_pos == True and sign_info == neg_null_transit:
            if self.half_sinus:
                log.log(5, "sinus curve goes from positive into negative")
                # log.debug(" %s | %s | %s" % (previous_values, mid_values, next_values))
                yield mid_values[mid_index][0]
            in_pos = False
python
{ "resource": "" }
q6125
Wave2Bitstream.iter_wave_values
train
def iter_wave_values(self):
    """
    yield frame numer + volume value from the WAVE file

    Yields ``(wave_pos, value)`` tuples, reading the file in blocks and
    unpacking them via ``array.array``.
    """
    typecode = self.get_typecode(self.samplewidth)
    if log.level >= 5:
        if self.cfg.AVG_COUNT > 1:
            # merge samples -> log output in iter_avg_wave_values
            tlm = None
        else:
            tlm = TextLevelMeter(self.max_value, 79)
    # Use only a read size which is a quare divider of the samplewidth
    # Otherwise array.array will raise: ValueError: string length not a multiple of item size
    divider = int(round(float(WAVE_READ_SIZE) / self.samplewidth))
    read_size = self.samplewidth * divider
    if read_size != WAVE_READ_SIZE:
        log.info("Real use wave read size: %i Bytes" % read_size)
    get_wave_block_func = functools.partial(self.wavefile.readframes, read_size)
    skip_count = 0
    # fall back to a manual bias when the audioop module is missing
    manually_audioop_bias = self.samplewidth == 1 and audioop is None
    for frames in iter(get_wave_block_func, ""):
        if self.samplewidth == 1:
            if audioop is None:
                log.warning("use audioop.bias() work-a-round for missing audioop.")
            else:
                # 8 bit samples are unsigned, see:
                # http://docs.python.org/2/library/audioop.html#audioop.lin2lin
                frames = audioop.bias(frames, 1, 128)
        try:
            values = array.array(typecode, frames)
        except ValueError, err:
            # e.g.:
            #     ValueError: string length not a multiple of item size
            # Work-a-round: Skip the last frames of this block
            frame_count = len(frames)
            divider = int(math.floor(float(frame_count) / self.samplewidth))
            new_count = self.samplewidth * divider
            frames = frames[:new_count]  # skip frames
            log.error(
                "Can't make array from %s frames: Value error: %s (Skip %i and use %i frames)" % (
                    frame_count, err, frame_count - new_count, len(frames)
                ))
            values = array.array(typecode, frames)
        for value in values:
            self.wave_pos += 1  # Absolute position in the frame stream
            if manually_audioop_bias:
                # audioop.bias can't be used.
                # See: http://hg.python.org/cpython/file/482590320549/Modules/audioop.c#l957
                value = value % 0xff - 128
            # if abs(value) < self.min_volume:
            #     # log.log(5, "Ignore to lower amplitude")
            #     skip_count += 1
            #     continue
            yield (self.wave_pos, value)
    log.info("Skip %i samples that are lower than %i" % (
        skip_count, self.min_volume
    ))
    log.info("Last readed Frame is: %s" % self.pformat_pos())
python
{ "resource": "" }
q6126
iter_steps
train
def iter_steps(g, steps):
    """
    Iterate over ``g`` in blocks with a length of the given ``steps``
    count; the final block may be shorter.

    >>> for v in iter_steps([1,2,3,4,5], steps=2): v
    [1, 2]
    [3, 4]
    [5]
    """
    chunk = []
    for item in g:
        chunk.append(item)
        if len(chunk) == steps:
            yield chunk
            chunk = []
    if chunk:
        yield chunk
python
{ "resource": "" }
q6127
TkPeripheryBase._new_output_char
train
def _new_output_char(self, char): """ insert in text field """ self.text.config(state=tkinter.NORMAL) self.text.insert("end", char) self.text.see("end") self.text.config(state=tkinter.DISABLED)
python
{ "resource": "" }
q6128
InputPollThread.check_cpu_interval
train
def check_cpu_interval(self, cpu_process):
    """
    Work-a-round for blocking input: periodically re-check that the CPU
    process is still alive and interrupt the main thread if it died.
    """
    try:
        # log.critical("check_cpu_interval()")
        if not cpu_process.is_alive():
            log.critical("raise SystemExit, because CPU is not alive.")
            _thread.interrupt_main()
            raise SystemExit("Kill pager.getch()")
    except KeyboardInterrupt:
        _thread.interrupt_main()
    else:
        # re-arm: check again in one second
        timer = threading.Timer(1.0, self.check_cpu_interval, args=[cpu_process])
        timer.start()
python
{ "resource": "" }
q6129
BaseTkinterGUIConfig.command_max_delay
train
def command_max_delay(self, event=None):
    """
    CPU burst max running time - self.runtime_cfg.max_delay

    Validates the Tk variable: non-numeric, negative or > 0.1 values
    fall back to the currently configured value.
    """
    try:
        candidate = self.max_delay_var.get()
    except ValueError:
        candidate = self.runtime_cfg.max_delay

    out_of_range = candidate < 0 or candidate > 0.1
    if out_of_range:
        candidate = self.runtime_cfg.max_delay

    self.runtime_cfg.max_delay = candidate
    self.max_delay_var.set(self.runtime_cfg.max_delay)
python
{ "resource": "" }
q6130
BaseTkinterGUIConfig.command_inner_burst_op_count
train
def command_inner_burst_op_count(self, event=None):
    """
    CPU burst max running time - self.runtime_cfg.inner_burst_op_count

    Non-numeric or values below 1 fall back to the currently configured
    count.
    """
    try:
        candidate = self.inner_burst_op_count_var.get()
    except ValueError:
        candidate = self.runtime_cfg.inner_burst_op_count

    if candidate < 1:
        candidate = self.runtime_cfg.inner_burst_op_count

    self.runtime_cfg.inner_burst_op_count = candidate
    self.inner_burst_op_count_var.set(self.runtime_cfg.inner_burst_op_count)
python
{ "resource": "" }
q6131
BaseTkinterGUIConfig.command_max_burst_count
train
def command_max_burst_count(self, event=None):
    """
    max CPU burst op count - self.runtime_cfg.max_burst_count

    Non-numeric or values below 1 fall back to the currently configured
    count.
    """
    try:
        candidate = self.max_burst_count_var.get()
    except ValueError:
        candidate = self.runtime_cfg.max_burst_count

    if candidate < 1:
        candidate = self.runtime_cfg.max_burst_count

    self.runtime_cfg.max_burst_count = candidate
    self.max_burst_count_var.set(self.runtime_cfg.max_burst_count)
python
{ "resource": "" }
q6132
BaseTkinterGUIConfig.command_max_run_time
train
def command_max_run_time(self, event=None):
    """
    CPU burst max running time - self.runtime_cfg.max_run_time

    A non-numeric Tk variable value falls back to the currently
    configured run time; any numeric value is accepted.
    """
    try:
        candidate = self.max_run_time_var.get()
    except ValueError:
        candidate = self.runtime_cfg.max_run_time
    self.runtime_cfg.max_run_time = candidate
    self.max_run_time_var.set(self.runtime_cfg.max_run_time)
python
{ "resource": "" }
q6133
prompt
train
def prompt(pagenum):
    """
    Show default prompt to continue and process keypress.

    It assumes terminal/console understands carriage return \r character.
    Returns False to stop pagination on ESC / Ctrl-C / q / Q.
    """
    message = "Page -%s-. Press any key to continue . . . " % pagenum
    echo(message)
    if getch() in [ESC_, CTRL_C_, 'q', 'Q']:
        return False
    # overwrite the prompt with spaces and return the cursor
    echo('\r' + ' ' * (len(message) - 1) + '\r')
python
{ "resource": "" }
q6134
page
train
def page(content, pagecallback=prompt):
    """
    Output `content`, call `pagecallback` after every page with page
    number as a parameter. `pagecallback` may return False to terminate
    pagination.

    Default callback shows prompt, waits for keypress and aborts on
    'q', ESC or Ctrl-C.
    """
    width = getwidth()
    height = getheight()
    pagenum = 1

    # fetch the first line (iterator protocol differs between Py2/Py3)
    try:
        try:
            line = content.next().rstrip("\r\n")
        except AttributeError:
            # Python 3 compatibility
            line = content.__next__().rstrip("\r\n")
    except StopIteration:
        pagecallback(pagenum)
        return

    while True:     # page cycle
        linesleft = height-1  # leave the last line for the prompt callback
        while linesleft:
            # wrap the logical line into screen-width chunks
            linelist = [line[i:i+width] for i in range(0, len(line), width)]
            if not linelist:
                linelist = ['']
            lines2print = min(len(linelist), linesleft)
            for i in range(lines2print):
                if WINDOWS and len(line) == width:
                    # avoid extra blank line by skipping linefeed print
                    echo(linelist[i])
                else:
                    print((linelist[i]))
            linesleft -= lines2print
            linelist = linelist[lines2print:]

            if linelist:  # prepare symbols left on the line for the next iteration
                line = ''.join(linelist)
                continue
            else:
                try:
                    try:
                        line = content.next().rstrip("\r\n")
                    except AttributeError:
                        # Python 3 compatibility
                        line = content.__next__().rstrip("\r\n")
                except StopIteration:
                    pagecallback(pagenum)
                    return
        if pagecallback(pagenum) == False:
            return
        pagenum += 1
python
{ "resource": "" }
q6135
reformat_v09_trace
train
def reformat_v09_trace(raw_trace, max_lines=None):
    """
    reformat v09 trace simmilar to XRoar one and add CC and Memory-Information.

    Note: v09 traces contains the register info line one trace line later!
    We reoder it as XRoar done: addr+Opcode with resulted registers

    Parameters
    ----------
    raw_trace : str
        The raw v09 trace text.
    max_lines : int, optional
        Stop after this many input lines.

    Returns
    -------
    list of str
        The reformatted trace lines.
    """
    print()
    print("Reformat v09 trace...")
    mem_info = SBC09MemInfo(sys.stderr)
    result = []
    next_update = time.time() + 1
    old_line = None
    for line_no, line in enumerate(raw_trace.splitlines()):
        if max_lines is not None and line_no >= max_lines:
            msg = "max lines %i arraived -> Abort." % max_lines
            print(msg)
            result.append(msg)
            break
        if time.time() > next_update:
            # periodic progress output (about once a second)
            print("reformat %i trace lines..." % line_no)
            next_update = time.time() + 1
        try:
            # fixed-column slices of the v09 trace line format
            pc = int(line[3:7], 16)
            op_code = int(line[10:15].strip().replace(" ", ""), 16)
            cc = int(line[57:59], 16)
            a = int(line[46:48], 16)
            b = int(line[51:53], 16)
            x = int(line[18:22], 16)
            y = int(line[25:29], 16)
            u = int(line[32:36], 16)
            s = int(line[39:43], 16)
        except ValueError as err:
            print("Error in line %i: %s" % (line_no, err))
            print("Content on line %i:" % line_no)
            print("-"*79)
            print(repr(line))
            print("-"*79)
            continue
        op_data = MC6809OP_DATA_DICT[op_code]
        mnemonic = op_data["mnemonic"]
        cc_txt = cc_value2txt(cc)
        mem = mem_info.get_shortest(pc)
        # print op_data
        register_line = "cc=%02x a=%02x b=%02x dp=?? x=%04x y=%04x u=%04x s=%04x| %s" % (
            cc, a, b, x, y, u, s, cc_txt
        )
        # v09 prints registers one line late: merge the previous line's
        # address/opcode with this line's register state.
        if old_line is None:
            line = "(init with: %s)" % register_line
        else:
            line = old_line % register_line
        old_line = "%04x| %-11s %-27s %%s | %s" % (
            pc, "%x" % op_code, mnemonic, mem
        )
        result.append(line)
    print("Done, %i trace lines converted." % line_no)
    # print raw_trace[:700]
    return result
python
{ "resource": "" }
q6136
human_duration
train
def human_duration(t):
    """
    Convert a duration in seconds into a short, friendly text
    representation (ms / sec / min / hours / days / weeks / months /
    years).

    Raises TypeError for non-numeric input.
    """
    if not isinstance(t, (int, float)):
        raise TypeError("human_duration() argument must be integer or float")

    # sub-hour durations have their own fixed units
    if t < 1:
        return u"%.1f ms" % round(t * 1000, 1)
    if t < 60:
        return u"%.1f sec" % round(t, 1)
    if t < 60 * 60:
        return u"%.1f min" % round(t / 60, 1)

    # pick the largest calendar-style unit that fits at least once
    chunks = (
        (60 * 60 * 24 * 365, u'years'),
        (60 * 60 * 24 * 30, u'months'),
        (60 * 60 * 24 * 7, u'weeks'),
        (60 * 60 * 24, u'days'),
        (60 * 60, u'hours'),
    )
    for seconds, name in chunks:
        count = t / seconds
        if count >= 1:
            return u"%(number).1f %(type)s" % {
                'number': round(count, 1), 'type': name,
            }
python
{ "resource": "" }
q6137
average
train
def average(old_avg, current_value, count):
    """
    Update a running average with one new value.

    ``count`` is the number of values already folded into ``old_avg``
    (must start with 0).  ``old_avg is None`` means "no average yet",
    in which case the new value is returned as-is.

    >>> average(None, 3.23, 0)
    3.23
    >>> average(2.5, 5, 4)
    3.0
    """
    if old_avg is None:
        return current_value
    total = float(old_avg) * count + current_value
    return total / (count + 1)
python
{ "resource": "" }
q6138
iter_window
train
def iter_window(g, window_size):
    """
    Iterate over ``g`` item-by-item and yield a sliding window of width
    ``window_size``.  Each yielded window is an independent list copy,
    so callers may mutate it freely.

    >>> for v in iter_window([1,2,3,4], window_size=2): v
    [1, 2]
    [2, 3]
    [3, 4]
    """
    window = collections.deque(maxlen=window_size)
    for item in g:
        window.append(item)
        if len(window) < window_size:
            continue  # still filling the first window
        yield list(window)
python
{ "resource": "" }
q6139
get_word
train
def get_word(byte_iterator):
    """
    Consume two bytes from *byte_iterator* and return them as one
    big-endian uint16 value.

    Raises TypeError if a consumed item is not an integer.

    >>> g=iter([0x1e, 0x12])
    >>> v=get_word(g)
    >>> v
    7698
    >>> hex(v)
    '0x1e12'
    """
    byte_values = list(itertools.islice(byte_iterator, 2))
    try:
        word = (byte_values[0] << 8) | byte_values[1]
    # FIX: was Python-2-only ``except TypeError, err`` syntax; the rest of
    # this file uses the Python 3 ``as`` form (see reformat_v09_trace).
    except TypeError as err:
        raise TypeError(
            "Can't build word from %s: %s" % (repr(byte_values), err)
        )
    return word
python
{ "resource": "" }
q6140
_run
train
def _run(*args, **kwargs): """ Run current executable via subprocess and given args """ verbose = kwargs.pop("verbose", False) if verbose: click.secho(" ".join([repr(i) for i in args]), bg='blue', fg='white') executable = args[0] if not os.path.isfile(executable): raise RuntimeError("First argument %r is not a existing file!" % executable) if not os.access(executable, os.X_OK): raise RuntimeError("First argument %r exist, but is not executeable!" % executable) return subprocess.Popen(args, **kwargs)
python
{ "resource": "" }
q6141
FileContent.add_block_data
train
def add_block_data(self, block_length, data): """ add a block of tokenized BASIC source code lines. >>> cfg = Dragon32Config >>> fc = FileContent(cfg) >>> block = [ ... 0x1e,0x12,0x0,0xa,0x80,0x20,0x49,0x20,0xcb,0x20,0x31,0x20,0xbc,0x20,0x31,0x30,0x0, ... 0x0,0x0] >>> len(block) 19 >>> fc.add_block_data(19,iter(block)) 19 Bytes parsed >>> fc.print_code_lines() 10 FOR I = 1 TO 10 >>> block = iter([ ... 0x1e,0x29,0x0,0x14,0x87,0x20,0x49,0x3b,0x22,0x48,0x45,0x4c,0x4c,0x4f,0x20,0x57,0x4f,0x52,0x4c,0x44,0x21,0x22,0x0, ... 0x0,0x0]) >>> fc.add_block_data(999,block) 25 Bytes parsed ERROR: Block length value 999 is not equal to parsed bytes! >>> fc.print_code_lines() 10 FOR I = 1 TO 10 20 PRINT I;"HELLO WORLD!" >>> block = iter([ ... 0x1e,0x31,0x0,0x1e,0x8b,0x20,0x49,0x0, ... 0x0,0x0]) >>> fc.add_block_data(10,block) 10 Bytes parsed >>> fc.print_code_lines() 10 FOR I = 1 TO 10 20 PRINT I;"HELLO WORLD!" 30 NEXT I Test function tokens in code >>> fc = FileContent(cfg) >>> data = iter([ ... 0x1e,0x4a,0x0,0x1e,0x58,0xcb,0x58,0xc3,0x4c,0xc5,0xff,0x88,0x28,0x52,0x29,0x3a,0x59,0xcb,0x59,0xc3,0x4c,0xc5,0xff,0x89,0x28,0x52,0x29,0x0, ... 0x0,0x0 ... ]) >>> fc.add_block_data(30, data) 30 Bytes parsed >>> fc.print_code_lines() 30 X=X+L*SIN(R):Y=Y+L*COS(R) Test high line numbers >>> fc = FileContent(cfg) >>> data = [ ... 0x1e,0x1a,0x0,0x1,0x87,0x20,0x22,0x4c,0x49,0x4e,0x45,0x20,0x4e,0x55,0x4d,0x42,0x45,0x52,0x20,0x54,0x45,0x53,0x54,0x22,0x0, ... 0x1e,0x23,0x0,0xa,0x87,0x20,0x31,0x30,0x0, ... 0x1e,0x2d,0x0,0x64,0x87,0x20,0x31,0x30,0x30,0x0, ... 0x1e,0x38,0x3,0xe8,0x87,0x20,0x31,0x30,0x30,0x30,0x0, ... 0x1e,0x44,0x27,0x10,0x87,0x20,0x31,0x30,0x30,0x30,0x30,0x0, ... 0x1e,0x50,0x80,0x0,0x87,0x20,0x33,0x32,0x37,0x36,0x38,0x0, ... 0x1e,0x62,0xf9,0xff,0x87,0x20,0x22,0x45,0x4e,0x44,0x22,0x3b,0x36,0x33,0x39,0x39,0x39,0x0,0x0,0x0 ... 
] >>> len(data) 99 >>> fc.add_block_data(99, iter(data)) 99 Bytes parsed >>> fc.print_code_lines() 1 PRINT "LINE NUMBER TEST" 10 PRINT 10 100 PRINT 100 1000 PRINT 1000 10000 PRINT 10000 32768 PRINT 32768 63999 PRINT "END";63999 """ # data = list(data) # # print repr(data) # print_as_hex_list(data) # print_codepoint_stream(data) # sys.exit() # create from codepoint list a iterator data = iter(data) byte_count = 0 while True: try: line_pointer = get_word(data) except (StopIteration, IndexError), err: log.error("No line pointer information in code line data. (%s)" % err) break # print "line_pointer:", repr(line_pointer) byte_count += 2 if not line_pointer: # arrived [0x00, 0x00] -> end of block break try: line_number = get_word(data) except (StopIteration, IndexError), err: log.error("No line number information in code line data. (%s)" % err) break # print "line_number:", repr(line_number) byte_count += 2 # data = list(data) # print_as_hex_list(data) # print_codepoint_stream(data) # data = iter(data) # get the code line: # new iterator to get all characters until 0x00 arraived code = iter(data.next, 0x00) code = list(code) # for len() byte_count += len(code) + 1 # from 0x00 consumed in iter() # print_as_hex_list(code) # print_codepoint_stream(code) # convert to a plain ASCII string code = bytes2codeline(code) self.code_lines.append( CodeLine(line_pointer, line_number, code) ) print "%i Bytes parsed" % byte_count if block_length != byte_count: print "ERROR: Block length value %i is not equal to parsed bytes!" % block_length
python
{ "resource": "" }
q6142
FileContent.add_ascii_block
train
def add_ascii_block(self, block_length, data):
    """
    Parse one block of plain ASCII BASIC source code lines and append them
    to ``self.code_lines``.

    The block starts with a carriage return (0x0d) and each line has the
    form ``<line number> <code>``, terminated by 0x0d.

    :param block_length: expected number of bytes; a mismatch is logged
    :param data: iterable of byte values (codepoints)
    """
    data = iter(data)
    data.next()  # Skip first \r
    byte_count = 1  # incl. first \r
    while True:
        # collect characters up to the next \r (= one source line)
        code = iter(data.next, 0xd)
        code = "".join([chr(c) for c in code])
        if not code:
            # empty line -> treat as end of the ASCII listing
            log.warning("code ended.")
            break
        byte_count += len(code) + 1  # +1 for the \r consumed by iter()
        try:
            line_number, code = code.split(" ", 1)
        except ValueError, err:  # Python 2 except syntax
            print "\nERROR: Splitting linenumber in %s: %s" % (repr(code), err)
            break
        try:
            line_number = int(line_number)
        except ValueError, err:
            # skip lines whose first word is not a number
            print "\nERROR: Part '%s' is not a line number!" % repr(line_number)
            continue
        self.code_lines.append(
            CodeLine(None, line_number, code)
        )
    print "%i Bytes parsed" % byte_count
    if block_length != byte_count:
        log.error(
            "Block length value %i is not equal to parsed bytes!" % block_length
        )
python
{ "resource": "" }
q6143
validate
train
def validate(validation, dictionary):
    """
    Validate that a dictionary passes a set of key-based validators.

    If all of the keys in the dictionary are within the parameters
    specified by the validation mapping, the validation passes.

    :param validation: a mapping of keys to a validator or list of validators
    :type validation: dict
    :param dictionary: dictionary to be validated
    :type dictionary: dict
    :return: a ValidationResult (valid flag + mapping of fields to errors)
    """
    errors = defaultdict(list)
    for key, rules in validation.items():
        if isinstance(rules, (list, tuple)):
            # a Required marker short-circuits: a missing key is reported
            # once and the remaining validators are skipped
            if Required in rules and not Required(key, dictionary):
                errors[key] = ["must be present"]
                continue
            _validate_list_helper(validation, dictionary, key, errors)
        elif rules == Required:
            if not Required(key, dictionary):
                errors[key] = ["must be present"]
        else:
            _validate_and_store_errs(rules, dictionary, key, errors)

    if errors:
        # downgrade defaultdict to dict for prettier output
        return ValidationResult(valid=False, errors=dict(errors))
    return ValidationResult(valid=True, errors={})
python
{ "resource": "" }
q6144
ArgSpec
train
def ArgSpec(*args, **kwargs):
    """
    Build a validator that checks a function's signature.

    The returned callable inspects its argument via ``getargspec`` and
    returns True only when the function's positional argument names (in
    order) equal ``args`` and its keyword arguments with defaults equal
    ``kwargs``.

    Example::

        validations = {"foo": [ArgSpec("a", "b", "c", bar="baz")]}

        def pass_func(a, b, c, bar="baz"): pass   # matches
        def fail_func(b, c, a, baz="bar"): pass   # does not match
    """
    def argspec_lambda(value):
        argspec = getargspec(value)
        argspec_kw_vals = ()
        if argspec.defaults is not None:
            argspec_kw_vals = argspec.defaults
        # Reconstruct the {name: default} mapping: defaults align with the
        # *last* len(defaults) entries of argspec.args, so walk both from
        # the end.
        kw_vals = {}
        arg_offset = 0
        arg_len = len(argspec.args) - 1
        for val in argspec_kw_vals[::-1]:
            kw_vals[argspec.args[arg_len - arg_offset]] = val
            arg_offset += 1
        # the keyword defaults must match exactly ...
        if kwargs == kw_vals:
            # ... and so must the count and order of the positional args
            if len(args) != arg_len - arg_offset + 1:
                return False
            index = 0
            for arg in args:
                if argspec.args[index] != arg:
                    return False
                index += 1
            return True
        return False
    argspec_lambda.err_message = "must match argspec ({0}) {{{1}}}".format(args, kwargs)
    # as little sense as negating this makes, best to just be consistent.
    argspec_lambda.not_message = "must not match argspec ({0}) {{{1}}}".format(args, kwargs)
    return argspec_lambda
python
{ "resource": "" }
q6145
GitRepository.get_snapshots
train
def get_snapshots(self, **kwargs):
    """
    Build a GitSnapshot for every commit in this repository.

    Keyword arguments are passed through to ``repository.get_commits``.
    Each snapshot gets a hash derived from its SHA, a fresh random pk and
    a reference to this project.

    :return: list of GitSnapshot objects
    """
    snapshots = []
    for commit in self.repository.get_commits(**kwargs):
        # turn the raw unix timestamps into datetime objects
        for ts_key in ('committer_date', 'author_date'):
            commit[ts_key] = datetime.datetime.fromtimestamp(commit[ts_key + '_ts'])
        snapshot = GitSnapshot(commit)
        h = Hasher()
        h.add(snapshot.sha)
        snapshot.hash = h.digest.hexdigest()
        snapshot.project = self.project
        snapshot.pk = uuid.uuid4().hex
        snapshots.append(snapshot)
    return snapshots
python
{ "resource": "" }
q6146
diff_objects
train
def diff_objects(objects_a, objects_b, key, comparator=None, with_unchanged=False):
    """
    Return a "diff" between two lists of objects.

    :param objects_a: the "old" set of objects
    :param objects_b: the "new" set of objects
    :param key: callable mapping an object to the value that identifies
        objects with the same location in both sets (e.g. a file path)
    :param comparator: optional callable returning 0 when two objects are
        identical, non-zero otherwise
    :param with_unchanged: also include an 'unchanged' list in the result
    :return: dict with 'added', 'deleted' and 'modified' lists (plus
        'unchanged' when requested)
    """
    objects_by_key = {'a': defaultdict(list), 'b': defaultdict(list)}
    for name, objects in ('a', objects_a), ('b', objects_b):
        d = objects_by_key[name]
        for obj in objects:
            d[key(obj)].append(obj)

    # keys present in only one set are trivially added / deleted
    added_objects = [obj for k, objs in objects_by_key['b'].items()
                     if k not in objects_by_key['a'] for obj in objs]
    deleted_objects = [obj for k, objs in objects_by_key['a'].items()
                       if k not in objects_by_key['b'] for obj in objs]
    joint_keys = [k for k in objects_by_key['a'] if k in objects_by_key['b']]
    modified_objects = []
    modified_keys = set()

    # we go through the keys that exist in both object sets
    for k in joint_keys:
        objs_a = objects_by_key['a'][k]
        objs_b = objects_by_key['b'][k]
        if len(objs_a) > 1 or len(objs_b) > 1:
            # Ambiguous: several objects share the same key. Remove pairs
            # the comparator identifies as identical, then treat the surplus
            # on either side as added/deleted.
            objs_a_copy = objs_a[:]
            objs_b_copy = objs_b[:]
            if comparator:
                for obj_a in objs_a:
                    for obj_b in objs_b_copy:
                        if comparator(obj_a, obj_b) == 0:
                            # identical pair: drop from both working sets
                            objs_a_copy.remove(obj_a)
                            objs_b_copy.remove(obj_b)
                            break
            # here we cannot distinguish objects any further
            if len(objs_b_copy) > len(objs_a_copy):
                # arbitrarily mark the surplus objects in b as added
                added_objects.extend(objs_b_copy[len(objs_a_copy):])
            elif len(objs_a_copy) > len(objs_b_copy):
                # arbitrarily mark the surplus objects in a as deleted
                deleted_objects.extend(objs_a_copy[len(objs_b_copy):])
        else:
            if comparator and comparator(objs_a[0], objs_b[0]) != 0:
                # one object on each side and they differ
                modified_objects.append(objs_a[0])
                modified_keys.add(k)

    result = {
        'added': added_objects,
        'deleted': deleted_objects,
        'modified': modified_objects,
    }
    if with_unchanged:
        # BUG FIX: the original referenced the undefined name
        # ``objects_b_by_key`` here (NameError at runtime) and compared
        # whole lists against single objects. Collect the objects from
        # set "b" whose key was not flagged as modified instead.
        result['unchanged'] = [obj for k in joint_keys if k not in modified_keys
                               for obj in objects_by_key['b'][k]]
    return result
python
{ "resource": "" }
q6147
CodeEnvironment.save_file_revisions
train
def save_file_revisions(self, snapshot, file_revisions):
    """
    Persist file revisions together with their analyzer issues.

    Issues and their occurrences are converted into separate documents
    (Issue / IssueOccurrence) so they can be searched and retrieved
    individually. Returns the created/seen documents grouped under the
    keys 'issues' and 'occurrences'.

    :param snapshot: the snapshot the revisions belong to (currently not
        read inside this method — kept for interface compatibility)
    :param file_revisions: iterable of file revision objects whose
        ``results`` map analyzer names to result dicts
    :return: defaultdict(list) with 'issues' and 'occurrences' lists
    """
    annotations = defaultdict(list)
    for file_revision in file_revisions:
        issues_results = {}
        # pull the 'issues' entries out of each analyzer's results so the
        # remaining results can be stored directly on the file revision
        for analyzer_name, results in file_revision.results.items():
            if 'issues' in results:
                issues_results[analyzer_name] = results['issues']
                del results['issues']
                # cap pathological analyzers with a single marker issue
                # NOTE(review): this checks len(issues_results) (number of
                # analyzers), which looks like it was meant to check the
                # number of issues — confirm against the original project.
                if len(issues_results) > 1000:
                    issues_results[analyzer_name] = [{
                        'code': 'TooManyIssues',
                        'analyzer': analyzer_name,
                    }]
        with self.project.backend.transaction():
            self.project.backend.save(file_revision)

        # NOTE(review): defined but never used below
        def location_sorter(issue):
            if issue['location'] and issue['location'][0] and issue['location'][0][0]:
                return issue['location'][0][0][0]
            return 0

        with self.project.backend.transaction():
            for analyzer_name, issues in issues_results.items():
                grouped_issues = group_issues_by_fingerprint(issues)
                for issue_dict in grouped_issues:
                    # an issue is identified by analyzer + code + fingerprint
                    hasher = Hasher()
                    hasher.add(analyzer_name)
                    hasher.add(issue_dict['code'])
                    hasher.add(issue_dict['fingerprint'])
                    issue_dict['hash'] = hasher.digest.hexdigest()
                    try:
                        # we check if the issue already exists
                        issue = self.project.backend.get(Issue, {'hash': issue_dict['hash'],
                                                                 'project': self.project})
                    except Issue.DoesNotExist:
                        # if not, we create it (without location/occurrence data)
                        d = issue_dict.copy()
                        d['analyzer'] = analyzer_name
                        if 'location' in d:
                            del d['location']
                        if 'occurrences' in d:
                            del d['occurrences']
                        issue = Issue(d)
                        issue.project = self.project
                        self.project.backend.save(issue)
                    for occurrence in issue_dict['occurrences']:
                        # an occurrence is identified by revision + issue + position
                        hasher = Hasher()
                        hasher.add(file_revision.hash)
                        hasher.add(issue.hash)
                        hasher.add(occurrence.get('from_row'))
                        hasher.add(occurrence.get('from_column'))
                        hasher.add(occurrence.get('to_row'))
                        hasher.add(occurrence.get('to_column'))
                        hasher.add(occurrence.get('sequence'))
                        occurrence['hash'] = hasher.digest.hexdigest()
                        try:
                            # we check if the occurrence already exists
                            occurrence = self.project.backend.get(IssueOccurrence,
                                                                  {'hash': occurrence['hash'],
                                                                   'issue': issue})
                        except IssueOccurrence.DoesNotExist:
                            # if not, we create it
                            occurrence = IssueOccurrence(occurrence)
                            occurrence.issue = issue
                            occurrence.file_revision = file_revision
                            self.project.backend.save(occurrence)
                        annotations['occurrences'].append(occurrence)
                    annotations['issues'].append(issue)
    return annotations
python
{ "resource": "" }
q6148
update
train
def update(d, ud):
    """
    Recursively merge the values of *ud* into *d* (in place).

    Nested dicts are merged key by key; any other value (or a dict value
    replacing a non-dict) simply overwrites the existing entry. Passing
    ``ud=None`` is a no-op.

    :param d: destination dict, mutated in place
    :param ud: source dict (or None)
    """
    if ud is None:
        return
    for key, value in ud.items():
        # BUG FIX: the original recursed whenever ``value`` was a dict,
        # crashing with TypeError when d[key] was not a dict itself.
        # Only merge when both sides are dicts; otherwise overwrite.
        if key in d and isinstance(value, dict) and isinstance(d[key], dict):
            update(d[key], value)
        else:
            d[key] = value
python
{ "resource": "" }
q6149
Command.find_git_repository
train
def find_git_repository(self, path):
    """
    Walk upwards from *path* looking for a directory containing ``.git``.

    :param path: directory to start the search from
    :return: the repository root directory, or None if nothing was found
    """
    while path:
        git_path = os.path.join(path, '.git')
        if os.path.exists(git_path) and os.path.isdir(git_path):
            return path
        parent = os.path.dirname(path)
        if parent == path:
            # BUG FIX: at the filesystem root os.path.dirname() returns its
            # input unchanged, so the original loop never terminated when
            # no repository existed anywhere up the chain.
            return None
        path = parent
    return None
python
{ "resource": "" }
q6150
get_hash
train
def get_hash(node, fields=None, exclude=['pk', '_id'], target='pk'):
    """
    Generate a unique hash for a given node in the syntax tree.

    :param node: the node (dict-like) to hash
    :param fields: if given, only these keys contribute to the hash
    :param exclude: keys that never contribute (mutable default, but it is
        never mutated here)
    :param target: if a nested dict contains this key, only that value is
        hashed instead of the whole dict (a primary key stands in for the
        full object)
    :return: hex digest string
    """
    hasher = Hasher()

    def add_to_hash(value):
        if isinstance(value, dict):
            if target in value:
                # the dict has a primary key: that alone identifies it
                add_to_hash(value[target])
            else:
                attribute_list = []  # NOTE(review): unused leftover
                # hash keys and values in a deterministic (sorted) order
                for key, v in sorted(value.items(), key=lambda x: x[0]):
                    if (fields is not None and key not in fields) \
                            or (exclude is not None and key in exclude):
                        continue
                    add_to_hash(key)
                    add_to_hash(v)
        elif isinstance(value, (tuple, list)) and value and isinstance(value[0], (dict, node_class)):
            # node_class is defined elsewhere in this module.
            # Lists of child nodes: include each child's position and content.
            for i, v in enumerate(value):
                hasher.add(i)
                add_to_hash(v)
        else:
            # scalar (or list of scalars): feed it to the hasher directly
            hasher.add(value)

    add_to_hash(node)
    return hasher.digest.hexdigest()
python
{ "resource": "" }
q6151
Reporter.add_message
train
def add_message(self, msg_id, location, msg): """Client API to send a message""" self._messages.append((msg_id,location,msg))
python
{ "resource": "" }
q6152
BaseAnalyzer.get_fingerprint_from_code
train
def get_fingerprint_from_code(self, file_revision, location, extra_data=None):
    """
    Generate a fingerprint from the code snippet(s) covered by *location*.

    Can be used by derived analyzers to generate fingerprints based on
    code if nothing better is available.

    :param file_revision: object providing ``get_file_content()``
    :param location: iterable of ((from_row, from_column), (to_row, to_column))
        pairs; rows are 1-based, columns are 0-based slice offsets
    :param extra_data: optional extra value mixed into the hash
    :return: hex digest string
    :raises ValueError: if a location has to_row < from_row
    """
    code = file_revision.get_file_content()
    # normalize to unicode (Python 2), ignoring undecodable bytes
    if not isinstance(code, unicode):
        code = unicode(code, errors='ignore')
    lines = code.split(u"\n")
    s = ""
    for l in location:
        ((from_row, from_column), (to_row, to_column)) = l
        if from_column is None:
            # a location without column information cannot be resolved
            continue
        if from_row == to_row:
            # snippet contained in a single line
            s += lines[from_row - 1][from_column:to_column]
        else:
            if to_row < from_row:
                raise ValueError("from_row must be smaller than to_row")
            # first line from from_column, middle lines fully,
            # last line up to to_column
            s += lines[from_row - 1][from_column:]
            current_row = from_row + 1
            while current_row < to_row:
                s += lines[current_row - 1]
                current_row += 1
            s += lines[current_row - 1][:to_column]
    hasher = Hasher()
    hasher.add(s)
    if extra_data is not None:
        hasher.add(extra_data)
    return hasher.digest.hexdigest()
python
{ "resource": "" }
q6153
Project.get_issue_classes
train
def get_issue_classes(self,backend = None,enabled = True,sort = None,**kwargs): """ Retrieves the issue classes for a given backend :param backend: A backend to use. If None, the default backend will be used :param enabled: Whether to retrieve enabled or disabled issue classes. Passing `None` will retrieve all issue classes. """ if backend is None: backend = self.backend query = {'project_issue_classes.project' : self} if enabled is not None: query['project_issue_classes.enabled'] = enabled issue_classes = backend.filter(self.IssueClass,query, **kwargs) if sort is not None: issue_classes = issue_classes.sort(sort) return issue_classes
python
{ "resource": "" }
q6154
replace_symbol
train
def replace_symbol(text, replacement_text="", is_replace_consecutive_chars=False, is_strip=False):
    """
    Replace every symbol character in ``text``.

    :param str text: Input text.
    :param str replacement_text: Replacement text.
    :param bool is_replace_consecutive_chars: collapse runs of the
        replacement text into one occurrence.
    :param bool is_strip: strip the replacement text from both ends.
    :return: A replacement string.
    :rtype: str
    :raises TypeError: if ``text`` is not a string
    """
    try:
        result = __RE_SYMBOL.sub(replacement_text, preprocess(text))
    except (TypeError, AttributeError):
        raise TypeError("text must be a string")

    if not replacement_text:
        # nothing to collapse or strip when replacing with the empty string
        return result

    if is_replace_consecutive_chars:
        pattern = "{}+".format(re.escape(replacement_text))
        result = re.sub(pattern, replacement_text, result)
    if is_strip:
        result = result.strip(replacement_text)
    return result
python
{ "resource": "" }
q6155
validate_filename
train
def validate_filename(filename, platform=None, min_len=1, max_len=_DEFAULT_MAX_FILENAME_LEN):
    """
    Verify whether ``filename`` is a valid file name.

    :param filename: filename to validate
    :param platform: target platform name (e.g. ``"Linux"``, ``"Windows"``,
        ``"macOS"``, ``"universal"``)
    :param min_len: minimum allowed length (>= 1)
    :param max_len: maximum allowed length (platform dependent upper bounds
        apply, e.g. 260 on Windows)
    :raises InvalidLengthError: if the name is too short/long
    :raises InvalidCharError: if the name contains invalid characters
    :raises ReservedNameError: if the name is reserved by the OS
    """
    sanitizer = FileNameSanitizer(platform=platform, min_len=min_len, max_len=max_len)
    sanitizer.validate(filename)
python
{ "resource": "" }
q6156
validate_filepath
train
def validate_filepath(file_path, platform=None, min_len=1, max_len=None):
    """
    Verify whether ``file_path`` is a valid file path.

    :param file_path: file path to validate
    :param platform: target platform name
    :param min_len: minimum allowed length (>= 1)
    :param max_len: maximum allowed length; if None the platform default is
        used (Linux 4096, macOS 1024, Windows 260)
    :raises NullNameError: if the path is empty
    :raises InvalidCharError: if the path contains invalid characters
    :raises InvalidLengthError: if the path is too long
    """
    sanitizer = FilePathSanitizer(platform=platform, min_len=min_len, max_len=max_len)
    sanitizer.validate(file_path)
python
{ "resource": "" }
q6157
sanitize_filename
train
def sanitize_filename(
    filename, replacement_text="", platform=None, max_len=_DEFAULT_MAX_FILENAME_LEN
):
    """
    Make a valid filename from a string.

    Invalid characters (unprintables and platform specific characters) are
    replaced with ``replacement_text``; OS reserved names get an underscore
    appended; names longer than ``max_len`` are truncated.

    :param filename: filename (str or PathLike) to sanitize
    :param replacement_text: replacement for invalid characters
    :param platform: target platform name
    :param max_len: maximum allowed name length
    :return: sanitized filename, same type as the input
    :raises ValueError: if the filename is invalid beyond repair
    """
    sanitizer = FileNameSanitizer(platform=platform, max_len=max_len)
    return sanitizer.sanitize(filename, replacement_text)
python
{ "resource": "" }
q6158
sanitize_filepath
train
def sanitize_filepath(file_path, replacement_text="", platform=None, max_len=None):
    """
    Make a valid file path from a string.

    Invalid path characters (including non-printables) are replaced with
    ``replacement_text``; paths longer than ``max_len`` are truncated.

    :param file_path: file path (str or PathLike) to sanitize
    :param replacement_text: replacement for invalid characters
    :param platform: target platform name
    :param max_len: maximum allowed path length; if None the platform
        default is used (Linux 4096, macOS 1024, Windows 260)
    :return: sanitized file path, same type as the input
    :raises ValueError: if the file path is invalid beyond repair
    """
    sanitizer = FilePathSanitizer(platform=platform, max_len=max_len)
    return sanitizer.sanitize(file_path, replacement_text)
python
{ "resource": "" }
q6159
sanitize_ltsv_label
train
def sanitize_ltsv_label(label, replacement_text=""):
    """
    Replace all characters that are invalid in an LTSV label.

    :param str label: Input label (must not be empty).
    :param str replacement_text: Replacement text.
    :return: A replacement string.
    :rtype: str
    """
    # reject empty labels before doing any work
    validate_null_string(label, error_msg="label is empty")
    sanitized = __RE_INVALID_LTSV_LABEL.sub(replacement_text, preprocess(label))
    return sanitized
python
{ "resource": "" }
q6160
Url.domain
train
def domain(self): """ Return domain from the url """ remove_pac = self.cleanup.replace( "https://", "").replace("http://", "").replace("www.", "") try: return remove_pac.split('/')[0] except: return None
python
{ "resource": "" }
q6161
KitsuUser.search
train
def search(self, term):
    """
    Search for a user by name.

    :param str term: What to search for.
    :return: The results as a SearchWrapper iterator or None if no results.
    :rtype: SearchWrapper or None
    :raises ServerError: on any non-200 response
    """
    resp = requests.get(self.apiurl + "/users",
                        params={"filter[name]": term},
                        headers=self.header)
    if resp.status_code != 200:
        raise ServerError
    payload = resp.json()
    if not payload['meta']['count']:
        return None
    next_link = payload['links'].get('next')
    return SearchWrapper(payload['data'], next_link, self.header)
python
{ "resource": "" }
q6162
KitsuUser.create
train
def create(self, data):
    """
    Create a user. You need only provide the attributes (see the API docs
    for which attributes are required).

    :param data: A dictionary of the required attributes
    :return: Dictionary returned by the server
    :rtype: dict
    :raises ServerError: on any non-200 response
    """
    payload = {"data": {"type": "users", "attributes": data}}
    resp = requests.post(self.apiurl + "/users", json=payload, headers=self.header)
    if resp.status_code != 200:
        raise ServerError
    return resp.json()
python
{ "resource": "" }
q6163
KitsuUser.get
train
def get(self, uid):
    """
    Get a user's information by their id.

    :param uid str: User ID
    :return: The user's information or None
    :rtype: dict or None
    :raises ServerError: on any non-200 response
    """
    resp = requests.get(self.apiurl + "/users/{}".format(uid), headers=self.header)
    if resp.status_code != 200:
        raise ServerError
    body = resp.json()
    # an empty/falsy 'data' payload means the user was not found
    return body['data'] or None
python
{ "resource": "" }
q6164
KitsuUser.update
train
def update(self, uid, data, token):
    """
    Update a user's data. Requires an auth token.

    :param uid str: User ID to update
    :param data dict: attribute names/values to change (attributes only)
    :param token str: The authorization token for this user
    :return: True on success
    :rtype: bool
    :raises ServerError: on any non-200 response
    """
    payload = {"data": {"id": uid, "type": "users", "attributes": data}}
    # note: this intentionally mutates self.header (same behavior as before)
    auth_headers = self.header
    auth_headers['Authorization'] = "Bearer {}".format(token)
    resp = requests.patch(self.apiurl + "/users/{}".format(uid),
                          json=payload, headers=auth_headers)
    if resp.status_code != 200:
        raise ServerError
    return True
python
{ "resource": "" }
q6165
VNDB.get
train
def get(self, stype, flags, filters, options=None):
    """
    Query the VNDB "get" API for data related to Visual Novels.

    :param str stype: what to search for; one of: vn, release, producer,
        character, votelist, vnlist, wishlist
    :param flags: comma separated string (or list) of flags selecting which
        data fields to return (see the VNDB d11 docs)
    :param str filters: a single filter expression of the form
        <filter><op>"<term>" (strings) or <filter><op><number>; missing
        quoting is added automatically
    :param dict options: optional dict of extra query options (paging etc.)
    :return dict: {'pages': bool, 'data': list}; if 'pages' is True, call
        again with a page option to get more results
    :raises ServerError: if the API returns an error
    :raises SyntaxError: on malformed stype/flags/filters arguments
    """
    if not isinstance(flags, str):
        if isinstance(flags, list):
            finflags = ",".join(flags)
        else:
            raise SyntaxError("Flags should be a list or comma separated string")
    else:
        finflags = flags
    if not isinstance(filters, str):
        raise SyntaxError("Filters needs to be a string in the format Filter<op>Value. The simplest form is search=\"<Term>\".")
    if stype not in self.stypes:
        raise SyntaxError("{} not a valid Search type.".format(stype))
    # NOTE(review): this is true whenever either quote character is absent;
    # it looks like it may have been meant as `and`. Left as-is.
    if '"' not in filters or "'" not in filters:
        # add the missing quotes around the filter value
        newfilters = self.helperpat.split(filters)
        newfilters = [x.strip() for x in newfilters]
        newfilters[1] = '"' + newfilters[1] + '"'
        op = self.helperpat.search(filters)
        newfilters = op.group(0).join(newfilters)
        command = '{} {} ({}){}'.format(
            stype, finflags, newfilters,
            ' ' + ujson.dumps(options) if options is not None else '')
    else:
        command = '{} {} ({}){}'.format(
            stype, finflags, filters,
            ' ' + ujson.dumps(options) if options is not None else '')
    data = self.connection.send_command('get', command)
    if 'id' in data:
        raise ServerError(data['msg'], data['id'])
    else:
        # BUG FIX: dict.get() takes no keyword arguments; the original
        # `data.get('more', default=False)` raised TypeError at runtime.
        return {'pages': data.get('more', False), 'data': data['items']}
python
{ "resource": "" }
q6166
VNDB.set
train
def set(self, stype, sid, fields):
    """
    Modify something in the database if logged in.

    :param str stype: What are we modifying? One of: votelist, vnlist, wishlist
    :param int sid: The ID that we're modifying.
    :param dict fields: A dictionary of the fields and their values
    :raises ServerError: Raises a ServerError if an error is returned
    :raises SyntaxError: if stype is not a valid list type
    :return bool: True if successful
    """
    if stype not in ['votelist', 'vnlist', 'wishlist']:
        raise SyntaxError("{} is not a valid type for set. Should be one of: votelist, vnlist or wishlist.".format(stype))
    # BUG FIX: the original interpolated the *builtin* ``id`` function here
    # instead of the ``sid`` parameter, producing commands like
    # "votelist <built-in function id> {...}".
    command = "{} {} {}".format(stype, sid, ujson.dumps(fields))
    data = self.connection.send_command('set', command)
    if 'id' in data:
        raise ServerError(data['msg'], data['id'])
    return True
python
{ "resource": "" }
q6167
AGet.anime
train
def anime(self, item_id):
    """
    Retrieve an anime's details from the AniList GraphQL API.

    :param int item_id: the anime's ID
    :return: the decoded JSON response, or None if the body is not JSON
    :rtype: dict or NoneType
    """
    # GraphQL query; $id is bound through the variables payload below.
    query_string = """\
    query ($id: Int) {
        Media(id: $id, type: ANIME) {
            title { romaji english }
            startDate { year month day }
            endDate { year month day }
            coverImage { large }
            bannerImage
            format
            status
            episodes
            season
            description
            averageScore
            meanScore
            genres
            synonyms
            nextAiringEpisode { airingAt timeUntilAiring episode }
        }
    }
    """
    vars = {"id": item_id}
    r = requests.post(self.settings['apiurl'],
                      headers=self.settings['header'],
                      json={'query': query_string, 'variables': vars})
    jsd = r.text
    try:
        jsd = json.loads(jsd)
    except ValueError:
        # the server returned something that is not JSON
        return None
    else:
        return jsd
python
{ "resource": "" }
q6168
AGet.review
train
def review(self, item_id, html=True):
    """
    Retrieve a review by its own ID (with API v2, reviews have their own
    IDs). Set ``html`` to False to get the review body without HTML
    formatting (the API default is True).

    :param item_id: the ID of the review
    :param html: do you want the body returned with HTML formatting?
    :return: the decoded JSON response, or None if the body is not JSON
    :rtype: dict or NoneType
    """
    # GraphQL query; $id and $html are bound through the variables payload.
    query_string = """\
    query ($id: Int, $html: Boolean) {
        Review (id: $id) {
            summary
            body(asHtml: $html)
            score
            rating
            ratingAmount
            createdAt
            updatedAt
            private
            media { id }
            user { id name avatar { large } }
        }
    }
    """
    vars = {"id": item_id, "html": html}
    r = requests.post(self.settings['apiurl'],
                      headers=self.settings['header'],
                      json={'query': query_string, 'variables': vars})
    jsd = r.text
    try:
        jsd = json.loads(jsd)
    except ValueError:
        # the server returned something that is not JSON
        return None
    else:
        return jsd
python
{ "resource": "" }
q6169
KitsuMappings.get
train
def get(self, external_site: str, external_id: int):
    """
    Get a Kitsu mapping by external site ID.

    :param str external_site: string representing the external site
    :param int external_id: ID of the entry in the external site
    :return: the related item's data, the raw mapping payload if the
        follow-up request fails, or None if no mapping was found
    :rtype: dict or None
    :raises: :class:`Pymoe.errors.ServerError`
    """
    resp = requests.get(self.apiurl + "/mappings",
                        params={"filter[externalSite]": external_site,
                                "filter[externalId]": external_id},
                        headers=self.header)
    if resp.status_code != 200:
        raise ServerError
    mapping = resp.json()
    if len(mapping['data']) < 1:
        return None
    # follow the relationship link to the actual item
    follow = requests.get(
        mapping['data'][0]['relationships']['item']['links']['related'],
        headers=self.header)
    if follow.status_code != 200:
        # fall back to the raw mapping payload
        return mapping
    return follow.json()
python
{ "resource": "" }
q6170
KitsuAuth.authenticate
train
def authenticate(self, username, password):
    """
    Obtain an OAuth token for the given credentials.

    If this KitsuAuth instance was created with remember enabled, the
    token, refresh token and expiration are also cached under *username*.

    :param username: username
    :param password: password
    :return: tuple (token, expiration unix timestamp, refresh_token)
    :raises ServerError: on any non-200 response
    """
    resp = requests.post(self.apiurl + "/token",
                         params={"grant_type": "password",
                                 "username": username,
                                 "password": password,
                                 "client_id": self.cid,
                                 "client_secret": self.csecret})
    if resp.status_code != 200:
        raise ServerError
    payload = resp.json()
    expiration = int(payload['created_at']) + int(payload['expires_in'])
    if self.remember:
        self.token_storage[username] = {'token': payload['access_token'],
                                        'refresh': payload['refresh_token'],
                                        'expiration': expiration}
    return payload['access_token'], expiration, payload['refresh_token']
python
{ "resource": "" }
q6171
KitsuAuth.refresh
train
def refresh(self, refresh_token):
    """
    Renew an OAuth token given an appropriate refresh token.

    :param refresh_token: The refresh token
    :return: tuple (token, expiration unix timestamp)
    :raises ServerError: on any non-200 response
    """
    resp = requests.post(self.apiurl + "/token",
                         params={"grant_type": "refresh_token",
                                 "client_id": self.cid,
                                 "client_secret": self.csecret,
                                 "refresh_token": refresh_token})
    if resp.status_code != 200:
        raise ServerError
    payload = resp.json()
    expiration = int(payload['created_at']) + int(payload['expires_in'])
    return payload['access_token'], expiration
python
{ "resource": "" }
q6172
KitsuAuth.get
train
def get(self, username):
    """
    Retrieve a stored OAuth token for a user, refreshing it transparently
    if it has expired. Only usable when the remember option is on.

    :param username: The username whose token we are retrieving
    :return: A token
    :raises: NotSaving when token storage is disabled,
             UserNotFound when no token is stored for the user
    """
    if not self.remember:
        raise NotSaving
    if username not in self.token_storage:
        raise UserNotFound
    entry = self.token_storage[username]
    if entry['expiration'] >= time.time():
        return entry['token']
    # Token expired: refresh it and update the cached entry in place.
    fresh_token, fresh_expiration = self.refresh(entry['refresh'])
    entry['token'] = fresh_token
    entry['expiration'] = fresh_expiration
    return fresh_token
python
{ "resource": "" }
q6173
save
train
def save(url, destination):
    """
    Thread target: download ``url`` and stream it to ``destination``.

    :param str url: which dump to download
    :param str destination: a file path to save to
    """
    response = requests.get(url, stream=True)
    with open(destination, 'wb') as out:
        # Stream in small chunks so large dumps never sit fully in memory.
        for block in response.iter_content(chunk_size=128):
            out.write(block)
python
{ "resource": "" }
q6174
Dump.download
train
def download(self, which, destination=None):
    """
    Download a dump in a background thread.

    By default the dump is dropped into the current working directory. If the
    directory given doesn't exist, we'll try to make it. Don't use '..' in the
    path as this confuses makedirs.

    :param int which: 0 for dat (txt), 1 for xml
    :param str destination: a directory to save to, defaults to cwd
    :return: the started download thread, so callers can join() it
    :rtype: threading.Thread
    """
    # Bug fixes: the method body uses ``self`` but the signature lacked it;
    # destination=None crashed in os.path.join despite the documented cwd
    # default; and the full URL was joined onto the path instead of the
    # file's own name.
    if destination:
        if not os.path.exists(destination):
            os.makedirs(destination)
    else:
        destination = os.getcwd()
    url = self.urls[which]
    target = os.path.join(destination, os.path.basename(url))
    pthread = threading.Thread(target=save, args=(url, target))
    pthread.start()
    return pthread
python
{ "resource": "" }
q6175
KitsuManga.get
train
def get(self, aid):
    """
    Get manga information by id.

    :param int aid: ID of the manga.
    :return: Dictionary or None (for not found)
    :rtype: Dictionary or None
    :raises: :class:`Pymoe.errors.ServerError`
    """
    response = requests.get(self.apiurl + "/manga/{}".format(aid),
                            headers=self.header)
    if response.status_code == 200:
        return response.json()
    # 404 means "no such manga"; anything else is a server-side failure.
    if response.status_code == 404:
        return None
    raise ServerError
python
{ "resource": "" }
q6176
Bakatsuki.active
train
def active(self):
    """
    Get a list of active projects.

    Follows MediaWiki query continuation so that categories with more than
    500 members are returned in full. (Previously only the first batch was
    returned: continuation batches were appended to ``projects`` but the
    method returned ``projects[0]``, silently discarding them.)

    :return list: A list of tuples containing a title and pageid in that order.
    """
    projects = []
    params = {'action': 'query', 'list': 'categorymembers',
              'cmpageid': self.active_id, 'cmtype': 'page',
              'cmlimit': '500', 'format': 'json'}
    r = requests.get(self.api, params=params, headers=self.header)
    if r.status_code == 200:
        jsd = r.json()
        projects.extend((x['title'], x['pageid'])
                        for x in jsd['query']['categorymembers'])
        # Keep requesting while the API signals more members remain.
        while 'query-continue' in jsd:
            cont = jsd['query-continue']['categorymembers']['cmcontinue']
            r = requests.get(self.api,
                             params=dict(params, cmcontinue=cont),
                             headers=self.header)
            if r.status_code != 200:
                break
            jsd = r.json()
            projects.extend((x['title'], x['pageid'])
                            for x in jsd['query']['categorymembers'])
    return projects
python
{ "resource": "" }
q6177
Bakatsuki.light_novels
train
def light_novels(self, language="English"):
    """
    Get a list of light novels under a certain language.

    Follows MediaWiki query continuation so that categories with more than
    500 members are returned in full. (Previously only the first batch was
    returned: continuation batches were appended to ``projects`` but the
    method returned ``projects[0]``, silently discarding them.)

    :param str language: Defaults to English. Replace with whatever language
        you want to query.
    :return list: A list of tuples containing a title and pageid element in
        that order.
    """
    projects = []
    category = 'Category:Light_novel_({})'.format(language.replace(" ", "_"))
    params = {'action': 'query', 'list': 'categorymembers',
              'cmtitle': category, 'cmtype': 'page',
              'cmlimit': '500', 'format': 'json'}
    r = requests.get(self.api, params=params, headers=self.header)
    if r.status_code == 200:
        jsd = r.json()
        projects.extend((x['title'], x['pageid'])
                        for x in jsd['query']['categorymembers'])
        # Keep requesting while the API signals more members remain.
        while 'query-continue' in jsd:
            cont = jsd['query-continue']['categorymembers']['cmcontinue']
            r = requests.get(self.api,
                             params=dict(params, cmcontinue=cont),
                             headers=self.header)
            if r.status_code != 200:
                break
            jsd = r.json()
            projects.extend((x['title'], x['pageid'])
                            for x in jsd['query']['categorymembers'])
    return projects
python
{ "resource": "" }
q6178
Bakatsuki.chapters
train
def chapters(self, title):
    """
    Get a list of chapters for a visual novel. Keep in mind, this can be slow.
    I've certainly tried to make it as fast as possible, but it's still pulling
    text out of a webpage.

    :param str title: The title of the novel you want chapters from
    :return OrderedDict: An OrderedDict which contains the chapters found for
        the visual novel supplied, keyed by the captured chapter number
        (leading zeros stripped)
    :raises requests.HTTPError: if the project page can't be fetched
    """
    r = requests.get("https://www.baka-tsuki.org/project/index.php?title={}".format(title.replace(" ", "_")), headers=self.header)
    if r.status_code != 200:
        raise requests.HTTPError("Not Found")
    else:
        parsed = soup(r.text, 'html.parser')
        dd = parsed.find_all("a")
        volumes = []
        for link in dd:
            # Skip image links (covers, thumbnails).
            if 'class' in link.attrs:
                if 'image' in link.get('class'):
                    continue
            # Keep only links whose href matches the chapter pattern and
            # which aren't in-page anchors.
            if 'href' in link.attrs:
                if re.search(self.chapter_regex, link.get('href')) is not None and not link.get('href').startswith('#'):
                    volumes.append(link)
        seplist = OrderedDict()
        # Group the chapter links by the number captured by separate_regex,
        # preferring the link's title attribute over its visible text.
        for item in volumes:
            if 'title' in item.attrs:
                result = re.search(self.separate_regex, item.get('title').lower())
            else:
                result = re.search(self.separate_regex, item.text.lower())
            if result and result.groups():
                if result.group('chapter').lstrip('0') in seplist:
                    seplist[result.group('chapter').lstrip('0')].append([item.get('href'), item.get('title') if 'title' in item.attrs else item.text])
                else:
                    seplist[result.group('chapter').lstrip('0')] = [[item.get('href'), item.get('title') if 'title' in item.attrs else item.text]]
        return seplist
python
{ "resource": "" }
q6179
Bakatsuki.cover
train
def cover(self, pageid):
    """
    Get a cover image given a page id.

    :param str pageid: The pageid for the light novel you want a cover image for
    :return str: the image url
    """
    # First query resolves the page's lead image name.
    first = requests.get(self.api,
                         params={'action': 'query', 'prop': 'pageimages',
                                 'pageids': pageid, 'format': 'json'},
                         headers=self.header)
    image_title = "File:" + first.json()['query']['pages'][str(pageid)]['pageimage']
    # Second query resolves that image name to a direct URL.
    second = requests.get(self.api,
                          params={'action': 'query', 'prop': 'imageinfo',
                                  'iiprop': 'url', 'titles': image_title,
                                  'format': 'json'},
                          headers=self.header)
    pages = second.json()['query']['pages']
    # The page key is the image's own (unknown) page id; take the sole entry.
    return next(iter(pages.values()))['imageinfo'][0]['url']
python
{ "resource": "" }
q6180
Bakatsuki.get_text
train
def get_text(self, title):
    """
    Grab the rendered html content of the page given by title.
    Technically you can use this to get the content of other pages too.

    :param title: Title for the page you want the content of
    :return: a string containing the html content
    """
    response = requests.get(self.api,
                            params={'action': 'parse', 'page': title,
                                    'format': 'json'},
                            headers=self.header)
    return response.json()['parse']['text']['*']
python
{ "resource": "" }
q6181
Mal._verify_credentials
train
def _verify_credentials(self):
    """
    An internal method that verifies the credentials given at instantiation.

    :raises: :class:`Pymoe.errors.UserLoginFailed`
    """
    response = requests.get(self.apiurl + "account/verify_credentials.xml",
                            auth=HTTPBasicAuth(self._username, self._password),
                            headers=self.header)
    if response.status_code != 200:
        raise UserLoginFailed("Username or Password incorrect.")
python
{ "resource": "" }
q6182
Mal._anime_add
train
def _anime_add(self, data):
    """
    Adds an anime to a user's list.

    :param data: A :class:`Pymoe.Mal.Objects.Anime` object with the anime data
    :raises: SyntaxError on invalid data type
    :raises: ServerError on failure to add
    :rtype: Bool
    :return: True on success
    """
    # Guard clause: reject anything that isn't an Anime object up front.
    if not isinstance(data, Anime):
        raise SyntaxError(
            "Invalid type: data should be a Pymoe.Mal.Objects.Anime object. Got a {}".format(type(data)))
    xmlstr = data.to_xml()
    response = requests.get(self.apiurl + "animelist/add/{}.xml".format(data.id),
                            params={'data': xmlstr},
                            auth=HTTPBasicAuth(self._username, self._password),
                            headers=self.header)
    if response.status_code != 201:
        raise ServerError(response.text, response.status_code)
    return True
python
{ "resource": "" }
q6183
Mal.user
train
def user(self, name):
    """
    Get a user's anime list and details. This returns an encapsulated data type.

    :param str name: The username to query
    :rtype: :class:`Pymoe.Mal.Objects.User`
    :return: A :class:`Pymoe.Mal.Objects.User` Object
    :raises ConnectionError: if either list request fails
    """
    # Fetch the user's full anime list.
    anime_data = requests.get(self.apiusers, params={'u': name, 'status': 'all', 'type': 'anime'}, headers=self.header)
    if anime_data.status_code != 200:
        raise ConnectionError(
            "Anime Data Request failed. Please Open a bug on https://github.com/ccubed/Pymoe and include the following data.\nStatus Code: {}\n\nText:{}".format(
                anime_data.status_code, anime_data.text))
    # Fetch the user's full manga list.
    manga_data = requests.get(self.apiusers, params={'u': name, 'status': 'all', 'type': 'manga'}, headers=self.header)
    if manga_data.status_code != 200:
        raise ConnectionError(
            "Manga Data Request failed. Please Open a bug on https://github.com/ccubed/Pymoe and include the following data.\nStatus Code: {}\n\nText:{}".format(
                manga_data.status_code, manga_data.text))
    # User id and display name come from the XML payload's myinfo element.
    root = ET.fromstring(anime_data.text)
    uid = root.find('myinfo').find('user_id').text
    uname = root.find('myinfo').find('user_name').text
    anime_object_list = self.parse_anime_data(anime_data.text)
    manga_object_list = self.parse_manga_data(manga_data.text)
    # Bucket each list by the user's watch/read status string.
    return User(uid=uid,
                name=uname,
                anime_list=NT_USER_ANIME(
                    watching=[x for x in anime_object_list['data'] if x.status.user == "Currently Watching"],
                    completed=[x for x in anime_object_list['data'] if x.status.user == "Completed"],
                    held=[x for x in anime_object_list['data'] if x.status.user == "On Hold"],
                    dropped=[x for x in anime_object_list['data'] if x.status.user == "Dropped"],
                    planned=[x for x in anime_object_list['data'] if x.status.user == "Plan to Watch"]
                ),
                anime_days=anime_object_list['days'],
                manga_list=NT_USER_MANGA(
                    reading=[x for x in manga_object_list['data'] if x.status.user == "Currently Reading"],
                    completed=[x for x in manga_object_list['data'] if x.status.user == "Completed"],
                    held=[x for x in manga_object_list['data'] if x.status.user == "On Hold"],
                    dropped=[x for x in manga_object_list['data'] if x.status.user == "Dropped"],
                    planned=[x for x in manga_object_list['data'] if x.status.user == "Plan to Read"]
                ),
                manga_days=manga_object_list['days'])
python
{ "resource": "" }
q6184
VNDBConnection.send_command
train
def send_command(self, command, args=None):
    """
    Send a command to VNDB and then get the result.

    :param command: What command are we sending
    :param args: What are the json args for this command
    :return: Servers Response
    :rtype: Dictionary (See D11 docs on VNDB)
    """
    if not args:
        payload = command + '\x04'
    elif isinstance(args, str):
        payload = command + ' ' + args + '\x04'
    else:
        # Let ujson propagate the error if the arguments can't be serialized.
        payload = command + ' ' + ujson.dumps(args) + '\x04'
    self.sslwrap.sendall(payload.encode('utf-8'))
    return self._recv_data()
python
{ "resource": "" }
q6185
VNDBConnection._recv_data
train
def _recv_data(self):
    """
    Receives data until we reach the EOT (0x04) terminator and then returns it.

    :return: the raw text for login ('Ok' responses), otherwise the parsed
        JSON body of the response
    """
    temp = ""
    while True:
        self.data_buffer = self.sslwrap.recv(1024)
        # VNDB terminates every response with an EOT (0x04) byte.
        if '\x04' in self.data_buffer.decode('utf-8', 'ignore'):
            temp += self.data_buffer.decode('utf-8', 'ignore')
            break
        else:
            temp += self.data_buffer.decode('utf-8', 'ignore')
            self.data_buffer = bytes(1024)
    temp = temp.replace('\x04', '')
    if 'Ok' in temp:
        # Because login: the login command answers a bare 'Ok' with no JSON.
        return temp
    else:
        # Responses look like '<name> <json>'; strip the name, parse the rest.
        return ujson.loads(temp.split(' ', 1)[1])
python
{ "resource": "" }
q6186
KitsuLib.get
train
def get(self, uid, filters=None):
    """
    Get a user's list of library entries. While individual entries on this
    list don't show what type of entry it is, you can use the filters
    provided by the Kitsu API to only select which ones you want.

    :param uid: str: User ID to get library entries for
    :param filters: dict: Dictionary of filters for the library
    :return: Results or None when the user has no matching entries
    :rtype: SearchWrapper or None
    :raises: :class:`Pymoe.errors.ServerError`
    """
    params = self.__format_filters(filters)
    response = requests.get(self.apiurl + "/users/{}/library-entries".format(uid),
                            headers=self.header, params=params)
    if response.status_code != 200:
        raise ServerError
    jsd = response.json()
    if not jsd['meta']['count']:
        return None
    next_link = jsd['links'].get('next')
    return SearchWrapper(jsd['data'], next_link, self.header)
python
{ "resource": "" }
q6187
KitsuLib.create
train
def create(self, user_id, media_id, item_type, token, data):
    """
    Create a library entry for a user. data should be just the attributes.
    Data at least needs a status and progress.

    :param user_id str: User ID that this Library Entry is for
    :param media_id str: ID for the media this entry relates to
    :param item_type str: anime, drama or manga depending
    :param token str: OAuth token for user
    :param data dict: Dictionary of attributes for the entry
    :return: New Entry ID
    :rtype: Str
    :raises: ConnectionError on a non-201 response
    """
    final_dict = {
        "data": {
            "type": "libraryEntries",
            "attributes": data,
            "relationships": {
                "user": {
                    "data": {
                        "id": user_id,
                        "type": "users"
                    }
                },
                "media": {
                    "data": {
                        "id": media_id,
                        "type": item_type
                    }
                }
            }
        }
    }
    # Copy the shared headers: mutating self.header directly would leak this
    # user's Authorization token into every subsequent request of the client.
    final_headers = dict(self.header)
    final_headers['Authorization'] = "Bearer {}".format(token)
    r = requests.post(self.apiurl + "/library-entries", json=final_dict, headers=final_headers)
    if r.status_code != 201:
        raise ConnectionError(r.text)
    jsd = r.json()
    return jsd['data']['id']
python
{ "resource": "" }
q6188
KitsuLib.update
train
def update(self, eid, data, token):
    """
    Update a given Library Entry.

    :param eid str: Entry ID
    :param data dict: Attributes
    :param token str: OAuth token
    :return: True
    :rtype: Bool
    :raises: ConnectionError on a non-200 response
    """
    final_dict = {"data": {"id": eid, "type": "libraryEntries", "attributes": data}}
    # Copy the shared headers: mutating self.header directly would leak this
    # user's Authorization token into every subsequent request of the client.
    final_headers = dict(self.header)
    final_headers['Authorization'] = "Bearer {}".format(token)
    r = requests.patch(self.apiurl + "/library-entries/{}".format(eid), json=final_dict, headers=final_headers)
    if r.status_code != 200:
        raise ConnectionError(r.text)
    return True
python
{ "resource": "" }
q6189
KitsuLib.delete
train
def delete(self, eid, token):
    """
    Delete a library entry.

    :param eid str: Entry ID
    :param token str: OAuth Token
    :return: True
    :rtype: Bool
    :raises: ConnectionError on a non-204 response
    """
    # Copy the shared headers: mutating self.header directly would leak this
    # user's Authorization token into every subsequent request of the client.
    final_headers = dict(self.header)
    final_headers['Authorization'] = "Bearer {}".format(token)
    r = requests.delete(self.apiurl + "/library-entries/{}".format(eid), headers=final_headers)
    if r.status_code != 204:
        # Removed leftover debug print(r.status_code); the response text is
        # already surfaced in the exception.
        raise ConnectionError(r.text)
    return True
python
{ "resource": "" }
q6190
convert_to_record
train
def convert_to_record(func):
    """Wrap mongodb record to a dict record with default value None"""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        if result is None:
            return result
        if isinstance(result, dict):
            # Single document: wrap it directly.
            return _record(result)
        # Cursor/iterable of documents: wrap lazily.
        return (_record(item) for item in result)
    return wrapper
python
{ "resource": "" }
q6191
MixinModel.to_one_str
train
def to_one_str(cls, value, *args, **kwargs):
    """Convert single record's values to str"""
    if not kwargs.get('wrapper'):
        return _es.to_dict_str(value)
    # Caller asked for the wrapped variant.
    return cls._wrapper_to_one_str(value)
python
{ "resource": "" }
q6192
MixinModel.to_str
train
def to_str(cls, values, callback=None):
    """Convert many records's values to str, optionally post-processing
    each converted record with ``callback``."""
    if not (callback and callable(callback)):
        return _es.to_str(values)
    if isinstance(values, dict):
        return callback(_es.to_str(values))
    return [callback(_es.to_str(item)) for item in values]
python
{ "resource": "" }
q6193
MixinModel.import_model
train
def import_model(cls, ins_name):
    """Import model class in models package

    :raises ValueError: if the class has no ``package_space`` attribute
    """
    try:
        package_space = getattr(cls, 'package_space')
    except AttributeError:
        raise ValueError('package_space not exist')
    return import_object(ins_name, package_space)
python
{ "resource": "" }
q6194
register_app
train
def register_app(app_name, app_setting, web_application_setting, mainfile, package_space):
    """insert current project root path into sys path

    Registers the global app configuration: app name and settings, project
    name derived from the main file's location, merged web application
    settings, optional session configuration, logging, and finally installs
    the application package.
    """
    from turbo import log
    app_config.app_name = app_name
    app_config.app_setting = app_setting
    # Project name is taken from the directory resolved by get_base_dir
    # (presumably 2 levels up from mainfile — confirm get_base_dir semantics).
    app_config.project_name = os.path.basename(get_base_dir(mainfile, 2))
    app_config.web_application_setting.update(web_application_setting)
    if app_setting.get('session_config'):
        app_config.session_config.update(app_setting['session_config'])
    log.getLogger(**app_setting.log)
    _install_app(package_space)
python
{ "resource": "" }
q6195
register_url
train
def register_url(url, handler, name=None, kwargs=None):
    """insert url into tornado application handlers group

    :arg str url: url
    :handler object handler: url mapping handler
    :name reverse url name
    :kwargs dict tornado handler initlize args
    """
    # Build the shortest URLSpec tuple the supplied arguments allow.
    if name is None and kwargs is None:
        spec = (url, handler)
    elif name is None:
        spec = (url, handler, kwargs)
    else:
        spec = (url, handler, kwargs, name)
    app_config.urls.append(spec)
python
{ "resource": "" }
q6196
BaseBaseHandler.parameter
train
def parameter(self):
    '''
    according to request method config to filter all request paremter
    if value is invalid then set None

    Reads the declarative parameter specs on the handler class
    (``_required_params``, ``_<method>_required_params`` and
    ``_<method>_params``) and builds a dict of converted request values.
    '''
    method = self.request.method.lower()
    arguments = self.request.arguments
    files = self.request.files
    rpd = {}  # request parameter dict

    def filter_parameter(key, tp, default=None):
        # Convert one argument to type ``tp`` and store it in rpd, using
        # ``default`` when the argument is missing from the request.
        if tp not in self._types:
            raise ValueError(
                '%s parameter expected types %s' % (key, self._types))
        if not isinstance(tp, file_types):
            if key not in arguments:
                rpd[key] = default
                return
            if tp in [ObjectId, int, float, bool]:
                # Dispatch to the matching to_objectid/to_int/... converter.
                rpd[key] = getattr(self, 'to_%s' % getattr(
                    tp, '__name__').lower())(self.get_argument(key))
                return
            if tp == basestring_type or issubclass(tp, basestring_type):
                rpd[key] = self.get_argument(key, strip=False)
                return
            if tp == list:
                # Multi-valued argument.
                rpd[key] = self.get_arguments(key)
                return
        if tp == file:
            # File uploads live in request.files, not in arguments.
            if key not in files:
                rpd[key] = []
                return
            rpd[key] = self.request.files[key]

    # Params required for every HTTP method.
    required_params = getattr(self, '_required_params', None)
    if isinstance(required_params, list):
        for key, tp, default in required_params:
            filter_parameter(key, tp, default)

    # extract method required params
    method_required_params = getattr(
        self, '_%s_required_params' % method, None)
    if isinstance(method_required_params, list):
        for key, tp, default in method_required_params:
            filter_parameter(key, tp, default)

    params = getattr(self, '_%s_params' % method, None)
    if params is None:
        return rpd

    # need arguments
    try:
        for key, tp in params.get('need', []):
            if tp == list:
                filter_parameter(key, tp, [])
            else:
                filter_parameter(key, tp)
    except ValueError as e:
        app_log.error(
            '%s request need arguments parse error: %s' % (method, e))
        raise ValueError(e)
    except Exception as e:
        app_log.error(
            '%s request need arguments parse error: %s' % (method, e))
        raise e

    # option arguments
    for key, tp, default in params.get('option', []):
        filter_parameter(key, tp, default)

    return rpd
python
{ "resource": "" }
q6197
BaseBaseHandler.wo_resp
train
def wo_resp(self, resp):
    """
    Write out the response dict; can be overridden for other styles.

    When ``self._data`` is set, its stringified form is attached under the
    ``res`` key before writing.
    """
    if self._data is None:
        return self.wo_json(resp)
    resp['res'] = self.to_str(self._data)
    return self.wo_json(resp)
python
{ "resource": "" }
q6198
BaseBaseModel.find
train
def find(self, *args, **kwargs):
    """collection find method

    Pass ``wrapper=True`` to get records wrapped for default-None access.
    """
    use_wrapper = kwargs.pop('wrapper', False)
    # Only the exact value True triggers the wrapped variant.
    if use_wrapper is not True:
        return self.__collect.find(*args, **kwargs)
    return self._wrapper_find(*args, **kwargs)
python
{ "resource": "" }
q6199
BaseBaseModel._wrapper_find_one
train
def _wrapper_find_one(self, filter_=None, *args, **kwargs):
    """Convert record to a dict that has no key error"""
    collection = self.__collect
    return collection.find_one(filter_, *args, **kwargs)
python
{ "resource": "" }