code (string)
signature (string)
docstring (string)
loss_without_docstring (float64)
loss_with_docstring (float64)
factor (float64)
print(
    '{| class="wikitable"'
    ' style="font-family: monospace;'
    ' background-color:#ffffcc;"'
    ' cellpadding="10"'
)
print("|-")
print("! POKE")
print("value")
print("! ")
print("! unicode")
print("codepoint")
print("! type")
print("|-")
for no, data in enumerate(DRAGON_CHARS_MAP):
    item, item_type = data
    codepoint = ord(item)
    print("|%i" % no)
    foreground, background = get_rgb_color(item_type)
    foreground = "#%02x%02x%02x" % foreground
    background = "#%02x%02x%02x" % background
    print('| style="color:%s; background-color:%s;" | &#x%x;' % (
        foreground, background, codepoint
    ))
    print("|%i" % codepoint)
    print("|%s" % item_type)
    print("|-")
print("|}")
def create_wiki_page()
for http://archive.worldofdragon.org/index.php?title=CharMap
5.147602
4.855662
1.060124
code_line = ""
func_token = False
for byte_no in raw_bytes:
    if byte_no == 0xff:
        # Next byte is a function token
        func_token = True
        continue
    elif func_token:
        func_token = False
        try:
            character = FUNCTION_TOKEN[byte_no]
        except KeyError, err:
            print "Error: BASIC function token for '%s' not found!" % hex(byte_no)
            character = chr(byte_no)
    elif byte_no in BASIC_TOKENS:
        try:
            character = BASIC_TOKENS[byte_no]
        except KeyError, err:
            print "Error: BASIC token for '%s' not found!" % hex(byte_no)
            character = chr(byte_no)
    else:
        character = chr(byte_no)
    # print byte_no, repr(character)
    code_line += character
return code_line
def bytes2codeline(raw_bytes)
>>> data = (0x87,0x20,0x22,0x48,0x45,0x4c,0x4c,0x4f,0x20,0x57,0x4f,0x52,0x4c,0x44,0x21,0x22)
>>> bytes2codeline(data)
'PRINT "HELLO WORLD!"'
2.77604
2.867793
0.968006
log.debug("enable half sinus scan") self.half_sinus = True statistics = self._get_statistics() width = 50 max_count = max(statistics.values()) print print "Found this zeror crossing timings in the wave file:" print for duration, count in sorted(statistics.items(), reverse=True): hz = duration2hz(duration, self.framerate / 2) w = int(round(float(width) / max_count * count)) stars = "*"*w print "%5sHz (%5s Samples) exist: %4s %s" % (hz, duration, count, stars) print print "Notes:" print " - Hz values are converted to full sinus cycle duration." print " - Sample cound is from half sinus cycle."
def analyze(self)
Example output:
 394Hz (   28 Samples) exist:    1
 613Hz (   18 Samples) exist:    1
 788Hz (   14 Samples) exist:    1
 919Hz (   12 Samples) exist:  329 *********
1002Hz (   11 Samples) exist: 1704 **********************************************
1103Hz (   10 Samples) exist: 1256 **********************************
1225Hz (    9 Samples) exist: 1743 ***********************************************
1378Hz (    8 Samples) exist:    1
1575Hz (    7 Samples) exist:  322 *********
1838Hz (    6 Samples) exist: 1851 **************************************************
2205Hz (    5 Samples) exist: 1397 **************************************
2756Hz (    4 Samples) exist:  913 *************************
10.026299
8.448423
1.186766
# go in wave stream to the first bit
try:
    self.next()
except StopIteration:
    print "Error: no bits identified!"
    sys.exit(-1)

log.info("First bit is at: %s" % self.pformat_pos())
log.debug("enable half sinus scan")
self.half_sinus = True

# Toggle sync test by consuming one half sinus sample
# self.iter_trigger_generator.next() # Test sync

# get "half sinus cycle" test data
test_durations = itertools.islice(self.iter_duration_generator, length)
# It's a list of tuples like: [(frame_no, duration), ...]
test_durations = list(test_durations)

diff1, diff2 = diff_info(test_durations)
log.debug("sync diff info: %i vs. %i" % (diff1, diff2))

if diff1 > diff2:
    log.info("\nbit-sync one step.")
    self.iter_trigger_generator.next()
    log.debug("Synced.")
else:
    log.info("\nNo bit-sync needed.")

self.half_sinus = False
log.debug("disable half sinus scan")
def sync(self, length)
synchronize the wave sync trigger
8.695148
8.616838
1.009088
print
process_info = ProcessInfo(self.frame_count, use_last_rates=4)
start_time = time.time()
next_status = start_time + 0.25

old_pos = next(iter_trigger)
for pos in iter_trigger:
    duration = pos - old_pos
    # log.log(5, "Duration: %s" % duration)
    yield duration
    old_pos = pos

    if time.time() > next_status:
        next_status = time.time() + 1
        self._print_status(process_info)

self._print_status(process_info)
print
def iter_duration(self, iter_trigger)
yield the duration between two frames in a row.
4.926999
4.704075
1.047389
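Standing alone, the duration logic above is easy to check: the generator just yields the difference between successive trigger frame positions. A minimal sketch, assuming plain integer frame positions (the ProcessInfo status reporting is left out):

# Minimal sketch of the duration logic: yield the difference
# between successive trigger frame positions.
def iter_duration(iter_trigger):
    it = iter(iter_trigger)
    old_pos = next(it)
    for pos in it:
        yield pos - old_pos  # duration in frames between two triggers
        old_pos = pos

print(list(iter_duration([10, 21, 31, 40])))  # -> [11, 10, 9]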
window_size = (2 * self.cfg.END_COUNT) + self.cfg.MID_COUNT

# sinus curve goes from negative into positive:
pos_null_transit = [(0, self.cfg.END_COUNT), (self.cfg.END_COUNT, 0)]
# sinus curve goes from positive into negative:
neg_null_transit = [(self.cfg.END_COUNT, 0), (0, self.cfg.END_COUNT)]

if self.cfg.MID_COUNT > 3:
    mid_index = int(round(self.cfg.MID_COUNT / 2.0))
else:
    mid_index = 0

in_pos = False
for values in iter_window(iter_wave_values, window_size):
    # Split the window
    previous_values = values[:self.cfg.END_COUNT]  # e.g.: 123-----
    mid_values = values[self.cfg.END_COUNT:self.cfg.END_COUNT + self.cfg.MID_COUNT]  # e.g.: ---45---
    next_values = values[-self.cfg.END_COUNT:]  # e.g.: -----678

    # get only the value and strip the frame_no
    # e.g.: (frame_no, value) tuple -> value list
    previous_values = [i[1] for i in previous_values]
    next_values = [i[1] for i in next_values]

    # Count sign from previous and next values
    sign_info = [
        count_sign(previous_values, 0),
        count_sign(next_values, 0)
    ]
    # log.log(5, "sign info: %s" % repr(sign_info))

    # yield the mid crossing
    if not in_pos and sign_info == pos_null_transit:
        log.log(5, "sinus curve goes from negative into positive")
        # log.debug(" %s | %s | %s" % (previous_values, mid_values, next_values))
        yield mid_values[mid_index][0]
        in_pos = True
    elif in_pos and sign_info == neg_null_transit:
        if self.half_sinus:
            log.log(5, "sinus curve goes from positive into negative")
            # log.debug(" %s | %s | %s" % (previous_values, mid_values, next_values))
            yield mid_values[mid_index][0]
        in_pos = False
def iter_trigger(self, iter_wave_values)
trigger on the middle crossing of the wave sinus curve
3.207037
3.110341
1.031089
typecode = self.get_typecode(self.samplewidth)

if log.level >= 5:
    if self.cfg.AVG_COUNT > 1:
        # merge samples -> log output in iter_avg_wave_values
        tlm = None
    else:
        tlm = TextLevelMeter(self.max_value, 79)

# Use only a read size which is a whole multiple of the samplewidth,
# otherwise array.array will raise: ValueError: string length not a multiple of item size
divider = int(round(float(WAVE_READ_SIZE) / self.samplewidth))
read_size = self.samplewidth * divider
if read_size != WAVE_READ_SIZE:
    log.info("Actually used wave read size: %i Bytes" % read_size)

get_wave_block_func = functools.partial(self.wavefile.readframes, read_size)
skip_count = 0

manually_audioop_bias = self.samplewidth == 1 and audioop is None

for frames in iter(get_wave_block_func, ""):
    if self.samplewidth == 1:
        if audioop is None:
            log.warning("audioop not available: use manual bias() work-a-round.")
        else:
            # 8 bit samples are unsigned, see:
            # http://docs.python.org/2/library/audioop.html#audioop.lin2lin
            frames = audioop.bias(frames, 1, 128)

    try:
        values = array.array(typecode, frames)
    except ValueError, err:
        # e.g.: ValueError: string length not a multiple of item size
        # Work-a-round: Skip the last frames of this block
        frame_count = len(frames)
        divider = int(math.floor(float(frame_count) / self.samplewidth))
        new_count = self.samplewidth * divider
        frames = frames[:new_count]  # skip frames
        log.error(
            "Can't make array from %s frames: Value error: %s (Skip %i and use %i frames)" % (
                frame_count, err, frame_count - new_count, len(frames)
            ))
        values = array.array(typecode, frames)

    for value in values:
        self.wave_pos += 1  # Absolute position in the frame stream

        if manually_audioop_bias:
            # audioop.bias can't be used.
            # See: http://hg.python.org/cpython/file/482590320549/Modules/audioop.c#l957
            value = value % 0x100 - 128

        # if abs(value) < self.min_volume:
        #     # log.log(5, "Ignore to lower amplitude")
        #     skip_count += 1
        #     continue

        yield (self.wave_pos, value)

log.info("Skip %i samples that are lower than %i" % (
    skip_count, self.min_volume
))
log.info("Last read frame is: %s" % self.pformat_pos())
def iter_wave_values(self)
yield frame number + volume value from the WAVE file
5.415922
5.368155
1.008898
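The array.array decoding used above can be tried in isolation. A small sketch, assuming 16-bit little-endian samples ('h' typecode) and a synthetic byte block instead of a real WAVE file:

import array

# Two 16-bit little-endian samples: 0x0100 = 256 and 0x7FFF = 32767
frames = b"\x00\x01\xff\x7f"
values = array.array("h", frames)
print(list(values))  # -> [256, 32767]

# 8-bit WAVE samples are unsigned; shifting by 128 makes them signed,
# which is what the audioop.bias(frames, 1, 128) call above does.
unsigned = [0, 128, 255]
signed = [(v % 0x100) - 128 for v in unsigned]
print(signed)  # -> [-128, 0, 127]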
values = []
for value in g:
    values.append(value)
    if len(values) == steps:
        yield list(values)
        values = []
if values:
    yield list(values)
def iter_steps(g, steps)
iterate over 'g' in blocks with a length of the given 'steps' count.

>>> for v in iter_steps([1,2,3,4,5], steps=2): v
[1, 2]
[3, 4]
[5]
>>> for v in iter_steps([1,2,3,4,5,6,7,8,9], steps=3): v
[1, 2, 3]
[4, 5, 6]
[7, 8, 9]

 12345678 12345678 12345678
>>> bits = [int(i) for i in "0101010101010101111000"]
>>> for v in iter_steps(bits, steps=8): v
[0, 1, 0, 1, 0, 1, 0, 1]
[0, 1, 0, 1, 0, 1, 0, 1]
[1, 1, 1, 0, 0, 0]
2.352173
3.035625
0.774856
return [self.read_byte(addr) for addr in xrange(start, end)]
def get(self, start, end)
used in unittests
6.147732
5.880524
1.04544
self.text.config(state=tkinter.NORMAL)
self.text.insert("end", char)
self.text.see("end")
self.text.config(state=tkinter.DISABLED)
def _new_output_char(self, char)
insert in text field
2.16642
1.978395
1.095039
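The NORMAL/DISABLED toggle above is the usual trick for an append-only tkinter Text widget: a disabled widget rejects insert() calls, so it is briefly re-enabled around each write. A standalone sketch of the same pattern (widget names are illustrative):

import tkinter

def append_readonly(text_widget, chars):
    # a DISABLED Text widget ignores insert(); enable it only for the write
    text_widget.config(state=tkinter.NORMAL)
    text_widget.insert("end", chars)
    text_widget.see("end")  # keep the latest output visible
    text_widget.config(state=tkinter.DISABLED)

root = tkinter.Tk()
text = tkinter.Text(root, state=tkinter.DISABLED)
text.pack()
append_readonly(text, "Hello\n")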
try:
    # log.critical("check_cpu_interval()")
    if not cpu_process.is_alive():
        log.critical("raise SystemExit, because CPU is not alive.")
        _thread.interrupt_main()
        raise SystemExit("Kill pager.getch()")
except KeyboardInterrupt:
    _thread.interrupt_main()
else:
    t = threading.Timer(1.0, self.check_cpu_interval, args=[cpu_process])
    t.start()
def check_cpu_interval(self, cpu_process)
work-a-round for blocking input
4.814287
4.564843
1.054645
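The method re-arms itself via threading.Timer, giving a lightweight watchdog that checks once per second without blocking the caller. A self-contained sketch of the same pattern, assuming a generic object with an is_alive() method:

import threading

def watchdog(process, interval=1.0):
    # check once, then re-arm the timer: a periodic, non-blocking watchdog
    if not process.is_alive():
        print("process died, stop watching")
        return
    t = threading.Timer(interval, watchdog, args=[process, interval])
    t.daemon = True  # don't keep the interpreter alive just for the timer
    t.start()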
try:
    cycles_per_sec = self.cycles_per_sec_var.get()
except ValueError:
    self.cycles_per_sec_var.set(self.runtime_cfg.cycles_per_sec)
    return

self.cycles_per_sec_label_var.set(
    "cycles/sec / 1000000 = %f MHz CPU frequency * 16 = %f MHz crystal" % (
        cycles_per_sec / 1000000,
        cycles_per_sec / 1000000 * 16,
    )
)
self.runtime_cfg.cycles_per_sec = cycles_per_sec
def command_cycles_per_sec(self, event=None)
TODO: refactor: move code to CPU!
2.924409
2.753559
1.062047
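The label math is plain unit conversion: the CPU frequency in MHz is cycles/sec divided by one million, and the crystal frequency is sixteen times that. A worked example (895,000 cycles/sec is a plausible value for this kind of 8-bit machine, used here only for illustration):

cycles_per_sec = 895000
cpu_mhz = cycles_per_sec / 1000000.0   # -> 0.895 MHz CPU frequency
crystal_mhz = cpu_mhz * 16             # -> 14.32 MHz crystal
print("%f MHz CPU, %f MHz crystal" % (cpu_mhz, crystal_mhz))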
try:
    max_delay = self.max_delay_var.get()
except ValueError:
    max_delay = self.runtime_cfg.max_delay

if max_delay < 0:
    max_delay = self.runtime_cfg.max_delay

if max_delay > 0.1:
    max_delay = self.runtime_cfg.max_delay

self.runtime_cfg.max_delay = max_delay
self.max_delay_var.set(self.runtime_cfg.max_delay)
def command_max_delay(self, event=None)
CPU burst max running time - self.runtime_cfg.max_delay
2.213776
1.844713
1.200065
try:
    inner_burst_op_count = self.inner_burst_op_count_var.get()
except ValueError:
    inner_burst_op_count = self.runtime_cfg.inner_burst_op_count

if inner_burst_op_count < 1:
    inner_burst_op_count = self.runtime_cfg.inner_burst_op_count

self.runtime_cfg.inner_burst_op_count = inner_burst_op_count
self.inner_burst_op_count_var.set(self.runtime_cfg.inner_burst_op_count)
def command_inner_burst_op_count(self, event=None)
CPU burst inner op count - self.runtime_cfg.inner_burst_op_count
1.737418
1.463698
1.187006
try:
    max_burst_count = self.max_burst_count_var.get()
except ValueError:
    max_burst_count = self.runtime_cfg.max_burst_count

if max_burst_count < 1:
    max_burst_count = self.runtime_cfg.max_burst_count

self.runtime_cfg.max_burst_count = max_burst_count
self.max_burst_count_var.set(self.runtime_cfg.max_burst_count)
def command_max_burst_count(self, event=None)
max CPU burst op count - self.runtime_cfg.max_burst_count
1.923894
1.593959
1.206991
try:
    max_run_time = self.max_run_time_var.get()
except ValueError:
    max_run_time = self.runtime_cfg.max_run_time

self.runtime_cfg.max_run_time = max_run_time
self.max_run_time_var.set(self.runtime_cfg.max_run_time)
def command_max_run_time(self, event=None)
CPU burst max running time - self.runtime_cfg.max_run_time
2.146544
1.721074
1.247212
log.critical("cli kwargs: %s", repr(kwargs)) ctx.obj = CliConfig(**kwargs)
def cli(ctx, **kwargs)
DragonPy is an open source (GPL v3 or later) emulator for the 30-year-old home computers Dragon 32 and Tandy TRS-80 Color Computer (CoCo)... Homepage: https://github.com/jedie/DragonPy
10.12941
13.9563
0.725795
sbi = CONSOLE_SCREEN_BUFFER_INFO()
ret = windll.kernel32.GetConsoleScreenBufferInfo(console_handle, byref(sbi))
if ret == 0:
    return (0, 0)
return (sbi.srWindow.Right - sbi.srWindow.Left + 1,
        sbi.srWindow.Bottom - sbi.srWindow.Top + 1)
def _windows_get_window_size()
Return (width, height) of available window area on Windows. (0, 0) if no console is allocated.
1.947889
1.93432
1.007015
# see README.txt for reference information
# http://www.kernel.org/doc/man-pages/online/pages/man4/tty_ioctl.4.html
from fcntl import ioctl
from termios import TIOCGWINSZ
from array import array

winsize = array("H", [0] * 4)
try:
    ioctl(sys.stdout.fileno(), TIOCGWINSZ, winsize)
except IOError:
    # for example IOError: [Errno 25] Inappropriate ioctl for device
    # when output is redirected
    # [ ] TODO: check fd with os.isatty
    pass
return (winsize[1], winsize[0])
def _posix_get_window_size()
Return (width, height) of console terminal on POSIX system. (0, 0) on IOError, i.e. when no console is allocated.
3.945757
3.865079
1.020874
def hex3fy(key):
    from binascii import hexlify
    # Python 3 strings are no longer binary, encode them for hexlify()
    if PY3K:
        key = key.encode('utf-8')
    keyhex = hexlify(key).upper()
    if PY3K:
        keyhex = keyhex.decode('utf-8')
    return keyhex

if type(key) == str:
    return hex3fy(key)
else:
    return ' '.join([hex3fy(s) for s in key])
def dumpkey(key)
Helper to convert result of `getch` (string) or `getchars` (list) to hex string.
4.011723
3.895888
1.029732
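dumpkey has no doctest of its own, so here is the hex conversion it performs, re-done inline for the Python 3 (PY3K) branch. On many terminals getch() returns '\x1b[A' for the Up arrow, which would dump as '1B 5B 41':

from binascii import hexlify

# standalone check of the per-key conversion dumpkey() performs
print(hexlify("a".encode("utf-8")).upper().decode("utf-8"))   # -> '61'
print(" ".join(hexlify(s.encode("utf-8")).upper().decode("utf-8")
               for s in ["\x1b", "[", "A"]))                  # -> '1B 5B 41'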
import sys, termios

fd = sys.stdin.fileno()
# save old terminal settings
old_settings = termios.tcgetattr(fd)

chars = []
try:
    # change terminal settings - turn off canonical mode and echo.
    # in canonical mode read from stdin returns one line at a time
    # and we need one char at a time (see DESIGN.rst for more info)
    newattr = list(old_settings)
    newattr[3] &= ~termios.ICANON
    newattr[3] &= ~termios.ECHO
    newattr[6][termios.VMIN] = 1   # block until one char received
    newattr[6][termios.VTIME] = 0
    # TCSANOW below means apply settings immediately
    termios.tcsetattr(fd, termios.TCSANOW, newattr)

    # [ ] this fails when stdin is redirected, like
    #     ls -la | pager.py
    # [ ] also check on Windows
    ch = sys.stdin.read(1)
    chars = [ch]

    if _getall:
        # move rest of chars (if any) from input buffer
        # change terminal settings - enable non-blocking read
        newattr = termios.tcgetattr(fd)
        newattr[6][termios.VMIN] = 0  # CC structure
        newattr[6][termios.VTIME] = 0
        termios.tcsetattr(fd, termios.TCSANOW, newattr)

        while True:
            ch = sys.stdin.read(1)
            if ch != '':
                chars.append(ch)
            else:
                break
finally:
    # restore terminal settings. Do this when all output is
    # finished - TCSADRAIN flag
    termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)

if _getall:
    return chars
else:
    return chars[0]
def _getch_unix(_getall=False)
# --- current algorithm ---
# 1. switch to char-by-char input mode
# 2. turn off echo
# 3. wait for at least one char to appear
# 4. read the rest of the character buffer (_getall=True)
# 5. return list of characters (_getall on)
#    or a single char (_getall off)
3.898515
3.663442
1.064167
prompt = "Page -%s-. Press any key to continue . . . " % pagenum echo(prompt) if getch() in [ESC_, CTRL_C_, 'q', 'Q']: return False echo('\r' + ' '*(len(prompt)-1) + '\r')
def prompt(pagenum)
Show default prompt to continue and process keypress. It assumes terminal/console understands carriage return \r character.
7.380137
7.252473
1.017603
width = getwidth()
height = getheight()
pagenum = 1

try:
    try:
        line = content.next().rstrip("\r\n")
    except AttributeError:
        # Python 3 compatibility
        line = content.__next__().rstrip("\r\n")
except StopIteration:
    pagecallback(pagenum)
    return

while True:     # page cycle
    linesleft = height - 1  # leave the last line for the prompt callback
    while linesleft:
        linelist = [line[i:i + width] for i in range(0, len(line), width)]
        if not linelist:
            linelist = ['']
        lines2print = min(len(linelist), linesleft)
        for i in range(lines2print):
            if WINDOWS and len(line) == width:
                # avoid extra blank line by skipping linefeed print
                echo(linelist[i])
            else:
                print((linelist[i]))
        linesleft -= lines2print
        linelist = linelist[lines2print:]

        if linelist:  # prepare symbols left on the line for the next iteration
            line = ''.join(linelist)
            continue
        else:
            try:
                try:
                    line = content.next().rstrip("\r\n")
                except AttributeError:
                    # Python 3 compatibility
                    line = content.__next__().rstrip("\r\n")
            except StopIteration:
                pagecallback(pagenum)
                return
    if pagecallback(pagenum) == False:
        return
    pagenum += 1
def page(content, pagecallback=prompt)
Output `content`, call `pagecallback` after every page with page number as a parameter. `pagecallback` may return False to terminate pagination. Default callback shows prompt, waits for keypress and aborts on 'q', ESC or Ctrl-C.
3.551504
3.676599
0.965975
print() print("Reformat v09 trace...") mem_info = SBC09MemInfo(sys.stderr) result = [] next_update = time.time() + 1 old_line = None for line_no, line in enumerate(raw_trace.splitlines()): if max_lines is not None and line_no >= max_lines: msg = "max lines %i arraived -> Abort." % max_lines print(msg) result.append(msg) break if time.time() > next_update: print("reformat %i trace lines..." % line_no) next_update = time.time() + 1 try: pc = int(line[3:7], 16) op_code = int(line[10:15].strip().replace(" ", ""), 16) cc = int(line[57:59], 16) a = int(line[46:48], 16) b = int(line[51:53], 16) x = int(line[18:22], 16) y = int(line[25:29], 16) u = int(line[32:36], 16) s = int(line[39:43], 16) except ValueError as err: print("Error in line %i: %s" % (line_no, err)) print("Content on line %i:" % line_no) print("-"*79) print(repr(line)) print("-"*79) continue op_data = MC6809OP_DATA_DICT[op_code] mnemonic = op_data["mnemonic"] cc_txt = cc_value2txt(cc) mem = mem_info.get_shortest(pc) # print op_data register_line = "cc=%02x a=%02x b=%02x dp=?? x=%04x y=%04x u=%04x s=%04x| %s" % ( cc, a, b, x, y, u, s, cc_txt ) if old_line is None: line = "(init with: %s)" % register_line else: line = old_line % register_line old_line = "%04x| %-11s %-27s %%s | %s" % ( pc, "%x" % op_code, mnemonic, mem ) result.append(line) print("Done, %i trace lines converted." % line_no) # print raw_trace[:700] return result
def reformat_v09_trace(raw_trace, max_lines=None)
reformat a v09 trace similar to the XRoar one and add CC and memory information.

Note: v09 traces contain the register info line one trace line later!
We reorder it as XRoar does: addr+opcode with the resulting registers.
3.425637
3.360214
1.01947
if not isinstance(t, (int, float)):
    raise TypeError("human_duration() argument must be integer or float")

chunks = (
    (60 * 60 * 24 * 365, u'years'),
    (60 * 60 * 24 * 30, u'months'),
    (60 * 60 * 24 * 7, u'weeks'),
    (60 * 60 * 24, u'days'),
    (60 * 60, u'hours'),
)

if t < 1:
    return u"%.1f ms" % round(t * 1000, 1)
if t < 60:
    return u"%.1f sec" % round(t, 1)
if t < 60 * 60:
    return u"%.1f min" % round(t / 60, 1)

for seconds, name in chunks:
    count = t / seconds
    if count >= 1:
        count = round(count, 1)
        break
return u"%(number).1f %(type)s" % {'number': count, 'type': name}
def human_duration(t)
Converts a time duration into a friendly text representation.

>>> human_duration("type error")
Traceback (most recent call last):
    ...
TypeError: human_duration() argument must be integer or float
>>> human_duration(0.01)
u'10.0 ms'
>>> human_duration(0.9)
u'900.0 ms'
>>> human_duration(65.5)
u'1.1 min'
>>> human_duration((60 * 60)-1)
u'59.0 min'
>>> human_duration(60*60)
u'1.0 hours'
>>> human_duration(1.05*60*60)
u'1.1 hours'
>>> human_duration(2.54 * 60 * 60 * 24 * 365)
u'2.5 years'
1.887686
1.893863
0.996738
if old_avg is None:
    return current_value
return (float(old_avg) * count + current_value) / (count + 1)
def average(old_avg, current_value, count)
Calculate the average. Count must start with 0.

>>> average(None, 3.23, 0)
3.23
>>> average(0, 1, 0)
1.0
>>> average(2.5, 5, 4)
3.0
2.237875
3.402627
0.657691
values = collections.deque(maxlen=window_size)
for value in g:
    values.append(value)
    if len(values) == window_size:
        yield list(values)
def iter_window(g, window_size)
iterate over 'g' bit-by-bit and yield a window with the given 'window_size' width.

>>> for v in iter_window([1,2,3,4], window_size=2): v
[1, 2]
[2, 3]
[3, 4]
>>> for v in iter_window([1,2,3,4,5], window_size=3): v
[1, 2, 3]
[2, 3, 4]
[3, 4, 5]
>>> for v in iter_window([1,2,3,4], window_size=2):
...     v
...     v.append(True)
[1, 2]
[2, 3]
[3, 4]
2.162908
3.059967
0.70684
assert isinstance(bitstream, (collections.Iterable, types.GeneratorType))
assert isinstance(pattern, (list, tuple))
window_size = len(pattern)

count = -1
for count, data in enumerate(iter_steps(bitstream, window_size), 1):
    # print count, data, pattern
    if data != pattern:
        count -= 1
        break
return count
def count_continuous_pattern(bitstream, pattern)
>>> pattern = list(bytes2bit_strings("A")) >>> bitstream = bytes2bit_strings("AAAXXX") >>> count_continuous_pattern(bitstream, pattern) 3 >>> pattern = list(bytes2bit_strings("X")) >>> bitstream = bytes2bit_strings("AAAXXX") >>> count_continuous_pattern(bitstream, pattern) 0
4.521348
5.489899
0.823576
assert isinstance(bitstream, (collections.Iterable, types.GeneratorType))
assert isinstance(pattern, (list, tuple))
window_size = len(pattern)

pos = -1
for pos, data in enumerate(iter_window(bitstream, window_size)):
    # print pos, data, pattern
    if data == pattern:
        return pos
    if max_pos is not None and pos > max_pos:
        raise MaxPosArraived(pos)
raise PatternNotFound(pos)
def find_iter_window(bitstream, pattern, max_pos=None)
>>> pattern = list(bytes2bit_strings("B")) >>> bitstream = bytes2bit_strings("AAABCCC") >>> find_iter_window(bitstream, pattern) 24 >>> "".join(list(bitstream2string(bitstream))) 'CCC' >>> find_iter_window(bytes2bit_strings("HELLO!"), list(bytes2bit_strings("LO"))) 24 >>> find_iter_window(bytes2bit_strings("HELLO!"), list(bytes2bit_strings("LO")), max_pos=16) Traceback (most recent call last): ... MaxPosArraived: 17 >>> find_iter_window(bytes2bit_strings("HELLO!"), list(bytes2bit_strings("X"))) Traceback (most recent call last): ... PatternNotFound: 40
3.75515
3.465758
1.0835
count = 0
x = None
for count, x in enumerate(iterable):
    if x != sentinel:
        break
return count, x
def count_the_same(iterable, sentinel)
>>> count_the_same([0x55,0x55,0x55,0x55,0x3C,"foo","bar"],0x55)
(4, 60)
>>> 0x3C == 60
True
3.650721
6.267522
0.582482
def get_diff(l):
    diff = 0
    for no1, no2 in iter_steps(l, steps=2):
        diff += abs(no1 - no2)
    return diff

data1 = data[2:]
diff1 = get_diff(data1)

data2 = data[1:-1]
diff2 = get_diff(data2)

return diff1, diff2
def diff_info(data)
>>> diff_info([5,5,10,10,5,5,10,10])
(0, 15)
>>> diff_info([5,10,10,5,5,10,10,5])
(15, 0)
3.5135
3.624789
0.969298
positive_count = 0
negative_count = 0
for value in values:
    if value > min_value:
        positive_count += 1
    elif value < -min_value:
        negative_count += 1
return positive_count, negative_count
def count_sign(values, min_value)
>>> count_sign([3,-1,-2], 0)
(1, 2)
>>> count_sign([3,-1,-2], 2)
(1, 0)
>>> count_sign([0,-1],0)
(0, 1)
1.750358
2.068397
0.846239
bit_string = "".join([str(c) for c in reversed(bits)]) return int(bit_string, 2)
def bits2codepoint(bits)
>>> c = bits2codepoint([0, 0, 0, 1, 0, 0, 1, 0])
>>> c
72
>>> chr(c)
'H'
>>> bits2codepoint("00010010")
72
>>> bits2codepoint([0, 0, 1, 1, 0, 0, 1, 0])
76
3.500229
6.183305
0.566077
if isinstance(data, basestring):
    assert len(data) == 1
    data = ord(data)

bits = '{0:08b}'.format(data)
bits = bits[::-1]
return bits
def byte2bit_string(data)
>>> byte2bit_string("H") '00010010' >>> byte2bit_string(0x55) '10101010'
2.682979
3.409824
0.786838
if isinstance(codepoints, int):
    codepoints = [codepoints]

for codepoint in codepoints:
    bit_string = byte2bit_string(codepoint)
    for bit in bit_string:
        yield int(bit)
def codepoints2bitstream(codepoints)
>>> list(codepoints2bitstream([0x48,0x45]))
[0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0]
>>> list(codepoints2bitstream(0x48))
[0, 0, 0, 1, 0, 0, 1, 0]
2.916706
3.250155
0.897405
in_line_count = 0
line = []
for no, codepoint in enumerate(codepoint_stream, 1):
    r = repr(chr(codepoint))
    if "\\x" in r:  # FIXME
        txt = "%s %i" % (hex(codepoint), codepoint)
    else:
        txt = "%s %s" % (hex(codepoint), r)
    line.append(txt.center(8))
    in_line_count += 1
    if in_line_count >= display_block_count:
        in_line_count = 0
        print "%4s | %s |" % (no, " | ".join(line))
        line = []
if line:
    print "%4s | %s |" % (no, " | ".join(line))
if in_line_count > 0:
    print
def print_codepoint_stream(codepoint_stream, display_block_count=8, no_repr=False)
>>> def g(txt):
...     for c in txt: yield ord(c)
>>> codepoint_stream = g("HELLO!")
>>> print_codepoint_stream(codepoint_stream)
... # doctest: +NORMALIZE_WHITESPACE
6 | 0x48 'H' | 0x45 'E' | 0x4c 'L' | 0x4c 'L' | 0x4f 'O' | 0x21 '!' |
2.867098
3.012243
0.951815
printable = string.printable.replace("\n", "").replace("\r", "")
line = []
strings = ""
for codepoint in codepoints:
    char = chr(codepoint)
    if char in printable:
        strings += char
    else:
        if strings != "":
            line.append(strings)
            strings = ""
        line.append(char)
if strings != "":
    # don't lose a trailing run of printable characters
    line.append(strings)
return line
def pformat_codepoints(codepoints)
>>> l = pformat_codepoints([13, 70, 111, 111, 32, 66, 97, 114, 32, 33, 13])
>>> repr(l)
"['\\\\r', 'Foo Bar !', '\\\\r']"
2.79697
3.01276
0.928375
def print_line(no, line, line_info):
    print "%4s - %s" % (no, line)
    if no_repr:
        return
    line = []
    for codepoint in line_info:
        r = repr(chr(codepoint))
        if "\\x" in r:  # FIXME
            txt = "%s" % hex(codepoint)
        else:
            txt = "%s %s" % (hex(codepoint), r)
        txt = txt.center(8)
        line.append(txt)
    print " %s" % " ".join(line)

in_line_count = 0
line = ""
line_info = []
for no, bits in enumerate(block_bit_list, 1):
    line += "%s " % "".join([str(c) for c in bits])
    codepoint = bits2codepoint(bits)
    line_info.append(codepoint)
    in_line_count += 1
    if in_line_count >= display_block_count:
        in_line_count = 0
        print_line(no, line, line_info)
        line_info = []
        line = ""
if line:
    print_line(no, line, line_info)
if in_line_count > 0:
    print
def print_block_bit_list(block_bit_list, display_block_count=8, no_repr=False)
>>> bit_list = (
...     [0,0,1,1,0,0,1,0], # L
...     [1,0,0,1,0,0,1,0], # I
... )
>>> print_block_bit_list(bit_list)
... # doctest: +NORMALIZE_WHITESPACE
2 - 00110010 10010010
     0x4c 'L' 0x49 'I'
2.838917
2.873694
0.987898
block_bit_list = iter_steps(bitstream, steps=8)
print_block_bit_list(block_bit_list, no_repr=no_repr)
def print_bitlist(bitstream, no_repr=False)
>>> bitstream = bytes2bitstream("Hallo World!")
>>> print_bitlist(bitstream)
... # doctest: +NORMALIZE_WHITESPACE
8 - 00010010 10000110 00110110 00110110 11110110 00000100 11101010 11110110
     0x48 'H' 0x61 'a' 0x6c 'l' 0x6c 'l' 0x6f 'o' 0x20 ' ' 0x57 'W' 0x6f 'o'
12 - 01001110 00110110 00100110 10000100
     0x72 'r' 0x6c 'l' 0x64 'd' 0x21 '!'
>>> bitstream = bytes2bitstream("Hallo World!")
>>> print_bitlist(bitstream, no_repr=True)
... # doctest: +NORMALIZE_WHITESPACE
8 - 00010010 10000110 00110110 00110110 11110110 00000100 11101010 11110110
12 - 01001110 00110110 00100110 10000100
5.642181
9.116895
0.618871
byte_values = list(itertools.islice(byte_iterator, 2))
try:
    word = (byte_values[0] << 8) | byte_values[1]
except TypeError, err:
    raise TypeError("Can't build word from %s: %s" % (repr(byte_values), err))
return word
def get_word(byte_iterator)
return a uint16 value

>>> g=iter([0x1e, 0x12])
>>> v=get_word(g)
>>> v
7698
>>> hex(v)
'0x1e12'
2.836279
3.46918
0.817565
count -= 1
for index in xrange(0, count + 1):
    angle = 360.0 / count * index
    y = math.sin(math.radians(angle)) * max_value
    y = int(round(y))
    yield y
def sinus_values(count, max_value)
>>> values = list(sinus_values(10, 32768))
>>> len(values)
10
>>> values
[0, 21063, 32270, 28378, 11207, -11207, -28378, -32270, -21063, 0]
>>> tl = TextLevelMeter(32768, width=40)
>>> for v in values:
...     tl.feed(v)
'| * |'
'| | * |'
'| | *|'
'| | * |'
'| | * |'
'| * | |'
'| * | |'
'|* | |'
'| * | |'
'| * |'
3.017237
5.351665
0.563794
count = int(round(float(framerate) / float(hz)))
count += 1
values = tuple(sinus_values(count, max_value))
values = values[1:]
return values
def sinus_values_by_hz(framerate, hz, max_value)
Create sinus values with the given framerate and Hz.
Note: We skip the first zero-crossing, so the values can be used directly in a loop.

>>> values = sinus_values_by_hz(22050, 1200, 255)
>>> len(values) # 22050 / 1200Hz = 18,375
18
>>> values
(87, 164, 221, 251, 251, 221, 164, 87, 0, -87, -164, -221, -251, -251, -221, -164, -87, 0)
>>> tl = TextLevelMeter(255, width=40)
>>> for v in values:
...     tl.feed(v)
'| | * |'
'| | * |'
'| | * |'
'| | *|'
'| | *|'
'| | * |'
'| | * |'
'| | * |'
'| * |'
'| * | |'
'| * | |'
'| * | |'
'|* | |'
'|* | |'
'| * | |'
'| * | |'
'| * | |'
'| * |'
>>> values = sinus_values_by_hz(44100, 1200, 255)
>>> len(values) # 44100 / 1200Hz = 36,75
37
4.270765
7.50022
0.569419
distribution = get_distribution(package.DISTRIBUTION_NAME)
entry_info = distribution.get_entry_info(package.DIST_GROUP, package.ENTRY_POINT)
if not entry_info:
    raise RuntimeError(
        "Can't find entry info for distribution: %r (group: %r, entry point: %r)" % (
            package.DISTRIBUTION_NAME, package.DIST_GROUP, package.ENTRY_POINT
        )
    )
return entry_info.module_name
def get_module_name(package)
package must have these attributes, e.g.:

    package.DISTRIBUTION_NAME = "DragonPyEmulator"
    package.DIST_GROUP = "console_scripts"
    package.ENTRY_POINT = "DragonPy"

:return: a string like: "dragonpy.core.cli"
3.100406
2.381034
1.302126
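A hedged usage sketch: any object with the three attributes works, for example a SimpleNamespace standing in for the real package module (the distribution must actually be installed for get_entry_info() to find it, so the call itself is left commented out):

from types import SimpleNamespace

# hypothetical stand-in for the real package module
package = SimpleNamespace(
    DISTRIBUTION_NAME="DragonPyEmulator",
    DIST_GROUP="console_scripts",
    ENTRY_POINT="DragonPy",
)
# would return something like "dragonpy.core.cli":
# module_name = get_module_name(package)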
verbose = kwargs.pop("verbose", False)
if verbose:
    click.secho(" ".join([repr(i) for i in args]), bg='blue', fg='white')

executable = args[0]
if not os.path.isfile(executable):
    raise RuntimeError("First argument %r is not an existing file!" % executable)
if not os.access(executable, os.X_OK):
    raise RuntimeError("First argument %r exists, but is not executable!" % executable)

return subprocess.Popen(args, **kwargs)
def _run(*args, **kwargs)
Run current executable via subprocess and given args
3.474879
3.240243
1.072413
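A usage sketch, assuming a POSIX system where /bin/ls exists; verbose=True echoes the argument list before spawning, and the returned Popen handle is used as usual:

import subprocess

proc = _run("/bin/ls", "-la", stdout=subprocess.PIPE, verbose=True)
output, _ = proc.communicate()
print(output.decode())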
# --- CUT here ---
if options.install_type == INST_PYPI:
    requirements = NORMAL_INSTALLATION
elif options.install_type == INST_GIT:
    requirements = GIT_READONLY_INSTALLATION
elif options.install_type == INST_DEV:
    requirements = DEVELOPER_INSTALLATION
else:
    # Should never happen
    raise RuntimeError("Install type %r unknown?!?" % options.install_type)

env_subprocess = EnvSubprocess(home_dir)  # from bootstrap_env.bootstrap_install_pip

logfile = os.path.join(env_subprocess.abs_home_dir, "install.log")

for requirement in requirements:
    sys.stdout.write("\n\nInstall %r:\n" % requirement)
    env_subprocess.call_env_pip(["install", "--log=%s" % logfile, requirement])

sys.stdout.write("\n")
def after_install(options, home_dir)
Called after the virtualenv was created and pip/setuptools were installed. Now we install the required libs/packages.
5.607295
5.513717
1.016972
sys.stderr.flush()  # for eclipse
sys.stdout.flush()  # for eclipse

tb = sys.exc_info()[2]
while True:
    if not tb.tb_next:
        break
    tb = tb.tb_next

stack = []
f = tb.tb_frame
while f:
    stack.append(f)
    f = f.f_back

txt = traceback.format_exc()
txt_lines = txt.splitlines()
first_line = txt_lines.pop(0)
last_line = txt_lines.pop(-1)
click.secho(first_line, fg="red")
for line in txt_lines:
    if line.strip().startswith("File"):
        click.echo(line)
    else:
        click.secho(line, fg="white", bold=True)
click.secho(last_line, fg="red")

click.echo()
click.secho(
    "Locals by frame, most recent call first:",
    fg="blue", bold=True
)
for frame in stack:
    msg = 'File "%s", line %i, in %s' % (
        frame.f_code.co_filename,
        frame.f_lineno,
        frame.f_code.co_name,
    )
    msg = click.style(msg, fg="white", bold=True, underline=True)
    click.echo("\n *** %s" % msg)

    for key, value in list(frame.f_locals.items()):
        click.echo("%30s = " % click.style(key, bold=True), nl=False)
        # We have to be careful not to cause a new error in our error
        # printer! Calling str() on an unknown object could cause an
        # error we don't want.
        if isinstance(value, int):
            value = "$%x (decimal: %i)" % (value, value)
        else:
            value = repr(value)

        if len(value) > MAX_CHARS:
            value = "%s..." % value[:MAX_CHARS]

        try:
            click.echo(value)
        except:
            click.echo("<ERROR WHILE PRINTING VALUE>")
def print_exc_plus()
Print the usual traceback information, followed by a listing of all the local variables in each frame.
2.69795
2.725476
0.989901
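Typical use wraps a call site, so the extended traceback can show each frame's locals when something blows up. A hedged sketch:

def buggy(x):
    y = x - 1
    return 10 / y  # ZeroDivisionError when x == 1

try:
    buggy(1)
except Exception:
    # prints the traceback plus `x` and `y` for every frame
    print_exc_plus()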
data = iter(data)
data.next()  # Skip first \r
byte_count = 1  # incl. first \r
while True:
    code = iter(data.next, 0xd)  # until \r
    code = "".join([chr(c) for c in code])
    if not code:
        log.warning("code ended.")
        break
    byte_count += len(code) + 1  # and \r consumed in iter()

    try:
        line_number, code = code.split(" ", 1)
    except ValueError, err:
        print "\nERROR: Splitting line number in %s: %s" % (repr(code), err)
        break

    try:
        line_number = int(line_number)
    except ValueError, err:
        print "\nERROR: Part '%s' is not a line number!" % repr(line_number)
        continue

    self.code_lines.append(
        CodeLine(None, line_number, code)
    )

print "%i Bytes parsed" % byte_count
if block_length != byte_count:
    log.error(
        "Block length value %i is not equal to parsed bytes!" % block_length
    )
def add_ascii_block(self, block_length, data)
add a block of ASCII BASIC source code lines.

>>> data = [
...     0xd,
...     0x31,0x30,0x20,0x50,0x52,0x49,0x4e,0x54,0x20,0x22,0x54,0x45,0x53,0x54,0x22,
...     0xd,
...     0x32,0x30,0x20,0x50,0x52,0x49,0x4e,0x54,0x20,0x22,0x48,0x45,0x4c,0x4c,0x4f,0x20,0x57,0x4f,0x52,0x4c,0x44,0x21,0x22,
...     0xd
... ]
>>> len(data)
41
>>> fc = FileContent(Dragon32Config)
>>> fc.add_ascii_block(41, iter(data))
41 Bytes parsed
>>> fc.print_code_lines()
10 PRINT "TEST"
20 PRINT "HELLO WORLD!"
4.938425
4.794827
1.029948
codepoints = []
codepoints += list(string2codepoint(self.filename.ljust(8, " ")))
codepoints.append(self.cfg.FTYPE_BASIC)  # one byte file type
codepoints.append(self.cfg.BASIC_ASCII)  # one byte ASCII flag

# one byte gap flag (0x00=no gaps, 0xFF=gaps)
# http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=4231&p=9110#p9110
codepoints.append(self.gap_flag)

# machine code starting/loading address
if self.file_type != self.cfg.FTYPE_BASIC:  # BASIC program (0x00)
    codepoints = iter(codepoints)

    self.start_address = get_word(codepoints)
    log.info("machine code starting address: %s" % hex(self.start_address))

    self.load_address = get_word(codepoints)
    log.info("machine code loading address: %s" % hex(self.load_address))
else:
    # not needed in BASIC files
    # http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=4341&p=9109#p9109
    pass

log.debug("filename block: %s" % pformat_codepoints(codepoints))
return codepoints
def get_filename_block_as_codepoints(self)
TODO: Support tokenized BASIC. Now we only create ASCII BASIC.
4.342369
4.104821
1.05787
if self.current_file is not None and self.buffer:
    self.current_file.add_block_data(self.buffered_block_length, self.buffer)
    self.buffer = []
    self.buffered_block_length = 0
def buffer2file(self)
add the code buffer content to CassetteFile() instance
3.897634
3.439184
1.133302
errors = defaultdict(list)
for key in validation:
    if isinstance(validation[key], (list, tuple)):
        if Required in validation[key]:
            if not Required(key, dictionary):
                errors[key] = ["must be present"]
                continue
        _validate_list_helper(validation, dictionary, key, errors)
    else:
        v = validation[key]
        if v == Required:
            if not Required(key, dictionary):
                errors[key] = ["must be present"]
        else:
            _validate_and_store_errs(v, dictionary, key, errors)
if len(errors) > 0:
    # `errors` gets downgraded from defaultdict to dict
    # because it makes for prettier output
    return ValidationResult(valid=False, errors=dict(errors))
else:
    return ValidationResult(valid=True, errors={})
def validate(validation, dictionary)
Validate that a dictionary passes a set of key-based validators. If all of the keys in the dictionary are within the parameters specified by the validation mapping, then the validation passes.

:param validation: a mapping of keys to validators
:type validation: dict
:param dictionary: dictionary to be validated
:type dictionary: dict
:return: a tuple containing a bool indicating success or failure and a mapping of fields to error messages.
3.477135
3.520919
0.987565
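A short usage sketch grounded in the code above: a mapping of keys to validator lists, with Required marking mandatory keys. The lambda is a hypothetical extra validator, and the .valid/.errors attributes are assumed from the ValidationResult constructor calls shown in the body:

validations = {
    "name": [Required],                   # key must be present
    "age": [Required, lambda x: x > 0],   # hypothetical extra validator
}
result = validate(validations, {"name": "Ada", "age": 36})
print(result.valid)   # -> True
result = validate(validations, {"age": 36})
print(result.errors)  # -> {'name': ['must be present']}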
def argspec_lambda(value):
    argspec = getargspec(value)
    argspec_kw_vals = ()
    if argspec.defaults is not None:
        argspec_kw_vals = argspec.defaults
    kw_vals = {}
    arg_offset = 0
    arg_len = len(argspec.args) - 1
    for val in argspec_kw_vals[::-1]:
        kw_vals[argspec.args[arg_len - arg_offset]] = val
        arg_offset += 1
    if kwargs == kw_vals:
        if len(args) != arg_len - arg_offset + 1:
            return False
        index = 0
        for arg in args:
            if argspec.args[index] != arg:
                return False
            index += 1
        return True
    return False
argspec_lambda.err_message = "must match argspec ({0}) {{{1}}}".format(args, kwargs)
# as little sense as negating this makes, best to just be consistent.
argspec_lambda.not_message = "must not match argspec ({0}) {{{1}}}".format(args, kwargs)
return argspec_lambda
def ArgSpec(*args, **kwargs)
Validate a function based on the given argspec.

# Example:
validations = {
    "foo": [ArgSpec("a", "b", "c", bar="baz")]
}
def pass_func(a, b, c, bar="baz"):
    pass
def fail_func(b, c, a, baz="bar"):
    pass
passes = {"foo": pass_func}
fails = {"foo": fail_func}
3.492778
3.399624
1.027401
current_date = start_date
if group_type == 'monthly':
    current_year = start_date.year
    current_month = start_date.month
    for i in range(n - 1):
        current_month -= 1
        if current_month == 0:
            current_month = 12
            current_year -= 1
    first_date = datetime.datetime(current_year, current_month, 1)
elif group_type == 'weekly':
    first_date = start_date - datetime.timedelta(days=start_date.weekday() + (n - 1) * 7)
elif group_type == 'daily':
    first_date = start_date - datetime.timedelta(days=n - 1)
first_date = datetime.datetime(first_date.year, first_date.month, first_date.day, 0, 0, 0)
return first_date
def get_first_date_for_group(start_date,group_type,n)
:param start_date: start date
:param group_type: daily, weekly, monthly
:param n: how many groups we want to get
1.701401
1.763046
0.965035
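A worked example of the date arithmetic, assuming the function is in scope: asking for n=3 monthly groups starting 2018-03-15 walks back two months and snaps to the first of the month, while the daily case just subtracts n-1 days and zeroes the time:

import datetime

start = datetime.datetime(2018, 3, 15, 14, 30)
print(get_first_date_for_group(start, 'monthly', 3))  # -> 2018-01-01 00:00:00
print(get_first_date_for_group(start, 'daily', 3))    # -> 2018-03-13 00:00:00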
commits = self.repository.get_commits(**kwargs)
snapshots = []
for commit in commits:
    for key in ('committer_date', 'author_date'):
        commit[key] = datetime.datetime.fromtimestamp(commit[key + '_ts'])
    snapshot = GitSnapshot(commit)
    hasher = Hasher()
    hasher.add(snapshot.sha)
    snapshot.hash = hasher.digest.hexdigest()
    snapshot.project = self.project
    snapshot.pk = uuid.uuid4().hex
    snapshots.append(snapshot)
return snapshots
def get_snapshots(self,**kwargs)
Returns a list of snapshots in a given repository.
3.522371
3.574955
0.985291
objects_by_key = {'a': defaultdict(list), 'b': defaultdict(list)}

for name, objects in ('a', objects_a), ('b', objects_b):
    d = objects_by_key[name]
    for obj in objects:
        d[key(obj)].append(obj)

added_objects = [obj for key, objs in objects_by_key['b'].items()
                 if key not in objects_by_key['a'] for obj in objs]
deleted_objects = [obj for key, objs in objects_by_key['a'].items()
                   if key not in objects_by_key['b'] for obj in objs]

joint_keys = [key for key in objects_by_key['a'] if key in objects_by_key['b']]

modified_objects = []

# we go through the keys that exist in both object sets
for key in joint_keys:
    objects_a = objects_by_key['a'][key]
    objects_b = objects_by_key['b'][key]
    if len(objects_a) > 1 or len(objects_b) > 1:
        # this is an ambiguous situation: we have more than one object for the same
        # key, so we have to decide which ones have been added or not
        # we try to remove identical objects from the set
        objects_a_copy = objects_a[:]
        objects_b_copy = objects_b[:]
        # for the next step, we need a comparator
        if comparator:
            # we iterate through the list and try to find different objects...
            for obj_a in objects_a:
                for obj_b in objects_b_copy:
                    if comparator(obj_a, obj_b) == 0:
                        # these objects are identical, we remove them from both sets...
                        objects_a_copy.remove(obj_a)
                        objects_b_copy.remove(obj_b)
                        break
        # here we cannot distinguish objects...
        if len(objects_b_copy) > len(objects_a_copy):
            # we arbitrarily mark the last objects in objects_b as added
            added_objects.extend(objects_b_copy[len(objects_a_copy):])
        elif len(objects_a_copy) > len(objects_b_copy):
            # we arbitrarily mark the last objects in objects_a as deleted
            deleted_objects.extend(objects_a_copy[len(objects_b_copy):])
    else:
        if comparator and comparator(objects_a[0], objects_b[0]) != 0:
            # these objects are different
            modified_objects.append(objects_a[0])

result = {
    'added': added_objects,
    'deleted': deleted_objects,
    'modified': modified_objects,
}

if with_unchanged:
    unchanged_objects = [objects_by_key['b'][key] for key in joint_keys
                         if not objects_by_key['b'][key] in modified_objects]
    result['unchanged'] = unchanged_objects

return result
def diff_objects(objects_a,objects_b,key,comparator = None,with_unchanged = False)
Returns a "diff" between two lists of objects. :param key: The key that identifies objects with identical location in each set, such as files with the same path or code objects with the same URL. :param comparator: Comparison functions that decides if two objects are identical.
2.258243
2.324559
0.971471
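A small usage sketch, assuming plain dicts keyed by 'path' and a comparator that returns 0 for equal objects:

old = [{'path': 'a.py', 'size': 1}, {'path': 'b.py', 'size': 2}]
new = [{'path': 'b.py', 'size': 3}, {'path': 'c.py', 'size': 1}]

result = diff_objects(
    old, new,
    key=lambda obj: obj['path'],
    comparator=lambda x, y: 0 if x['size'] == y['size'] else 1,
)
print(result['added'])     # -> [{'path': 'c.py', 'size': 1}]
print(result['deleted'])   # -> [{'path': 'a.py', 'size': 1}]
print(result['modified'])  # -> [{'path': 'b.py', 'size': 2}]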
annotations = defaultdict(list)

for file_revision in file_revisions:

    issues_results = {}
    for analyzer_name, results in file_revision.results.items():
        if 'issues' in results:
            issues_results[analyzer_name] = results['issues']
            del results['issues']
            if len(issues_results[analyzer_name]) > 1000:
                issues_results[analyzer_name] = [{
                    'code': 'TooManyIssues',
                    'analyzer': analyzer_name,
                }]

    with self.project.backend.transaction():
        self.project.backend.save(file_revision)

    def location_sorter(issue):
        if issue['location'] and issue['location'][0] and issue['location'][0][0]:
            return issue['location'][0][0][0]
        return 0

    with self.project.backend.transaction():
        for analyzer_name, issues in issues_results.items():
            grouped_issues = group_issues_by_fingerprint(issues)
            for issue_dict in grouped_issues:
                hasher = Hasher()
                hasher.add(analyzer_name)
                hasher.add(issue_dict['code'])
                hasher.add(issue_dict['fingerprint'])
                issue_dict['hash'] = hasher.digest.hexdigest()
                try:
                    # we check if the issue already exists
                    issue = self.project.backend.get(Issue, {'hash': issue_dict['hash'],
                                                             'project': self.project})
                except Issue.DoesNotExist:
                    # if not, we create it
                    d = issue_dict.copy()
                    d['analyzer'] = analyzer_name
                    if 'location' in d:
                        del d['location']
                    if 'occurrences' in d:
                        del d['occurrences']
                    issue = Issue(d)
                    issue.project = self.project
                    self.project.backend.save(issue)
                for occurrence in issue_dict['occurrences']:
                    hasher = Hasher()
                    hasher.add(file_revision.hash)
                    hasher.add(issue.hash)
                    hasher.add(occurrence.get('from_row'))
                    hasher.add(occurrence.get('from_column'))
                    hasher.add(occurrence.get('to_row'))
                    hasher.add(occurrence.get('to_column'))
                    hasher.add(occurrence.get('sequence'))
                    occurrence['hash'] = hasher.digest.hexdigest()
                    try:
                        # we check if the occurrence already exists
                        occurrence = self.project.backend.get(IssueOccurrence, {'hash': occurrence['hash'],
                                                                                'issue': issue})
                    except IssueOccurrence.DoesNotExist:
                        # if not, we create it
                        occurrence = IssueOccurrence(occurrence)
                        occurrence.issue = issue
                        occurrence.file_revision = file_revision
                        self.project.backend.save(occurrence)
                    annotations['occurrences'].append(occurrence)
                annotations['issues'].append(issue)

return annotations
def save_file_revisions(self,snapshot,file_revisions)
We convert various items in the file revision to documents, so that we can easily search and retrieve them...
2.215478
2.215793
0.999858
if ud is None:
    return
for key, value in ud.items():
    if not key in d:
        d[key] = value
    elif isinstance(value, dict):
        update(d[key], value)
    else:
        d[key] = value
def update(d,ud)
Recursively merge the values of ud into d.
2.001033
1.884392
1.061899
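The recursion only descends when both sides hold a dict for the same key; everything else is overwritten. For example:

d = {'a': {'b': 1}, 'x': 1}
update(d, {'a': {'c': 2}, 'x': 2})
print(d)  # -> {'a': {'b': 1, 'c': 2}, 'x': 2}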
while path is not None:
    git_path = os.path.join(path, '.git')
    if os.path.exists(git_path) and os.path.isdir(git_path):
        return path
    new_path = os.path.dirname(path)
    if new_path == path:
        # we arrived at the filesystem root: no repository found
        break
    path = new_path
return None
def find_git_repository(self, path)
Tries to find a directory with a .git repository
1.920895
1.884652
1.019231
project_path = get_project_path()
project_config = get_project_config(project_path)
backend = get_backend(project_path, project_config, initialize_db=False)

url = str(backend.engine.url)

with backend.transaction():
    context.configure(
        connection=backend.connection,
        url=url,
        target_metadata=backend.metadata,
        literal_binds=True)
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_offline()
Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output.
3.347943
2.947662
1.135796
print("Running migrations online") project_path = get_project_path() project_config = get_project_config(project_path) backend = get_backend(project_path,project_config,initialize_db = False) context.configure( connection=backend.connection, target_metadata=backend.metadata, ) with context.begin_transaction(): context.run_migrations()
def run_migrations_online()
Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.
3.310508
3.254451
1.017225
hasher = Hasher()

def add_to_hash(value):
    if isinstance(value, dict):
        if target in value:
            add_to_hash(value[target])
        else:
            attribute_list = []
            for key, v in sorted(value.items(), key=lambda x: x[0]):
                if (fields is not None and key not in fields) \
                        or (exclude is not None and key in exclude):
                    continue
                add_to_hash(key)
                add_to_hash(v)
    elif isinstance(value, (tuple, list)) and value and isinstance(value[0], (dict, node_class)):
        for i, v in enumerate(value):
            hasher.add(i)
            add_to_hash(v)
    else:
        hasher.add(value)

add_to_hash(node)
return hasher.digest.hexdigest()
def get_hash(node,fields = None,exclude = ['pk','_id'],target = 'pk')
Here we generate a unique hash for a given node in the syntax tree.
2.525617
2.481817
1.017649
self._messages.append((msg_id, location, msg))
def add_message(self, msg_id, location, msg)
Client API to send a message
6.462101
7.878576
0.820212
code = file_revision.get_file_content()
if not isinstance(code, unicode):
    code = unicode(code, errors='ignore')
lines = code.split(u"\n")
s = ""
for l in location:
    ((from_row, from_column), (to_row, to_column)) = l
    if from_column is None:
        continue
    if from_row == to_row:
        s += lines[from_row - 1][from_column:to_column]
    else:
        if to_row < from_row:
            raise ValueError("from_row must be smaller than to_row")
        s += lines[from_row - 1][from_column:]
        current_row = from_row + 1
        while current_row < to_row:
            s += lines[current_row - 1]
            current_row += 1
        s += lines[current_row - 1][:to_column]
hasher = Hasher()
hasher.add(s)
if extra_data is not None:
    hasher.add(extra_data)
return hasher.digest.hexdigest()
def get_fingerprint_from_code(self,file_revision,location, extra_data=None)
This function generates a fingerprint from a series of code snippets. Can be used by derived analyzers to generate fingerprints based on code if nothing better is available.
2.378782
2.409048
0.987437
issues_by_fingerprint = defaultdict(list)

for issue in issues:
    if not 'fingerprint' in issue:
        raise AttributeError("No fingerprint defined for issue with analyzer %s and code %s!" %
                             (issue.get('analyzer', '(undefined)'), issue['code']))
    fp_code = "%s:%s" % (issue['fingerprint'], issue['code'])
    if fp_code in issues_by_fingerprint:
        grouped_issue = issues_by_fingerprint[fp_code]
    else:
        grouped_issue = issue.copy()
        grouped_issue['occurrences'] = []
        if 'location' in grouped_issue:
            del grouped_issue['location']
        issues_by_fingerprint[fp_code] = grouped_issue
    locations = issue.get('location', [])
    if locations:
        for i, start_stop in enumerate(locations):
            occurrence = {
                'from_row': None,
                'to_row': None,
                'from_column': None,
                'to_column': None,
                'sequence': i
            }
            grouped_issue['occurrences'].append(occurrence)
            if not isinstance(start_stop, (list, tuple)) or not len(start_stop) == 2:
                continue
            start, stop = start_stop
            if isinstance(start, (list, tuple)) and len(start) == 2:
                occurrence['from_row'] = start[0]
                occurrence['from_column'] = start[1]
            if isinstance(stop, (list, tuple)) and len(stop) == 2:
                occurrence['to_row'] = stop[0]
                occurrence['to_column'] = stop[1]
    grouped_issue['occurrences'] = sorted(grouped_issue['occurrences'],
                                          key=lambda x: (x['from_row'], x['from_column']))

return issues_by_fingerprint.values()
def group_issues_by_fingerprint(issues)
Groups issues by fingerprint. Grouping is done by issue code in addition. IMPORTANT: It is assumed that all issues come from the SAME analyzer.
2.171777
2.131264
1.019009
if backend is None:
    backend = self.backend
query = {'project_issue_classes.project': self}
if enabled is not None:
    query['project_issue_classes.enabled'] = enabled
issue_classes = backend.filter(self.IssueClass, query, **kwargs)
if sort is not None:
    issue_classes = issue_classes.sort(sort)
return issue_classes
def get_issue_classes(self,backend = None,enabled = True,sort = None,**kwargs)
Retrieves the issue classes for a given backend.

:param backend: A backend to use. If None, the default backend will be used.
:param enabled: Whether to retrieve enabled or disabled issue classes. Passing `None` will retrieve all issue classes.
3.015517
3.653241
0.825436
match_list = __RE_SYMBOL.findall(preprocess(text))
if match_list:
    raise InvalidCharError("invalid symbols found: {}".format(match_list))
def validate_symbol(text)
Verifying whether symbol(s) are included in the ``text`` or not.

:param str text: Input text.
:raises pathvalidate.InvalidCharError: If symbol(s) are included in the ``text``.
8.693339
7.293375
1.19195
try:
    new_text = __RE_SYMBOL.sub(replacement_text, preprocess(text))
except (TypeError, AttributeError):
    raise TypeError("text must be a string")

if not replacement_text:
    return new_text

if is_replace_consecutive_chars:
    new_text = re.sub("{}+".format(re.escape(replacement_text)), replacement_text, new_text)

if is_strip:
    new_text = new_text.strip(replacement_text)

return new_text
def replace_symbol(text, replacement_text="", is_replace_consecutive_chars=False, is_strip=False)
Replace all of the symbols in the ``text``.

:param str text: Input text.
:param str replacement_text: Replacement text.
:return: A replacement string.
:rtype: str

:Examples:
    :ref:`example-sanitize-symbol`
2.54488
2.77426
0.917318
FileNameSanitizer(platform=platform, min_len=min_len, max_len=max_len).validate(filename)
def validate_filename(filename, platform=None, min_len=1, max_len=_DEFAULT_MAX_FILENAME_LEN)
Verifying whether the ``filename`` is a valid file name or not.

Args:
    filename (str): Filename to validate.
    platform (str, optional): .. include:: platform.txt
    min_len (int, optional): Minimum length of the ``filename``. The value must be greater or equal to one. Defaults to ``1``.
    max_len (int, optional): Maximum length of the ``filename``. The value must be lower than:
        - ``Linux``: 4096
        - ``macOS``: 1024
        - ``Windows``: 260
        - ``Universal``: 260
        Defaults to ``255``.

Raises:
    InvalidLengthError: If the ``filename`` is longer than ``max_len`` characters.
    InvalidCharError: If the ``filename`` includes invalid character(s) for a filename: |invalid_filename_chars|. The following characters are also invalid for the Windows platform: |invalid_win_filename_chars|.
    ReservedNameError: If the ``filename`` equals a name reserved by the OS. Windows reserved names are: ``"CON"``, ``"PRN"``, ``"AUX"``, ``"NUL"``, ``"COM[1-9]"``, ``"LPT[1-9]"``.

Example:
    :ref:`example-validate-filename`

See Also:
    `Naming Files, Paths, and Namespaces (Windows) <https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx>`__
3.945885
5.633639
0.700415
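A hedged usage sketch of the validator, assuming InvalidCharError (documented in the Raises section above) is importable from the library and that ``:`` and ``*`` are in the Windows-invalid character set:

try:
    validate_filename("fi:l*e?name.txt", platform="Windows")
except InvalidCharError as e:
    print("invalid filename:", e)

validate_filename("filename.txt", platform="Windows")  # passes silently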
FilePathSanitizer(platform=platform, min_len=min_len, max_len=max_len).validate(file_path)
def validate_filepath(file_path, platform=None, min_len=1, max_len=None)
Verifying whether the ``file_path`` is a valid file path or not.

Args:
    file_path (str): File path to validate.
    platform (str, optional): .. include:: platform.txt
    min_len (int, optional): Minimum length of the ``file_path``. The value must be greater or equal to one. Defaults to ``1``.
    max_len (int, optional): Maximum length of the ``file_path``. If the value is |None|, in the default, it is automatically determined by the ``platform``:
        - ``Linux``: 4096
        - ``macOS``: 1024
        - ``Windows``: 260

Raises:
    NullNameError: If the ``file_path`` is empty.
    InvalidCharError: If the ``file_path`` includes invalid char(s): |invalid_file_path_chars|. The following characters are also invalid for the Windows platform: |invalid_win_file_path_chars|
    InvalidLengthError: If the ``file_path`` is longer than ``max_len`` characters.

Example:
    :ref:`example-validate-file-path`

See Also:
    `Naming Files, Paths, and Namespaces (Windows) <https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx>`__
3.728291
5.071111
0.735202
return FileNameSanitizer(platform=platform, max_len=max_len).sanitize(
    filename, replacement_text
)
def sanitize_filename( filename, replacement_text="", platform=None, max_len=_DEFAULT_MAX_FILENAME_LEN )
Make a valid filename from a string.

To make a valid filename, the function does the following:
    - Replace invalid characters for file names included in the ``filename`` with the ``replacement_text``. Invalid characters are:
        - unprintable characters
        - |invalid_filename_chars|
        - for Windows only: |invalid_win_filename_chars|
    - Append an underscore (``"_"``) at the tail of the name if the sanitized name is one of the names reserved by the operating system.

Args:
    filename (str or PathLike object): Filename to sanitize.
    replacement_text (str, optional): Replacement text for invalid characters. Defaults to ``""``.
    platform (str, optional): .. include:: platform.txt
    max_len (int, optional): The upper limit of the ``filename`` length. Truncate the name length if the ``filename`` length exceeds this value. Defaults to ``255``.

Returns:
    Same type as the ``filename`` (str or PathLike object): Sanitized filename.

Raises:
    ValueError: If the ``filename`` is an invalid filename.

Example:
    :ref:`example-sanitize-filename`
4.257466
7.186581
0.592419
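And the sanitizer counterpart: with the default empty ``replacement_text`` the invalid characters are simply dropped. The outputs shown assume ``:``, ``*``, and ``?`` are in the Windows-invalid set listed above:

print(sanitize_filename("fi:l*e?name.txt", platform="Windows"))
# -> 'filename.txt'
print(sanitize_filename("fi:l*e?name.txt", replacement_text="_", platform="Windows"))
# -> 'fi_l_e_name.txt'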
return FilePathSanitizer(platform=platform, max_len=max_len).sanitize(
    file_path, replacement_text
)
def sanitize_filepath(file_path, replacement_text="", platform=None, max_len=None)
Make a valid file path from a string.

Replace invalid characters for a file path within the ``file_path`` with the ``replacement_text``. Invalid characters are as follows: |invalid_file_path_chars|, |invalid_win_file_path_chars| (and non-printable characters).

Args:
    file_path (str or PathLike object): File path to sanitize.
    replacement_text (str, optional): Replacement text for invalid characters. Defaults to ``""``.
    platform (str, optional): .. include:: platform.txt
    max_len (int, optional): The upper limit of the ``file_path`` length. Truncate the name if the ``file_path`` length exceeds this value. If the value is |None|, the default value is automatically determined by the execution platform:
        - ``Linux``: 4096
        - ``macOS``: 1024
        - ``Windows``: 260

Returns:
    Same type as the argument (str or PathLike object): Sanitized filepath.

Raises:
    ValueError: If the ``file_path`` is an invalid file path.

Example:
    :ref:`example-sanitize-file-path`
3.831668
6.695392
0.572284
validate_null_string(label, error_msg="label is empty")

match_list = __RE_INVALID_LTSV_LABEL.findall(preprocess(label))
if match_list:
    raise InvalidCharError(
        "invalid character found for a LTSV format label: {}".format(match_list)
    )
def validate_ltsv_label(label)
Verifying whether ``label`` is a valid `Labeled Tab-separated Values (LTSV) <http://ltsv.org/>`__ label or not.

:param str label: Label to validate.
:raises pathvalidate.NullNameError: If the ``label`` is empty.
:raises pathvalidate.InvalidCharError: If invalid character(s) found in the ``label`` for a LTSV format label.
8.633387
6.190088
1.394711
validate_null_string(label, error_msg="label is empty")

return __RE_INVALID_LTSV_LABEL.sub(replacement_text, preprocess(label))
def sanitize_ltsv_label(label, replacement_text="")
Replace all of the invalid characters for a LTSV format label in the ``label``.

:param str label: Input label.
:param str replacement_text: Replacement text.
:return: A replacement string.
:rtype: str
11.755373
12.654363
0.928958
try:
    self.parameters = self._main_url.split('?')[1]
    return self.parameters.split('&')
except IndexError:
    # no '?' in the URL -> no query string to split
    return self.parameters
def param(self)
Returns the URL query parameters
5.971359
5.492177
1.087248
remove_pac = self.cleanup.replace("https://", "").replace("http://", "").replace("www.", "")
try:
    return remove_pac.split('/')[0]
except:
    return None
def domain(self)
Return the domain from the URL
6.434042
4.80463
1.339134
root = ET.Element("entry") for x in self.xml_tags: if getattr(self, x): if x in ['episodes', 'scores', 'status', 'dates', 'storage', 'rewatched', 'flags', 'tags']: if x == 'episodes': if self.episodes.current: temp = ET.SubElement(root, 'episode') temp.text = str(self.episodes.current) elif x == 'scores': if self.scores.user: temp = ET.SubElement(root, 'score') temp.text = str(self.scores.user) elif x == 'status': if self.status.user: temp = ET.SubElement(root, 'status') temp.text = str(self.status.user) elif x == 'dates': if self.dates.user.start: start = ET.SubElement(root, 'date_start') start.text = format_date(self.dates.user.start) if self.dates.user.end: end = ET.SubElement(root, 'date_finish') end.text = format_date(self.dates.user.end) elif x == 'storage': if self.storage.type: stype = ET.SubElement(root, 'storage_type') stype.text = str(self.storage.type) if self.storage.value: sval = ET.SubElement(root, 'storage_value') sval.text = str(self.storage.value) elif x == 'rewatched': if self.rewatched.times: rt = ET.SubElement(root, 'times_rewatched') rt.text = str(self.rewatched.times) if self.rewatched.value: rv = ET.SubElement(root, 'rewatch_value') rv.text = str(self.rewatched.value) elif x == 'flags': if self.flags.discussion: df = ET.SubElement(root, 'enable_discussion') df.text = '1' if self.flags.discussion else '0' if self.flags.rewatching: rf = ET.SubElement(root, 'enable_rewatching') rf.text = '1' if self.flags.rewatching else '0' else: if self.tags: temp = ET.SubElement(root, 'tags') temp.text = ','.join(self.tags) else: temp = ET.SubElement(root, x) temp.text = str(getattr(self, x)) return '<?xml version="1.0" encoding="UTF-8"?>{}'.format(ET.tostring(root, encoding="unicode"))
def to_xml(self)
Convert the data to an XML string.

:return: A string of valid XML data.
:rtype: str
1.769881
1.779511
0.994588
r = requests.get(self.apiurl + "/users",
                 params={"filter[name]": term},
                 headers=self.header)
if r.status_code != 200:
    raise ServerError
jsd = r.json()
if jsd['meta']['count']:
    return SearchWrapper(
        jsd['data'],
        jsd['links']['next'] if 'next' in jsd['links'] else None,
        self.header)
else:
    return None
def search(self, term)
Search for a user by name.

:param str term: What to search for.
:return: The results as a SearchWrapper iterator or None if no results.
:rtype: SearchWrapper or None
3.629252
3.2988
1.100173
final_dict = {"data": {"type": "users", "attributes": data}} r = requests.post(self.apiurl + "/users", json=final_dict, headers=self.header) if r.status_code != 200: raise ServerError return r.json()
def create(self, data)
Create a user. Review the attributes the server requires; you need only
provide the attributes themselves, not the enclosing JSON:API envelope
(it is built for you).

:param dict data: A dictionary of the required attributes.
:return: Dictionary returned by the server.
:rtype: dict
:raises: :class:`Pymoe.errors.ServerError` on failure.
3.637848
3.58411
1.014993
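A hedged usage sketch; `client` is a hypothetical, already-constructed wrapper instance, and the attribute names are illustrative, not a confirmed list:

client = ...  # hypothetical wrapper exposing the methods in this dump
new_user = client.create({
    "name": "example_user",       # illustrative attributes only
    "email": "user@example.com",
    "password": "hunter2",
})
print(new_user["data"]["id"])     # JSON:API responses nest under 'data'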
r = requests.get(self.apiurl + "/users/{}".format(uid), headers=self.header)
if r.status_code != 200:
    raise ServerError
jsd = r.json()
if jsd['data']:
    return jsd['data']
else:
    return None
def get(self, uid)
Get a user's information by their id.

:param str uid: User ID
:return: The user's information or None
:rtype: dict or None
3.249507
3.454177
0.940747
final_dict = {"data": {"id": uid, "type": "users", "attributes": data}} final_headers = self.header final_headers['Authorization'] = "Bearer {}".format(token) r = requests.patch(self.apiurl + "/users/{}".format(uid), json=final_dict, headers=final_headers) if r.status_code != 200: raise ServerError return True
def update(self, uid, data, token)
Update a user's data. Requires an auth token.

:param str uid: User ID to update
:param dict data: The dictionary of data attributes to change. Just the attributes.
:param str token: The authorization token for this user
:return: True on success
:rtype: bool
:raises: :class:`Pymoe.errors.ServerError` on failure
3.052691
3.043456
1.003035
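Tying it together, a hedged sketch of updating a user; `client` is the same hypothetical wrapper, and the token would come from an OAuth flow such as the authenticate method later in this dump:

token = "an-oauth-token-obtained-elsewhere"  # placeholder, not a real flow
if client.update("12345", {"about": "Updated via the API"}, token):
    print("profile updated")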
query_string = vars = {"query": term, "page": page, "perpage": perpage} r = requests.post(self.settings['apiurl'], headers=self.settings['header'], json={'query': query_string, 'variables': vars}) jsd = r.text try: jsd = json.loads(jsd) except ValueError: return None else: return jsd
def character(self, term, page=1, perpage=3)
Search for a character by term.

Results are paginated by default. ``page`` specifies which page we're on;
``perpage`` specifies how many results per page to request (3 is just the
example from the API docs).

:param str term: Name to search by
:param int page: Which page are we requesting? Starts at 1.
:param int perpage: How many results per page are we requesting?
:return: JSON object with the returned results, or None on a decode error.
:rtype: dict or NoneType
4.020381
4.20344
0.95645
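A hedged usage sketch; since the original query string was elided, the response shape shown assumes AniList's standard {'data': ...} envelope around the sketch query above:

results = client.character("Holo", page=1, perpage=5)  # client: hypothetical
if results is not None:
    for char in results["data"]["Page"]["characters"]:
        print(char["id"], char["name"])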
r = requests.get(
    "http://anisearch.outrance.pl/index.php",
    params={
        "task": "search",
        "query": term,
        "langs": "ja,x-jat,en" if lang is None else ','.join(lang)
    }
)
if r.status_code != 200:
    raise ServerError

# ET.fromstring already returns the root element; the original called
# .getroot() on it, which Element does not have.
root = ET.fromstring(r.text)
results = {}
for item in root.iter("anime"):
    # Parse XML http://wiki.anidb.net/w/User:Eloyard/anititles_dump
    # Assumption: each <anime> element carries its AniDB id in an
    # 'aid' attribute, as in the anititles dump format; neither name
    # was defined in the flattened source.
    aid = item.attrib['aid']
    results[aid] = {}
    for title in item.iter('title'):
        if title.attrib['type'] in ['official', 'main']:
            results[aid][title.attrib['xml:lang']] = title.text
return results
def search(term, lang=None)
As a convenient alternative to downloading and parsing a dump, this
function queries the AID search provided by Eloyard. This is the same
information available at http://anisearch.outrance.pl/.

:param str term: Search term
:param list lang: A list of language codes which determines what titles are returned
6.304342
4.569329
1.379709
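The helper is module-level, so a hedged call sketch (import path assumed; the language keys present depend on the dump's codes):

titles = search("Cowboy Bebop", lang=["en", "x-jat"])
for aid, names in titles.items():
    print(aid, names.get("en") or names.get("x-jat"))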
if not isinstance(flags, str):
    if isinstance(flags, list):
        finflags = ",".join(flags)
    else:
        raise SyntaxError("Flags should be a list or comma separated string")
else:
    finflags = flags

if not isinstance(filters, str):
    raise SyntaxError(
        "Filters needs to be a string in the format Filter<op>Value. "
        "The simplest form is search=\"<Term>\".")

if stype not in self.stypes:
    raise SyntaxError("{} not a valid Search type.".format(stype))

# Quote the filter term if the caller didn't. The original tested with
# `or`, which re-quoted filters that already contained one quote style;
# `and` is what the intent requires.
if '"' not in filters and "'" not in filters:
    newfilters = self.helperpat.split(filters)
    newfilters = [x.strip() for x in newfilters]
    newfilters[1] = '"' + newfilters[1] + '"'
    op = self.helperpat.search(filters)
    newfilters = op.group(0).join(newfilters)
    command = '{} {} ({}){}'.format(
        stype, finflags, newfilters,
        ' ' + ujson.dumps(options) if options is not None else '')
else:
    command = '{} {} ({}){}'.format(
        stype, finflags, filters,
        ' ' + ujson.dumps(options) if options is not None else '')

data = self.connection.send_command('get', command)
if 'id' in data:
    raise ServerError(data['msg'], data['id'])
else:
    # dict.get takes the default positionally, not as a keyword argument.
    return {'pages': data.get('more', False), 'data': data['items']}
def get(self, stype, flags, filters, options=None)
Send a request to the API to return results related to Visual Novels.

:param str stype: What are we searching for? One of: vn, release, producer,
    character, votelist, vnlist, wishlist
:param flags: See the D11 docs. A comma separated list of flags for what data
    to return. Can be list or str.
:param str filters: A string with the one filter to search by (apparently you
    only get one). This is kind of special: pass it in the form
    <filter><op>"<term>" for strings or <filter><op><number> for numbers.
    Also, per the docs, <filter>=<number> doesn't do what you might expect;
    use >, >= or < and <=. This method will attempt to properly format the
    filter if it isn't already quoted.
:param dict options: A dictionary of options to customize the search by.
    Optional, defaults to None.
:return dict: A dictionary containing a 'pages' and a 'data' key. 'data'
    contains a list of dictionaries with data on your results. If 'pages' is
    true, you can call this command again with the same parameters and pass a
    page option to get more data; otherwise no further results exist for this
    query.
:raises ServerError: Raises a ServerError if an error is returned.
3.941069
3.504841
1.124464
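A hedged sketch of the filter format the docstring describes (`client` is a hypothetical, logged-in VNDB client):

# String filters carry quotes (added for you if missing); numeric
# filters must use a comparison operator, never '='.
vns = client.get('vn', 'basic,details', 'search="Steins;Gate"')
more = client.get('vn', 'basic', 'id>=10', {'results': 25})
for item in vns['data']:
    print(item['id'], item['title'])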
if stype not in ['votelist', 'vnlist', 'wishlist']:
    raise SyntaxError(
        "{} is not a valid type for set. Should be one of: "
        "votelist, vnlist or wishlist.".format(stype))
# The original referenced an undefined name `id`; the parameter is `sid`.
command = "{} {} {}".format(stype, sid, ujson.dumps(fields))
data = self.connection.send_command('set', command)
if 'id' in data:
    raise ServerError(data['msg'], data['id'])
else:
    return True
def set(self, stype, sid, fields)
Send a request to the API to modify something in the database if logged in.

:param str stype: What are we modifying? One of: votelist, vnlist, wishlist
:param int sid: The ID that we're modifying.
:param dict fields: A dictionary of the fields and their values
:raises ServerError: Raises a ServerError if an error is returned
:return bool: True if successful, error otherwise
5.108717
3.525424
1.449107
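A companion sketch for set, under the same hypothetical `client`; VNDB votes run 10-100, so 80 records an 8/10:

if client.set('votelist', 17, {'vote': 80}):
    print("vote recorded")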
query_string = vars = {"id": item_id} r = requests.post(self.settings['apiurl'], headers=self.settings['header'], json={'query': query_string, 'variables': vars}) jsd = r.text try: jsd = json.loads(jsd) except ValueError: return None else: return jsd
def anime(self, item_id)
The function to retrieve an anime's details.

:param int item_id: the anime's ID
:return: dict or None
:rtype: dict or NoneType
4.464542
4.178822
1.068374
query_string = vars = {"id": item_id, "html": html} r = requests.post(self.settings['apiurl'], headers=self.settings['header'], json={'query': query_string, 'variables': vars}) jsd = r.text try: jsd = json.loads(jsd) except ValueError: return None else: return jsd
def review(self, item_id, html=True)
With the change to v2 of the API, reviews have their own IDs. This accepts
the ID of the review. You can set html to False if you want the review body
returned without HTML formatting. The API default is True.

:param int item_id: the ID of the review
:param bool html: do you want the body returned with HTML formatting?
:return: JSON object containing review information, or None on a decode error
:rtype: dict or NoneType
4.38605
4.35938
1.006118
r = requests.get(self.apiurl + "/mappings", params={"filter[externalSite]": external_site, "filter[externalId]": external_id}, headers=self.header) if r.status_code != 200: raise ServerError jsd = r.json() if len(jsd['data']) < 1: return None r = requests.get(jsd['data'][0]['relationships']['item']['links']['related'], headers=self.header) if r.status_code != 200: return jsd else: return r.json()
def get(self, external_site: str, external_id: int)
Get a kitsu mapping by external site ID.

:param str external_site: string representing the external site
:param int external_id: ID of the entry in the external site.
:return: Dictionary or None (for not found)
:rtype: Dictionary or None
:raises: :class:`Pymoe.errors.ServerError`
3.024502
2.834165
1.067158
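A hedged sketch resolving a MyAnimeList anime id to its Kitsu entry; treat the exact externalSite value as an assumption:

entry = client.get("myanimelist/anime", 1)  # client: hypothetical wrapper
if entry:
    print(entry["data"]["id"])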
r = requests.post(self.apiurl + "/token", params={"grant_type": "password", "username": username, "password": password, "client_id": self.cid, "client_secret": self.csecret}) if r.status_code != 200: raise ServerError jsd = r.json() if self.remember: self.token_storage[username] = {'token': jsd['access_token'], 'refresh': jsd['refresh_token'], 'expiration': int(jsd['created_at']) + int(jsd['expires_in'])} return jsd['access_token'], int(jsd['expires_in']) + int(jsd['created_at']), jsd['refresh_token']
def authenticate(self, username, password)
Obtain an oauth token. Pass username and password; get a token back. If
KitsuAuth is set to remember your tokens for this session, it will store
the token under the username given.

:param username: username
:param password: password
:return: A tuple of (token, expiration time as a unix timestamp, refresh_token)
:raises: :class:`Pymoe.errors.ServerError`
2.491035
2.091174
1.191214
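A hedged sketch of the token lifecycle across this method and refresh below; `auth` is a hypothetical KitsuAuth instance built with a client id/secret:

import time

token, expires_at, refresh_token = auth.authenticate("name", "password")
if expires_at < time.time():
    token, expires_at = auth.refresh(refresh_token)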
r = requests.post(self.apiurl + "/token", params={"grant_type": "refresh_token", "client_id": self.cid, "client_secret": self.csecret, "refresh_token": refresh_token}) if r.status_code != 200: raise ServerError jsd = r.json() return jsd['access_token'], int(jsd['expires_in']) + int(jsd['created_at'])
def refresh(self, refresh_token)
Renew an oauth token given an appropriate refresh token.

:param refresh_token: The Refresh Token
:return: A tuple of (token, expiration time as a unix timestamp)
2.839306
2.697119
1.052718
if not self.remember:
    raise NotSaving

if username not in self.token_storage:
    raise UserNotFound

if self.token_storage[username]['expiration'] < time.time():
    # Token expired: refresh it and update the stored copy.
    new_token = self.refresh(self.token_storage[username]['refresh'])
    self.token_storage[username]['token'] = new_token[0]
    self.token_storage[username]['expiration'] = new_token[1]
    return new_token[0]
else:
    return self.token_storage[username]['token']
def get(self, username)
If using the remember option and KitsuAuth is storing your tokens, this
function will retrieve one, refreshing it first if it has expired.

:param username: The username whose token we are retrieving
:return: A token
:raises: UserNotFound if no token is stored for the username, NotSaving if
    token storage is disabled
2.61446
2.369356
1.103447
r = requests.get(url, stream=True)
with open(destination, 'wb') as fd:
    for chunk in r.iter_content(chunk_size=128):
        fd.write(chunk)
def save(url, destination)
This is just the thread target. It's actually responsible for downloading
and saving.

:param str url: which dump to download
:param str destination: a file path to save to
1.696508
2.238753
0.757792
if destination:
    if not os.path.exists(destination):
        os.makedirs(destination)
else:
    # The original passed None straight to os.path.join below; default
    # to the current working directory, as the docstring promises.
    destination = os.getcwd()
pthread = threading.Thread(
    target=save,
    args=(
        self.urls[which],
        os.path.join(destination, self.urls[which])
    )
)
pthread.start()
return pthread
def download(self, which, destination=None)
I realize that the download for the dumps is going to take a while. Given
that, I've decided to approach this using threads. When you call this
method, it will launch a thread to download the data and return the thread
object. By default, the dump is dropped into the current working directory.
If the directory given doesn't exist, we'll try to make it. Don't use '..'
in the path as this confuses makedirs.

:param int which: 0 for dat (txt), 1 for xml
:param str destination: a directory path to save to, defaults to cwd
3.302012
3.495755
0.944578
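A hedged usage sketch; because the method returns the Thread, the caller decides whether to block:

t = client.download(1, destination="dumps")  # 1 -> xml dump; client: hypothetical
t.join()  # wait for the download to finish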
r = requests.get(self.apiurl + "/manga/{}".format(aid), headers=self.header) if r.status_code != 200: if r.status_code == 404: return None else: raise ServerError return r.json()
def get(self, aid)
Get manga information by id.

:param int aid: ID of the manga.
:return: Dictionary or None (for not found)
:rtype: Dictionary or None
:raises: :class:`Pymoe.errors.ServerError`
2.875458
2.64685
1.08637
projects = []
r = requests.get(self.api,
                 params={'action': 'query', 'list': 'categorymembers',
                         'cmpageid': self.active_id, 'cmtype': 'page',
                         'cmlimit': '500', 'format': 'json'},
                 headers=self.header)
if r.status_code == 200:
    jsd = r.json()
    # extend, not append: the original appended whole pages as nested
    # lists and returned only projects[0], silently dropping every
    # continuation page.
    projects.extend([(x['title'], x['pageid'])
                     for x in jsd['query']['categorymembers']])
    if 'query-continue' in jsd:
        while True:
            r = requests.get(
                self.api,
                params={'action': 'query', 'list': 'categorymembers',
                        'cmpageid': self.active_id, 'cmtype': 'page',
                        'cmlimit': '500',
                        'cmcontinue': jsd['query-continue']['categorymembers']['cmcontinue'],
                        'format': 'json'},
                headers=self.header)
            if r.status_code == 200:
                jsd = r.json()
                projects.extend([(x['title'], x['pageid'])
                                 for x in jsd['query']['categorymembers']])
                if 'query-continue' not in jsd:
                    break
            else:
                break
return projects
def active(self)
Get a list of active projects.

:return list: A list of tuples containing a title and pageid in that order.
1.831058
1.750845
1.045814
projects = []
r = requests.get(self.api,
                 params={'action': 'query', 'list': 'categorymembers',
                         'cmtitle': 'Category:Light_novel_({})'.format(language.replace(" ", "_")),
                         'cmtype': 'page', 'cmlimit': '500', 'format': 'json'},
                 headers=self.header)
if r.status_code == 200:
    jsd = r.json()
    # extend, not append, for the same pagination reason as in active().
    projects.extend([(x['title'], x['pageid'])
                     for x in jsd['query']['categorymembers']])
    if 'query-continue' in jsd:
        while True:
            r = requests.get(
                self.api,
                params={'action': 'query', 'list': 'categorymembers',
                        'cmtitle': 'Category:Light_novel_({})'.format(language.replace(" ", "_")),
                        'cmtype': 'page', 'cmlimit': '500',
                        'cmcontinue': jsd['query-continue']['categorymembers']['cmcontinue'],
                        'format': 'json'},
                headers=self.header)
            if r.status_code == 200:
                jsd = r.json()
                projects.extend([(x['title'], x['pageid'])
                                 for x in jsd['query']['categorymembers']])
                if 'query-continue' not in jsd:
                    break
            else:
                break
return projects
def light_novels(self, language="English")
Get a list of light novels under a certain language.

:param str language: Defaults to English. Replace with whatever language
    you want to query.
:return list: A list of tuples containing a title and pageid element in
    that order.
1.748657
1.715265
1.019467
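A brief usage sketch over the fixed return shape (a flat list of tuples; `client` is hypothetical):

for title, pageid in client.light_novels("English")[:5]:
    print(pageid, title)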
r = requests.get("https://www.baka-tsuki.org/project/index.php?title={}".format(title.replace(" ", "_")), headers=self.header) if r.status_code != 200: raise requests.HTTPError("Not Found") else: parsed = soup(r.text, 'html.parser') dd = parsed.find_all("a") volumes = [] for link in dd: if 'class' in link.attrs: if 'image' in link.get('class'): continue if 'href' in link.attrs: if re.search(self.chapter_regex, link.get('href')) is not None and not link.get('href').startswith('#'): volumes.append(link) seplist = OrderedDict() for item in volumes: if 'title' in item.attrs: result = re.search(self.separate_regex, item.get('title').lower()) else: result = re.search(self.separate_regex, item.text.lower()) if result and result.groups(): if result.group('chapter').lstrip('0') in seplist: seplist[result.group('chapter').lstrip('0')].append([item.get('href'), item.get('title') if 'title' in item.attrs else item.text]) else: seplist[result.group('chapter').lstrip('0')] = [[item.get('href'), item.get('title') if 'title' in item.attrs else item.text]] return seplist
def chapters(self, title)
Get a list of chapters for a light novel. Keep in mind, this can be slow.
I've certainly tried to make it as fast as possible, but it's still pulling
text out of a webpage.

:param str title: The title of the novel you want chapters from
:return OrderedDict: An OrderedDict which contains the chapters found for
    the light novel supplied
2.445384
2.402259
1.017952
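A hedged sketch walking the chapter map this returns (the title is illustrative; `client` is hypothetical):

chapters = client.chapters("Spice and Wolf")
for volume, links in chapters.items():
    for href, text in links:
        print(volume, text, href)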
r = requests.get(self.api,
                 params={'action': 'query', 'prop': 'pageimages',
                         'pageids': pageid, 'format': 'json'},
                 headers=self.header)
jsd = r.json()
image = "File:" + jsd['query']['pages'][str(pageid)]['pageimage']
r = requests.get(self.api,
                 params={'action': 'query', 'prop': 'imageinfo',
                         'iiprop': 'url', 'titles': image, 'format': 'json'},
                 headers=self.header)
jsd = r.json()
return jsd['query']['pages'][list(jsd['query']['pages'].keys())[0]]['imageinfo'][0]['url']
def cover(self, pageid)
Get a cover image given a page id.

:param str pageid: The pageid for the light novel you want a cover image for
:return str: the image url
2.034456
2.080904
0.977679
r = requests.get(self.api,
                 params={'action': 'parse', 'page': title, 'format': 'json'},
                 headers=self.header)
jsd = r.json()
return jsd['parse']['text']['*']
def get_text(self, title)
This will grab the html content of the page with the given title.
Technically you can use this to get the content of other pages too.

:param title: Title of the page you want the content of
:return: a string containing the html content
3.560498
3.608535
0.986688
r = requests.get(self.apiurl + "account/verify_credentials.xml", auth=HTTPBasicAuth(self._username, self._password), headers=self.header) if r.status_code != 200: raise UserLoginFailed("Username or Password incorrect.")
def _verify_credentials(self)
An internal method that verifies the credentials given at instantiation.

:raises: :class:`Pymoe.errors.UserLoginFailed`
4.250855
3.779902
1.124594
url = self.apiurl + "{}/search.xml".format('anime' if which == 1 else 'manga')
r = requests.get(url, params={'q': term},
                 auth=HTTPBasicAuth(self._username, self._password),
                 headers=self.header)
if r.status_code != 200:
    return []
data = ET.fromstring(r.text)
final_list = []
if which == 1:
    for item in data.findall('entry'):
        syn = item.find('synonyms').text.split(';') if item.find('synonyms').text else []
        # list.append returns None, so the original passed None as the
        # synonyms kwarg; concatenate instead.
        final_list.append(Anime(
            item.find('id').text,
            title=item.find('title').text,
            synonyms=syn + [item.find('english').text],
            episodes=item.find('episodes').text,
            average=item.find('score').text,
            anime_start=item.find('start_date').text,
            anime_end=item.find('end_date').text,
            synopsis=(html.unescape(item.find('synopsis').text.replace('<br />', ''))
                      if item.find('synopsis').text else None),
            image=item.find('image').text,
            status_anime=item.find('status').text,
            type=item.find('type').text
        ))
    return NT_SEARCH_ANIME(
        airing=[x for x in final_list if x.status.series == "Currently Airing"],
        finished=[x for x in final_list if x.status.series == "Finished Airing"],
        unaired=[x for x in final_list if x.status.series == "Not Yet Aired"],
        dropped=[x for x in final_list if x.status.series == "Dropped"],
        planned=[x for x in final_list if x.status.series == "Plan to Watch"]
    )
else:
    for item in data.findall('entry'):
        syn = item.find('synonyms').text.split(';') if item.find('synonyms').text else []
        final_list.append(Manga(
            item.find('id').text,
            title=item.find('title').text,
            synonyms=syn + [item.find('english').text],
            chapters=item.find('chapters').text,
            volumes=item.find('volumes').text,
            average=item.find('score').text,
            manga_start=item.find('start_date').text,
            manga_end=item.find('end_date').text,
            synopsis=(html.unescape(item.find('synopsis').text.replace('<br />', ''))
                      if item.find('synopsis').text else None),
            image=item.find('image').text,
            status_manga=item.find('status').text,
            type=item.find('type').text
        ))
    return NT_SEARCH_MANGA(
        publishing=[x for x in final_list if x.status.series == "Publishing"],
        finished=[x for x in final_list if x.status.series == "Finished"],
        unpublished=[x for x in final_list if x.status.series == "Not Yet Published"],
        dropped=[x for x in final_list if x.status.series == "Dropped"],
        planned=[x for x in final_list if x.status.series == "Plan to Read"]
    )
def _search(self, which, term)
The real search method.

:param which: 1 for anime, 2 for manga
:param term: What to search for
:return: A namedtuple (NT_SEARCH_ANIME or NT_SEARCH_MANGA, as per the which
    param) whose fields are lists of :class:`Pymoe.Mal.Objects.Anime` or
    :class:`Pymoe.Mal.Objects.Manga` objects, grouped by series status.
1.72971
1.708302
1.012532
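A hedged sketch of the grouped return value; the method is internal (the public wrapper presumably dispatches to it), and `client` is hypothetical:

results = client._search(1, "Monogatari")  # 1 -> anime
for show in results.airing:
    print(show.title)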
if isinstance(data, Anime):
    xmlstr = data.to_xml()
    r = requests.get(self.apiurl + "animelist/add/{}.xml".format(data.id),
                     params={'data': xmlstr},
                     auth=HTTPBasicAuth(self._username, self._password),
                     headers=self.header)
    if r.status_code != 201:
        raise ServerError(r.text, r.status_code)
    return True
else:
    raise SyntaxError(
        "Invalid type: data should be a Pymoe.Mal.Objects.Anime object. "
        "Got a {}".format(type(data)))
def _anime_add(self, data)
Adds an anime to a user's list.

:param data: A :class:`Pymoe.Mal.Objects.Anime` object with the anime data
:raises: SyntaxError on invalid data type
:raises: ServerError on failure to add
:rtype: Bool
:return: True on success
4.281292
2.930033
1.461176