Dataset columns: signature (8–3.44k chars), body (0–1.41M chars), docstring (1–122k chars), id (5–17 chars).
def setup_signals():
    signal.signal(signal.SIGINT, shutit_util.ctrl_c_signal_handler)
    signal.signal(signal.SIGQUIT, shutit_util.ctrl_quit_signal_handler)
Set up the signal handlers.
f10350:m0
def get_shutit_pexpect_sessions():
    sessions = []
    for shutit_object in shutit_global_object.shutit_objects:
        for key in shutit_object.shutit_pexpect_sessions:
            sessions.append(shutit_object.shutit_pexpect_sessions[key])
    return sessions
Returns all the shutit_pexpect sessions in existence.
f10350:m1
def __init__(self):
    self.shutit_objects = []
    assert self.only_one is None, shutit_util.print_debug()
    self.only_one = True
    self.owd = os.getcwd()
    self.ispy3 = (sys.version_info[0] >= 3)
    self.global_thread_lock = threading.Lock()
    self.global_thread_lock.acquire()
    self.secret_words_set = set()
    self.logstream = None
    self.logstream_size = <NUM_LIT>
    self.log_trace_when_idle = False
    self.signal_id = None
    self.window_size_max = <NUM_LIT>
    self.username = os.environ.get('<STR_LIT>', '<STR_LIT>')
    self.default_timeout = <NUM_LIT>
    self.delaybeforesend = 0
    self.default_encoding = 'utf-8'
    self.managed_panes = False
    self.pane_manager = None
    self.lower_pane_rotate_count = 0
    self.stacktrace_lines_arr = []
    self.bash_startup_command = "<STR_LIT>"
    self.prompt_command = "<STR_LIT>"
    self.base_prompt = '<STR_LIT>'
    self.line_limit = <NUM_LIT>
    def terminal_size():
        h, w, _, _ = struct.unpack('<STR_LIT>', fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('<STR_LIT>', 0, 0, 0, 0)))
        return int(h), int(w)
    try:
        self.root_window_size = terminal_size()
    except IOError:
        self.root_window_size = (<NUM_LIT>, <NUM_LIT>)
    self.pexpect_window_size = (self.window_size_max, self.window_size_max)
    self.interactive = 1
    self.shutit_pexpect_session_environments = set()
    if self.username == '<STR_LIT>':
        try:
            if os.getlogin() != '<STR_LIT>':
                self.username = os.getlogin()
        except Exception:
            self.username = getpass.getuser()
        if self.username == '<STR_LIT>':
            self.handle_exit(msg='<STR_LIT>' + '<STR_LIT>' + '<STR_LIT>', exit_code=1)
    self.real_user = os.environ.get('<STR_LIT>', self.username)
    self.real_user_id = pwd.getpwnam(self.real_user).pw_uid
    self.build_id = (socket.gethostname() + '_' + self.real_user + '_' + str(time.time()) + '.' + str(datetime.datetime.now().microsecond))
    shutit_state_dir_base = '<STR_LIT>' + self.username
    if not os.access(shutit_state_dir_base, os.F_OK):
        mkpath(shutit_state_dir_base, mode=<NUM_LIT>)
    self.shutit_state_dir = shutit_state_dir_base + '/' + self.build_id
    os.chmod(shutit_state_dir_base, <NUM_LIT>)
    if not os.access(self.shutit_state_dir, os.F_OK):
        mkpath(self.shutit_state_dir, mode=<NUM_LIT>)
    os.chmod(self.shutit_state_dir, <NUM_LIT>)
    self.shutit_state_dir_build_db_dir = self.shutit_state_dir + '<STR_LIT>'
    self.allowed_delivery_methods = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']
Constructor.
f10350:c0:m0
def determine_interactive(self):
    try:
        if not sys.stdout.isatty() or os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()):
            self.interactive = 0
            return False
    except Exception:
        self.interactive = 0
        return False
    if self.interactive == 0:
        return False
    return True
Determine whether we're in an interactive shell. Sets interactivity off if appropriate. cf http://stackoverflow.com/questions/24861351/how-to-detect-if-python-script-is-being-run-as-a-background-process
f10350:c0:m4
def shutit_print(self, msg):
    if self.pane_manager is None:
        print(msg)
Handles simple printing of a msg at the global level.
f10350:c0:m8
def __init__(self, shutit_global_object):
    assert self.only_one is None
    self.only_one = True  # assignment, not comparison: enforce the singleton
    self.shutit_global = shutit_global_object
    self.top_left_session_pane = SessionPane('<STR_LIT>')
    self.top_right_session_pane = SessionPane('<STR_LIT>')
    self.bottom_left_session_pane = SessionPane('<STR_LIT>')
    self.bottom_right_session_pane = SessionPane('<STR_LIT>')
    self.window = None
    self.screen_arr = None
    self.wheight = None
    self.wwidth = None
    self.do_render = True
    self.refresh_window()
only_one - singleton insurance
f10350:c1:m0
def create_session(docker_image=None,
                   docker_rm=None,
                   echo=False,
                   loglevel='<STR_LIT>',
                   nocolor=False,
                   session_type='<STR_LIT>',
                   vagrant_session_name=None,
                   vagrant_image='<STR_LIT>',
                   vagrant_gui=False,
                   vagrant_memory='<STR_LIT>',
                   vagrant_num_machines='1',
                   vagrant_provider='<STR_LIT>',
                   vagrant_root_folder=None,
                   vagrant_swapsize='<STR_LIT>',
                   vagrant_version='<STR_LIT>',
                   vagrant_virt_method='<STR_LIT>',
                   vagrant_cpu='1',
                   video=-1,
                   walkthrough=False):
    assert session_type in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'), shutit_util.print_debug()
    shutit_global_object = shutit_global.shutit_global_object
    if video != -1 and video > 0:
        walkthrough = True
    if session_type in ('<STR_LIT>', '<STR_LIT>'):
        return shutit_global_object.create_session(session_type,
                                                   docker_image=docker_image,
                                                   rm=docker_rm,
                                                   echo=echo,
                                                   walkthrough=walkthrough,
                                                   walkthrough_wait=video,
                                                   nocolor=nocolor,
                                                   loglevel=loglevel)
    elif session_type == '<STR_LIT>':
        if vagrant_session_name is None:
            vagrant_session_name = '<STR_LIT>' + shutit_util.random_id()
        if isinstance(vagrant_num_machines, int):
            vagrant_num_machines = str(vagrant_num_machines)
        assert isinstance(vagrant_num_machines, str)
        assert isinstance(int(vagrant_num_machines), int)
        if vagrant_root_folder is None:
            vagrant_root_folder = shutit_global.shutit_global_object.owd
        return create_session_vagrant(vagrant_session_name,
                                      vagrant_num_machines,
                                      vagrant_image,
                                      vagrant_provider,
                                      vagrant_gui,
                                      vagrant_memory,
                                      vagrant_swapsize,
                                      echo,
                                      walkthrough,
                                      nocolor,
                                      video,
                                      vagrant_version,
                                      vagrant_virt_method,
                                      vagrant_root_folder,
                                      vagrant_cpu,
                                      loglevel)
Creates a distinct ShutIt session. Sessions can be of type: bash (a bash shell is spawned) or vagrant (a Vagrantfile is created and 'vagrant up' is run).
f10351:m0
def main():
    shutit = shutit_global.shutit_global_object.shutit_objects[0]
    if sys.version_info[0] == 2:
        if sys.version_info[1] < 7:
            shutit.fail('<STR_LIT>')
    try:
        shutit.setup_shutit_obj()
    except KeyboardInterrupt:
        shutit_util.print_debug(sys.exc_info())
        shutit_global.shutit_global_object.shutit_print('<STR_LIT>')
        sys.exit(1)
Main ShutIt function. Handles the configured actions: skeleton (create skeleton module), list_configs (output computed configuration), depgraph (output digraph of module dependencies).
f10351:m2
def map_package(shutit_pexpect_session, package, install_type):
    if package in PACKAGE_MAP:
        for itype in PACKAGE_MAP[package]:
            if itype == install_type:
                ret = PACKAGE_MAP[package][install_type]
                if isinstance(ret, str):
                    return ret
                if callable(ret):
                    ret(shutit_pexpect_session)
                    return '<STR_LIT>'
    return package
If a package mapping exists for the given install type, return it; otherwise return the package unchanged.
f10352:m2
def is_file_secure(file_name):
    if not os.path.isfile(file_name):
        return True
    file_mode = os.stat(file_name).st_mode
    if file_mode & (stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH):
        return False
    return True
Returns false if file is considered insecure, true if secure. If file doesn't exist, it's considered secure!
f10353:m0
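For illustration, a minimal sketch of the same permission test applied to a hypothetical path; a file counts as secure only when none of the group/other permission bits are set:

    # Sketch only: '/tmp/example_secret' is a hypothetical path.
    import os
    import stat

    path = '/tmp/example_secret'
    insecure_bits = (stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP |
                     stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
    if not os.path.isfile(path):
        print('secure')  # missing files are treated as secure
    elif os.stat(path).st_mode & insecure_bits:
        print('insecure')
    else:
        print('secure')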
def colorise(code, msg):
    return '<STR_LIT>' % (code, msg) if code else msg
Colorize the given string for a terminal. See https://misc.flogisoft.com/bash/tip_colors_and_formatting
f10353:m1
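The format string above is masked in this dump; one common realisation of such a colouriser wraps the message in ANSI escape codes, sketched here as an assumption rather than the actual literal:

    # Hedged sketch: assumes the masked format string is an ANSI escape wrapper.
    def colorise_sketch(code, msg):
        # \033[<code>m starts the colour, \033[0m resets it.
        return '\033[%sm%s\033[0m' % (code, msg) if code else msg

    print(colorise_sketch(32, 'green text'))  # 32 = green, per the docstring's link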
def emblinken(msg):
    return '<STR_LIT>' % msg
Blink the message for a terminal
f10353:m2
def random_id(size=8, chars=string.ascii_letters + string.digits):
    return ''.join(random.choice(chars) for _ in range(size))
Generates a random string of given size from the given chars.
@param size: The size of the random string.
@param chars: Constituent pool of characters to draw random characters from.
@type size: number
@type chars: string
@rtype: string
@return: The string of random characters.
f10353:m3
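A usage sketch of the same idea as a standalone call, generating an 8-character alphanumeric id:

    import random
    import string

    # 8 characters drawn from letters and digits.
    print(''.join(random.choice(string.ascii_letters + string.digits)
                  for _ in range(8)))  # e.g. 'a3F9kQ2z' (output varies)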
def random_word(size=6):
    words = shutit_assets.get_words().splitlines()
    word = '<STR_LIT>'
    while len(word) != size or "'" in word:
        word = words[int(random.random() * (len(words) - 1))]
    return word.lower()
Returns a random word in lower case.
f10353:m4
def get_hash(string_to_hash):
    return abs(binascii.crc32(string_to_hash.encode()))
Helper function to get a preceding integer, eg com.openbet == 1003189494
>>> import binascii
>>> abs(binascii.crc32(b'shutit.tk'))
782914092
Recommended means of determining the run order integer part.
f10353:m5
def ctrl_c_signal_handler(_, frame):
    global ctrl_c_calls
    ctrl_c_calls += 1
    if ctrl_c_calls > 10:
        shutit_global.shutit_global_object.handle_exit(exit_code=1)
    shutit_frame = get_shutit_frame(frame)
    if in_ctrlc:
        msg = '<STR_LIT>'
        if shutit_frame:
            shutit_global.shutit_global_object.shutit_print('\n')
            shutit = shutit_frame.f_locals['<STR_LIT>']
            shutit.log(msg, level=logging.CRITICAL)
        else:
            shutit_global.shutit_global_object.shutit_print(msg)
        shutit_global.shutit_global_object.handle_exit(exit_code=1)
    if shutit_frame:
        shutit = shutit_frame.f_locals['<STR_LIT>']
        if shutit.build['<STR_LIT>']:
            shutit.get_current_shutit_pexpect_session().pexpect_child.sendline(r'<STR_LIT>')
            return
        shutit_global.shutit_global_object.shutit_print(colorise(<NUM_LIT>, "\r" + r"<STR_LIT>"))
        shutit.build['<STR_LIT>'] = True
        t = threading.Thread(target=ctrlc_background)
        t.daemon = True
        t.start()
        ctrl_c_calls = 0
        return
    shutit_global.shutit_global_object.shutit_print(colorise(<NUM_LIT>, '\n' + '*' * <NUM_LIT>))
    shutit_global.shutit_global_object.shutit_print(colorise(<NUM_LIT>, "<STR_LIT>"))
    shutit_global.shutit_global_object.shutit_print(colorise(<NUM_LIT>, '*' * <NUM_LIT>))
    t = threading.Thread(target=ctrlc_background)
    t.daemon = True
    t.start()
    ctrl_c_calls = 0
CTRL-c signal handler - enters a pause point if it can.
f10353:m9
def sendline(child, line):
    child.sendline(line)
Handles sending of line to pexpect object.
f10353:m13
def util_raw_input(prompt='<STR_LIT>', default=None, ispass=False, use_readline=True):
    if use_readline:
        try:
            readline.read_init_file('<STR_LIT>')
        except IOError:
            pass
        readline.parse_and_bind('<STR_LIT>')
    prompt = '\r\n' + prompt
    if ispass:
        prompt += '<STR_LIT>'
    sanitize_terminal()
    if shutit_global.shutit_global_object.interactive == 0:
        return default
    if not shutit_global.shutit_global_object.determine_interactive():
        return default
    while True:
        try:
            if ispass:
                return getpass.getpass(prompt=prompt)
            return input(prompt).strip() or default
        except KeyboardInterrupt:
            continue
        except IOError:
            # No controlling terminal (or other I/O problem): stop retrying,
            # switch off interactivity and fall back to the default.
            msg = '<STR_LIT>'
            break
    shutit_global.shutit_global_object.set_noninteractive(msg=msg)
    return default
Handles raw_input calls, and switches off interactivity if there is apparently no controlling terminal (or there are any other problems)
f10353:m16
def get_input(msg, default='<STR_LIT>', valid=None, boolean=False, ispass=False, color=None):
    log_trace_when_idle_original_value = shutit_global.shutit_global_object.log_trace_when_idle
    shutit_global.shutit_global_object.log_trace_when_idle = False
    if boolean and valid is None:
        valid = ('yes', 'y', 'Y', '1', 'true', '<STR_LIT>', 'n', 'N', '0', 'false')
    if color:
        answer = util_raw_input(prompt=colorise(color, msg), ispass=ispass)
    else:
        answer = util_raw_input(msg, ispass=ispass)
    if boolean and answer in ('<STR_LIT>', None) and default != '<STR_LIT>':
        shutit_global.shutit_global_object.log_trace_when_idle = log_trace_when_idle_original_value
        return default
    if valid is not None:
        while answer not in valid:
            shutit_global.shutit_global_object.shutit_print('<STR_LIT>' + str(valid), transient=True)
            if color:
                answer = util_raw_input(prompt=colorise(color, msg), ispass=ispass)
            else:
                answer = util_raw_input(msg, ispass=ispass)
    if boolean:
        if answer.lower() in ('yes', 'y', '1', 'true', 't'):
            shutit_global.shutit_global_object.log_trace_when_idle = log_trace_when_idle_original_value
            return True
        elif answer.lower() in ('<STR_LIT>', 'n', '0', 'false', 'f'):
            shutit_global.shutit_global_object.log_trace_when_idle = log_trace_when_idle_original_value
            return False
    shutit_global.shutit_global_object.log_trace_when_idle = log_trace_when_idle_original_value
    return answer or default
Gets input from the user, and returns the answer.
@param msg:     message to send to user
@param default: default value if nothing entered
@param valid:   valid input values (default == empty list == anything allowed)
@param boolean: whether return value should be boolean
@param ispass:  True if this is a password (ie whether to not echo input)
@param color:   Color code to colorize with (eg 32 = green)
f10353:m17
def managing_thread_main_simple():
    import shutit_global
    last_msg = '<STR_LIT>'
    while True:
        printed_anything = False
        if shutit_global.shutit_global_object.log_trace_when_idle and time.time() - shutit_global.shutit_global_object.last_log_time > 10:
            this_msg = '<STR_LIT>'
            this_header = '<STR_LIT>'
            for thread_id, stack in sys._current_frames().items():
                if thread_id == threading.current_thread().ident:
                    continue
                printed_thread_started = False
                for filename, lineno, name, line in traceback.extract_stack(stack):
                    if not printed_anything:
                        printed_anything = True
                        this_header += '<STR_LIT>' * <NUM_LIT> + '\n'
                        this_header += '<STR_LIT>' + str(thread_id) + '<STR_LIT>' + time.strftime('<STR_LIT>') + '\n'
                        this_header += '=' * <NUM_LIT> + '\n'
                    if not printed_thread_started:
                        printed_thread_started = True
                    this_msg += '<STR_LIT>' % (filename, lineno, name) + '\n'
                    if line:
                        this_msg += '<STR_LIT>' % (line,) + '\n'
            if printed_anything:
                this_msg += '=' * <NUM_LIT> + '\n'
                this_msg += '<STR_LIT>'
                this_msg += '=' * <NUM_LIT> + '\n'
            if this_msg != last_msg:
                print(this_header + this_msg)
                last_msg = this_msg
        time.sleep(5)
Simpler thread to track whether main thread has been quiet for long enough that a thread dump should be printed.
f10354:m2
def parse_shutitfile_args(args_str):
    ret = []
    if args_str == '<STR_LIT>':
        return ret
    if args_str[0] == '[' and args_str[-1] == ']':
        ret = eval(args_str)
        assert isinstance(ret, list)
    else:
        ret = args_str.split()
    nv_pairs = True
    for item in ret:
        if item.find('=') < 0:
            nv_pairs = False
    if nv_pairs:
        d = {}
        for item in ret:
            item_nv = item.split('=')
            d.update({item_nv[0]: item_nv[1]})
        ret = d
    return ret
Parse shutitfile args (eg in the line 'RUN some args', the passed-in args_str would be 'some args'). If the string is bounded by square brackets, it's treated in the form ['arg1','arg2'], and the returned list looks the same. If the string is composed entirely of name-value pairs (eg RUN a=b c=d), it's returned as a dict (eg {'a':'b','c':'d'}). If what's passed in is of the form "COMMAND ['a=b','c=d']", a dict is also returned. Also eg: ["asd and space=value","asd 2=asdgasdg"]
f10358:m2
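A quick sketch of the expected results for the documented input forms (assuming the masked empty-string check above is ''):

    # Expected results, per the parsing rules above:
    print(parse_shutitfile_args("['arg1','arg2']"))  # ['arg1', 'arg2']
    print(parse_shutitfile_args('some args'))        # ['some', 'args']
    print(parse_shutitfile_args('a=b c=d'))          # {'a': 'b', 'c': 'd'}
    print(parse_shutitfile_args("['a=b','c=d']"))    # {'a': 'b', 'c': 'd'}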
def scan_text(text):
    while True:
        match = re.match("<STR_LIT>", text)
        if match:
            before = match.group(1)
            name = match.group(2)
            after = match.group(3)
            text = before + """<STR_LIT>""" + name + """<STR_LIT>""" + after
        else:
            break
    return text
Scan text, and replace items that match shutit's pattern format, ie: {{ shutit.THING }}
f10358:m8
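The regex and replacement literals are masked above; the sketch below illustrates the substitution the docstring describes, using a hypothetical pattern and replacement marker:

    import re

    def scan_text_sketch(text):
        # Hypothetical pattern for '{{ shutit.THING }}' markers; the real
        # pattern and replacement strings are masked in the dump above.
        while True:
            match = re.match(r'(.*)\{\{ shutit.(.*?) \}\}(.*)', text, re.DOTALL)
            if not match:
                break
            before, name, after = match.group(1), match.group(2), match.group(3)
            text = before + '<replaced:' + name + '>' + after
        return text

    print(scan_text_sketch('user is {{ shutit.username }} today'))
    # -> user is <replaced:username> today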
def conn_module():
    return [
        ConnDocker('<STR_LIT>', -0.1, description='<STR_LIT>'),
        ConnBash('<STR_LIT>', -0.1, description='<STR_LIT>'),
    ]
Connects ShutIt to something
f10361:m0
def is_installed(self, shutit):
    return False
Always considered false for ShutIt setup.
f10361:c1:m0
def build(self, shutit):
    target_child = self.start_container(shutit, '<STR_LIT>')
    self.setup_host_child(shutit)
    self.setup_target_child(shutit, target_child)
    shutit.send('<STR_LIT>' + shutit_global.shutit_global_object.shutit_state_dir + '<STR_LIT>' + shutit_global.shutit_global_object.shutit_state_dir_build_db_dir + '/' + shutit_global.shutit_global_object.build_id,
                shutit_pexpect_child=target_child,
                echo=False)
    return True
Sets up the target ready for building.
f10361:c1:m3
def finalize(self, shutit):
    target_child_pexpect_session = shutit.get_shutit_pexpect_session_from_id('<STR_LIT>')
    assert not target_child_pexpect_session.sendline(ShutItSendSpec(target_child_pexpect_session, '<STR_LIT>', ignore_background=True)), shutit_util.print_debug()
    host_child_pexpect_session = shutit.get_shutit_pexpect_session_from_id('<STR_LIT>')
    host_child = host_child_pexpect_session.pexpect_child
    shutit.set_default_shutit_pexpect_session(host_child_pexpect_session)
    shutit.set_default_shutit_pexpect_session_expect(shutit.expect_prompts['<STR_LIT>'])
    shutit.do_repository_work(shutit.repository['name'], docker_executable=shutit.host['<STR_LIT>'], password=shutit.host['password'])
    host_child.sendline('<STR_LIT>' + shutit.build['<STR_LIT>'])
    host_child.sendline('<STR_LIT>')
    return True
Finalizes the target, exiting for us back to the original shell and performing any repository work required.
f10361:c1:m4
def is_installed(self, shutit):
    return False
Always considered false for ShutIt setup.
f10361:c2:m0
def build(self, shutit):
    shutit_pexpect_session = ShutItPexpectSession(shutit, '<STR_LIT>', '<STR_LIT>')
    target_child = shutit_pexpect_session.pexpect_child
    shutit_pexpect_session.expect(shutit_global.shutit_global_object.base_prompt.strip(), timeout=10)
    self.setup_host_child(shutit)
    self.setup_target_child(shutit, target_child)
    return True
Sets up the machine ready for building.
f10361:c2:m2
def finalize(self, shutit):
    target_child_pexpect_session = shutit.get_shutit_pexpect_session_from_id('<STR_LIT>')
    assert not target_child_pexpect_session.sendline(ShutItSendSpec(target_child_pexpect_session, '<STR_LIT>', ignore_background=True)), shutit_util.print_debug()
    return True
Finalizes the target, exiting for us back to the original shell and performing any repository work required.
f10361:c2:m3
def is_installed(self, shutit):
    return False
Always considered false for ShutIt setup.
f10361:c3:m0
def build(self, shutit):
    if shutit.build['<STR_LIT>'] in ('<STR_LIT>', '<STR_LIT>'):
        if shutit.get_current_shutit_pexpect_session_environment().install_type == '<STR_LIT>':
            shutit.add_to_bashrc('<STR_LIT>')
            if not shutit.command_available('<STR_LIT>'):
                shutit.install('<STR_LIT>')
            shutit.lsb_release()
        elif shutit.get_current_shutit_pexpect_session_environment().install_type == '<STR_LIT>':
            shutit.send('<STR_LIT>', timeout=<NUM_LIT>, exit_values=['0', '1'])
        shutit.pause_point('<STR_LIT>' + '<STR_LIT>', level=2)
    return True
Initializes the target ready for building, updating package management if in a container.
f10361:c3:m1
def remove(self, shutit):
    return True
Removes anything performed as part of build.
f10361:c3:m2
def get_config(self, shutit):
    return True
Gets the configured core packages, and whether to perform the package management update.
f10361:c3:m3
def __init__(self,
             shutit_pexpect_child,
             send=None,
             send_dict=None,
             expect=None,
             timeout=None,
             check_exit=None,
             fail_on_empty_before=True,
             record_command=True,
             exit_values=None,
             echo=None,
             escape=False,
             check_sudo=True,
             retry=3,
             note=None,
             assume_gnu=True,
             follow_on_commands=None,
             searchwindowsize=None,
             maxread=None,
             delaybeforesend=None,
             secret=False,
             nonewline=False,
             user=None,
             password=None,
             is_ssh=None,
             go_home=True,
             prompt_prefix=None,
             remove_on_match=None,
             fail_on_fail=True,
             ignore_background=False,
             run_in_background=False,
             block_other_commands=True,
             wait_cadence=2,
             loglevel=logging.INFO):
    self.send = send
    self.original_send = send
    self.send_dict = send_dict
    self.expect = expect
    self.shutit_pexpect_child = shutit_pexpect_child
    self.timeout = timeout
    self.check_exit = check_exit
    self.fail_on_empty_before = fail_on_empty_before
    self.record_command = record_command
    self.exit_values = exit_values
    self.echo = echo
    self.escape = escape
    self.check_sudo = check_sudo
    self.retry = retry
    self.note = note
    self.assume_gnu = assume_gnu
    self.follow_on_commands = follow_on_commands
    self.searchwindowsize = searchwindowsize
    self.maxread = maxread
    self.delaybeforesend = delaybeforesend
    self.secret = secret
    self.nonewline = nonewline
    self.loglevel = loglevel
    self.user = user
    self.password = password
    self.is_ssh = is_ssh
    self.go_home = go_home
    self.prompt_prefix = prompt_prefix
    self.remove_on_match = remove_on_match
    self.fail_on_fail = fail_on_fail
    self.ignore_background = ignore_background
    self.run_in_background = run_in_background
    self.block_other_commands = block_other_commands
    self.wait_cadence = wait_cadence
    self.started = False
    if self.check_exit and self.run_in_background:
        self.check_exit = False
    if self.send_dict is not None:
        assert isinstance(self.send_dict, dict), shutit_util.print_debug()
        for key in self.send_dict:
            val = self.send_dict[key]
            assert isinstance(val, (str, list)), shutit_util.print_debug()
            if isinstance(val, str):
                self.send_dict.update({key: [val, False]})
            elif isinstance(val, list):
                assert len(val) == 2, shutit_util.print_debug()
            else:
                assert False, shutit_util.print_debug(msg='<STR_LIT>')
    if self.exit_values is None:
        self.exit_values = ['0', ]
Specification for arguments to send to shutit functions.

@param send: String to send, ie the command being issued. If set to None, we consume up to the expect string, which is useful if we just matched output that came before a standard command that returns to the prompt.
@param send_dict: dict of sends and expects, eg: {'interim prompt:': ['some input', False], 'input password:': ['mypassword', True]}. Note that the boolean indicates whether the match results in the removal of the send dict expects from the interaction and assumes a prompt follows.
@param expect: String that we expect to see in the output. Usually a prompt. Defaults to the currently-set expect string (see set_default_shutit_pexpect_session_expect).
@param shutit_pexpect_child: pexpect child to issue command to.
@param timeout: Timeout on response.
@param check_exit: Whether to check the shell exit code of the passed-in command. If the exit value was non-zero an error is thrown. (default=None, which takes the currently-configured check_exit value) See also fail_on_empty_before.
@param fail_on_empty_before: If debug is set, fail on empty match output string (default=True). If this is set to False, then we don't check the exit value of the command.
@param record_command: Whether to record the command for output at end. As a safety measure, if the command matches any 'password's then we don't record it.
@param exit_values: Array of acceptable exit values as strings.
@param echo: Whether to suppress any logging output from pexpect to the terminal or not. We don't record the command if this is set to False unless record_command is explicitly passed in as True.
@param escape: Whether to escape the characters in a bash-friendly way, eg $'\\Uxxxxxx'.
@param check_sudo: Check whether we have sudo available and if we already have sudo rights cached.
@param retry: Number of times to retry the command if the first attempt doesn't work. Useful if going to the network.
@param note: If a note is passed in, and we are in walkthrough mode, pause with the note printed.
@param assume_gnu: Assume the gnu version of commands, which are not in
@param follow_on_commands: A dictionary of the form {match_string: command, match_string2: command2} which runs commands based on whether the output matched. Follow-on commands are always foregrounded and always ignore backgrounded processes.
@param searchwindowsize: Passed into pexpect session.
@param maxread: Passed into pexpect session.
@param delaybeforesend: Passed into pexpect session.
@param secret: Whether what is being sent is a secret.
@param nonewline: Whether to omit the newline from the send.
@param user: If logging in, user to use. Default is 'root'.
@param password: If logging in, password to use. Default is 'root'.
@param is_ssh: Indicates whether the login is an ssh one if it is not an ssh command.
@param go_home: On logging in, whether to go to the home dir. Default is True.
@param prompt_prefix: Override of random prompt prefix created by prompt setup.
@param remove_on_match: If the item matches, remove the send_dict from future expects (eg if it's a password). This makes the 'am I logged in yet?' checking more robust.
@param ignore_background: Whether to block if there are background tasks running in this session that are blocking, or ignore ALL background tasks and run anyway. Default is False.
@param run_in_background: Whether to run in the background.
@param block_other_commands: Whether to block other commands from running (unless ignore_background is set on those other commands). Default is True.
@param wait_cadence: If blocked and waiting on a background task, wait this number of seconds before re-checking. Default is 2.
@param loglevel: Log level at which to operate.

Background Commands
===================

run_in_background | ignore_background | block_other_commands | Outcome
T                 | T                 | T                    | 'Just run in background and queue others': runs the command in the background, ignoring all blocking background tasks even if they are blocking, and blocking new background tasks (if they don't ignore blocking background tasks).
T                 | F                 | T                    | 'Run in background if not blocked, and queue others': runs the command in the background, but will block if there are blocking background tasks running. It will block new background tasks (if they don't ignore blocking background tasks).
T                 | F                 | F                    | 'Run in background if not blocked, and let others run'.
F                 | T                 | N/A                  | 'Run in foreground, ignoring any background commands and block any new background commands.'
F                 | F                 | N/A                  | 'Run in foreground, blocking if there are any background tasks running, and blocking any new background commands.'

Example
=======

Scenario: we want to update the file database with 'updatedb', then find a file that we expect to be in that database with 'locate file_to_find', and then add a line to that file with 'echo line >> file_to_find'.

Statement: I want to run this command in the background in this ShutIt session. I want to stop other background commands from running. I don't care if other background commands are running which block this.
Example send: updatedb
Args: run_in_background=True, ignore_background=True, block_other_commands=True

Statement: I want to run this command in the background in this ShutIt session. I want to stop other background commands from running. I don't want to run if other blocking background commands are running.
Example send: locate file_to_find
Args: run_in_background=True, ignore_background=False, block_other_commands=True

Statement: I just want to run this command in the background in the ShutIt session and forget about it. I don't care if there are other background tasks running which block this. I don't want to block other commands; nothing will depend on this completing.
Example send: echo 'Add line to file' >> /path/to/file_to_find
Args: run_in_background=True, ignore_background=True, block_other_commands=False
f10362:c0:m0
def __init__(self, prefix):
    if prefix == '<STR_LIT>':
        self.environment_id = prefix
    else:
        self.environment_id = shutit_util.random_id()
    self.module_root_dir = '/'
    self.modules_installed = []
    self.modules_not_installed = []
    self.modules_ready = []
    self.modules_recorded = []
    self.modules_recorded_cache_valid = False
    self.install_type = '<STR_LIT>'
    self.distro = '<STR_LIT>'
    self.distro_version = '<STR_LIT>'
    self.users = dict()
    self.build = {}
    self.build['<STR_LIT>'] = False
    self.build['<STR_LIT>'] = False
    self.build['<STR_LIT>'] = False
Represents a new 'environment' in ShutIt, which corresponds to a host or any machine-like location (eg docker container, ssh'd to host, or even a chroot jail with a /tmp folder that has not been touched by shutit).
f10363:c0:m0
def has_blocking_background_send(self):
    for background_object in self.background_objects:
        if background_object.block_other_commands and background_object.run_state in ('S', 'N'):
            self.shutit_obj.log('<STR_LIT>' + str(self), level=logging.DEBUG)
            self.shutit_obj.log('<STR_LIT>' + str(background_object), level=logging.DEBUG)
            return True
        elif background_object.block_other_commands and background_object.run_state in ('F', 'C', 'T'):
            assert False, shutit_util.print_debug(msg='<STR_LIT>' + background_object.run_state)
        else:
            assert background_object.block_other_commands is False, shutit_util.print_debug()
    return False
Check whether any blocking background commands are waiting to run. If any are, return True. If none are, return False.
f10365:c1:m2
def check_background_commands_complete(self):
    unstarted_command_exists = False
    self.shutit_obj.log('<STR_LIT>' + str(self.background_objects), level=logging.DEBUG)
    self.shutit_obj.log('<STR_LIT>' + str(self.login_id), level=logging.DEBUG)
    for background_object in self.background_objects:
        self.shutit_obj.log('<STR_LIT>' + str(background_object.sendspec.send), level=logging.DEBUG)
    background_objects_to_remove = []
    def remove_background_objects(a_background_objects_to_remove):
        for background_object in a_background_objects_to_remove:
            self.background_objects.remove(background_object)
    for background_object in self.background_objects:
        self.shutit_obj.log('<STR_LIT>' + str(background_object), level=logging.DEBUG)
        state = background_object.check_background_command_state()
        self.shutit_obj.log('<STR_LIT>' + state, level=logging.DEBUG)
        if state in ('C', 'F', 'T'):
            background_objects_to_remove.append(background_object)
            self.background_objects_completed.append(background_object)
        elif state == 'S':
            self.shutit_obj.log('<STR_LIT>' + str(background_object), level=logging.DEBUG)
            remove_background_objects(background_objects_to_remove)
            return False, 'S', background_object
        elif state == 'N':
            self.shutit_obj.log('<STR_LIT>' + str(background_object.sendspec.send), level=logging.DEBUG)
            unstarted_command_exists = True
        else:
            remove_background_objects(background_objects_to_remove)
            assert False, shutit_util.print_debug(msg='<STR_LIT>' + state)
        if state == 'F':
            self.shutit_obj.log('<STR_LIT>' + str(background_object), level=logging.DEBUG)
            remove_background_objects(background_objects_to_remove)
            return False, 'F', background_object
    remove_background_objects(background_objects_to_remove)
    self.shutit_obj.log('<STR_LIT>', level=logging.DEBUG)
    if unstarted_command_exists:
        for background_object in self.background_objects:
            state = background_object.check_background_command_state()
            if state == 'N':
                background_object.run_background_command()
                self.shutit_obj.log('<STR_LIT>' + str(background_object), level=logging.DEBUG)
                return False, 'N', background_object
    self.shutit_obj.log('<STR_LIT>', level=logging.DEBUG)
    return True, 'OK', None
Check whether any background commands are running or to be run. If none are, return True. If any are, return False.
f10365:c1:m3
def __init__(self, cfg_section, shutit):
    self.shutit = shutit
    self.config = {}
    self.__set_config(cfg_section)
    self.lines = []
    self.attaches = []
Initialise the emailer object.
cfg_section - section in shutit config to look for email configuration items, allowing easier config according to shutit_module, e.g. 'com.my_module', 'shutit.core.alerting.emailer.subject': My Module Build Failed!
Config Items:
shutit.core.alerting.emailer.mailto            - address to send the mail to (no default)
shutit.core.alerting.emailer.mailfrom          - address to send the mail from (angry@shutit.tk)
shutit.core.alerting.emailer.smtp_server       - server to send the mail (localhost)
shutit.core.alerting.emailer.smtp_port         - port to contact the smtp server on (587)
shutit.core.alerting.emailer.use_tls           - should we use tls to connect (True)
shutit.core.alerting.emailer.subject           - subject of the email (Shutit Report)
shutit.core.alerting.emailer.signature         - --Angry Shutit
shutit.core.alerting.emailer.compress          - gzip attachments? (True)
shutit.core.alerting.emailer.username          - mail username
shutit.core.alerting.emailer.password          - mail password
shutit.core.alerting.emailer.safe_mode         - don't fail the build if we get an exception
shutit.core.alerting.emailer.mailto_maintainer - email the maintainer of the module as well as the shutit.core.alerting.emailer.mailto address
f10369:c0:m0
def __set_config(self, cfg_section):
    defaults = [
        '<STR_LIT>', None,
        '<STR_LIT>', '<STR_LIT>',
        '<STR_LIT>', 'localhost',
        '<STR_LIT>', <NUM_LIT>,
        '<STR_LIT>', True,
        '<STR_LIT>', True,
        '<STR_LIT>', '<STR_LIT>',
        '<STR_LIT>', '<STR_LIT>',
        '<STR_LIT>', True,
        '<STR_LIT>', '<STR_LIT>',
        '<STR_LIT>', '<STR_LIT>',
        '<STR_LIT>', True,
        '<STR_LIT>', '<STR_LIT>',
        '<STR_LIT>', True
    ]
    for cfg_name, cfg_default in zip(defaults[0::2], defaults[1::2]):
        try:
            self.config[cfg_name] = self.shutit.cfg[cfg_section][cfg_name]
        except KeyError:
            if cfg_default is None:
                raise Exception(cfg_section + ' ' + cfg_name + '<STR_LIT>')
            else:
                self.config[cfg_name] = cfg_default
    if self.config['<STR_LIT>'] and (self.config['<STR_LIT>'] == "<STR_LIT>" or self.config['<STR_LIT>'] == self.config['<STR_LIT>']):
        self.config['<STR_LIT>'] = False
        self.config['<STR_LIT>'] = "<STR_LIT>"
Set up a local config dict according to defaults and the main shutit configuration. cfg_section - see __init__.
f10369:c0:m1
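The flat defaults list is paired up by the zip over even and odd slices; a small worked example, with config names taken from the class docstring (the values here are illustrative):

    defaults = ['mailto', None, 'smtp_server', 'localhost', 'smtp_port', 587]
    # defaults[0::2] are the config names, defaults[1::2] the default values.
    for cfg_name, cfg_default in zip(defaults[0::2], defaults[1::2]):
        print(cfg_name, '->', cfg_default)
    # mailto -> None
    # smtp_server -> localhost
    # smtp_port -> 587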
@staticmethod
def __gzip(filename):
    zipname = filename + '<STR_LIT>'
    file_pointer = open(filename, 'rb')
    zip_pointer = gzip.open(zipname, 'wb')
    zip_pointer.writelines(file_pointer)
    file_pointer.close()
    zip_pointer.close()
    return zipname
Compress a file returning the new filename (.gz)
f10369:c0:m2
def __get_smtp(self):
    use_tls = self.config['<STR_LIT>']
    if use_tls:
        smtp = SMTP(self.config['<STR_LIT>'], self.config['<STR_LIT>'])
        smtp.starttls()
    else:
        smtp = SMTP_SSL(self.config['<STR_LIT>'], self.config['<STR_LIT>'])
    return smtp
Return the appropriate smtplib connection depending on whether we're using TLS.
f10369:c0:m3
def add_line(self, line):
    self.lines.append(line)
Add a single line to the email body
f10369:c0:m4
def add_body(self, msg):
    self.lines = msg.rsplit('\n')
Add an entire email body as a string; it will be split on newlines and overwrite anything currently in the body (e.g. added by add_line).
f10369:c0:m5
def attach(self, filename, filetype="<STR_LIT>"):
    shutit = self.shutit
    host_path = '<STR_LIT>'
    host_fn = shutit.get_file(filename, host_path)
    if self.config['<STR_LIT>']:
        filetype = '<STR_LIT>'
        filename = self.__gzip(host_fn)
        host_fn = os.path.join(host_path, os.path.basename(filename))
    file_pointer = open(host_fn, 'rb')
    attach = MIMEApplication(file_pointer.read(), _subtype=filetype)
    file_pointer.close()
    attach.add_header('<STR_LIT>', '<STR_LIT>', filename=os.path.basename(filename))
    self.attaches.append(attach)
Attach a file - currently needs to be entered as root (shutit). filename - absolute path, on the target host! filetype - MIMEApplication._subtype
f10369:c0:m6
def __compose(self):
    msg = MIMEMultipart()
    msg['<STR_LIT>'] = self.config['<STR_LIT>']
    msg['<STR_LIT>'] = self.config['<STR_LIT>']
    msg['<STR_LIT>'] = self.config['<STR_LIT>']
    if self.config['<STR_LIT>']:
        msg['<STR_LIT>'] = self.config['<STR_LIT>']
    if self.config['<STR_LIT>'] != '<STR_LIT>':
        signature = '<STR_LIT>' + self.config['<STR_LIT>']
    else:
        signature = self.config['<STR_LIT>']
    body = MIMEText('\n'.join(self.lines) + signature)
    msg.attach(body)
    for attach in self.attaches:
        msg.attach(attach)
    return msg
Compose the message, pulling together body, attachments etc
f10369:c0:m7
def send(self, attachment_failure=False):
    if not self.config['<STR_LIT>']:
        self.shutit.log('<STR_LIT>', level=logging.INFO)
        return True
    msg = self.__compose()
    mailto = [self.config['<STR_LIT>']]
    smtp = self.__get_smtp()
    if self.config['<STR_LIT>'] != '<STR_LIT>':
        smtp.login(self.config['<STR_LIT>'], self.config['<STR_LIT>'])
    if self.config['<STR_LIT>']:
        mailto.append(self.config['<STR_LIT>'])
    try:
        self.shutit.log('<STR_LIT>', level=logging.INFO)
        smtp.sendmail(self.config['<STR_LIT>'], mailto, msg.as_string())
    except SMTPSenderRefused as refused:
        code = refused.args[0]
        if code == <NUM_LIT> and not attachment_failure:
            self.shutit.log("<STR_LIT>" + "<STR_LIT>", level=logging.INFO)
            self.attaches = []
            self.lines.append("<STR_LIT>")
            self.send(attachment_failure=True)
        else:
            self.shutit.log("<STR_LIT>" + str(refused), level=logging.INFO)
            if not self.config['<STR_LIT>']:
                raise refused
    except Exception as error:
        self.shutit.log('<STR_LIT>' + str(error), level=logging.INFO)
        if not self.config['<STR_LIT>']:
            raise error
    finally:
        smtp.quit()
Send the email according to the configured setup. attachment_failure - used to indicate a recursive call after the smtp server has refused based on file size; should not be used externally.
f10369:c0:m8
def cut_levels(nodes, start_level):
    final = []
    removed = []
    for node in nodes:
        if not hasattr(node, '<STR_LIT>'):
            remove(node, removed)
            continue
        if node.attr.get('<STR_LIT>', False):
            remove(node, removed)
            continue
        if node.level == start_level:
            final.append(node)
            node.parent = None
            if not node.visible and not node.children:
                remove(node, removed)
        elif node.level == start_level + 1:
            node.children = []
        else:
            remove(node, removed)
        if not node.visible:
            keep_node = False
            for child in node.children:
                keep_node = keep_node or child.visible
            if not keep_node:
                remove(node, removed)
    for node in removed:
        if node in final:
            final.remove(node)
    return final
Cut nodes away from menus.
f10371:m0
def ci(data, statfunction=None, alpha=<NUM_LIT>, n_samples=<NUM_LIT>,
       method='<STR_LIT>', output='<STR_LIT>', epsilon=<NUM_LIT>, multi=None,
       _iter=True):
    if np.iterable(alpha):
        alphas = np.array(alpha)
    else:
        alphas = np.array([alpha/2, 1-alpha/2])
    if multi is None:
        if isinstance(data, tuple):
            multi = True
        else:
            multi = False
    if statfunction is None:
        if _iter:
            statfunction = np.average
        else:
            def statfunc_wrapper(x, *args, **kwargs):
                return np.average(x, axis=-1, *args, **kwargs)
            statfunction = statfunc_wrapper
    if not multi:
        data = np.array(data)
        tdata = (data,)
    else:
        tdata = tuple(np.array(x) for x in data)
    if method == 'abc':
        n = tdata[0].shape[0]*1.0
        nn = tdata[0].shape[0]
        I = np.identity(nn)
        ep = epsilon / n*1.0
        p0 = np.repeat(1.0/n, nn)
        try:
            t0 = statfunction(*tdata, weights=p0)
        except TypeError as e:
            raise TypeError("<STR_LIT>".format(e.message))
        di_full = I - p0
        tp = np.fromiter((statfunction(*tdata, weights=p0+ep*di)
                          for di in di_full), dtype=np.float)
        tm = np.fromiter((statfunction(*tdata, weights=p0-ep*di)
                          for di in di_full), dtype=np.float)
        t1 = (tp-tm)/(2*ep)
        t2 = (tp-2*t0+tm)/ep**2
        sighat = np.sqrt(np.sum(t1**2))/n
        a = (np.sum(t1**3))/(6*n**3*sighat**3)
        delta = t1/(n**2*sighat)
        cq = (statfunction(*tdata, weights=p0+ep*delta)-2*t0+statfunction(*tdata, weights=p0-ep*delta))/(2*sighat*ep**2)
        bhat = np.sum(t2)/(2*n**2)
        curv = bhat/sighat-cq
        z0 = nppf(2*ncdf(a)*ncdf(-curv))
        Z = z0+nppf(alphas)
        za = Z/(1-a*Z)**2
        abc = np.zeros_like(alphas)
        for i in range(0, len(alphas)):
            abc[i] = statfunction(*tdata, weights=p0+za[i]*delta)
        if output == 'lowhigh':
            return abc
        elif output == 'errorbar':
            return abs(abc-statfunction(tdata))[np.newaxis].T
        else:
            raise ValueError("<STR_LIT>".format(output))
    if _iter:
        bootindexes = bootstrap_indexes(tdata[0], n_samples)
        stat = np.array([statfunction(*(x[indexes] for x in tdata))
                         for indexes in bootindexes])
    else:
        bootindexes = bootstrap_indexes_array(tdata[0], n_samples)
        stat = statfunction(*(x[bootindexes] for x in tdata))
    stat.sort(axis=0)
    if method == 'pi':
        avals = alphas
    elif method == 'bca':
        ostat = statfunction(*tdata)
        z0 = nppf((1.0*np.sum(stat < ostat, axis=0)) / n_samples)
        jackindexes = jackknife_indexes(tdata[0])
        jstat = [statfunction(*(x[indexes] for x in tdata)) for indexes in jackindexes]
        jmean = np.mean(jstat, axis=0)
        oldnperr = np.seterr(invalid='ignore')
        a = np.sum((jmean - jstat)**3, axis=0) / (
            <NUM_LIT> * np.sum((jmean - jstat)**2, axis=0)**<NUM_LIT>)
        if np.any(np.isnan(a)):
            nanind = np.nonzero(np.isnan(a))
            warnings.warn("<STR_LIT>".format(nanind), InstabilityWarning, stacklevel=2)
        zs = z0 + nppf(alphas).reshape(alphas.shape+(1,)*z0.ndim)
        avals = ncdf(z0 + zs/(1-a*zs))
        np.seterr(**oldnperr)
    else:
        raise ValueError("<STR_LIT>".format(method))
    nvals = np.round((n_samples-1)*avals)
    oldnperr = np.seterr(invalid='ignore')
    if np.any(np.isnan(nvals)):
        warnings.warn("<STR_LIT>" +
                      "<STR_LIT>", InstabilityWarning,
                      stacklevel=2)
    if np.any(nvals == 0) or np.any(nvals == n_samples-1):
        warnings.warn("<STR_LIT>" +
                      "<STR_LIT>",
                      InstabilityWarning, stacklevel=2)
    elif np.any(nvals < 10) or np.any(nvals >= n_samples-10):
        warnings.warn("<STR_LIT>" +
                      "<STR_LIT>",
                      InstabilityWarning, stacklevel=2)
    np.seterr(**oldnperr)
    nvals = np.nan_to_num(nvals).astype('int')
    if output == 'lowhigh':
        if nvals.ndim == 1:
            return stat[nvals]
        else:
            return stat[(nvals, np.indices(nvals.shape)[1:].squeeze())]
    elif output == 'errorbar':
        if nvals.ndim == 1:
            return abs(statfunction(data)-stat[nvals])[np.newaxis].T
        else:
            return abs(statfunction(data)-stat[(nvals, np.indices(nvals.shape)[1:])])[np.newaxis].T
    else:
        raise ValueError("<STR_LIT>".format(output))
Given a set of data ``data``, and a statistics function ``statfunction`` that applies to that data, computes the bootstrap confidence interval for ``statfunction`` on that data. Data points are assumed to be delineated by axis 0.

Parameters
----------
data: array_like, shape (N, ...) OR tuple of array_like all with shape (N, ...)
    Input data. Data points are assumed to be delineated by axis 0. Beyond this, the shape doesn't matter, so long as ``statfunction`` can be applied to the array. If a tuple of array_likes is passed, then samples from each array (along axis 0) are passed in order as separate parameters to the statfunction. The type of data (single array or tuple of arrays) can be explicitly specified by the multi parameter.
statfunction: function (data, weights=(weights, optional)) -> value
    This function should accept samples of data from ``data``. It is applied to these samples individually. If using the ABC method, the function _must_ accept a named ``weights`` parameter which will be an array_like with weights for each sample, and must return a _weighted_ result. Otherwise this parameter is not used or required. Note that numpy's np.average accepts this. (default=np.average)
alpha: float or iterable, optional
    The percentiles to use for the confidence interval (default=0.05). If this is a float, the returned values are (alpha/2, 1-alpha/2) percentile confidence intervals. If it is an iterable, alpha is assumed to be an iterable of each desired percentile.
n_samples: float, optional
    The number of bootstrap samples to use (default=10000)
method: string, optional
    The method to use: one of 'pi', 'bca', or 'abc' (default='bca')
output: string, optional
    The format of the output. 'lowhigh' gives low and high confidence interval values. 'errorbar' gives transposed abs(value-confidence interval value) values that are suitable for use with matplotlib's errorbar function. (default='lowhigh')
epsilon: float, optional (only for ABC method)
    The step size for finite difference calculations in the ABC method. Ignored for all other methods. (default=0.001)
multi: boolean, optional
    If False, assume data is a single array. If True, assume data is a tuple/other iterable of arrays of the same length that should be sampled together. If None, decide based on whether the data is an actual tuple. (default=None)

Returns
-------
confidences: tuple of floats
    The confidence percentiles specified by alpha

Calculation Methods
-------------------
'pi': Percentile Interval (Efron 13.3)
    The percentile interval method simply returns the 100*alphath bootstrap sample's values for the statistic. This is an extremely simple method of confidence interval calculation. However, it has several disadvantages compared to the bias-corrected accelerated method, which is the default.
'bca': Bias-Corrected Accelerated (BCa) Non-Parametric (Efron 14.3) (default)
    This method is much more complex to explain. However, it gives considerably better results, and is generally recommended for normal situations. Note that in cases where the statistic is smooth, and can be expressed with weights, the ABC method will give approximated results much, much faster. Note that in a case where the statfunction results in equal output for every bootstrap sample, the BCa confidence interval is technically undefined, as the acceleration value is undefined. To match the percentile interval method and give reasonable output, the implementation of this method returns a confidence interval of zero width using the 0th bootstrap sample in this case, and warns the user.
'abc': Approximate Bootstrap Confidence (Efron 14.4, 22.6)
    This method provides approximated bootstrap confidence intervals without actually taking bootstrap samples. This requires that the statistic be smooth, and allow for weighting of individual points with a weights= parameter (note that np.average allows this). This is _much_ faster than all other methods for situations where it can be used.

Examples
--------
To calculate the confidence intervals for the mean of some numbers:

>> boot.ci( np.random.randn(100), np.average )

Given some data points in arrays x and y calculate the confidence intervals for all linear regression coefficients simultaneously:

>> boot.ci( (x,y), scipy.stats.linregress )

References
----------
Efron, An Introduction to the Bootstrap. Chapman & Hall 1993
f10373:m2
def bootstrap_indexes(data, n_samples=<NUM_LIT>):
    for _ in xrange(n_samples):
        yield randint(data.shape[0], size=(data.shape[0],))
Given data points data, where axis 0 is considered to delineate points, return a generator for sets of bootstrap indexes. This can be used as a list of bootstrap indexes (with list(bootstrap_indexes(data))) as well.
f10373:m3
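A usage sketch on Python 3 (range in place of xrange; the masked n_samples default is assumed to be 10000 per the ci() docstring), materialising three bootstrap index sets for a small array:

    import numpy as np
    from numpy.random import randint

    def bootstrap_indexes_sketch(data, n_samples=10000):
        # Same generator as above: each sample draws N indexes with replacement.
        for _ in range(n_samples):
            yield randint(data.shape[0], size=(data.shape[0],))

    data = np.array([2.0, 4.0, 6.0, 8.0])
    for idx in bootstrap_indexes_sketch(data, n_samples=3):
        print(idx, data[idx].mean())  # each resample's indexes and its mean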
def jackknife_indexes(data):
    base = np.arange(0, len(data))
    return (np.delete(base, i) for i in base)
Given data points data, where axis 0 is considered to delineate points, return a list of arrays where each array is a set of jackknife indexes. For a given set of data Y, the jackknife sample J[i] is defined as the data set Y with the ith data point deleted.
f10373:m5
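A worked example for four data points; each jackknife index set J[i] drops the i-th point:

    import numpy as np

    base = np.arange(0, 4)
    for i in base:
        print(np.delete(base, i))
    # [1 2 3]
    # [0 2 3]
    # [0 1 3]
    # [0 1 2]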
def subsample_indexes(data, n_samples=1000, size=0.5):
    if size == -1:
        size = len(data)
    elif (size < 1) and (size > 0):
        size = int(round(size*len(data)))
    elif size > 1:
        pass
    else:
        raise ValueError("<STR_LIT>".format(size))
    base = np.tile(np.arange(len(data)), (n_samples, 1))
    for sample in base:
        np.random.shuffle(sample)
    return base[:, 0:size]
Given data points data, where axis 0 is considered to delineate points, return a list of arrays where each array indexes a subsample of the data of size ``size``. If size is >= 1, then it will be taken to be an absolute size. If size < 1, it will be taken to be a fraction of the data size. If size == -1, it will be taken to mean subsamples the same size as the sample (ie, permuted samples).
f10373:m6
def bootstrap_indexes_moving_block(data, n_samples=<NUM_LIT>,
                                   block_length=3, wrap=False):
    n_obs = data.shape[0]
    n_blocks = int(ceil(n_obs / block_length))
    nexts = np.repeat(np.arange(0, block_length)[None, :], n_blocks, axis=0)
    if wrap:
        last_block = n_obs
    else:
        last_block = n_obs - block_length
    for _ in xrange(n_samples):
        blocks = np.random.randint(0, last_block, size=n_blocks)
        if not wrap:
            yield (blocks[:, None]+nexts).ravel()[:n_obs]
        else:
            yield np.mod((blocks[:, None]+nexts).ravel()[:n_obs], n_obs)
Generate moving-block bootstrap samples. Given data points `data`, where axis 0 is considered to delineate points, return a generator for sets of bootstrap indexes. This can be used as a list of bootstrap indexes (with list(bootstrap_indexes_moving_block(data))) as well.
Parameters
----------
n_samples [default 10000]: the number of subsamples to generate.
block_length [default 3]: the length of block.
wrap [default False]: if false, choose only blocks within the data, making the last block for data of length L start at L-block_length. If true, choose blocks starting anywhere, and if they extend past the end of the data, wrap around to the beginning of the data again.
f10373:m7
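To make the block mechanics concrete, a self-contained sketch of the non-wrapping branch for 10 observations with block_length=3; each sample is stitched from whole blocks of consecutive indexes, preserving short-range dependence:

    import numpy as np

    n_obs, block_length = 10, 3
    n_blocks = int(np.ceil(n_obs / block_length))  # 4 blocks cover 10 points
    nexts = np.repeat(np.arange(block_length)[None, :], n_blocks, axis=0)
    blocks = np.random.randint(0, n_obs - block_length, size=n_blocks)
    print((blocks[:, None] + nexts).ravel()[:n_obs])
    # e.g. [4 5 6 0 1 2 5 6 7 3] - runs of consecutive indexes (output varies)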
def __init__(self, signal, events, event_names=[], covariates=None, durations=None, sample_frequency=1.0, deconvolution_interval=[-0.5, 5], deconvolution_frequency=None):
    self.logger = logging.getLogger('<STR_LIT>')
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter('<STR_LIT>')
    ch.setFormatter(formatter)
    self.logger.addHandler(ch)
    self.logger.debug('<STR_LIT>' % (sample_frequency))
    self.signal = signal
    if len(self.signal.shape) == 1:
        self.signal = self.signal[np.newaxis, :]
    if event_names == []:
        self.event_names = [str(i) for i in np.arange(len(events))]
    else:
        self.event_names = event_names
    assert len(self.event_names) == len(events), '<STR_LIT>' % (len(self.event_names), self.event_names, len(events))
    self.events = dict(zip(self.event_names, events))
    self.sample_frequency = sample_frequency
    self.deconvolution_interval = deconvolution_interval
    if deconvolution_frequency is None:
        self.deconvolution_frequency = sample_frequency
    else:
        self.deconvolution_frequency = deconvolution_frequency
    self.resampling_factor = self.sample_frequency/self.deconvolution_frequency
    self.deconvolution_interval_size = np.round((self.deconvolution_interval[1] - self.deconvolution_interval[0]) * self.deconvolution_frequency)
    if not np.allclose([round(self.deconvolution_interval_size)], [self.deconvolution_interval_size]):
        print('<STR_LIT>' % self.deconvolution_interval_size)
    self.deconvolution_interval_size = int(self.deconvolution_interval_size)
    self.deconvolution_interval_timepoints = np.linspace(self.deconvolution_interval[0], self.deconvolution_interval[1], self.deconvolution_interval_size)
    self.signal_duration = self.signal.shape[-1] / self.sample_frequency
    self.resampled_signal_size = int(self.signal_duration*self.deconvolution_frequency)
    self.resampled_signal = scipy.signal.resample(self.signal, self.resampled_signal_size, axis=-1)
    if covariates is None:
        self.covariates = dict(zip(self.event_names, [np.ones(len(ev)) for ev in events]))
    else:
        self.covariates = covariates
    if durations is None:
        self.durations = dict(zip(self.event_names, [np.ones(len(ev))/deconvolution_frequency for ev in events]))
    else:
        self.durations = durations
    self.number_of_event_types = len(self.covariates)
    self.event_times_indices = dict(zip(self.event_names, [((ev + self.deconvolution_interval[0])*self.deconvolution_frequency).astype(int) for ev in events]))
    self.duration_indices = dict(zip(self.event_names, [(self.durations[ev]*self.deconvolution_frequency).astype(int) for ev in self.event_names]))
FIRDeconvolution takes a signal and events in order to perform FIR fitting of the event-related responses in the signal. Most settings for the analysis are set here. :param signal: input signal. :type signal: numpy array, (nr_signals x nr_samples) :param events: event occurrence times. :type events: list of numpy arrays, (nr_event_types x nr_events_per_type) :param event_names: event names. :type event_names: list of strings, if empty, event names will be string representations of range(nr_event_types) :param covariates: covariates belonging to event_types. If None, covariates with a value of 1 for all events are created and used internally. :type covariates: dictionary, with keys "event_type.covariate_name" and values numpy arrays, (nr_events) :param durations: durations belonging to event_types. If None, durations with a value of 1 sample for all events are created and used internally. :type durations: dictionary, with keys "event_type" and values numpy arrays, (nr_events) :param sample_frequency: input signal sampling frequency in Hz, standard value: 1.0 :type sample_frequency: float :param deconvolution_interval: interval of time around the events for which FIR fitting is performed. :type deconvolution_interval: list: [float, float] :param deconvolution_frequency: effective frequency in Hz at which analysis is performed. If None, identical to the sample_frequency. :type deconvolution_frequency: float :returns: Nothing, but initializes the FIRDeconvolution object.
f10378:c0:m0
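A minimal construction sketch; the import path is an assumption (it is not stated in this row), and the synthetic signal and event times are illustrative only:

    import numpy as np
    from fir import FIRDeconvolution  # module path is an assumption

    signal = np.random.randn(1, 6000)                  # 1 channel, 100 s at 60 Hz
    events = [np.sort(np.random.uniform(5, 95, 40))]   # one event type, times in s

    fd = FIRDeconvolution(signal=signal,
                          events=events,
                          event_names=['stim'],
                          sample_frequency=60.0,
                          deconvolution_interval=[-0.5, 5],
                          deconvolution_frequency=30.0)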
def create_event_regressors(self, event_times_indices, covariates = None, durations = None):
<EOL>if covariates is None:<EOL><INDENT>covariates = np.ones(self.event_times_indices.shape)<EOL><DEDENT>if durations is None:<EOL><INDENT>durations = np.ones(self.event_times_indices.shape)<EOL><DEDENT>else:<EOL><INDENT>durations = np.round(durations*self.deconvolution_frequency).astype(int)<EOL><DEDENT>mean_duration = np.mean(durations)<EOL>regressors_for_event = np.zeros((self.deconvolution_interval_size, self.resampled_signal_size))<EOL>for cov, eti, dur in zip(covariates, event_times_indices, durations):<EOL><INDENT>valid = True<EOL>if eti < <NUM_LIT:0>:<EOL><INDENT>self.logger.debug('<STR_LIT>')<EOL>valid = False<EOL><DEDENT>if eti+self.deconvolution_interval_size > self.resampled_signal_size:<EOL><INDENT>self.logger.debug('<STR_LIT>')<EOL>valid = False<EOL><DEDENT>if eti > self.resampled_signal_size:<EOL><INDENT>self.logger.debug('<STR_LIT>')<EOL>valid = False<EOL><DEDENT>if valid: <EOL><INDENT>this_event_design_matrix = (np.diag(np.ones(self.deconvolution_interval_size)) * cov)<EOL>over_durations_dm = np.copy(this_event_design_matrix)<EOL>if dur > <NUM_LIT:1>: <EOL><INDENT>for d in np.arange(<NUM_LIT:1>,dur):<EOL><INDENT>over_durations_dm[d:] += this_event_design_matrix[:-d]<EOL><DEDENT>over_durations_dm /= mean_duration<EOL><DEDENT>regressors_for_event[:,eti:int(eti+self.deconvolution_interval_size)] += over_durations_dm<EOL><DEDENT><DEDENT>return regressors_for_event<EOL>
create_event_regressors creates the part of the design matrix corresponding to one event type. :param event_times_indices: indices in the resampled data, on which the events occurred. :type event_times_indices: numpy array, (nr_events) :param covariates: covariates belonging to this event type. If None, covariates with a value of 1 for all events are created and used internally. :type covariates: numpy array, (nr_events) :param durations: durations belonging to this event type. If None, durations with a value of 1 sample for all events are created and used internally. :type durations: numpy array, (nr_events) :returns: This event type's part of the design matrix.
f10378:c0:m1
def create_design_matrix(self, demean = False, intercept = True):
self.design_matrix = np.zeros((int(self.number_of_event_types*self.deconvolution_interval_size), self.resampled_signal_size))<EOL>for i, covariate in enumerate(self.covariates.keys()):<EOL><INDENT>self.logger.debug('<STR_LIT>' + covariate)<EOL>indices = np.arange(i*self.deconvolution_interval_size,(i+<NUM_LIT:1>)*self.deconvolution_interval_size, dtype = int)<EOL>if len(covariate.split('<STR_LIT:.>')) > <NUM_LIT:0>:<EOL><INDENT>which_event_time_indices = covariate.split('<STR_LIT:.>')[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>which_event_time_indices = covariate<EOL><DEDENT>self.design_matrix[indices] = self.create_event_regressors( self.event_times_indices[which_event_time_indices], <EOL>self.covariates[covariate], <EOL>self.durations[which_event_time_indices])<EOL><DEDENT>if demean:<EOL><INDENT>self.design_matrix = (self.design_matrix.T - self.design_matrix.mean(axis = -<NUM_LIT:1>)).T<EOL><DEDENT>if intercept:<EOL><INDENT>self.design_matrix = np.vstack((self.design_matrix, np.ones((<NUM_LIT:1>,self.design_matrix.shape[-<NUM_LIT:1>]))))<EOL><DEDENT>self.logger.debug('<STR_LIT>' % (str(self.design_matrix.shape)))<EOL>
create_design_matrix calls create_event_regressors for each of the covariates in the self.covariates dict. self.design_matrix is created and is shaped (nr_regressors, self.resampled_signal.shape[-1])
f10378:c0:m2
def add_continuous_regressors_to_design_matrix(self, regressors):
previous_design_matrix_shape = self.design_matrix.shape<EOL>if len(regressors.shape) == <NUM_LIT:1>:<EOL><INDENT>regressors = regressors[np.newaxis, :]<EOL><DEDENT>if regressors.shape[<NUM_LIT:1>] != self.resampled_signal.shape[<NUM_LIT:1>]:<EOL><INDENT>self.logger.warning('<STR_LIT>' % (regressors.shape, self.resampled_signal.shape))<EOL><DEDENT>self.design_matrix = np.vstack((self.design_matrix, regressors))<EOL>self.logger.debug('<STR_LIT>' % (str(regressors.shape), str(previous_design_matrix_shape), str(self.design_matrix.shape)))<EOL>
add_continuous_regressors_to_design_matrix appends continuously sampled regressors to the existing design matrix. One uses this addition to the design matrix when one expects the data to contain nuisance factors that aren't tied to the moments of specific events. For instance, in fMRI analysis this allows us to add cardiac / respiratory regressors, as well as tissue and head motion timecourses, to the design matrix. :param regressors: the signal to be appended to the design matrix. :type regressors: numpy array, with shape equal to (nr_regressors, self.resampled_signal.shape[-1])
f10378:c0:m3
def regress(self, method = '<STR_LIT>'):
if method == '<STR_LIT>':<EOL><INDENT>self.betas, residuals_sum, rank, s = LA.lstsq(self.design_matrix.T, self.resampled_signal.T)<EOL>self.residuals = self.resampled_signal - self.predict_from_design_matrix(self.design_matrix)<EOL><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>import statsmodels.api as sm<EOL>assert self.resampled_signal.shape[<NUM_LIT:0>] == <NUM_LIT:1>,'<STR_LIT>' % str(self.resampled_signal.shape)<EOL>model = sm.OLS(np.squeeze(self.resampled_signal),self.design_matrix.T)<EOL>results = model.fit()<EOL>self.betas = np.array(results.params).reshape((self.design_matrix.shape[<NUM_LIT:0>], self.resampled_signal.shape[<NUM_LIT:0>]))<EOL>self.residuals = np.array(results.resid).reshape(self.resampled_signal.shape)<EOL><DEDENT>self.logger.debug('<STR_LIT>' % (method, str(self.design_matrix.shape), str(self.resampled_signal.shape)))<EOL>
regress performs linear least-squares regression of the design matrix on the data. :param method: method, or backend to be used for the regression analysis. :type method: string, one of ['lstsq', 'sm_ols'] :returns: instance variables 'betas' (nr_betas x nr_signals) and 'residuals' (nr_signals x nr_samples) are created.
f10378:c0:m4
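Continuing the construction sketch above, a typical fit sequence (the 'lstsq' backend name is taken from the docstring here; fd is the instance from the earlier sketch):

    fd.create_design_matrix(demean=False, intercept=True)
    fd.regress(method='lstsq')                  # ordinary least-squares backend
    fd.betas_for_events()
    irf = fd.betas_per_event_type[0].squeeze()  # estimated FIR response to 'stim'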
def ridge_regress(self, cv = <NUM_LIT:20>, alphas = None ):
if alphas is None:<EOL><INDENT>alphas = np.logspace(<NUM_LIT:7>, <NUM_LIT:0>, <NUM_LIT:20>)<EOL><DEDENT>self.rcv = linear_model.RidgeCV(alphas=alphas, <EOL>fit_intercept=False, <EOL>cv=cv) <EOL>self.rcv.fit(self.design_matrix.T, self.resampled_signal.T)<EOL>self.betas = self.rcv.coef_.T<EOL>self.residuals = self.resampled_signal - self.rcv.predict(self.design_matrix.T)<EOL>self.logger.debug('<STR_LIT>' % (str(self.design_matrix.shape), str(self.resampled_signal.shape), self.rcv.alpha_))<EOL>
Perform k-fold cross-validated ridge regression on the design_matrix. To be used when the design matrix contains very collinear regressors. For cross-validation and ridge fitting, we use sklearn's RidgeCV functionality. Note: intercept is not fit, and data are not prenormalized. :param cv: cross-validated folds, inherits RidgeCV cv argument's functionality. :type cv: int, standard = 20 :param alphas: values of penalization parameter to be traversed by the procedure, inherits RidgeCV alphas argument's functionality. Standard value, when parameter is None, is np.logspace(7, 0, 20) :type alphas: numpy array of positive floats. :returns: instance variables 'betas' (nr_betas x nr_signals) and 'residuals' (nr_signals x nr_samples) are created.
f10378:c0:m5
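For reference, a standalone sketch of the same sklearn machinery the method wraps, on synthetic data (names are illustrative):

    import numpy as np
    from sklearn import linear_model

    X = np.random.randn(200, 10)        # (n_samples, n_regressors), i.e. design_matrix.T
    y = X @ np.random.randn(10) + 0.1 * np.random.randn(200)

    rcv = linear_model.RidgeCV(alphas=np.logspace(7, 0, 20),
                               fit_intercept=False, cv=20)
    rcv.fit(X, y)
    betas = rcv.coef_.T                 # analogous to self.betas above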
def betas_for_cov(self, covariate = '<STR_LIT:0>'):
<EOL>this_covariate_index = list(self.covariates.keys()).index(covariate)<EOL>return self.betas[int(this_covariate_index*self.deconvolution_interval_size):int((this_covariate_index+<NUM_LIT:1>)*self.deconvolution_interval_size)]<EOL>
betas_for_cov returns the beta values (i.e. IRF) associated with a specific covariate. :param covariate: name of covariate. :type covariate: string
f10378:c0:m6
def betas_for_events(self):
self.betas_per_event_type = np.zeros((len(self.covariates), self.deconvolution_interval_size, self.resampled_signal.shape[<NUM_LIT:0>]))<EOL>for i, covariate in enumerate(self.covariates.keys()):<EOL><INDENT>self.betas_per_event_type[i] = self.betas_for_cov(covariate)<EOL><DEDENT>
betas_for_events creates an internal self.betas_per_event_type array, of (nr_covariates x self.deconvolution_interval_size), which holds the outcome betas per event type, in the order generated by self.covariates.keys()
f10378:c0:m7
def predict_from_design_matrix(self, design_matrix):
<EOL>assert hasattr(self, '<STR_LIT>'), '<STR_LIT>'<EOL>assert design_matrix.shape[<NUM_LIT:0>] == self.betas.shape[<NUM_LIT:0>],'<STR_LIT>'<EOL>prediction = np.dot(self.betas.astype(np.float32).T, design_matrix.astype(np.float32))<EOL>return prediction<EOL>
predict_from_design_matrix predicts signals given a design matrix. :param design_matrix: design matrix from which to predict a signal. :type design_matrix: numpy array, (nr_regressors x nr_samples) :returns: predicted signal(s) :rtype: numpy array (nr_signals x nr_samples)
f10378:c0:m8
def calculate_rsq(self):
assert hasattr(self, '<STR_LIT>'), '<STR_LIT>'<EOL>explained_times = self.design_matrix.sum(axis = <NUM_LIT:0>) != <NUM_LIT:0><EOL>explained_signal = self.predict_from_design_matrix(self.design_matrix)<EOL>self.rsq = <NUM_LIT:1.0> - np.sum((explained_signal[:,explained_times] - self.resampled_signal[:,explained_times])**<NUM_LIT:2>, axis = -<NUM_LIT:1>) / np.sum(self.resampled_signal[:,explained_times].squeeze()**<NUM_LIT:2>, axis = -<NUM_LIT:1>)<EOL>self.ssr = np.sum((explained_signal[:,explained_times] - self.resampled_signal[:,explained_times])**<NUM_LIT:2>, axis = -<NUM_LIT:1>)<EOL>return np.squeeze(self.rsq)<EOL>
calculate_rsq calculates the coefficient of determination, or r-squared, defined here as 1.0 - SS_res / SS_tot. rsq is only calculated for those timepoints in the data for which the design matrix is non-zero.
f10378:c0:m9
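Note that in the body above the denominator is the sum of squares of the raw signal, not of its deviations from the mean, so this is an uncentered R-squared (my reading of the code). A small numeric sketch:

    import numpy as np

    signal = np.random.randn(1, 500)
    prediction = 0.8 * signal           # stand-in for the model prediction
    # uncentered R^2, mirroring calculate_rsq: 1 - SS_res / sum(signal**2)
    rsq = 1.0 - (np.sum((prediction - signal)**2, axis=-1) /
                 np.sum(signal**2, axis=-1))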
def bootstrap_on_residuals(self, nr_repetitions = <NUM_LIT:1000>):
assert self.resampled_signal.shape[<NUM_LIT:0>] == <NUM_LIT:1>,'<STR_LIT>' % str(self.resampled_signal.shape)<EOL>assert hasattr(self, '<STR_LIT>'), '<STR_LIT>'<EOL>bootstrap_data = np.zeros((self.resampled_signal_size, nr_repetitions))<EOL>explained_signal = self.predict_from_design_matrix(self.design_matrix).T<EOL>for x in range(bootstrap_data.shape[-<NUM_LIT:1>]): <EOL><INDENT>bootstrap_data[:,x] = (self.residuals.T[np.random.permutation(self.resampled_signal_size)] + explained_signal).squeeze()<EOL><DEDENT>self.bootstrap_betas, bs_residuals, rank, s = LA.lstsq(self.design_matrix.T, bootstrap_data)<EOL>self.bootstrap_betas_per_event_type = np.zeros((len(self.covariates), self.deconvolution_interval_size, nr_repetitions))<EOL>for i, covariate in enumerate(list(self.covariates.keys())):<EOL><INDENT>this_covariate_index = list(self.covariates.keys()).index(covariate)<EOL>self.bootstrap_betas_per_event_type[i] = self.bootstrap_betas[this_covariate_index*self.deconvolution_interval_size:(this_covariate_index+<NUM_LIT:1>)*self.deconvolution_interval_size]<EOL><DEDENT>
bootstrap_on_residuals bootstraps by shuffling the residuals. bootstrap_on_residuals should only be used on single-channel data, as otherwise the memory load might increase too much. This uses the lstsq backend regression for a single-pass fit across repetitions. Please note that shuffling the residuals may change the autocorrelation of the bootstrap samples relative to that of the original data, which may reduce its validity. Reference: https://en.wikipedia.org/wiki/Bootstrapping_(statistics)#Resampling_residuals :param nr_repetitions: number of repetitions for the bootstrap. :type nr_repetitions: int
f10378:c0:m10
def infomax(data, weights=None, l_rate=None, block=None, w_change=<NUM_LIT>,<EOL>anneal_deg=<NUM_LIT>, anneal_step=<NUM_LIT>, extended=False, n_subgauss=<NUM_LIT:1>,<EOL>kurt_size=<NUM_LIT>, ext_blocks=<NUM_LIT:1>, max_iter=<NUM_LIT:200>,<EOL>random_state=None, verbose=None):
rng = check_random_state(random_state)<EOL>max_weight = <NUM_LIT><EOL>restart_fac = <NUM_LIT><EOL>min_l_rate = <NUM_LIT><EOL>blowup = <NUM_LIT><EOL>blowup_fac = <NUM_LIT:0.5><EOL>n_small_angle = <NUM_LIT:20><EOL>degconst = <NUM_LIT> / np.pi<EOL>extmomentum = <NUM_LIT:0.5><EOL>signsbias = <NUM_LIT><EOL>signcount_threshold = <NUM_LIT><EOL>signcount_step = <NUM_LIT:2><EOL>if ext_blocks > <NUM_LIT:0>: <EOL><INDENT>n_subgauss = <NUM_LIT:1> <EOL><DEDENT>n_samples, n_features = data.shape<EOL>n_features_square = n_features ** <NUM_LIT:2><EOL>if l_rate is None:<EOL><INDENT>l_rate = <NUM_LIT> / math.log(n_features ** <NUM_LIT>)<EOL><DEDENT>if block is None:<EOL><INDENT>block = int(math.floor(math.sqrt(n_samples / <NUM_LIT>)))<EOL><DEDENT>logger.info('<STR_LIT>' % '<STR_LIT>' if extended is True<EOL>else '<STR_LIT:U+0020>')<EOL>nblock = n_samples // block<EOL>lastt = (nblock - <NUM_LIT:1>) * block + <NUM_LIT:1><EOL>if weights is None:<EOL><INDENT>weights = np.identity(n_features, dtype=np.float64)<EOL><DEDENT>BI = block * np.identity(n_features, dtype=np.float64)<EOL>bias = np.zeros((n_features, <NUM_LIT:1>), dtype=np.float64)<EOL>onesrow = np.ones((<NUM_LIT:1>, block), dtype=np.float64)<EOL>startweights = weights.copy()<EOL>oldweights = startweights.copy()<EOL>step = <NUM_LIT:0><EOL>count_small_angle = <NUM_LIT:0><EOL>wts_blowup = False<EOL>blockno = <NUM_LIT:0><EOL>signcount = <NUM_LIT:0><EOL>if extended is True:<EOL><INDENT>signs = np.identity(n_features)<EOL>signs.flat[slice(<NUM_LIT:0>, n_features * n_subgauss, n_features)]<EOL>kurt_size = min(kurt_size, n_samples)<EOL>old_kurt = np.zeros(n_features, dtype=np.float64)<EOL>oldsigns = np.zeros((n_features, n_features))<EOL><DEDENT>olddelta, oldchange = <NUM_LIT:1.>, <NUM_LIT:0.><EOL>while step < max_iter:<EOL><INDENT>permute = list(range(n_samples))<EOL>rng.shuffle(permute)<EOL>for t in range(<NUM_LIT:0>, lastt, block):<EOL><INDENT>u = np.dot(data[permute[t:t + block], :], weights)<EOL>u += np.dot(bias, onesrow).T<EOL>if extended is True:<EOL><INDENT>y = np.tanh(u)<EOL>weights += l_rate * np.dot(weights,<EOL>BI - np.dot(np.dot(u.T, y), signs) -<EOL>np.dot(u.T, u))<EOL>bias += l_rate * np.reshape(np.sum(y, axis=<NUM_LIT:0>,<EOL>dtype=np.float64) * -<NUM_LIT>,<EOL>(n_features, <NUM_LIT:1>))<EOL><DEDENT>else:<EOL><INDENT>y = <NUM_LIT:1.0> / (<NUM_LIT:1.0> + np.exp(-u))<EOL>weights += l_rate * np.dot(weights,<EOL>BI + np.dot(u.T, (<NUM_LIT:1.0> - <NUM_LIT> * y)))<EOL>bias += l_rate * np.reshape(np.sum((<NUM_LIT:1.0> - <NUM_LIT> * y), axis=<NUM_LIT:0>,<EOL>dtype=np.float64), (n_features, <NUM_LIT:1>))<EOL><DEDENT>max_weight_val = np.max(np.abs(weights))<EOL>if max_weight_val > max_weight:<EOL><INDENT>wts_blowup = True<EOL><DEDENT>blockno += <NUM_LIT:1><EOL>if wts_blowup:<EOL><INDENT>break<EOL><DEDENT>if extended is True:<EOL><INDENT>n = np.fix(blockno / ext_blocks)<EOL>if np.abs(n) * ext_blocks == blockno:<EOL><INDENT>if kurt_size < n_samples:<EOL><INDENT>rp = np.floor(rng.uniform(<NUM_LIT:0>, <NUM_LIT:1>, kurt_size) *<EOL>(n_samples - <NUM_LIT:1>))<EOL>tpartact = np.dot(data[rp.astype(int), :], weights).T<EOL><DEDENT>else:<EOL><INDENT>tpartact = np.dot(data, weights).T<EOL><DEDENT>kurt = kurtosis(tpartact, axis=<NUM_LIT:1>, fisher=True)<EOL>if extmomentum != <NUM_LIT:0>:<EOL><INDENT>kurt = (extmomentum * old_kurt +<EOL>(<NUM_LIT:1.0> - extmomentum) * kurt)<EOL>old_kurt = kurt<EOL><DEDENT>signs.flat[::n_features + <NUM_LIT:1>] = ((kurt + signsbias) /<EOL>np.abs(kurt + signsbias))<EOL>ndiff = ((signs.flat[::n_features + <NUM_LIT:1>] -<EOL>oldsigns.flat[::n_features + <NUM_LIT:1>]) != <NUM_LIT:0>).sum()<EOL>if ndiff == <NUM_LIT:0>:<EOL><INDENT>signcount += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>signcount = <NUM_LIT:0><EOL><DEDENT>oldsigns = signs<EOL>if signcount >= signcount_threshold:<EOL><INDENT>ext_blocks = np.fix(ext_blocks * signcount_step)<EOL>signcount = <NUM_LIT:0><EOL><DEDENT><DEDENT><DEDENT><DEDENT>if not wts_blowup:<EOL><INDENT>oldwtchange = weights - oldweights<EOL>step += <NUM_LIT:1><EOL>angledelta = <NUM_LIT:0.0><EOL>delta = oldwtchange.reshape(<NUM_LIT:1>, n_features_square)<EOL>change = np.sum(delta * delta, dtype=np.float64)<EOL>if step > <NUM_LIT:1>:<EOL><INDENT>angledelta = math.acos(np.sum(delta * olddelta) /<EOL>math.sqrt(change * oldchange))<EOL>angledelta *= degconst<EOL><DEDENT>oldweights = weights.copy()<EOL>if angledelta > anneal_deg:<EOL><INDENT>l_rate *= anneal_step <EOL>olddelta = delta<EOL>oldchange = change<EOL>count_small_angle = <NUM_LIT:0> <EOL><DEDENT>else:<EOL><INDENT>if step == <NUM_LIT:1>: <EOL><INDENT>olddelta = delta <EOL>oldchange = change<EOL><DEDENT>count_small_angle += <NUM_LIT:1><EOL>if count_small_angle > n_small_angle:<EOL><INDENT>max_iter = step<EOL><DEDENT><DEDENT>if step > <NUM_LIT:2> and change < w_change:<EOL><INDENT>step = max_iter<EOL><DEDENT>elif change > blowup:<EOL><INDENT>l_rate *= blowup_fac<EOL><DEDENT><DEDENT>else:<EOL><INDENT>step = <NUM_LIT:0> <EOL>wts_blowup = <NUM_LIT:0> <EOL>blockno = <NUM_LIT:1><EOL>l_rate *= restart_fac <EOL>weights = startweights.copy()<EOL>oldweights = startweights.copy()<EOL>olddelta = np.zeros((<NUM_LIT:1>, n_features_square), dtype=np.float64)<EOL>bias = np.zeros((n_features, <NUM_LIT:1>), dtype=np.float64)<EOL>if extended:<EOL><INDENT>signs = np.identity(n_features)<EOL>signs.flat[slice(<NUM_LIT:0>, n_features * n_subgauss, n_features)]<EOL>oldsigns = np.zeros((n_features, n_features))<EOL><DEDENT>if l_rate > min_l_rate:<EOL><INDENT>if verbose:<EOL><INDENT>logger.info('<STR_LIT>'<EOL>'<STR_LIT>' % l_rate)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>return weights.T<EOL>
Run the (extended) Infomax ICA decomposition on raw data based on the publications of Bell & Sejnowski 1995 (Infomax) and Lee, Girolami & Sejnowski, 1999 (extended Infomax). Parameters ---------- data : np.ndarray, shape (n_samples, n_features) The data to unmix. weights : np.ndarray, shape (n_features, n_features) The initialized unmixing matrix. Defaults to None. If None, the identity matrix is used. l_rate : float This quantity indicates the relative size of the change in weights. Note: smaller learning rates will slow down the procedure. Defaults to 0.01 / log(n_features ** 2.0) block : int The block size of randomly chosen data segments. Defaults to floor(sqrt(n_samples / 3.0)) w_change : float The change at which to stop iteration. Defaults to 1e-12. anneal_deg : float The angle (in degrees) at which the learning rate will be reduced. Defaults to 60.0 anneal_step : float The factor by which the learning rate will be reduced once ``anneal_deg`` is exceeded: l_rate *= anneal_step Defaults to 0.9 extended : bool Whether to use the extended Infomax algorithm or not. Defaults to False. n_subgauss : int The number of subgaussian components. Only considered for extended Infomax. kurt_size : int The window size for kurtosis estimation. Only considered for extended Infomax. ext_blocks : int The number of blocks after which to recompute kurtosis. Only considered for extended Infomax. max_iter : int The maximum number of iterations. Defaults to 200. random_state : None | int | instance of np.random.RandomState Seed for the random number generator. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- unmixing_matrix : np.ndarray of float, shape (n_features, n_features) The linear unmixing operator.
f10381:m0
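A minimal end-to-end sketch, assuming infomax is importable from the surrounding module; infomax is conventionally applied to centered (often whitened) data, so the sources and mixing matrix here are illustrative:

    import numpy as np

    rng = np.random.RandomState(0)
    s = np.vstack([np.sign(rng.randn(10000)),       # sub-Gaussian source
                   rng.laplace(size=10000)])        # super-Gaussian source
    x = (np.array([[1.0, 0.5], [0.3, 1.0]]) @ s).T  # mixed, (n_samples, n_features)
    x -= x.mean(axis=0)                             # center before unmixing

    w = infomax(x, extended=True, random_state=0)   # unmixing matrix
    sources = x @ w.T                               # recovered source estimates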
def loadmat(filename):
data = sploadmat(filename, struct_as_record=False, squeeze_me=True)<EOL>return _check_keys(data)<EOL>
This function should be called instead of scipy.io.loadmat directly, as it cures the problem of not properly recovering python dictionaries from mat files. It calls the function _check_keys to cure all entries which are still mat-objects
f10384:m0
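A round-trip usage sketch (the example file name and contents are illustrative):

    import scipy.io

    scipy.io.savemat('example.mat', {'cfg': {'fs': 250, 'label': 'Cz'}})
    data = loadmat('example.mat')   # the wrapper defined above
    print(data['cfg']['fs'])        # nested struct arrives as a plain dict -> 250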
def _check_keys(dictionary):
for key in dictionary:<EOL><INDENT>if isinstance(dictionary[key], matlab.mio5_params.mat_struct):<EOL><INDENT>dictionary[key] = _todict(dictionary[key])<EOL><DEDENT><DEDENT>return dictionary<EOL>
Checks if entries in dictionary are mat-objects. If yes, _todict is called to change them to nested dictionaries
f10384:m1
def _todict(matobj):
dictionary = {}<EOL>for strg in matobj._fieldnames:<EOL><INDENT>elem = matobj.__dict__[strg]<EOL>if isinstance(elem, matlab.mio5_params.mat_struct):<EOL><INDENT>dictionary[strg] = _todict(elem)<EOL><DEDENT>else:<EOL><INDENT>dictionary[strg] = elem<EOL><DEDENT><DEDENT>return dictionary<EOL>
A recursive function which constructs nested dictionaries from mat-objects
f10384:m2
def generate_covsig(covmat, n):
global randn_index<EOL>global randn<EOL>covmat = np.atleast_2d(covmat)<EOL>m = covmat.shape[<NUM_LIT:0>]<EOL>l = np.linalg.cholesky(covmat)<EOL>x = []<EOL>while len(x) < n * m:<EOL><INDENT>to_go = min(randn_index + n * m - len(x), len(randn))<EOL>x.extend(randn[randn_index:to_go])<EOL>randn_index = to_go % len(randn)<EOL><DEDENT>x = np.reshape(x, (m, n))<EOL>d = np.linalg.inv(np.linalg.cholesky(np.atleast_2d(np.cov(x))))<EOL>x = l.dot(d).dot(x)<EOL>return x<EOL>
generate pseudorandom stochastic signals with covariance matrix covmat
f10391:m0
def singletrial(num_trials, skipstep=<NUM_LIT:1>):
for t in range(<NUM_LIT:0>, num_trials, skipstep):<EOL><INDENT>trainset = [t]<EOL>testset = [i for i in range(trainset[<NUM_LIT:0>])] +[i for i in range(trainset[-<NUM_LIT:1>] + <NUM_LIT:1>, num_trials)]<EOL>testset = sort([t % num_trials for t in testset])<EOL>yield trainset, testset<EOL><DEDENT>
Single-trial cross-validation schema Use one trial for training, all others for testing. Parameters ---------- num_trials : int Total number of trials skipstep : int only use every `skipstep` trial for training Returns ------- gen : generator object the generator returns tuples (trainset, testset)
f10406:m0
def multitrial(num_trials, skipstep=<NUM_LIT:1>):
for t in range(<NUM_LIT:0>, num_trials, skipstep):<EOL><INDENT>testset = [t]<EOL>trainset = [i for i in range(testset[<NUM_LIT:0>])] +[i for i in range(testset[-<NUM_LIT:1>] + <NUM_LIT:1>, num_trials)]<EOL>trainset = sort([t % num_trials for t in trainset])<EOL>yield trainset, testset<EOL><DEDENT>
Multi-trial cross-validation schema Use one trial for testing, all others for training. Parameters ---------- num_trials : int Total number of trials skipstep : int only use every `skipstep` trial for testing Returns ------- gen : generator object the generator returns tuples (trainset, testset)
f10406:m1
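A quick look at the index sets the two schemas above generate (four trials, default skipstep); note that trainset is a numpy array because the body sorts with numpy's sort:

    for train, test in multitrial(4):
        print(train, test)
    # [1 2 3] [0]
    # [0 2 3] [1]
    # [0 1 3] [2]
    # [0 1 2] [3]
    # singletrial(4) yields the same pairs with the train/test roles reversed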
def splitset(num_trials, skipstep=None):
split = num_trials // <NUM_LIT:2><EOL>a = list(range(<NUM_LIT:0>, split))<EOL>b = list(range(split, num_trials))<EOL>yield a, b<EOL>yield b, a<EOL>
Split-set cross validation Use half the trials for training, and the other half for testing. Then repeat the other way round. Parameters ---------- num_trials : int Total number of trials skipstep : int unused Returns ------- gen : generator object the generator returns tuples (trainset, testset)
f10406:m2
def make_nfold(n):
return partial(_nfold, n=n)<EOL>
n-fold cross validation Use each of n blocks for testing once. Parameters ---------- n : int number of blocks Returns ------- gengen : func a function that returns the generator
f10406:m3
def plainica(x, reducedim=<NUM_LIT>, backend=None, random_state=None):
x = atleast_3d(x)<EOL>t, m, l = np.shape(x)<EOL>if backend is None:<EOL><INDENT>backend = scotbackend<EOL><DEDENT>if reducedim == '<STR_LIT>':<EOL><INDENT>c = np.eye(m)<EOL>d = np.eye(m)<EOL>xpca = x<EOL><DEDENT>else:<EOL><INDENT>c, d, xpca = backend['<STR_LIT>'](x, reducedim)<EOL><DEDENT>mx, ux = backend['<STR_LIT>'](cat_trials(xpca), random_state=random_state)<EOL>mx = mx.dot(d)<EOL>ux = c.dot(ux)<EOL>class Result:<EOL><INDENT>unmixing = ux<EOL>mixing = mx<EOL><DEDENT>return Result<EOL>
Source decomposition with ICA. Apply ICA to the data x, with optional PCA dimensionality reduction. Parameters ---------- x : array, shape (n_trials, n_channels, n_samples) or (n_channels, n_samples) data set reducedim : {int, float, 'no_pca'}, optional A number less than 1 is interpreted as the fraction of variance that should remain in the data. All components that describe in total less than `1-reducedim` of the variance are removed by the PCA step. An integer number of 1 or greater is interpreted as the number of components to keep after applying the PCA. If set to 'no_pca' the PCA step is skipped. backend : dict-like, optional Specify backend to use. When set to None the backend configured in config.backend is used. Returns ------- result : ResultICA Source decomposition
f10407:m0
def parallel_loop(func, n_jobs=<NUM_LIT:1>, verbose=<NUM_LIT:1>):
if n_jobs:<EOL><INDENT>try:<EOL><INDENT>from joblib import Parallel, delayed<EOL><DEDENT>except ImportError:<EOL><INDENT>try:<EOL><INDENT>from sklearn.externals.joblib import Parallel, delayed<EOL><DEDENT>except ImportError:<EOL><INDENT>n_jobs = None<EOL><DEDENT><DEDENT><DEDENT>if not n_jobs:<EOL><INDENT>if verbose:<EOL><INDENT>print('<STR_LIT>', func, '<STR_LIT>')<EOL><DEDENT>par = lambda x: list(x)<EOL><DEDENT>else:<EOL><INDENT>if verbose:<EOL><INDENT>print('<STR_LIT>', func, '<STR_LIT>')<EOL><DEDENT>func = delayed(func)<EOL>par = Parallel(n_jobs=n_jobs, verbose=verbose)<EOL><DEDENT>return par, func<EOL>
run loops in parallel, if joblib is available. Parameters ---------- func : function function to be executed in parallel n_jobs : int | None Number of jobs. If set to None, do not attempt to use joblib. verbose : int verbosity level Notes ----- Execution of the main script must be guarded with `if __name__ == '__main__':` when using parallelization.
f10409:m0
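A small sketch of the calling convention (the same pattern appears in _calc_q_h0 further below); when actually parallelized, the calling script needs the if __name__ == '__main__': guard mentioned in the notes above:

    from math import sqrt

    # func is wrapped with joblib's delayed() when available; otherwise
    # par simply materializes the generator serially
    par, func = parallel_loop(sqrt, n_jobs=2, verbose=0)
    result = par(func(x) for x in range(5))   # [0.0, 1.0, 1.41..., 1.73..., 2.0]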
def prepare_topoplots(topo, values):
values = np.atleast_2d(values)<EOL>topomaps = []<EOL>for i in range(values.shape[<NUM_LIT:0>]):<EOL><INDENT>topo.set_values(values[i, :])<EOL>topo.create_map()<EOL>topomaps.append(topo.get_map())<EOL><DEDENT>return topomaps<EOL>
Prepare multiple topo maps for cached plotting. .. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_values`. Parameters ---------- topo : :class:`~eegtopo.topoplot.Topoplot` Scalp maps are created with this class values : array, shape = [n_topos, n_channels] Channel values for each topo plot Returns ------- topomaps : list of array The map for each topo plot
f10410:m4
def plot_topo(axis, topo, topomap, crange=None, offset=(<NUM_LIT:0>,<NUM_LIT:0>),<EOL>plot_locations=True, plot_head=True):
topo.set_map(topomap)<EOL>h = topo.plot_map(axis, crange=crange, offset=offset)<EOL>if plot_locations:<EOL><INDENT>topo.plot_locations(axis, offset=offset)<EOL><DEDENT>if plot_head:<EOL><INDENT>topo.plot_head(axis, offset=offset)<EOL><DEDENT>return h<EOL>
Draw a topoplot in given axis. .. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_map`. Parameters ---------- axis : axis Axis to draw into. topo : :class:`~eegtopo.topoplot.Topoplot` This object draws the topo plot topomap : array, shape = [w_pixels, h_pixels] Scalp-projected data crange : [int, int], optional Range of values covered by the colormap. If set to None, [-max(abs(topomap)), max(abs(topomap))] is substituted. offset : [float, float], optional Shift the topo plot by [x,y] in axis units. plot_locations : bool, optional Plot electrode locations. plot_head : bool, optional Plot head cartoon. Returns ------- h : image Image object the map was plotted into
f10410:m5
def plot_sources(topo, mixmaps, unmixmaps, global_scale=None, fig=None):
urange, mrange = None, None<EOL>m = len(mixmaps)<EOL>if global_scale:<EOL><INDENT>tmp = np.asarray(unmixmaps)<EOL>tmp = tmp[np.logical_not(np.isnan(tmp))]<EOL>umax = np.percentile(np.abs(tmp), global_scale)<EOL>umin = -umax<EOL>urange = [umin, umax]<EOL>tmp = np.asarray(mixmaps)<EOL>tmp = tmp[np.logical_not(np.isnan(tmp))]<EOL>mmax = np.percentile(np.abs(tmp), global_scale)<EOL>mmin = -mmax<EOL>mrange = [mmin, mmax]<EOL><DEDENT>y = np.floor(np.sqrt(m * <NUM_LIT:3> / <NUM_LIT:4>))<EOL>x = np.ceil(m / y)<EOL>if fig is None:<EOL><INDENT>fig = new_figure()<EOL><DEDENT>axes = []<EOL>for i in range(m):<EOL><INDENT>axes.append(fig.add_subplot(<NUM_LIT:2> * y, x, i + <NUM_LIT:1>))<EOL>plot_topo(axes[-<NUM_LIT:1>], topo, unmixmaps[i], crange=urange)<EOL>axes[-<NUM_LIT:1>].set_title(str(i))<EOL>axes.append(fig.add_subplot(<NUM_LIT:2> * y, x, m + i + <NUM_LIT:1>))<EOL>plot_topo(axes[-<NUM_LIT:1>], topo, mixmaps[i], crange=mrange)<EOL>axes[-<NUM_LIT:1>].set_title(str(i))<EOL><DEDENT>for a in axes:<EOL><INDENT>a.set_yticks([])<EOL>a.set_xticks([])<EOL>a.set_frame_on(False)<EOL><DEDENT>axes[<NUM_LIT:0>].set_ylabel('<STR_LIT>')<EOL>axes[<NUM_LIT:1>].set_ylabel('<STR_LIT>')<EOL>return fig<EOL>
Plot all scalp projections of mixing- and unmixing-maps. .. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_map`. Parameters ---------- topo : :class:`~eegtopo.topoplot.Topoplot` This object draws the topo plot mixmaps : array, shape = [w_pixels, h_pixels] Scalp-projected mixing matrix unmixmaps : array, shape = [w_pixels, h_pixels] Scalp-projected unmixing matrix global_scale : float, optional Set common color scale as given percentile of all map values to use as the maximum. `None` scales each plot individually (default). fig : Figure object, optional Figure to plot into. If set to `None`, a new figure is created. Returns ------- fig : Figure object The figure into which was plotted.
f10410:m6
def plot_connectivity_topos(layout='<STR_LIT>', topo=None, topomaps=None, fig=None):
m = len(topomaps)<EOL>if fig is None:<EOL><INDENT>fig = new_figure()<EOL><DEDENT>if layout == '<STR_LIT>':<EOL><INDENT>for i in range(m):<EOL><INDENT>ax = fig.add_subplot(m, m, i*(<NUM_LIT:1>+m) + <NUM_LIT:1>)<EOL>plot_topo(ax, topo, topomaps[i])<EOL>ax.set_yticks([])<EOL>ax.set_xticks([])<EOL>ax.set_frame_on(False)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for i in range(m):<EOL><INDENT>for j in [i+<NUM_LIT:2>, (i+<NUM_LIT:1>)*(m+<NUM_LIT:1>)+<NUM_LIT:1>]:<EOL><INDENT>ax = fig.add_subplot(m+<NUM_LIT:1>, m+<NUM_LIT:1>, j)<EOL>plot_topo(ax, topo, topomaps[i])<EOL>ax.set_yticks([])<EOL>ax.set_xticks([])<EOL>ax.set_frame_on(False)<EOL><DEDENT><DEDENT><DEDENT>return fig<EOL>
Place topo plots in a figure suitable for connectivity visualization. .. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_map`. Parameters ---------- layout : str 'diagonal' -> place topo plots on diagonal. otherwise -> place topo plots in left column and top row. topo : :class:`~eegtopo.topoplot.Topoplot` This object draws the topo plot topomaps : array, shape = [w_pixels, h_pixels] Scalp-projected map fig : Figure object, optional Figure to plot into. If set to `None`, a new figure is created. Returns ------- fig : Figure object The figure into which was plotted.
f10410:m7
def plot_connectivity_spectrum(a, fs=<NUM_LIT:2>, freq_range=(-np.inf, np.inf), diagonal=<NUM_LIT:0>, border=False, fig=None):
a = np.atleast_3d(a)<EOL>if a.ndim == <NUM_LIT:3>:<EOL><INDENT>[_, m, f] = a.shape<EOL>l = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>[l, _, m, f] = a.shape<EOL><DEDENT>freq = np.linspace(<NUM_LIT:0>, fs / <NUM_LIT:2>, f)<EOL>lowest, highest = np.inf, <NUM_LIT:0><EOL>left = max(freq_range[<NUM_LIT:0>], freq[<NUM_LIT:0>])<EOL>right = min(freq_range[<NUM_LIT:1>], freq[-<NUM_LIT:1>])<EOL>if fig is None:<EOL><INDENT>fig = new_figure()<EOL><DEDENT>axes = []<EOL>for i in range(m):<EOL><INDENT>if diagonal == <NUM_LIT:1>:<EOL><INDENT>jrange = [i]<EOL><DEDENT>elif diagonal == <NUM_LIT:0>:<EOL><INDENT>jrange = range(m)<EOL><DEDENT>else:<EOL><INDENT>jrange = [j for j in range(m) if j != i]<EOL><DEDENT>for j in jrange:<EOL><INDENT>if border:<EOL><INDENT>ax = fig.add_subplot(m+<NUM_LIT:1>, m+<NUM_LIT:1>, j + (i+<NUM_LIT:1>) * (m+<NUM_LIT:1>) + <NUM_LIT:2>)<EOL><DEDENT>else:<EOL><INDENT>ax = fig.add_subplot(m, m, j + i * m + <NUM_LIT:1>)<EOL><DEDENT>axes.append((i, j, ax))<EOL>if l == <NUM_LIT:0>:<EOL><INDENT>ax.plot(freq, a[i, j, :])<EOL>lowest = min(lowest, np.min(a[i, j, :]))<EOL>highest = max(highest, np.max(a[i, j, :]))<EOL><DEDENT>elif l == <NUM_LIT:1>:<EOL><INDENT>ax.fill_between(freq, <NUM_LIT:0>, a[<NUM_LIT:0>, i, j, :], facecolor=[<NUM_LIT>, <NUM_LIT>, <NUM_LIT>], alpha=<NUM_LIT>)<EOL>lowest = min(lowest, np.min(a[<NUM_LIT:0>, i, j, :]))<EOL>highest = max(highest, np.max(a[<NUM_LIT:0>, i, j, :]))<EOL><DEDENT>else:<EOL><INDENT>baseline, = ax.plot(freq, a[<NUM_LIT:0>, i, j, :])<EOL>ax.fill_between(freq, a[<NUM_LIT:1>, i, j, :], a[<NUM_LIT:2>, i, j, :], facecolor=baseline.get_color(), alpha=<NUM_LIT>)<EOL>lowest = min(lowest, np.min(a[:, i, j, :]))<EOL>highest = max(highest, np.max(a[:, i, j, :]))<EOL><DEDENT><DEDENT><DEDENT>for i, j, ax in axes:<EOL><INDENT>ax.xaxis.set_major_locator(MaxNLocator(max(<NUM_LIT:4>, <NUM_LIT:10> - m)))<EOL>ax.yaxis.set_major_locator(MaxNLocator(max(<NUM_LIT:4>, <NUM_LIT:10> - m)))<EOL>ax.set_ylim(<NUM_LIT:0>, highest)<EOL>ax.set_xlim(left, right)<EOL>if <NUM_LIT:0> < i < m - <NUM_LIT:1>:<EOL><INDENT>ax.set_xticklabels([])<EOL><DEDENT>ax.set_yticklabels([])<EOL>if i == <NUM_LIT:0>:<EOL><INDENT>ax.xaxis.set_tick_params(labeltop="<STR_LIT>", labelbottom="<STR_LIT>")<EOL><DEDENT>if j == m-<NUM_LIT:1>:<EOL><INDENT>ax.yaxis.set_tick_params(labelright="<STR_LIT>", labelleft="<STR_LIT>")<EOL><DEDENT>ax.tick_params(labelsize=<NUM_LIT:10>)<EOL><DEDENT>_plot_labels(fig, {'<STR_LIT:x>': <NUM_LIT:0.5>, '<STR_LIT:y>': <NUM_LIT>, '<STR_LIT:s>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'})<EOL>return fig<EOL>
Draw connectivity plots. Parameters ---------- a : array, shape (n_channels, n_channels, n_fft) or (1 or 3, n_channels, n_channels, n_fft) If a.ndim == 3, normal plots are created; if a.ndim == 4 and a.shape[0] == 1, the area between the curve and y=0 is filled transparently; if a.ndim == 4 and a.shape[0] == 3, a[0,:,:,:] is plotted normally and the area between a[1,:,:,:] and a[2,:,:,:] is filled transparently. fs : float Sampling frequency freq_range : (float, float) Frequency range to plot diagonal : {-1, 0, 1} If diagonal == -1 nothing is plotted on the diagonal (a[i,i,:] are not plotted), if diagonal == 0, a is plotted on the diagonal too (all a[i,i,:] are plotted), if diagonal == 1, a is plotted on the diagonal only (only a[i,i,:] are plotted) border : bool If border == True, the leftmost column and the topmost row are left blank fig : Figure object, optional Figure to plot into. If set to `None`, a new figure is created. Returns ------- fig : Figure object The figure into which was plotted.
f10410:m8
def plot_connectivity_significance(s, fs=<NUM_LIT:2>, freq_range=(-np.inf, np.inf), diagonal=<NUM_LIT:0>, border=False, fig=None):
a = np.atleast_3d(s)<EOL>[_, m, f] = a.shape<EOL>freq = np.linspace(<NUM_LIT:0>, fs / <NUM_LIT:2>, f)<EOL>left = max(freq_range[<NUM_LIT:0>], freq[<NUM_LIT:0>])<EOL>right = min(freq_range[<NUM_LIT:1>], freq[-<NUM_LIT:1>])<EOL>imext = (freq[<NUM_LIT:0>], freq[-<NUM_LIT:1>], -<NUM_LIT>, <NUM_LIT>)<EOL>if fig is None:<EOL><INDENT>fig = new_figure()<EOL><DEDENT>axes = []<EOL>for i in range(m):<EOL><INDENT>if diagonal == <NUM_LIT:1>:<EOL><INDENT>jrange = [i]<EOL><DEDENT>elif diagonal == <NUM_LIT:0>:<EOL><INDENT>jrange = range(m)<EOL><DEDENT>else:<EOL><INDENT>jrange = [j for j in range(m) if j != i]<EOL><DEDENT>for j in jrange:<EOL><INDENT>if border:<EOL><INDENT>ax = fig.add_subplot(m+<NUM_LIT:1>, m+<NUM_LIT:1>, j + (i+<NUM_LIT:1>) * (m+<NUM_LIT:1>) + <NUM_LIT:2>)<EOL><DEDENT>else:<EOL><INDENT>ax = fig.add_subplot(m, m, j + i * m + <NUM_LIT:1>)<EOL><DEDENT>axes.append((i, j, ax))<EOL>ax.imshow(s[i, j, np.newaxis], vmin=<NUM_LIT:0>, vmax=<NUM_LIT:2>, cmap='<STR_LIT>', aspect='<STR_LIT>', extent=imext, zorder=-<NUM_LIT>)<EOL>ax.xaxis.set_major_locator(MaxNLocator(max(<NUM_LIT:1>, <NUM_LIT:7> - m)))<EOL>ax.yaxis.set_major_locator(MaxNLocator(max(<NUM_LIT:1>, <NUM_LIT:7> - m)))<EOL>ax.set_xlim(left, right)<EOL>if <NUM_LIT:0> < i < m - <NUM_LIT:1>:<EOL><INDENT>ax.set_xticks([])<EOL><DEDENT>if <NUM_LIT:0> < j < m - <NUM_LIT:1>:<EOL><INDENT>ax.set_yticks([])<EOL><DEDENT>if j == <NUM_LIT:0>:<EOL><INDENT>ax.yaxis.tick_left()<EOL><DEDENT>if j == m-<NUM_LIT:1>:<EOL><INDENT>ax.yaxis.tick_right()<EOL><DEDENT><DEDENT><DEDENT>_plot_labels(fig,<EOL>{'<STR_LIT:x>': <NUM_LIT:0.5>, '<STR_LIT:y>': <NUM_LIT>, '<STR_LIT:s>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>'},<EOL>{'<STR_LIT:x>': <NUM_LIT>, '<STR_LIT:y>': <NUM_LIT:0.5>, '<STR_LIT:s>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>'})<EOL>return fig<EOL>
Plot significance. Significance is drawn as a background image where dark vertical stripes indicate frequencies where s evaluates to True. Parameters ---------- s : array, shape (n_channels, n_channels, n_fft), dtype bool Significance fs : float Sampling frequency freq_range : (float, float) Frequency range to plot diagonal : {-1, 0, 1} If diagonal == -1 nothing is plotted on the diagonal (s[i,i,:] are not plotted), if diagonal == 0, s is plotted on the diagonal too (all s[i,i,:] are plotted), if diagonal == 1, s is plotted on the diagonal only (only s[i,i,:] are plotted) border : bool If border == True, the leftmost column and the topmost row are left blank fig : Figure object, optional Figure to plot into. If set to `None`, a new figure is created. Returns ------- fig : Figure object The figure into which was plotted.
f10410:m9
def plot_connectivity_timespectrum(a, fs=<NUM_LIT:2>, crange=None, freq_range=(-np.inf, np.inf), time_range=None, diagonal=<NUM_LIT:0>, border=False, fig=None):
a = np.asarray(a)<EOL>[_, m, _, t] = a.shape<EOL>if crange is None:<EOL><INDENT>crange = [np.min(a), np.max(a)]<EOL><DEDENT>if time_range is None:<EOL><INDENT>t0 = <NUM_LIT:0><EOL>t1 = t<EOL><DEDENT>else:<EOL><INDENT>t0, t1 = time_range<EOL><DEDENT>f0, f1 = fs / <NUM_LIT:2>, <NUM_LIT:0><EOL>extent = [t0, t1, f0, f1]<EOL>ymin = max(freq_range[<NUM_LIT:0>], f1)<EOL>ymax = min(freq_range[<NUM_LIT:1>], f0)<EOL>if fig is None:<EOL><INDENT>fig = new_figure()<EOL><DEDENT>axes = []<EOL>for i in range(m):<EOL><INDENT>if diagonal == <NUM_LIT:1>:<EOL><INDENT>jrange = [i]<EOL><DEDENT>elif diagonal == <NUM_LIT:0>:<EOL><INDENT>jrange = range(m)<EOL><DEDENT>else:<EOL><INDENT>jrange = [j for j in range(m) if j != i]<EOL><DEDENT>for j in jrange:<EOL><INDENT>if border:<EOL><INDENT>ax = fig.add_subplot(m+<NUM_LIT:1>, m+<NUM_LIT:1>, j + (i+<NUM_LIT:1>) * (m+<NUM_LIT:1>) + <NUM_LIT:2>)<EOL><DEDENT>else:<EOL><INDENT>ax = fig.add_subplot(m, m, j + i * m + <NUM_LIT:1>)<EOL><DEDENT>axes.append(ax)<EOL>ax.imshow(a[i, j, :, :], vmin=crange[<NUM_LIT:0>], vmax=crange[<NUM_LIT:1>], aspect='<STR_LIT>', extent=extent)<EOL>ax.invert_yaxis()<EOL>ax.xaxis.set_major_locator(MaxNLocator(max(<NUM_LIT:1>, <NUM_LIT:9> - m)))<EOL>ax.yaxis.set_major_locator(MaxNLocator(max(<NUM_LIT:1>, <NUM_LIT:7> - m)))<EOL>ax.set_ylim(ymin, ymax)<EOL>if <NUM_LIT:0> < i < m - <NUM_LIT:1>:<EOL><INDENT>ax.set_xticks([])<EOL><DEDENT>if <NUM_LIT:0> < j < m - <NUM_LIT:1>:<EOL><INDENT>ax.set_yticks([])<EOL><DEDENT>if i == <NUM_LIT:0>:<EOL><INDENT>ax.xaxis.tick_top()<EOL><DEDENT>if i == m-<NUM_LIT:1>:<EOL><INDENT>ax.xaxis.tick_bottom()<EOL><DEDENT>if j == <NUM_LIT:0>:<EOL><INDENT>ax.yaxis.tick_left()<EOL><DEDENT>if j == m-<NUM_LIT:1>:<EOL><INDENT>ax.yaxis.tick_right()<EOL><DEDENT><DEDENT><DEDENT>_plot_labels(fig,<EOL>{'<STR_LIT:x>': <NUM_LIT:0.5>, '<STR_LIT:y>': <NUM_LIT>, '<STR_LIT:s>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>'},<EOL>{'<STR_LIT:x>': <NUM_LIT>, '<STR_LIT:y>': <NUM_LIT:0.5>, '<STR_LIT:s>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>'})<EOL>return fig<EOL>
Draw time/frequency connectivity plots. Parameters ---------- a : array, shape (n_channels, n_channels, n_fft, n_timesteps) Values to draw fs : float Sampling frequency crange : [int, int], optional Range of values covered by the colormap. If set to None, [min(a), max(a)] is substituted. freq_range : (float, float) Frequency range to plot time_range : (float, float) Time range covered by `a` diagonal : {-1, 0, 1} If diagonal == -1 nothing is plotted on the diagonal (a[i,i,:] are not plotted), if diagonal == 0, a is plotted on the diagonal too (all a[i,i,:] are plotted), if diagonal == 1, a is plotted on the diagonal only (only a[i,i,:] are plotted) border : bool If border == True, the leftmost column and the topmost row are left blank fig : Figure object, optional Figure to plot into. If set to `None`, a new figure is created. Returns ------- fig : Figure object The figure into which was plotted.
f10410:m10
def plot_circular(widths, colors, curviness=<NUM_LIT>, mask=True, topo=None, topomaps=None, axes=None, order=None):
colors = np.asarray(colors)<EOL>widths = np.asarray(widths)<EOL>mask = np.asarray(mask)<EOL>colors = np.maximum(colors, <NUM_LIT:0>)<EOL>colors = np.minimum(colors, <NUM_LIT:1>)<EOL>if len(widths.shape) > <NUM_LIT:2>:<EOL><INDENT>[n, m] = widths.shape<EOL><DEDENT>elif len(colors.shape) > <NUM_LIT:3>:<EOL><INDENT>[n, m, c] = widths.shape<EOL><DEDENT>elif len(mask.shape) > <NUM_LIT:2>:<EOL><INDENT>[n, m] = mask.shape<EOL><DEDENT>else:<EOL><INDENT>n = len(topomaps)<EOL>m = n<EOL><DEDENT>if not order:<EOL><INDENT>order = list(range(n))<EOL><DEDENT>assert(n == m)<EOL>if axes is None:<EOL><INDENT>fig = new_figure()<EOL>axes = fig.add_subplot(<NUM_LIT>)<EOL><DEDENT>axes.set_yticks([])<EOL>axes.set_xticks([])<EOL>axes.set_frame_on(False)<EOL>if len(colors.shape) < <NUM_LIT:3>:<EOL><INDENT>colors = np.tile(colors, (n,n,<NUM_LIT:1>))<EOL><DEDENT>if len(widths.shape) < <NUM_LIT:2>:<EOL><INDENT>widths = np.tile(widths, (n,n))<EOL><DEDENT>if len(mask.shape) < <NUM_LIT:2>:<EOL><INDENT>mask = np.tile(mask, (n,n))<EOL><DEDENT>np.fill_diagonal(mask, False)<EOL>if topo:<EOL><INDENT>alpha = <NUM_LIT> if n < <NUM_LIT:10> else <NUM_LIT><EOL>r = alpha * topo.head_radius / (np.sin(np.pi/n))<EOL><DEDENT>else:<EOL><INDENT>r = <NUM_LIT:1><EOL><DEDENT>for i in range(n):<EOL><INDENT>if topo:<EOL><INDENT>o = (r*np.sin(i*<NUM_LIT:2>*np.pi/n), r*np.cos(i*<NUM_LIT:2>*np.pi/n))<EOL>plot_topo(axes, topo, topomaps[order[i]], offset=o)<EOL><DEDENT><DEDENT>for i in range(n):<EOL><INDENT>for j in range(n):<EOL><INDENT>if not mask[order[i], order[j]]:<EOL><INDENT>continue<EOL><DEDENT>a0 = j*<NUM_LIT:2>*np.pi/n<EOL>a1 = i*<NUM_LIT:2>*np.pi/n<EOL>x0, y0 = r*np.sin(a0), r*np.cos(a0)<EOL>x1, y1 = r*np.sin(a1), r*np.cos(a1)<EOL>ex = (x0 + x1) / <NUM_LIT:2><EOL>ey = (y0 + y1) / <NUM_LIT:2><EOL>en = np.sqrt(ex**<NUM_LIT:2> + ey**<NUM_LIT:2>)<EOL>if en < <NUM_LIT>:<EOL><INDENT>en = <NUM_LIT:0><EOL>ex = y0 / r<EOL>ey = -x0 / r<EOL>w = -r<EOL><DEDENT>else:<EOL><INDENT>ex /= en<EOL>ey /= en<EOL>w = np.sqrt((x1-x0)**<NUM_LIT:2> + (y1-y0)**<NUM_LIT:2>) / <NUM_LIT:2><EOL>if x0*y1-y0*x1 < <NUM_LIT:0>:<EOL><INDENT>w = -w<EOL><DEDENT><DEDENT>d = en*(<NUM_LIT:1>-curviness)<EOL>h = en-d<EOL>t = np.linspace(-<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:100>)<EOL>dist = (t**<NUM_LIT:2>+<NUM_LIT:2>*t+<NUM_LIT:1>)*w**<NUM_LIT:2> + (t**<NUM_LIT:4>-<NUM_LIT:2>*t**<NUM_LIT:2>+<NUM_LIT:1>)*h**<NUM_LIT:2><EOL>tmask1 = dist >= (<NUM_LIT>*topo.head_radius)**<NUM_LIT:2><EOL>tmask2 = dist >= (<NUM_LIT>*topo.head_radius)**<NUM_LIT:2><EOL>tmask = np.logical_and(tmask1, tmask2[::-<NUM_LIT:1>])<EOL>t = t[tmask]<EOL>x = (h*t*t+d)*ex - w*t*ey<EOL>y = (h*t*t+d)*ey + w*t*ex<EOL>s = np.sqrt((x[-<NUM_LIT:2>] - x[-<NUM_LIT:1>])**<NUM_LIT:2> + (y[-<NUM_LIT:2>] - y[-<NUM_LIT:1>])**<NUM_LIT:2>)<EOL>width = widths[order[i], order[j]]<EOL>x1 = <NUM_LIT:0.1>*width*(x[-<NUM_LIT:2>] - x[-<NUM_LIT:1>] + y[-<NUM_LIT:2>] - y[-<NUM_LIT:1>])/s + x[-<NUM_LIT:1>]<EOL>y1 = <NUM_LIT:0.1>*width*(y[-<NUM_LIT:2>] - y[-<NUM_LIT:1>] - x[-<NUM_LIT:2>] + x[-<NUM_LIT:1>])/s + y[-<NUM_LIT:1>]<EOL>x2 = <NUM_LIT:0.1>*width*(x[-<NUM_LIT:2>] - x[-<NUM_LIT:1>] - y[-<NUM_LIT:2>] + y[-<NUM_LIT:1>])/s + x[-<NUM_LIT:1>]<EOL>y2 = <NUM_LIT:0.1>*width*(y[-<NUM_LIT:2>] - y[-<NUM_LIT:1>] + x[-<NUM_LIT:2>] - x[-<NUM_LIT:1>])/s + y[-<NUM_LIT:1>]<EOL>x = np.concatenate([x, [x1, x[-<NUM_LIT:1>], x2]])<EOL>y = np.concatenate([y, [y1, y[-<NUM_LIT:1>], y2]])<EOL>axes.plot(x, y, lw=width, color=colors[order[i], order[j]], solid_capstyle='<STR_LIT>', solid_joinstyle='<STR_LIT>')<EOL><DEDENT><DEDENT>return axes<EOL>
Circular connectivity plot. Topos are arranged in a circle, with arrows indicating connectivity. Parameters ---------- widths : float or array, shape (n_channels, n_channels) Width of each arrow. Can be a scalar to assign the same width to all arrows. colors : array, shape (n_channels, n_channels, 3) or (3) RGB color values for each arrow or one RGB color value for all arrows. curviness : float, optional Factor that determines how much arrows tend to deviate from a straight line. mask : array, dtype = bool, shape (n_channels, n_channels) Enable or disable individual arrows topo : :class:`~eegtopo.topoplot.Topoplot` This object draws the topo plot topomaps : array, shape = [w_pixels, h_pixels] Scalp-projected map axes : axis, optional Axis to draw into. A new figure is created by default. order : list of int Rearrange channels. Returns ------- axes : Axes object The axes into which was plotted.
f10410:m11
def plot_whiteness(var, h, repeats=<NUM_LIT:1000>, axis=None):
pr, q0, q = var.test_whiteness(h, repeats, True)<EOL>if axis is None:<EOL><INDENT>axis = current_axis()<EOL><DEDENT>pdf, _, _ = axis.hist(q0, <NUM_LIT:30>, normed=True, label='<STR_LIT>')<EOL>axis.plot([q,q], [<NUM_LIT:0>,np.max(pdf)], '<STR_LIT>', label='<STR_LIT>')<EOL>axis.set_title('<STR_LIT>'%pr)<EOL>axis.set_xlabel('<STR_LIT>')<EOL>axis.set_ylabel('<STR_LIT>')<EOL>axis.legend()<EOL>return pr<EOL>
Draw distribution of the Portmanteau whiteness test. Parameters ---------- var : :class:`~scot.var.VARBase`-like object Vector autoregressive model (VAR) object whose residuals are tested for whiteness. h : int Maximum lag to include in the test. repeats : int, optional Number of surrogate estimates to draw under the null hypothesis. axis : axis, optional Axis to draw into. By default draws into :func:`matplotlib.pyplot.gca()`. Returns ------- pr : float *p*-value of whiteness under the null hypothesis
f10410:m12
def _construct_var_eqns(data, p, delta=None):
t, m, l = np.shape(data)<EOL>n = (l - p) * t <EOL>rows = n if delta is None else n + m * p<EOL>x = np.zeros((rows, m * p))<EOL>for i in range(m):<EOL><INDENT>for k in range(<NUM_LIT:1>, p + <NUM_LIT:1>):<EOL><INDENT>x[:n, i * p + k - <NUM_LIT:1>] = np.reshape(data[:, i, p - k:-k].T, n)<EOL><DEDENT><DEDENT>if delta is not None:<EOL><INDENT>np.fill_diagonal(x[n:, :], delta)<EOL><DEDENT>y = np.zeros((rows, m))<EOL>for i in range(m):<EOL><INDENT>y[:n, i] = np.reshape(data[:, i, p:].T, n)<EOL><DEDENT>return x, y<EOL>
Construct VAR equation system (optionally with RLS constraint).
f10411:m0
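In matrix form (my reading of the construction above, not a statement from the source), the system solved downstream is the least-squares problem

    \hat{B} = \arg\min_B \; \lVert X B - Y \rVert_F^2 + \delta^2 \lVert B \rVert_F^2

where the penalty term appears exactly because, when delta is given, rows of \delta I are appended to X with matching all-zero rows in Y: the stacked residual then contributes \lVert \delta I B - 0 \rVert_F^2 = \delta^2 \lVert B \rVert_F^2.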
def _calc_q_statistic(x, h, nt):
t, m, n = x.shape<EOL>c0 = acm(x, <NUM_LIT:0>)<EOL>c0f = sp.linalg.lu_factor(c0, overwrite_a=False, check_finite=True)<EOL>q = np.zeros((<NUM_LIT:3>, h + <NUM_LIT:1>))<EOL>for l in range(<NUM_LIT:1>, h + <NUM_LIT:1>):<EOL><INDENT>cl = acm(x, l)<EOL>a = sp.linalg.lu_solve(c0f, cl)<EOL>b = sp.linalg.lu_solve(c0f, cl.T)<EOL>tmp = a.dot(b).trace()<EOL>q[<NUM_LIT:0>, l] = tmp<EOL>q[<NUM_LIT:1>, l] = tmp / (nt - l)<EOL>q[<NUM_LIT:2>, l] = tmp<EOL><DEDENT>q *= nt<EOL>q[<NUM_LIT:1>, :] *= (nt + <NUM_LIT:2>)<EOL>q = np.cumsum(q, axis=<NUM_LIT:1>)<EOL>for l in range(<NUM_LIT:1>, h+<NUM_LIT:1>):<EOL><INDENT>q[<NUM_LIT:2>, l] = q[<NUM_LIT:0>, l] + m * m * l * (l + <NUM_LIT:1>) / (<NUM_LIT:2> * nt)<EOL><DEDENT>return q<EOL>
Calculate Portmanteau statistics up to a lag of h.
f10411:m2
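For reference, the three rows of q computed above correspond, on my reading of the code, to the multivariate Box-Pierce, Ljung-Box, and Li-McLeod portmanteau statistics (n = nt samples, m channels, \hat{C}_l the lag-l autocovariance):

    Q_{BP}(h) = n \sum_{l=1}^{h} \operatorname{tr}\!\left( \hat{C}_0^{-1} \hat{C}_l \, \hat{C}_0^{-1} \hat{C}_l^{\top} \right)

    Q_{LB}(h) = n (n + 2) \sum_{l=1}^{h} \frac{1}{n - l} \operatorname{tr}\!\left( \hat{C}_0^{-1} \hat{C}_l \, \hat{C}_0^{-1} \hat{C}_l^{\top} \right)

    Q_{LM}(h) = Q_{BP}(h) + \frac{m^2 h (h + 1)}{2 n}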
def _calc_q_h0(n, x, h, nt, n_jobs=<NUM_LIT:1>, verbose=<NUM_LIT:0>, random_state=None):
rng = check_random_state(random_state)<EOL>par, func = parallel_loop(_calc_q_statistic, n_jobs, verbose)<EOL>q = par(func(rng.permutation(x.T).T, h, nt) for _ in range(n))<EOL>return np.array(q)<EOL>
Calculate q under the null hypothesis of whiteness.
f10411:m3
def copy(self):
other = self.__class__(self.p)<EOL>other.coef = self.coef.copy()<EOL>other.residuals = self.residuals.copy()<EOL>other.rescov = self.rescov.copy()<EOL>return other<EOL>
Create a copy of the VAR model.
f10411:c1:m1
def fit(self, data):
raise NotImplementedError('<STR_LIT>' +<EOL>str(self))<EOL>
Fit VAR model to data. .. warning:: This function must be implemented by derived classes. Parameters ---------- data : array, shape (trials, channels, samples) or (channels, samples) Epoched or continuous data set. Returns ------- self : :class:`VAR` The :class:`VAR` object to facilitate method chaining (see usage example).
f10411:c1:m2
def optimize(self, data):
raise NotImplementedError('<STR_LIT>' +<EOL>str(self))<EOL>
Optimize model fitting hyperparameters (e.g. regularization). .. warning:: This function must be implemented by derived classes. Parameters ---------- data : array, shape (trials, channels, samples) or (channels, samples) Epoched or continuous data set.
f10411:c1:m3
def from_yw(self, acms):
if len(acms) != self.p + <NUM_LIT:1>:<EOL><INDENT>raise ValueError("<STR_LIT>"<EOL>"<STR_LIT>".format(len(acms),<EOL>self.p))<EOL><DEDENT>n_channels = acms[<NUM_LIT:0>].shape[<NUM_LIT:0>]<EOL>acm = lambda l: acms[l] if l >= <NUM_LIT:0> else acms[-l].T<EOL>r = np.concatenate(acms[<NUM_LIT:1>:], <NUM_LIT:0>)<EOL>rr = np.array([[acm(m-k) for k in range(self.p)]<EOL>for m in range(self.p)])<EOL>rr = np.concatenate(np.concatenate(rr, -<NUM_LIT:2>), -<NUM_LIT:1>)<EOL>c = sp.linalg.solve(rr, r)<EOL>r = acm(<NUM_LIT:0>)<EOL>for k in range(self.p):<EOL><INDENT>bs = k * n_channels<EOL>r -= np.dot(c[bs:bs + n_channels, :].T, acm(k + <NUM_LIT:1>))<EOL><DEDENT>self.coef = np.concatenate([c[m::n_channels, :]<EOL>for m in range(n_channels)]).T<EOL>self.rescov = r<EOL>return self<EOL>
Determine VAR model from autocorrelation matrices by solving the Yule-Walker equations. Parameters ---------- acms : array, shape (n_lags, n_channels, n_channels) acms[l] contains the autocorrelation matrix at lag l. The highest lag must equal the model order. Returns ------- self : :class:`VAR` The :class:`VAR` object to facilitate method chaining (see usage example).
f10411:c1:m4
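These are the multivariate Yule-Walker equations: with C(l) the lag-l autocovariance matrix and C(-l) = C(l)^T, the coefficient blocks A_k and the residual covariance \Sigma solve (up to the transposition convention used for acm):

    \sum_{k=1}^{p} A_k \, C(m - k) = C(m), \qquad m = 1, \dots, p

    \Sigma = C(0) - \sum_{k=1}^{p} A_k \, C(-k)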
def simulate(self, l, noisefunc=None, random_state=None):
m, n = np.shape(self.coef)<EOL>p = n // m<EOL>try:<EOL><INDENT>l, t = l<EOL><DEDENT>except TypeError:<EOL><INDENT>t = <NUM_LIT:1><EOL><DEDENT>if noisefunc is None:<EOL><INDENT>rng = check_random_state(random_state)<EOL>noisefunc = lambda: rng.normal(size=(<NUM_LIT:1>, m))<EOL><DEDENT>n = l + <NUM_LIT:10> * p<EOL>y = np.zeros((n, m, t))<EOL>res = np.zeros((n, m, t))<EOL>for s in range(t):<EOL><INDENT>for i in range(p):<EOL><INDENT>e = noisefunc()<EOL>res[i, :, s] = e<EOL>y[i, :, s] = e<EOL><DEDENT>for i in range(p, n):<EOL><INDENT>e = noisefunc()<EOL>res[i, :, s] = e<EOL>y[i, :, s] = e<EOL>for k in range(<NUM_LIT:1>, p + <NUM_LIT:1>):<EOL><INDENT>y[i, :, s] += self.coef[:, (k - <NUM_LIT:1>)::p].dot(y[i - k, :, s])<EOL><DEDENT><DEDENT><DEDENT>self.residuals = res[<NUM_LIT:10> * p:, :, :].T<EOL>self.rescov = sp.cov(cat_trials(self.residuals).T, rowvar=False)<EOL>return y[<NUM_LIT:10> * p:, :, :].transpose([<NUM_LIT:2>, <NUM_LIT:1>, <NUM_LIT:0>])<EOL>
Simulate vector autoregressive (VAR) model. This function generates data from the VAR model. Parameters ---------- l : int or [int, int] Number of samples to generate. Can be a tuple or list, where l[0] is the number of samples and l[1] is the number of trials. noisefunc : func, optional This function is used to create the generating noise process. If set to None, Gaussian white noise with zero mean and unit variance is used. random_state : None | int | instance of np.random.RandomState, optional Seed for the noise generator; only used when noisefunc is None. Returns ------- data : array, shape (n_trials, n_channels, n_samples) Generated data.
f10411:c1:m5
def predict(self, data):
data = atleast_3d(data)<EOL>t, m, l = data.shape<EOL>p = int(np.shape(self.coef)[<NUM_LIT:1>] / m)<EOL>y = np.zeros(data.shape)<EOL>if t > l - p: <EOL><INDENT>for k in range(<NUM_LIT:1>, p + <NUM_LIT:1>):<EOL><INDENT>bp = self.coef[:, (k - <NUM_LIT:1>)::p]<EOL>for n in range(p, l):<EOL><INDENT>y[:, :, n] += np.dot(data[:, :, n - k], bp.T)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for k in range(<NUM_LIT:1>, p + <NUM_LIT:1>):<EOL><INDENT>bp = self.coef[:, (k - <NUM_LIT:1>)::p]<EOL>for s in range(t):<EOL><INDENT>y[s, :, p:] += np.dot(bp, data[s, :, (p - k):(l - k)])<EOL><DEDENT><DEDENT><DEDENT>return y<EOL>
Predict samples on actual data. The result of this function is used for calculating the residuals. Parameters ---------- data : array, shape (trials, channels, samples) or (channels, samples) Epoched or continuous data set. Returns ------- predicted : array, shape `data`.shape Data as predicted by the VAR model. Notes ----- Residuals are obtained by r = x - var.predict(x)
f10411:c1:m6
def is_stable(self):
m, mp = self.coef.shape<EOL>p = mp // m<EOL>assert(mp == m * p) <EOL>top_block = []<EOL>for i in range(p):<EOL><INDENT>top_block.append(self.coef[:, i::p])<EOL><DEDENT>top_block = np.hstack(top_block)<EOL>im = np.eye(m)<EOL>eye_block = im<EOL>for i in range(p - <NUM_LIT:2>):<EOL><INDENT>eye_block = sp.linalg.block_diag(im, eye_block)<EOL><DEDENT>eye_block = np.hstack([eye_block, np.zeros((m * (p - <NUM_LIT:1>), m))])<EOL>tmp = np.vstack([top_block, eye_block])<EOL>return np.all(np.abs(np.linalg.eig(tmp)[<NUM_LIT:0>]) < <NUM_LIT:1>)<EOL>
Test if VAR model is stable.

This function tests stability of the VAR model as described in [1]_.

Returns
-------
out : bool
    True if the model is stable.

References
----------
.. [1] H. Lütkepohl, "New Introduction to Multiple Time Series
   Analysis", 2005, Springer, Berlin, Germany.
f10411:c1:m7
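For intuition, the stability test assembles the VAR(p) companion matrix: coefficient blocks reordered by lag on top of a shifted identity. A self-contained sketch of the same check, using the column layout above (lag k occupies columns k-1::p):

    import numpy as np

    def companion_stable(coef, p):
        # coef has shape (m, m * p); lag k occupies columns k-1::p.
        m = coef.shape[0]
        top = np.hstack([coef[:, i::p] for i in range(p)])   # reorder by lag
        bottom = np.hstack([np.eye(m * (p - 1)),
                            np.zeros((m * (p - 1), m))])     # shift register
        companion = np.vstack([top, bottom])                 # (m*p, m*p)
        return bool(np.all(np.abs(np.linalg.eigvals(companion)) < 1))

    print(companion_stable(np.array([[0.9, 0.0], [0.5, 0.3]]), p=1))  # True
    print(companion_stable(np.array([[1.1, 0.0], [0.5, 0.3]]), p=1))  # False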
def _construct_eqns(self, data):
return _construct_var_eqns(data, self.p)<EOL>
Construct VAR equation system.
f10411:c1:m9
def set_locations(self, locations):
self.locations_ = locations<EOL>return self<EOL>
Set sensor locations.

Parameters
----------
locations : array_like
    3D Electrode locations. Each row holds the x, y, and z coordinates
    of an electrode.

Returns
-------
self : Workspace
    The Workspace object.
f10412:c0:m2
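A toy sketch of the expected format, assuming an already-constructed Workspace instance `ws` (its constructor is not part of these records); each row is one electrode's x, y, z position.

    import numpy as np

    locs = np.array([[ 0.0, 0.9, 0.3],
                     [-0.7, 0.0, 0.7],
                     [ 0.7, 0.0, 0.7]])
    ws.set_locations(locs)   # chainable: returns the Workspace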
def set_premixing(self, premixing):
self.premixing_ = premixing<EOL>return self<EOL>
Set premixing matrix.

The premixing matrix maps data to physical channels. If the data is
actual channel data, the premixing matrix can be set to identity. Use
this functionality if the data was pre-transformed with e.g. PCA.

Parameters
----------
premixing : array_like, shape = [n_signals, n_channels]
    Matrix that maps data signals to physical channels.

Returns
-------
self : Workspace
    The Workspace object.
f10412:c0:m3
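A hedged sketch of the PCA use case mentioned above: the workspace receives component signals, and the premixing matrix maps them back to physical channels. The hand-rolled PCA and the instance `ws` are illustration only; just `set_data` and `set_premixing` come from the records.

    import numpy as np

    rng = np.random.RandomState(0)
    x = rng.randn(20, 32, 500)                       # trials, channels, samples
    xc = x - x.mean(axis=2, keepdims=True)
    cov = np.mean([np.cov(trial) for trial in xc], axis=0)
    evals, evecs = np.linalg.eigh(cov)
    w = evecs[:, -8:]                                # keep 8 strongest components

    signals = np.einsum('cs,ncp->nsp', w, xc)        # project trials onto components
    ws.set_data(signals)                             # assumed Workspace instance
    ws.set_premixing(w.T)                            # (n_signals, n_channels)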
def set_data(self, data, cl=None, time_offset=<NUM_LIT:0>):
self.data_ = atleast_3d(data)<EOL>self.cl_ = np.asarray(cl if cl is not None else [None]*self.data_.shape[<NUM_LIT:0>])<EOL>self.time_offset_ = time_offset<EOL>self.var_model_ = None<EOL>self.var_cov_ = None<EOL>self.connectivity_ = None<EOL>self.trial_mask_ = np.ones(self.cl_.size, dtype=bool)<EOL>if self.unmixing_ is not None:<EOL><INDENT>self.activations_ = dot_special(self.unmixing_.T, self.data_)<EOL><DEDENT>return self<EOL>
Assign data to the workspace.

This function assigns a new data set to the workspace. Doing so
invalidates currently fitted VAR models, connectivity estimates, and
activations.

Parameters
----------
data : array-like, shape = [n_trials, n_channels, n_samples] or [n_channels, n_samples]
    EEG data set.
cl : list of valid dict keys
    Class labels associated with each trial.
time_offset : float, optional
    Trial starting time; used for labelling the x-axis of time/frequency
    plots.

Returns
-------
self : Workspace
    The Workspace object.
f10412:c0:m4
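A minimal sketch of assigning epoched data with class labels (`ws` is an assumed, already-constructed Workspace).

    import numpy as np

    data = np.random.randn(40, 16, 256)          # 40 trials, 16 channels, 256 samples
    cl = ['hand', 'foot'] * 20                   # one class label per trial
    ws.set_data(data, cl=cl, time_offset=-0.5)   # trials start 0.5 s before the cue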
def set_used_labels(self, labels):
mask = np.zeros(self.cl_.size, dtype=bool)<EOL>for l in labels:<EOL><INDENT>mask = np.logical_or(mask, self.cl_ == l)<EOL><DEDENT>self.trial_mask_ = mask<EOL>return self<EOL>
Specify which trials to use in subsequent analysis steps.

This function masks trials based on their class labels.

Parameters
----------
labels : list of class labels
    Marks all trials that have a label that is in the `labels` list for
    further processing.

Returns
-------
self : Workspace
    The Workspace object.
f10412:c0:m5
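Continuing that sketch, the mask restricts later fits to one class and can be widened again at any time.

    ws.set_used_labels(['hand'])            # only 'hand' trials enter later fits
    ws.set_used_labels(['hand', 'foot'])    # widen the mask to both classes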
def do_mvarica(self, varfit='<STR_LIT>', random_state=None):
if self.data_ is None:<EOL><INDENT>raise RuntimeError("<STR_LIT>")<EOL><DEDENT>result = mvarica(x=self.data_[self.trial_mask_, :, :],<EOL>cl=self.cl_[self.trial_mask_], var=self.var_,<EOL>reducedim=self.reducedim_, backend=self.backend_,<EOL>varfit=varfit, random_state=random_state)<EOL>self.mixing_ = result.mixing<EOL>self.unmixing_ = result.unmixing<EOL>self.var_ = result.b<EOL>self.connectivity_ = Connectivity(result.b.coef, result.b.rescov,<EOL>self.nfft_)<EOL>self.activations_ = dot_special(self.unmixing_.T, self.data_)<EOL>self.mixmaps_ = []<EOL>self.unmixmaps_ = []<EOL>return self<EOL>
Perform MVARICA.

Perform MVARICA source decomposition and VAR model fitting.

Parameters
----------
varfit : string
    Determines how to calculate the residuals for source decomposition.
    'ensemble' (default) fits one model to the whole data set, 'class'
    fits a different model for each class, and 'trial' fits a different
    model for each individual trial.
random_state : None, int, or numpy random state, optional
    Seed for the random number generation used in the decomposition.

Returns
-------
self : Workspace
    The Workspace object.

Raises
------
RuntimeError
    If the :class:`Workspace` instance does not contain data.

See Also
--------
:func:`mvarica` : MVARICA implementation
f10412:c0:m6
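An end-to-end sketch. The Workspace constructor arguments shown here ({'model_order': ...} plus reducedim and fs) are assumptions based on common usage and are not confirmed by these records; `set_data` and `do_mvarica` are documented above.

    import numpy as np
    import scot   # assumed top-level package exposing Workspace

    data = np.random.randn(40, 16, 256)
    cl = ['hand', 'foot'] * 20

    ws = scot.Workspace({'model_order': 5}, reducedim=4, fs=100)  # assumed signature
    ws.set_data(data, cl=cl)
    ws.do_mvarica(varfit='ensemble', random_state=0)  # one VAR fit over all trials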
def do_cspvarica(self, varfit='<STR_LIT>', random_state=None):
if self.data_ is None:<EOL><INDENT>raise RuntimeError("<STR_LIT>")<EOL><DEDENT>try:<EOL><INDENT>sorted(self.cl_)<EOL>for c in self.cl_:<EOL><INDENT>assert(c is not None)<EOL><DEDENT><DEDENT>except (TypeError, AssertionError):<EOL><INDENT>raise RuntimeError("<STR_LIT>")<EOL><DEDENT>result = cspvarica(x=self.data_, var=self.var_, cl=self.cl_,<EOL>reducedim=self.reducedim_, backend=self.backend_,<EOL>varfit=varfit, random_state=random_state)<EOL>self.mixing_ = result.mixing<EOL>self.unmixing_ = result.unmixing<EOL>self.var_ = result.b<EOL>self.connectivity_ = Connectivity(self.var_.coef, self.var_.rescov, self.nfft_)<EOL>self.activations_ = dot_special(self.unmixing_.T, self.data_)<EOL>self.mixmaps_ = []<EOL>self.unmixmaps_ = []<EOL>return self<EOL>
Perform CSPVARICA.

Perform CSPVARICA source decomposition and VAR model fitting.

Parameters
----------
varfit : string
    Determines how to calculate the residuals for source decomposition.
    'ensemble' (default) fits one model to the whole data set, 'class'
    fits a different model for each class, and 'trial' fits a different
    model for each individual trial.
random_state : None, int, or numpy random state, optional
    Seed for the random number generation used in the decomposition.

Returns
-------
self : Workspace
    The Workspace object.

Raises
------
RuntimeError
    If the :class:`Workspace` instance does not contain data, or if any
    trial is missing a class label.

See Also
--------
:func:`cspvarica` : CSPVARICA implementation
f10412:c0:m7
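CSPVARICA additionally requires every trial to carry a class label; continuing the sketch above:

    ws.set_used_labels(['hand', 'foot'])
    ws.do_cspvarica(varfit='class', random_state=0)   # one VAR model per class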