_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q38100
decimate
train
def decimate(x, q=10, n=4, k=0.8, filterfun=ss.cheby1):
    """
    scipy.signal.decimate like downsampling using filtfilt instead of lfilter,
    and filter coeffs from butterworth or chebyshev type 1.

    Parameters
    ----------
    x : numpy.ndarray
        Array to be downsampled along last axis.
    q : int
        Downsampling factor.
    n : int
        Filter order.
    k : float
        Aliasing filter critical frequency Wn will be set as Wn=k/q.
    filterfun : function
        `scipy.signal.filter_design.cheby1` or
        `scipy.signal.filter_design.butter` function

    Returns
    -------
    numpy.ndarray
        Array of downsampled signal.

    Raises
    ------
    TypeError
        If ``q`` is not an integer.
    Exception
        If ``filterfun`` is neither ``ss.butter`` nor ``ss.cheby1``.
    """
    if not isinstance(q, int):
        raise TypeError("q must be an integer")
    if n is None:
        # fall back to a first-order filter when the order is unset
        n = 1
    if filterfun == ss.butter:
        b, a = filterfun(n, k / q)
    elif filterfun == ss.cheby1:
        # 0.05 dB of passband ripple
        b, a = filterfun(n, 0.05, k / q)
    else:
        raise Exception('only ss.butter or ss.cheby1 supported')
    try:
        y = ss.filtfilt(b, a, x)
    except ValueError:
        # scipy < 0.9.0 cannot filter a multidim array in one call;
        # filter each row along the first axis separately
        y = np.array([ss.filtfilt(b, a, row) for row in x])
    # take every q-th sample along the last axis; the ellipsis handles
    # both 1-dim and n-dim arrays (replaces the former bare try/except)
    return y[..., ::q]
python
{ "resource": "" }
q38101
mean
train
def mean(data, units=False, time=False):
    """
    Average a 2-dim array over units and/or time.

    Parameters
    ----------
    data : numpy.ndarray
        1st axis unit, 2nd axis time
    units : bool
        Average over units
    time : bool
        Average over time

    Returns
    -------
    If units=True: 1 dim numpy.ndarray; time series.
    If time=True: 1 dim numpy.ndarray; series of unit means across time.
    If both True: float; unit and time mean.
    At least one of the flags must be set.

    Examples
    --------
    >>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True)
    array([ 2.5,  3.5,  4.5])
    >>> mean(np.array([[1, 2, 3], [4, 5, 6]]), time=True)
    array([ 2.,  5.])
    >>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True,time=True)
    3.5
    """
    assert(units is not False or time is not False)
    # grand mean first, then the single-axis reductions
    if units is True and time is True:
        return np.mean(data)
    if units is True and time is False:
        return np.mean(data, axis=0)
    if units is False and time is True:
        return np.mean(data, axis=1)
python
{ "resource": "" }
q38102
corrcoef
train
def corrcoef(time, crossf, integration_window=0.):
    """
    Calculate the correlation coefficient for given auto- and
    crosscorrelation functions. Standard settings yield the zero lag
    correlation coefficient. Setting integration_window > 0 yields the
    correlation coefficient of integrated auto- and crosscorrelation
    functions. The correlation coefficient between a zero signal with any
    other signal is defined as 0.

    Parameters
    ----------
    time : numpy.ndarray
        1 dim array of times corresponding to signal.
    crossf : numpy.ndarray
        Crosscorrelation functions, 1st axis first unit, 2nd axis second
        unit, 3rd axis times.
    integration_window : float
        Size of the integration window.

    Returns
    -------
    cc : numpy.ndarray
        2 dim array of correlation coefficient between two units.
    """
    N = len(crossf)
    cc = np.zeros(np.shape(crossf)[:-1])
    tbin = abs(time[1] - time[0])
    lim = int(integration_window / tbin)
    # index of the zero-lag bin; use floor division so mid is an int and
    # can be used in slices under both Python 2 and Python 3 (the old
    # `len(time)/2-1` / `np.floor(...)` forms yield floats on Python 3)
    if len(time) % 2 == 0:
        mid = len(time) // 2 - 1
    else:
        mid = len(time) // 2
    for i in range(N):
        ai = np.sum(crossf[i, i][mid - lim:mid + lim + 1])
        # baseline estimated from the left tail of the function
        offset_autoi = np.mean(crossf[i, i][:mid - 1])
        for j in range(N):
            cij = np.sum(crossf[i, j][mid - lim:mid + lim + 1])
            offset_cross = np.mean(crossf[i, j][:mid - 1])
            aj = np.sum(crossf[j, j][mid - lim:mid + lim + 1])
            offset_autoj = np.mean(crossf[j, j][:mid - 1])
            if ai > 0. and aj > 0.:
                cc[i, j] = (cij - offset_cross) / np.sqrt(
                    (ai - offset_autoi) * (aj - offset_autoj))
            else:
                # zero signal: correlation defined as 0
                cc[i, j] = 0.
    return cc
python
{ "resource": "" }
q38103
coherence
train
def coherence(freq, power, cross):
    """
    Calculate frequency resolved coherence for given power- and
    crossspectra.

    Parameters
    ----------
    freq : numpy.ndarray
        Frequencies, 1 dim array.
    power : numpy.ndarray
        Power spectra, 1st axis units, 2nd axis frequencies.
    cross : numpy.ndarray
        Cross spectra, 1st axis units, 2nd axis units, 3rd axis
        frequencies.

    Returns
    -------
    freq : numpy.ndarray
        1 dim array of frequencies.
    coh : numpy.ndarray
        ndim 3 array of coherences, 1st axis units, 2nd axis units,
        3rd axis frequencies.
    """
    N = len(power)
    # allocate with the input's dtype: a plain real np.zeros would
    # silently truncate complex cross spectra to their real part
    coh = np.zeros(np.shape(cross), dtype=np.asarray(cross).dtype)
    for i in range(N):
        for j in range(N):
            coh[i, j] = cross[i, j] / np.sqrt(power[i] * power[j])
    assert(len(freq) == len(coh[0, 0]))
    return freq, coh
python
{ "resource": "" }
q38104
add_sst_to_dot_display
train
def add_sst_to_dot_display(ax, sst, color= '0.', alpha= 1.):
    '''
    suitable for plotting fraction of neurons

    Append a dot display (spike raster) for the spike trains in ``sst`` to
    ``ax``, stacking the new rows on top of any raster rows already drawn
    on the axes.

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        Axes to draw into.
    sst : sequence of 1-dim arrays
        One array of spike times per neuron.
    color : str
        Marker face/edge color.
    alpha : float
        Marker transparency.

    Returns
    -------
    matplotlib.axes.Axes
        The axes that was drawn into.
    '''
    plt.sca(ax)
    N = len(sst)
    # Find the largest y value plotted so far, so the new raster rows can
    # be stacked above existing ones.  Walk the axes' lines from the most
    # recently added one backwards until a line with non-zero y data is
    # found (lines with all-zero data are skipped as "empty").
    current_ymax = 0
    counter = 0
    while True:
        if len(ax.get_lines()) != 0:
            data = ax.get_lines()[-1 - counter].get_data()[1]
            if np.sum(data) != 0:  # if not empty array
                current_ymax = np.max(data)
                break
            counter += 1
        else:
            break
    for i in np.arange(N):
        # one horizontal row of dots per neuron, offset by current_ymax
        plt.plot(sst[i], np.ones_like(sst[i]) + i + current_ymax - 1,
                 'k o', ms=0.5, mfc=color, mec=color, alpha=alpha)
    plt.xlabel(r'time (ms)')
    plt.ylabel(r'neuron id')
    return ax
python
{ "resource": "" }
q38105
empty_bar_plot
train
def empty_bar_plot(ax):
    '''Remove every x tick and x tick label from ``ax`` and return it.'''
    plt.sca(ax)
    current_axes = plt.gca()
    plt.setp(current_axes, xticks=[], xticklabels=[])
    return ax
python
{ "resource": "" }
q38106
add_to_bar_plot
train
def add_to_bar_plot(ax, x, number, name = '', color = '0.'):
    '''Append one bar of height ``number`` at position ``x`` to ``ax``,
    labelling the new tick with ``name``.'''
    plt.sca(ax)
    # extend the existing ticks/labels with the new bar's position/name
    new_ticks = np.append(ax.get_xticks(), np.array([x]))
    new_labels = [item.get_text() for item in ax.get_xticklabels()]
    new_labels.append(name)
    plt.setp(ax, xticks=new_ticks, xticklabels=new_labels)
    plt.bar([x], number, color=color, width=1.)
    return ax
python
{ "resource": "" }
q38107
add_to_line_plot
train
def add_to_line_plot(ax, x, y, color = '0.' , label = ''):
    '''Draw one line through the points (``x``, ``y``) on ``ax`` and
    return the axes.'''
    plt.sca(ax)
    line_kwargs = dict(color=color, label=label)
    plt.plot(x, y, **line_kwargs)
    return ax
python
{ "resource": "" }
q38108
colorbar
train
def colorbar(fig, ax, im, width=0.05, height=1.0, hoffset=0.01, voffset=0.0, orientation='vertical'):
    '''
    draw colorbar without resizing the axes object to make room

    kwargs:
    ::
        fig : matplotlib.figure.Figure
        ax : matplotlib.axes.AxesSubplot
        im : matplotlib.image.AxesImage
        width : float, colorbar width in fraction of ax width
        height : float, colorbar height in fraction of ax height
        hoffset : float, horizontal spacing to main axes in fraction of width
        voffset : float, vertical spacing to main axis in fraction of height
        orientation : str, 'horizontal' or 'vertical'

    return:
    ::
        object : colorbar handle
    '''
    # (the second, duplicated assignment of `rect` was removed)
    rect = np.array(ax.get_position().bounds)
    caxrect = [0] * 4
    caxrect[0] = rect[0] + rect[2] + hoffset * rect[2]  # left
    caxrect[1] = rect[1] + voffset * rect[3]            # bottom
    caxrect[2] = rect[2] * width                        # width
    caxrect[3] = rect[3] * height                       # height
    cax = fig.add_axes(caxrect)
    cb = fig.colorbar(im, cax=cax, orientation=orientation)
    return cb
python
{ "resource": "" }
q38109
frontiers_style
train
def frontiers_style():
    '''Apply the Frontiers journal matplotlib rc settings and return None.'''
    inchpercm = 2.54
    frontierswidth = 8.5
    textsize = 5
    titlesize = 7
    plt.rcdefaults()
    # square figure sized to the single-column Frontiers width (cm -> inch)
    figsize = [frontierswidth / inchpercm, frontierswidth / inchpercm]
    rc_settings = {
        'figure.figsize': figsize,
        'figure.dpi': 160,
        'xtick.labelsize': textsize,
        'ytick.labelsize': textsize,
        'font.size': textsize,
        'axes.labelsize': textsize,
        'axes.titlesize': titlesize,
        'axes.linewidth': 0.75,
        'lines.linewidth': 0.75,
        'legend.fontsize': textsize,
    }
    plt.rcParams.update(rc_settings)
    return None
python
{ "resource": "" }
q38110
annotate_subplot
train
def annotate_subplot(ax, ncols=1, nrows=1, letter='a', linear_offset=0.075, fontsize=8):
    '''Place the panel label ``letter`` just outside the top-left corner
    of ``ax`` (offset scaled by the subplot grid size), in axes coords.'''
    x_pos = -ncols * linear_offset
    y_pos = 1 + nrows * linear_offset
    ax.text(x_pos, y_pos, letter,
            horizontalalignment='center',
            verticalalignment='center',
            fontsize=fontsize,
            fontweight='demibold',
            transform=ax.transAxes)
python
{ "resource": "" }
q38111
get_colors
train
def get_colors(num=16, cmap=plt.cm.Set1):
    '''return a list of color tuples to use in plots

    Samples ``num`` colors evenly across ``cmap``.  When the project flag
    ``analysis_params.bw`` is set, alternates black and gray instead of
    using the colormap.

    NOTE(review): uses ``xrange`` - Python 2 only.
    '''
    colors = []
    for i in xrange(num):
        if analysis_params.bw:
            # black/white mode: alternate black and gray
            colors.append('k' if i % 2 == 0 else 'gray')
        else:
            # map index i onto the 0..256 colormap range
            i *= 256.
            if num > 1:
                i /= num - 1.
            else:
                i /= num
            colors.append(cmap(int(i)))
    return colors
python
{ "resource": "" }
q38112
Compressor.compress
train
def compress(self, data, windowLength = None):
    """Compresses text data using the LZ77 algorithm.

    Scans a sliding window of ``windowLength`` characters behind the
    current position for the longest match and emits either a literal
    character or a back-reference (prefix + encoded distance + encoded
    length).  ``windowLength`` defaults to ``self.defaultWindowLength``.
    Returns the compressed string.
    """
    if windowLength == None:
        windowLength = self.defaultWindowLength
    compressed = ""
    pos = 0
    # no reference may start within minStringLength of the end of data
    lastPos = len(data) - self.minStringLength
    while pos < lastPos:
        searchStart = max(pos - windowLength, 0);
        matchLength = self.minStringLength
        foundMatch = False
        bestMatchDistance = self.maxStringDistance
        bestMatchLength = 0
        newCompressed = None
        # scan the window for the longest match ending before pos
        while (searchStart + matchLength) < pos:
            m1 = data[searchStart : searchStart + matchLength]
            m2 = data[pos : pos + matchLength]
            isValidMatch = (m1 == m2 and matchLength < self.maxStringLength)
            if isValidMatch:
                # candidate still matches: try to extend it by one char
                matchLength += 1
                foundMatch = True
            else:
                # candidate ended: remember it if it is the best so far,
                # then restart matching from the next window position
                realMatchLength = matchLength - 1
                if foundMatch and realMatchLength > bestMatchLength:
                    bestMatchDistance = pos - searchStart - realMatchLength
                    bestMatchLength = realMatchLength
                matchLength = self.minStringLength
                searchStart += 1
                foundMatch = False
        if bestMatchLength:
            # emit a back-reference: prefix + 2-char distance + length
            newCompressed = (self.referencePrefix + self.__encodeReferenceInt(bestMatchDistance, 2) + self.__encodeReferenceLength(bestMatchLength))
            pos += bestMatchLength
        else:
            # literal character; escape the prefix char by doubling it
            if data[pos] != self.referencePrefix:
                newCompressed = data[pos]
            else:
                newCompressed = self.referencePrefix + self.referencePrefix
            pos += 1
        compressed += newCompressed
    # trailing chunk is copied verbatim with backticks escaped
    # NOTE(review): assumes referencePrefix == "`" - confirm
    return compressed + data[pos:].replace("`", "``")
python
{ "resource": "" }
q38113
Compressor.decompress
train
def decompress(self, data):
    """Decompresses LZ77 compressed text data

    Walks the compressed stream: plain characters are copied through,
    ``referencePrefix`` introduces either a back-reference (distance +
    length into the already-decompressed output) or, when doubled, a
    literal prefix character.  Returns the decompressed string.
    """
    decompressed = ""
    pos = 0
    while pos < len(data):
        currentChar = data[pos]
        if currentChar != self.referencePrefix:
            # plain literal character
            decompressed += currentChar
            pos += 1
        else:
            nextChar = data[pos + 1]
            if nextChar != self.referencePrefix:
                # back-reference: 2-char encoded distance, 1-char length
                distance = self.__decodeReferenceInt(data[pos + 1 : pos + 3], 2)
                length = self.__decodeReferenceLength(data[pos + 3])
                # copy `length` chars from earlier in the output
                start = len(decompressed) - distance - length
                end = start + length
                decompressed += decompressed[start : end]
                pos += self.minStringLength - 1
            else:
                # doubled prefix char encodes one literal prefix char
                decompressed += self.referencePrefix
                pos += 2
    return decompressed
python
{ "resource": "" }
q38114
main
train
def main():
    """
    Main entry point for running baseconvert as a command.

    Examples:

        $ python -m baseconvert -n 0.5 -i 10 -o 20 -s True
        0.A

        $ echo 3.1415926 | python -m baseconvert -i 10 -o 16 -d 3 -s True
        3.243
    """
    def str2bool(value):
        # argparse's ``type=bool`` is a trap: bool("False") is True, so
        # any non-empty string parses as True.  Parse the text explicitly.
        if value.lower() in ("true", "1", "yes", "y"):
            return True
        if value.lower() in ("false", "0", "no", "n"):
            return False
        raise argparse.ArgumentTypeError("Boolean value expected, got %r" % value)

    # Parse arguments
    parser = argparse.ArgumentParser(description="Convert rational numbers between bases.")
    parser.add_argument("-n", "--number", default=None,
                        help="The number to convert as a string, else stdin used.")
    parser.add_argument("-i", "--input-base", default=10,
                        help="The input base (default 10).")
    parser.add_argument("-o", "--output-base", default=10,
                        help="The output base (default 10).")
    parser.add_argument("-d", "--max_depth", default=10, type=int,
                        help="The maximum fractional digits (default 10).")
    parser.add_argument("-r", "--recurring", default=True, type=str2bool,
                        help="Boolean, if True will attempt to find recurring decimals (default True).")
    parser.add_argument("-s", "--string", type=str2bool,
                        help="Boolean, if True will output number as String, else as tuple (default False).")
    args = parser.parse_args()

    # bases may be given as "16" or "16.0"; normalize whole floats to int
    args.input_base = float(args.input_base)
    args.output_base = float(args.output_base)
    if args.input_base == int(args.input_base):
        args.input_base = int(args.input_base)
    if args.output_base == int(args.output_base):
        args.output_base = int(args.output_base)

    if (args.number):
        return base(args.number, args.input_base, args.output_base,
                    string=args.string, max_depth=args.max_depth,
                    recurring=args.recurring)
    elif not sys.stdin.isatty():
        # fall back to reading the number from stdin (pipe usage)
        return base(sys.stdin.read().strip(), args.input_base,
                    args.output_base, string=args.string,
                    max_depth=args.max_depth, recurring=args.recurring)
    else:
        raise ValueError("Please input a number!")
python
{ "resource": "" }
q38115
Counter.notify
train
def notify(self, value):
    """
    Increment or decrement the value, according to the given value's sign

    The value should be an integer, an attempt to cast it to integer
    will be made.  The update happens under the instance lock.
    """
    delta = int(value)
    with self.lock:
        self.value = self.value + delta
python
{ "resource": "" }
q38116
ConfigStore.get_config
train
def get_config(self):
    """
    Load user configuration or return default when not found.

    The result is cached on the instance, so the config file is executed
    at most once.

    NOTE(review): uses ``execfile`` - Python 2 only.

    :rtype: :class:`Configuration`
    """
    if not self._config:
        namespace = {}
        if os.path.exists(self.config_path):
            # execute the user's config file; it is expected to bind a
            # module-level name ``config``
            execfile(self.config_path, namespace)
        self._config = namespace.get('config') or Configuration()
    return self._config
python
{ "resource": "" }
q38117
CommandShell.open
train
def open(self, input_streams=['stdin'], output_streams=['stderr', 'stdout']):
    """
    Opens the remote shell

    Builds an ``rsp:Shell`` request from the instance settings
    (idle timeout, working directory, environment) plus the given stream
    names, sends it via ``self.session.create`` and stores the returned
    shell id on the instance.

    :param input_streams: stream names joined into ``rsp:InputStreams``
    :param output_streams: stream names joined into ``rsp:OutputStreams``

    NOTE(review): the mutable default arguments are safe here because the
    lists are only read, never mutated.
    """
    shell = dict()
    shell['rsp:InputStreams'] = " ".join(input_streams)
    shell['rsp:OutputStreams'] = " ".join(output_streams)
    # protocol values must be serialized as strings
    shell['rsp:IdleTimeout'] = str(self.idle_timeout)
    if self.working_directory is not None:
        shell['rsp:WorkingDirectory'] = str(self.working_directory)
    if self.environment is not None:
        variables = []
        for key, value in self.environment.items():
            variables.append({'#text': str(value), '@Name': key})
        shell['rsp:Environment'] = {'Variable': variables}
    response = self.session.create(self.resource, {'rsp:Shell': shell})
    self.__shell_id = response['rsp:Shell']['rsp:ShellId']
python
{ "resource": "" }
q38118
OAuthSession.start_login_server
train
def start_login_server(self, ):
    """Start a server that will get a request from a user logging in.

    This uses the Implicit Grant Flow of OAuth2. The user is asked to
    login to twitch and grant PyTwitcher authorization. Once the user
    agrees, he is redirected to an url. This server will respond to that
    url and get the oauth token.

    The server serves in another thread. To shut him down, call
    :meth:`TwitchSession.shutdown_login_server`.

    This sets the :data:`TwitchSession.login_server`,
    :data:`TwitchSession.login_thread` variables.

    :returns: The created server
    :rtype: :class:`BaseHTTPServer.HTTPServer`
    :raises: None
    """
    self.login_server = oauth.LoginServer(session=self)
    target = self.login_server.serve_forever
    self.login_thread = threading.Thread(target=target)
    # daemon thread so a hanging server never blocks interpreter exit
    self.login_thread.setDaemon(True)
    log.debug('Starting login server thread.')
    self.login_thread.start()
    # the docstring promises the created server as the return value,
    # but it was never returned
    return self.login_server
python
{ "resource": "" }
q38119
OAuthSession.shutdown_login_server
train
def shutdown_login_server(self, ):
    """Stop the login server, close its socket and join its thread.

    :returns: None
    :rtype: None
    :raises: None
    """
    log.debug('Shutting down the login server thread.')
    server = self.login_server
    server.shutdown()
    server.server_close()
    self.login_thread.join()
python
{ "resource": "" }
q38120
TwitchSession.token
train
def token(self, token):
    """Set the oauth token and the current_user

    :param token: the oauth token
    :type token: :class:`dict`
    :returns: None
    :rtype: None
    :raises: None
    """
    self._token = token
    if not token:
        return
    # a fresh token implies a (possibly different) logged-in user
    self.current_user = self.query_login_user()
python
{ "resource": "" }
q38121
TwitchSession.kraken_request
train
def kraken_request(self, method, endpoint, **kwargs):
    """Make a request to one of the kraken api endpoints.

    Sets the ``Accept`` header to :data:`TWITCH_HEADER_ACCEPT` and the
    ``Client-ID`` header to :data:`CLIENT_ID`; the url is
    :data:`TWITCH_KRAKENURL` plus ``endpoint``.

    :param method: the request method
    :param endpoint: the endpoint of the kraken api
    :param kwargs: keyword arguments of :meth:`requests.Session.request`
    :returns: a response object
    :rtype: :class:`requests.Response`
    :raises: :class:`requests.HTTPError`
    """
    headers = kwargs.setdefault('headers', {})
    headers['Accept'] = TWITCH_HEADER_ACCEPT
    headers['Client-ID'] = CLIENT_ID
    # https://github.com/justintv/Twitch-API#rate-limits
    return self.request(method, TWITCH_KRAKENURL + endpoint, **kwargs)
python
{ "resource": "" }
q38122
TwitchSession.usher_request
train
def usher_request(self, method, endpoint, **kwargs):
    """Make a request to one of the usher api endpoints.

    The url is :data:`TWITCH_USHERURL` plus ``endpoint``.

    :param method: the request method
    :param endpoint: the endpoint of the usher api
    :param kwargs: keyword arguments of :meth:`requests.Session.request`
    :returns: a response object
    :rtype: :class:`requests.Response`
    :raises: :class:`requests.HTTPError`
    """
    full_url = TWITCH_USHERURL + endpoint
    return self.request(method, full_url, **kwargs)
python
{ "resource": "" }
q38123
TwitchSession.oldapi_request
train
def oldapi_request(self, method, endpoint, **kwargs):
    """Make a request to one of the old api endpoints.

    Sets the ``Client-ID`` header; the url is :data:`TWITCH_APIURL` plus
    ``endpoint``.

    :param method: the request method
    :param endpoint: the endpoint of the old api
    :param kwargs: keyword arguments of :meth:`requests.Session.request`
    :returns: a response object
    :rtype: :class:`requests.Response`
    :raises: :class:`requests.HTTPError`
    """
    # https://github.com/justintv/Twitch-API#rate-limits
    kwargs.setdefault('headers', {})['Client-ID'] = CLIENT_ID
    return self.request(method, TWITCH_APIURL + endpoint, **kwargs)
python
{ "resource": "" }
q38124
TwitchSession.fetch_viewers
train
def fetch_viewers(self, game):
    """Query the viewer and channel summary of ``game`` and store the
    counts on the game object.

    :returns: the given game
    :rtype: :class:`models.Game`
    :raises: None
    """
    summary = self.kraken_request(
        'GET', 'streams/summary', params={'game': game.name}).json()
    game.viewers = summary['viewers']
    game.channels = summary['channels']
    return game
python
{ "resource": "" }
q38125
TwitchSession.search_games
train
def search_games(self, query, live=True):
    """Search for games similar to ``query`` and return them with viewer
    counts filled in.

    :param query: the query string
    :param live: if True, only return games live on at least one channel
    :returns: list of :class:`models.Game` instances
    :raises: None
    """
    response = self.kraken_request(
        'GET', 'search/games',
        params={'query': query,
                'type': 'suggest',
                'live': live})
    games = models.Game.wrap_search(response)
    for game in games:
        self.fetch_viewers(game)
    return games
python
{ "resource": "" }
q38126
TwitchSession.top_games
train
def top_games(self, limit=10, offset=0):
    """Return the current top games.

    :param limit: maximum amount of top games to query
    :param offset: the offset in the top games
    :returns: list of :class:`models.Game`
    :raises: None
    """
    params = {'limit': limit, 'offset': offset}
    response = self.kraken_request('GET', 'games/top', params=params)
    return models.Game.wrap_topgames(response)
python
{ "resource": "" }
q38127
TwitchSession.get_game
train
def get_game(self, name):
    """Return the game whose name matches ``name`` exactly, or None.

    :param name: the name of the game
    :returns: :class:`models.Game` | None
    :raises: None
    """
    for candidate in self.search_games(query=name, live=False):
        if candidate.name == name:
            return candidate
python
{ "resource": "" }
q38128
TwitchSession.get_channel
train
def get_channel(self, name):
    """Fetch the channel model for the given channel name.

    :param name: the channel name
    :returns: :class:`models.Channel`
    :raises: None
    """
    response = self.kraken_request('GET', 'channels/' + name)
    return models.Channel.wrap_get_channel(response)
python
{ "resource": "" }
q38129
TwitchSession.search_channels
train
def search_channels(self, query, limit=25, offset=0):
    """Search for channels matching ``query``.

    :param query: the query string
    :param limit: maximum number of results
    :param offset: offset for pagination
    :returns: list of :class:`models.Channel` instances
    :raises: None
    """
    payload = {'query': query, 'limit': limit, 'offset': offset}
    response = self.kraken_request('GET', 'search/channels', params=payload)
    return models.Channel.wrap_search(response)
python
{ "resource": "" }
q38130
TwitchSession.get_stream
train
def get_stream(self, channel):
    """Return the stream of the given channel.

    :param channel: channel name or :class:`models.Channel` instance
    :returns: the stream, or None if the channel is offline
    :rtype: :class:`models.Stream` | None
    :raises: None
    """
    if isinstance(channel, models.Channel):
        channel = channel.name
    response = self.kraken_request('GET', 'streams/' + channel)
    return models.Stream.wrap_get_stream(response)
python
{ "resource": "" }
q38131
TwitchSession.get_streams
train
def get_streams(self, game=None, channels=None, limit=25, offset=0):
    """Return streams filtered by game and/or channels, sorted by viewer
    count descending.

    :param game: the game or name of the game
    :param channels: list of channels/names (may be mixed)
    :param limit: maximum number of results
    :param offset: offset for pagination
    :returns: list of :class:`models.Stream`
    :raises: None
    """
    if isinstance(game, models.Game):
        game = game.name
    # collapse the channel list into a comma separated name string
    cparam = None
    if channels:
        names = [c.name if isinstance(c, models.Channel) else c
                 for c in channels]
        cparam = ','.join(names)
    params = {'limit': limit,
              'offset': offset,
              'game': game,
              'channel': cparam}
    response = self.kraken_request('GET', 'streams', params=params)
    return models.Stream.wrap_search(response)
python
{ "resource": "" }
q38132
TwitchSession.search_streams
train
def search_streams(self, query, hls=False, limit=25, offset=0):
    """Search for streams matching ``query``.

    :param query: the query string
    :param hls: if True, only return streams that have an hls stream
    :param limit: maximum number of results
    :param offset: offset for pagination
    :returns: list of :class:`models.Stream` instances
    :raises: None
    """
    payload = {'query': query,
               'hls': hls,
               'limit': limit,
               'offset': offset}
    response = self.kraken_request('GET', 'search/streams', params=payload)
    return models.Stream.wrap_search(response)
python
{ "resource": "" }
q38133
TwitchSession.followed_streams
train
def followed_streams(self, limit=25, offset=0):
    """Return the streams the current user follows.

    Needs authorization ``user_read``.

    :param limit: maximum number of results
    :param offset: offset for pagination
    :returns: list of :class:`models.Stream` instances
    :raises: :class:`exceptions.NotAuthorizedError`
    """
    response = self.kraken_request(
        'GET', 'streams/followed',
        params={'limit': limit, 'offset': offset})
    return models.Stream.wrap_search(response)
python
{ "resource": "" }
q38134
TwitchSession.get_user
train
def get_user(self, name):
    """Fetch the user model for the given username.

    :param name: the username
    :returns: :class:`models.User`
    :raises: None
    """
    response = self.kraken_request('GET', 'user/' + name)
    return models.User.wrap_get_user(response)
python
{ "resource": "" }
q38135
TwitchSession.get_playlist
train
def get_playlist(self, channel):
    """Return the m3u8 playlist for the given channel.

    :param channel: channel name or :class:`models.Channel` instance
    :returns: :class:`m3u8.M3U8`
    :raises: :class:`requests.HTTPError` if channel is offline.
    """
    if isinstance(channel, models.Channel):
        channel = channel.name
    # the usher endpoint requires a per-channel access token
    token, sig = self.get_channel_access_token(channel)
    params = {'token': token,
              'sig': sig,
              'allow_audio_only': True,
              'allow_source': True}
    response = self.usher_request(
        'GET', 'channel/hls/%s.m3u8' % channel, params=params)
    return m3u8.loads(response.text)
python
{ "resource": "" }
q38136
TwitchSession.get_quality_options
train
def get_quality_options(self, channel):
    """List available stream quality names for the given channel.

    Maps the twitch playlist group ids (e.g. ``chunked``) to the friendly
    names ``source``, ``high``, ``medium``, ``low``, ``mobile``, ``audio``.

    :param channel: channel name or :class:`models.Channel` instance
    :returns: list of quality option strings
    :raises: :class:`requests.HTTPError` if channel is offline.
    """
    optionmap = {'chunked': 'source',
                 'high': 'high',
                 'medium': 'medium',
                 'low': 'low',
                 'mobile': 'mobile',
                 'audio_only': 'audio'}
    playlist = self.get_playlist(channel)
    return [optionmap[entry.media[0].group_id]
            for entry in playlist.playlists]
python
{ "resource": "" }
q38137
TwitchSession.get_channel_access_token
train
def get_channel_access_token(self, channel):
    """Return the token and sig for the given channel.

    :param channel: channel name or :class:`models.Channel` instance
    :returns: (token, sig)
    :rtype: (:class:`unicode`, :class:`unicode`)
    :raises: None
    """
    if isinstance(channel, models.Channel):
        channel = channel.name
    payload = self.oldapi_request(
        'GET', 'channels/%s/access_token' % channel).json()
    return payload['token'], payload['sig']
python
{ "resource": "" }
q38138
TwitchSession.get_chat_server
train
def get_chat_server(self, channel):
    """Get an appropriate chat server for the given channel

    Usually the server is irc.twitch.tv. But because of the delicate
    twitch chat, they use a lot of servers. Big events are on special
    event servers. This method tries to find a good one.

    :param channel: the channel with the chat
    :type channel: :class:`models.Channel`
    :returns: the server address and port
    :rtype: (:class:`str`, :class:`int`)
    :raises: None
    """
    r = self.oldapi_request(
        'GET', 'channels/%s/chat_properties' % channel.name)
    json = r.json()
    servers = json['chat_servers']
    try:
        # the status endpoint reports the health of all chat servers
        r = self.get(TWITCH_STATUSURL)
    except requests.HTTPError:
        # status unavailable: fall back to the first advertised server
        log.debug('Error getting chat server status. Using random one.')
        address = servers[0]
    else:
        stats = [client.ChatServerStatus(**d) for d in r.json()]
        address = self._find_best_chat_server(servers, stats)
    server, port = address.split(':')
    return server, int(port)
python
{ "resource": "" }
q38139
TwitchSession._find_best_chat_server
train
def _find_best_chat_server(servers, stats): """Find the best from servers by comparing with the stats :param servers: a list if server adresses, e.g. ['0.0.0.0:80'] :type servers: :class:`list` of :class:`str` :param stats: list of server statuses :type stats: :class:`list` of :class:`chat.ChatServerStatus` :returns: the best server adress :rtype: :class:`str` :raises: None """ best = servers[0] # In case we sind no match with any status stats.sort() # gets sorted for performance for stat in stats: for server in servers: if server == stat: # found a chatserver that has the same address # than one of the chatserverstats. # since the stats are sorted for performance # the first hit is the best, thus break best = server break if best: # already found one, so no need to check the other # statuses, which are worse break return best
python
{ "resource": "" }
q38140
TwitchSession.get_emote_picture
train
def get_emote_picture(self, emote, size=1.0):
    """Download the picture for the given emote.

    :param emote: the emote object
    :type emote: :class:`pytwitcherapi.chat.message.Emote`
    :param size: picture size; choices are 1.0, 2.0, 3.0
    :returns: the raw picture data
    :rtype: :class:`str`
    :raises: None
    """
    url = 'http://static-cdn.jtvnw.net/emoticons/v1/%s/%s' % (emote.emoteid,
                                                              size)
    response = self.get(url)
    return response.content
python
{ "resource": "" }
q38141
strip_glob
train
def strip_glob(string, split_str=' '):
    """
    Strip glob portion in `string`.

    Glob wildcard spans matched by the module-level ``_GLOB_PORTION_RE``
    are replaced with ``split_str`` and the result is stripped of
    surrounding whitespace.

    >>> strip_glob('*glob*like')
    'glob like'
    >>> strip_glob('glob?')
    'glo'
    >>> strip_glob('glob[seq]')
    'glob'
    >>> strip_glob('glob[!seq]')
    'glob'

    :type string: str
    :rtype: str

    NOTE(review): the second doctest ('glo') implies the regex also
    consumes the character before ``?`` - confirm against
    ``_GLOB_PORTION_RE`` before relying on it.
    """
    string = _GLOB_PORTION_RE.sub(split_str, string)
    return string.strip()
python
{ "resource": "" }
q38142
daterange
train
def daterange(start, end, delta=timedelta(days=1), lower=Interval.CLOSED, upper=Interval.OPEN):
    """Lazily yield dates from ``start`` towards ``end`` in ``delta`` steps.

    ``lower``/``upper`` control whether the bounds are open or closed;
    values are produced on demand (generator).
    """
    span = Interval(lower=lower, lower_value=start,
                    upper_value=end, upper=upper)
    # skip the start value itself when the lower bound excludes it
    current = start if start in span else start + delta
    while current in span:
        yield current
        current = current + delta
python
{ "resource": "" }
q38143
new_metric
train
def new_metric(name, class_, *args, **kwargs):
    """Create and register a new metric of the given class.

    Raise DuplicateMetricError if the given name has been already
    registered before.

    Internal function - use "new_<type>" instead.
    """
    with LOCK:
        if name in REGISTRY:
            existing = REGISTRY[name]
            raise DuplicateMetricError(
                "Metric {} already exists of type {}".format(
                    name, type(existing).__name__))
        item = REGISTRY[name] = class_(*args, **kwargs)
        return item
python
{ "resource": "" }
q38144
delete_metric
train
def delete_metric(name):
    """Remove the named metric and return it (or None if unknown).

    Also drops the metric name from every tag association.
    """
    with LOCK:
        removed = REGISTRY.pop(name, None)
        # purge the name from all tag sets as well
        for _, tags in py3comp.iteritems(TAGS):
            if name in tags:
                tags.remove(name)
        return removed
python
{ "resource": "" }
q38145
new_histogram
train
def new_histogram(name, reservoir=None):
    """Register a new histogram metric backed by ``reservoir``.

    When no reservoir is given, a uniform reservoir of the default size
    is created.
    """
    if reservoir is None:
        reservoir = histogram.UniformReservoir(
            histogram.DEFAULT_UNIFORM_RESERVOIR_SIZE)
    return new_metric(name, histogram.Histogram, reservoir)
python
{ "resource": "" }
q38146
new_histogram_with_implicit_reservoir
train
def new_histogram_with_implicit_reservoir(name, reservoir_type='uniform', *reservoir_args, **reservoir_kwargs):
    """Build a reservoir from the given parameters, then a histogram
    metric that uses it."""
    backing = new_reservoir(reservoir_type, *reservoir_args,
                            **reservoir_kwargs)
    return new_histogram(name, backing)
python
{ "resource": "" }
q38147
new_reservoir
train
def new_reservoir(reservoir_type='uniform', *reservoir_args, **reservoir_kwargs):
    """Instantiate a reservoir of the requested type.

    Raises InvalidMetricError when the type is not registered in
    RESERVOIR_TYPES.
    """
    if reservoir_type not in RESERVOIR_TYPES:
        raise InvalidMetricError("Unknown reservoir type: {}".format(reservoir_type))
    reservoir_cls = RESERVOIR_TYPES[reservoir_type]
    return reservoir_cls(*reservoir_args, **reservoir_kwargs)
python
{ "resource": "" }
q38148
get_or_create_histogram
train
def get_or_create_histogram(name, reservoir_type, *reservoir_args, **reservoir_kwargs):
    """Return a histogram matching the given parameters, creating it if
    needed.

    Raises DuplicateMetricError when ``name`` already exists but is not a
    histogram, or has a different kind of reservoir.
    """
    wanted = new_reservoir(reservoir_type, *reservoir_args,
                           **reservoir_kwargs)
    try:
        return new_histogram(name, wanted)
    except DuplicateMetricError:
        existing = metric(name)
        if not isinstance(existing, histogram.Histogram):
            raise DuplicateMetricError(
                "Metric {!r} already exists of type {!r}".format(name, type(existing).__name__))
        if not existing.reservoir.same_kind(wanted):
            raise DuplicateMetricError(
                "Metric {!r} already exists with a different reservoir: {}".format(name, existing.reservoir))
        return existing
python
{ "resource": "" }
q38149
tag
train
def tag(name, tag_name):
    """
    Tag the named metric with the given tag.
    """
    with LOCK:
        metric(name)  # raises if <name> is not a registered metric
        TAGS.setdefault(tag_name, set()).add(name)
python
{ "resource": "" }
q38150
untag
train
def untag(name, tag_name):
    """Remove *tag_name* from the given metric.

    Return True if the metric was tagged, False otherwise.
    """
    with LOCK:
        tagged = TAGS.get(tag_name)
        if not tagged or name not in tagged:
            return False
        tagged.discard(name)
        # drop the tag entirely once no metric carries it
        if not tagged:
            del TAGS[tag_name]
        return True
python
{ "resource": "" }
q38151
quantity_yXL
train
def quantity_yXL(fig, left, bottom, top, quantity=params.L_yXL, label=r'$\mathcal{L}_{yXL}$'):
    '''make a bunch of image plots, each showing the spatial normalized
    connectivity of synapses onto postsynaptic cell type y from
    presynaptic population X in layer L'''
    layers = ['L1', 'L2/3', 'L4', 'L5', 'L6']
    # assess shared color limits across all cell types y
    vmin = 0
    vmax = 0
    for y in params.y:
        if quantity[y].max() > vmax:
            vmax = quantity[y].max()
    gs = gridspec.GridSpec(4, 4, left=left, bottom=bottom, top=top)
    # fix: cmap was referenced below but only defined in commented-out code
    # (NameError at runtime); masked (zero) entries are greyed out
    cmap = plt.get_cmap('hot', 20)
    cmap.set_bad('k', 0.5)
    for i, y in enumerate(params.y):
        # fix: integer division so the GridSpec index is valid on Python 3
        ax = fig.add_subplot(gs[i // 4, i % 4])
        masked_array = np.ma.array(quantity[y], mask=quantity[y] == 0)
        im = ax.pcolormesh(masked_array, vmin=vmin, vmax=vmax, cmap=cmap)
        ax.invert_yaxis()
        ax.axis(ax.axis('tight'))
        ax.xaxis.set_ticks_position('top')
        ax.set_xticks(np.arange(9) + 0.5)
        ax.set_yticks(np.arange(5) + 0.5)
        # layer labels only on the leftmost column of panels
        if i % 4 == 0:
            ax.set_yticklabels(layers)
            ax.set_ylabel('$L$', labelpad=0.)
        else:
            ax.set_yticklabels([])
        # presynaptic population labels only on the top row of panels
        if i < 4:
            ax.set_xlabel(r'$X$', labelpad=-1, fontsize=8)
            ax.set_xticklabels(params.X, rotation=270)
        else:
            ax.set_xticklabels([])
        ax.xaxis.set_label_position('top')
        # NOTE(review): original had transform=ax.transAxes commented out;
        # coordinates are therefore interpreted in data space — confirm intent
        ax.text(0.5, -0.13, r'$y=$' + y,
                horizontalalignment='center',
                verticalalignment='center',
                fontsize=5.5)
    # colorbar placed in its own axes to the right of the panel grid
    rect = np.array(ax.get_position().bounds)
    rect[0] += rect[2] + 0.01
    rect[1] = bottom
    rect[2] = 0.01
    rect[3] = top - bottom
    cax = fig.add_axes(rect)
    cbar = plt.colorbar(im, cax=cax)
    cbar.set_label(label, labelpad=0)
python
{ "resource": "" }
q38152
Connection.get
train
def get(self, url, proto='http'):
    """ Fetch *url* with an HTTP GET request and return the parsed response.

    Keyword arguments:
    url -- the Universal Resource Location
    proto -- the protocol (default 'http')
    """
    target = proto + self.base_uri + url
    self.last_response = self.session.get(
        target,
        headers=self.headers,
        cookies=self.cookies,
        allow_redirects=True,
        verify=self.verify)
    # parsed (soup) view of the response kept by the connection object
    return self.last_response_soup
python
{ "resource": "" }
q38153
Connection.post
train
def post(self, url, data, proto='http', form_name=None):
    """ Submit *data* to *url* with an HTTP POST request.

    Keyword arguments:
    url -- the Universal Resource Location
    data -- the form to be sent
    proto -- the protocol (default 'http')
    form_name -- the form name to search the default values
    """
    # merge user data into the form defaults found in the previous response
    form = self.translator.fill_form(
        self.last_response_soup,
        form_name if form_name else url,
        data)
    target = proto + self.base_uri + url
    self.last_response = self.session.post(
        target,
        headers=self.headers,
        cookies=self.cookies,
        data=form,
        allow_redirects=True,
        verify=self.verify)
    return self.last_response_soup
python
{ "resource": "" }
q38154
watch_record
train
def watch_record(indexer, use_polling=False):
    """
    Start watching `cfstore.record_path` and index new records as they appear.

    Blocks until interrupted with Ctrl-C (KeyboardInterrupt).

    :type indexer: rash.indexer.Indexer
    :param use_polling: use the polling backend instead of native FS events
    """
    # aliasing at import time keeps a single code path below and removes
    # the old no-op "Observer  # fool pyflakes" statement
    if use_polling:
        from watchdog.observers.polling import PollingObserver as Observer
    else:
        from watchdog.observers import Observer
    event_handler = RecordHandler(indexer)
    observer = Observer()
    observer.schedule(event_handler, path=indexer.record_path, recursive=True)
    indexer.logger.debug('Start observer.')
    observer.start()
    try:
        # the observer works in its own thread; keep the main thread alive
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        indexer.logger.debug('Got KeyboardInterrupt. Stopping observer.')
        observer.stop()
    indexer.logger.debug('Joining observer.')
    observer.join()
    indexer.logger.debug('Finish watching record.')
python
{ "resource": "" }
q38155
run_sim
train
def run_sim(morphology='patdemo/cells/j4a.hoc',
            cell_rotation=dict(x=4.99, y=-4.33, z=3.14),
            closest_idx=dict(x=-200., y=0., z=800.)):
    '''set up simple cell simulation with LFPs in the plane

    Parameters
    ----------
    morphology : str
        path to the morphology file passed to LFPy.Cell
    cell_rotation : dict
        rotation angles around x, y, z applied to the cell
    closest_idx : dict
        xyz target location; the synapse is placed on the nearest segment

    Returns
    -------
    tuple
        (cell, synapse, grid_electrode, point_electrode)
    '''
    # Create cell
    cell = LFPy.Cell(morphology=morphology, **cell_parameters)
    # Align cell
    cell.set_rotation(**cell_rotation)

    # Define synapse parameters
    synapse_parameters = {
        'idx': cell.get_closest_idx(**closest_idx),
        'e': 0.,                    # reversal potential
        'syntype': 'ExpSynI',       # synapse type
        'tau': 0.5,                 # synaptic time constant
        'weight': 0.0878,           # synaptic weight
        'record_current': True,     # record synapse current
    }

    # Create synapse and set time of synaptic input
    synapse = LFPy.Synapse(cell, **synapse_parameters)
    synapse.set_spike_times(np.array([1.]))

    # fix: print statements were Python 2 only syntax; print() works on 2 and 3
    print("running simulation...")
    cell.simulate(rec_imem=True, rec_isyn=True)

    grid_electrode = LFPy.RecExtElectrode(cell, **grid_electrode_parameters)
    point_electrode = LFPy.RecExtElectrode(cell, **point_electrode_parameters)

    grid_electrode.calc_lfp()
    point_electrode.calc_lfp()

    print("done")
    return cell, synapse, grid_electrode, point_electrode
python
{ "resource": "" }
q38156
multicompartment_params._synDelayParams
train
def _synDelayParams(self): ''' set up the detailed synaptic delay parameters, loc is mean delay, scale is std with low bound cutoff, assumes numpy.random.normal is used later ''' delays = {} #mean delays loc = np.zeros((len(self.y), len(self.X))) loc[:, 0] = self.delays[0] loc[:, 1::2] = self.delays[0] loc[:, 2::2] = self.delays[1] #standard deviations scale = loc * self.delay_rel_sd #prepare output delay_loc = {} for i, y in enumerate(self.y): delay_loc.update({y : loc[i]}) delay_scale = {} for i, y in enumerate(self.y): delay_scale.update({y : scale[i]}) return delay_loc, delay_scale
python
{ "resource": "" }
q38157
multicompartment_params._calcDepths
train
def _calcDepths(self): ''' return the cortical depth of each subpopulation ''' depths = self.layerBoundaries.mean(axis=1)[1:] depth_y = [] for y in self.y: if y in ['p23', 'b23', 'nb23']: depth_y = np.r_[depth_y, depths[0]] elif y in ['p4', 'ss4(L23)', 'ss4(L4)', 'b4', 'nb4']: depth_y = np.r_[depth_y, depths[1]] elif y in ['p5(L23)', 'p5(L56)', 'b5', 'nb5']: depth_y = np.r_[depth_y, depths[2]] elif y in ['p6(L4)', 'p6(L56)', 'b6', 'nb6']: depth_y = np.r_[depth_y, depths[3]] else: raise Exception, 'this aint right' return depth_y
python
{ "resource": "" }
q38158
multicompartment_params._yCellParams
train
def _yCellParams(self): ''' Return dict with parameters for each population. The main operation is filling in cell type specific morphology ''' #cell type specific parameters going into LFPy.Cell yCellParams = {} for layer, morpho, _, _ in self.y_zip_list: yCellParams.update({layer : self.cellParams.copy()}) yCellParams[layer].update({ 'morphology' : os.path.join(self.PATH_m_y, morpho), }) return yCellParams
python
{ "resource": "" }
q38159
PopulationSuper.run
train
def run(self):
    """
    Distribute individual cell simulations across ranks.

    Each rank simulates only the cell indices it owns
    (``self.RANK_CELLINDICES``); ranks synchronize at the end.

    Returns
    -------
    None
    """
    for gid in self.RANK_CELLINDICES:
        self.cellsim(gid)
    # wait until every rank has finished its share of cells
    COMM.Barrier()
python
{ "resource": "" }
q38160
PopulationSuper.calc_min_cell_interdist
train
def calc_min_cell_interdist(self, x, y, z):
    """
    Calculate, for each cell-body center, the distance to its nearest
    neighboring cell.

    Parameters
    ----------
    x, y, z : numpy.ndarray
        xyz-coordinates of each cell body.

    Returns
    -------
    numpy.ndarray
        Nearest-neighbor distance per cell.
    """
    n = self.POPULATION_SIZE
    min_cell_interdist = np.zeros(n)
    for i in range(n):
        dist = np.sqrt((x[i] - x)**2 + (y[i] - y)**2 + (z[i] - z)**2)
        # exclude the zero self-distance from the minimum
        dist[i] = np.inf
        min_cell_interdist[i] = dist.min()
    return min_cell_interdist
python
{ "resource": "" }
q38161
PopulationSuper.calc_signal_sum
train
def calc_signal_sum(self, measure='LFP'):
    """
    Superimpose each cell's contribution to the compound population signal,
    i.e., the population CSD or LFP

    Parameters
    ----------
    measure : str
        {'LFP', 'CSD'}: Either 'LFP' or 'CSD'.

    Returns
    -------
    numpy.ndarray
        The population-specific compound signal (valid on RANK 0 only,
        None elsewhere after the MPI reduction).
    """
    # compute the total signal of cells on this RANK
    if self.RANK_CELLINDICES.size > 0:
        for i, cellindex in enumerate(self.RANK_CELLINDICES):
            if i == 0:
                # fix: copy, so the in-place accumulation below does not
                # mutate the per-cell array stored in self.output
                data = self.output[cellindex][measure].copy()
            else:
                data += self.output[cellindex][measure]
    else:
        # this rank owns no cells; contribute zeros of the right shape
        # (fix: int() — a float shape raises TypeError on Python 3)
        data = np.zeros((len(self.electrodeParams['x']),
                         int(self.cellParams['tstopms'] / self.dt_output) + 1),
                        dtype=np.float32)
    # container for the full signal on RANK 0
    if RANK == 0:
        DATA = np.zeros_like(data, dtype=np.float32)
    else:
        DATA = None
    # sum to RANK 0 using automatic type discovery with MPI
    COMM.Reduce(data, DATA, op=MPI.SUM, root=0)
    return DATA
python
{ "resource": "" }
q38162
PopulationSuper.collect_data
train
def collect_data(self):
    """
    Collect LFPs, CSDs and soma traces from each simulated population,
    and save to file.

    Writes, on RANK 0 only: compound LFP/CSD HDF5 files, the somatic
    positions as a text file, and the per-cell rotations as HDF5.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    #collect some measurements resolved per file and save to file
    for measure in ['LFP', 'CSD']:
        if measure in self.savelist:
            self.collectSingleContribs(measure)
    #calculate lfp from all cell contribs
    lfp = self.calc_signal_sum(measure='LFP')
    #calculate CSD in every lamina
    if self.calculateCSD:
        csd = self.calc_signal_sum(measure='CSD')
    # only the master rank writes files; skip for empty populations
    if RANK == 0 and self.POPULATION_SIZE > 0:
        #saving LFPs
        if 'LFP' in self.savelist:
            fname = os.path.join(self.populations_path,
                                 self.output_file.format(self.y, 'LFP')+'.h5')
            f = h5py.File(fname, 'w')
            # sampling rate in Hz (dt_output is in ms)
            f['srate'] = 1E3 / self.dt_output
            f.create_dataset('data', data=lfp, compression=4)
            f.close()
            # free the potentially large compound array right away
            del lfp
            assert(os.path.isfile(fname))
            print('save lfp ok')
        #saving CSDs
        if 'CSD' in self.savelist and self.calculateCSD:
            fname = os.path.join(self.populations_path,
                                 self.output_file.format(self.y, 'CSD')+'.h5')
            f = h5py.File(fname, 'w')
            f['srate'] = 1E3 / self.dt_output
            f.create_dataset('data', data=csd, compression=4)
            f.close()
            del csd
            assert(os.path.isfile(fname))
            print('save CSD ok')
        #save the somatic placements:
        pop_soma_pos = np.zeros((self.POPULATION_SIZE, 3))
        keys = ['xpos', 'ypos', 'zpos']
        for i in range(self.POPULATION_SIZE):
            for j in range(3):
                pop_soma_pos[i, j] = self.pop_soma_pos[i][keys[j]]
        fname = os.path.join(self.populations_path,
                             self.output_file.format(self.y, 'somapos.gdf'))
        np.savetxt(fname, pop_soma_pos)
        assert(os.path.isfile(fname))
        print('save somapos ok')
        #save rotations using hdf5
        fname = os.path.join(self.populations_path,
                             self.output_file.format(self.y, 'rotations.h5'))
        f = h5py.File(fname, 'w')
        # one scalar per cell for each rotation axis
        f.create_dataset('x', (len(self.rotations),))
        f.create_dataset('y', (len(self.rotations),))
        f.create_dataset('z', (len(self.rotations),))
        for i, rot in enumerate(self.rotations):
            for key, value in list(rot.items()):
                f[key][i] = value
        f.close()
        assert(os.path.isfile(fname))
        print('save rotations ok')
    #resync threads
    COMM.Barrier()
python
{ "resource": "" }
q38163
Population.get_all_synIdx
train
def get_all_synIdx(self):
    """
    Auxilliary function to set up class attributes containing
    synapse locations given as LFPy.Cell compartment indices

    This function takes no inputs.

    Parameters
    ----------
    None

    Returns
    -------
    synIdx : dict
        `output[cellindex][populationindex][layerindex]` numpy.ndarray of
        compartment indices.

    See also
    --------
    Population.get_synidx, Population.fetchSynIdxCell
    """
    tic = time()
    #containers for synapse idxs existing on this rank
    synIdx = {}
    #ok then, we will draw random numbers across ranks, which have to
    #be unique per cell. Now, we simply record the random state,
    #change the seed per cell, and put the original state back below.
    randomstate = np.random.get_state()
    for cellindex in self.RANK_CELLINDICES:
        #set the random seed on for each cellindex
        np.random.seed(self.POPULATIONSEED + cellindex)
        #find synapse locations for cell in parallel
        synIdx[cellindex] = self.get_synidx(cellindex)
    #reset the random number generator
    np.random.set_state(randomstate)
    if RANK == 0:
        print('found synapse locations in %.2f seconds' % (time()-tic))
    #print the number of synapses per layer from which presynapse population
    # NOTE(review): `synIdx[cellindex]` is a dict keyed by X, so `enumerate`
    # below iterates its keys (strings), not the per-layer index arrays —
    # the verbose branch looks inconsistent with that; verify before enabling.
    if self.verbose:
        for cellindex in self.RANK_CELLINDICES:
            for i, synidx in enumerate(synIdx[cellindex]):
                print('to:\t%s\tcell:\t%i\tfrom:\t%s:' % (self.y,
                                                          cellindex,
                                                          self.X[i]),)
                idxcount = 0
                for idx in synidx:
                    idxcount += idx.size
                    print('\t%i' % idx.size,)
                print('\ttotal %i' % idxcount)
    return synIdx
python
{ "resource": "" }
q38164
Population.get_all_SpCells
train
def get_all_SpCells(self):
    """
    For each postsynaptic cell existing on this RANK, load or compute
    the presynaptic cell index for each synaptic connection

    This function takes no kwargs.

    Parameters
    ----------
    None

    Returns
    -------
    SpCells : dict
        `output[cellindex][populationname][layerindex]`, np.array of
        presynaptic cell indices.

    See also
    --------
    Population.fetchSpCells
    """
    tic = time()
    #container
    SpCells = {}
    #ok then, we will draw random numbers across ranks, which have to
    #be unique per cell. Now, we simply record the random state,
    #change the seed per cell, and put the original state back below.
    randomstate = np.random.get_state()
    for cellindex in self.RANK_CELLINDICES:
        #set the random seed on for each cellindex
        # offset by POPULATION_SIZE so the streams do not collide with
        # the seeds used for synapse placement in get_all_synIdx
        np.random.seed(self.POPULATIONSEED + cellindex + self.POPULATION_SIZE)
        SpCells[cellindex] = {}
        for i, X in enumerate(self.X):
            SpCells[cellindex][X] = self.fetchSpCells(
                self.networkSim.nodes[X], self.k_yXL[:, i])
    #reset the random number generator
    np.random.set_state(randomstate)
    if RANK == 0:
        print('found presynaptic cells in %.2f seconds' % (time()-tic))
    return SpCells
python
{ "resource": "" }
q38165
Population.get_all_synDelays
train
def get_all_synDelays(self):
    """
    Create and load arrays of connection delays per connection on this rank

    Get random normally distributed synaptic delays,
    returns dict of nested list of same shape as SpCells.

    Delays are rounded to dt.

    This function takes no kwargs.

    Parameters
    ----------
    None

    Returns
    -------
    dict
        output[cellindex][populationname][layerindex]`, np.array of
        delays per connection.

    See also
    --------
    numpy.random.normal
    """
    tic = time()
    #ok then, we will draw random numbers across ranks, which have to
    #be unique per cell. Now, we simply record the random state,
    #change the seed per cell, and put the original state back below.
    randomstate = np.random.get_state()
    #container
    delays = {}
    for cellindex in self.RANK_CELLINDICES:
        #set the random seed on for each cellindex
        # offset by 2*POPULATION_SIZE to keep this stream distinct from
        # the synapse-placement and presynaptic-cell streams
        np.random.seed(self.POPULATIONSEED + cellindex +
                       2*self.POPULATION_SIZE)
        delays[cellindex] = {}
        for j, X in enumerate(self.X):
            delays[cellindex][X] = []
            for i in self.k_yXL[:, j]:
                # work in units of dt so rounding to int rounds to dt
                loc = self.synDelayLoc[j]
                loc /= self.dt
                scale = self.synDelayScale[j]
                if scale is not None:
                    scale /= self.dt
                    delay = np.random.normal(loc, scale, i).astype(int)
                    # redraw any delay below one time step (low bound cutoff)
                    while np.any(delay < 1):
                        inds = delay < 1
                        delay[inds] = np.random.normal(loc, scale,
                                                       inds.sum()).astype(int)
                    delay = delay.astype(float)
                    delay *= self.dt
                else:
                    # no jitter requested: constant delay for all connections
                    delay = np.zeros(i) + self.synDelayLoc[j]
                delays[cellindex][X].append(delay)
    #reset the random number generator
    np.random.set_state(randomstate)
    if RANK == 0:
        print('found delays in %.2f seconds' % (time()-tic))
    return delays
python
{ "resource": "" }
q38166
Population.get_synidx
train
def get_synidx(self, cellindex):
    """
    Draw and return synapse locations for a single cell; the caller is
    expected to have seeded numpy.random with `POPULATIONSEED` + `cellindex`.

    Parameters
    ----------
    cellindex : int
        Index of cell object.

    Returns
    -------
    synidx : dict
        `LFPy.Cell` compartment indices per presynaptic population X.

    See also
    --------
    Population.get_all_synIdx, Population.fetchSynIdxCell
    """
    # a throwaway cell instance gives access to the geometry only
    cell = self.cellsim(cellindex, return_just_cell=True)
    synidx = {}
    for col, X in enumerate(self.X):
        synidx[X] = self.fetchSynIdxCell(cell=cell,
                                         nidx=self.k_yXL[:, col],
                                         synParams=self.synParams.copy())
    return synidx
python
{ "resource": "" }
q38167
Population.fetchSynIdxCell
train
def fetchSynIdxCell(self, cell, nidx, synParams):
    """
    Find synaptic placements on *cell*, one index array per layer.

    Synapses are placed within the depth bounds of each layer given by
    self.layerBoundaries; within a layer the cell distributes them with
    probability proportional to compartment membrane area.

    Parameters
    ----------
    cell : `LFPy.Cell` instance
    nidx : numpy.ndarray
        Numbers of synapses per layer.
    synParams : dict
        `LFPy.Synapse` parameters; only 'section' is read here.

    Returns
    -------
    syn_idx : list
        One array of compartment indices per layer.
    """
    syn_idx = []
    for i, zz in enumerate(self.layerBoundaries):
        count = nidx[i]
        if count == 0:
            # no synapses requested in this layer
            placements = np.array([], dtype=int)
        else:
            placements = cell.get_rand_idx_area_norm(
                section=synParams['section'],
                nidx=count,
                z_min=zz.min(),
                z_max=zz.max()).astype('int16')
        syn_idx.append(placements)
    return syn_idx
python
{ "resource": "" }
q38168
Population.cellsim
train
def cellsim(self, cellindex, return_just_cell = False):
    """
    Do the actual simulations of LFP, using synaptic spike times from
    network simulation.

    Parameters
    ----------
    cellindex : int
        cell index between 0 and population size-1.
    return_just_cell : bool
        If True, return only the `LFPy.Cell` object
        if False, run full simulation, return None.

    Returns
    -------
    None or `LFPy.Cell` object

    See also
    --------
    hybridLFPy.csd, LFPy.Cell, LFPy.Synapse, LFPy.RecExtElectrode
    """
    tic = time()
    cell = LFPy.Cell(**self.cellParams)
    cell.set_pos(**self.pop_soma_pos[cellindex])
    cell.set_rotation(**self.rotations[cellindex])
    if return_just_cell:
        #with several cells, NEURON can only hold one cell at the time
        # record section names per section and per segment so the geometry
        # survives after NEURON is handed the next cell
        allsecnames = []
        allsec = []
        for sec in cell.allseclist:
            allsecnames.append(sec.name())
            for seg in sec:
                allsec.append(sec.name())
        cell.allsecnames = allsecnames
        cell.allsec = allsec
        return cell
    else:
        self.insert_all_synapses(cellindex, cell)
        #electrode object where LFPs are calculated
        electrode = LFPy.RecExtElectrode(**self.electrodeParams)
        if self.calculateCSD:
            # temporary identity imem lets true_lam_csd derive per-segment
            # CSD coefficients; the attributes are removed again below
            cell.tvec = np.arange(cell.totnsegs)
            cell.imem = np.eye(cell.totnsegs)
            csdcoeff = csd.true_lam_csd(cell,
                                        self.populationParams['radius'],
                                        electrode.z)
            csdcoeff *= 1E6 #nA mum^-3 -> muA mm^-3 conversion
            del cell.tvec, cell.imem
            cell.simulate(electrode,
                          dotprodcoeffs=[csdcoeff],
                          **self.simulationParams)
            cell.CSD = helpers.decimate(cell.dotprodresults[0],
                                        q=self.decimatefrac)
        else:
            cell.simulate(electrode,
                          **self.simulationParams)
        # downsample the LFP to the output sampling rate
        cell.LFP = helpers.decimate(electrode.LFP,
                                    q=self.decimatefrac)
        cell.x = electrode.x
        cell.y = electrode.y
        cell.z = electrode.z
        cell.electrodecoeff = electrode.electrodecoeff
        #put all necessary cell output in output dict
        for attrbt in self.savelist:
            attr = getattr(cell, attrbt)
            if type(attr) == np.ndarray:
                self.output[cellindex][attrbt] = attr.astype('float32')
            else:
                try:
                    self.output[cellindex][attrbt] = attr
                except:
                    # fall back to the string representation when the
                    # attribute cannot be stored directly
                    self.output[cellindex][attrbt] = str(attr)
        # output sampling rate in Hz (dt_output is in ms)
        self.output[cellindex]['srate'] = 1E3 / self.dt_output
        print('cell %s population %s in %.2f s' % (cellindex, self.y,
                                                   time()-tic))
python
{ "resource": "" }
q38169
Population.insert_all_synapses
train
def insert_all_synapses(self, cellindex, cell):
    """
    Insert all synaptic events from all presynaptic layers on
    cell object with index `cellindex`.

    Parameters
    ----------
    cellindex : int
        cell index in the population.
    cell : `LFPy.Cell` instance
        Postsynaptic target cell.

    Returns
    -------
    None

    See also
    --------
    Population.insert_synapses
    """
    for i, X in enumerate(self.X):
        # fix: work on a copy so the shared template dict on self is not
        # mutated (get_synidx already copies; this call site did not)
        synParams = self.synParams.copy()
        synParams.update({
            'weight': self.J_yX[i],    # connection strength from X
            'tau': self.tau_yX[i],     # synaptic time constant from X
        })
        for j in range(len(self.synIdx[cellindex][X])):
            if self.synDelays is not None:
                synDelays = self.synDelays[cellindex][X][j]
            else:
                synDelays = None
            self.insert_synapses(cell=cell,
                                 cellindex=cellindex,
                                 synParams=synParams,
                                 idx=self.synIdx[cellindex][X][j],
                                 X=X,
                                 SpCell=self.SpCells[cellindex][X][j],
                                 synDelays=synDelays)
python
{ "resource": "" }
q38170
Population.insert_synapses
train
def insert_synapses(self, cell, cellindex, synParams, idx = np.array([]),
                    X='EX', SpCell = np.array([]),
                    synDelays = None):
    """
    Insert synapse with `parameters`=`synparams` on cell=cell, with
    segment indexes given by `idx`. `SpCell` and `SpTimes` picked from
    Brunel network simulation

    Parameters
    ----------
    cell : `LFPy.Cell` instance
        Postsynaptic target cell.
    cellindex : int
        Index of cell in population.
    synParams : dict
        Parameters passed to `LFPy.Synapse`.
    idx : numpy.ndarray
        Postsynaptic compartment indices.
    X : str
        presynaptic population name
    SpCell : numpy.ndarray
        Presynaptic spiking cells.
    synDelays : numpy.ndarray
        Per connection specific delays.

    Returns
    -------
    None

    See also
    --------
    Population.insert_all_synapses
    """
    #Insert synapses in an iterative fashion
    try:
        spikes = self.networkSim.dbs[X].select(SpCell[:idx.size])
    except AttributeError as ae:
        # fix: was Python 2 only syntax ``raise ae, '...'``; chain the
        # original error so the traceback keeps the root cause
        raise AttributeError(
            'could not open CachedNetwork database objects') from ae

    #apply synaptic delays
    if synDelays is not None and idx.size > 0:
        for i, delay in enumerate(synDelays):
            if spikes[i].size > 0:
                spikes[i] += delay

    #create synapse events:
    for i in range(idx.size):
        if len(spikes[i]) == 0:
            # no spike times; skip this presynaptic network cell
            pass
        else:
            synParams.update({'idx' : idx[i]})
            # Create synapse(s) and setting times using class LFPy.Synapse
            synapse = LFPy.Synapse(cell, **synParams)
            #SpCell is a vector, or do not exist
            synapse.set_spike_times(spikes[i] + cell.tstartms)
python
{ "resource": "" }
q38171
run
train
def run(func, keys, max_procs=None, show_proc=False, affinity=None, **kwargs):
    """
    Provide interface for multiprocessing: fan *func* out over every
    combination of the iterable kwargs named in *keys*, in batches of
    at most *max_procs* concurrent processes.

    Args:
        func: callable functions
        keys: keys in kwargs that want to use process
        max_procs: max number of processes (defaults to the CPU count)
        show_proc: whether to show process
        affinity: CPU affinity bitmask (Windows only; uses win32 APIs)
        **kwargs: kwargs for func
    """
    if max_procs is None:
        max_procs = cpu_count()
    # one kwargs dict per combination of the expanded keys
    kw_arr = saturate_kwargs(keys=keys, **kwargs)
    if len(kw_arr) == 0:
        return
    if isinstance(affinity, int):
        # pin this (parent) process to the given CPUs before forking
        win32process.SetProcessAffinityMask(win32api.GetCurrentProcess(), affinity)
    task_queue = queue.Queue()
    while len(kw_arr) > 0:
        # launch up to max_procs workers ...
        for _ in range(max_procs):
            if len(kw_arr) == 0:
                break
            kw = kw_arr.pop(0)
            p = Process(target=func, kwargs=kw)
            p.start()
            sys.stdout.flush()
            task_queue.put(p)
            if show_proc:
                signature = ', '.join([f'{k}={v}' for k, v in kw.items()])
                print(f'[{func.__name__}] ({signature})')
        # ... then wait for the whole batch before starting the next one
        while not task_queue.empty():
            p = task_queue.get()
            p.join()
python
{ "resource": "" }
q38172
saturate_kwargs
train
def saturate_kwargs(keys, **kwargs):
    """
    Saturate all combinations of kwargs.

    Args:
        keys: keys in kwargs whose (iterable) values are expanded into
              the cartesian product
        **kwargs: kwargs for func; non-expanded entries are copied into
              every resulting dict

    Returns:
        list of kwargs dicts, one per combination (empty list when no
        valid key expands)
    """
    if isinstance(keys, str):
        keys = [keys]
    # keep only keys that exist in kwargs and point at iterables
    expand = [k for k in keys
              if k in kwargs and hasattr(kwargs.get(k, None), '__iter__')]
    if len(expand) == 0:
        return []
    # cartesian product over positions, then index into the value sequences
    index_combos = product(*(range(len(kwargs[k])) for k in expand))
    expanded = [
        {k: kwargs[k][pos] for k, pos in zip(expand, combo)}
        for combo in index_combos
    ]
    # everything not expanded rides along unchanged
    fixed = {k: v for k, v in kwargs.items() if k not in expand}
    return [{**d, **fixed} for d in expanded]
python
{ "resource": "" }
q38173
quick_idw
train
def quick_idw(input_geojson_points, variable_name, power, nb_class,
              nb_pts=10000, resolution=None, disc_func=None, mask=None,
              user_defined_breaks=None, variable_name2=None,
              output='GeoJSON', **kwargs):
    """
    One-shot wrapper around the SmoothIdw object: read a file (or
    GeoDataFrame) of point values and an optional mask, and return the
    smoothed inverse-distance-weighted representation.

    Parameters
    ----------
    input_geojson_points : str
        Path to the input file (Points/Polygons) or a GeoDataFrame;
        must contain a relevant numerical field.
    variable_name : str
        Name of the (numerical) variable to smooth.
    power : int or float
        The power of the IDW function.
    nb_class : int, optionnal
        Number of classes (defaults to 8 when unset).
    nb_pts : int, optionnal
        Number of points for the underlying grid (default: 10000).
    resolution : int, optionnal
        Grid resolution in meters; when unset a resolution yielding
        roughly 10000 grid points is chosen (default: None).
    disc_func : str, optionnal
        Classification function deciding the contour break values
        (default: None).
    mask : str, optionnal
        Path to a Polygons file (or GeoDataFrame) used as clipping mask
        (default: None).
    user_defined_breaks : list or tuple, optionnal
        Ordered breaks for the contours; overrides `nb_class` and
        `disc_func` (default: None).
    variable_name2 : str, optionnal
        Name of a 2nd numerical variable; its computed values divide
        those of the first variable (default: None).
    output : string, optionnal
        Expected output type, case-insensitive, one of
        {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON").

    Returns
    -------
    smoothed_result : bytes or GeoDataFrame
        The result, dumped as GeoJSON (utf-8 encoded) or as a
        GeoDataFrame.

    Examples
    --------
    >>> result = quick_idw("some_file.geojson", "some_variable", power=2,
    ...                    nb_class=8)
    """
    smoother = SmoothIdw(input_geojson_points, variable_name, power,
                         nb_pts, resolution, variable_name2, mask, **kwargs)
    return smoother.render(nb_class=nb_class,
                           disc_func=disc_func,
                           user_defined_breaks=user_defined_breaks,
                           output=output)
python
{ "resource": "" }
q38174
quick_stewart
train
def quick_stewart(input_geojson_points, variable_name, span, beta=2,
                  typefct='exponential', nb_class=None, nb_pts=10000,
                  resolution=None, mask=None, user_defined_breaks=None,
                  variable_name2=None, output="GeoJSON", **kwargs):
    """
    One-shot wrapper around the SmoothStewart object: read a file (or
    GeoDataFrame) of point values and an optional mask, and return the
    smoothed potential representation.

    Parameters
    ----------
    input_geojson_points : str
        Path to the input file (Points/Polygons) or a GeoDataFrame;
        must contain a relevant numerical field.
    variable_name : str
        Name of the (numerical) variable to smooth.
    span : int
        The span (meters).
    beta : float
        The beta!
    typefct : str, optionnal
        Interaction function type, {"exponential", "pareto"}
        (default: "exponential").
    nb_class : int, optionnal
        Number of classes (defaults to 8 when unset; default: None).
    nb_pts : int, optionnal
        Number of points for the underlying grid (default: 10000).
    resolution : int, optionnal
        Grid resolution in meters; when unset a resolution yielding
        roughly 10000 grid points is chosen (default: None).
    mask : str, optionnal
        Path to a Polygons file (or GeoDataFrame) used as clipping mask
        (default: None).
    user_defined_breaks : list or tuple, optionnal
        Ordered breaks for the contours; overrides `nb_class`
        (default: None).
    variable_name2 : str, optionnal
        Name of a 2nd numerical variable; its computed values divide
        those of the first variable (default: None).
    output : string, optionnal
        Expected output type, case-insensitive, one of
        {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON").

    Returns
    -------
    smoothed_result : bytes or GeoDataFrame
        The result, dumped as GeoJSON (utf-8 encoded) or as a
        GeoDataFrame.

    Examples
    --------
    >>> result = quick_stewart("some_file.geojson", "some_variable",
    ...                        span=12500, beta=3, typefct="exponential")
    """
    smoother = SmoothStewart(input_geojson_points, variable_name, span,
                             beta, typefct, nb_pts, resolution,
                             variable_name2, mask, **kwargs)
    return smoother.render(nb_class=nb_class,
                           user_defined_breaks=user_defined_breaks,
                           output=output)
python
{ "resource": "" }
q38175
make_regular_points
train
def make_regular_points(bounds, resolution, longlat=True):
    """
    Return a regular grid of points within *bounds* at the requested
    resolution.

    Parameters
    ----------
    bounds : 4-floats tuple
        The bbox of the grid, as xmin, ymin, xmax, ymax.
    resolution : int
        The resolution to use, in the same unit as `bounds`.
    longlat : bool, optionnal
        When True, extents are measured with the haversine distance,
        otherwise with the euclidean norm (default: True).

    Returns
    -------
    points : numpy.array
        Coordinates along each axis.
    shape : 2-floats tuple
        The number of points on each dimension (height, width).
    """
    minlon, minlat, maxlon, maxlat = bounds
    # pad the bbox by 1/8th of its extent on every side
    pad_lon = (maxlon - minlon) / 8
    pad_lat = (maxlat - minlat) / 8
    minlon, maxlon = minlon - pad_lon, maxlon + pad_lon
    minlat, maxlat = minlat - pad_lat, maxlat + pad_lat
    mid_lon = (maxlon + minlon) / 2
    mid_lat = (maxlat + minlat) / 2
    if longlat:
        height = hav_dist(np.array([mid_lon, minlat]),
                          np.array([mid_lon, maxlat]))
        width = hav_dist(np.array([minlon, mid_lat]),
                         np.array([maxlon, mid_lat]))
    else:
        height = np.linalg.norm(np.array([mid_lon, minlat])
                                - np.array([mid_lon, maxlat]))
        width = np.linalg.norm(np.array([minlon, mid_lat])
                               - np.array([maxlon, mid_lat]))
    nb_x = int(round(width / resolution))
    nb_y = int(round(height / resolution))
    # re-balance strongly anisotropic grids
    if nb_y * 0.6 > nb_x:
        nb_x = int(nb_x + nb_x / 3)
    elif nb_x * 0.6 > nb_y:
        nb_y = int(nb_y + nb_y / 3)
    return (np.linspace(minlon, maxlon, nb_x),
            np.linspace(minlat, maxlat, nb_y),
            (nb_y, nb_x))
python
{ "resource": "" }
q38176
identify
train
def identify(s):
    """Identify what kind of Chinese characters a string contains.

    Examines the Chinese characters of *s* (as found in the CC-CEDICT
    dictionary; anything else is ignored) and returns one of the
    constants :data:`BOTH`, :data:`TRADITIONAL`, :data:`SIMPLIFIED`, or
    :data:`MIXED`. When *s* contains no Chinese characters,
    :data:`UNKNOWN` is returned.

    Because the two character systems overlap, a string of Simplified
    characters may identify as :data:`SIMPLIFIED` or :data:`BOTH`; the
    helpers :func:`is_traditional`, :func:`is_simplified`, and
    :func:`has_chinese` make identity tests easier.
    """
    hanzi = _get_hanzi(s)
    if not hanzi:
        return UNKNOWN
    # check the most specific (shared) set first, then each system
    for charset, verdict in ((_SHARED_CHARACTERS, BOTH),
                             (_TRADITIONAL_CHARACTERS, TRADITIONAL),
                             (_SIMPLIFIED_CHARACTERS, SIMPLIFIED)):
        if hanzi.issubset(charset):
            return verdict
    return MIXED
python
{ "resource": "" }
q38177
is_traditional
train
def is_traditional(s):
    """Check if a string's Chinese characters are Traditional.

    This is equivalent to:
        >>> identify('foo') in (TRADITIONAL, BOTH)
    """
    hanzi = _get_hanzi(s)
    return bool(hanzi) and (hanzi.issubset(_SHARED_CHARACTERS)
                            or hanzi.issubset(_TRADITIONAL_CHARACTERS))
python
{ "resource": "" }
q38178
is_simplified
train
def is_simplified(s):
    """Check if a string's Chinese characters are Simplified.

    This is equivalent to:
        >>> identify('foo') in (SIMPLIFIED, BOTH)
    """
    hanzi = _get_hanzi(s)
    return bool(hanzi) and (hanzi.issubset(_SHARED_CHARACTERS)
                            or hanzi.issubset(_SIMPLIFIED_CHARACTERS))
python
{ "resource": "" }
q38179
params.set_default_fig_style
train
def set_default_fig_style(self):
    '''Set the default figure size: a square one Frontiers column wide.'''
    side = self.frontierswidth / self.inchpercm  # cm -> inches
    plt.rcParams.update({'figure.figsize': [side, side]})
python
{ "resource": "" }
q38180
params.set_large_fig_style
train
def set_large_fig_style(self):
    '''Set a double-width figure size (2x column width, 1x height).'''
    side = self.frontierswidth / self.inchpercm  # cm -> inches
    plt.rcParams.update({'figure.figsize': [side * 2, side]})
python
{ "resource": "" }
q38181
params.set_broad_fig_style
train
def set_broad_fig_style(self):
    '''Set a broad figure size (4x column width, 1.5x height).'''
    side = self.frontierswidth / self.inchpercm  # cm -> inches
    plt.rcParams.update({'figure.figsize': [side * 4, side * 1.5]})
python
{ "resource": "" }
q38182
params.set_enormous_fig_style
train
def set_enormous_fig_style(self):
    '''Set an enormous figure size (2x column width, 2x height).'''
    side = self.frontierswidth / self.inchpercm  # cm -> inches
    plt.rcParams.update({'figure.figsize': [side * 2, side * 2]})
python
{ "resource": "" }
q38183
params.set_PLOS_1column_fig_style
train
def set_PLOS_1column_fig_style(self, ratio=1):
    '''Set the figure size to one PLOS column; `ratio` scales the height.'''
    width = self.PLOSwidth1Col
    plt.rcParams.update({'figure.figsize': [width, width * ratio]})
python
{ "resource": "" }
q38184
params.set_PLOS_2column_fig_style
train
def set_PLOS_2column_fig_style(self, ratio=1):
    '''Set the figure size to two PLOS columns; `ratio` scales the height.'''
    width = self.PLOSwidth2Col
    plt.rcParams.update({'figure.figsize': [width, width * ratio]})
python
{ "resource": "" }
q38185
PostProcess.run
train
def run(self):
    """
    Perform the postprocessing steps, computing compound signals from
    cell-specific output files.

    Only MPI rank 0 does the work; all other ranks return immediately.
    For each signal type listed in ``self.savelist`` ('LFP', 'CSD'),
    the per-population contributions are summed and written to the
    compound file plus one per-layer HDF5 file per cell type.
    """
    if RANK != 0:
        return
    if 'LFP' in self.savelist:
        # get the per population LFPs and total LFP from all populations
        self.LFPdict, self.LFPsum = self.calc_lfp()
        self.LFPdictLayer = self.calc_lfp_layer()
        self._write_compound_signal('LFP', self.LFPsum, self.LFPdictLayer)
    if 'CSD' in self.savelist:
        # get the per population CSDs and total CSD from all populations
        self.CSDdict, self.CSDsum = self.calc_csd()
        self.CSDdictLayer = self.calc_csd_layer()
        self._write_compound_signal('CSD', self.CSDsum, self.CSDdictLayer)

def _write_compound_signal(self, label, total, per_layer):
    """
    Write the summed signal `total` and each per-layer array in
    `per_layer` to HDF5 files for signal type `label` ('LFP' or 'CSD').

    Context managers guarantee the files are closed even if a write
    fails part way through.
    """
    # global sum across all populations
    with h5py.File(os.path.join(self.savefolder,
                                self.compound_file.format(label)),
                   'w') as f:
        f['srate'] = 1E3 / self.dt_output
        f.create_dataset('data', data=total, compression=4)
    # per-layer contributions, e.g. from L23E, L4I etc.
    for key, value in per_layer.items():
        with h5py.File(os.path.join(self.populations_path,
                                    self.output_file.format(key,
                                                            label + '.h5')),
                       'w') as f:
            f['srate'] = 1E3 / self.dt_output
            f.create_dataset('data', data=value, compression=4)
python
{ "resource": "" }
q38186
PostProcess.calc_lfp
train
def calc_lfp(self):
    """
    Sum all the LFP contributions from every cell type.

    Returns
    -------
    tuple
        (dict mapping cell type -> per-type LFP array,
         numpy.ndarray holding the sum over all cell types)
    """
    LFParray = np.array([])
    LFPdict = {}
    for i, y in enumerate(self.y):
        fil = os.path.join(self.populations_path,
                           self.output_file.format(y, 'LFP.h5'))
        # open read-only; the context manager closes the file even on error
        with h5py.File(fil, 'r') as f:
            # h5py >= 3.0 removed the deprecated `.value`; `[()]` reads
            # the whole dataset into memory on every h5py version
            data = f['data'][()]
        if i == 0:
            # allocate once, using the first file's shape
            LFParray = np.zeros((len(self.y),) + data.shape)
        LFParray[i, ] = data
        LFPdict[y] = data
    return LFPdict, LFParray.sum(axis=0)
python
{ "resource": "" }
q38187
PostProcess.calc_csd
train
def calc_csd(self):
    """
    Sum all the CSD contributions from every cell type.

    Returns
    -------
    tuple
        (dict mapping cell type -> per-type CSD array,
         numpy.ndarray holding the sum over all cell types)
    """
    CSDarray = np.array([])
    CSDdict = {}
    for i, y in enumerate(self.y):
        fil = os.path.join(self.populations_path,
                           self.output_file.format(y, 'CSD.h5'))
        # open read-only; the context manager closes the file even on error
        with h5py.File(fil, 'r') as f:
            # h5py >= 3.0 removed the deprecated `.value`; `[()]` reads
            # the whole dataset into memory on every h5py version
            data = f['data'][()]
        if i == 0:
            # allocate once, using the first file's shape
            CSDarray = np.zeros((len(self.y),) + data.shape)
        CSDarray[i, ] = data
        CSDdict[y] = data
    return CSDdict, CSDarray.sum(axis=0)
python
{ "resource": "" }
q38188
PostProcess.create_tar_archive
train
def create_tar_archive(self): """ Create a tar archive of the main simulation outputs. """ #file filter EXCLUDE_FILES = glob.glob(os.path.join(self.savefolder, 'cells')) EXCLUDE_FILES += glob.glob(os.path.join(self.savefolder, 'populations', 'subsamples')) EXCLUDE_FILES += glob.glob(os.path.join(self.savefolder, 'raw_nest_output')) def filter_function(tarinfo): print(tarinfo.name) if len([f for f in EXCLUDE_FILES if os.path.split(tarinfo.name)[-1] in os.path.split(f)[-1]]) > 0 or \ len([f for f in EXCLUDE_FILES if os.path.split(tarinfo.path)[-1] in os.path.split(f)[-1]]) > 0: print('excluding %s' % tarinfo.name) return None else: return tarinfo if RANK == 0: print('creating archive %s' % (self.savefolder + '.tar')) #open file f = tarfile.open(self.savefolder + '.tar', 'w') #avoid adding files to repo as /scratch/$USER/hybrid_model/... arcname = os.path.split(self.savefolder)[-1] f.add(name=self.savefolder, arcname=arcname, filter=filter_function) f.close() #resync COMM.Barrier()
python
{ "resource": "" }
q38189
sort_queryset
train
def sort_queryset(queryset, request, context=None):
    """
    Return `queryset` ordered according to the ``sort_by`` GET parameter.

    The parameter is interpreted, in order, as: a model field name, a
    session key holding the real ordering expression, or -- when
    `context` is given (template-tag usage with a changelist) -- a
    1-based column index into ``context['cl'].list_display`` (negative
    for descending order).  If ``sort_by`` is absent or cannot be
    resolved, the queryset is returned unchanged.
    """
    sort_by = request.GET.get('sort_by')
    if not sort_by:
        return queryset
    # membership test against the model's concrete field names
    field_names = {field.name for field in queryset.model._meta.fields}
    if sort_by in field_names:
        return queryset.order_by(sort_by)
    if sort_by in request.session:
        # the GET value is a session key storing the actual ordering
        return queryset.order_by(request.session[sort_by])
    # added else to fix a bug when using changelist
    # TODO: use less ifs and more standard sorting
    if context is not None:
        if sort_by[0] != '-':
            # sorted ascending
            sort_by = context['cl'].list_display[int(sort_by) - 1]
        else:
            # sorted descending
            sort_by = '-' + context['cl'].list_display[abs(int(sort_by)) - 1]
        return queryset.order_by(sort_by)
    return queryset
python
{ "resource": "" }
q38190
include_before
train
def include_before(predicate, num, iterative):
    """
    Yield elements of `iterative`, keeping each match together with the
    `num` elements that precede it.

    >>> list(include_before(lambda x: x == 'd', 2, 'abcded'))
    ['b', 'c', 'd', 'e', 'd']
    """
    # one copy feeds the predicate machinery, the other supplies output
    (elements, lookahead) = itertools.tee(iterative)
    flags = _backward_shifted_predicate(predicate, num, lookahead)
    return (elem for (elem, keep) in zip(elements, flags) if keep)
python
{ "resource": "" }
q38191
include_after
train
def include_after(predicate, num, iterative):
    """
    Yield elements of `iterative`, keeping each match together with the
    `num` elements that follow it.

    >>> list(include_after(lambda x: x == 'b', 2, 'abcbcde'))
    ['b', 'c', 'b', 'c', 'd']
    """
    # one copy feeds the predicate machinery, the other supplies output
    (elements, lookbehind) = itertools.tee(iterative)
    flags = _forward_shifted_predicate(predicate, num, lookbehind)
    return (elem for (elem, keep) in zip(elements, flags) if keep)
python
{ "resource": "" }
q38192
include_context
train
def include_context(predicate, num, iterative):
    """
    Yield elements of `iterative`, keeping each match together with the
    `num` elements on either side of it.

    >>> ''.join(include_context(lambda x: x == '!', 2, 'bb!aa__bb!aa'))
    'bb!aabb!aa'
    """
    # three copies: output stream plus one per predicate direction
    (elements, fwd, bwd) = itertools.tee(iterative, 3)
    after_flags = _forward_shifted_predicate(predicate, num, fwd)
    before_flags = _backward_shifted_predicate(predicate, num, bwd)
    return (elem
            for (elem, keep_after, keep_before)
            in zip(elements, after_flags, before_flags)
            if keep_after or keep_before)
python
{ "resource": "" }
q38193
calc_variances
train
def calc_variances(params):
    '''
    Calculate the variance of the summed signal and of every
    population-resolved signal, for both CSD and LFP.

    The two signal types are distributed round-robin over the MPI
    ranks.  For each type one HDF5 file is written containing the
    electrode depths, one variance vector per cell type, and the
    variance of the compound ('sum') signal.  The startup transient
    (``ana_params.transient`` samples) is discarded before computing
    variances.
    '''
    depth = params.electrodeParams['z']
    for i, data_type in enumerate(['CSD', 'LFP']):
        # round-robin the signal types across the MPI ranks
        if i % SIZE != RANK:
            continue
        out_path = os.path.join(params.savefolder,
                                ana_params.analysis_folder,
                                data_type + ana_params.fname_variances)
        with h5py.File(out_path, 'w') as f_out:
            f_out['depths'] = depth
            for celltype in params.y:
                in_path = os.path.join(
                    params.populations_path,
                    '%s_population_%s' % (celltype, data_type) + '.h5')
                # open read-only; context manager closes even on error.
                # `[()]` replaces the deprecated `.value` (removed in
                # h5py 3.0) and reads the whole dataset.
                with h5py.File(in_path, 'r') as f_in:
                    var = f_in['data'][()][:, ana_params.transient:].var(axis=1)
                f_out[celltype] = var
            sum_path = os.path.join(params.savefolder, data_type + 'sum.h5')
            with h5py.File(sum_path, 'r') as f_in:
                var = f_in['data'][()][:, ana_params.transient:].var(axis=1)
            f_out['sum'] = var
    return
python
{ "resource": "" }
q38194
MyIRCClient.on_join
train
def on_join(self, connection, event):
    """Handle a join event by greeting the user who joined.

    :param connection: the connection with the event
    :type connection: :class:`irc.client.ServerConnection`
    :param event: the event to handle
    :type event: :class:`irc.client.Event`
    :returns: None
    """
    nick = event.source
    greeting = 'Hello %s!' % nick
    self.privmsg(nick, greeting)
python
{ "resource": "" }
q38195
daemon_run
train
def daemon_run(no_error, restart, record_path, keep_json, check_duplicate,
               use_polling, log_level):
    """
    Run RASH index daemon.

    This daemon watches the directory ``~/.config/rash/data/record``
    and translate the JSON files dumped by ``record`` command into
    sqlite3 DB at ``~/.config/rash/data/db.sqlite``.

    ``rash init`` will start RASH automatically by default.
    But there are alternative ways to start daemon.

    If you want to organize background process in one place such
    as supervisord_, it is good to add `--restart` option to force
    stop other daemon process if you accidentally started it in
    other place.  Here is an example of supervisord_ setup::

        [program:rash-daemon]
        command=rash daemon --restart

    .. _supervisord: http://supervisord.org/

    Alternatively, you can call ``rash index`` in cron job to
    avoid using daemon.  It is useful if you want to use RASH on
    NFS, as it looks like watchdog does not work on NFS.::

        # Refresh RASH DB every 10 minutes
        */10 * * * * rash index

    """
    # Probably it makes sense to use this daemon to provide search
    # API, so that this daemon is going to be the only process that
    # is connected to the DB?
    from .config import ConfigStore
    from .indexer import Indexer
    from .log import setup_daemon_log_file, LogForTheFuture
    from .watchrecord import watch_record, install_sigterm_handler
    install_sigterm_handler()
    cfstore = ConfigStore()
    if log_level:
        cfstore.daemon_log_level = log_level
    # Log records are buffered until the log file is set up below.
    flogger = LogForTheFuture()
    # SOMEDAY: make PID checking/writing atomic if possible
    flogger.debug('Checking old PID file %r.', cfstore.daemon_pid_path)
    if os.path.exists(cfstore.daemon_pid_path):
        flogger.debug('Old PID file exists. Reading from it.')
        with open(cfstore.daemon_pid_path, 'rt') as f:
            pid = int(f.read().strip())
        flogger.debug('Checking if old process with PID=%d is alive', pid)
        try:
            os.kill(pid, 0)  # check if `pid` is alive
        except OSError:
            # Stale PID file: the recorded process no longer exists.
            flogger.info(
                'Process with PID=%d is already dead. '
                'So just go on and use this daemon.', pid)
        else:
            if restart:
                flogger.info('Stopping old daemon with PID=%d.', pid)
                stop_running_daemon(cfstore, pid)
            else:
                message = ('There is already a running daemon (PID={0})!'
                           .format(pid))
                if no_error:
                    flogger.debug(message)
                    # FIXME: Setup log handler and flogger.dump().
                    # Note that using the default log file is not safe
                    # since it has already been used.
                    return
                else:
                    raise RuntimeError(message)
    else:
        flogger.debug('Daemon PID file %r does not exists. '
                      'So just go on and use this daemon.',
                      cfstore.daemon_pid_path)
    # Record this process' PID so later invocations can detect/stop it.
    with open(cfstore.daemon_pid_path, 'w') as f:
        f.write(str(os.getpid()))
    try:
        setup_daemon_log_file(cfstore)
        flogger.dump()
        indexer = Indexer(cfstore, check_duplicate, keep_json, record_path)
        indexer.index_all()
        # blocks until terminated
        watch_record(indexer, use_polling)
    finally:
        # Always remove the PID file, even if the watcher crashes.
        os.remove(cfstore.daemon_pid_path)
python
{ "resource": "" }
q38196
show_run
train
def show_run(command_history_id):
    """
    Show detailed command history by its ID.
    """
    from pprint import pprint
    from .config import ConfigStore
    from .database import DataBase
    db = DataBase(ConfigStore().db_path)
    with db.connection():
        for ch_id in command_history_id:
            record = db.get_full_command_record(ch_id)
            # dump every attribute of the record, one per entry
            pprint(vars(record))
            print("")
python
{ "resource": "" }
q38197
BlobDBClient.insert
train
def insert(self, database, key, value, callback=None):
    """
    Insert an item into the given database.

    :param database: The database into which to insert the value.
    :type database: .BlobDatabaseID
    :param key: The key to insert.
    :type key: uuid.UUID
    :param value: The value to insert.
    :type value: bytes
    :param callback: A callback to be called on success or failure.
    """
    token = self._get_token()
    content = InsertCommand(key=key.bytes, value=value)
    command = BlobCommand(token=token, database=database, content=content)
    self._enqueue(self._PendingItem(token, command, callback))
python
{ "resource": "" }
q38198
BlobDBClient.delete
train
def delete(self, database, key, callback=None):
    """
    Delete an item from the given database.

    :param database: The database from which to delete the value.
    :type database: .BlobDatabaseID
    :param key: The key to delete.
    :type key: uuid.UUID
    :param callback: A callback to be called on success or failure.
    """
    token = self._get_token()
    content = DeleteCommand(key=key.bytes)
    command = BlobCommand(token=token, database=database, content=content)
    self._enqueue(self._PendingItem(token, command, callback))
python
{ "resource": "" }
q38199
Service.invoke
train
def invoke(self, headers, body):
    """
    Invoke the SOAP service with the given headers and body.

    :param headers: SOAP headers for the request
    :param body: SOAP body payload
    :returns: the parsed SOAP response
    :raises WSManAuthenticationException: on HTTP 401
    :raises WSManException: if the request itself fails or the host
        returns any other unexpected HTTP status code
    """
    xml = Service._create_request(headers, body)
    try:
        # NOTE(review): verify=False disables TLS certificate checking;
        # acceptable only on trusted networks -- consider making it
        # configurable.
        response = self.session.post(self.endpoint, verify=False, data=xml)
        logging.debug(response.content)
    except Exception as e:
        traceback.print_exc()
        # chain the original exception so the root cause stays visible
        raise WSManException(e) from e
    if response.status_code == 200:
        return Service._parse_response(response.content)
    if response.status_code == 401:
        raise WSManAuthenticationException('the remote host rejected authentication')
    raise WSManException('the remote host returned an unexpected http status code: %s' % response.status_code)
python
{ "resource": "" }