partition
stringclasses
3 values
func_name
stringlengths
1
134
docstring
stringlengths
1
46.9k
path
stringlengths
4
223
original_string
stringlengths
75
104k
code
stringlengths
75
104k
docstring_tokens
listlengths
1
1.97k
repo
stringlengths
7
55
language
stringclasses
1 value
url
stringlengths
87
315
code_tokens
listlengths
19
28.4k
sha
stringlengths
40
40
test
Player.advanced_splits
Returns a DataFrame of advanced splits data for a player-year. Note: only go back to 2012. :year: The year for the season in question. If None, returns career advanced splits. :returns: A DataFrame of advanced splits data.
sportsref/nfl/players.py
def advanced_splits(self, year=None):
    """Returns a DataFrame of advanced splits data for a player-year.

    Note: advanced splits only go back to 2012.

    :year: The year for the season in question. If None, returns career
        advanced splits.
    :returns: A DataFrame of advanced splits data.
    """
    # get the advanced splits table from the player's splits subpage
    url = self._subpage_url('splits', year)
    doc = pq(sportsref.utils.get_html(url))
    table = doc('table#advanced_splits')
    df = sportsref.utils.parse_table(table)
    # cleaning the data: split_type is only present on the first row of
    # each split group, so forward-fill it downward. Assign the column
    # explicitly instead of the deprecated fillna(method='ffill',
    # inplace=True) on an attribute-accessed column, which raises
    # chained-assignment warnings in modern pandas.
    if not df.empty:
        df['split_type'] = df['split_type'].ffill()
    return df
[ "Returns", "a", "DataFrame", "of", "advanced", "splits", "data", "for", "a", "player", "-", "year", ".", "Note", ":", "only", "go", "back", "to", "2012", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/players.py#L332-L348
[ "def", "advanced_splits", "(", "self", ",", "year", "=", "None", ")", ":", "# get the table", "url", "=", "self", ".", "_subpage_url", "(", "'splits'", ",", "year", ")", "doc", "=", "pq", "(", "sportsref", ".", "utils", ".", "get_html", "(", "url", ")", ")", "table", "=", "doc", "(", "'table#advanced_splits'", ")", "df", "=", "sportsref", ".", "utils", ".", "parse_table", "(", "table", ")", "# cleaning the data", "if", "not", "df", ".", "empty", ":", "df", ".", "split_type", ".", "fillna", "(", "method", "=", "'ffill'", ",", "inplace", "=", "True", ")", "return", "df" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Player._simple_year_award
Template for simple award functions that simply list years, such as pro bowls and first-team all pro. :award_id: The div ID that is appended to "leaderboard_" in selecting the table's div. :returns: List of years for the award.
sportsref/nfl/players.py
def _simple_year_award(self, award_id):
    """Template for simple award functions that just list years, such as
    Pro Bowls and first-team All-Pro selections.

    :award_id: The div ID that is appended to "leaderboard_" in selecting
        the table's div.
    :returns: List of years (as ints) for the award.
    """
    document = self.get_doc()
    selector = 'div#leaderboard_{} table'.format(award_id)
    award_table = document(selector)
    raw_years = sportsref.utils.parse_awards_table(award_table)
    return [int(y) for y in raw_years]
[ "Template", "for", "simple", "award", "functions", "that", "simply", "list", "years", "such", "as", "pro", "bowls", "and", "first", "-", "team", "all", "pro", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/players.py#L352-L362
[ "def", "_simple_year_award", "(", "self", ",", "award_id", ")", ":", "doc", "=", "self", ".", "get_doc", "(", ")", "table", "=", "doc", "(", "'div#leaderboard_{} table'", ".", "format", "(", "award_id", ")", ")", "return", "list", "(", "map", "(", "int", ",", "sportsref", ".", "utils", ".", "parse_awards_table", "(", "table", ")", ")", ")" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
team_names
Returns a mapping from team ID to full team name for a given season. Example of a full team name: "New England Patriots" :year: The year of the season in question (as an int). :returns: A dictionary with teamID keys and full team name values.
sportsref/nfl/teams.py
def team_names(year):
    """Returns a mapping from team ID to full team name for a given season.

    Example of a full team name: "New England Patriots"

    :year: The year of the season in question (as an int).
    :returns: A dictionary with teamID keys and full team name values.
    """
    doc = pq(sportsref.utils.get_html(sportsref.nfl.BASE_URL + '/teams/'))
    active_table = doc('table#teams_active')
    inactive_table = doc('table#teams_inactive')
    active_df = sportsref.utils.parse_table(active_table)
    inactive_df = sportsref.utils.parse_table(inactive_table)
    df = pd.concat((active_df, inactive_df))
    # drop the partial-history rows so each franchise appears once
    df = df.loc[~df['has_class_partial_table']]
    ids = df.team_id.str[:3].values
    # collect the <th><a> link from every row of both tables (active rows
    # first, matching the order of the concatenated DataFrame above)
    anchors = []
    for table in (active_table, inactive_table):
        for row in table('tr').items():
            link = row('th a')
            if link:
                anchors.append(link)
    full_names = [link[0].text_content() for link in anchors]
    # combine IDs and team names into a pandas Series
    series = pd.Series(full_names, index=ids)
    # keep only the franchises active during the requested season
    in_season = ((df.year_min <= year) & (year <= df.year_max)).values
    return series[in_season].to_dict()
[ "Returns", "a", "mapping", "from", "team", "ID", "to", "full", "team", "name", "for", "a", "given", "season", ".", "Example", "of", "a", "full", "team", "name", ":", "New", "England", "Patriots" ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L22-L46
[ "def", "team_names", "(", "year", ")", ":", "doc", "=", "pq", "(", "sportsref", ".", "utils", ".", "get_html", "(", "sportsref", ".", "nfl", ".", "BASE_URL", "+", "'/teams/'", ")", ")", "active_table", "=", "doc", "(", "'table#teams_active'", ")", "active_df", "=", "sportsref", ".", "utils", ".", "parse_table", "(", "active_table", ")", "inactive_table", "=", "doc", "(", "'table#teams_inactive'", ")", "inactive_df", "=", "sportsref", ".", "utils", ".", "parse_table", "(", "inactive_table", ")", "df", "=", "pd", ".", "concat", "(", "(", "active_df", ",", "inactive_df", ")", ")", "df", "=", "df", ".", "loc", "[", "~", "df", "[", "'has_class_partial_table'", "]", "]", "ids", "=", "df", ".", "team_id", ".", "str", "[", ":", "3", "]", ".", "values", "names", "=", "[", "tr", "(", "'th a'", ")", "for", "tr", "in", "active_table", "(", "'tr'", ")", ".", "items", "(", ")", "]", "names", ".", "extend", "(", "tr", "(", "'th a'", ")", "for", "tr", "in", "inactive_table", "(", "'tr'", ")", ".", "items", "(", ")", ")", "names", "=", "[", "_f", "for", "_f", "in", "names", "if", "_f", "]", "names", "=", "[", "lst", "[", "0", "]", ".", "text_content", "(", ")", "for", "lst", "in", "names", "]", "# combine IDs and team names into pandas series", "series", "=", "pd", ".", "Series", "(", "names", ",", "index", "=", "ids", ")", "# create a mask to filter to teams from the given year", "mask", "=", "(", "(", "df", ".", "year_min", "<=", "year", ")", "&", "(", "year", "<=", "df", ".", "year_max", ")", ")", ".", "values", "# filter, convert to a dict, and return", "return", "series", "[", "mask", "]", ".", "to_dict", "(", ")" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
team_ids
Returns a mapping from team name to team ID for a given season. Inverse mapping of team_names. Example of a full team name: "New England Patriots" :year: The year of the season in question (as an int). :returns: A dictionary with full team name keys and teamID values.
sportsref/nfl/teams.py
def team_ids(year):
    """Returns a mapping from full team name to team ID for a given season.

    Inverse mapping of team_names. Example of a full team name:
    "New England Patriots"

    :year: The year of the season in question (as an int).
    :returns: A dictionary with full team name keys and teamID values.
    """
    id_to_name = team_names(year)
    return dict((full_name, team_id) for team_id, full_name in id_to_name.items())
[ "Returns", "a", "mapping", "from", "team", "name", "to", "team", "ID", "for", "a", "given", "season", ".", "Inverse", "mapping", "of", "team_names", ".", "Example", "of", "a", "full", "team", "name", ":", "New", "England", "Patriots" ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L50-L58
[ "def", "team_ids", "(", "year", ")", ":", "names", "=", "team_names", "(", "year", ")", "return", "{", "v", ":", "k", "for", "k", ",", "v", "in", "names", ".", "items", "(", ")", "}" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team.name
Returns the real name of the franchise given the team ID. Examples: 'nwe' -> 'New England Patriots' 'sea' -> 'Seattle Seahawks' :returns: A string corresponding to the team's full name.
sportsref/nfl/teams.py
def name(self):
    """Returns the real name of the franchise given the team ID.

    Examples:
    'nwe' -> 'New England Patriots'
    'sea' -> 'Seattle Seahawks'

    :returns: A string corresponding to the team's full name.
    """
    doc = self.get_main_doc()
    header_text = doc('div#meta h1')[0].text_content()
    words = header_text.split()
    # the page header reads "<Team Name> Franchise ..."; keep everything
    # before the word "Franchise"
    cutoff = words.index('Franchise')
    return ' '.join(words[:cutoff])
[ "Returns", "the", "real", "name", "of", "the", "franchise", "given", "the", "team", "ID", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L108-L121
[ "def", "name", "(", "self", ")", ":", "doc", "=", "self", ".", "get_main_doc", "(", ")", "headerwords", "=", "doc", "(", "'div#meta h1'", ")", "[", "0", "]", ".", "text_content", "(", ")", ".", "split", "(", ")", "lastIdx", "=", "headerwords", ".", "index", "(", "'Franchise'", ")", "teamwords", "=", "headerwords", "[", ":", "lastIdx", "]", "return", "' '", ".", "join", "(", "teamwords", ")" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team.roster
Returns the roster table for the given year. :year: The year for which we want the roster; defaults to current year. :returns: A DataFrame containing roster information for that year.
sportsref/nfl/teams.py
def roster(self, year):
    """Returns the roster table for the given year.

    :year: The year for which we want the roster; defaults to current year.
    :returns: A DataFrame containing roster information for that year.
    """
    doc = self.get_year_doc('{}_roster'.format(year))
    roster_table = doc('table#games_played_team')
    df = sportsref.utils.parse_table(roster_table)
    starter_table = doc('table#starters')
    start_df = sportsref.utils.parse_table(starter_table)
    # BUG FIX: the original tested `not starter_table.empty`, but on a
    # PyQuery object `.empty` is jQuery's empty() *method* — a bound
    # method, which is always truthy — so the starter columns were never
    # populated. Test the parsed DataFrame's `.empty` instead, consistent
    # with how boxscores()/schedule() check their parsed tables.
    if not start_df.empty:
        start_df = start_df.dropna(axis=0, subset=['position'])
        starters = start_df.set_index('position').player_id
        df['is_starter'] = df.player_id.isin(starters)
        # map each starter back to the position they start at (first
        # matching position if a player starts at several)
        df['starting_pos'] = df.player_id.map(
            lambda pid: (starters[starters == pid].index[0]
                         if pid in starters.values else None)
        )
    return df
[ "Returns", "the", "roster", "table", "for", "the", "given", "year", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L124-L143
[ "def", "roster", "(", "self", ",", "year", ")", ":", "doc", "=", "self", ".", "get_year_doc", "(", "'{}_roster'", ".", "format", "(", "year", ")", ")", "roster_table", "=", "doc", "(", "'table#games_played_team'", ")", "df", "=", "sportsref", ".", "utils", ".", "parse_table", "(", "roster_table", ")", "starter_table", "=", "doc", "(", "'table#starters'", ")", "if", "not", "starter_table", ".", "empty", ":", "start_df", "=", "sportsref", ".", "utils", ".", "parse_table", "(", "starter_table", ")", "start_df", "=", "start_df", ".", "dropna", "(", "axis", "=", "0", ",", "subset", "=", "[", "'position'", "]", ")", "starters", "=", "start_df", ".", "set_index", "(", "'position'", ")", ".", "player_id", "df", "[", "'is_starter'", "]", "=", "df", ".", "player_id", ".", "isin", "(", "starters", ")", "df", "[", "'starting_pos'", "]", "=", "df", ".", "player_id", ".", "map", "(", "lambda", "pid", ":", "(", "starters", "[", "starters", "==", "pid", "]", ".", "index", "[", "0", "]", "if", "pid", "in", "starters", ".", "values", "else", "None", ")", ")", "return", "df" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team.boxscores
Gets list of BoxScore objects corresponding to the box scores from that year. :year: The year for which we want the boxscores; defaults to current year. :returns: np.array of strings representing boxscore IDs.
sportsref/nfl/teams.py
def boxscores(self, year):
    """Gets list of BoxScore objects corresponding to the box scores from
    that year.

    :year: The year for which we want the boxscores; defaults to current
        year.
    :returns: np.array of strings representing boxscore IDs.
    """
    year_doc = self.get_year_doc(year)
    games_table = year_doc('table#games')
    games_df = sportsref.utils.parse_table(games_table)
    # no games table (e.g. no data for that season) -> empty array
    if games_df.empty:
        return np.array([])
    return games_df.boxscore_id.values
[ "Gets", "list", "of", "BoxScore", "objects", "corresponding", "to", "the", "box", "scores", "from", "that", "year", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L146-L159
[ "def", "boxscores", "(", "self", ",", "year", ")", ":", "doc", "=", "self", ".", "get_year_doc", "(", "year", ")", "table", "=", "doc", "(", "'table#games'", ")", "df", "=", "sportsref", ".", "utils", ".", "parse_table", "(", "table", ")", "if", "df", ".", "empty", ":", "return", "np", ".", "array", "(", "[", "]", ")", "return", "df", ".", "boxscore_id", ".", "values" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team._year_info_pq
Returns a PyQuery object containing the info from the meta div at the top of the team year page with the given keyword. :year: Int representing the season. :keyword: A keyword to filter to a single p tag in the meta div. :returns: A PyQuery object for the selected p element.
sportsref/nfl/teams.py
def _year_info_pq(self, year, keyword):
    """Returns a PyQuery object containing the info from the meta div at
    the top of the team year page with the given keyword.

    :year: Int representing the season.
    :keyword: A keyword to filter to a single p tag in the meta div.
    :returns: A PyQuery object for the selected p element.
    """
    doc = self.get_year_doc(year)
    p_tags = doc('div#meta div:not(.logo) p')
    needle = keyword.lower()
    num_tags = 0
    first_match = None
    for p_tag in p_tags:
        num_tags += 1
        text = p_tag.text_content().strip()
        if first_match is None and needle in text.lower():
            first_match = pq(p_tag)
    if first_match is not None:
        return first_match
    # distinguish "page has meta p tags but none matched" from "no meta
    # p tags at all"
    if num_tags:
        raise ValueError('Keyword not found in any p tag.')
    else:
        raise ValueError('No meta div p tags found.')
[ "Returns", "a", "PyQuery", "object", "containing", "the", "info", "from", "the", "meta", "div", "at", "the", "top", "of", "the", "team", "year", "page", "with", "the", "given", "keyword", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L162-L182
[ "def", "_year_info_pq", "(", "self", ",", "year", ",", "keyword", ")", ":", "doc", "=", "self", ".", "get_year_doc", "(", "year", ")", "p_tags", "=", "doc", "(", "'div#meta div:not(.logo) p'", ")", "texts", "=", "[", "p_tag", ".", "text_content", "(", ")", ".", "strip", "(", ")", "for", "p_tag", "in", "p_tags", "]", "try", ":", "return", "next", "(", "pq", "(", "p_tag", ")", "for", "p_tag", ",", "text", "in", "zip", "(", "p_tags", ",", "texts", ")", "if", "keyword", ".", "lower", "(", ")", "in", "text", ".", "lower", "(", ")", ")", "except", "StopIteration", ":", "if", "len", "(", "texts", ")", ":", "raise", "ValueError", "(", "'Keyword not found in any p tag.'", ")", "else", ":", "raise", "ValueError", "(", "'No meta div p tags found.'", ")" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team.head_coaches_by_game
Returns head coach data by game. :year: An int representing the season in question. :returns: An array with an entry per game of the season that the team played (including playoffs). Each entry is the head coach's ID for that game in the season.
sportsref/nfl/teams.py
def head_coaches_by_game(self, year):
    """Returns head coach data by game.

    :year: An int representing the season in question.
    :returns: An array with an entry per game of the season that the team
        played (including playoffs). Each entry is the head coach's ID for
        that game in the season.
    """
    coach_str = self._year_info_pq(year, 'Coach').text()
    # each coach appears in the text as "<coachID> (<wins>-<losses>-<ties>)"
    regex = r'(\S+?) \((\d+)-(\d+)-(\d+)\)'
    # BUG FIX: the original advanced through the string with
    # `coachStr = coachStr[nextIndex:]` — a NameError, since the variable
    # is `coach_str` — and its `while m:` loop would have called
    # m.groups() on None once the matches were exhausted. re.finditer
    # walks every match safely in one pass.
    coach_and_tenure = []
    for m in re.finditer(regex, coach_str):
        coach_id, wins, losses, ties = m.groups()
        games = int(wins) + int(losses) + int(ties)
        coach_and_tenure.append((coach_id, games))
    # expand each coach into one entry per game coached, then reverse the
    # per-game list (preserving the original implementation's ordering)
    coach_ids = [
        cid for cid, games in coach_and_tenure for _ in range(games)
    ]
    return np.array(coach_ids[::-1])
[ "Returns", "head", "coach", "data", "by", "game", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L189-L212
[ "def", "head_coaches_by_game", "(", "self", ",", "year", ")", ":", "coach_str", "=", "self", ".", "_year_info_pq", "(", "year", ",", "'Coach'", ")", ".", "text", "(", ")", "regex", "=", "r'(\\S+?) \\((\\d+)-(\\d+)-(\\d+)\\)'", "coachAndTenure", "=", "[", "]", "m", "=", "True", "while", "m", ":", "m", "=", "re", ".", "search", "(", "regex", ",", "coach_str", ")", "coachID", ",", "wins", ",", "losses", ",", "ties", "=", "m", ".", "groups", "(", ")", "nextIndex", "=", "m", ".", "end", "(", "4", ")", "+", "1", "coachStr", "=", "coachStr", "[", "nextIndex", ":", "]", "tenure", "=", "int", "(", "wins", ")", "+", "int", "(", "losses", ")", "+", "int", "(", "ties", ")", "coachAndTenure", ".", "append", "(", "(", "coachID", ",", "tenure", ")", ")", "coachIDs", "=", "[", "cID", "for", "cID", ",", "games", "in", "coachAndTenure", "for", "_", "in", "range", "(", "games", ")", "]", "return", "np", ".", "array", "(", "coachIDs", "[", ":", ":", "-", "1", "]", ")" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team.wins
Returns the # of regular season wins a team in a year. :year: The year for the season in question. :returns: The number of regular season wins.
sportsref/nfl/teams.py
def wins(self, year):
    """Returns the # of regular season wins a team in a year.

    :year: The year for the season in question.
    :returns: The number of regular season wins.
    """
    sched = self.schedule(year)
    if sched.empty:
        return np.nan
    # weeks 1-17 are the regular season; later weeks are playoffs
    regular_season = sched.query('week_num <= 17')
    return regular_season.is_win.sum()
[ "Returns", "the", "#", "of", "regular", "season", "wins", "a", "team", "in", "a", "year", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L215-L224
[ "def", "wins", "(", "self", ",", "year", ")", ":", "schedule", "=", "self", ".", "schedule", "(", "year", ")", "if", "schedule", ".", "empty", ":", "return", "np", ".", "nan", "return", "schedule", ".", "query", "(", "'week_num <= 17'", ")", ".", "is_win", ".", "sum", "(", ")" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team.schedule
Returns a DataFrame with schedule information for the given year. :year: The year for the season in question. :returns: Pandas DataFrame with schedule information.
sportsref/nfl/teams.py
def schedule(self, year):
    """Returns a DataFrame with schedule information for the given year.

    :year: The year for the season in question.
    :returns: Pandas DataFrame with schedule information.
    """
    doc = self.get_year_doc(year)
    games_table = doc('table#games')
    df = sportsref.utils.parse_table(games_table)
    if df.empty:
        return pd.DataFrame()
    # keep only rows that carry a week number, then renumber weeks 1..N
    df = df.loc[df['week_num'].notnull()]
    df['week_num'] = np.arange(1, len(df) + 1)
    # derive boolean outcome columns from the game_outcome / overtime cells
    outcome = df['game_outcome']
    df['is_win'] = outcome == 'W'
    df['is_loss'] = outcome == 'L'
    df['is_tie'] = outcome == 'T'
    df['is_bye'] = outcome.isnull()
    df['is_ot'] = df['overtime'].notnull()
    return df
[ "Returns", "a", "DataFrame", "with", "schedule", "information", "for", "the", "given", "year", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L227-L245
[ "def", "schedule", "(", "self", ",", "year", ")", ":", "doc", "=", "self", ".", "get_year_doc", "(", "year", ")", "table", "=", "doc", "(", "'table#games'", ")", "df", "=", "sportsref", ".", "utils", ".", "parse_table", "(", "table", ")", "if", "df", ".", "empty", ":", "return", "pd", ".", "DataFrame", "(", ")", "df", "=", "df", ".", "loc", "[", "df", "[", "'week_num'", "]", ".", "notnull", "(", ")", "]", "df", "[", "'week_num'", "]", "=", "np", ".", "arange", "(", "len", "(", "df", ")", ")", "+", "1", "df", "[", "'is_win'", "]", "=", "df", "[", "'game_outcome'", "]", "==", "'W'", "df", "[", "'is_loss'", "]", "=", "df", "[", "'game_outcome'", "]", "==", "'L'", "df", "[", "'is_tie'", "]", "=", "df", "[", "'game_outcome'", "]", "==", "'T'", "df", "[", "'is_bye'", "]", "=", "df", "[", "'game_outcome'", "]", ".", "isnull", "(", ")", "df", "[", "'is_ot'", "]", "=", "df", "[", "'overtime'", "]", ".", "notnull", "(", ")", "return", "df" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team.srs
Returns the SRS (Simple Rating System) for a team in a year. :year: The year for the season in question. :returns: A float of SRS.
sportsref/nfl/teams.py
def srs(self, year):
    """Returns the SRS (Simple Rating System) for a team in a year.

    :year: The year for the season in question.
    :returns: A float of SRS, or None when unavailable.
    """
    try:
        srs_text = self._year_info_pq(year, 'SRS').text()
    except ValueError:
        # no meta p tag mentions SRS for this season
        return None
    # CONSISTENCY FIX: use re.search like the sibling sos() method, rather
    # than the anchored re.match, so the value is still found when the
    # p tag's text does not begin with "SRS".
    m = re.search(r'SRS\s*:\s*(\S+)', srs_text)
    if m:
        return float(m.group(1))
    else:
        return None
[ "Returns", "the", "SRS", "(", "Simple", "Rating", "System", ")", "for", "a", "team", "in", "a", "year", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L248-L262
[ "def", "srs", "(", "self", ",", "year", ")", ":", "try", ":", "srs_text", "=", "self", ".", "_year_info_pq", "(", "year", ",", "'SRS'", ")", ".", "text", "(", ")", "except", "ValueError", ":", "return", "None", "m", "=", "re", ".", "match", "(", "r'SRS\\s*?:\\s*?(\\S+)'", ",", "srs_text", ")", "if", "m", ":", "return", "float", "(", "m", ".", "group", "(", "1", ")", ")", "else", ":", "return", "None" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team.sos
Returns the SOS (Strength of Schedule) for a team in a year, based on SRS. :year: The year for the season in question. :returns: A float of SOS.
sportsref/nfl/teams.py
def sos(self, year):
    """Returns the SOS (Strength of Schedule) for a team in a year, based
    on SRS.

    :year: The year for the season in question.
    :returns: A float of SOS.
    """
    try:
        sos_text = self._year_info_pq(year, 'SOS').text()
    except ValueError:
        # no meta p tag mentions SOS for this season
        return None
    match = re.search(r'SOS\s*:\s*(\S+)', sos_text)
    if match is None:
        return None
    return float(match.group(1))
[ "Returns", "the", "SOS", "(", "Strength", "of", "Schedule", ")", "for", "a", "team", "in", "a", "year", "based", "on", "SRS", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L265-L280
[ "def", "sos", "(", "self", ",", "year", ")", ":", "try", ":", "sos_text", "=", "self", ".", "_year_info_pq", "(", "year", ",", "'SOS'", ")", ".", "text", "(", ")", "except", "ValueError", ":", "return", "None", "m", "=", "re", ".", "search", "(", "r'SOS\\s*:\\s*(\\S+)'", ",", "sos_text", ")", "if", "m", ":", "return", "float", "(", "m", ".", "group", "(", "1", ")", ")", "else", ":", "return", "None" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team.off_coordinator
Returns the coach ID for the team's OC in a given year. :year: An int representing the year. :returns: A string containing the coach ID of the OC.
sportsref/nfl/teams.py
def off_coordinator(self, year):
    """Returns the coach ID for the team's OC in a given year.

    :year: An int representing the year.
    :returns: A string containing the coach ID of the OC, or None when not
        found.
    """
    try:
        oc_anchor = self._year_info_pq(year, 'Offensive Coordinator')('a')
    except ValueError:
        # no "Offensive Coordinator" p tag for this season
        return None
    if oc_anchor:
        # BUG FIX: return the coach ID as documented, not the raw href —
        # convert via rel_url_to_id, matching stadium()'s convention.
        return sportsref.utils.rel_url_to_id(oc_anchor.attr['href'])
    return None
[ "Returns", "the", "coach", "ID", "for", "the", "team", "s", "OC", "in", "a", "given", "year", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L283-L294
[ "def", "off_coordinator", "(", "self", ",", "year", ")", ":", "try", ":", "oc_anchor", "=", "self", ".", "_year_info_pq", "(", "year", ",", "'Offensive Coordinator'", ")", "(", "'a'", ")", "if", "oc_anchor", ":", "return", "oc_anchor", ".", "attr", "[", "'href'", "]", "except", "ValueError", ":", "return", "None" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team.def_coordinator
Returns the coach ID for the team's DC in a given year. :year: An int representing the year. :returns: A string containing the coach ID of the DC.
sportsref/nfl/teams.py
def def_coordinator(self, year):
    """Returns the coach ID for the team's DC in a given year.

    :year: An int representing the year.
    :returns: A string containing the coach ID of the DC, or None when not
        found.
    """
    try:
        dc_anchor = self._year_info_pq(year, 'Defensive Coordinator')('a')
    except ValueError:
        # no "Defensive Coordinator" p tag for this season
        return None
    if dc_anchor:
        # BUG FIX: return the coach ID as documented, not the raw href —
        # convert via rel_url_to_id, matching stadium()'s convention.
        return sportsref.utils.rel_url_to_id(dc_anchor.attr['href'])
    return None
[ "Returns", "the", "coach", "ID", "for", "the", "team", "s", "DC", "in", "a", "given", "year", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L297-L308
[ "def", "def_coordinator", "(", "self", ",", "year", ")", ":", "try", ":", "dc_anchor", "=", "self", ".", "_year_info_pq", "(", "year", ",", "'Defensive Coordinator'", ")", "(", "'a'", ")", "if", "dc_anchor", ":", "return", "dc_anchor", ".", "attr", "[", "'href'", "]", "except", "ValueError", ":", "return", "None" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team.stadium
Returns the ID for the stadium in which the team played in a given year. :year: The year in question. :returns: A string representing the stadium ID.
sportsref/nfl/teams.py
def stadium(self, year): """Returns the ID for the stadium in which the team played in a given year. :year: The year in question. :returns: A string representing the stadium ID. """ anchor = self._year_info_pq(year, 'Stadium')('a') return sportsref.utils.rel_url_to_id(anchor.attr['href'])
def stadium(self, year): """Returns the ID for the stadium in which the team played in a given year. :year: The year in question. :returns: A string representing the stadium ID. """ anchor = self._year_info_pq(year, 'Stadium')('a') return sportsref.utils.rel_url_to_id(anchor.attr['href'])
[ "Returns", "the", "ID", "for", "the", "stadium", "in", "which", "the", "team", "played", "in", "a", "given", "year", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L311-L319
[ "def", "stadium", "(", "self", ",", "year", ")", ":", "anchor", "=", "self", ".", "_year_info_pq", "(", "year", ",", "'Stadium'", ")", "(", "'a'", ")", "return", "sportsref", ".", "utils", ".", "rel_url_to_id", "(", "anchor", ".", "attr", "[", "'href'", "]", ")" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team.off_scheme
Returns the name of the offensive scheme the team ran in the given year. :year: Int representing the season year. :returns: A string representing the offensive scheme.
sportsref/nfl/teams.py
def off_scheme(self, year): """Returns the name of the offensive scheme the team ran in the given year. :year: Int representing the season year. :returns: A string representing the offensive scheme. """ scheme_text = self._year_info_pq(year, 'Offensive Scheme').text() m = re.search(r'Offensive Scheme[:\s]*(.+)\s*', scheme_text, re.I) if m: return m.group(1) else: return None
def off_scheme(self, year): """Returns the name of the offensive scheme the team ran in the given year. :year: Int representing the season year. :returns: A string representing the offensive scheme. """ scheme_text = self._year_info_pq(year, 'Offensive Scheme').text() m = re.search(r'Offensive Scheme[:\s]*(.+)\s*', scheme_text, re.I) if m: return m.group(1) else: return None
[ "Returns", "the", "name", "of", "the", "offensive", "scheme", "the", "team", "ran", "in", "the", "given", "year", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L322-L334
[ "def", "off_scheme", "(", "self", ",", "year", ")", ":", "scheme_text", "=", "self", ".", "_year_info_pq", "(", "year", ",", "'Offensive Scheme'", ")", ".", "text", "(", ")", "m", "=", "re", ".", "search", "(", "r'Offensive Scheme[:\\s]*(.+)\\s*'", ",", "scheme_text", ",", "re", ".", "I", ")", "if", "m", ":", "return", "m", ".", "group", "(", "1", ")", "else", ":", "return", "None" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team.def_alignment
Returns the name of the defensive alignment the team ran in the given year. :year: Int representing the season year. :returns: A string representing the defensive alignment.
sportsref/nfl/teams.py
def def_alignment(self, year): """Returns the name of the defensive alignment the team ran in the given year. :year: Int representing the season year. :returns: A string representing the defensive alignment. """ scheme_text = self._year_info_pq(year, 'Defensive Alignment').text() m = re.search(r'Defensive Alignment[:\s]*(.+)\s*', scheme_text, re.I) if m: return m.group(1) else: return None
def def_alignment(self, year): """Returns the name of the defensive alignment the team ran in the given year. :year: Int representing the season year. :returns: A string representing the defensive alignment. """ scheme_text = self._year_info_pq(year, 'Defensive Alignment').text() m = re.search(r'Defensive Alignment[:\s]*(.+)\s*', scheme_text, re.I) if m: return m.group(1) else: return None
[ "Returns", "the", "name", "of", "the", "defensive", "alignment", "the", "team", "ran", "in", "the", "given", "year", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L337-L349
[ "def", "def_alignment", "(", "self", ",", "year", ")", ":", "scheme_text", "=", "self", ".", "_year_info_pq", "(", "year", ",", "'Defensive Alignment'", ")", ".", "text", "(", ")", "m", "=", "re", ".", "search", "(", "r'Defensive Alignment[:\\s]*(.+)\\s*'", ",", "scheme_text", ",", "re", ".", "I", ")", "if", "m", ":", "return", "m", ".", "group", "(", "1", ")", "else", ":", "return", "None" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team.team_stats
Returns a Series (dict-like) of team stats from the team-season page. :year: Int representing the season. :returns: A Series of team stats.
sportsref/nfl/teams.py
def team_stats(self, year): """Returns a Series (dict-like) of team stats from the team-season page. :year: Int representing the season. :returns: A Series of team stats. """ doc = self.get_year_doc(year) table = doc('table#team_stats') df = sportsref.utils.parse_table(table) if df.empty: return pd.Series() return df.loc[df.player_id == 'Team Stats'].iloc[0]
def team_stats(self, year): """Returns a Series (dict-like) of team stats from the team-season page. :year: Int representing the season. :returns: A Series of team stats. """ doc = self.get_year_doc(year) table = doc('table#team_stats') df = sportsref.utils.parse_table(table) if df.empty: return pd.Series() return df.loc[df.player_id == 'Team Stats'].iloc[0]
[ "Returns", "a", "Series", "(", "dict", "-", "like", ")", "of", "team", "stats", "from", "the", "team", "-", "season", "page", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L352-L364
[ "def", "team_stats", "(", "self", ",", "year", ")", ":", "doc", "=", "self", ".", "get_year_doc", "(", "year", ")", "table", "=", "doc", "(", "'table#team_stats'", ")", "df", "=", "sportsref", ".", "utils", ".", "parse_table", "(", "table", ")", "if", "df", ".", "empty", ":", "return", "pd", ".", "Series", "(", ")", "return", "df", ".", "loc", "[", "df", ".", "player_id", "==", "'Team Stats'", "]", ".", "iloc", "[", "0", "]" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team.opp_stats
Returns a Series (dict-like) of the team's opponent's stats from the team-season page. :year: Int representing the season. :returns: A Series of team stats.
sportsref/nfl/teams.py
def opp_stats(self, year): """Returns a Series (dict-like) of the team's opponent's stats from the team-season page. :year: Int representing the season. :returns: A Series of team stats. """ doc = self.get_year_doc(year) table = doc('table#team_stats') df = sportsref.utils.parse_table(table) return df.loc[df.player_id == 'Opp. Stats'].iloc[0]
def opp_stats(self, year): """Returns a Series (dict-like) of the team's opponent's stats from the team-season page. :year: Int representing the season. :returns: A Series of team stats. """ doc = self.get_year_doc(year) table = doc('table#team_stats') df = sportsref.utils.parse_table(table) return df.loc[df.player_id == 'Opp. Stats'].iloc[0]
[ "Returns", "a", "Series", "(", "dict", "-", "like", ")", "of", "the", "team", "s", "opponent", "s", "stats", "from", "the", "team", "-", "season", "page", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L367-L377
[ "def", "opp_stats", "(", "self", ",", "year", ")", ":", "doc", "=", "self", ".", "get_year_doc", "(", "year", ")", "table", "=", "doc", "(", "'table#team_stats'", ")", "df", "=", "sportsref", ".", "utils", ".", "parse_table", "(", "table", ")", "return", "df", ".", "loc", "[", "df", ".", "player_id", "==", "'Opp. Stats'", "]", ".", "iloc", "[", "0", "]" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
Team.off_splits
Returns a DataFrame of offensive team splits for a season. :year: int representing the season. :returns: Pandas DataFrame of split data.
sportsref/nfl/teams.py
def off_splits(self, year): """Returns a DataFrame of offensive team splits for a season. :year: int representing the season. :returns: Pandas DataFrame of split data. """ doc = self.get_year_doc('{}_splits'.format(year)) tables = doc('table.stats_table') dfs = [sportsref.utils.parse_table(table) for table in tables.items()] dfs = [ df.assign(split=df.columns[0]) .rename(columns={df.columns[0]: 'split_value'}) for df in dfs ] if not dfs: return pd.DataFrame() return pd.concat(dfs).reset_index(drop=True)
def off_splits(self, year): """Returns a DataFrame of offensive team splits for a season. :year: int representing the season. :returns: Pandas DataFrame of split data. """ doc = self.get_year_doc('{}_splits'.format(year)) tables = doc('table.stats_table') dfs = [sportsref.utils.parse_table(table) for table in tables.items()] dfs = [ df.assign(split=df.columns[0]) .rename(columns={df.columns[0]: 'split_value'}) for df in dfs ] if not dfs: return pd.DataFrame() return pd.concat(dfs).reset_index(drop=True)
[ "Returns", "a", "DataFrame", "of", "offensive", "team", "splits", "for", "a", "season", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L394-L410
[ "def", "off_splits", "(", "self", ",", "year", ")", ":", "doc", "=", "self", ".", "get_year_doc", "(", "'{}_splits'", ".", "format", "(", "year", ")", ")", "tables", "=", "doc", "(", "'table.stats_table'", ")", "dfs", "=", "[", "sportsref", ".", "utils", ".", "parse_table", "(", "table", ")", "for", "table", "in", "tables", ".", "items", "(", ")", "]", "dfs", "=", "[", "df", ".", "assign", "(", "split", "=", "df", ".", "columns", "[", "0", "]", ")", ".", "rename", "(", "columns", "=", "{", "df", ".", "columns", "[", "0", "]", ":", "'split_value'", "}", ")", "for", "df", "in", "dfs", "]", "if", "not", "dfs", ":", "return", "pd", ".", "DataFrame", "(", ")", "return", "pd", ".", "concat", "(", "dfs", ")", ".", "reset_index", "(", "drop", "=", "True", ")" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
get_html
Gets the HTML for the given URL using a GET request. :url: the absolute URL of the desired page. :returns: a string of HTML.
sportsref/utils.py
def get_html(url): """Gets the HTML for the given URL using a GET request. :url: the absolute URL of the desired page. :returns: a string of HTML. """ global last_request_time with throttle_process_lock: with throttle_thread_lock: # sleep until THROTTLE_DELAY secs have passed since last request wait_left = THROTTLE_DELAY - (time.time() - last_request_time.value) if wait_left > 0: time.sleep(wait_left) # make request response = requests.get(url) # update last request time for throttling last_request_time.value = time.time() # raise ValueError on 4xx status code, get rid of comments, and return if 400 <= response.status_code < 500: raise ValueError( 'Status Code {} received fetching URL "{}"' .format(response.status_code, url) ) html = response.text html = html.replace('<!--', '').replace('-->', '') return html
def get_html(url): """Gets the HTML for the given URL using a GET request. :url: the absolute URL of the desired page. :returns: a string of HTML. """ global last_request_time with throttle_process_lock: with throttle_thread_lock: # sleep until THROTTLE_DELAY secs have passed since last request wait_left = THROTTLE_DELAY - (time.time() - last_request_time.value) if wait_left > 0: time.sleep(wait_left) # make request response = requests.get(url) # update last request time for throttling last_request_time.value = time.time() # raise ValueError on 4xx status code, get rid of comments, and return if 400 <= response.status_code < 500: raise ValueError( 'Status Code {} received fetching URL "{}"' .format(response.status_code, url) ) html = response.text html = html.replace('<!--', '').replace('-->', '') return html
[ "Gets", "the", "HTML", "for", "the", "given", "URL", "using", "a", "GET", "request", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/utils.py#L26-L55
[ "def", "get_html", "(", "url", ")", ":", "global", "last_request_time", "with", "throttle_process_lock", ":", "with", "throttle_thread_lock", ":", "# sleep until THROTTLE_DELAY secs have passed since last request", "wait_left", "=", "THROTTLE_DELAY", "-", "(", "time", ".", "time", "(", ")", "-", "last_request_time", ".", "value", ")", "if", "wait_left", ">", "0", ":", "time", ".", "sleep", "(", "wait_left", ")", "# make request", "response", "=", "requests", ".", "get", "(", "url", ")", "# update last request time for throttling", "last_request_time", ".", "value", "=", "time", ".", "time", "(", ")", "# raise ValueError on 4xx status code, get rid of comments, and return", "if", "400", "<=", "response", ".", "status_code", "<", "500", ":", "raise", "ValueError", "(", "'Status Code {} received fetching URL \"{}\"'", ".", "format", "(", "response", ".", "status_code", ",", "url", ")", ")", "html", "=", "response", ".", "text", "html", "=", "html", ".", "replace", "(", "'<!--'", ",", "''", ")", ".", "replace", "(", "'-->'", ",", "''", ")", "return", "html" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
parse_table
Parses a table from sports-reference sites into a pandas dataframe. :param table: the PyQuery object representing the HTML table :param flatten: if True, flattens relative URLs to IDs. otherwise, leaves all fields as text without cleaning. :param footer: If True, returns the summary/footer of the page. Recommended to use this with flatten=False. Defaults to False. :returns: pd.DataFrame
sportsref/utils.py
def parse_table(table, flatten=True, footer=False): """Parses a table from sports-reference sites into a pandas dataframe. :param table: the PyQuery object representing the HTML table :param flatten: if True, flattens relative URLs to IDs. otherwise, leaves all fields as text without cleaning. :param footer: If True, returns the summary/footer of the page. Recommended to use this with flatten=False. Defaults to False. :returns: pd.DataFrame """ if not len(table): return pd.DataFrame() # get columns columns = [c.attrib['data-stat'] for c in table('thead tr:not([class]) th[data-stat]')] # get data rows = list(table('tbody tr' if not footer else 'tfoot tr') .not_('.thead, .stat_total, .stat_average').items()) data = [ [flatten_links(td) if flatten else td.text() for td in row.items('th,td')] for row in rows ] # make DataFrame df = pd.DataFrame(data, columns=columns, dtype='float') # add has_class columns allClasses = set( cls for row in rows if row.attr['class'] for cls in row.attr['class'].split() ) for cls in allClasses: df['has_class_' + cls] = [ bool(row.attr['class'] and cls in row.attr['class'].split()) for row in rows ] # cleaning the DataFrame df.drop(['ranker', 'Xxx', 'Yyy', 'Zzz'], axis=1, inplace=True, errors='ignore') # year_id -> year (as int) if 'year_id' in df.columns: df.rename(columns={'year_id': 'year'}, inplace=True) if flatten: df.year = df.year.fillna(method='ffill') df['year'] = df.year.map(lambda s: str(s)[:4]).astype(int) # pos -> position if 'pos' in df.columns: df.rename(columns={'pos': 'position'}, inplace=True) # boxscore_word, game_date -> boxscore_id and separate into Y, M, D columns for bs_id_col in ('boxscore_word', 'game_date', 'box_score_text'): if bs_id_col in df.columns: df.rename(columns={bs_id_col: 'boxscore_id'}, inplace=True) break # ignore *, +, and other characters used to note things df.replace(re.compile(r'[\*\+\u2605]', re.U), '', inplace=True) for col in df.columns: if hasattr(df[col], 'str'): df[col] = df[col].str.strip() 
# player -> player_id and/or player_name if 'player' in df.columns: if flatten: df.rename(columns={'player': 'player_id'}, inplace=True) # when flattening, keep a column for names player_names = parse_table(table, flatten=False)['player_name'] df['player_name'] = player_names else: df.rename(columns={'player': 'player_name'}, inplace=True) # team, team_name -> team_id for team_col in ('team', 'team_name'): if team_col in df.columns: # first, get rid of faulty rows df = df.loc[~df[team_col].isin(['XXX'])] if flatten: df.rename(columns={team_col: 'team_id'}, inplace=True) # season -> int if 'season' in df.columns and flatten: df['season'] = df['season'].astype(int) # handle date_game columns (different types) if 'date_game' in df.columns and flatten: date_re = r'month=(?P<month>\d+)&day=(?P<day>\d+)&year=(?P<year>\d+)' date_df = df['date_game'].str.extract(date_re, expand=True) if date_df.notnull().all(axis=1).any(): df = pd.concat((df, date_df), axis=1) else: df.rename(columns={'date_game': 'boxscore_id'}, inplace=True) # game_location -> is_home if 'game_location' in df.columns and flatten: df['game_location'] = df['game_location'].isnull() df.rename(columns={'game_location': 'is_home'}, inplace=True) # mp: (min:sec) -> float(min + sec / 60), notes -> NaN, new column if 'mp' in df.columns and df.dtypes['mp'] == object and flatten: mp_df = df['mp'].str.extract( r'(?P<m>\d+):(?P<s>\d+)', expand=True).astype(float) no_match = mp_df.isnull().all(axis=1) if no_match.any(): df.loc[no_match, 'note'] = df.loc[no_match, 'mp'] df['mp'] = mp_df['m'] + mp_df['s'] / 60 # converts number-y things to floats def convert_to_float(val): # percentages: (number%) -> float(number * 0.01) m = re.search(r'([-\.\d]+)\%', val if isinstance(val, basestring) else str(val), re.U) try: if m: return float(m.group(1)) / 100 if m else val if m: return int(m.group(1)) + int(m.group(2)) / 60 except ValueError: return val # salaries: $ABC,DEF,GHI -> float(ABCDEFGHI) m = re.search(r'\$[\d,]+', val if 
isinstance(val, basestring) else str(val), re.U) try: if m: return float(re.sub(r'\$|,', '', val)) except Exception: return val # generally try to coerce to float, unless it's an int or bool try: if isinstance(val, (int, bool)): return val else: return float(val) except Exception: return val if flatten: df = df.applymap(convert_to_float) df = df.loc[df.astype(bool).any(axis=1)] return df
def parse_table(table, flatten=True, footer=False): """Parses a table from sports-reference sites into a pandas dataframe. :param table: the PyQuery object representing the HTML table :param flatten: if True, flattens relative URLs to IDs. otherwise, leaves all fields as text without cleaning. :param footer: If True, returns the summary/footer of the page. Recommended to use this with flatten=False. Defaults to False. :returns: pd.DataFrame """ if not len(table): return pd.DataFrame() # get columns columns = [c.attrib['data-stat'] for c in table('thead tr:not([class]) th[data-stat]')] # get data rows = list(table('tbody tr' if not footer else 'tfoot tr') .not_('.thead, .stat_total, .stat_average').items()) data = [ [flatten_links(td) if flatten else td.text() for td in row.items('th,td')] for row in rows ] # make DataFrame df = pd.DataFrame(data, columns=columns, dtype='float') # add has_class columns allClasses = set( cls for row in rows if row.attr['class'] for cls in row.attr['class'].split() ) for cls in allClasses: df['has_class_' + cls] = [ bool(row.attr['class'] and cls in row.attr['class'].split()) for row in rows ] # cleaning the DataFrame df.drop(['ranker', 'Xxx', 'Yyy', 'Zzz'], axis=1, inplace=True, errors='ignore') # year_id -> year (as int) if 'year_id' in df.columns: df.rename(columns={'year_id': 'year'}, inplace=True) if flatten: df.year = df.year.fillna(method='ffill') df['year'] = df.year.map(lambda s: str(s)[:4]).astype(int) # pos -> position if 'pos' in df.columns: df.rename(columns={'pos': 'position'}, inplace=True) # boxscore_word, game_date -> boxscore_id and separate into Y, M, D columns for bs_id_col in ('boxscore_word', 'game_date', 'box_score_text'): if bs_id_col in df.columns: df.rename(columns={bs_id_col: 'boxscore_id'}, inplace=True) break # ignore *, +, and other characters used to note things df.replace(re.compile(r'[\*\+\u2605]', re.U), '', inplace=True) for col in df.columns: if hasattr(df[col], 'str'): df[col] = df[col].str.strip() 
# player -> player_id and/or player_name if 'player' in df.columns: if flatten: df.rename(columns={'player': 'player_id'}, inplace=True) # when flattening, keep a column for names player_names = parse_table(table, flatten=False)['player_name'] df['player_name'] = player_names else: df.rename(columns={'player': 'player_name'}, inplace=True) # team, team_name -> team_id for team_col in ('team', 'team_name'): if team_col in df.columns: # first, get rid of faulty rows df = df.loc[~df[team_col].isin(['XXX'])] if flatten: df.rename(columns={team_col: 'team_id'}, inplace=True) # season -> int if 'season' in df.columns and flatten: df['season'] = df['season'].astype(int) # handle date_game columns (different types) if 'date_game' in df.columns and flatten: date_re = r'month=(?P<month>\d+)&day=(?P<day>\d+)&year=(?P<year>\d+)' date_df = df['date_game'].str.extract(date_re, expand=True) if date_df.notnull().all(axis=1).any(): df = pd.concat((df, date_df), axis=1) else: df.rename(columns={'date_game': 'boxscore_id'}, inplace=True) # game_location -> is_home if 'game_location' in df.columns and flatten: df['game_location'] = df['game_location'].isnull() df.rename(columns={'game_location': 'is_home'}, inplace=True) # mp: (min:sec) -> float(min + sec / 60), notes -> NaN, new column if 'mp' in df.columns and df.dtypes['mp'] == object and flatten: mp_df = df['mp'].str.extract( r'(?P<m>\d+):(?P<s>\d+)', expand=True).astype(float) no_match = mp_df.isnull().all(axis=1) if no_match.any(): df.loc[no_match, 'note'] = df.loc[no_match, 'mp'] df['mp'] = mp_df['m'] + mp_df['s'] / 60 # converts number-y things to floats def convert_to_float(val): # percentages: (number%) -> float(number * 0.01) m = re.search(r'([-\.\d]+)\%', val if isinstance(val, basestring) else str(val), re.U) try: if m: return float(m.group(1)) / 100 if m else val if m: return int(m.group(1)) + int(m.group(2)) / 60 except ValueError: return val # salaries: $ABC,DEF,GHI -> float(ABCDEFGHI) m = re.search(r'\$[\d,]+', val if 
isinstance(val, basestring) else str(val), re.U) try: if m: return float(re.sub(r'\$|,', '', val)) except Exception: return val # generally try to coerce to float, unless it's an int or bool try: if isinstance(val, (int, bool)): return val else: return float(val) except Exception: return val if flatten: df = df.applymap(convert_to_float) df = df.loc[df.astype(bool).any(axis=1)] return df
[ "Parses", "a", "table", "from", "sports", "-", "reference", "sites", "into", "a", "pandas", "dataframe", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/utils.py#L58-L208
[ "def", "parse_table", "(", "table", ",", "flatten", "=", "True", ",", "footer", "=", "False", ")", ":", "if", "not", "len", "(", "table", ")", ":", "return", "pd", ".", "DataFrame", "(", ")", "# get columns", "columns", "=", "[", "c", ".", "attrib", "[", "'data-stat'", "]", "for", "c", "in", "table", "(", "'thead tr:not([class]) th[data-stat]'", ")", "]", "# get data", "rows", "=", "list", "(", "table", "(", "'tbody tr'", "if", "not", "footer", "else", "'tfoot tr'", ")", ".", "not_", "(", "'.thead, .stat_total, .stat_average'", ")", ".", "items", "(", ")", ")", "data", "=", "[", "[", "flatten_links", "(", "td", ")", "if", "flatten", "else", "td", ".", "text", "(", ")", "for", "td", "in", "row", ".", "items", "(", "'th,td'", ")", "]", "for", "row", "in", "rows", "]", "# make DataFrame", "df", "=", "pd", ".", "DataFrame", "(", "data", ",", "columns", "=", "columns", ",", "dtype", "=", "'float'", ")", "# add has_class columns", "allClasses", "=", "set", "(", "cls", "for", "row", "in", "rows", "if", "row", ".", "attr", "[", "'class'", "]", "for", "cls", "in", "row", ".", "attr", "[", "'class'", "]", ".", "split", "(", ")", ")", "for", "cls", "in", "allClasses", ":", "df", "[", "'has_class_'", "+", "cls", "]", "=", "[", "bool", "(", "row", ".", "attr", "[", "'class'", "]", "and", "cls", "in", "row", ".", "attr", "[", "'class'", "]", ".", "split", "(", ")", ")", "for", "row", "in", "rows", "]", "# cleaning the DataFrame", "df", ".", "drop", "(", "[", "'ranker'", ",", "'Xxx'", ",", "'Yyy'", ",", "'Zzz'", "]", ",", "axis", "=", "1", ",", "inplace", "=", "True", ",", "errors", "=", "'ignore'", ")", "# year_id -> year (as int)", "if", "'year_id'", "in", "df", ".", "columns", ":", "df", ".", "rename", "(", "columns", "=", "{", "'year_id'", ":", "'year'", "}", ",", "inplace", "=", "True", ")", "if", "flatten", ":", "df", ".", "year", "=", "df", ".", "year", ".", "fillna", "(", "method", "=", "'ffill'", ")", "df", "[", "'year'", "]", "=", "df", ".", "year", ".", "map", "(", 
"lambda", "s", ":", "str", "(", "s", ")", "[", ":", "4", "]", ")", ".", "astype", "(", "int", ")", "# pos -> position", "if", "'pos'", "in", "df", ".", "columns", ":", "df", ".", "rename", "(", "columns", "=", "{", "'pos'", ":", "'position'", "}", ",", "inplace", "=", "True", ")", "# boxscore_word, game_date -> boxscore_id and separate into Y, M, D columns", "for", "bs_id_col", "in", "(", "'boxscore_word'", ",", "'game_date'", ",", "'box_score_text'", ")", ":", "if", "bs_id_col", "in", "df", ".", "columns", ":", "df", ".", "rename", "(", "columns", "=", "{", "bs_id_col", ":", "'boxscore_id'", "}", ",", "inplace", "=", "True", ")", "break", "# ignore *, +, and other characters used to note things", "df", ".", "replace", "(", "re", ".", "compile", "(", "r'[\\*\\+\\u2605]'", ",", "re", ".", "U", ")", ",", "''", ",", "inplace", "=", "True", ")", "for", "col", "in", "df", ".", "columns", ":", "if", "hasattr", "(", "df", "[", "col", "]", ",", "'str'", ")", ":", "df", "[", "col", "]", "=", "df", "[", "col", "]", ".", "str", ".", "strip", "(", ")", "# player -> player_id and/or player_name", "if", "'player'", "in", "df", ".", "columns", ":", "if", "flatten", ":", "df", ".", "rename", "(", "columns", "=", "{", "'player'", ":", "'player_id'", "}", ",", "inplace", "=", "True", ")", "# when flattening, keep a column for names", "player_names", "=", "parse_table", "(", "table", ",", "flatten", "=", "False", ")", "[", "'player_name'", "]", "df", "[", "'player_name'", "]", "=", "player_names", "else", ":", "df", ".", "rename", "(", "columns", "=", "{", "'player'", ":", "'player_name'", "}", ",", "inplace", "=", "True", ")", "# team, team_name -> team_id", "for", "team_col", "in", "(", "'team'", ",", "'team_name'", ")", ":", "if", "team_col", "in", "df", ".", "columns", ":", "# first, get rid of faulty rows", "df", "=", "df", ".", "loc", "[", "~", "df", "[", "team_col", "]", ".", "isin", "(", "[", "'XXX'", "]", ")", "]", "if", "flatten", ":", "df", ".", "rename", "(", "columns", 
"=", "{", "team_col", ":", "'team_id'", "}", ",", "inplace", "=", "True", ")", "# season -> int", "if", "'season'", "in", "df", ".", "columns", "and", "flatten", ":", "df", "[", "'season'", "]", "=", "df", "[", "'season'", "]", ".", "astype", "(", "int", ")", "# handle date_game columns (different types)", "if", "'date_game'", "in", "df", ".", "columns", "and", "flatten", ":", "date_re", "=", "r'month=(?P<month>\\d+)&day=(?P<day>\\d+)&year=(?P<year>\\d+)'", "date_df", "=", "df", "[", "'date_game'", "]", ".", "str", ".", "extract", "(", "date_re", ",", "expand", "=", "True", ")", "if", "date_df", ".", "notnull", "(", ")", ".", "all", "(", "axis", "=", "1", ")", ".", "any", "(", ")", ":", "df", "=", "pd", ".", "concat", "(", "(", "df", ",", "date_df", ")", ",", "axis", "=", "1", ")", "else", ":", "df", ".", "rename", "(", "columns", "=", "{", "'date_game'", ":", "'boxscore_id'", "}", ",", "inplace", "=", "True", ")", "# game_location -> is_home", "if", "'game_location'", "in", "df", ".", "columns", "and", "flatten", ":", "df", "[", "'game_location'", "]", "=", "df", "[", "'game_location'", "]", ".", "isnull", "(", ")", "df", ".", "rename", "(", "columns", "=", "{", "'game_location'", ":", "'is_home'", "}", ",", "inplace", "=", "True", ")", "# mp: (min:sec) -> float(min + sec / 60), notes -> NaN, new column", "if", "'mp'", "in", "df", ".", "columns", "and", "df", ".", "dtypes", "[", "'mp'", "]", "==", "object", "and", "flatten", ":", "mp_df", "=", "df", "[", "'mp'", "]", ".", "str", ".", "extract", "(", "r'(?P<m>\\d+):(?P<s>\\d+)'", ",", "expand", "=", "True", ")", ".", "astype", "(", "float", ")", "no_match", "=", "mp_df", ".", "isnull", "(", ")", ".", "all", "(", "axis", "=", "1", ")", "if", "no_match", ".", "any", "(", ")", ":", "df", ".", "loc", "[", "no_match", ",", "'note'", "]", "=", "df", ".", "loc", "[", "no_match", ",", "'mp'", "]", "df", "[", "'mp'", "]", "=", "mp_df", "[", "'m'", "]", "+", "mp_df", "[", "'s'", "]", "/", "60", "# converts number-y things to 
floats", "def", "convert_to_float", "(", "val", ")", ":", "# percentages: (number%) -> float(number * 0.01)", "m", "=", "re", ".", "search", "(", "r'([-\\.\\d]+)\\%'", ",", "val", "if", "isinstance", "(", "val", ",", "basestring", ")", "else", "str", "(", "val", ")", ",", "re", ".", "U", ")", "try", ":", "if", "m", ":", "return", "float", "(", "m", ".", "group", "(", "1", ")", ")", "/", "100", "if", "m", "else", "val", "if", "m", ":", "return", "int", "(", "m", ".", "group", "(", "1", ")", ")", "+", "int", "(", "m", ".", "group", "(", "2", ")", ")", "/", "60", "except", "ValueError", ":", "return", "val", "# salaries: $ABC,DEF,GHI -> float(ABCDEFGHI)", "m", "=", "re", ".", "search", "(", "r'\\$[\\d,]+'", ",", "val", "if", "isinstance", "(", "val", ",", "basestring", ")", "else", "str", "(", "val", ")", ",", "re", ".", "U", ")", "try", ":", "if", "m", ":", "return", "float", "(", "re", ".", "sub", "(", "r'\\$|,'", ",", "''", ",", "val", ")", ")", "except", "Exception", ":", "return", "val", "# generally try to coerce to float, unless it's an int or bool", "try", ":", "if", "isinstance", "(", "val", ",", "(", "int", ",", "bool", ")", ")", ":", "return", "val", "else", ":", "return", "float", "(", "val", ")", "except", "Exception", ":", "return", "val", "if", "flatten", ":", "df", "=", "df", ".", "applymap", "(", "convert_to_float", ")", "df", "=", "df", ".", "loc", "[", "df", ".", "astype", "(", "bool", ")", ".", "any", "(", "axis", "=", "1", ")", "]", "return", "df" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
parse_info_table
Parses an info table, like the "Game Info" table or the "Officials" table on the PFR Boxscore page. Keys are lower case and have spaces/special characters converted to underscores. :table: PyQuery object representing the HTML table. :returns: A dictionary representing the information.
sportsref/utils.py
def parse_info_table(table): """Parses an info table, like the "Game Info" table or the "Officials" table on the PFR Boxscore page. Keys are lower case and have spaces/special characters converted to underscores. :table: PyQuery object representing the HTML table. :returns: A dictionary representing the information. """ ret = {} for tr in list(table('tr').not_('.thead').items()): th, td = list(tr('th, td').items()) key = th.text().lower() key = re.sub(r'\W', '_', key) val = sportsref.utils.flatten_links(td) ret[key] = val return ret
def parse_info_table(table): """Parses an info table, like the "Game Info" table or the "Officials" table on the PFR Boxscore page. Keys are lower case and have spaces/special characters converted to underscores. :table: PyQuery object representing the HTML table. :returns: A dictionary representing the information. """ ret = {} for tr in list(table('tr').not_('.thead').items()): th, td = list(tr('th, td').items()) key = th.text().lower() key = re.sub(r'\W', '_', key) val = sportsref.utils.flatten_links(td) ret[key] = val return ret
[ "Parses", "an", "info", "table", "like", "the", "Game", "Info", "table", "or", "the", "Officials", "table", "on", "the", "PFR", "Boxscore", "page", ".", "Keys", "are", "lower", "case", "and", "have", "spaces", "/", "special", "characters", "converted", "to", "underscores", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/utils.py#L211-L226
[ "def", "parse_info_table", "(", "table", ")", ":", "ret", "=", "{", "}", "for", "tr", "in", "list", "(", "table", "(", "'tr'", ")", ".", "not_", "(", "'.thead'", ")", ".", "items", "(", ")", ")", ":", "th", ",", "td", "=", "list", "(", "tr", "(", "'th, td'", ")", ".", "items", "(", ")", ")", "key", "=", "th", ".", "text", "(", ")", ".", "lower", "(", ")", "key", "=", "re", ".", "sub", "(", "r'\\W'", ",", "'_'", ",", "key", ")", "val", "=", "sportsref", ".", "utils", ".", "flatten_links", "(", "td", ")", "ret", "[", "key", "]", "=", "val", "return", "ret" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
flatten_links
Flattens relative URLs within text of a table cell to IDs and returns the result. :td: the PyQuery object for the HTML to convert :returns: the string with the links flattened to IDs
sportsref/utils.py
def flatten_links(td, _recurse=False): """Flattens relative URLs within text of a table cell to IDs and returns the result. :td: the PyQuery object for the HTML to convert :returns: the string with the links flattened to IDs """ # helper function to flatten individual strings/links def _flatten_node(c): if isinstance(c, basestring): return c.strip() elif 'href' in c.attrib: c_id = rel_url_to_id(c.attrib['href']) return c_id if c_id else c.text_content().strip() else: return flatten_links(pq(c), _recurse=True) # if there's no text, just return None if td is None or not td.text(): return '' if _recurse else None td.remove('span.note') return ''.join(_flatten_node(c) for c in td.contents())
def flatten_links(td, _recurse=False): """Flattens relative URLs within text of a table cell to IDs and returns the result. :td: the PyQuery object for the HTML to convert :returns: the string with the links flattened to IDs """ # helper function to flatten individual strings/links def _flatten_node(c): if isinstance(c, basestring): return c.strip() elif 'href' in c.attrib: c_id = rel_url_to_id(c.attrib['href']) return c_id if c_id else c.text_content().strip() else: return flatten_links(pq(c), _recurse=True) # if there's no text, just return None if td is None or not td.text(): return '' if _recurse else None td.remove('span.note') return ''.join(_flatten_node(c) for c in td.contents())
[ "Flattens", "relative", "URLs", "within", "text", "of", "a", "table", "cell", "to", "IDs", "and", "returns", "the", "result", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/utils.py#L238-L261
[ "def", "flatten_links", "(", "td", ",", "_recurse", "=", "False", ")", ":", "# helper function to flatten individual strings/links", "def", "_flatten_node", "(", "c", ")", ":", "if", "isinstance", "(", "c", ",", "basestring", ")", ":", "return", "c", ".", "strip", "(", ")", "elif", "'href'", "in", "c", ".", "attrib", ":", "c_id", "=", "rel_url_to_id", "(", "c", ".", "attrib", "[", "'href'", "]", ")", "return", "c_id", "if", "c_id", "else", "c", ".", "text_content", "(", ")", ".", "strip", "(", ")", "else", ":", "return", "flatten_links", "(", "pq", "(", "c", ")", ",", "_recurse", "=", "True", ")", "# if there's no text, just return None", "if", "td", "is", "None", "or", "not", "td", ".", "text", "(", ")", ":", "return", "''", "if", "_recurse", "else", "None", "td", ".", "remove", "(", "'span.note'", ")", "return", "''", ".", "join", "(", "_flatten_node", "(", "c", ")", "for", "c", "in", "td", ".", "contents", "(", ")", ")" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
rel_url_to_id
Converts a relative URL to a unique ID. Here, 'ID' refers generally to the unique ID for a given 'type' that a given datum has. For example, 'BradTo00' is Tom Brady's player ID - this corresponds to his relative URL, '/players/B/BradTo00.htm'. Similarly, '201409070dal' refers to the boxscore of the SF @ DAL game on 09/07/14. Supported types: * player/... * boxscores/... * teams/... * years/... * leagues/... * awards/... * coaches/... * officials/... * schools/... * schools/high_schools.cgi?id=... :returns: ID associated with the given relative URL.
sportsref/utils.py
def rel_url_to_id(url): """Converts a relative URL to a unique ID. Here, 'ID' refers generally to the unique ID for a given 'type' that a given datum has. For example, 'BradTo00' is Tom Brady's player ID - this corresponds to his relative URL, '/players/B/BradTo00.htm'. Similarly, '201409070dal' refers to the boxscore of the SF @ DAL game on 09/07/14. Supported types: * player/... * boxscores/... * teams/... * years/... * leagues/... * awards/... * coaches/... * officials/... * schools/... * schools/high_schools.cgi?id=... :returns: ID associated with the given relative URL. """ yearRegex = r'.*/years/(\d{4}).*|.*/gamelog/(\d{4}).*' playerRegex = r'.*/players/(?:\w/)?(.+?)(?:/|\.html?)' boxscoresRegex = r'.*/boxscores/(.+?)\.html?' teamRegex = r'.*/teams/(\w{3})/.*' coachRegex = r'.*/coaches/(.+?)\.html?' stadiumRegex = r'.*/stadiums/(.+?)\.html?' refRegex = r'.*/officials/(.+?r)\.html?' collegeRegex = r'.*/schools/(\S+?)/.*|.*college=([^&]+)' hsRegex = r'.*/schools/high_schools\.cgi\?id=([^\&]{8})' bsDateRegex = r'.*/boxscores/index\.f?cgi\?(month=\d+&day=\d+&year=\d+)' leagueRegex = r'.*/leagues/(.*_\d{4}).*' awardRegex = r'.*/awards/(.+)\.htm' regexes = [ yearRegex, playerRegex, boxscoresRegex, teamRegex, coachRegex, stadiumRegex, refRegex, collegeRegex, hsRegex, bsDateRegex, leagueRegex, awardRegex, ] for regex in regexes: match = re.match(regex, url, re.I) if match: return [_f for _f in match.groups() if _f][0] # things we don't want to match but don't want to print a WARNING if any( url.startswith(s) for s in ( '/play-index/', ) ): return url print('WARNING. NO MATCH WAS FOUND FOR "{}"'.format(url)) return url
def rel_url_to_id(url): """Converts a relative URL to a unique ID. Here, 'ID' refers generally to the unique ID for a given 'type' that a given datum has. For example, 'BradTo00' is Tom Brady's player ID - this corresponds to his relative URL, '/players/B/BradTo00.htm'. Similarly, '201409070dal' refers to the boxscore of the SF @ DAL game on 09/07/14. Supported types: * player/... * boxscores/... * teams/... * years/... * leagues/... * awards/... * coaches/... * officials/... * schools/... * schools/high_schools.cgi?id=... :returns: ID associated with the given relative URL. """ yearRegex = r'.*/years/(\d{4}).*|.*/gamelog/(\d{4}).*' playerRegex = r'.*/players/(?:\w/)?(.+?)(?:/|\.html?)' boxscoresRegex = r'.*/boxscores/(.+?)\.html?' teamRegex = r'.*/teams/(\w{3})/.*' coachRegex = r'.*/coaches/(.+?)\.html?' stadiumRegex = r'.*/stadiums/(.+?)\.html?' refRegex = r'.*/officials/(.+?r)\.html?' collegeRegex = r'.*/schools/(\S+?)/.*|.*college=([^&]+)' hsRegex = r'.*/schools/high_schools\.cgi\?id=([^\&]{8})' bsDateRegex = r'.*/boxscores/index\.f?cgi\?(month=\d+&day=\d+&year=\d+)' leagueRegex = r'.*/leagues/(.*_\d{4}).*' awardRegex = r'.*/awards/(.+)\.htm' regexes = [ yearRegex, playerRegex, boxscoresRegex, teamRegex, coachRegex, stadiumRegex, refRegex, collegeRegex, hsRegex, bsDateRegex, leagueRegex, awardRegex, ] for regex in regexes: match = re.match(regex, url, re.I) if match: return [_f for _f in match.groups() if _f][0] # things we don't want to match but don't want to print a WARNING if any( url.startswith(s) for s in ( '/play-index/', ) ): return url print('WARNING. NO MATCH WAS FOUND FOR "{}"'.format(url)) return url
[ "Converts", "a", "relative", "URL", "to", "a", "unique", "ID", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/utils.py#L265-L330
[ "def", "rel_url_to_id", "(", "url", ")", ":", "yearRegex", "=", "r'.*/years/(\\d{4}).*|.*/gamelog/(\\d{4}).*'", "playerRegex", "=", "r'.*/players/(?:\\w/)?(.+?)(?:/|\\.html?)'", "boxscoresRegex", "=", "r'.*/boxscores/(.+?)\\.html?'", "teamRegex", "=", "r'.*/teams/(\\w{3})/.*'", "coachRegex", "=", "r'.*/coaches/(.+?)\\.html?'", "stadiumRegex", "=", "r'.*/stadiums/(.+?)\\.html?'", "refRegex", "=", "r'.*/officials/(.+?r)\\.html?'", "collegeRegex", "=", "r'.*/schools/(\\S+?)/.*|.*college=([^&]+)'", "hsRegex", "=", "r'.*/schools/high_schools\\.cgi\\?id=([^\\&]{8})'", "bsDateRegex", "=", "r'.*/boxscores/index\\.f?cgi\\?(month=\\d+&day=\\d+&year=\\d+)'", "leagueRegex", "=", "r'.*/leagues/(.*_\\d{4}).*'", "awardRegex", "=", "r'.*/awards/(.+)\\.htm'", "regexes", "=", "[", "yearRegex", ",", "playerRegex", ",", "boxscoresRegex", ",", "teamRegex", ",", "coachRegex", ",", "stadiumRegex", ",", "refRegex", ",", "collegeRegex", ",", "hsRegex", ",", "bsDateRegex", ",", "leagueRegex", ",", "awardRegex", ",", "]", "for", "regex", "in", "regexes", ":", "match", "=", "re", ".", "match", "(", "regex", ",", "url", ",", "re", ".", "I", ")", "if", "match", ":", "return", "[", "_f", "for", "_f", "in", "match", ".", "groups", "(", ")", "if", "_f", "]", "[", "0", "]", "# things we don't want to match but don't want to print a WARNING", "if", "any", "(", "url", ".", "startswith", "(", "s", ")", "for", "s", "in", "(", "'/play-index/'", ",", ")", ")", ":", "return", "url", "print", "(", "'WARNING. NO MATCH WAS FOUND FOR \"{}\"'", ".", "format", "(", "url", ")", ")", "return", "url" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
PlayerSeasonFinder
Docstring will be filled in by __init__.py
sportsref/nfl/finders/PSF.py
def PlayerSeasonFinder(**kwargs): """ Docstring will be filled in by __init__.py """ if 'offset' not in kwargs: kwargs['offset'] = 0 playerSeasons = [] while True: querystring = _kwargs_to_qs(**kwargs) url = '{}?{}'.format(PSF_URL, querystring) if kwargs.get('verbose', False): print(url) html = utils.get_html(url) doc = pq(html) table = doc('table#results') df = utils.parse_table(table) if df.empty: break thisSeason = list(zip(df.player_id, df.year)) playerSeasons.extend(thisSeason) if doc('*:contains("Next Page")'): kwargs['offset'] += 100 else: break return playerSeasons
def PlayerSeasonFinder(**kwargs): """ Docstring will be filled in by __init__.py """ if 'offset' not in kwargs: kwargs['offset'] = 0 playerSeasons = [] while True: querystring = _kwargs_to_qs(**kwargs) url = '{}?{}'.format(PSF_URL, querystring) if kwargs.get('verbose', False): print(url) html = utils.get_html(url) doc = pq(html) table = doc('table#results') df = utils.parse_table(table) if df.empty: break thisSeason = list(zip(df.player_id, df.year)) playerSeasons.extend(thisSeason) if doc('*:contains("Next Page")'): kwargs['offset'] += 100 else: break return playerSeasons
[ "Docstring", "will", "be", "filled", "in", "by", "__init__", ".", "py" ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/finders/PSF.py#L22-L49
[ "def", "PlayerSeasonFinder", "(", "*", "*", "kwargs", ")", ":", "if", "'offset'", "not", "in", "kwargs", ":", "kwargs", "[", "'offset'", "]", "=", "0", "playerSeasons", "=", "[", "]", "while", "True", ":", "querystring", "=", "_kwargs_to_qs", "(", "*", "*", "kwargs", ")", "url", "=", "'{}?{}'", ".", "format", "(", "PSF_URL", ",", "querystring", ")", "if", "kwargs", ".", "get", "(", "'verbose'", ",", "False", ")", ":", "print", "(", "url", ")", "html", "=", "utils", ".", "get_html", "(", "url", ")", "doc", "=", "pq", "(", "html", ")", "table", "=", "doc", "(", "'table#results'", ")", "df", "=", "utils", ".", "parse_table", "(", "table", ")", "if", "df", ".", "empty", ":", "break", "thisSeason", "=", "list", "(", "zip", "(", "df", ".", "player_id", ",", "df", ".", "year", ")", ")", "playerSeasons", ".", "extend", "(", "thisSeason", ")", "if", "doc", "(", "'*:contains(\"Next Page\")'", ")", ":", "kwargs", "[", "'offset'", "]", "+=", "100", "else", ":", "break", "return", "playerSeasons" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
_kwargs_to_qs
Converts kwargs given to PSF to a querystring. :returns: the querystring.
sportsref/nfl/finders/PSF.py
def _kwargs_to_qs(**kwargs): """Converts kwargs given to PSF to a querystring. :returns: the querystring. """ # start with defaults inpOptDef = inputs_options_defaults() opts = { name: dct['value'] for name, dct in inpOptDef.items() } # clean up keys and values for k, v in kwargs.items(): del kwargs[k] # bool => 'Y'|'N' if isinstance(v, bool): kwargs[k] = 'Y' if v else 'N' # tm, team => team_id elif k.lower() in ('tm', 'team'): kwargs['team_id'] = v # yr, year, yrs, years => year_min, year_max elif k.lower() in ('yr', 'year', 'yrs', 'years'): if isinstance(v, collections.Iterable): lst = list(v) kwargs['year_min'] = min(lst) kwargs['year_max'] = max(lst) elif isinstance(v, basestring): v = list(map(int, v.split(','))) kwargs['year_min'] = min(v) kwargs['year_max'] = max(v) else: kwargs['year_min'] = v kwargs['year_max'] = v # pos, position, positions => pos[] elif k.lower() in ('pos', 'position', 'positions'): if isinstance(v, basestring): v = v.split(',') elif not isinstance(v, collections.Iterable): v = [v] kwargs['pos[]'] = v # draft_pos, ... 
=> draft_pos[] elif k.lower() in ( 'draft_pos', 'draftpos', 'draftposition', 'draftpositions', 'draft_position', 'draft_positions' ): if isinstance(v, basestring): v = v.split(',') elif not isinstance(v, collections.Iterable): v = [v] kwargs['draft_pos[]'] = v # if not one of these cases, put it back in kwargs else: kwargs[k] = v # update based on kwargs for k, v in kwargs.items(): # if overwriting a default, overwrite it (with a list so the # opts -> querystring list comp works) if k in opts or k in ('pos[]', 'draft_pos[]'): # if multiple values separated by commas, split em if isinstance(v, basestring): v = v.split(',') # otherwise, make sure it's a list elif not isinstance(v, collections.Iterable): v = [v] # then, add list of values to the querystring dict *opts* opts[k] = v if 'draft' in k: opts['draft'] = [1] opts['request'] = [1] opts['offset'] = [kwargs.get('offset', 0)] qs = '&'.join( '{}={}'.format(urllib.parse.quote_plus(name), val) for name, vals in sorted(opts.items()) for val in vals ) return qs
def _kwargs_to_qs(**kwargs): """Converts kwargs given to PSF to a querystring. :returns: the querystring. """ # start with defaults inpOptDef = inputs_options_defaults() opts = { name: dct['value'] for name, dct in inpOptDef.items() } # clean up keys and values for k, v in kwargs.items(): del kwargs[k] # bool => 'Y'|'N' if isinstance(v, bool): kwargs[k] = 'Y' if v else 'N' # tm, team => team_id elif k.lower() in ('tm', 'team'): kwargs['team_id'] = v # yr, year, yrs, years => year_min, year_max elif k.lower() in ('yr', 'year', 'yrs', 'years'): if isinstance(v, collections.Iterable): lst = list(v) kwargs['year_min'] = min(lst) kwargs['year_max'] = max(lst) elif isinstance(v, basestring): v = list(map(int, v.split(','))) kwargs['year_min'] = min(v) kwargs['year_max'] = max(v) else: kwargs['year_min'] = v kwargs['year_max'] = v # pos, position, positions => pos[] elif k.lower() in ('pos', 'position', 'positions'): if isinstance(v, basestring): v = v.split(',') elif not isinstance(v, collections.Iterable): v = [v] kwargs['pos[]'] = v # draft_pos, ... 
=> draft_pos[] elif k.lower() in ( 'draft_pos', 'draftpos', 'draftposition', 'draftpositions', 'draft_position', 'draft_positions' ): if isinstance(v, basestring): v = v.split(',') elif not isinstance(v, collections.Iterable): v = [v] kwargs['draft_pos[]'] = v # if not one of these cases, put it back in kwargs else: kwargs[k] = v # update based on kwargs for k, v in kwargs.items(): # if overwriting a default, overwrite it (with a list so the # opts -> querystring list comp works) if k in opts or k in ('pos[]', 'draft_pos[]'): # if multiple values separated by commas, split em if isinstance(v, basestring): v = v.split(',') # otherwise, make sure it's a list elif not isinstance(v, collections.Iterable): v = [v] # then, add list of values to the querystring dict *opts* opts[k] = v if 'draft' in k: opts['draft'] = [1] opts['request'] = [1] opts['offset'] = [kwargs.get('offset', 0)] qs = '&'.join( '{}={}'.format(urllib.parse.quote_plus(name), val) for name, vals in sorted(opts.items()) for val in vals ) return qs
[ "Converts", "kwargs", "given", "to", "PSF", "to", "a", "querystring", "." ]
mdgoldberg/sportsref
python
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/finders/PSF.py#L52-L131
[ "def", "_kwargs_to_qs", "(", "*", "*", "kwargs", ")", ":", "# start with defaults", "inpOptDef", "=", "inputs_options_defaults", "(", ")", "opts", "=", "{", "name", ":", "dct", "[", "'value'", "]", "for", "name", ",", "dct", "in", "inpOptDef", ".", "items", "(", ")", "}", "# clean up keys and values", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "del", "kwargs", "[", "k", "]", "# bool => 'Y'|'N'", "if", "isinstance", "(", "v", ",", "bool", ")", ":", "kwargs", "[", "k", "]", "=", "'Y'", "if", "v", "else", "'N'", "# tm, team => team_id", "elif", "k", ".", "lower", "(", ")", "in", "(", "'tm'", ",", "'team'", ")", ":", "kwargs", "[", "'team_id'", "]", "=", "v", "# yr, year, yrs, years => year_min, year_max", "elif", "k", ".", "lower", "(", ")", "in", "(", "'yr'", ",", "'year'", ",", "'yrs'", ",", "'years'", ")", ":", "if", "isinstance", "(", "v", ",", "collections", ".", "Iterable", ")", ":", "lst", "=", "list", "(", "v", ")", "kwargs", "[", "'year_min'", "]", "=", "min", "(", "lst", ")", "kwargs", "[", "'year_max'", "]", "=", "max", "(", "lst", ")", "elif", "isinstance", "(", "v", ",", "basestring", ")", ":", "v", "=", "list", "(", "map", "(", "int", ",", "v", ".", "split", "(", "','", ")", ")", ")", "kwargs", "[", "'year_min'", "]", "=", "min", "(", "v", ")", "kwargs", "[", "'year_max'", "]", "=", "max", "(", "v", ")", "else", ":", "kwargs", "[", "'year_min'", "]", "=", "v", "kwargs", "[", "'year_max'", "]", "=", "v", "# pos, position, positions => pos[]", "elif", "k", ".", "lower", "(", ")", "in", "(", "'pos'", ",", "'position'", ",", "'positions'", ")", ":", "if", "isinstance", "(", "v", ",", "basestring", ")", ":", "v", "=", "v", ".", "split", "(", "','", ")", "elif", "not", "isinstance", "(", "v", ",", "collections", ".", "Iterable", ")", ":", "v", "=", "[", "v", "]", "kwargs", "[", "'pos[]'", "]", "=", "v", "# draft_pos, ... 
=> draft_pos[]", "elif", "k", ".", "lower", "(", ")", "in", "(", "'draft_pos'", ",", "'draftpos'", ",", "'draftposition'", ",", "'draftpositions'", ",", "'draft_position'", ",", "'draft_positions'", ")", ":", "if", "isinstance", "(", "v", ",", "basestring", ")", ":", "v", "=", "v", ".", "split", "(", "','", ")", "elif", "not", "isinstance", "(", "v", ",", "collections", ".", "Iterable", ")", ":", "v", "=", "[", "v", "]", "kwargs", "[", "'draft_pos[]'", "]", "=", "v", "# if not one of these cases, put it back in kwargs", "else", ":", "kwargs", "[", "k", "]", "=", "v", "# update based on kwargs", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "# if overwriting a default, overwrite it (with a list so the", "# opts -> querystring list comp works)", "if", "k", "in", "opts", "or", "k", "in", "(", "'pos[]'", ",", "'draft_pos[]'", ")", ":", "# if multiple values separated by commas, split em", "if", "isinstance", "(", "v", ",", "basestring", ")", ":", "v", "=", "v", ".", "split", "(", "','", ")", "# otherwise, make sure it's a list", "elif", "not", "isinstance", "(", "v", ",", "collections", ".", "Iterable", ")", ":", "v", "=", "[", "v", "]", "# then, add list of values to the querystring dict *opts*", "opts", "[", "k", "]", "=", "v", "if", "'draft'", "in", "k", ":", "opts", "[", "'draft'", "]", "=", "[", "1", "]", "opts", "[", "'request'", "]", "=", "[", "1", "]", "opts", "[", "'offset'", "]", "=", "[", "kwargs", ".", "get", "(", "'offset'", ",", "0", ")", "]", "qs", "=", "'&'", ".", "join", "(", "'{}={}'", ".", "format", "(", "urllib", ".", "parse", ".", "quote_plus", "(", "name", ")", ",", "val", ")", "for", "name", ",", "vals", "in", "sorted", "(", "opts", ".", "items", "(", ")", ")", "for", "val", "in", "vals", ")", "return", "qs" ]
09f11ac856a23c96d666d1d510bb35d6f050b5c3
test
_Streamer__read_process
Main function for the processes that read from the HDF5 file. :param self: A reference to the streamer object that created these processes. :param path: The HDF5 path to the node to be read from. :param read_size: The length of the block along the outer dimension to read. :param cbuf: The circular buffer to place read elements into. :param stop: The Event that signals the process to stop reading. :param barrier: The Barrier that synchonises read cycles. :param cyclic: True if the process should read cyclically. :param offset: Offset into the dataset that this process should start reading at. :param read_skip: How many element to skip on each iteration. :param sync: GuardSynchonizer to order writes to the buffer. :return: Nothing
multitables.py
def _Streamer__read_process(self, path, read_size, cbuf, stop, barrier, cyclic, offset, read_skip, sync): """ Main function for the processes that read from the HDF5 file. :param self: A reference to the streamer object that created these processes. :param path: The HDF5 path to the node to be read from. :param read_size: The length of the block along the outer dimension to read. :param cbuf: The circular buffer to place read elements into. :param stop: The Event that signals the process to stop reading. :param barrier: The Barrier that synchonises read cycles. :param cyclic: True if the process should read cyclically. :param offset: Offset into the dataset that this process should start reading at. :param read_skip: How many element to skip on each iteration. :param sync: GuardSynchonizer to order writes to the buffer. :return: Nothing """ # Multi-process access to HDF5 seems to behave better there are no top level imports of PyTables. import tables as tb h5_file = tb.open_file(self.filename, 'r', **self.h5_kw_args) ary = h5_file.get_node(path) i = offset while not stop.is_set(): vals = ary[i:i + read_size] # If the read goes off the end of the dataset, then wrap to the start. if i + read_size > len(ary): vals = np.concatenate([vals, ary[0:read_size - len(vals)]]) if sync is None: # If no ordering is requested, then just write to the next available space in the buffer. with cbuf.put_direct() as put_ary: put_ary[:] = vals else: # Otherwise, use the sync object to ensure that writes occur in the order provided by i. # So i = 0 will write first, then i = block_size, then i = 2*block_size, etc... # The sync object has two ordered barriers so that acquisition and release of the buffer spaces # are synchronized in order, but the actual writing to the buffer can happen simultaneously. # If only one barrier were used, writing to the buffer would be linearised. 
with sync.do(cbuf.put_direct(), i, (i+read_size) % len(ary)) as put_ary: put_ary[:] = vals i += read_skip if cyclic: # If the next iteration is past the end of the dataset, wrap it around. if i >= len(ary): i %= len(ary) barrier.wait() else: # But if cyclic mode is disabled, break the loop as the work is now done. if i + read_size > len(ary): break
def _Streamer__read_process(self, path, read_size, cbuf, stop, barrier, cyclic, offset, read_skip, sync): """ Main function for the processes that read from the HDF5 file. :param self: A reference to the streamer object that created these processes. :param path: The HDF5 path to the node to be read from. :param read_size: The length of the block along the outer dimension to read. :param cbuf: The circular buffer to place read elements into. :param stop: The Event that signals the process to stop reading. :param barrier: The Barrier that synchonises read cycles. :param cyclic: True if the process should read cyclically. :param offset: Offset into the dataset that this process should start reading at. :param read_skip: How many element to skip on each iteration. :param sync: GuardSynchonizer to order writes to the buffer. :return: Nothing """ # Multi-process access to HDF5 seems to behave better there are no top level imports of PyTables. import tables as tb h5_file = tb.open_file(self.filename, 'r', **self.h5_kw_args) ary = h5_file.get_node(path) i = offset while not stop.is_set(): vals = ary[i:i + read_size] # If the read goes off the end of the dataset, then wrap to the start. if i + read_size > len(ary): vals = np.concatenate([vals, ary[0:read_size - len(vals)]]) if sync is None: # If no ordering is requested, then just write to the next available space in the buffer. with cbuf.put_direct() as put_ary: put_ary[:] = vals else: # Otherwise, use the sync object to ensure that writes occur in the order provided by i. # So i = 0 will write first, then i = block_size, then i = 2*block_size, etc... # The sync object has two ordered barriers so that acquisition and release of the buffer spaces # are synchronized in order, but the actual writing to the buffer can happen simultaneously. # If only one barrier were used, writing to the buffer would be linearised. 
with sync.do(cbuf.put_direct(), i, (i+read_size) % len(ary)) as put_ary: put_ary[:] = vals i += read_skip if cyclic: # If the next iteration is past the end of the dataset, wrap it around. if i >= len(ary): i %= len(ary) barrier.wait() else: # But if cyclic mode is disabled, break the loop as the work is now done. if i + read_size > len(ary): break
[ "Main", "function", "for", "the", "processes", "that", "read", "from", "the", "HDF5", "file", "." ]
ghcollin/multitables
python
https://github.com/ghcollin/multitables/blob/9654a45800289a20e66d2b0e0666149f0d370f93/multitables.py#L351-L401
[ "def", "_Streamer__read_process", "(", "self", ",", "path", ",", "read_size", ",", "cbuf", ",", "stop", ",", "barrier", ",", "cyclic", ",", "offset", ",", "read_skip", ",", "sync", ")", ":", "# Multi-process access to HDF5 seems to behave better there are no top level imports of PyTables.", "import", "tables", "as", "tb", "h5_file", "=", "tb", ".", "open_file", "(", "self", ".", "filename", ",", "'r'", ",", "*", "*", "self", ".", "h5_kw_args", ")", "ary", "=", "h5_file", ".", "get_node", "(", "path", ")", "i", "=", "offset", "while", "not", "stop", ".", "is_set", "(", ")", ":", "vals", "=", "ary", "[", "i", ":", "i", "+", "read_size", "]", "# If the read goes off the end of the dataset, then wrap to the start.", "if", "i", "+", "read_size", ">", "len", "(", "ary", ")", ":", "vals", "=", "np", ".", "concatenate", "(", "[", "vals", ",", "ary", "[", "0", ":", "read_size", "-", "len", "(", "vals", ")", "]", "]", ")", "if", "sync", "is", "None", ":", "# If no ordering is requested, then just write to the next available space in the buffer.", "with", "cbuf", ".", "put_direct", "(", ")", "as", "put_ary", ":", "put_ary", "[", ":", "]", "=", "vals", "else", ":", "# Otherwise, use the sync object to ensure that writes occur in the order provided by i.", "# So i = 0 will write first, then i = block_size, then i = 2*block_size, etc...", "# The sync object has two ordered barriers so that acquisition and release of the buffer spaces", "# are synchronized in order, but the actual writing to the buffer can happen simultaneously.", "# If only one barrier were used, writing to the buffer would be linearised.", "with", "sync", ".", "do", "(", "cbuf", ".", "put_direct", "(", ")", ",", "i", ",", "(", "i", "+", "read_size", ")", "%", "len", "(", "ary", ")", ")", "as", "put_ary", ":", "put_ary", "[", ":", "]", "=", "vals", "i", "+=", "read_skip", "if", "cyclic", ":", "# If the next iteration is past the end of the dataset, wrap it around.", "if", "i", ">=", "len", "(", "ary", ")", 
":", "i", "%=", "len", "(", "ary", ")", "barrier", ".", "wait", "(", ")", "else", ":", "# But if cyclic mode is disabled, break the loop as the work is now done.", "if", "i", "+", "read_size", ">", "len", "(", "ary", ")", ":", "break" ]
9654a45800289a20e66d2b0e0666149f0d370f93
test
BarrierImpl.wait
Wait until all processes have reached the barrier.
multitables.py
def wait(self): """Wait until all processes have reached the barrier.""" with self.cvar: self.count.value += 1 self.cvar.notify_all() while self.count.value < self.n_procs: self.cvar.wait()
def wait(self): """Wait until all processes have reached the barrier.""" with self.cvar: self.count.value += 1 self.cvar.notify_all() while self.count.value < self.n_procs: self.cvar.wait()
[ "Wait", "until", "all", "processes", "have", "reached", "the", "barrier", "." ]
ghcollin/multitables
python
https://github.com/ghcollin/multitables/blob/9654a45800289a20e66d2b0e0666149f0d370f93/multitables.py#L32-L38
[ "def", "wait", "(", "self", ")", ":", "with", "self", ".", "cvar", ":", "self", ".", "count", ".", "value", "+=", "1", "self", ".", "cvar", ".", "notify_all", "(", ")", "while", "self", ".", "count", ".", "value", "<", "self", ".", "n_procs", ":", "self", ".", "cvar", ".", "wait", "(", ")" ]
9654a45800289a20e66d2b0e0666149f0d370f93
test
Barrier.wait
Wait until all processes have reached the barrier.
multitables.py
def wait(self): """Wait until all processes have reached the barrier.""" self.barrier_A.wait() # The current barrier (barrier_A) is switched with the reserve barrier. # This is because the current barrier cannot be safely reset until the reserve barrier has been passed. self.barrier_A, self.barrier_B = self.barrier_B, self.barrier_A self.barrier_A.reset()
def wait(self): """Wait until all processes have reached the barrier.""" self.barrier_A.wait() # The current barrier (barrier_A) is switched with the reserve barrier. # This is because the current barrier cannot be safely reset until the reserve barrier has been passed. self.barrier_A, self.barrier_B = self.barrier_B, self.barrier_A self.barrier_A.reset()
[ "Wait", "until", "all", "processes", "have", "reached", "the", "barrier", "." ]
ghcollin/multitables
python
https://github.com/ghcollin/multitables/blob/9654a45800289a20e66d2b0e0666149f0d370f93/multitables.py#L57-L63
[ "def", "wait", "(", "self", ")", ":", "self", ".", "barrier_A", ".", "wait", "(", ")", "# The current barrier (barrier_A) is switched with the reserve barrier.", "# This is because the current barrier cannot be safely reset until the reserve barrier has been passed.", "self", ".", "barrier_A", ",", "self", ".", "barrier_B", "=", "self", ".", "barrier_B", ",", "self", ".", "barrier_A", "self", ".", "barrier_A", ".", "reset", "(", ")" ]
9654a45800289a20e66d2b0e0666149f0d370f93
test
OrderedBarrier.wait
Block until it is the turn indicated by index. :param index: :param next_index: Set the index to this value after finishing. Releases the process waiting on next_index. Defaults to incrementing index by 1. :return:
multitables.py
def wait(self, index, next_index=None): """ Block until it is the turn indicated by index. :param index: :param next_index: Set the index to this value after finishing. Releases the process waiting on next_index. Defaults to incrementing index by 1. :return: """ return OrderedBarrier.Guard(self, index, index+1 if next_index is None else next_index)
def wait(self, index, next_index=None): """ Block until it is the turn indicated by index. :param index: :param next_index: Set the index to this value after finishing. Releases the process waiting on next_index. Defaults to incrementing index by 1. :return: """ return OrderedBarrier.Guard(self, index, index+1 if next_index is None else next_index)
[ "Block", "until", "it", "is", "the", "turn", "indicated", "by", "index", ".", ":", "param", "index", ":", ":", "param", "next_index", ":", "Set", "the", "index", "to", "this", "value", "after", "finishing", ".", "Releases", "the", "process", "waiting", "on", "next_index", ".", "Defaults", "to", "incrementing", "index", "by", "1", ".", ":", "return", ":" ]
ghcollin/multitables
python
https://github.com/ghcollin/multitables/blob/9654a45800289a20e66d2b0e0666149f0d370f93/multitables.py#L107-L115
[ "def", "wait", "(", "self", ",", "index", ",", "next_index", "=", "None", ")", ":", "return", "OrderedBarrier", ".", "Guard", "(", "self", ",", "index", ",", "index", "+", "1", "if", "next_index", "is", "None", "else", "next_index", ")" ]
9654a45800289a20e66d2b0e0666149f0d370f93
test
GuardSynchronizer.do
Create a guard that requires the resource guard to be entered and exited based on the order provided by index. :param guard: The context manager for the resource. :param index: The order to wait for. :param next_index: The next index to release. :return:
multitables.py
def do(self, guard, index, next_index): """ Create a guard that requires the resource guard to be entered and exited based on the order provided by index. :param guard: The context manager for the resource. :param index: The order to wait for. :param next_index: The next index to release. :return: """ return GuardSynchronizer.Guard(self, guard, index, next_index)
def do(self, guard, index, next_index): """ Create a guard that requires the resource guard to be entered and exited based on the order provided by index. :param guard: The context manager for the resource. :param index: The order to wait for. :param next_index: The next index to release. :return: """ return GuardSynchronizer.Guard(self, guard, index, next_index)
[ "Create", "a", "guard", "that", "requires", "the", "resource", "guard", "to", "be", "entered", "and", "exited", "based", "on", "the", "order", "provided", "by", "index", ".", ":", "param", "guard", ":", "The", "context", "manager", "for", "the", "resource", ".", ":", "param", "index", ":", "The", "order", "to", "wait", "for", ".", ":", "param", "next_index", ":", "The", "next", "index", "to", "release", ".", ":", "return", ":" ]
ghcollin/multitables
python
https://github.com/ghcollin/multitables/blob/9654a45800289a20e66d2b0e0666149f0d370f93/multitables.py#L151-L159
[ "def", "do", "(", "self", ",", "guard", ",", "index", ",", "next_index", ")", ":", "return", "GuardSynchronizer", ".", "Guard", "(", "self", ",", "guard", ",", "index", ",", "next_index", ")" ]
9654a45800289a20e66d2b0e0666149f0d370f93
test
SafeQueue.put
Put an unsigned integer into the queue. This method always assumes that there is space in the queue. ( In the circular buffer, this is guaranteed by the implementation ) :param v: The item to insert. Must be >= 0, as -2 is used to signal a queue close. :return:
multitables.py
def put(self, v): """ Put an unsigned integer into the queue. This method always assumes that there is space in the queue. ( In the circular buffer, this is guaranteed by the implementation ) :param v: The item to insert. Must be >= 0, as -2 is used to signal a queue close. :return: """ if v is QueueClosed: v = -2 else: assert(v >= 0) with self.cvar: assert(self.size.value < len(self.vals)) head = (self.tail.value + self.size.value) % len(self.vals) self.vals[head] = v self.size.value += 1 self.cvar.notify()
def put(self, v): """ Put an unsigned integer into the queue. This method always assumes that there is space in the queue. ( In the circular buffer, this is guaranteed by the implementation ) :param v: The item to insert. Must be >= 0, as -2 is used to signal a queue close. :return: """ if v is QueueClosed: v = -2 else: assert(v >= 0) with self.cvar: assert(self.size.value < len(self.vals)) head = (self.tail.value + self.size.value) % len(self.vals) self.vals[head] = v self.size.value += 1 self.cvar.notify()
[ "Put", "an", "unsigned", "integer", "into", "the", "queue", ".", "This", "method", "always", "assumes", "that", "there", "is", "space", "in", "the", "queue", ".", "(", "In", "the", "circular", "buffer", "this", "is", "guaranteed", "by", "the", "implementation", ")", ":", "param", "v", ":", "The", "item", "to", "insert", ".", "Must", "be", ">", "=", "0", "as", "-", "2", "is", "used", "to", "signal", "a", "queue", "close", ".", ":", "return", ":" ]
ghcollin/multitables
python
https://github.com/ghcollin/multitables/blob/9654a45800289a20e66d2b0e0666149f0d370f93/multitables.py#L183-L200
[ "def", "put", "(", "self", ",", "v", ")", ":", "if", "v", "is", "QueueClosed", ":", "v", "=", "-", "2", "else", ":", "assert", "(", "v", ">=", "0", ")", "with", "self", ".", "cvar", ":", "assert", "(", "self", ".", "size", ".", "value", "<", "len", "(", "self", ".", "vals", ")", ")", "head", "=", "(", "self", ".", "tail", ".", "value", "+", "self", ".", "size", ".", "value", ")", "%", "len", "(", "self", ".", "vals", ")", "self", ".", "vals", "[", "head", "]", "=", "v", "self", ".", "size", ".", "value", "+=", "1", "self", ".", "cvar", ".", "notify", "(", ")" ]
9654a45800289a20e66d2b0e0666149f0d370f93
test
SafeQueue.get
Fetch the next item in the queue. Blocks until an item is ready. :return: The next unsigned integer in the queue.
multitables.py
def get(self): """ Fetch the next item in the queue. Blocks until an item is ready. :return: The next unsigned integer in the queue. """ with self.cvar: while True: if self.size.value > 0: rval = self.vals[self.tail.value] self.tail.value = (self.tail.value + 1) % len(self.vals) self.size.value -= 1 if rval == -2: return QueueClosed assert(rval >= 0) return rval self.cvar.wait()
def get(self): """ Fetch the next item in the queue. Blocks until an item is ready. :return: The next unsigned integer in the queue. """ with self.cvar: while True: if self.size.value > 0: rval = self.vals[self.tail.value] self.tail.value = (self.tail.value + 1) % len(self.vals) self.size.value -= 1 if rval == -2: return QueueClosed assert(rval >= 0) return rval self.cvar.wait()
[ "Fetch", "the", "next", "item", "in", "the", "queue", ".", "Blocks", "until", "an", "item", "is", "ready", ".", ":", "return", ":", "The", "next", "unsigned", "integer", "in", "the", "queue", "." ]
ghcollin/multitables
python
https://github.com/ghcollin/multitables/blob/9654a45800289a20e66d2b0e0666149f0d370f93/multitables.py#L202-L217
[ "def", "get", "(", "self", ")", ":", "with", "self", ".", "cvar", ":", "while", "True", ":", "if", "self", ".", "size", ".", "value", ">", "0", ":", "rval", "=", "self", ".", "vals", "[", "self", ".", "tail", ".", "value", "]", "self", ".", "tail", ".", "value", "=", "(", "self", ".", "tail", ".", "value", "+", "1", ")", "%", "len", "(", "self", ".", "vals", ")", "self", ".", "size", ".", "value", "-=", "1", "if", "rval", "==", "-", "2", ":", "return", "QueueClosed", "assert", "(", "rval", ">=", "0", ")", "return", "rval", "self", ".", "cvar", ".", "wait", "(", ")" ]
9654a45800289a20e66d2b0e0666149f0d370f93
test
SharedCircBuf.put_direct
Allows direct access to the buffer element. Blocks until there is room to write into the buffer. :return: A guard object that returns the buffer element.
multitables.py
def put_direct(self): """ Allows direct access to the buffer element. Blocks until there is room to write into the buffer. :return: A guard object that returns the buffer element. """ # Once the guard is released, write_idx will be placed into read_queue. return self.Guard(self.read_queue, self.arys, self.__put_idx)
def put_direct(self): """ Allows direct access to the buffer element. Blocks until there is room to write into the buffer. :return: A guard object that returns the buffer element. """ # Once the guard is released, write_idx will be placed into read_queue. return self.Guard(self.read_queue, self.arys, self.__put_idx)
[ "Allows", "direct", "access", "to", "the", "buffer", "element", ".", "Blocks", "until", "there", "is", "room", "to", "write", "into", "the", "buffer", "." ]
ghcollin/multitables
python
https://github.com/ghcollin/multitables/blob/9654a45800289a20e66d2b0e0666149f0d370f93/multitables.py#L298-L307
[ "def", "put_direct", "(", "self", ")", ":", "# Once the guard is released, write_idx will be placed into read_queue.", "return", "self", ".", "Guard", "(", "self", ".", "read_queue", ",", "self", ".", "arys", ",", "self", ".", "__put_idx", ")" ]
9654a45800289a20e66d2b0e0666149f0d370f93
test
SharedCircBuf.get_direct
Allows direct access to the buffer element. Blocks until there is data that can be read. :return: A guard object that returns the buffer element.
multitables.py
def get_direct(self): """ Allows direct access to the buffer element. Blocks until there is data that can be read. :return: A guard object that returns the buffer element. """ read_idx = self.__get_idx() if read_idx is QueueClosed: return QueueClosed # Once the guard is released, read_idx will be placed into write_queue. return self.Guard(self.write_queue, self.arys, lambda: read_idx)
def get_direct(self): """ Allows direct access to the buffer element. Blocks until there is data that can be read. :return: A guard object that returns the buffer element. """ read_idx = self.__get_idx() if read_idx is QueueClosed: return QueueClosed # Once the guard is released, read_idx will be placed into write_queue. return self.Guard(self.write_queue, self.arys, lambda: read_idx)
[ "Allows", "direct", "access", "to", "the", "buffer", "element", ".", "Blocks", "until", "there", "is", "data", "that", "can", "be", "read", "." ]
ghcollin/multitables
python
https://github.com/ghcollin/multitables/blob/9654a45800289a20e66d2b0e0666149f0d370f93/multitables.py#L329-L343
[ "def", "get_direct", "(", "self", ")", ":", "read_idx", "=", "self", ".", "__get_idx", "(", ")", "if", "read_idx", "is", "QueueClosed", ":", "return", "QueueClosed", "# Once the guard is released, read_idx will be placed into write_queue.", "return", "self", ".", "Guard", "(", "self", ".", "write_queue", ",", "self", ".", "arys", ",", "lambda", ":", "read_idx", ")" ]
9654a45800289a20e66d2b0e0666149f0d370f93
test
SharedCircBuf.close
Close the queue, signalling that no more data can be put into the queue.
multitables.py
def close(self): """Close the queue, signalling that no more data can be put into the queue.""" self.read_queue.put(QueueClosed) self.write_queue.put(QueueClosed)
def close(self): """Close the queue, signalling that no more data can be put into the queue.""" self.read_queue.put(QueueClosed) self.write_queue.put(QueueClosed)
[ "Close", "the", "queue", "signalling", "that", "no", "more", "data", "can", "be", "put", "into", "the", "queue", "." ]
ghcollin/multitables
python
https://github.com/ghcollin/multitables/blob/9654a45800289a20e66d2b0e0666149f0d370f93/multitables.py#L345-L348
[ "def", "close", "(", "self", ")", ":", "self", ".", "read_queue", ".", "put", "(", "QueueClosed", ")", "self", ".", "write_queue", ".", "put", "(", "QueueClosed", ")" ]
9654a45800289a20e66d2b0e0666149f0d370f93
test
Streamer.__get_batch
Get a block of data from the node at path. :param path: The path to the node to read from. :param length: The length along the outer dimension to read. :param last: True if the remainder elements should be read. :return: A copy of the requested block of data as a numpy array.
multitables.py
def __get_batch(self, path, length, last=False): """ Get a block of data from the node at path. :param path: The path to the node to read from. :param length: The length along the outer dimension to read. :param last: True if the remainder elements should be read. :return: A copy of the requested block of data as a numpy array. """ import tables h5_file = tables.open_file(self.filename, 'r') h5_node = h5_file.get_node(path) if len(h5_node) == 0: raise Exception("Cannot read from empty dataset.") # If the length isn't specified, then fall back to default values. if length is None: chunkshape = h5_node.chunkshape # If the array isn't chunked, then try to make the block close to 128KB. if chunkshape is None: default_length = 128*2**10//h5_node[0].nbytes # Divides by one row of the dataset. length = min(h5_node.shape[0], default_length) # If it is chunked, then use the chunkshape for best performance. else: length = chunkshape[0] if last: example = h5_node[length*(len(h5_node)//length):].copy() else: example = h5_node[:length].copy() h5_file.close() return example
def __get_batch(self, path, length, last=False): """ Get a block of data from the node at path. :param path: The path to the node to read from. :param length: The length along the outer dimension to read. :param last: True if the remainder elements should be read. :return: A copy of the requested block of data as a numpy array. """ import tables h5_file = tables.open_file(self.filename, 'r') h5_node = h5_file.get_node(path) if len(h5_node) == 0: raise Exception("Cannot read from empty dataset.") # If the length isn't specified, then fall back to default values. if length is None: chunkshape = h5_node.chunkshape # If the array isn't chunked, then try to make the block close to 128KB. if chunkshape is None: default_length = 128*2**10//h5_node[0].nbytes # Divides by one row of the dataset. length = min(h5_node.shape[0], default_length) # If it is chunked, then use the chunkshape for best performance. else: length = chunkshape[0] if last: example = h5_node[length*(len(h5_node)//length):].copy() else: example = h5_node[:length].copy() h5_file.close() return example
[ "Get", "a", "block", "of", "data", "from", "the", "node", "at", "path", "." ]
ghcollin/multitables
python
https://github.com/ghcollin/multitables/blob/9654a45800289a20e66d2b0e0666149f0d370f93/multitables.py#L418-L451
[ "def", "__get_batch", "(", "self", ",", "path", ",", "length", ",", "last", "=", "False", ")", ":", "import", "tables", "h5_file", "=", "tables", ".", "open_file", "(", "self", ".", "filename", ",", "'r'", ")", "h5_node", "=", "h5_file", ".", "get_node", "(", "path", ")", "if", "len", "(", "h5_node", ")", "==", "0", ":", "raise", "Exception", "(", "\"Cannot read from empty dataset.\"", ")", "# If the length isn't specified, then fall back to default values.", "if", "length", "is", "None", ":", "chunkshape", "=", "h5_node", ".", "chunkshape", "# If the array isn't chunked, then try to make the block close to 128KB.", "if", "chunkshape", "is", "None", ":", "default_length", "=", "128", "*", "2", "**", "10", "//", "h5_node", "[", "0", "]", ".", "nbytes", "# Divides by one row of the dataset.", "length", "=", "min", "(", "h5_node", ".", "shape", "[", "0", "]", ",", "default_length", ")", "# If it is chunked, then use the chunkshape for best performance.", "else", ":", "length", "=", "chunkshape", "[", "0", "]", "if", "last", ":", "example", "=", "h5_node", "[", "length", "*", "(", "len", "(", "h5_node", ")", "//", "length", ")", ":", "]", ".", "copy", "(", ")", "else", ":", "example", "=", "h5_node", "[", ":", "length", "]", ".", "copy", "(", ")", "h5_file", ".", "close", "(", ")", "return", "example" ]
9654a45800289a20e66d2b0e0666149f0d370f93
test
Streamer.get_remainder
Get the remainder elements. These elements will not be read in the direct queue access cyclic=False mode. :param path: The HDF5 path to the dataset to be read. :param block_size: The block size is used to calculate which elements will remain. :return: A copy of the remainder elements as a numpy array.
multitables.py
def get_remainder(self, path, block_size): """ Get the remainder elements. These elements will not be read in the direct queue access cyclic=False mode. :param path: The HDF5 path to the dataset to be read. :param block_size: The block size is used to calculate which elements will remain. :return: A copy of the remainder elements as a numpy array. """ return self.__get_batch(path, length=block_size, last=True)
def get_remainder(self, path, block_size): """ Get the remainder elements. These elements will not be read in the direct queue access cyclic=False mode. :param path: The HDF5 path to the dataset to be read. :param block_size: The block size is used to calculate which elements will remain. :return: A copy of the remainder elements as a numpy array. """ return self.__get_batch(path, length=block_size, last=True)
[ "Get", "the", "remainder", "elements", ".", "These", "elements", "will", "not", "be", "read", "in", "the", "direct", "queue", "access", "cyclic", "=", "False", "mode", "." ]
ghcollin/multitables
python
https://github.com/ghcollin/multitables/blob/9654a45800289a20e66d2b0e0666149f0d370f93/multitables.py#L453-L461
[ "def", "get_remainder", "(", "self", ",", "path", ",", "block_size", ")", ":", "return", "self", ".", "__get_batch", "(", "path", ",", "length", "=", "block_size", ",", "last", "=", "True", ")" ]
9654a45800289a20e66d2b0e0666149f0d370f93
test
Streamer.get_queue
Get a queue that allows direct access to the internal buffer. If the dataset to be read is chunked, the block_size should be a multiple of the chunk size to maximise performance. In this case it is best to leave it to the default. When cyclic=False, and block_size does not divide the dataset evenly, the remainder elements will not be returned by the queue. When cyclic=True, the remainder elements will be part of a block that wraps around the end and includes element from the beginning of the dataset. By default, blocks are returned in the order in which they become available. The ordered option will force blocks to be returned in on-disk order. :param path: The HDF5 path to the dataset that should be read. :param n_procs: The number of background processes used to read the datset in parallel. :param read_ahead: The number of blocks to allocate in the internal buffer. :param cyclic: True if the queue should wrap at the end of the dataset. :param block_size: The size along the outer dimension of the blocks to be read. Defaults to a multiple of the chunk size, or to a 128KB sized block if the dataset is not chunked. :param ordered: Force the reader return data in on-disk order. May result in performance penalty. :return: A queue object that allows access to the internal buffer.
multitables.py
def get_queue(self, path, n_procs=4, read_ahead=None, cyclic=False, block_size=None, ordered=False): """ Get a queue that allows direct access to the internal buffer. If the dataset to be read is chunked, the block_size should be a multiple of the chunk size to maximise performance. In this case it is best to leave it to the default. When cyclic=False, and block_size does not divide the dataset evenly, the remainder elements will not be returned by the queue. When cyclic=True, the remainder elements will be part of a block that wraps around the end and includes element from the beginning of the dataset. By default, blocks are returned in the order in which they become available. The ordered option will force blocks to be returned in on-disk order. :param path: The HDF5 path to the dataset that should be read. :param n_procs: The number of background processes used to read the datset in parallel. :param read_ahead: The number of blocks to allocate in the internal buffer. :param cyclic: True if the queue should wrap at the end of the dataset. :param block_size: The size along the outer dimension of the blocks to be read. Defaults to a multiple of the chunk size, or to a 128KB sized block if the dataset is not chunked. :param ordered: Force the reader return data in on-disk order. May result in performance penalty. :return: A queue object that allows access to the internal buffer. """ # Get a block_size length of elements from the dataset to serve as a template for creating the buffer. # If block_size=None, then get_batch calculates an appropriate block size. example = self.__get_batch(path, block_size) block_size = example.shape[0] if read_ahead is None: # 2x No. of processes for writing, 1 extra for reading. read_ahead = 2*n_procs + 1 cbuf = SharedCircBuf(read_ahead, example) stop = multiprocessing.Event() barrier = Barrier(n_procs) # If ordering has been requested, create a synchronizer. 
sync = GuardSynchronizer() if ordered else None procs = [] for i in range(n_procs): # Each process is offset in the dataset by i*block_size # The skip length is set to n_procs*block_size so that no block is read by 2 processes. process = multiprocessing.Process(target=_Streamer__read_process, args=( self, path, block_size, cbuf, stop, barrier, cyclic, i * block_size, n_procs * block_size, sync )) process.daemon = True process.start() procs.append(process) # If the queue is not cyclic, then the cessation of reading data needs to be monitored. if not cyclic: # This closure defines a background thread that waits until all processes have finished. # At this point, all data from the dataset has been read, and the buffer is closed. def monitor(): for p in procs: p.join() cbuf.close() monitor_thread = threading.Thread(target=monitor) monitor_thread.daemon = True monitor_thread.start() return Streamer.Queue(cbuf, stop, block_size)
def get_queue(self, path, n_procs=4, read_ahead=None, cyclic=False, block_size=None, ordered=False): """ Get a queue that allows direct access to the internal buffer. If the dataset to be read is chunked, the block_size should be a multiple of the chunk size to maximise performance. In this case it is best to leave it to the default. When cyclic=False, and block_size does not divide the dataset evenly, the remainder elements will not be returned by the queue. When cyclic=True, the remainder elements will be part of a block that wraps around the end and includes element from the beginning of the dataset. By default, blocks are returned in the order in which they become available. The ordered option will force blocks to be returned in on-disk order. :param path: The HDF5 path to the dataset that should be read. :param n_procs: The number of background processes used to read the datset in parallel. :param read_ahead: The number of blocks to allocate in the internal buffer. :param cyclic: True if the queue should wrap at the end of the dataset. :param block_size: The size along the outer dimension of the blocks to be read. Defaults to a multiple of the chunk size, or to a 128KB sized block if the dataset is not chunked. :param ordered: Force the reader return data in on-disk order. May result in performance penalty. :return: A queue object that allows access to the internal buffer. """ # Get a block_size length of elements from the dataset to serve as a template for creating the buffer. # If block_size=None, then get_batch calculates an appropriate block size. example = self.__get_batch(path, block_size) block_size = example.shape[0] if read_ahead is None: # 2x No. of processes for writing, 1 extra for reading. read_ahead = 2*n_procs + 1 cbuf = SharedCircBuf(read_ahead, example) stop = multiprocessing.Event() barrier = Barrier(n_procs) # If ordering has been requested, create a synchronizer. 
sync = GuardSynchronizer() if ordered else None procs = [] for i in range(n_procs): # Each process is offset in the dataset by i*block_size # The skip length is set to n_procs*block_size so that no block is read by 2 processes. process = multiprocessing.Process(target=_Streamer__read_process, args=( self, path, block_size, cbuf, stop, barrier, cyclic, i * block_size, n_procs * block_size, sync )) process.daemon = True process.start() procs.append(process) # If the queue is not cyclic, then the cessation of reading data needs to be monitored. if not cyclic: # This closure defines a background thread that waits until all processes have finished. # At this point, all data from the dataset has been read, and the buffer is closed. def monitor(): for p in procs: p.join() cbuf.close() monitor_thread = threading.Thread(target=monitor) monitor_thread.daemon = True monitor_thread.start() return Streamer.Queue(cbuf, stop, block_size)
[ "Get", "a", "queue", "that", "allows", "direct", "access", "to", "the", "internal", "buffer", ".", "If", "the", "dataset", "to", "be", "read", "is", "chunked", "the", "block_size", "should", "be", "a", "multiple", "of", "the", "chunk", "size", "to", "maximise", "performance", ".", "In", "this", "case", "it", "is", "best", "to", "leave", "it", "to", "the", "default", ".", "When", "cyclic", "=", "False", "and", "block_size", "does", "not", "divide", "the", "dataset", "evenly", "the", "remainder", "elements", "will", "not", "be", "returned", "by", "the", "queue", ".", "When", "cyclic", "=", "True", "the", "remainder", "elements", "will", "be", "part", "of", "a", "block", "that", "wraps", "around", "the", "end", "and", "includes", "element", "from", "the", "beginning", "of", "the", "dataset", ".", "By", "default", "blocks", "are", "returned", "in", "the", "order", "in", "which", "they", "become", "available", ".", "The", "ordered", "option", "will", "force", "blocks", "to", "be", "returned", "in", "on", "-", "disk", "order", "." ]
ghcollin/multitables
python
https://github.com/ghcollin/multitables/blob/9654a45800289a20e66d2b0e0666149f0d370f93/multitables.py#L508-L568
[ "def", "get_queue", "(", "self", ",", "path", ",", "n_procs", "=", "4", ",", "read_ahead", "=", "None", ",", "cyclic", "=", "False", ",", "block_size", "=", "None", ",", "ordered", "=", "False", ")", ":", "# Get a block_size length of elements from the dataset to serve as a template for creating the buffer.", "# If block_size=None, then get_batch calculates an appropriate block size.", "example", "=", "self", ".", "__get_batch", "(", "path", ",", "block_size", ")", "block_size", "=", "example", ".", "shape", "[", "0", "]", "if", "read_ahead", "is", "None", ":", "# 2x No. of processes for writing, 1 extra for reading.", "read_ahead", "=", "2", "*", "n_procs", "+", "1", "cbuf", "=", "SharedCircBuf", "(", "read_ahead", ",", "example", ")", "stop", "=", "multiprocessing", ".", "Event", "(", ")", "barrier", "=", "Barrier", "(", "n_procs", ")", "# If ordering has been requested, create a synchronizer.", "sync", "=", "GuardSynchronizer", "(", ")", "if", "ordered", "else", "None", "procs", "=", "[", "]", "for", "i", "in", "range", "(", "n_procs", ")", ":", "# Each process is offset in the dataset by i*block_size", "# The skip length is set to n_procs*block_size so that no block is read by 2 processes.", "process", "=", "multiprocessing", ".", "Process", "(", "target", "=", "_Streamer__read_process", ",", "args", "=", "(", "self", ",", "path", ",", "block_size", ",", "cbuf", ",", "stop", ",", "barrier", ",", "cyclic", ",", "i", "*", "block_size", ",", "n_procs", "*", "block_size", ",", "sync", ")", ")", "process", ".", "daemon", "=", "True", "process", ".", "start", "(", ")", "procs", ".", "append", "(", "process", ")", "# If the queue is not cyclic, then the cessation of reading data needs to be monitored.", "if", "not", "cyclic", ":", "# This closure defines a background thread that waits until all processes have finished.", "# At this point, all data from the dataset has been read, and the buffer is closed.", "def", "monitor", "(", ")", ":", "for", "p", "in", "procs", 
":", "p", ".", "join", "(", ")", "cbuf", ".", "close", "(", ")", "monitor_thread", "=", "threading", ".", "Thread", "(", "target", "=", "monitor", ")", "monitor_thread", ".", "daemon", "=", "True", "monitor_thread", ".", "start", "(", ")", "return", "Streamer", ".", "Queue", "(", "cbuf", ",", "stop", ",", "block_size", ")" ]
9654a45800289a20e66d2b0e0666149f0d370f93
test
Streamer.get_generator
Get a generator that allows convenient access to the streamed data. Elements from the dataset are returned from the generator one row at a time. Unlike the direct access queue, this generator also returns the remainder elements. Additional arguments are forwarded to get_queue. See the get_queue method for documentation of these parameters. :param path: :return: A generator that iterates over the rows in the dataset.
multitables.py
def get_generator(self, path, *args, **kw_args): """ Get a generator that allows convenient access to the streamed data. Elements from the dataset are returned from the generator one row at a time. Unlike the direct access queue, this generator also returns the remainder elements. Additional arguments are forwarded to get_queue. See the get_queue method for documentation of these parameters. :param path: :return: A generator that iterates over the rows in the dataset. """ q = self.get_queue(path=path, *args, **kw_args) try: # This generator just implements a standard access pattern for the direct access queue. for guard in q.iter(): with guard as batch: batch_copy = batch.copy() for row in batch_copy: yield row last_batch = self.get_remainder(path, q.block_size) for row in last_batch: yield row finally: q.close()
def get_generator(self, path, *args, **kw_args): """ Get a generator that allows convenient access to the streamed data. Elements from the dataset are returned from the generator one row at a time. Unlike the direct access queue, this generator also returns the remainder elements. Additional arguments are forwarded to get_queue. See the get_queue method for documentation of these parameters. :param path: :return: A generator that iterates over the rows in the dataset. """ q = self.get_queue(path=path, *args, **kw_args) try: # This generator just implements a standard access pattern for the direct access queue. for guard in q.iter(): with guard as batch: batch_copy = batch.copy() for row in batch_copy: yield row last_batch = self.get_remainder(path, q.block_size) for row in last_batch: yield row finally: q.close()
[ "Get", "a", "generator", "that", "allows", "convenient", "access", "to", "the", "streamed", "data", ".", "Elements", "from", "the", "dataset", "are", "returned", "from", "the", "generator", "one", "row", "at", "a", "time", ".", "Unlike", "the", "direct", "access", "queue", "this", "generator", "also", "returns", "the", "remainder", "elements", ".", "Additional", "arguments", "are", "forwarded", "to", "get_queue", ".", "See", "the", "get_queue", "method", "for", "documentation", "of", "these", "parameters", "." ]
ghcollin/multitables
python
https://github.com/ghcollin/multitables/blob/9654a45800289a20e66d2b0e0666149f0d370f93/multitables.py#L570-L597
[ "def", "get_generator", "(", "self", ",", "path", ",", "*", "args", ",", "*", "*", "kw_args", ")", ":", "q", "=", "self", ".", "get_queue", "(", "path", "=", "path", ",", "*", "args", ",", "*", "*", "kw_args", ")", "try", ":", "# This generator just implements a standard access pattern for the direct access queue.", "for", "guard", "in", "q", ".", "iter", "(", ")", ":", "with", "guard", "as", "batch", ":", "batch_copy", "=", "batch", ".", "copy", "(", ")", "for", "row", "in", "batch_copy", ":", "yield", "row", "last_batch", "=", "self", ".", "get_remainder", "(", "path", ",", "q", ".", "block_size", ")", "for", "row", "in", "last_batch", ":", "yield", "row", "finally", ":", "q", ".", "close", "(", ")" ]
9654a45800289a20e66d2b0e0666149f0d370f93
test
parse
Parse a stream. Args: ifp (string or file-like object): input stream. pb_cls (protobuf.message.Message.__class__): The class object of the protobuf message type encoded in the stream.
stream/stream.py
def parse(ifp, pb_cls, **kwargs): """Parse a stream. Args: ifp (string or file-like object): input stream. pb_cls (protobuf.message.Message.__class__): The class object of the protobuf message type encoded in the stream. """ mode = 'rb' if isinstance(ifp, str): istream = open(ifp, mode=mode, **kwargs) else: istream = open(fileobj=ifp, mode=mode, **kwargs) with istream: for data in istream: pb_obj = pb_cls() pb_obj.ParseFromString(data) yield pb_obj
def parse(ifp, pb_cls, **kwargs): """Parse a stream. Args: ifp (string or file-like object): input stream. pb_cls (protobuf.message.Message.__class__): The class object of the protobuf message type encoded in the stream. """ mode = 'rb' if isinstance(ifp, str): istream = open(ifp, mode=mode, **kwargs) else: istream = open(fileobj=ifp, mode=mode, **kwargs) with istream: for data in istream: pb_obj = pb_cls() pb_obj.ParseFromString(data) yield pb_obj
[ "Parse", "a", "stream", "." ]
cartoonist/pystream-protobuf
python
https://github.com/cartoonist/pystream-protobuf/blob/40e70b932436887b748905e5e0a82839e4c559f0/stream/stream.py#L19-L36
[ "def", "parse", "(", "ifp", ",", "pb_cls", ",", "*", "*", "kwargs", ")", ":", "mode", "=", "'rb'", "if", "isinstance", "(", "ifp", ",", "str", ")", ":", "istream", "=", "open", "(", "ifp", ",", "mode", "=", "mode", ",", "*", "*", "kwargs", ")", "else", ":", "istream", "=", "open", "(", "fileobj", "=", "ifp", ",", "mode", "=", "mode", ",", "*", "*", "kwargs", ")", "with", "istream", ":", "for", "data", "in", "istream", ":", "pb_obj", "=", "pb_cls", "(", ")", "pb_obj", ".", "ParseFromString", "(", "data", ")", "yield", "pb_obj" ]
40e70b932436887b748905e5e0a82839e4c559f0
test
dump
Write to a stream. Args: ofp (string or file-like object): output stream. pb_objs (*protobuf.message.Message): list of protobuf message objects to be written.
stream/stream.py
def dump(ofp, *pb_objs, **kwargs): """Write to a stream. Args: ofp (string or file-like object): output stream. pb_objs (*protobuf.message.Message): list of protobuf message objects to be written. """ mode = 'wb' if isinstance(ofp, str): ostream = open(ofp, mode=mode, **kwargs) else: ostream = open(fileobj=ofp, mode=mode, **kwargs) with ostream: ostream.write(*pb_objs)
def dump(ofp, *pb_objs, **kwargs): """Write to a stream. Args: ofp (string or file-like object): output stream. pb_objs (*protobuf.message.Message): list of protobuf message objects to be written. """ mode = 'wb' if isinstance(ofp, str): ostream = open(ofp, mode=mode, **kwargs) else: ostream = open(fileobj=ofp, mode=mode, **kwargs) with ostream: ostream.write(*pb_objs)
[ "Write", "to", "a", "stream", "." ]
cartoonist/pystream-protobuf
python
https://github.com/cartoonist/pystream-protobuf/blob/40e70b932436887b748905e5e0a82839e4c559f0/stream/stream.py#L39-L53
[ "def", "dump", "(", "ofp", ",", "*", "pb_objs", ",", "*", "*", "kwargs", ")", ":", "mode", "=", "'wb'", "if", "isinstance", "(", "ofp", ",", "str", ")", ":", "ostream", "=", "open", "(", "ofp", ",", "mode", "=", "mode", ",", "*", "*", "kwargs", ")", "else", ":", "ostream", "=", "open", "(", "fileobj", "=", "ofp", ",", "mode", "=", "mode", ",", "*", "*", "kwargs", ")", "with", "ostream", ":", "ostream", ".", "write", "(", "*", "pb_objs", ")" ]
40e70b932436887b748905e5e0a82839e4c559f0
test
Stream._read_varint
Read a varint from file, parse it, and return the decoded integer.
stream/stream.py
def _read_varint(self): """Read a varint from file, parse it, and return the decoded integer. """ buff = self._fd.read(1) if buff == b'': return 0 while (bytearray(buff)[-1] & 0x80) >> 7 == 1: # while the MSB is 1 new_byte = self._fd.read(1) if new_byte == b'': raise EOFError('unexpected EOF.') buff += new_byte varint, _ = decodeVarint(buff, 0) return varint
def _read_varint(self): """Read a varint from file, parse it, and return the decoded integer. """ buff = self._fd.read(1) if buff == b'': return 0 while (bytearray(buff)[-1] & 0x80) >> 7 == 1: # while the MSB is 1 new_byte = self._fd.read(1) if new_byte == b'': raise EOFError('unexpected EOF.') buff += new_byte varint, _ = decodeVarint(buff, 0) return varint
[ "Read", "a", "varint", "from", "file", "parse", "it", "and", "return", "the", "decoded", "integer", "." ]
cartoonist/pystream-protobuf
python
https://github.com/cartoonist/pystream-protobuf/blob/40e70b932436887b748905e5e0a82839e4c559f0/stream/stream.py#L148-L163
[ "def", "_read_varint", "(", "self", ")", ":", "buff", "=", "self", ".", "_fd", ".", "read", "(", "1", ")", "if", "buff", "==", "b''", ":", "return", "0", "while", "(", "bytearray", "(", "buff", ")", "[", "-", "1", "]", "&", "0x80", ")", ">>", "7", "==", "1", ":", "# while the MSB is 1", "new_byte", "=", "self", ".", "_fd", ".", "read", "(", "1", ")", "if", "new_byte", "==", "b''", ":", "raise", "EOFError", "(", "'unexpected EOF.'", ")", "buff", "+=", "new_byte", "varint", ",", "_", "=", "decodeVarint", "(", "buff", ",", "0", ")", "return", "varint" ]
40e70b932436887b748905e5e0a82839e4c559f0
test
Stream._get_objs
A generator yielding all protobuf object data in the file. It is the main parser of the stream encoding.
stream/stream.py
def _get_objs(self): """A generator yielding all protobuf object data in the file. It is the main parser of the stream encoding. """ while True: count = self._read_varint() if count == 0: break # Read a group containing `count` number of objects. for _ in range(count): size = self._read_varint() if size == 0: raise EOFError('unexpected EOF.') # Read an object from the object group. yield self._fd.read(size) if self._group_delim: yield self._delimiter() if self._delimiter is not None \ else None
def _get_objs(self): """A generator yielding all protobuf object data in the file. It is the main parser of the stream encoding. """ while True: count = self._read_varint() if count == 0: break # Read a group containing `count` number of objects. for _ in range(count): size = self._read_varint() if size == 0: raise EOFError('unexpected EOF.') # Read an object from the object group. yield self._fd.read(size) if self._group_delim: yield self._delimiter() if self._delimiter is not None \ else None
[ "A", "generator", "yielding", "all", "protobuf", "object", "data", "in", "the", "file", ".", "It", "is", "the", "main", "parser", "of", "the", "stream", "encoding", "." ]
cartoonist/pystream-protobuf
python
https://github.com/cartoonist/pystream-protobuf/blob/40e70b932436887b748905e5e0a82839e4c559f0/stream/stream.py#L165-L183
[ "def", "_get_objs", "(", "self", ")", ":", "while", "True", ":", "count", "=", "self", ".", "_read_varint", "(", ")", "if", "count", "==", "0", ":", "break", "# Read a group containing `count` number of objects.", "for", "_", "in", "range", "(", "count", ")", ":", "size", "=", "self", ".", "_read_varint", "(", ")", "if", "size", "==", "0", ":", "raise", "EOFError", "(", "'unexpected EOF.'", ")", "# Read an object from the object group.", "yield", "self", ".", "_fd", ".", "read", "(", "size", ")", "if", "self", ".", "_group_delim", ":", "yield", "self", ".", "_delimiter", "(", ")", "if", "self", ".", "_delimiter", "is", "not", "None", "else", "None" ]
40e70b932436887b748905e5e0a82839e4c559f0
test
Stream.close
Close the stream.
stream/stream.py
def close(self): """Close the stream.""" self.flush() if self._myfd is not None: self._myfd.close() self._myfd = None
def close(self): """Close the stream.""" self.flush() if self._myfd is not None: self._myfd.close() self._myfd = None
[ "Close", "the", "stream", "." ]
cartoonist/pystream-protobuf
python
https://github.com/cartoonist/pystream-protobuf/blob/40e70b932436887b748905e5e0a82839e4c559f0/stream/stream.py#L191-L196
[ "def", "close", "(", "self", ")", ":", "self", ".", "flush", "(", ")", "if", "self", ".", "_myfd", "is", "not", "None", ":", "self", ".", "_myfd", ".", "close", "(", ")", "self", ".", "_myfd", "=", "None" ]
40e70b932436887b748905e5e0a82839e4c559f0
test
Stream.write
Write a group of one or more protobuf objects to the file. Multiple object groups can be written by calling this method several times before closing stream or exiting the runtime context. The input protobuf objects get buffered and will be written down when the number of buffered objects exceed the `self._buffer_size`. Args: pb2_obj (*protobuf.message.Message): list of protobuf messages.
stream/stream.py
def write(self, *pb2_obj): """Write a group of one or more protobuf objects to the file. Multiple object groups can be written by calling this method several times before closing stream or exiting the runtime context. The input protobuf objects get buffered and will be written down when the number of buffered objects exceed the `self._buffer_size`. Args: pb2_obj (*protobuf.message.Message): list of protobuf messages. """ base = len(self._write_buff) for idx, obj in enumerate(pb2_obj): if self._buffer_size > 0 and \ (idx + base) != 0 and \ (idx + base) % self._buffer_size == 0: self.flush() self._write_buff.append(obj) if self._buffer_size == 0: self.flush()
def write(self, *pb2_obj): """Write a group of one or more protobuf objects to the file. Multiple object groups can be written by calling this method several times before closing stream or exiting the runtime context. The input protobuf objects get buffered and will be written down when the number of buffered objects exceed the `self._buffer_size`. Args: pb2_obj (*protobuf.message.Message): list of protobuf messages. """ base = len(self._write_buff) for idx, obj in enumerate(pb2_obj): if self._buffer_size > 0 and \ (idx + base) != 0 and \ (idx + base) % self._buffer_size == 0: self.flush() self._write_buff.append(obj) if self._buffer_size == 0: self.flush()
[ "Write", "a", "group", "of", "one", "or", "more", "protobuf", "objects", "to", "the", "file", ".", "Multiple", "object", "groups", "can", "be", "written", "by", "calling", "this", "method", "several", "times", "before", "closing", "stream", "or", "exiting", "the", "runtime", "context", "." ]
cartoonist/pystream-protobuf
python
https://github.com/cartoonist/pystream-protobuf/blob/40e70b932436887b748905e5e0a82839e4c559f0/stream/stream.py#L198-L219
[ "def", "write", "(", "self", ",", "*", "pb2_obj", ")", ":", "base", "=", "len", "(", "self", ".", "_write_buff", ")", "for", "idx", ",", "obj", "in", "enumerate", "(", "pb2_obj", ")", ":", "if", "self", ".", "_buffer_size", ">", "0", "and", "(", "idx", "+", "base", ")", "!=", "0", "and", "(", "idx", "+", "base", ")", "%", "self", ".", "_buffer_size", "==", "0", ":", "self", ".", "flush", "(", ")", "self", ".", "_write_buff", ".", "append", "(", "obj", ")", "if", "self", ".", "_buffer_size", "==", "0", ":", "self", ".", "flush", "(", ")" ]
40e70b932436887b748905e5e0a82839e4c559f0
test
Stream.flush
Write down buffer to the file.
stream/stream.py
def flush(self): """Write down buffer to the file.""" if not self.is_output(): return count = len(self._write_buff) if count == 0: return encodeVarint(self._fd.write, count, True) for obj in self._write_buff: obj_str = obj.SerializeToString() encodeVarint(self._fd.write, len(obj_str), True) self._fd.write(obj_str) self._write_buff = []
def flush(self): """Write down buffer to the file.""" if not self.is_output(): return count = len(self._write_buff) if count == 0: return encodeVarint(self._fd.write, count, True) for obj in self._write_buff: obj_str = obj.SerializeToString() encodeVarint(self._fd.write, len(obj_str), True) self._fd.write(obj_str) self._write_buff = []
[ "Write", "down", "buffer", "to", "the", "file", "." ]
cartoonist/pystream-protobuf
python
https://github.com/cartoonist/pystream-protobuf/blob/40e70b932436887b748905e5e0a82839e4c559f0/stream/stream.py#L221-L237
[ "def", "flush", "(", "self", ")", ":", "if", "not", "self", ".", "is_output", "(", ")", ":", "return", "count", "=", "len", "(", "self", ".", "_write_buff", ")", "if", "count", "==", "0", ":", "return", "encodeVarint", "(", "self", ".", "_fd", ".", "write", ",", "count", ",", "True", ")", "for", "obj", "in", "self", ".", "_write_buff", ":", "obj_str", "=", "obj", ".", "SerializeToString", "(", ")", "encodeVarint", "(", "self", ".", "_fd", ".", "write", ",", "len", "(", "obj_str", ")", ",", "True", ")", "self", ".", "_fd", ".", "write", "(", "obj_str", ")", "self", ".", "_write_buff", "=", "[", "]" ]
40e70b932436887b748905e5e0a82839e4c559f0
test
Game.get_game_dir
Returns joined game directory path relative to Steamapps
tools/buildbsp.py
def get_game_dir(self, username=False): """Returns joined game directory path relative to Steamapps""" if not self.common and not username: raise RuntimeError("Can't determine this game's directory without username") if self.common: subdir = "common" else: subdir = "username" subsubdir = self.dir if WIN32 or CYGWIN: subsubdir = subsubdir.lower() return os.path.join(subdir, subsubdir)
def get_game_dir(self, username=False): """Returns joined game directory path relative to Steamapps""" if not self.common and not username: raise RuntimeError("Can't determine this game's directory without username") if self.common: subdir = "common" else: subdir = "username" subsubdir = self.dir if WIN32 or CYGWIN: subsubdir = subsubdir.lower() return os.path.join(subdir, subsubdir)
[ "Returns", "joined", "game", "directory", "path", "relative", "to", "Steamapps" ]
BHSPitMonkey/vmflib
python
https://github.com/BHSPitMonkey/vmflib/blob/322757fcba98e05041ee8f416c8ffe847ca1fe64/tools/buildbsp.py#L34-L45
[ "def", "get_game_dir", "(", "self", ",", "username", "=", "False", ")", ":", "if", "not", "self", ".", "common", "and", "not", "username", ":", "raise", "RuntimeError", "(", "\"Can't determine this game's directory without username\"", ")", "if", "self", ".", "common", ":", "subdir", "=", "\"common\"", "else", ":", "subdir", "=", "\"username\"", "subsubdir", "=", "self", ".", "dir", "if", "WIN32", "or", "CYGWIN", ":", "subsubdir", "=", "subsubdir", ".", "lower", "(", ")", "return", "os", ".", "path", ".", "join", "(", "subdir", ",", "subsubdir", ")" ]
322757fcba98e05041ee8f416c8ffe847ca1fe64
test
_get_MAP_spikes
Used internally by deconvolve to compute the maximum a posteriori spike train for a given set of fluorescence traces and model parameters. See the documentation for deconvolve for the meaning of the arguments Returns: n_hat_best, c_hat_best, LL_best
pyfnnd/_fnndeconv.py
def _get_MAP_spikes(F, c_hat, theta, dt, tol=1E-6, maxiter=100, verbosity=0): """ Used internally by deconvolve to compute the maximum a posteriori spike train for a given set of fluorescence traces and model parameters. See the documentation for deconvolve for the meaning of the arguments Returns: n_hat_best, c_hat_best, LL_best """ npix, nt = F.shape sigma, alpha, beta, lamb, gamma = theta # we project everything onto the alpha mask so that we only ever have to # deal with 1D vector norms alpha_ss = np.dot(alpha, alpha) c = np.dot(alpha, F) - np.dot(alpha, beta) # used for computing the LL and gradient scale_var = 1. / (2 * sigma * sigma) lD = lamb * dt # used for computing the gradient (M.T.dot(lamb * dt)) grad_lnprior = np.zeros(nt, dtype=DTYPE) grad_lnprior[1:] = lD grad_lnprior[:-1] -= lD * gamma # initialize the weight of the barrier term to 1 z = 1. # initial estimate of spike probabilities n_hat = c_hat[1:] - gamma * c_hat[:-1] # assert not np.any(n_hat < 0), "spike probabilities < 0" # (actual - predicted) fluorescence res = c - alpha_ss * c_hat # best overall posterior log-likelihood of the fluorescence LL_best = _post_LL(n_hat, res, scale_var, lD, z) LL_barrier = LL_best nloop1 = 0 terminate_interior = False # in the outer loop we'll progressively reduce the weight of the barrier # term and check the interior point termination criteria while not terminate_interior: nloop2 = 0 terminate_barrier = False # converge for this barrier weight while not terminate_barrier: # by projecting everything onto alpha, we reduce this to a 1D # vector norm res = c - alpha_ss * c_hat # compute direction of newton step d = _direction(n_hat, res, alpha_ss, gamma, scale_var, grad_lnprior, z) terminate_linesearch = False # find the largest step we can take in direction d without # violating the non-negativity constraint on n_hat s_upper_bnd = -n_hat / (d[1:] - gamma * d[:-1]) # we are only interested in positive step sizes feasible = (s_upper_bnd > 0) if np.any(feasible): # 
largest allowable step size is 1. s = min(1., 0.999 * np.min(s_upper_bnd[feasible])) else: # if there is no step size that will keep n_hat >= 0, just # reduce the barrier weight and try again terminate_linesearch = True terminate_barrier = True if verbosity >= 2: print("skipping linesearch: no positive step size will " "keep n_hat >= 0") nloop3 = 0 # backtracking line search for the largest step size that increases # the posterior log-likelihood of the fluorescence while not terminate_linesearch: # update estimated calcium c_hat_line = c_hat + (s * d) # update spike probabilities n_hat_line = c_hat_line[1:] - gamma * c_hat_line[:-1] # assert not np.any(n_hat_line < 0), "spike probabilities < 0" # (actual - predicted) fluorescence res = c - alpha_ss * c_hat_line # compute the new posterior log-likelihood LL_line = _post_LL(n_hat_line, res, scale_var, lD, z) # assert not np.any(np.isnan(LL1)), "nan LL" if verbosity >= 2: print('spikes: iter=%3i, %3i, %3i; z=%-10.4g; s=%-10.4g;' ' LL=%-10.4g' % (nloop1, nloop2, nloop3, z, s, LL_line)) # if the step size gets too small without making any progress, # we terminate the linesearch and reduce the barrier weight if s < S_TOL: if verbosity >= 2: print('--> terminated linesearch: s < %.3g on %i ' 'iterations' % (S_TOL, nloop3)) terminate_linesearch = True terminate_barrier = True # only update c_hat & LL if LL improved if LL_line > LL_barrier: LL_barrier, n_hat, c_hat = LL_line, n_hat_line, c_hat_line terminate_linesearch = True # reduce the step size else: s /= S_FAC nloop3 += 1 # if d gets too small, reduce the barrier weight if (np.linalg.norm(d) < D_TOL): terminate_barrier = True nloop2 += 1 # only test for convergence if we were actually able to enter the # linesearch if nloop3: delta_LL = -(LL_barrier - LL_best) / LL_best LL_best = LL_barrier if (delta_LL < tol): terminate_interior = True elif z < Z_TOL: if verbosity >= 2: print('MAP spike train failed to converge before z < %.3g' % Z_TOL) terminate_interior = True elif 
nloop1 > maxiter: if verbosity >= 2: print('MAP spike train failed to converge within maxiter (%i)' % maxiter) terminate_interior = True # increment the outer loop counter, reduce the barrier weight nloop1 += 1 z /= Z_FAC return n_hat, c_hat, LL_best
def _get_MAP_spikes(F, c_hat, theta, dt, tol=1E-6, maxiter=100, verbosity=0): """ Used internally by deconvolve to compute the maximum a posteriori spike train for a given set of fluorescence traces and model parameters. See the documentation for deconvolve for the meaning of the arguments Returns: n_hat_best, c_hat_best, LL_best """ npix, nt = F.shape sigma, alpha, beta, lamb, gamma = theta # we project everything onto the alpha mask so that we only ever have to # deal with 1D vector norms alpha_ss = np.dot(alpha, alpha) c = np.dot(alpha, F) - np.dot(alpha, beta) # used for computing the LL and gradient scale_var = 1. / (2 * sigma * sigma) lD = lamb * dt # used for computing the gradient (M.T.dot(lamb * dt)) grad_lnprior = np.zeros(nt, dtype=DTYPE) grad_lnprior[1:] = lD grad_lnprior[:-1] -= lD * gamma # initialize the weight of the barrier term to 1 z = 1. # initial estimate of spike probabilities n_hat = c_hat[1:] - gamma * c_hat[:-1] # assert not np.any(n_hat < 0), "spike probabilities < 0" # (actual - predicted) fluorescence res = c - alpha_ss * c_hat # best overall posterior log-likelihood of the fluorescence LL_best = _post_LL(n_hat, res, scale_var, lD, z) LL_barrier = LL_best nloop1 = 0 terminate_interior = False # in the outer loop we'll progressively reduce the weight of the barrier # term and check the interior point termination criteria while not terminate_interior: nloop2 = 0 terminate_barrier = False # converge for this barrier weight while not terminate_barrier: # by projecting everything onto alpha, we reduce this to a 1D # vector norm res = c - alpha_ss * c_hat # compute direction of newton step d = _direction(n_hat, res, alpha_ss, gamma, scale_var, grad_lnprior, z) terminate_linesearch = False # find the largest step we can take in direction d without # violating the non-negativity constraint on n_hat s_upper_bnd = -n_hat / (d[1:] - gamma * d[:-1]) # we are only interested in positive step sizes feasible = (s_upper_bnd > 0) if np.any(feasible): # 
largest allowable step size is 1. s = min(1., 0.999 * np.min(s_upper_bnd[feasible])) else: # if there is no step size that will keep n_hat >= 0, just # reduce the barrier weight and try again terminate_linesearch = True terminate_barrier = True if verbosity >= 2: print("skipping linesearch: no positive step size will " "keep n_hat >= 0") nloop3 = 0 # backtracking line search for the largest step size that increases # the posterior log-likelihood of the fluorescence while not terminate_linesearch: # update estimated calcium c_hat_line = c_hat + (s * d) # update spike probabilities n_hat_line = c_hat_line[1:] - gamma * c_hat_line[:-1] # assert not np.any(n_hat_line < 0), "spike probabilities < 0" # (actual - predicted) fluorescence res = c - alpha_ss * c_hat_line # compute the new posterior log-likelihood LL_line = _post_LL(n_hat_line, res, scale_var, lD, z) # assert not np.any(np.isnan(LL1)), "nan LL" if verbosity >= 2: print('spikes: iter=%3i, %3i, %3i; z=%-10.4g; s=%-10.4g;' ' LL=%-10.4g' % (nloop1, nloop2, nloop3, z, s, LL_line)) # if the step size gets too small without making any progress, # we terminate the linesearch and reduce the barrier weight if s < S_TOL: if verbosity >= 2: print('--> terminated linesearch: s < %.3g on %i ' 'iterations' % (S_TOL, nloop3)) terminate_linesearch = True terminate_barrier = True # only update c_hat & LL if LL improved if LL_line > LL_barrier: LL_barrier, n_hat, c_hat = LL_line, n_hat_line, c_hat_line terminate_linesearch = True # reduce the step size else: s /= S_FAC nloop3 += 1 # if d gets too small, reduce the barrier weight if (np.linalg.norm(d) < D_TOL): terminate_barrier = True nloop2 += 1 # only test for convergence if we were actually able to enter the # linesearch if nloop3: delta_LL = -(LL_barrier - LL_best) / LL_best LL_best = LL_barrier if (delta_LL < tol): terminate_interior = True elif z < Z_TOL: if verbosity >= 2: print('MAP spike train failed to converge before z < %.3g' % Z_TOL) terminate_interior = True elif 
nloop1 > maxiter: if verbosity >= 2: print('MAP spike train failed to converge within maxiter (%i)' % maxiter) terminate_interior = True # increment the outer loop counter, reduce the barrier weight nloop1 += 1 z /= Z_FAC return n_hat, c_hat, LL_best
[ "Used", "internally", "by", "deconvolve", "to", "compute", "the", "maximum", "a", "posteriori", "spike", "train", "for", "a", "given", "set", "of", "fluorescence", "traces", "and", "model", "parameters", "." ]
alimuldal/PyFNND
python
https://github.com/alimuldal/PyFNND/blob/3cbe0622a385f5206837bfd944d781aa7b1649ea/pyfnnd/_fnndeconv.py#L302-L464
[ "def", "_get_MAP_spikes", "(", "F", ",", "c_hat", ",", "theta", ",", "dt", ",", "tol", "=", "1E-6", ",", "maxiter", "=", "100", ",", "verbosity", "=", "0", ")", ":", "npix", ",", "nt", "=", "F", ".", "shape", "sigma", ",", "alpha", ",", "beta", ",", "lamb", ",", "gamma", "=", "theta", "# we project everything onto the alpha mask so that we only ever have to", "# deal with 1D vector norms", "alpha_ss", "=", "np", ".", "dot", "(", "alpha", ",", "alpha", ")", "c", "=", "np", ".", "dot", "(", "alpha", ",", "F", ")", "-", "np", ".", "dot", "(", "alpha", ",", "beta", ")", "# used for computing the LL and gradient", "scale_var", "=", "1.", "/", "(", "2", "*", "sigma", "*", "sigma", ")", "lD", "=", "lamb", "*", "dt", "# used for computing the gradient (M.T.dot(lamb * dt))", "grad_lnprior", "=", "np", ".", "zeros", "(", "nt", ",", "dtype", "=", "DTYPE", ")", "grad_lnprior", "[", "1", ":", "]", "=", "lD", "grad_lnprior", "[", ":", "-", "1", "]", "-=", "lD", "*", "gamma", "# initialize the weight of the barrier term to 1", "z", "=", "1.", "# initial estimate of spike probabilities", "n_hat", "=", "c_hat", "[", "1", ":", "]", "-", "gamma", "*", "c_hat", "[", ":", "-", "1", "]", "# assert not np.any(n_hat < 0), \"spike probabilities < 0\"", "# (actual - predicted) fluorescence", "res", "=", "c", "-", "alpha_ss", "*", "c_hat", "# best overall posterior log-likelihood of the fluorescence", "LL_best", "=", "_post_LL", "(", "n_hat", ",", "res", ",", "scale_var", ",", "lD", ",", "z", ")", "LL_barrier", "=", "LL_best", "nloop1", "=", "0", "terminate_interior", "=", "False", "# in the outer loop we'll progressively reduce the weight of the barrier", "# term and check the interior point termination criteria", "while", "not", "terminate_interior", ":", "nloop2", "=", "0", "terminate_barrier", "=", "False", "# converge for this barrier weight", "while", "not", "terminate_barrier", ":", "# by projecting everything onto alpha, we reduce this to a 1D", "# vector norm", "res", "=", "c", 
"-", "alpha_ss", "*", "c_hat", "# compute direction of newton step", "d", "=", "_direction", "(", "n_hat", ",", "res", ",", "alpha_ss", ",", "gamma", ",", "scale_var", ",", "grad_lnprior", ",", "z", ")", "terminate_linesearch", "=", "False", "# find the largest step we can take in direction d without", "# violating the non-negativity constraint on n_hat", "s_upper_bnd", "=", "-", "n_hat", "/", "(", "d", "[", "1", ":", "]", "-", "gamma", "*", "d", "[", ":", "-", "1", "]", ")", "# we are only interested in positive step sizes", "feasible", "=", "(", "s_upper_bnd", ">", "0", ")", "if", "np", ".", "any", "(", "feasible", ")", ":", "# largest allowable step size is 1.", "s", "=", "min", "(", "1.", ",", "0.999", "*", "np", ".", "min", "(", "s_upper_bnd", "[", "feasible", "]", ")", ")", "else", ":", "# if there is no step size that will keep n_hat >= 0, just", "# reduce the barrier weight and try again", "terminate_linesearch", "=", "True", "terminate_barrier", "=", "True", "if", "verbosity", ">=", "2", ":", "print", "(", "\"skipping linesearch: no positive step size will \"", "\"keep n_hat >= 0\"", ")", "nloop3", "=", "0", "# backtracking line search for the largest step size that increases", "# the posterior log-likelihood of the fluorescence", "while", "not", "terminate_linesearch", ":", "# update estimated calcium", "c_hat_line", "=", "c_hat", "+", "(", "s", "*", "d", ")", "# update spike probabilities", "n_hat_line", "=", "c_hat_line", "[", "1", ":", "]", "-", "gamma", "*", "c_hat_line", "[", ":", "-", "1", "]", "# assert not np.any(n_hat_line < 0), \"spike probabilities < 0\"", "# (actual - predicted) fluorescence", "res", "=", "c", "-", "alpha_ss", "*", "c_hat_line", "# compute the new posterior log-likelihood", "LL_line", "=", "_post_LL", "(", "n_hat_line", ",", "res", ",", "scale_var", ",", "lD", ",", "z", ")", "# assert not np.any(np.isnan(LL1)), \"nan LL\"", "if", "verbosity", ">=", "2", ":", "print", "(", "'spikes: iter=%3i, %3i, %3i; z=%-10.4g; s=%-10.4g;'", 
"' LL=%-10.4g'", "%", "(", "nloop1", ",", "nloop2", ",", "nloop3", ",", "z", ",", "s", ",", "LL_line", ")", ")", "# if the step size gets too small without making any progress,", "# we terminate the linesearch and reduce the barrier weight", "if", "s", "<", "S_TOL", ":", "if", "verbosity", ">=", "2", ":", "print", "(", "'--> terminated linesearch: s < %.3g on %i '", "'iterations'", "%", "(", "S_TOL", ",", "nloop3", ")", ")", "terminate_linesearch", "=", "True", "terminate_barrier", "=", "True", "# only update c_hat & LL if LL improved", "if", "LL_line", ">", "LL_barrier", ":", "LL_barrier", ",", "n_hat", ",", "c_hat", "=", "LL_line", ",", "n_hat_line", ",", "c_hat_line", "terminate_linesearch", "=", "True", "# reduce the step size", "else", ":", "s", "/=", "S_FAC", "nloop3", "+=", "1", "# if d gets too small, reduce the barrier weight", "if", "(", "np", ".", "linalg", ".", "norm", "(", "d", ")", "<", "D_TOL", ")", ":", "terminate_barrier", "=", "True", "nloop2", "+=", "1", "# only test for convergence if we were actually able to enter the", "# linesearch", "if", "nloop3", ":", "delta_LL", "=", "-", "(", "LL_barrier", "-", "LL_best", ")", "/", "LL_best", "LL_best", "=", "LL_barrier", "if", "(", "delta_LL", "<", "tol", ")", ":", "terminate_interior", "=", "True", "elif", "z", "<", "Z_TOL", ":", "if", "verbosity", ">=", "2", ":", "print", "(", "'MAP spike train failed to converge before z < %.3g'", "%", "Z_TOL", ")", "terminate_interior", "=", "True", "elif", "nloop1", ">", "maxiter", ":", "if", "verbosity", ">=", "2", ":", "print", "(", "'MAP spike train failed to converge within maxiter (%i)'", "%", "maxiter", ")", "terminate_interior", "=", "True", "# increment the outer loop counter, reduce the barrier weight", "nloop1", "+=", "1", "z", "/=", "Z_FAC", "return", "n_hat", ",", "c_hat", ",", "LL_best" ]
3cbe0622a385f5206837bfd944d781aa7b1649ea
test
trisolve
The tridiagonal matrix (Thomas) algorithm for solving tridiagonal systems of equations: a_{i}x_{i-1} + b_{i}x_{i} + c_{i}x_{i+1} = y_{i} in matrix form: Mx = b TDMA is O(n), whereas standard Gaussian elimination is O(n^3). Arguments: ----------- dl: (n - 1,) vector the lower diagonal of M d: (n,) vector the main diagonal of M du: (n - 1,) vector the upper diagonal of M b: (n,) vector the result of Mx inplace: if True, and if d and b are both float64 vectors, they will be modified in place (may be faster) Returns: ----------- x: (n,) vector the solution to Mx = b References: ----------- http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm http://www.netlib.org/lapack/explore-html/d1/db3/dgtsv_8f.html
pyfnnd/_tridiag_solvers.py
def trisolve(dl, d, du, b, inplace=False): """ The tridiagonal matrix (Thomas) algorithm for solving tridiagonal systems of equations: a_{i}x_{i-1} + b_{i}x_{i} + c_{i}x_{i+1} = y_{i} in matrix form: Mx = b TDMA is O(n), whereas standard Gaussian elimination is O(n^3). Arguments: ----------- dl: (n - 1,) vector the lower diagonal of M d: (n,) vector the main diagonal of M du: (n - 1,) vector the upper diagonal of M b: (n,) vector the result of Mx inplace: if True, and if d and b are both float64 vectors, they will be modified in place (may be faster) Returns: ----------- x: (n,) vector the solution to Mx = b References: ----------- http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm http://www.netlib.org/lapack/explore-html/d1/db3/dgtsv_8f.html """ if (dl.shape[0] != du.shape[0] or (d.shape[0] != dl.shape[0] + 1) or d.shape[0] != b.shape[0]): raise ValueError('Invalid diagonal shapes') bshape_in = b.shape rtype = np.result_type(dl, d, du, b) if not inplace: # force a copy dl = np.array(dl, dtype=rtype, copy=True, order='F') d = np.array(d, dtype=rtype, copy=True, order='F') du = np.array(du, dtype=rtype, copy=True, order='F') b = np.array(b, dtype=rtype, copy=True, order='F') # this may also force copies if arrays have inconsistent types / incorrect # order dl, d, du, b = (np.array(v, dtype=rtype, copy=False, order='F') for v in (dl, d, du, b)) # use the LAPACK implementation _lapack_trisolve(dl, d, du, b, rtype) return b.reshape(bshape_in)
def trisolve(dl, d, du, b, inplace=False): """ The tridiagonal matrix (Thomas) algorithm for solving tridiagonal systems of equations: a_{i}x_{i-1} + b_{i}x_{i} + c_{i}x_{i+1} = y_{i} in matrix form: Mx = b TDMA is O(n), whereas standard Gaussian elimination is O(n^3). Arguments: ----------- dl: (n - 1,) vector the lower diagonal of M d: (n,) vector the main diagonal of M du: (n - 1,) vector the upper diagonal of M b: (n,) vector the result of Mx inplace: if True, and if d and b are both float64 vectors, they will be modified in place (may be faster) Returns: ----------- x: (n,) vector the solution to Mx = b References: ----------- http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm http://www.netlib.org/lapack/explore-html/d1/db3/dgtsv_8f.html """ if (dl.shape[0] != du.shape[0] or (d.shape[0] != dl.shape[0] + 1) or d.shape[0] != b.shape[0]): raise ValueError('Invalid diagonal shapes') bshape_in = b.shape rtype = np.result_type(dl, d, du, b) if not inplace: # force a copy dl = np.array(dl, dtype=rtype, copy=True, order='F') d = np.array(d, dtype=rtype, copy=True, order='F') du = np.array(du, dtype=rtype, copy=True, order='F') b = np.array(b, dtype=rtype, copy=True, order='F') # this may also force copies if arrays have inconsistent types / incorrect # order dl, d, du, b = (np.array(v, dtype=rtype, copy=False, order='F') for v in (dl, d, du, b)) # use the LAPACK implementation _lapack_trisolve(dl, d, du, b, rtype) return b.reshape(bshape_in)
[ "The", "tridiagonal", "matrix", "(", "Thomas", ")", "algorithm", "for", "solving", "tridiagonal", "systems", "of", "equations", ":" ]
alimuldal/PyFNND
python
https://github.com/alimuldal/PyFNND/blob/3cbe0622a385f5206837bfd944d781aa7b1649ea/pyfnnd/_tridiag_solvers.py#L31-L90
[ "def", "trisolve", "(", "dl", ",", "d", ",", "du", ",", "b", ",", "inplace", "=", "False", ")", ":", "if", "(", "dl", ".", "shape", "[", "0", "]", "!=", "du", ".", "shape", "[", "0", "]", "or", "(", "d", ".", "shape", "[", "0", "]", "!=", "dl", ".", "shape", "[", "0", "]", "+", "1", ")", "or", "d", ".", "shape", "[", "0", "]", "!=", "b", ".", "shape", "[", "0", "]", ")", ":", "raise", "ValueError", "(", "'Invalid diagonal shapes'", ")", "bshape_in", "=", "b", ".", "shape", "rtype", "=", "np", ".", "result_type", "(", "dl", ",", "d", ",", "du", ",", "b", ")", "if", "not", "inplace", ":", "# force a copy", "dl", "=", "np", ".", "array", "(", "dl", ",", "dtype", "=", "rtype", ",", "copy", "=", "True", ",", "order", "=", "'F'", ")", "d", "=", "np", ".", "array", "(", "d", ",", "dtype", "=", "rtype", ",", "copy", "=", "True", ",", "order", "=", "'F'", ")", "du", "=", "np", ".", "array", "(", "du", ",", "dtype", "=", "rtype", ",", "copy", "=", "True", ",", "order", "=", "'F'", ")", "b", "=", "np", ".", "array", "(", "b", ",", "dtype", "=", "rtype", ",", "copy", "=", "True", ",", "order", "=", "'F'", ")", "# this may also force copies if arrays have inconsistent types / incorrect", "# order", "dl", ",", "d", ",", "du", ",", "b", "=", "(", "np", ".", "array", "(", "v", ",", "dtype", "=", "rtype", ",", "copy", "=", "False", ",", "order", "=", "'F'", ")", "for", "v", "in", "(", "dl", ",", "d", ",", "du", ",", "b", ")", ")", "# use the LAPACK implementation", "_lapack_trisolve", "(", "dl", ",", "d", ",", "du", ",", "b", ",", "rtype", ")", "return", "b", ".", "reshape", "(", "bshape_in", ")" ]
3cbe0622a385f5206837bfd944d781aa7b1649ea
test
UIComponent.from_web_element
Store reference to a WebElement instance representing the element on the DOM. Use it when an instance of WebElement has already been created (e.g. as the result of find_element) and you want to create a UIComponent out of it without evaluating it from the locator again. Returns an instance of the class.
pages/ui_component.py
def from_web_element(self, web_element): """ Store reference to a WebElement instance representing the element on the DOM. Use it when an instance of WebElement has already been created (e.g. as the result of find_element) and you want to create a UIComponent out of it without evaluating it from the locator again. Returns an instance of the class. """ if isinstance(web_element, WebElement) is not True: raise TypeError("web_element parameter is not of type WebElement.") self._web_element = web_element return self
def from_web_element(self, web_element): """ Store reference to a WebElement instance representing the element on the DOM. Use it when an instance of WebElement has already been created (e.g. as the result of find_element) and you want to create a UIComponent out of it without evaluating it from the locator again. Returns an instance of the class. """ if isinstance(web_element, WebElement) is not True: raise TypeError("web_element parameter is not of type WebElement.") self._web_element = web_element return self
[ "Store", "reference", "to", "a", "WebElement", "instance", "representing", "the", "element", "on", "the", "DOM", ".", "Use", "it", "when", "an", "instance", "of", "WebElement", "has", "already", "been", "created", "(", "e", ".", "g", ".", "as", "the", "result", "of", "find_element", ")", "and", "you", "want", "to", "create", "a", "UIComponent", "out", "of", "it", "without", "evaluating", "it", "from", "the", "locator", "again", ".", "Returns", "an", "instance", "of", "the", "class", "." ]
Skyscanner/pages
python
https://github.com/Skyscanner/pages/blob/f80471ef01f84b11e4d751dff1e6398ae1e230b8/pages/ui_component.py#L44-L54
[ "def", "from_web_element", "(", "self", ",", "web_element", ")", ":", "if", "isinstance", "(", "web_element", ",", "WebElement", ")", "is", "not", "True", ":", "raise", "TypeError", "(", "\"web_element parameter is not of type WebElement.\"", ")", "self", ".", "_web_element", "=", "web_element", "return", "self" ]
f80471ef01f84b11e4d751dff1e6398ae1e230b8
test
UIComponent.locate
Lazily locates the element on the DOM if the WebElement instance is not available already. Returns a WebElement object. It also caches the element if caching has been set through cache().
pages/ui_component.py
def locate(self): """ Lazily locates the element on the DOM if the WebElement instance is not available already. Returns a WebElement object. It also caches the element if caching has been set through cache(). """ if self._web_element: return self._web_element else: locator_type, locator_value = self.__locator element = self.driver.find_element(by=locator_type, value=locator_value) self._cache_web_element(element) # cache the element if allowed return element
def locate(self): """ Lazily locates the element on the DOM if the WebElement instance is not available already. Returns a WebElement object. It also caches the element if caching has been set through cache(). """ if self._web_element: return self._web_element else: locator_type, locator_value = self.__locator element = self.driver.find_element(by=locator_type, value=locator_value) self._cache_web_element(element) # cache the element if allowed return element
[ "Lazily", "locates", "the", "element", "on", "the", "DOM", "if", "the", "WebElement", "instance", "is", "not", "available", "already", ".", "Returns", "a", "WebElement", "object", ".", "It", "also", "caches", "the", "element", "if", "caching", "has", "been", "set", "through", "cache", "()", "." ]
Skyscanner/pages
python
https://github.com/Skyscanner/pages/blob/f80471ef01f84b11e4d751dff1e6398ae1e230b8/pages/ui_component.py#L68-L80
[ "def", "locate", "(", "self", ")", ":", "if", "self", ".", "_web_element", ":", "return", "self", ".", "_web_element", "else", ":", "locator_type", ",", "locator_value", "=", "self", ".", "__locator", "element", "=", "self", ".", "driver", ".", "find_element", "(", "by", "=", "locator_type", ",", "value", "=", "locator_value", ")", "self", ".", "_cache_web_element", "(", "element", ")", "# cache the element if allowed", "return", "element" ]
f80471ef01f84b11e4d751dff1e6398ae1e230b8
test
TextInput.input_text_with_keyboard_emulation
Works around the problem of emulating user interactions with text inputs. Emulates a key-down action on the first char of the input. This way, implementations which require key-down event to trigger auto-suggest are testable. Then the chains sends the rest of the text and releases the key.
pages/standard_components/textinput.py
def input_text_with_keyboard_emulation(self, text): """ Works around the problem of emulating user interactions with text inputs. Emulates a key-down action on the first char of the input. This way, implementations which require key-down event to trigger auto-suggest are testable. Then the chains sends the rest of the text and releases the key. """ ActionChains(self.driver).key_down(text).key_up(Keys.CONTROL).perform()
def input_text_with_keyboard_emulation(self, text): """ Works around the problem of emulating user interactions with text inputs. Emulates a key-down action on the first char of the input. This way, implementations which require key-down event to trigger auto-suggest are testable. Then the chains sends the rest of the text and releases the key. """ ActionChains(self.driver).key_down(text).key_up(Keys.CONTROL).perform()
[ "Works", "around", "the", "problem", "of", "emulating", "user", "interactions", "with", "text", "inputs", ".", "Emulates", "a", "key", "-", "down", "action", "on", "the", "first", "char", "of", "the", "input", ".", "This", "way", "implementations", "which", "require", "key", "-", "down", "event", "to", "trigger", "auto", "-", "suggest", "are", "testable", ".", "Then", "the", "chains", "sends", "the", "rest", "of", "the", "text", "and", "releases", "the", "key", "." ]
Skyscanner/pages
python
https://github.com/Skyscanner/pages/blob/f80471ef01f84b11e4d751dff1e6398ae1e230b8/pages/standard_components/textinput.py#L42-L49
[ "def", "input_text_with_keyboard_emulation", "(", "self", ",", "text", ")", ":", "ActionChains", "(", "self", ".", "driver", ")", ".", "key_down", "(", "text", ")", ".", "key_up", "(", "Keys", ".", "CONTROL", ")", ".", "perform", "(", ")" ]
f80471ef01f84b11e4d751dff1e6398ae1e230b8
test
make_fake_movie
Generate 2D fake fluorescence movie Arguments: --------------------------------------------------------------------------- nframes: number of timebins to simulate mask_shape: tuple (nrows, ncols), shape of a single movie frame mask_center: tuple (x, y), pixel coords of cell center bg_intensity: scalar, amplitude of (static) baseline fluorescence mask_sigma: scalar, standard deviation of Gaussian mask dt: timestep (s) rate: mean spike rate (Hz) tau: time constant of decay in calcium concentration (s) sigma: SD of additive noise on fluorescence seed: Seed for RNG Returns: --------------------------------------------------------------------------- F: fluorescence [npixels, nframes] c: calcium concentration [nframes,] n: spike train [nframes,] theta: tuple of true model parameters: (sigma, alpha, beta, lambda, gamma)
pyfnnd/demo.py
def make_fake_movie(nframes, mask_shape=(64, 64), mask_center=None, bg_intensity=0.1, mask_sigma=10, dt=0.02, rate=1.0, tau=1., sigma=0.001, seed=None): """ Generate 2D fake fluorescence movie Arguments: --------------------------------------------------------------------------- nframes: number of timebins to simulate mask_shape: tuple (nrows, ncols), shape of a single movie frame mask_center: tuple (x, y), pixel coords of cell center bg_intensity: scalar, amplitude of (static) baseline fluorescence mask_sigma: scalar, standard deviation of Gaussian mask dt: timestep (s) rate: mean spike rate (Hz) tau: time constant of decay in calcium concentration (s) sigma: SD of additive noise on fluorescence seed: Seed for RNG Returns: --------------------------------------------------------------------------- F: fluorescence [npixels, nframes] c: calcium concentration [nframes,] n: spike train [nframes,] theta: tuple of true model parameters: (sigma, alpha, beta, lambda, gamma) """ gen = np.random.RandomState(seed) # poisson spikes n = gen.poisson(rate * dt, size=nframes) # internal calcium dynamics gamma = np.exp(-dt / tau) c = signal.lfilter(np.r_[1], np.r_[1, -gamma], n, axis=0) # pixel weights (sum == 1) nr, nc = mask_shape npix = nr * nc if mask_center is None: mask_center = (nc // 2., nr // 2.) a, b = mask_center y, x = np.ogrid[:nr, :nc] xs = (x - a) ** 2. ys = (y - b) ** 2. twoss = 2. * mask_sigma ** 2. alpha = np.exp(-1 * ((xs / twoss) + (ys / twoss))).ravel() alpha /= alpha.sum() # background fluorescence beta = gen.randn(npix) * bg_intensity # firing rate (spike probability per sec) lamb = rate # spatially & temporally white noise epsilon = gen.randn(npix, nframes) * sigma # simulated fluorescence F = c[None, :] * alpha[:, None] + beta[:, None] + epsilon theta = (sigma, alpha, beta, lamb, gamma) return F, c, n, theta
def make_fake_movie(nframes, mask_shape=(64, 64), mask_center=None, bg_intensity=0.1, mask_sigma=10, dt=0.02, rate=1.0, tau=1., sigma=0.001, seed=None): """ Generate 2D fake fluorescence movie Arguments: --------------------------------------------------------------------------- nframes: number of timebins to simulate mask_shape: tuple (nrows, ncols), shape of a single movie frame mask_center: tuple (x, y), pixel coords of cell center bg_intensity: scalar, amplitude of (static) baseline fluorescence mask_sigma: scalar, standard deviation of Gaussian mask dt: timestep (s) rate: mean spike rate (Hz) tau: time constant of decay in calcium concentration (s) sigma: SD of additive noise on fluorescence seed: Seed for RNG Returns: --------------------------------------------------------------------------- F: fluorescence [npixels, nframes] c: calcium concentration [nframes,] n: spike train [nframes,] theta: tuple of true model parameters: (sigma, alpha, beta, lambda, gamma) """ gen = np.random.RandomState(seed) # poisson spikes n = gen.poisson(rate * dt, size=nframes) # internal calcium dynamics gamma = np.exp(-dt / tau) c = signal.lfilter(np.r_[1], np.r_[1, -gamma], n, axis=0) # pixel weights (sum == 1) nr, nc = mask_shape npix = nr * nc if mask_center is None: mask_center = (nc // 2., nr // 2.) a, b = mask_center y, x = np.ogrid[:nr, :nc] xs = (x - a) ** 2. ys = (y - b) ** 2. twoss = 2. * mask_sigma ** 2. alpha = np.exp(-1 * ((xs / twoss) + (ys / twoss))).ravel() alpha /= alpha.sum() # background fluorescence beta = gen.randn(npix) * bg_intensity # firing rate (spike probability per sec) lamb = rate # spatially & temporally white noise epsilon = gen.randn(npix, nframes) * sigma # simulated fluorescence F = c[None, :] * alpha[:, None] + beta[:, None] + epsilon theta = (sigma, alpha, beta, lamb, gamma) return F, c, n, theta
[ "Generate", "2D", "fake", "fluorescence", "movie" ]
alimuldal/PyFNND
python
https://github.com/alimuldal/PyFNND/blob/3cbe0622a385f5206837bfd944d781aa7b1649ea/pyfnnd/demo.py#L8-L73
[ "def", "make_fake_movie", "(", "nframes", ",", "mask_shape", "=", "(", "64", ",", "64", ")", ",", "mask_center", "=", "None", ",", "bg_intensity", "=", "0.1", ",", "mask_sigma", "=", "10", ",", "dt", "=", "0.02", ",", "rate", "=", "1.0", ",", "tau", "=", "1.", ",", "sigma", "=", "0.001", ",", "seed", "=", "None", ")", ":", "gen", "=", "np", ".", "random", ".", "RandomState", "(", "seed", ")", "# poisson spikes", "n", "=", "gen", ".", "poisson", "(", "rate", "*", "dt", ",", "size", "=", "nframes", ")", "# internal calcium dynamics", "gamma", "=", "np", ".", "exp", "(", "-", "dt", "/", "tau", ")", "c", "=", "signal", ".", "lfilter", "(", "np", ".", "r_", "[", "1", "]", ",", "np", ".", "r_", "[", "1", ",", "-", "gamma", "]", ",", "n", ",", "axis", "=", "0", ")", "# pixel weights (sum == 1)", "nr", ",", "nc", "=", "mask_shape", "npix", "=", "nr", "*", "nc", "if", "mask_center", "is", "None", ":", "mask_center", "=", "(", "nc", "//", "2.", ",", "nr", "//", "2.", ")", "a", ",", "b", "=", "mask_center", "y", ",", "x", "=", "np", ".", "ogrid", "[", ":", "nr", ",", ":", "nc", "]", "xs", "=", "(", "x", "-", "a", ")", "**", "2.", "ys", "=", "(", "y", "-", "b", ")", "**", "2.", "twoss", "=", "2.", "*", "mask_sigma", "**", "2.", "alpha", "=", "np", ".", "exp", "(", "-", "1", "*", "(", "(", "xs", "/", "twoss", ")", "+", "(", "ys", "/", "twoss", ")", ")", ")", ".", "ravel", "(", ")", "alpha", "/=", "alpha", ".", "sum", "(", ")", "# background fluorescence", "beta", "=", "gen", ".", "randn", "(", "npix", ")", "*", "bg_intensity", "# firing rate (spike probability per sec)", "lamb", "=", "rate", "# spatially & temporally white noise", "epsilon", "=", "gen", ".", "randn", "(", "npix", ",", "nframes", ")", "*", "sigma", "# simulated fluorescence", "F", "=", "c", "[", "None", ",", ":", "]", "*", "alpha", "[", ":", ",", "None", "]", "+", "beta", "[", ":", ",", "None", "]", "+", "epsilon", "theta", "=", "(", "sigma", ",", "alpha", ",", "beta", ",", "lamb", ",", "gamma", ")", "return", 
"F", ",", "c", ",", "n", ",", "theta" ]
3cbe0622a385f5206837bfd944d781aa7b1649ea
test
ElementWithTraits.evaluate_traits
Evaluates traits and returns a list containing the description of traits which are not true. Notice that if LAZY_EVALUATION is set to False all traits are evaluated before returning. Use this option only for debugging purposes.
pages/element_with_traits.py
def evaluate_traits(self): """ Evaluates traits and returns a list containing the description of traits which are not true. Notice that if LAZY_EVALUATION is set to False all traits are evaluated before returning. Use this option only for debugging purposes. """ return_value = [] for trait in self.traits: if not trait.condition(): if not self.traits_eager_evaluation: return [trait.description] else: return_value.append(trait.description) return return_value
def evaluate_traits(self): """ Evaluates traits and returns a list containing the description of traits which are not true. Notice that if LAZY_EVALUATION is set to False all traits are evaluated before returning. Use this option only for debugging purposes. """ return_value = [] for trait in self.traits: if not trait.condition(): if not self.traits_eager_evaluation: return [trait.description] else: return_value.append(trait.description) return return_value
[ "Evaluates", "traits", "and", "returns", "a", "list", "containing", "the", "description", "of", "traits", "which", "are", "not", "true", ".", "Notice", "that", "if", "LAZY_EVALUATION", "is", "set", "to", "False", "all", "traits", "are", "evaluated", "before", "returning", ".", "Use", "this", "option", "only", "for", "debugging", "purposes", "." ]
Skyscanner/pages
python
https://github.com/Skyscanner/pages/blob/f80471ef01f84b11e4d751dff1e6398ae1e230b8/pages/element_with_traits.py#L61-L74
[ "def", "evaluate_traits", "(", "self", ")", ":", "return_value", "=", "[", "]", "for", "trait", "in", "self", ".", "traits", ":", "if", "not", "trait", ".", "condition", "(", ")", ":", "if", "not", "self", ".", "traits_eager_evaluation", ":", "return", "[", "trait", ".", "description", "]", "else", ":", "return_value", ".", "append", "(", "trait", ".", "description", ")", "return", "return_value" ]
f80471ef01f84b11e4d751dff1e6398ae1e230b8
test
Wait.until_condition
Waits until conditions is True or returns a non-None value. If any of the trait is still not present after timeout, raises a TimeoutException.
pages/wait/wait.py
def until_condition(self, condition, condition_description): """ Waits until conditions is True or returns a non-None value. If any of the trait is still not present after timeout, raises a TimeoutException. """ end_time = time.time() + self._timeout count = 1 while True: try: if not hasattr(condition, '__call__'): raise TypeError("condition is not callable") value = condition() if type(value) is bool and value is not False: return value elif type(value) is not bool and value is not None: return value else: logger.debug("#" + str(count) + " - wait until " + condition_description) # pragma: no cover except self._ignored_exceptions as ex: logger.debug("Captured {0} : {1}".format(str(ex.__class__).replace("<type '", "").replace("'>", ""), str(ex))) # pragma: no cover time.sleep(self._poll) count += 1 if time.time() > end_time: # pragma: no cover break raise TimeoutException( msg="condition <" + condition_description + "> was not true after " + str(self._timeout) + " seconds.")
def until_condition(self, condition, condition_description): """ Waits until conditions is True or returns a non-None value. If any of the trait is still not present after timeout, raises a TimeoutException. """ end_time = time.time() + self._timeout count = 1 while True: try: if not hasattr(condition, '__call__'): raise TypeError("condition is not callable") value = condition() if type(value) is bool and value is not False: return value elif type(value) is not bool and value is not None: return value else: logger.debug("#" + str(count) + " - wait until " + condition_description) # pragma: no cover except self._ignored_exceptions as ex: logger.debug("Captured {0} : {1}".format(str(ex.__class__).replace("<type '", "").replace("'>", ""), str(ex))) # pragma: no cover time.sleep(self._poll) count += 1 if time.time() > end_time: # pragma: no cover break raise TimeoutException( msg="condition <" + condition_description + "> was not true after " + str(self._timeout) + " seconds.")
[ "Waits", "until", "conditions", "is", "True", "or", "returns", "a", "non", "-", "None", "value", ".", "If", "any", "of", "the", "trait", "is", "still", "not", "present", "after", "timeout", "raises", "a", "TimeoutException", "." ]
Skyscanner/pages
python
https://github.com/Skyscanner/pages/blob/f80471ef01f84b11e4d751dff1e6398ae1e230b8/pages/wait/wait.py#L52-L78
[ "def", "until_condition", "(", "self", ",", "condition", ",", "condition_description", ")", ":", "end_time", "=", "time", ".", "time", "(", ")", "+", "self", ".", "_timeout", "count", "=", "1", "while", "True", ":", "try", ":", "if", "not", "hasattr", "(", "condition", ",", "'__call__'", ")", ":", "raise", "TypeError", "(", "\"condition is not callable\"", ")", "value", "=", "condition", "(", ")", "if", "type", "(", "value", ")", "is", "bool", "and", "value", "is", "not", "False", ":", "return", "value", "elif", "type", "(", "value", ")", "is", "not", "bool", "and", "value", "is", "not", "None", ":", "return", "value", "else", ":", "logger", ".", "debug", "(", "\"#\"", "+", "str", "(", "count", ")", "+", "\" - wait until \"", "+", "condition_description", ")", "# pragma: no cover", "except", "self", ".", "_ignored_exceptions", "as", "ex", ":", "logger", ".", "debug", "(", "\"Captured {0} : {1}\"", ".", "format", "(", "str", "(", "ex", ".", "__class__", ")", ".", "replace", "(", "\"<type '\"", ",", "\"\"", ")", ".", "replace", "(", "\"'>\"", ",", "\"\"", ")", ",", "str", "(", "ex", ")", ")", ")", "# pragma: no cover", "time", ".", "sleep", "(", "self", ".", "_poll", ")", "count", "+=", "1", "if", "time", ".", "time", "(", ")", ">", "end_time", ":", "# pragma: no cover", "break", "raise", "TimeoutException", "(", "msg", "=", "\"condition <\"", "+", "condition_description", "+", "\"> was not true after \"", "+", "str", "(", "self", ".", "_timeout", ")", "+", "\" seconds.\"", ")" ]
f80471ef01f84b11e4d751dff1e6398ae1e230b8
test
Wait.until_traits_are_present
Waits until all traits are present. If any of the traits is still not present after timeout, raises a TimeoutException.
pages/wait/wait.py
def until_traits_are_present(self, element_with_traits): """ Waits until all traits are present. If any of the traits is still not present after timeout, raises a TimeoutException. """ end_time = time.time() + self._timeout count = 1 missing_traits_descriptions = None while True: missing_traits_descriptions = [] try: missing_traits_descriptions = element_with_traits.evaluate_traits() if len(missing_traits_descriptions) == 0: return True else: logger.debug("#{0} - wait until all traits are present: <{1}>".format(str(count), '> <'.join( missing_traits_descriptions))) except self._ignored_exceptions as ex: # pragma: no cover logger.debug("Captured {0}: {1}".format(str(ex.__class__).replace("<type '", "").replace("'>", ""), str(ex))) # pragma: no cover pass # pragma: no cover time.sleep(self._poll) count += 1 if time.time() > end_time: break raise TimeoutException( msg="conditions " + '<' + '> <'.join(missing_traits_descriptions) + '>' + " not true after " + str( self._timeout) + " seconds.")
def until_traits_are_present(self, element_with_traits): """ Waits until all traits are present. If any of the traits is still not present after timeout, raises a TimeoutException. """ end_time = time.time() + self._timeout count = 1 missing_traits_descriptions = None while True: missing_traits_descriptions = [] try: missing_traits_descriptions = element_with_traits.evaluate_traits() if len(missing_traits_descriptions) == 0: return True else: logger.debug("#{0} - wait until all traits are present: <{1}>".format(str(count), '> <'.join( missing_traits_descriptions))) except self._ignored_exceptions as ex: # pragma: no cover logger.debug("Captured {0}: {1}".format(str(ex.__class__).replace("<type '", "").replace("'>", ""), str(ex))) # pragma: no cover pass # pragma: no cover time.sleep(self._poll) count += 1 if time.time() > end_time: break raise TimeoutException( msg="conditions " + '<' + '> <'.join(missing_traits_descriptions) + '>' + " not true after " + str( self._timeout) + " seconds.")
[ "Waits", "until", "all", "traits", "are", "present", ".", "If", "any", "of", "the", "traits", "is", "still", "not", "present", "after", "timeout", "raises", "a", "TimeoutException", "." ]
Skyscanner/pages
python
https://github.com/Skyscanner/pages/blob/f80471ef01f84b11e4d751dff1e6398ae1e230b8/pages/wait/wait.py#L80-L107
[ "def", "until_traits_are_present", "(", "self", ",", "element_with_traits", ")", ":", "end_time", "=", "time", ".", "time", "(", ")", "+", "self", ".", "_timeout", "count", "=", "1", "missing_traits_descriptions", "=", "None", "while", "True", ":", "missing_traits_descriptions", "=", "[", "]", "try", ":", "missing_traits_descriptions", "=", "element_with_traits", ".", "evaluate_traits", "(", ")", "if", "len", "(", "missing_traits_descriptions", ")", "==", "0", ":", "return", "True", "else", ":", "logger", ".", "debug", "(", "\"#{0} - wait until all traits are present: <{1}>\"", ".", "format", "(", "str", "(", "count", ")", ",", "'> <'", ".", "join", "(", "missing_traits_descriptions", ")", ")", ")", "except", "self", ".", "_ignored_exceptions", "as", "ex", ":", "# pragma: no cover", "logger", ".", "debug", "(", "\"Captured {0}: {1}\"", ".", "format", "(", "str", "(", "ex", ".", "__class__", ")", ".", "replace", "(", "\"<type '\"", ",", "\"\"", ")", ".", "replace", "(", "\"'>\"", ",", "\"\"", ")", ",", "str", "(", "ex", ")", ")", ")", "# pragma: no cover", "pass", "# pragma: no cover", "time", ".", "sleep", "(", "self", ".", "_poll", ")", "count", "+=", "1", "if", "time", ".", "time", "(", ")", ">", "end_time", ":", "break", "raise", "TimeoutException", "(", "msg", "=", "\"conditions \"", "+", "'<'", "+", "'> <'", ".", "join", "(", "missing_traits_descriptions", ")", "+", "'>'", "+", "\" not true after \"", "+", "str", "(", "self", ".", "_timeout", ")", "+", "\" seconds.\"", ")" ]
f80471ef01f84b11e4d751dff1e6398ae1e230b8
test
Wait.with_ignored_exceptions
Set a list of exceptions that should be ignored inside the wait loop.
pages/wait/wait.py
def with_ignored_exceptions(self, *ignored_exceptions): """ Set a list of exceptions that should be ignored inside the wait loop. """ for exception in ignored_exceptions: self._ignored_exceptions = self._ignored_exceptions + (exception,) return self
def with_ignored_exceptions(self, *ignored_exceptions): """ Set a list of exceptions that should be ignored inside the wait loop. """ for exception in ignored_exceptions: self._ignored_exceptions = self._ignored_exceptions + (exception,) return self
[ "Set", "a", "list", "of", "exceptions", "that", "should", "be", "ignored", "inside", "the", "wait", "loop", "." ]
Skyscanner/pages
python
https://github.com/Skyscanner/pages/blob/f80471ef01f84b11e4d751dff1e6398ae1e230b8/pages/wait/wait.py#L123-L129
[ "def", "with_ignored_exceptions", "(", "self", ",", "*", "ignored_exceptions", ")", ":", "for", "exception", "in", "ignored_exceptions", ":", "self", ".", "_ignored_exceptions", "=", "self", ".", "_ignored_exceptions", "+", "(", "exception", ",", ")", "return", "self" ]
f80471ef01f84b11e4d751dff1e6398ae1e230b8
test
s2h
convert seconds to a pretty "d hh:mm:ss.s" format
pyfnnd/utils.py
def s2h(ss): """convert seconds to a pretty "d hh:mm:ss.s" format""" mm, ss = divmod(ss, 60) hh, mm = divmod(mm, 60) dd, hh = divmod(hh, 24) tstr = "%02i:%04.1f" % (mm, ss) if hh > 0: tstr = ("%02i:" % hh) + tstr if dd > 0: tstr = ("%id " % dd) + tstr return tstr
def s2h(ss): """convert seconds to a pretty "d hh:mm:ss.s" format""" mm, ss = divmod(ss, 60) hh, mm = divmod(mm, 60) dd, hh = divmod(hh, 24) tstr = "%02i:%04.1f" % (mm, ss) if hh > 0: tstr = ("%02i:" % hh) + tstr if dd > 0: tstr = ("%id " % dd) + tstr return tstr
[ "convert", "seconds", "to", "a", "pretty", "d", "hh", ":", "mm", ":", "ss", ".", "s", "format" ]
alimuldal/PyFNND
python
https://github.com/alimuldal/PyFNND/blob/3cbe0622a385f5206837bfd944d781aa7b1649ea/pyfnnd/utils.py#L24-L34
[ "def", "s2h", "(", "ss", ")", ":", "mm", ",", "ss", "=", "divmod", "(", "ss", ",", "60", ")", "hh", ",", "mm", "=", "divmod", "(", "mm", ",", "60", ")", "dd", ",", "hh", "=", "divmod", "(", "hh", ",", "24", ")", "tstr", "=", "\"%02i:%04.1f\"", "%", "(", "mm", ",", "ss", ")", "if", "hh", ">", "0", ":", "tstr", "=", "(", "\"%02i:\"", "%", "hh", ")", "+", "tstr", "if", "dd", ">", "0", ":", "tstr", "=", "(", "\"%id \"", "%", "dd", ")", "+", "tstr", "return", "tstr" ]
3cbe0622a385f5206837bfd944d781aa7b1649ea
test
NADReceiver.exec_command
Write a command to the receiver and read the value it returns. The receiver will always return a value, also when setting a value.
nad_receiver/__init__.py
def exec_command(self, domain, function, operator, value=None): """ Write a command to the receiver and read the value it returns. The receiver will always return a value, also when setting a value. """ if operator in CMDS[domain][function]['supported_operators']: if operator is '=' and value is None: raise ValueError('No value provided') if value is None: cmd = ''.join([CMDS[domain][function]['cmd'], operator]) else: cmd = ''.join( [CMDS[domain][function]['cmd'], operator, str(value)]) else: raise ValueError('Invalid operator provided %s' % operator) if not self.ser.is_open: self.ser.open() try: self.lock.acquire() self.ser.write(''.join(['\r', cmd, '\r']).encode('utf-8')) time.sleep(0.1) # not sure why, but otherwise it is not ready yet to do the read. msg = self.ser.read(self.ser.in_waiting) try: msg = msg.decode()[1:-1] msg = msg.split('=')[1] return msg except IndexError: pass finally: self.lock.release()
def exec_command(self, domain, function, operator, value=None): """ Write a command to the receiver and read the value it returns. The receiver will always return a value, also when setting a value. """ if operator in CMDS[domain][function]['supported_operators']: if operator is '=' and value is None: raise ValueError('No value provided') if value is None: cmd = ''.join([CMDS[domain][function]['cmd'], operator]) else: cmd = ''.join( [CMDS[domain][function]['cmd'], operator, str(value)]) else: raise ValueError('Invalid operator provided %s' % operator) if not self.ser.is_open: self.ser.open() try: self.lock.acquire() self.ser.write(''.join(['\r', cmd, '\r']).encode('utf-8')) time.sleep(0.1) # not sure why, but otherwise it is not ready yet to do the read. msg = self.ser.read(self.ser.in_waiting) try: msg = msg.decode()[1:-1] msg = msg.split('=')[1] return msg except IndexError: pass finally: self.lock.release()
[ "Write", "a", "command", "to", "the", "receiver", "and", "read", "the", "value", "it", "returns", "." ]
joopert/nad_receiver
python
https://github.com/joopert/nad_receiver/blob/416de0173a330c75cc73f9c90b0c5df32e5e0ba3/nad_receiver/__init__.py#L28-L66
[ "def", "exec_command", "(", "self", ",", "domain", ",", "function", ",", "operator", ",", "value", "=", "None", ")", ":", "if", "operator", "in", "CMDS", "[", "domain", "]", "[", "function", "]", "[", "'supported_operators'", "]", ":", "if", "operator", "is", "'='", "and", "value", "is", "None", ":", "raise", "ValueError", "(", "'No value provided'", ")", "if", "value", "is", "None", ":", "cmd", "=", "''", ".", "join", "(", "[", "CMDS", "[", "domain", "]", "[", "function", "]", "[", "'cmd'", "]", ",", "operator", "]", ")", "else", ":", "cmd", "=", "''", ".", "join", "(", "[", "CMDS", "[", "domain", "]", "[", "function", "]", "[", "'cmd'", "]", ",", "operator", ",", "str", "(", "value", ")", "]", ")", "else", ":", "raise", "ValueError", "(", "'Invalid operator provided %s'", "%", "operator", ")", "if", "not", "self", ".", "ser", ".", "is_open", ":", "self", ".", "ser", ".", "open", "(", ")", "try", ":", "self", ".", "lock", ".", "acquire", "(", ")", "self", ".", "ser", ".", "write", "(", "''", ".", "join", "(", "[", "'\\r'", ",", "cmd", ",", "'\\r'", "]", ")", ".", "encode", "(", "'utf-8'", ")", ")", "time", ".", "sleep", "(", "0.1", ")", "# not sure why, but otherwise it is not ready yet to do the read.", "msg", "=", "self", ".", "ser", ".", "read", "(", "self", ".", "ser", ".", "in_waiting", ")", "try", ":", "msg", "=", "msg", ".", "decode", "(", ")", "[", "1", ":", "-", "1", "]", "msg", "=", "msg", ".", "split", "(", "'='", ")", "[", "1", "]", "return", "msg", "except", "IndexError", ":", "pass", "finally", ":", "self", ".", "lock", ".", "release", "(", ")" ]
416de0173a330c75cc73f9c90b0c5df32e5e0ba3
test
NADReceiver.main_volume
Execute Main.Volume. Returns int
nad_receiver/__init__.py
def main_volume(self, operator, value=None): """ Execute Main.Volume. Returns int """ try: res = int(self.exec_command('main', 'volume', operator, value)) return res except (ValueError, TypeError): pass return None
def main_volume(self, operator, value=None): """ Execute Main.Volume. Returns int """ try: res = int(self.exec_command('main', 'volume', operator, value)) return res except (ValueError, TypeError): pass return None
[ "Execute", "Main", ".", "Volume", "." ]
joopert/nad_receiver
python
https://github.com/joopert/nad_receiver/blob/416de0173a330c75cc73f9c90b0c5df32e5e0ba3/nad_receiver/__init__.py#L80-L93
[ "def", "main_volume", "(", "self", ",", "operator", ",", "value", "=", "None", ")", ":", "try", ":", "res", "=", "int", "(", "self", ".", "exec_command", "(", "'main'", ",", "'volume'", ",", "operator", ",", "value", ")", ")", "return", "res", "except", "(", "ValueError", ",", "TypeError", ")", ":", "pass", "return", "None" ]
416de0173a330c75cc73f9c90b0c5df32e5e0ba3
test
NADReceiver.main_source
Execute Main.Source. Returns int
nad_receiver/__init__.py
def main_source(self, operator, value=None): """ Execute Main.Source. Returns int """ try: source = int(self.exec_command('main', 'source', operator, value)) return source except (ValueError, TypeError): pass return None
def main_source(self, operator, value=None): """ Execute Main.Source. Returns int """ try: source = int(self.exec_command('main', 'source', operator, value)) return source except (ValueError, TypeError): pass return None
[ "Execute", "Main", ".", "Source", "." ]
joopert/nad_receiver
python
https://github.com/joopert/nad_receiver/blob/416de0173a330c75cc73f9c90b0c5df32e5e0ba3/nad_receiver/__init__.py#L107-L119
[ "def", "main_source", "(", "self", ",", "operator", ",", "value", "=", "None", ")", ":", "try", ":", "source", "=", "int", "(", "self", ".", "exec_command", "(", "'main'", ",", "'source'", ",", "operator", ",", "value", ")", ")", "return", "source", "except", "(", "ValueError", ",", "TypeError", ")", ":", "pass", "return", "None" ]
416de0173a330c75cc73f9c90b0c5df32e5e0ba3
test
NADReceiverTCP._send
Send a command string to the amplifier.
nad_receiver/__init__.py
def _send(self, message, read_reply=False): """Send a command string to the amplifier.""" sock = None for tries in range(0, 3): try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((self._host, self.PORT)) break except (ConnectionError, BrokenPipeError): if tries == 3: print("socket connect failed.") return sleep(0.1) sock.send(codecs.decode(message, 'hex_codec')) if read_reply: sleep(0.1) reply = '' tries = 0 max_tries = 20 while len(reply) < len(message) and tries < max_tries: try: reply += codecs.encode(sock.recv(self.BUFFERSIZE), 'hex')\ .decode("utf-8") except (ConnectionError, BrokenPipeError): pass tries += 1 sock.close() if tries >= max_tries: return return reply sock.close()
def _send(self, message, read_reply=False): """Send a command string to the amplifier.""" sock = None for tries in range(0, 3): try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((self._host, self.PORT)) break except (ConnectionError, BrokenPipeError): if tries == 3: print("socket connect failed.") return sleep(0.1) sock.send(codecs.decode(message, 'hex_codec')) if read_reply: sleep(0.1) reply = '' tries = 0 max_tries = 20 while len(reply) < len(message) and tries < max_tries: try: reply += codecs.encode(sock.recv(self.BUFFERSIZE), 'hex')\ .decode("utf-8") except (ConnectionError, BrokenPipeError): pass tries += 1 sock.close() if tries >= max_tries: return return reply sock.close()
[ "Send", "a", "command", "string", "to", "the", "amplifier", "." ]
joopert/nad_receiver
python
https://github.com/joopert/nad_receiver/blob/416de0173a330c75cc73f9c90b0c5df32e5e0ba3/nad_receiver/__init__.py#L183-L213
[ "def", "_send", "(", "self", ",", "message", ",", "read_reply", "=", "False", ")", ":", "sock", "=", "None", "for", "tries", "in", "range", "(", "0", ",", "3", ")", ":", "try", ":", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "sock", ".", "connect", "(", "(", "self", ".", "_host", ",", "self", ".", "PORT", ")", ")", "break", "except", "(", "ConnectionError", ",", "BrokenPipeError", ")", ":", "if", "tries", "==", "3", ":", "print", "(", "\"socket connect failed.\"", ")", "return", "sleep", "(", "0.1", ")", "sock", ".", "send", "(", "codecs", ".", "decode", "(", "message", ",", "'hex_codec'", ")", ")", "if", "read_reply", ":", "sleep", "(", "0.1", ")", "reply", "=", "''", "tries", "=", "0", "max_tries", "=", "20", "while", "len", "(", "reply", ")", "<", "len", "(", "message", ")", "and", "tries", "<", "max_tries", ":", "try", ":", "reply", "+=", "codecs", ".", "encode", "(", "sock", ".", "recv", "(", "self", ".", "BUFFERSIZE", ")", ",", "'hex'", ")", ".", "decode", "(", "\"utf-8\"", ")", "except", "(", "ConnectionError", ",", "BrokenPipeError", ")", ":", "pass", "tries", "+=", "1", "sock", ".", "close", "(", ")", "if", "tries", ">=", "max_tries", ":", "return", "return", "reply", "sock", ".", "close", "(", ")" ]
416de0173a330c75cc73f9c90b0c5df32e5e0ba3
test
NADReceiverTCP.status
Return the status of the device. Returns a dictionary with keys 'volume' (int 0-200) , 'power' (bool), 'muted' (bool) and 'source' (str).
nad_receiver/__init__.py
def status(self): """ Return the status of the device. Returns a dictionary with keys 'volume' (int 0-200) , 'power' (bool), 'muted' (bool) and 'source' (str). """ nad_reply = self._send(self.POLL_VOLUME + self.POLL_POWER + self.POLL_MUTED + self.POLL_SOURCE, read_reply=True) if nad_reply is None: return # split reply into parts of 10 characters num_chars = 10 nad_status = [nad_reply[i:i + num_chars] for i in range(0, len(nad_reply), num_chars)] return {'volume': int(nad_status[0][-2:], 16), 'power': nad_status[1][-2:] == '01', 'muted': nad_status[2][-2:] == '01', 'source': self.SOURCES_REVERSED[nad_status[3][-2:]]}
def status(self): """ Return the status of the device. Returns a dictionary with keys 'volume' (int 0-200) , 'power' (bool), 'muted' (bool) and 'source' (str). """ nad_reply = self._send(self.POLL_VOLUME + self.POLL_POWER + self.POLL_MUTED + self.POLL_SOURCE, read_reply=True) if nad_reply is None: return # split reply into parts of 10 characters num_chars = 10 nad_status = [nad_reply[i:i + num_chars] for i in range(0, len(nad_reply), num_chars)] return {'volume': int(nad_status[0][-2:], 16), 'power': nad_status[1][-2:] == '01', 'muted': nad_status[2][-2:] == '01', 'source': self.SOURCES_REVERSED[nad_status[3][-2:]]}
[ "Return", "the", "status", "of", "the", "device", "." ]
joopert/nad_receiver
python
https://github.com/joopert/nad_receiver/blob/416de0173a330c75cc73f9c90b0c5df32e5e0ba3/nad_receiver/__init__.py#L215-L237
[ "def", "status", "(", "self", ")", ":", "nad_reply", "=", "self", ".", "_send", "(", "self", ".", "POLL_VOLUME", "+", "self", ".", "POLL_POWER", "+", "self", ".", "POLL_MUTED", "+", "self", ".", "POLL_SOURCE", ",", "read_reply", "=", "True", ")", "if", "nad_reply", "is", "None", ":", "return", "# split reply into parts of 10 characters", "num_chars", "=", "10", "nad_status", "=", "[", "nad_reply", "[", "i", ":", "i", "+", "num_chars", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "nad_reply", ")", ",", "num_chars", ")", "]", "return", "{", "'volume'", ":", "int", "(", "nad_status", "[", "0", "]", "[", "-", "2", ":", "]", ",", "16", ")", ",", "'power'", ":", "nad_status", "[", "1", "]", "[", "-", "2", ":", "]", "==", "'01'", ",", "'muted'", ":", "nad_status", "[", "2", "]", "[", "-", "2", ":", "]", "==", "'01'", ",", "'source'", ":", "self", ".", "SOURCES_REVERSED", "[", "nad_status", "[", "3", "]", "[", "-", "2", ":", "]", "]", "}" ]
416de0173a330c75cc73f9c90b0c5df32e5e0ba3
test
NADReceiverTCP.power_off
Power the device off.
nad_receiver/__init__.py
def power_off(self): """Power the device off.""" status = self.status() if status['power']: # Setting power off when it is already off can cause hangs self._send(self.CMD_POWERSAVE + self.CMD_OFF)
def power_off(self): """Power the device off.""" status = self.status() if status['power']: # Setting power off when it is already off can cause hangs self._send(self.CMD_POWERSAVE + self.CMD_OFF)
[ "Power", "the", "device", "off", "." ]
joopert/nad_receiver
python
https://github.com/joopert/nad_receiver/blob/416de0173a330c75cc73f9c90b0c5df32e5e0ba3/nad_receiver/__init__.py#L239-L243
[ "def", "power_off", "(", "self", ")", ":", "status", "=", "self", ".", "status", "(", ")", "if", "status", "[", "'power'", "]", ":", "# Setting power off when it is already off can cause hangs", "self", ".", "_send", "(", "self", ".", "CMD_POWERSAVE", "+", "self", ".", "CMD_OFF", ")" ]
416de0173a330c75cc73f9c90b0c5df32e5e0ba3
test
NADReceiverTCP.power_on
Power the device on.
nad_receiver/__init__.py
def power_on(self): """Power the device on.""" status = self.status() if not status['power']: self._send(self.CMD_ON, read_reply=True) sleep(0.5)
def power_on(self): """Power the device on.""" status = self.status() if not status['power']: self._send(self.CMD_ON, read_reply=True) sleep(0.5)
[ "Power", "the", "device", "on", "." ]
joopert/nad_receiver
python
https://github.com/joopert/nad_receiver/blob/416de0173a330c75cc73f9c90b0c5df32e5e0ba3/nad_receiver/__init__.py#L245-L250
[ "def", "power_on", "(", "self", ")", ":", "status", "=", "self", ".", "status", "(", ")", "if", "not", "status", "[", "'power'", "]", ":", "self", ".", "_send", "(", "self", ".", "CMD_ON", ",", "read_reply", "=", "True", ")", "sleep", "(", "0.5", ")" ]
416de0173a330c75cc73f9c90b0c5df32e5e0ba3
test
NADReceiverTCP.set_volume
Set volume level of the device. Accepts integer values 0-200.
nad_receiver/__init__.py
def set_volume(self, volume): """Set volume level of the device. Accepts integer values 0-200.""" if 0 <= volume <= 200: volume = format(volume, "02x") # Convert to hex self._send(self.CMD_VOLUME + volume)
def set_volume(self, volume): """Set volume level of the device. Accepts integer values 0-200.""" if 0 <= volume <= 200: volume = format(volume, "02x") # Convert to hex self._send(self.CMD_VOLUME + volume)
[ "Set", "volume", "level", "of", "the", "device", ".", "Accepts", "integer", "values", "0", "-", "200", "." ]
joopert/nad_receiver
python
https://github.com/joopert/nad_receiver/blob/416de0173a330c75cc73f9c90b0c5df32e5e0ba3/nad_receiver/__init__.py#L252-L256
[ "def", "set_volume", "(", "self", ",", "volume", ")", ":", "if", "0", "<=", "volume", "<=", "200", ":", "volume", "=", "format", "(", "volume", ",", "\"02x\"", ")", "# Convert to hex", "self", ".", "_send", "(", "self", ".", "CMD_VOLUME", "+", "volume", ")" ]
416de0173a330c75cc73f9c90b0c5df32e5e0ba3
test
NADReceiverTCP.select_source
Select a source from the list of sources.
nad_receiver/__init__.py
def select_source(self, source): """Select a source from the list of sources.""" status = self.status() if status['power']: # Changing source when off may hang NAD7050 if status['source'] != source: # Setting the source to the current source will hang the NAD7050 if source in self.SOURCES: self._send(self.CMD_SOURCE + self.SOURCES[source], read_reply=True)
def select_source(self, source): """Select a source from the list of sources.""" status = self.status() if status['power']: # Changing source when off may hang NAD7050 if status['source'] != source: # Setting the source to the current source will hang the NAD7050 if source in self.SOURCES: self._send(self.CMD_SOURCE + self.SOURCES[source], read_reply=True)
[ "Select", "a", "source", "from", "the", "list", "of", "sources", "." ]
joopert/nad_receiver
python
https://github.com/joopert/nad_receiver/blob/416de0173a330c75cc73f9c90b0c5df32e5e0ba3/nad_receiver/__init__.py#L266-L272
[ "def", "select_source", "(", "self", ",", "source", ")", ":", "status", "=", "self", ".", "status", "(", ")", "if", "status", "[", "'power'", "]", ":", "# Changing source when off may hang NAD7050", "if", "status", "[", "'source'", "]", "!=", "source", ":", "# Setting the source to the current source will hang the NAD7050", "if", "source", "in", "self", ".", "SOURCES", ":", "self", ".", "_send", "(", "self", ".", "CMD_SOURCE", "+", "self", ".", "SOURCES", "[", "source", "]", ",", "read_reply", "=", "True", ")" ]
416de0173a330c75cc73f9c90b0c5df32e5e0ba3
test
NADReceiverTelnet.exec_command
Write a command to the receiver and read the value it returns.
nad_receiver/__init__.py
def exec_command(self, domain, function, operator, value=None): """ Write a command to the receiver and read the value it returns. """ if operator in CMDS[domain][function]['supported_operators']: if operator is '=' and value is None: raise ValueError('No value provided') if value is None: cmd = ''.join([CMDS[domain][function]['cmd'], operator]) else: cmd = ''.join( [CMDS[domain][function]['cmd'], operator, str(value)]) else: raise ValueError('Invalid operator provided %s' % operator) if self._open_connection(): # For telnet the first \r / \n is recommended only self.telnet.write((''.join(['\r', cmd, '\n']).encode())) # Could raise eg. socket.error, UnicodeError, let the client handle it # Test 3 x buffer is completely empty # With the default timeout that means a delay at # about 3+ seconds loop = 3 while loop: msg = self.telnet.read_until('\n'.encode(), self.timeout) # Could raise eg. EOFError, UnicodeError, let the client handle it if msg == "": # Nothing in buffer loop -= 1 continue msg = msg.decode().strip('\r\n') # Could raise eg. UnicodeError, let the client handle it #print("NAD reponded with '%s'" % msg) # Wait for the response that equals the requested domain.function if msg.strip().split('=')[0].lower() == '.'.join([domain, function]).lower(): # b'Main.Volume=-12\r will return -12 return msg.strip().split('=')[1] raise RuntimeError('Failed to read response') raise RuntimeError('Failed to open connection')
def exec_command(self, domain, function, operator, value=None): """ Write a command to the receiver and read the value it returns. """ if operator in CMDS[domain][function]['supported_operators']: if operator is '=' and value is None: raise ValueError('No value provided') if value is None: cmd = ''.join([CMDS[domain][function]['cmd'], operator]) else: cmd = ''.join( [CMDS[domain][function]['cmd'], operator, str(value)]) else: raise ValueError('Invalid operator provided %s' % operator) if self._open_connection(): # For telnet the first \r / \n is recommended only self.telnet.write((''.join(['\r', cmd, '\n']).encode())) # Could raise eg. socket.error, UnicodeError, let the client handle it # Test 3 x buffer is completely empty # With the default timeout that means a delay at # about 3+ seconds loop = 3 while loop: msg = self.telnet.read_until('\n'.encode(), self.timeout) # Could raise eg. EOFError, UnicodeError, let the client handle it if msg == "": # Nothing in buffer loop -= 1 continue msg = msg.decode().strip('\r\n') # Could raise eg. UnicodeError, let the client handle it #print("NAD reponded with '%s'" % msg) # Wait for the response that equals the requested domain.function if msg.strip().split('=')[0].lower() == '.'.join([domain, function]).lower(): # b'Main.Volume=-12\r will return -12 return msg.strip().split('=')[1] raise RuntimeError('Failed to read response') raise RuntimeError('Failed to open connection')
[ "Write", "a", "command", "to", "the", "receiver", "and", "read", "the", "value", "it", "returns", "." ]
joopert/nad_receiver
python
https://github.com/joopert/nad_receiver/blob/416de0173a330c75cc73f9c90b0c5df32e5e0ba3/nad_receiver/__init__.py#L320-L365
[ "def", "exec_command", "(", "self", ",", "domain", ",", "function", ",", "operator", ",", "value", "=", "None", ")", ":", "if", "operator", "in", "CMDS", "[", "domain", "]", "[", "function", "]", "[", "'supported_operators'", "]", ":", "if", "operator", "is", "'='", "and", "value", "is", "None", ":", "raise", "ValueError", "(", "'No value provided'", ")", "if", "value", "is", "None", ":", "cmd", "=", "''", ".", "join", "(", "[", "CMDS", "[", "domain", "]", "[", "function", "]", "[", "'cmd'", "]", ",", "operator", "]", ")", "else", ":", "cmd", "=", "''", ".", "join", "(", "[", "CMDS", "[", "domain", "]", "[", "function", "]", "[", "'cmd'", "]", ",", "operator", ",", "str", "(", "value", ")", "]", ")", "else", ":", "raise", "ValueError", "(", "'Invalid operator provided %s'", "%", "operator", ")", "if", "self", ".", "_open_connection", "(", ")", ":", "# For telnet the first \\r / \\n is recommended only", "self", ".", "telnet", ".", "write", "(", "(", "''", ".", "join", "(", "[", "'\\r'", ",", "cmd", ",", "'\\n'", "]", ")", ".", "encode", "(", ")", ")", ")", "# Could raise eg. socket.error, UnicodeError, let the client handle it", "# Test 3 x buffer is completely empty", "# With the default timeout that means a delay at", "# about 3+ seconds", "loop", "=", "3", "while", "loop", ":", "msg", "=", "self", ".", "telnet", ".", "read_until", "(", "'\\n'", ".", "encode", "(", ")", ",", "self", ".", "timeout", ")", "# Could raise eg. EOFError, UnicodeError, let the client handle it", "if", "msg", "==", "\"\"", ":", "# Nothing in buffer", "loop", "-=", "1", "continue", "msg", "=", "msg", ".", "decode", "(", ")", ".", "strip", "(", "'\\r\\n'", ")", "# Could raise eg. 
UnicodeError, let the client handle it", "#print(\"NAD reponded with '%s'\" % msg)", "# Wait for the response that equals the requested domain.function", "if", "msg", ".", "strip", "(", ")", ".", "split", "(", "'='", ")", "[", "0", "]", ".", "lower", "(", ")", "==", "'.'", ".", "join", "(", "[", "domain", ",", "function", "]", ")", ".", "lower", "(", ")", ":", "# b'Main.Volume=-12\\r will return -12", "return", "msg", ".", "strip", "(", ")", ".", "split", "(", "'='", ")", "[", "1", "]", "raise", "RuntimeError", "(", "'Failed to read response'", ")", "raise", "RuntimeError", "(", "'Failed to open connection'", ")" ]
416de0173a330c75cc73f9c90b0c5df32e5e0ba3
test
deobfuscate
Deobfuscates the URL and returns HttpResponse from source view. SEO juice is mostly ignored as it is intended for display purposes only.
unfriendly/views.py
def deobfuscate(request, key, juice=None): """ Deobfuscates the URL and returns HttpResponse from source view. SEO juice is mostly ignored as it is intended for display purposes only. """ try: url = decrypt(str(key), settings.UNFRIENDLY_SECRET, settings.UNFRIENDLY_IV, checksum=settings.UNFRIENDLY_ENFORCE_CHECKSUM) except (CheckSumError, InvalidKeyError): return HttpResponseNotFound() try: url = url.decode('utf-8') except UnicodeDecodeError: return HttpResponseNotFound() url_parts = urlparse(unquote(url)) path = url_parts.path query = url_parts.query try: view, args, kwargs = resolve(path) except Resolver404: return HttpResponseNotFound() # fix-up the environ object environ = request.environ.copy() environ['PATH_INFO'] = path[len(environ['SCRIPT_NAME']):] environ['QUERY_STRING'] = query # init a new request patched_request = request.__class__(environ) # copy over any missing request attributes - this feels hackish missing_items = set(dir(request)) - set(dir(patched_request)) while missing_items: missing_item = missing_items.pop() patched_request.__setattr__(missing_item, request.__getattribute__(missing_item)) # mark this request as obfuscated patched_request.META['obfuscated'] = True response = view(patched_request, *args, **kwargs) # offer up a friendlier juice-powered filename if downloaded if juice and not response.has_header('Content-Disposition'): response['Content-Disposition'] = 'inline; filename=%s' % juice return response
def deobfuscate(request, key, juice=None): """ Deobfuscates the URL and returns HttpResponse from source view. SEO juice is mostly ignored as it is intended for display purposes only. """ try: url = decrypt(str(key), settings.UNFRIENDLY_SECRET, settings.UNFRIENDLY_IV, checksum=settings.UNFRIENDLY_ENFORCE_CHECKSUM) except (CheckSumError, InvalidKeyError): return HttpResponseNotFound() try: url = url.decode('utf-8') except UnicodeDecodeError: return HttpResponseNotFound() url_parts = urlparse(unquote(url)) path = url_parts.path query = url_parts.query try: view, args, kwargs = resolve(path) except Resolver404: return HttpResponseNotFound() # fix-up the environ object environ = request.environ.copy() environ['PATH_INFO'] = path[len(environ['SCRIPT_NAME']):] environ['QUERY_STRING'] = query # init a new request patched_request = request.__class__(environ) # copy over any missing request attributes - this feels hackish missing_items = set(dir(request)) - set(dir(patched_request)) while missing_items: missing_item = missing_items.pop() patched_request.__setattr__(missing_item, request.__getattribute__(missing_item)) # mark this request as obfuscated patched_request.META['obfuscated'] = True response = view(patched_request, *args, **kwargs) # offer up a friendlier juice-powered filename if downloaded if juice and not response.has_header('Content-Disposition'): response['Content-Disposition'] = 'inline; filename=%s' % juice return response
[ "Deobfuscates", "the", "URL", "and", "returns", "HttpResponse", "from", "source", "view", ".", "SEO", "juice", "is", "mostly", "ignored", "as", "it", "is", "intended", "for", "display", "purposes", "only", "." ]
tomatohater/django-unfriendly
python
https://github.com/tomatohater/django-unfriendly/blob/38eca5fb45841db331fc66571fff37bef50dfa67/unfriendly/views.py#L21-L72
[ "def", "deobfuscate", "(", "request", ",", "key", ",", "juice", "=", "None", ")", ":", "try", ":", "url", "=", "decrypt", "(", "str", "(", "key", ")", ",", "settings", ".", "UNFRIENDLY_SECRET", ",", "settings", ".", "UNFRIENDLY_IV", ",", "checksum", "=", "settings", ".", "UNFRIENDLY_ENFORCE_CHECKSUM", ")", "except", "(", "CheckSumError", ",", "InvalidKeyError", ")", ":", "return", "HttpResponseNotFound", "(", ")", "try", ":", "url", "=", "url", ".", "decode", "(", "'utf-8'", ")", "except", "UnicodeDecodeError", ":", "return", "HttpResponseNotFound", "(", ")", "url_parts", "=", "urlparse", "(", "unquote", "(", "url", ")", ")", "path", "=", "url_parts", ".", "path", "query", "=", "url_parts", ".", "query", "try", ":", "view", ",", "args", ",", "kwargs", "=", "resolve", "(", "path", ")", "except", "Resolver404", ":", "return", "HttpResponseNotFound", "(", ")", "# fix-up the environ object", "environ", "=", "request", ".", "environ", ".", "copy", "(", ")", "environ", "[", "'PATH_INFO'", "]", "=", "path", "[", "len", "(", "environ", "[", "'SCRIPT_NAME'", "]", ")", ":", "]", "environ", "[", "'QUERY_STRING'", "]", "=", "query", "# init a new request", "patched_request", "=", "request", ".", "__class__", "(", "environ", ")", "# copy over any missing request attributes - this feels hackish", "missing_items", "=", "set", "(", "dir", "(", "request", ")", ")", "-", "set", "(", "dir", "(", "patched_request", ")", ")", "while", "missing_items", ":", "missing_item", "=", "missing_items", ".", "pop", "(", ")", "patched_request", ".", "__setattr__", "(", "missing_item", ",", "request", ".", "__getattribute__", "(", "missing_item", ")", ")", "# mark this request as obfuscated", "patched_request", ".", "META", "[", "'obfuscated'", "]", "=", "True", "response", "=", "view", "(", "patched_request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# offer up a friendlier juice-powered filename if downloaded", "if", "juice", "and", "not", "response", ".", "has_header", "(", 
"'Content-Disposition'", ")", ":", "response", "[", "'Content-Disposition'", "]", "=", "'inline; filename=%s'", "%", "juice", "return", "response" ]
38eca5fb45841db331fc66571fff37bef50dfa67
test
_lazysecret
Pads secret if not legal AES block size (16, 24, 32)
unfriendly/utils.py
def _lazysecret(secret, blocksize=32, padding='}'): """Pads secret if not legal AES block size (16, 24, 32)""" if not len(secret) in (16, 24, 32): return secret + (blocksize - len(secret)) * padding return secret
def _lazysecret(secret, blocksize=32, padding='}'): """Pads secret if not legal AES block size (16, 24, 32)""" if not len(secret) in (16, 24, 32): return secret + (blocksize - len(secret)) * padding return secret
[ "Pads", "secret", "if", "not", "legal", "AES", "block", "size", "(", "16", "24", "32", ")" ]
tomatohater/django-unfriendly
python
https://github.com/tomatohater/django-unfriendly/blob/38eca5fb45841db331fc66571fff37bef50dfa67/unfriendly/utils.py#L23-L27
[ "def", "_lazysecret", "(", "secret", ",", "blocksize", "=", "32", ",", "padding", "=", "'}'", ")", ":", "if", "not", "len", "(", "secret", ")", "in", "(", "16", ",", "24", ",", "32", ")", ":", "return", "secret", "+", "(", "blocksize", "-", "len", "(", "secret", ")", ")", "*", "padding", "return", "secret" ]
38eca5fb45841db331fc66571fff37bef50dfa67
test
_crc
Generates crc32. Modulo keep the value within int range.
unfriendly/utils.py
def _crc(plaintext): """Generates crc32. Modulo keep the value within int range.""" if not isinstance(plaintext, six.binary_type): plaintext = six.b(plaintext) return (zlib.crc32(plaintext) % 2147483647) & 0xffffffff
def _crc(plaintext): """Generates crc32. Modulo keep the value within int range.""" if not isinstance(plaintext, six.binary_type): plaintext = six.b(plaintext) return (zlib.crc32(plaintext) % 2147483647) & 0xffffffff
[ "Generates", "crc32", ".", "Modulo", "keep", "the", "value", "within", "int", "range", "." ]
tomatohater/django-unfriendly
python
https://github.com/tomatohater/django-unfriendly/blob/38eca5fb45841db331fc66571fff37bef50dfa67/unfriendly/utils.py#L29-L33
[ "def", "_crc", "(", "plaintext", ")", ":", "if", "not", "isinstance", "(", "plaintext", ",", "six", ".", "binary_type", ")", ":", "plaintext", "=", "six", ".", "b", "(", "plaintext", ")", "return", "(", "zlib", ".", "crc32", "(", "plaintext", ")", "%", "2147483647", ")", "&", "0xffffffff" ]
38eca5fb45841db331fc66571fff37bef50dfa67
test
encrypt
Encrypts plaintext with secret plaintext - content to encrypt secret - secret to encrypt plaintext inital_vector - initial vector lazy - pad secret if less than legal blocksize (default: True) checksum - attach crc32 byte encoded (default: True) returns ciphertext
unfriendly/utils.py
def encrypt(plaintext, secret, inital_vector, checksum=True, lazy=True): """Encrypts plaintext with secret plaintext - content to encrypt secret - secret to encrypt plaintext inital_vector - initial vector lazy - pad secret if less than legal blocksize (default: True) checksum - attach crc32 byte encoded (default: True) returns ciphertext """ if not isinstance(plaintext, six.binary_type): plaintext = six.b(plaintext) secret = _lazysecret(secret) if lazy else secret encobj = AES.new(secret, AES.MODE_CFB, inital_vector) if checksum: packed = _pack_crc(plaintext) plaintext += base64.urlsafe_b64encode(packed) encoded = base64.urlsafe_b64encode(encobj.encrypt(plaintext)) if isinstance(plaintext, six.binary_type): encoded = encoded.decode() return encoded.replace('=', '')
def encrypt(plaintext, secret, inital_vector, checksum=True, lazy=True): """Encrypts plaintext with secret plaintext - content to encrypt secret - secret to encrypt plaintext inital_vector - initial vector lazy - pad secret if less than legal blocksize (default: True) checksum - attach crc32 byte encoded (default: True) returns ciphertext """ if not isinstance(plaintext, six.binary_type): plaintext = six.b(plaintext) secret = _lazysecret(secret) if lazy else secret encobj = AES.new(secret, AES.MODE_CFB, inital_vector) if checksum: packed = _pack_crc(plaintext) plaintext += base64.urlsafe_b64encode(packed) encoded = base64.urlsafe_b64encode(encobj.encrypt(plaintext)) if isinstance(plaintext, six.binary_type): encoded = encoded.decode() return encoded.replace('=', '')
[ "Encrypts", "plaintext", "with", "secret", "plaintext", "-", "content", "to", "encrypt", "secret", "-", "secret", "to", "encrypt", "plaintext", "inital_vector", "-", "initial", "vector", "lazy", "-", "pad", "secret", "if", "less", "than", "legal", "blocksize", "(", "default", ":", "True", ")", "checksum", "-", "attach", "crc32", "byte", "encoded", "(", "default", ":", "True", ")", "returns", "ciphertext" ]
tomatohater/django-unfriendly
python
https://github.com/tomatohater/django-unfriendly/blob/38eca5fb45841db331fc66571fff37bef50dfa67/unfriendly/utils.py#L39-L62
[ "def", "encrypt", "(", "plaintext", ",", "secret", ",", "inital_vector", ",", "checksum", "=", "True", ",", "lazy", "=", "True", ")", ":", "if", "not", "isinstance", "(", "plaintext", ",", "six", ".", "binary_type", ")", ":", "plaintext", "=", "six", ".", "b", "(", "plaintext", ")", "secret", "=", "_lazysecret", "(", "secret", ")", "if", "lazy", "else", "secret", "encobj", "=", "AES", ".", "new", "(", "secret", ",", "AES", ".", "MODE_CFB", ",", "inital_vector", ")", "if", "checksum", ":", "packed", "=", "_pack_crc", "(", "plaintext", ")", "plaintext", "+=", "base64", ".", "urlsafe_b64encode", "(", "packed", ")", "encoded", "=", "base64", ".", "urlsafe_b64encode", "(", "encobj", ".", "encrypt", "(", "plaintext", ")", ")", "if", "isinstance", "(", "plaintext", ",", "six", ".", "binary_type", ")", ":", "encoded", "=", "encoded", ".", "decode", "(", ")", "return", "encoded", ".", "replace", "(", "'='", ",", "''", ")" ]
38eca5fb45841db331fc66571fff37bef50dfa67
test
decrypt
Decrypts ciphertext with secret ciphertext - encrypted content to decrypt secret - secret to decrypt ciphertext inital_vector - initial vector lazy - pad secret if less than legal blocksize (default: True) checksum - verify crc32 byte encoded checksum (default: True) returns plaintext
unfriendly/utils.py
def decrypt(ciphertext, secret, inital_vector, checksum=True, lazy=True): """Decrypts ciphertext with secret ciphertext - encrypted content to decrypt secret - secret to decrypt ciphertext inital_vector - initial vector lazy - pad secret if less than legal blocksize (default: True) checksum - verify crc32 byte encoded checksum (default: True) returns plaintext """ secret = _lazysecret(secret) if lazy else secret encobj = AES.new(secret, AES.MODE_CFB, inital_vector) try: padded = ciphertext + ('=' * (len(ciphertext) % 4)) decoded = base64.urlsafe_b64decode(str(padded)) plaintext = encobj.decrypt(decoded) except (TypeError, binascii.Error): raise InvalidKeyError("invalid key") if checksum: try: crc, plaintext = (base64.urlsafe_b64decode( plaintext[-8:]), plaintext[:-8]) except (TypeError, binascii.Error): raise CheckSumError("checksum mismatch") if not crc == _pack_crc(plaintext): raise CheckSumError("checksum mismatch") return plaintext
def decrypt(ciphertext, secret, inital_vector, checksum=True, lazy=True): """Decrypts ciphertext with secret ciphertext - encrypted content to decrypt secret - secret to decrypt ciphertext inital_vector - initial vector lazy - pad secret if less than legal blocksize (default: True) checksum - verify crc32 byte encoded checksum (default: True) returns plaintext """ secret = _lazysecret(secret) if lazy else secret encobj = AES.new(secret, AES.MODE_CFB, inital_vector) try: padded = ciphertext + ('=' * (len(ciphertext) % 4)) decoded = base64.urlsafe_b64decode(str(padded)) plaintext = encobj.decrypt(decoded) except (TypeError, binascii.Error): raise InvalidKeyError("invalid key") if checksum: try: crc, plaintext = (base64.urlsafe_b64decode( plaintext[-8:]), plaintext[:-8]) except (TypeError, binascii.Error): raise CheckSumError("checksum mismatch") if not crc == _pack_crc(plaintext): raise CheckSumError("checksum mismatch") return plaintext
[ "Decrypts", "ciphertext", "with", "secret", "ciphertext", "-", "encrypted", "content", "to", "decrypt", "secret", "-", "secret", "to", "decrypt", "ciphertext", "inital_vector", "-", "initial", "vector", "lazy", "-", "pad", "secret", "if", "less", "than", "legal", "blocksize", "(", "default", ":", "True", ")", "checksum", "-", "verify", "crc32", "byte", "encoded", "checksum", "(", "default", ":", "True", ")", "returns", "plaintext" ]
tomatohater/django-unfriendly
python
https://github.com/tomatohater/django-unfriendly/blob/38eca5fb45841db331fc66571fff37bef50dfa67/unfriendly/utils.py#L65-L93
[ "def", "decrypt", "(", "ciphertext", ",", "secret", ",", "inital_vector", ",", "checksum", "=", "True", ",", "lazy", "=", "True", ")", ":", "secret", "=", "_lazysecret", "(", "secret", ")", "if", "lazy", "else", "secret", "encobj", "=", "AES", ".", "new", "(", "secret", ",", "AES", ".", "MODE_CFB", ",", "inital_vector", ")", "try", ":", "padded", "=", "ciphertext", "+", "(", "'='", "*", "(", "len", "(", "ciphertext", ")", "%", "4", ")", ")", "decoded", "=", "base64", ".", "urlsafe_b64decode", "(", "str", "(", "padded", ")", ")", "plaintext", "=", "encobj", ".", "decrypt", "(", "decoded", ")", "except", "(", "TypeError", ",", "binascii", ".", "Error", ")", ":", "raise", "InvalidKeyError", "(", "\"invalid key\"", ")", "if", "checksum", ":", "try", ":", "crc", ",", "plaintext", "=", "(", "base64", ".", "urlsafe_b64decode", "(", "plaintext", "[", "-", "8", ":", "]", ")", ",", "plaintext", "[", ":", "-", "8", "]", ")", "except", "(", "TypeError", ",", "binascii", ".", "Error", ")", ":", "raise", "CheckSumError", "(", "\"checksum mismatch\"", ")", "if", "not", "crc", "==", "_pack_crc", "(", "plaintext", ")", ":", "raise", "CheckSumError", "(", "\"checksum mismatch\"", ")", "return", "plaintext" ]
38eca5fb45841db331fc66571fff37bef50dfa67
test
obfuscate
Template filter that obfuscates whatever text it is applied to. The text is supposed to be a URL, but it will obfuscate anything. Usage: Extremely unfriendly URL: {{ "/my-secret-path/"|obfuscate }} Include some SEO juice: {{ "/my-secret-path/"|obfuscate:"some SEO friendly text" }}
unfriendly/templatetags/unfriendly_tags.py
def obfuscate(value, juice=None): """ Template filter that obfuscates whatever text it is applied to. The text is supposed to be a URL, but it will obfuscate anything. Usage: Extremely unfriendly URL: {{ "/my-secret-path/"|obfuscate }} Include some SEO juice: {{ "/my-secret-path/"|obfuscate:"some SEO friendly text" }} """ if not settings.UNFRIENDLY_ENABLE_FILTER: return value kwargs = { 'key': encrypt(value, settings.UNFRIENDLY_SECRET, settings.UNFRIENDLY_IV, checksum=settings.UNFRIENDLY_ENFORCE_CHECKSUM), } if juice: kwargs['juice'] = slugify(juice) return reverse('unfriendly-deobfuscate', kwargs=kwargs)
def obfuscate(value, juice=None): """ Template filter that obfuscates whatever text it is applied to. The text is supposed to be a URL, but it will obfuscate anything. Usage: Extremely unfriendly URL: {{ "/my-secret-path/"|obfuscate }} Include some SEO juice: {{ "/my-secret-path/"|obfuscate:"some SEO friendly text" }} """ if not settings.UNFRIENDLY_ENABLE_FILTER: return value kwargs = { 'key': encrypt(value, settings.UNFRIENDLY_SECRET, settings.UNFRIENDLY_IV, checksum=settings.UNFRIENDLY_ENFORCE_CHECKSUM), } if juice: kwargs['juice'] = slugify(juice) return reverse('unfriendly-deobfuscate', kwargs=kwargs)
[ "Template", "filter", "that", "obfuscates", "whatever", "text", "it", "is", "applied", "to", ".", "The", "text", "is", "supposed", "to", "be", "a", "URL", "but", "it", "will", "obfuscate", "anything", "." ]
tomatohater/django-unfriendly
python
https://github.com/tomatohater/django-unfriendly/blob/38eca5fb45841db331fc66571fff37bef50dfa67/unfriendly/templatetags/unfriendly_tags.py#L19-L41
[ "def", "obfuscate", "(", "value", ",", "juice", "=", "None", ")", ":", "if", "not", "settings", ".", "UNFRIENDLY_ENABLE_FILTER", ":", "return", "value", "kwargs", "=", "{", "'key'", ":", "encrypt", "(", "value", ",", "settings", ".", "UNFRIENDLY_SECRET", ",", "settings", ".", "UNFRIENDLY_IV", ",", "checksum", "=", "settings", ".", "UNFRIENDLY_ENFORCE_CHECKSUM", ")", ",", "}", "if", "juice", ":", "kwargs", "[", "'juice'", "]", "=", "slugify", "(", "juice", ")", "return", "reverse", "(", "'unfriendly-deobfuscate'", ",", "kwargs", "=", "kwargs", ")" ]
38eca5fb45841db331fc66571fff37bef50dfa67
test
MrJattParser.missing_schema
It will print the list of songs that can be downloaded
song/commands/MusicWebsiteParser/MrJattParser.py
def missing_schema(self,html,song_name): ''' It will print the list of songs that can be downloaded ''' #html=self.get_html_response(url) soup=BeautifulSoup(html) name=' '.join(song_name) print '%s not found'%name print "But you can download any of the following songs :" a_list=soup.findAll('a','touch') for x in xrange(len(a_list)-1): r=a_list[x] p=str(r) q=re.sub(r'<a.*/>|<span.*">|</span>|</a>|<a.*html">|<font.*">|</font>','',p) print q
def missing_schema(self,html,song_name): ''' It will print the list of songs that can be downloaded ''' #html=self.get_html_response(url) soup=BeautifulSoup(html) name=' '.join(song_name) print '%s not found'%name print "But you can download any of the following songs :" a_list=soup.findAll('a','touch') for x in xrange(len(a_list)-1): r=a_list[x] p=str(r) q=re.sub(r'<a.*/>|<span.*">|</span>|</a>|<a.*html">|<font.*">|</font>','',p) print q
[ "It", "will", "print", "the", "list", "of", "songs", "that", "can", "be", "downloaded" ]
ankitmathur3193/song-cli
python
https://github.com/ankitmathur3193/song-cli/blob/ca8ccfe547e9d702313ff6d14e81ae4355989a67/song/commands/MusicWebsiteParser/MrJattParser.py#L13-L27
[ "def", "missing_schema", "(", "self", ",", "html", ",", "song_name", ")", ":", "#html=self.get_html_response(url)", "soup", "=", "BeautifulSoup", "(", "html", ")", "name", "=", "' '", ".", "join", "(", "song_name", ")", "print", "'%s not found'", "%", "name", "print", "\"But you can download any of the following songs :\"", "a_list", "=", "soup", ".", "findAll", "(", "'a'", ",", "'touch'", ")", "for", "x", "in", "xrange", "(", "len", "(", "a_list", ")", "-", "1", ")", ":", "r", "=", "a_list", "[", "x", "]", "p", "=", "str", "(", "r", ")", "q", "=", "re", ".", "sub", "(", "r'<a.*/>|<span.*\">|</span>|</a>|<a.*html\">|<font.*\">|</font>'", ",", "''", ",", "p", ")", "print", "q" ]
ca8ccfe547e9d702313ff6d14e81ae4355989a67
test
MrJattParser.list_of_all_href
It will return all hyper links found in the mr-jatt page for download
song/commands/MusicWebsiteParser/MrJattParser.py
def list_of_all_href(self,html): ''' It will return all hyper links found in the mr-jatt page for download ''' soup=BeautifulSoup(html) links=[] a_list=soup.findAll('a','touch') for x in xrange(len(a_list)-1): link = a_list[x].get('href') name = a_list[x] name = str(name) name=re.sub(r'<a.*/>|<span.*">|</span>|</a>|<a.*html">|<font.*">|</font>','',name) name=re.sub(r'^[0-9]+\.','',name) links.append([link,name]) #quit() return links
def list_of_all_href(self,html): ''' It will return all hyper links found in the mr-jatt page for download ''' soup=BeautifulSoup(html) links=[] a_list=soup.findAll('a','touch') for x in xrange(len(a_list)-1): link = a_list[x].get('href') name = a_list[x] name = str(name) name=re.sub(r'<a.*/>|<span.*">|</span>|</a>|<a.*html">|<font.*">|</font>','',name) name=re.sub(r'^[0-9]+\.','',name) links.append([link,name]) #quit() return links
[ "It", "will", "return", "all", "hyper", "links", "found", "in", "the", "mr", "-", "jatt", "page", "for", "download" ]
ankitmathur3193/song-cli
python
https://github.com/ankitmathur3193/song-cli/blob/ca8ccfe547e9d702313ff6d14e81ae4355989a67/song/commands/MusicWebsiteParser/MrJattParser.py#L29-L45
[ "def", "list_of_all_href", "(", "self", ",", "html", ")", ":", "soup", "=", "BeautifulSoup", "(", "html", ")", "links", "=", "[", "]", "a_list", "=", "soup", ".", "findAll", "(", "'a'", ",", "'touch'", ")", "for", "x", "in", "xrange", "(", "len", "(", "a_list", ")", "-", "1", ")", ":", "link", "=", "a_list", "[", "x", "]", ".", "get", "(", "'href'", ")", "name", "=", "a_list", "[", "x", "]", "name", "=", "str", "(", "name", ")", "name", "=", "re", ".", "sub", "(", "r'<a.*/>|<span.*\">|</span>|</a>|<a.*html\">|<font.*\">|</font>'", ",", "''", ",", "name", ")", "name", "=", "re", ".", "sub", "(", "r'^[0-9]+\\.'", ",", "''", ",", "name", ")", "links", ".", "append", "(", "[", "link", ",", "name", "]", ")", "#quit()", "return", "links" ]
ca8ccfe547e9d702313ff6d14e81ae4355989a67
test
MrJattParser.check_if_song_name
Returns true if user entered artist or movie name
song/commands/MusicWebsiteParser/MrJattParser.py
def check_if_song_name(self,html): ''' Returns true if user entered artist or movie name ''' soup=BeautifulSoup(html) a_list=soup.findAll('a','touch') #print a_list text=[str(x) for x in a_list] text=''.join(text) text=text.lower() string1='download in 48 kbps' string2='download in 128 kbps' string3='download in 320 kbps' href='' if string3 in text: #print 'Downloading in 320 kbps' href=a_list[2].get('href') elif string2 in text: #print 'Downloading in 128 kbps' href=a_list[1].get('href') elif string1 in text: #print 'Downloading in 48 kbps' href=a_list[0].get('href') else: return (True,'nothing') return (False,href)
def check_if_song_name(self,html): ''' Returns true if user entered artist or movie name ''' soup=BeautifulSoup(html) a_list=soup.findAll('a','touch') #print a_list text=[str(x) for x in a_list] text=''.join(text) text=text.lower() string1='download in 48 kbps' string2='download in 128 kbps' string3='download in 320 kbps' href='' if string3 in text: #print 'Downloading in 320 kbps' href=a_list[2].get('href') elif string2 in text: #print 'Downloading in 128 kbps' href=a_list[1].get('href') elif string1 in text: #print 'Downloading in 48 kbps' href=a_list[0].get('href') else: return (True,'nothing') return (False,href)
[ "Returns", "true", "if", "user", "entered", "artist", "or", "movie", "name" ]
ankitmathur3193/song-cli
python
https://github.com/ankitmathur3193/song-cli/blob/ca8ccfe547e9d702313ff6d14e81ae4355989a67/song/commands/MusicWebsiteParser/MrJattParser.py#L47-L76
[ "def", "check_if_song_name", "(", "self", ",", "html", ")", ":", "soup", "=", "BeautifulSoup", "(", "html", ")", "a_list", "=", "soup", ".", "findAll", "(", "'a'", ",", "'touch'", ")", "#print a_list", "text", "=", "[", "str", "(", "x", ")", "for", "x", "in", "a_list", "]", "text", "=", "''", ".", "join", "(", "text", ")", "text", "=", "text", ".", "lower", "(", ")", "string1", "=", "'download in 48 kbps'", "string2", "=", "'download in 128 kbps'", "string3", "=", "'download in 320 kbps'", "href", "=", "''", "if", "string3", "in", "text", ":", "#print 'Downloading in 320 kbps'", "href", "=", "a_list", "[", "2", "]", ".", "get", "(", "'href'", ")", "elif", "string2", "in", "text", ":", "#print 'Downloading in 128 kbps'", "href", "=", "a_list", "[", "1", "]", ".", "get", "(", "'href'", ")", "elif", "string1", "in", "text", ":", "#print 'Downloading in 48 kbps'\t", "href", "=", "a_list", "[", "0", "]", ".", "get", "(", "'href'", ")", "else", ":", "return", "(", "True", ",", "'nothing'", ")", "return", "(", "False", ",", "href", ")" ]
ca8ccfe547e9d702313ff6d14e81ae4355989a67
test
MrJattParser.Parse
It will the resource URL if song is found, Otherwise it will return the list of songs that can be downloaded
song/commands/MusicWebsiteParser/MrJattParser.py
def Parse(self,url,song_name,flag): ''' It will the resource URL if song is found, Otherwise it will return the list of songs that can be downloaded ''' file_download=FileDownload() html=file_download.get_html_response(url) if flag == False: soup=BeautifulSoup(html) a_list=soup.findAll('a','touch') #print a_list text=[str(x) for x in a_list] text=''.join(text) text=text.lower() string1='download in 48 kbps' string2='download in 128 kbps' string3='download in 320 kbps' href='' if string3 in text: print 'Downloading in 320 kbps' href=a_list[2].get('href') elif string2 in text: print 'Downloading in 128 kbps' href=a_list[1].get('href') elif string1 in text: print 'Downloading in 48 kbps' href=a_list[0].get('href') else: self.missing_schema(html,song_name) quit() return href else: x,href=self.check_if_song_name(html) links = [] if x==True: links=self.list_of_all_href(html) else: file_download=FileDownload() file_download.file_download_cross_platform(href) quit() return links
def Parse(self,url,song_name,flag): ''' It will the resource URL if song is found, Otherwise it will return the list of songs that can be downloaded ''' file_download=FileDownload() html=file_download.get_html_response(url) if flag == False: soup=BeautifulSoup(html) a_list=soup.findAll('a','touch') #print a_list text=[str(x) for x in a_list] text=''.join(text) text=text.lower() string1='download in 48 kbps' string2='download in 128 kbps' string3='download in 320 kbps' href='' if string3 in text: print 'Downloading in 320 kbps' href=a_list[2].get('href') elif string2 in text: print 'Downloading in 128 kbps' href=a_list[1].get('href') elif string1 in text: print 'Downloading in 48 kbps' href=a_list[0].get('href') else: self.missing_schema(html,song_name) quit() return href else: x,href=self.check_if_song_name(html) links = [] if x==True: links=self.list_of_all_href(html) else: file_download=FileDownload() file_download.file_download_cross_platform(href) quit() return links
[ "It", "will", "the", "resource", "URL", "if", "song", "is", "found", "Otherwise", "it", "will", "return", "the", "list", "of", "songs", "that", "can", "be", "downloaded" ]
ankitmathur3193/song-cli
python
https://github.com/ankitmathur3193/song-cli/blob/ca8ccfe547e9d702313ff6d14e81ae4355989a67/song/commands/MusicWebsiteParser/MrJattParser.py#L78-L124
[ "def", "Parse", "(", "self", ",", "url", ",", "song_name", ",", "flag", ")", ":", "file_download", "=", "FileDownload", "(", ")", "html", "=", "file_download", ".", "get_html_response", "(", "url", ")", "if", "flag", "==", "False", ":", "soup", "=", "BeautifulSoup", "(", "html", ")", "a_list", "=", "soup", ".", "findAll", "(", "'a'", ",", "'touch'", ")", "#print a_list", "text", "=", "[", "str", "(", "x", ")", "for", "x", "in", "a_list", "]", "text", "=", "''", ".", "join", "(", "text", ")", "text", "=", "text", ".", "lower", "(", ")", "string1", "=", "'download in 48 kbps'", "string2", "=", "'download in 128 kbps'", "string3", "=", "'download in 320 kbps'", "href", "=", "''", "if", "string3", "in", "text", ":", "print", "'Downloading in 320 kbps'", "href", "=", "a_list", "[", "2", "]", ".", "get", "(", "'href'", ")", "elif", "string2", "in", "text", ":", "print", "'Downloading in 128 kbps'", "href", "=", "a_list", "[", "1", "]", ".", "get", "(", "'href'", ")", "elif", "string1", "in", "text", ":", "print", "'Downloading in 48 kbps'", "href", "=", "a_list", "[", "0", "]", ".", "get", "(", "'href'", ")", "else", ":", "self", ".", "missing_schema", "(", "html", ",", "song_name", ")", "quit", "(", ")", "return", "href", "else", ":", "x", ",", "href", "=", "self", ".", "check_if_song_name", "(", "html", ")", "links", "=", "[", "]", "if", "x", "==", "True", ":", "links", "=", "self", ".", "list_of_all_href", "(", "html", ")", "else", ":", "file_download", "=", "FileDownload", "(", ")", "file_download", ".", "file_download_cross_platform", "(", "href", ")", "quit", "(", ")", "return", "links" ]
ca8ccfe547e9d702313ff6d14e81ae4355989a67
test
GoogleParser.google_url
It will return the google url to be searched
song/commands/SearchEngineParser/GoogleParser.py
def google_url(self,song_name,website): ''' It will return the google url to be searched''' name='+'.join(song_name) prefix='https://www.google.co.in/search?q=' website=website.split(" ") suffix='+'.join(website) url=prefix+name+suffix #print url return url
def google_url(self,song_name,website): ''' It will return the google url to be searched''' name='+'.join(song_name) prefix='https://www.google.co.in/search?q=' website=website.split(" ") suffix='+'.join(website) url=prefix+name+suffix #print url return url
[ "It", "will", "return", "the", "google", "url", "to", "be", "searched" ]
ankitmathur3193/song-cli
python
https://github.com/ankitmathur3193/song-cli/blob/ca8ccfe547e9d702313ff6d14e81ae4355989a67/song/commands/SearchEngineParser/GoogleParser.py#L10-L18
[ "def", "google_url", "(", "self", ",", "song_name", ",", "website", ")", ":", "name", "=", "'+'", ".", "join", "(", "song_name", ")", "prefix", "=", "'https://www.google.co.in/search?q='", "website", "=", "website", ".", "split", "(", "\" \"", ")", "suffix", "=", "'+'", ".", "join", "(", "website", ")", "url", "=", "prefix", "+", "name", "+", "suffix", "#print url", "return", "url" ]
ca8ccfe547e9d702313ff6d14e81ae4355989a67
test
GoogleParser.parse_google
It will parse google html response and return the first url
song/commands/SearchEngineParser/GoogleParser.py
def parse_google(self,html): '''It will parse google html response and return the first url ''' soup = BeautifulSoup(html) href=soup.find('div','g').find('a').get('href') href_list=href.split('&') download_url=href_list[0] download_url=download_url.strip() download_url=download_url.replace('/url?q=','') return download_url
def parse_google(self,html): '''It will parse google html response and return the first url ''' soup = BeautifulSoup(html) href=soup.find('div','g').find('a').get('href') href_list=href.split('&') download_url=href_list[0] download_url=download_url.strip() download_url=download_url.replace('/url?q=','') return download_url
[ "It", "will", "parse", "google", "html", "response", "and", "return", "the", "first", "url" ]
ankitmathur3193/song-cli
python
https://github.com/ankitmathur3193/song-cli/blob/ca8ccfe547e9d702313ff6d14e81ae4355989a67/song/commands/SearchEngineParser/GoogleParser.py#L20-L30
[ "def", "parse_google", "(", "self", ",", "html", ")", ":", "soup", "=", "BeautifulSoup", "(", "html", ")", "href", "=", "soup", ".", "find", "(", "'div'", ",", "'g'", ")", ".", "find", "(", "'a'", ")", ".", "get", "(", "'href'", ")", "href_list", "=", "href", ".", "split", "(", "'&'", ")", "download_url", "=", "href_list", "[", "0", "]", "download_url", "=", "download_url", ".", "strip", "(", ")", "download_url", "=", "download_url", ".", "replace", "(", "'/url?q='", ",", "''", ")", "return", "download_url" ]
ca8ccfe547e9d702313ff6d14e81ae4355989a67
test
GoogleParser.Parse
song_name is a list of strings website is a string It will return the url from where music file needs to be downloaded
song/commands/SearchEngineParser/GoogleParser.py
def Parse(self,song_name,website): ''' song_name is a list of strings website is a string It will return the url from where music file needs to be downloaded ''' url_to_be_parsed=self.google_url(song_name,website) file_download=FileDownload() html=file_download.get_html_response(url_to_be_parsed) website_url=self.parse_google(html) return website_url
def Parse(self,song_name,website): ''' song_name is a list of strings website is a string It will return the url from where music file needs to be downloaded ''' url_to_be_parsed=self.google_url(song_name,website) file_download=FileDownload() html=file_download.get_html_response(url_to_be_parsed) website_url=self.parse_google(html) return website_url
[ "song_name", "is", "a", "list", "of", "strings", "website", "is", "a", "string", "It", "will", "return", "the", "url", "from", "where", "music", "file", "needs", "to", "be", "downloaded" ]
ankitmathur3193/song-cli
python
https://github.com/ankitmathur3193/song-cli/blob/ca8ccfe547e9d702313ff6d14e81ae4355989a67/song/commands/SearchEngineParser/GoogleParser.py#L33-L43
[ "def", "Parse", "(", "self", ",", "song_name", ",", "website", ")", ":", "url_to_be_parsed", "=", "self", ".", "google_url", "(", "song_name", ",", "website", ")", "file_download", "=", "FileDownload", "(", ")", "html", "=", "file_download", ".", "get_html_response", "(", "url_to_be_parsed", ")", "website_url", "=", "self", ".", "parse_google", "(", "html", ")", "return", "website_url" ]
ca8ccfe547e9d702313ff6d14e81ae4355989a67
test
FileDownload.get_html_response
It will download the html page specified by url and return the html response
song/commands/FileDownload.py
def get_html_response(self,url): '''It will download the html page specified by url and return the html response ''' print "Downloading page %s .."%url try: response=requests.get(url,timeout=50) except requests.exceptions.SSLError: try: response=requests.get(url,verify=False,timeout=50) except requests.exceptions.RequestException as e: print e quit() except requests.exceptions.RequestException as e: print e quit() return response.content
def get_html_response(self,url): '''It will download the html page specified by url and return the html response ''' print "Downloading page %s .."%url try: response=requests.get(url,timeout=50) except requests.exceptions.SSLError: try: response=requests.get(url,verify=False,timeout=50) except requests.exceptions.RequestException as e: print e quit() except requests.exceptions.RequestException as e: print e quit() return response.content
[ "It", "will", "download", "the", "html", "page", "specified", "by", "url", "and", "return", "the", "html", "response" ]
ankitmathur3193/song-cli
python
https://github.com/ankitmathur3193/song-cli/blob/ca8ccfe547e9d702313ff6d14e81ae4355989a67/song/commands/FileDownload.py#L9-L24
[ "def", "get_html_response", "(", "self", ",", "url", ")", ":", "print", "\"Downloading page %s ..\"", "%", "url", "try", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "timeout", "=", "50", ")", "except", "requests", ".", "exceptions", ".", "SSLError", ":", "try", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "verify", "=", "False", ",", "timeout", "=", "50", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "print", "e", "quit", "(", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "print", "e", "quit", "(", ")", "return", "response", ".", "content" ]
ca8ccfe547e9d702313ff6d14e81ae4355989a67
test
FileDownload.file_download_using_requests
It will download file specified by url using requests module
song/commands/FileDownload.py
def file_download_using_requests(self,url): '''It will download file specified by url using requests module''' file_name=url.split('/')[-1] if os.path.exists(os.path.join(os.getcwd(),file_name)): print 'File already exists' return #print 'Downloading file %s '%file_name #print 'Downloading from %s'%url try: r=requests.get(url,stream=True,timeout=200) except requests.exceptions.SSLError: try: response=requests.get(url,stream=True,verify=False,timeout=200) except requests.exceptions.RequestException as e: print e quit() except requests.exceptions.RequestException as e: print e quit() chunk_size = 1024 total_size = int(r.headers['Content-Length']) total_chunks = total_size/chunk_size file_iterable = r.iter_content(chunk_size = chunk_size) tqdm_iter = tqdm(iterable = file_iterable,total = total_chunks,unit = 'KB', leave = False ) with open(file_name,'wb') as f: for data in tqdm_iter: f.write(data) #total_size=float(r.headers['Content-Length'])/(1024*1024) '''print 'Total size of file to be downloaded %.2f MB '%total_size total_downloaded_size=0.0 with open(file_name,'wb') as f: for chunk in r.iter_content(chunk_size=1*1024*1024): if chunk: size_of_chunk=float(len(chunk))/(1024*1024) total_downloaded_size+=size_of_chunk print '{0:.0%} Downloaded'.format(total_downloaded_size/total_size) f.write(chunk)''' print 'Downloaded file %s '%file_name
def file_download_using_requests(self,url): '''It will download file specified by url using requests module''' file_name=url.split('/')[-1] if os.path.exists(os.path.join(os.getcwd(),file_name)): print 'File already exists' return #print 'Downloading file %s '%file_name #print 'Downloading from %s'%url try: r=requests.get(url,stream=True,timeout=200) except requests.exceptions.SSLError: try: response=requests.get(url,stream=True,verify=False,timeout=200) except requests.exceptions.RequestException as e: print e quit() except requests.exceptions.RequestException as e: print e quit() chunk_size = 1024 total_size = int(r.headers['Content-Length']) total_chunks = total_size/chunk_size file_iterable = r.iter_content(chunk_size = chunk_size) tqdm_iter = tqdm(iterable = file_iterable,total = total_chunks,unit = 'KB', leave = False ) with open(file_name,'wb') as f: for data in tqdm_iter: f.write(data) #total_size=float(r.headers['Content-Length'])/(1024*1024) '''print 'Total size of file to be downloaded %.2f MB '%total_size total_downloaded_size=0.0 with open(file_name,'wb') as f: for chunk in r.iter_content(chunk_size=1*1024*1024): if chunk: size_of_chunk=float(len(chunk))/(1024*1024) total_downloaded_size+=size_of_chunk print '{0:.0%} Downloaded'.format(total_downloaded_size/total_size) f.write(chunk)''' print 'Downloaded file %s '%file_name
[ "It", "will", "download", "file", "specified", "by", "url", "using", "requests", "module" ]
ankitmathur3193/song-cli
python
https://github.com/ankitmathur3193/song-cli/blob/ca8ccfe547e9d702313ff6d14e81ae4355989a67/song/commands/FileDownload.py#L27-L74
[ "def", "file_download_using_requests", "(", "self", ",", "url", ")", ":", "file_name", "=", "url", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "file_name", ")", ")", ":", "print", "'File already exists'", "return", "#print 'Downloading file %s '%file_name", "#print 'Downloading from %s'%url", "try", ":", "r", "=", "requests", ".", "get", "(", "url", ",", "stream", "=", "True", ",", "timeout", "=", "200", ")", "except", "requests", ".", "exceptions", ".", "SSLError", ":", "try", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "stream", "=", "True", ",", "verify", "=", "False", ",", "timeout", "=", "200", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "print", "e", "quit", "(", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "print", "e", "quit", "(", ")", "chunk_size", "=", "1024", "total_size", "=", "int", "(", "r", ".", "headers", "[", "'Content-Length'", "]", ")", "total_chunks", "=", "total_size", "/", "chunk_size", "file_iterable", "=", "r", ".", "iter_content", "(", "chunk_size", "=", "chunk_size", ")", "tqdm_iter", "=", "tqdm", "(", "iterable", "=", "file_iterable", ",", "total", "=", "total_chunks", ",", "unit", "=", "'KB'", ",", "leave", "=", "False", ")", "with", "open", "(", "file_name", ",", "'wb'", ")", "as", "f", ":", "for", "data", "in", "tqdm_iter", ":", "f", ".", "write", "(", "data", ")", "#total_size=float(r.headers['Content-Length'])/(1024*1024)", "'''print 'Total size of file to be downloaded %.2f MB '%total_size\n\t\ttotal_downloaded_size=0.0\n\t\twith open(file_name,'wb') as f:\n\t\t\tfor chunk in r.iter_content(chunk_size=1*1024*1024):\n\t\t\t\tif chunk:\n\t\t\t\t\tsize_of_chunk=float(len(chunk))/(1024*1024)\n\t\t\t\t\ttotal_downloaded_size+=size_of_chunk\n\t\t\t\t\tprint '{0:.0%} 
Downloaded'.format(total_downloaded_size/total_size)\n\t\t\t\t\tf.write(chunk)'''", "print", "'Downloaded file %s '", "%", "file_name" ]
ca8ccfe547e9d702313ff6d14e81ae4355989a67
test
FileDownload.file_download_using_wget
It will download file specified by url using wget utility of linux
song/commands/FileDownload.py
def file_download_using_wget(self,url): '''It will download file specified by url using wget utility of linux ''' file_name=url.split('/')[-1] print 'Downloading file %s '%file_name command='wget -c --read-timeout=50 --tries=3 -q --show-progress --no-check-certificate ' url='"'+url+'"' command=command+url os.system(command)
def file_download_using_wget(self,url): '''It will download file specified by url using wget utility of linux ''' file_name=url.split('/')[-1] print 'Downloading file %s '%file_name command='wget -c --read-timeout=50 --tries=3 -q --show-progress --no-check-certificate ' url='"'+url+'"' command=command+url os.system(command)
[ "It", "will", "download", "file", "specified", "by", "url", "using", "wget", "utility", "of", "linux" ]
ankitmathur3193/song-cli
python
https://github.com/ankitmathur3193/song-cli/blob/ca8ccfe547e9d702313ff6d14e81ae4355989a67/song/commands/FileDownload.py#L77-L84
[ "def", "file_download_using_wget", "(", "self", ",", "url", ")", ":", "file_name", "=", "url", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "print", "'Downloading file %s '", "%", "file_name", "command", "=", "'wget -c --read-timeout=50 --tries=3 -q --show-progress --no-check-certificate '", "url", "=", "'\"'", "+", "url", "+", "'\"'", "command", "=", "command", "+", "url", "os", ".", "system", "(", "command", ")" ]
ca8ccfe547e9d702313ff6d14e81ae4355989a67
test
main
Main CLI entrypoint.
song/cli.py
def main(): """Main CLI entrypoint.""" #print VERSION from commands.download import Download options = docopt(__doc__, version=VERSION) #print "You reached here" #print options print "working." p=Download(options) p.run()
def main(): """Main CLI entrypoint.""" #print VERSION from commands.download import Download options = docopt(__doc__, version=VERSION) #print "You reached here" #print options print "working." p=Download(options) p.run()
[ "Main", "CLI", "entrypoint", "." ]
ankitmathur3193/song-cli
python
https://github.com/ankitmathur3193/song-cli/blob/ca8ccfe547e9d702313ff6d14e81ae4355989a67/song/cli.py#L29-L38
[ "def", "main", "(", ")", ":", "#print VERSION", "from", "commands", ".", "download", "import", "Download", "options", "=", "docopt", "(", "__doc__", ",", "version", "=", "VERSION", ")", "#print \"You reached here\"", "#print options", "print", "\"working.\"", "p", "=", "Download", "(", "options", ")", "p", ".", "run", "(", ")" ]
ca8ccfe547e9d702313ff6d14e81ae4355989a67
test
ReadingBloomFilter
Create a read-only bloom filter with an upperbound of (num_elements, max_fp_prob) as a specification and using filename as the backing datastore.
src/hydra.py
def ReadingBloomFilter(filename, want_lock=False): """ Create a read-only bloom filter with an upperbound of (num_elements, max_fp_prob) as a specification and using filename as the backing datastore. """ with open('{}.desc'.format(filename), 'r') as descriptor: num_elements = int(descriptor.readline()) max_fp_prob = float(descriptor.readline()) ignore_case = int(descriptor.readline()) return _hydra.BloomFilter.getFilter( num_elements, max_fp_prob, filename=filename, ignore_case=ignore_case, read_only=True, want_lock=want_lock)
def ReadingBloomFilter(filename, want_lock=False): """ Create a read-only bloom filter with an upperbound of (num_elements, max_fp_prob) as a specification and using filename as the backing datastore. """ with open('{}.desc'.format(filename), 'r') as descriptor: num_elements = int(descriptor.readline()) max_fp_prob = float(descriptor.readline()) ignore_case = int(descriptor.readline()) return _hydra.BloomFilter.getFilter( num_elements, max_fp_prob, filename=filename, ignore_case=ignore_case, read_only=True, want_lock=want_lock)
[ "Create", "a", "read", "-", "only", "bloom", "filter", "with", "an", "upperbound", "of", "(", "num_elements", "max_fp_prob", ")", "as", "a", "specification", "and", "using", "filename", "as", "the", "backing", "datastore", "." ]
crankycoder/hydra
python
https://github.com/crankycoder/hydra/blob/3be536bd0c6716d4efcfde3e132582e6066bae43/src/hydra.py#L4-L18
[ "def", "ReadingBloomFilter", "(", "filename", ",", "want_lock", "=", "False", ")", ":", "with", "open", "(", "'{}.desc'", ".", "format", "(", "filename", ")", ",", "'r'", ")", "as", "descriptor", ":", "num_elements", "=", "int", "(", "descriptor", ".", "readline", "(", ")", ")", "max_fp_prob", "=", "float", "(", "descriptor", ".", "readline", "(", ")", ")", "ignore_case", "=", "int", "(", "descriptor", ".", "readline", "(", ")", ")", "return", "_hydra", ".", "BloomFilter", ".", "getFilter", "(", "num_elements", ",", "max_fp_prob", ",", "filename", "=", "filename", ",", "ignore_case", "=", "ignore_case", ",", "read_only", "=", "True", ",", "want_lock", "=", "want_lock", ")" ]
3be536bd0c6716d4efcfde3e132582e6066bae43
test
WritingBloomFilter
Create a read/write bloom filter with an upperbound of (num_elements, max_fp_prob) as a specification and using filename as the backing datastore.
src/hydra.py
def WritingBloomFilter(num_elements, max_fp_prob, filename=None, ignore_case=False, want_lock=False, fdatasync_on_close=True): """ Create a read/write bloom filter with an upperbound of (num_elements, max_fp_prob) as a specification and using filename as the backing datastore. """ new_filter = _hydra.BloomFilter.getFilter( num_elements, max_fp_prob, filename=filename, ignore_case=ignore_case, read_only=False, want_lock=want_lock, fdatasync_on_close=fdatasync_on_close) if filename: with open('{}.desc'.format(filename), 'w') as descriptor: descriptor.write("{}\n".format(num_elements)) descriptor.write("{:0.8f}\n".format(max_fp_prob)) descriptor.write("{:d}\n".format(ignore_case)) return new_filter
def WritingBloomFilter(num_elements, max_fp_prob, filename=None, ignore_case=False, want_lock=False, fdatasync_on_close=True): """ Create a read/write bloom filter with an upperbound of (num_elements, max_fp_prob) as a specification and using filename as the backing datastore. """ new_filter = _hydra.BloomFilter.getFilter( num_elements, max_fp_prob, filename=filename, ignore_case=ignore_case, read_only=False, want_lock=want_lock, fdatasync_on_close=fdatasync_on_close) if filename: with open('{}.desc'.format(filename), 'w') as descriptor: descriptor.write("{}\n".format(num_elements)) descriptor.write("{:0.8f}\n".format(max_fp_prob)) descriptor.write("{:d}\n".format(ignore_case)) return new_filter
[ "Create", "a", "read", "/", "write", "bloom", "filter", "with", "an", "upperbound", "of", "(", "num_elements", "max_fp_prob", ")", "as", "a", "specification", "and", "using", "filename", "as", "the", "backing", "datastore", "." ]
crankycoder/hydra
python
https://github.com/crankycoder/hydra/blob/3be536bd0c6716d4efcfde3e132582e6066bae43/src/hydra.py#L38-L56
[ "def", "WritingBloomFilter", "(", "num_elements", ",", "max_fp_prob", ",", "filename", "=", "None", ",", "ignore_case", "=", "False", ",", "want_lock", "=", "False", ",", "fdatasync_on_close", "=", "True", ")", ":", "new_filter", "=", "_hydra", ".", "BloomFilter", ".", "getFilter", "(", "num_elements", ",", "max_fp_prob", ",", "filename", "=", "filename", ",", "ignore_case", "=", "ignore_case", ",", "read_only", "=", "False", ",", "want_lock", "=", "want_lock", ",", "fdatasync_on_close", "=", "fdatasync_on_close", ")", "if", "filename", ":", "with", "open", "(", "'{}.desc'", ".", "format", "(", "filename", ")", ",", "'w'", ")", "as", "descriptor", ":", "descriptor", ".", "write", "(", "\"{}\\n\"", ".", "format", "(", "num_elements", ")", ")", "descriptor", ".", "write", "(", "\"{:0.8f}\\n\"", ".", "format", "(", "max_fp_prob", ")", ")", "descriptor", ".", "write", "(", "\"{:d}\\n\"", ".", "format", "(", "ignore_case", ")", ")", "return", "new_filter" ]
3be536bd0c6716d4efcfde3e132582e6066bae43
test
findStationCodesByCity
Lookup AQI database for station codes in a given city.
pwaqi/__init__.py
def findStationCodesByCity(city_name, token): """Lookup AQI database for station codes in a given city.""" req = requests.get( API_ENDPOINT_SEARCH, params={ 'token': token, 'keyword': city_name }) if req.status_code == 200 and req.json()["status"] == "ok": return [result["uid"] for result in req.json()["data"]] else: return []
def findStationCodesByCity(city_name, token): """Lookup AQI database for station codes in a given city.""" req = requests.get( API_ENDPOINT_SEARCH, params={ 'token': token, 'keyword': city_name }) if req.status_code == 200 and req.json()["status"] == "ok": return [result["uid"] for result in req.json()["data"]] else: return []
[ "Lookup", "AQI", "database", "for", "station", "codes", "in", "a", "given", "city", "." ]
valentinalexeev/pwaqi
python
https://github.com/valentinalexeev/pwaqi/blob/81a1fa1ad87be7ba015c1cb07c52c7760ca99d8c/pwaqi/__init__.py#L16-L28
[ "def", "findStationCodesByCity", "(", "city_name", ",", "token", ")", ":", "req", "=", "requests", ".", "get", "(", "API_ENDPOINT_SEARCH", ",", "params", "=", "{", "'token'", ":", "token", ",", "'keyword'", ":", "city_name", "}", ")", "if", "req", ".", "status_code", "==", "200", "and", "req", ".", "json", "(", ")", "[", "\"status\"", "]", "==", "\"ok\"", ":", "return", "[", "result", "[", "\"uid\"", "]", "for", "result", "in", "req", ".", "json", "(", ")", "[", "\"data\"", "]", "]", "else", ":", "return", "[", "]" ]
81a1fa1ad87be7ba015c1cb07c52c7760ca99d8c
test
get_location_observation
Lookup observations by geo coordinates.
pwaqi/__init__.py
def get_location_observation(lat, lng, token): """Lookup observations by geo coordinates.""" req = requests.get( API_ENDPOINT_GEO % (lat, lng), params={ 'token': token }) if req.status_code == 200 and req.json()["status"] == "ok": return parse_observation_response(req.json()["data"]) return {}
def get_location_observation(lat, lng, token): """Lookup observations by geo coordinates.""" req = requests.get( API_ENDPOINT_GEO % (lat, lng), params={ 'token': token }) if req.status_code == 200 and req.json()["status"] == "ok": return parse_observation_response(req.json()["data"]) return {}
[ "Lookup", "observations", "by", "geo", "coordinates", "." ]
valentinalexeev/pwaqi
python
https://github.com/valentinalexeev/pwaqi/blob/81a1fa1ad87be7ba015c1cb07c52c7760ca99d8c/pwaqi/__init__.py#L31-L41
[ "def", "get_location_observation", "(", "lat", ",", "lng", ",", "token", ")", ":", "req", "=", "requests", ".", "get", "(", "API_ENDPOINT_GEO", "%", "(", "lat", ",", "lng", ")", ",", "params", "=", "{", "'token'", ":", "token", "}", ")", "if", "req", ".", "status_code", "==", "200", "and", "req", ".", "json", "(", ")", "[", "\"status\"", "]", "==", "\"ok\"", ":", "return", "parse_observation_response", "(", "req", ".", "json", "(", ")", "[", "\"data\"", "]", ")", "return", "{", "}" ]
81a1fa1ad87be7ba015c1cb07c52c7760ca99d8c
test
parse_observation_response
Decode AQICN observation response JSON into python object.
pwaqi/__init__.py
def parse_observation_response(json): """Decode AQICN observation response JSON into python object.""" logging.debug(json) iaqi = json['iaqi'] result = { 'idx': json['idx'], 'city': json.get('city', ''), 'aqi': json['aqi'], 'dominentpol': json.get("dominentpol", ''), 'time': json['time']['s'], 'iaqi': [{'p': item, 'v': iaqi[item]['v']} for item in iaqi] } return result
def parse_observation_response(json): """Decode AQICN observation response JSON into python object.""" logging.debug(json) iaqi = json['iaqi'] result = { 'idx': json['idx'], 'city': json.get('city', ''), 'aqi': json['aqi'], 'dominentpol': json.get("dominentpol", ''), 'time': json['time']['s'], 'iaqi': [{'p': item, 'v': iaqi[item]['v']} for item in iaqi] } return result
[ "Decode", "AQICN", "observation", "response", "JSON", "into", "python", "object", "." ]
valentinalexeev/pwaqi
python
https://github.com/valentinalexeev/pwaqi/blob/81a1fa1ad87be7ba015c1cb07c52c7760ca99d8c/pwaqi/__init__.py#L44-L58
[ "def", "parse_observation_response", "(", "json", ")", ":", "logging", ".", "debug", "(", "json", ")", "iaqi", "=", "json", "[", "'iaqi'", "]", "result", "=", "{", "'idx'", ":", "json", "[", "'idx'", "]", ",", "'city'", ":", "json", ".", "get", "(", "'city'", ",", "''", ")", ",", "'aqi'", ":", "json", "[", "'aqi'", "]", ",", "'dominentpol'", ":", "json", ".", "get", "(", "\"dominentpol\"", ",", "''", ")", ",", "'time'", ":", "json", "[", "'time'", "]", "[", "'s'", "]", ",", "'iaqi'", ":", "[", "{", "'p'", ":", "item", ",", "'v'", ":", "iaqi", "[", "item", "]", "[", "'v'", "]", "}", "for", "item", "in", "iaqi", "]", "}", "return", "result" ]
81a1fa1ad87be7ba015c1cb07c52c7760ca99d8c
test
get_station_observation
Request station data for a specific station identified by code. A language parameter can also be specified to translate location information (default: "en")
pwaqi/__init__.py
def get_station_observation(station_code, token): """Request station data for a specific station identified by code. A language parameter can also be specified to translate location information (default: "en") """ req = requests.get( API_ENDPOINT_OBS % (station_code), params={ 'token': token }) if req.status_code == 200 and req.json()['status'] == "ok": return parse_observation_response(req.json()['data']) else: return {}
def get_station_observation(station_code, token): """Request station data for a specific station identified by code. A language parameter can also be specified to translate location information (default: "en") """ req = requests.get( API_ENDPOINT_OBS % (station_code), params={ 'token': token }) if req.status_code == 200 and req.json()['status'] == "ok": return parse_observation_response(req.json()['data']) else: return {}
[ "Request", "station", "data", "for", "a", "specific", "station", "identified", "by", "code", "." ]
valentinalexeev/pwaqi
python
https://github.com/valentinalexeev/pwaqi/blob/81a1fa1ad87be7ba015c1cb07c52c7760ca99d8c/pwaqi/__init__.py#L61-L76
[ "def", "get_station_observation", "(", "station_code", ",", "token", ")", ":", "req", "=", "requests", ".", "get", "(", "API_ENDPOINT_OBS", "%", "(", "station_code", ")", ",", "params", "=", "{", "'token'", ":", "token", "}", ")", "if", "req", ".", "status_code", "==", "200", "and", "req", ".", "json", "(", ")", "[", "'status'", "]", "==", "\"ok\"", ":", "return", "parse_observation_response", "(", "req", ".", "json", "(", ")", "[", "'data'", "]", ")", "else", ":", "return", "{", "}" ]
81a1fa1ad87be7ba015c1cb07c52c7760ca99d8c
test
AssetAttributes.search_paths
The list of logical paths which are used to search for an asset. This property makes sense only if the attributes was created with logical path. It is assumed that the logical path can be a directory containing a file named ``index`` with the same suffix. Example:: >>> attrs = AssetAttributes(environment, 'js/app.js') >>> attrs.search_paths ['js/app.js', 'js/app/index.js'] >>> attrs = AssetAttributes(environment, 'js/app/index.js') >>> attrs.search_paths ['js/models/index.js']
gears/asset_attributes.py
def search_paths(self): """The list of logical paths which are used to search for an asset. This property makes sense only if the attributes was created with logical path. It is assumed that the logical path can be a directory containing a file named ``index`` with the same suffix. Example:: >>> attrs = AssetAttributes(environment, 'js/app.js') >>> attrs.search_paths ['js/app.js', 'js/app/index.js'] >>> attrs = AssetAttributes(environment, 'js/app/index.js') >>> attrs.search_paths ['js/models/index.js'] """ paths = [self.path] if os.path.basename(self.path_without_suffix) != 'index': path = os.path.join(self.path_without_suffix, 'index') paths.append(path + ''.join(self.suffix)) return paths
def search_paths(self): """The list of logical paths which are used to search for an asset. This property makes sense only if the attributes was created with logical path. It is assumed that the logical path can be a directory containing a file named ``index`` with the same suffix. Example:: >>> attrs = AssetAttributes(environment, 'js/app.js') >>> attrs.search_paths ['js/app.js', 'js/app/index.js'] >>> attrs = AssetAttributes(environment, 'js/app/index.js') >>> attrs.search_paths ['js/models/index.js'] """ paths = [self.path] if os.path.basename(self.path_without_suffix) != 'index': path = os.path.join(self.path_without_suffix, 'index') paths.append(path + ''.join(self.suffix)) return paths
[ "The", "list", "of", "logical", "paths", "which", "are", "used", "to", "search", "for", "an", "asset", ".", "This", "property", "makes", "sense", "only", "if", "the", "attributes", "was", "created", "with", "logical", "path", "." ]
gears/gears
python
https://github.com/gears/gears/blob/5729c2525a8c04c185e998bd9a86233708972921/gears/asset_attributes.py#L35-L57
[ "def", "search_paths", "(", "self", ")", ":", "paths", "=", "[", "self", ".", "path", "]", "if", "os", ".", "path", ".", "basename", "(", "self", ".", "path_without_suffix", ")", "!=", "'index'", ":", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "path_without_suffix", ",", "'index'", ")", "paths", ".", "append", "(", "path", "+", "''", ".", "join", "(", "self", ".", "suffix", ")", ")", "return", "paths" ]
5729c2525a8c04c185e998bd9a86233708972921
test
AssetAttributes.path_without_suffix
The relative path to asset without suffix. Example:: >>> attrs = AssetAttributes(environment, 'js/app.js') >>> attrs.path_without_suffix 'js/app'
gears/asset_attributes.py
def path_without_suffix(self): """The relative path to asset without suffix. Example:: >>> attrs = AssetAttributes(environment, 'js/app.js') >>> attrs.path_without_suffix 'js/app' """ if self.suffix: return self.path[:-len(''.join(self.suffix))] return self.path
def path_without_suffix(self): """The relative path to asset without suffix. Example:: >>> attrs = AssetAttributes(environment, 'js/app.js') >>> attrs.path_without_suffix 'js/app' """ if self.suffix: return self.path[:-len(''.join(self.suffix))] return self.path
[ "The", "relative", "path", "to", "asset", "without", "suffix", ".", "Example", "::" ]
gears/gears
python
https://github.com/gears/gears/blob/5729c2525a8c04c185e998bd9a86233708972921/gears/asset_attributes.py#L60-L70
[ "def", "path_without_suffix", "(", "self", ")", ":", "if", "self", ".", "suffix", ":", "return", "self", ".", "path", "[", ":", "-", "len", "(", "''", ".", "join", "(", "self", ".", "suffix", ")", ")", "]", "return", "self", ".", "path" ]
5729c2525a8c04c185e998bd9a86233708972921
test
AssetAttributes.logical_path
The logical path to asset. Example:: >>> attrs = AssetAttributes(environment, 'js/models.js.coffee') >>> attrs.logical_path 'js/models.js'
gears/asset_attributes.py
def logical_path(self): """The logical path to asset. Example:: >>> attrs = AssetAttributes(environment, 'js/models.js.coffee') >>> attrs.logical_path 'js/models.js' """ format_extension = self.format_extension or self.compiler_format_extension if format_extension is None: return self.path return self.path_without_suffix + format_extension
def logical_path(self): """The logical path to asset. Example:: >>> attrs = AssetAttributes(environment, 'js/models.js.coffee') >>> attrs.logical_path 'js/models.js' """ format_extension = self.format_extension or self.compiler_format_extension if format_extension is None: return self.path return self.path_without_suffix + format_extension
[ "The", "logical", "path", "to", "asset", ".", "Example", "::" ]
gears/gears
python
https://github.com/gears/gears/blob/5729c2525a8c04c185e998bd9a86233708972921/gears/asset_attributes.py#L73-L84
[ "def", "logical_path", "(", "self", ")", ":", "format_extension", "=", "self", ".", "format_extension", "or", "self", ".", "compiler_format_extension", "if", "format_extension", "is", "None", ":", "return", "self", ".", "path", "return", "self", ".", "path_without_suffix", "+", "format_extension" ]
5729c2525a8c04c185e998bd9a86233708972921
test
AssetAttributes.extensions
The list of asset extensions. Example:: >>> attrs = AssetAttributes(environment, 'js/models.js.coffee') >>> attrs.extensions ['.js', '.coffee'] >>> attrs = AssetAttributes(environment, 'js/lib/external.min.js.coffee') >>> attrs.format_extension ['.min', '.js', '.coffee']
gears/asset_attributes.py
def extensions(self): """The list of asset extensions. Example:: >>> attrs = AssetAttributes(environment, 'js/models.js.coffee') >>> attrs.extensions ['.js', '.coffee'] >>> attrs = AssetAttributes(environment, 'js/lib/external.min.js.coffee') >>> attrs.format_extension ['.min', '.js', '.coffee'] """ return re.findall(r'\.[^.]+', os.path.basename(self.path))
def extensions(self): """The list of asset extensions. Example:: >>> attrs = AssetAttributes(environment, 'js/models.js.coffee') >>> attrs.extensions ['.js', '.coffee'] >>> attrs = AssetAttributes(environment, 'js/lib/external.min.js.coffee') >>> attrs.format_extension ['.min', '.js', '.coffee'] """ return re.findall(r'\.[^.]+', os.path.basename(self.path))
[ "The", "list", "of", "asset", "extensions", ".", "Example", "::" ]
gears/gears
python
https://github.com/gears/gears/blob/5729c2525a8c04c185e998bd9a86233708972921/gears/asset_attributes.py#L87-L99
[ "def", "extensions", "(", "self", ")", ":", "return", "re", ".", "findall", "(", "r'\\.[^.]+'", ",", "os", ".", "path", ".", "basename", "(", "self", ".", "path", ")", ")" ]
5729c2525a8c04c185e998bd9a86233708972921
test
AssetAttributes.format_extension
The format extension of asset. Example:: >>> attrs = AssetAttributes(environment, 'js/models.js.coffee') >>> attrs.format_extension '.js' >>> attrs = AssetAttributes(environment, 'js/lib/external.min.js.coffee') >>> attrs.format_extension '.js'
gears/asset_attributes.py
def format_extension(self): """The format extension of asset. Example:: >>> attrs = AssetAttributes(environment, 'js/models.js.coffee') >>> attrs.format_extension '.js' >>> attrs = AssetAttributes(environment, 'js/lib/external.min.js.coffee') >>> attrs.format_extension '.js' """ for extension in reversed(self.extensions): compiler = self.environment.compilers.get(extension) if not compiler and self.environment.mimetypes.get(extension): return extension
def format_extension(self): """The format extension of asset. Example:: >>> attrs = AssetAttributes(environment, 'js/models.js.coffee') >>> attrs.format_extension '.js' >>> attrs = AssetAttributes(environment, 'js/lib/external.min.js.coffee') >>> attrs.format_extension '.js' """ for extension in reversed(self.extensions): compiler = self.environment.compilers.get(extension) if not compiler and self.environment.mimetypes.get(extension): return extension
[ "The", "format", "extension", "of", "asset", ".", "Example", "::" ]
gears/gears
python
https://github.com/gears/gears/blob/5729c2525a8c04c185e998bd9a86233708972921/gears/asset_attributes.py#L102-L117
[ "def", "format_extension", "(", "self", ")", ":", "for", "extension", "in", "reversed", "(", "self", ".", "extensions", ")", ":", "compiler", "=", "self", ".", "environment", ".", "compilers", ".", "get", "(", "extension", ")", "if", "not", "compiler", "and", "self", ".", "environment", ".", "mimetypes", ".", "get", "(", "extension", ")", ":", "return", "extension" ]
5729c2525a8c04c185e998bd9a86233708972921
test
AssetAttributes.unknown_extensions
The list of unknown extensions, which are actually parts of asset filename. Example:: >>> attrs = AssetAttributes(environment, 'js/lib-2.0.min.js') >>> attrs.suffix ['.0', '.min']
gears/asset_attributes.py
def unknown_extensions(self): """The list of unknown extensions, which are actually parts of asset filename. Example:: >>> attrs = AssetAttributes(environment, 'js/lib-2.0.min.js') >>> attrs.suffix ['.0', '.min'] """ unknown_extensions = [] for extension in self.extensions: compiler = self.environment.compilers.get(extension) if compiler or self.environment.mimetypes.get(extension): return unknown_extensions unknown_extensions.append(extension) return unknown_extensions
def unknown_extensions(self): """The list of unknown extensions, which are actually parts of asset filename. Example:: >>> attrs = AssetAttributes(environment, 'js/lib-2.0.min.js') >>> attrs.suffix ['.0', '.min'] """ unknown_extensions = [] for extension in self.extensions: compiler = self.environment.compilers.get(extension) if compiler or self.environment.mimetypes.get(extension): return unknown_extensions unknown_extensions.append(extension) return unknown_extensions
[ "The", "list", "of", "unknown", "extensions", "which", "are", "actually", "parts", "of", "asset", "filename", ".", "Example", "::" ]
gears/gears
python
https://github.com/gears/gears/blob/5729c2525a8c04c185e998bd9a86233708972921/gears/asset_attributes.py#L131-L145
[ "def", "unknown_extensions", "(", "self", ")", ":", "unknown_extensions", "=", "[", "]", "for", "extension", "in", "self", ".", "extensions", ":", "compiler", "=", "self", ".", "environment", ".", "compilers", ".", "get", "(", "extension", ")", "if", "compiler", "or", "self", ".", "environment", ".", "mimetypes", ".", "get", "(", "extension", ")", ":", "return", "unknown_extensions", "unknown_extensions", ".", "append", "(", "extension", ")", "return", "unknown_extensions" ]
5729c2525a8c04c185e998bd9a86233708972921