| signature (stringlengths 8–3.44k) | body (stringlengths 0–1.41M) | docstring (stringlengths 1–122k) | id (stringlengths 5–17) |
|---|---|---|---|
def project(dataIn, projectionScript):
|
<EOL>dataOut = {}<EOL>try:<EOL><INDENT>projectionScript = str(projectionScript)<EOL>program = makeProgramFromString(projectionScript)<EOL>if PY3:<EOL><INDENT>loc = {<EOL>'<STR_LIT>': dataIn,<EOL>'<STR_LIT>': dataOut<EOL>}<EOL>exec(program, {}, loc)<EOL>dataOut = loc['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>exec(program)<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>glogger.error("<STR_LIT>")<EOL>glogger.error(projectionScript)<EOL>glogger.error("<STR_LIT>")<EOL>glogger.error(e)<EOL>dataOut = {<EOL>'<STR_LIT:status>': '<STR_LIT:error>',<EOL>'<STR_LIT:message>': e.message<EOL>}<EOL><DEDENT>return dataOut<EOL>
|
Programs may make use of data in the `dataIn` variable and should
produce data in the `dataOut` variable.
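Since `project` simply `exec`s the projection script with `dataIn` and `dataOut` in scope, a script is plain Python. A minimal sketch, assuming `dataIn` holds a SPARQL-JSON results document (the input shape is an assumption, not guaranteed by the source):

```python
# Hypothetical projection script. Inside project(), `dataIn` and `dataOut`
# are injected into the exec() scope; they are defined here only to make
# the sketch runnable stand-alone.
dataIn = {'results': {'bindings': [{'s': {'value': 'http://example.org/a'}}]}}
dataOut = {}

# Flatten SPARQL-JSON bindings into {variable: [values]}.
for binding in dataIn.get('results', {}).get('bindings', []):
    for var, cell in binding.items():
        dataOut.setdefault(var, []).append(cell['value'])
# dataOut -> {'s': ['http://example.org/a']}
```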
|
f3383:m0
|
def guess_endpoint_uri(rq, gh_repo):
|
auth = (static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)<EOL>if auth == ('<STR_LIT:none>', '<STR_LIT:none>'):<EOL><INDENT>auth = None<EOL><DEDENT>if has_request_context() and "<STR_LIT>" in request.args:<EOL><INDENT>endpoint = request.args['<STR_LIT>']<EOL>glogger.info("<STR_LIT>" + endpoint)<EOL>return endpoint, auth<EOL><DEDENT>try:<EOL><INDENT>decorators = get_yaml_decorators(rq)<EOL>endpoint = decorators['<STR_LIT>']<EOL>auth = None<EOL>glogger.info("<STR_LIT>" + endpoint)<EOL><DEDENT>except (TypeError, KeyError):<EOL><INDENT>try:<EOL><INDENT>endpoint_content = gh_repo.getTextFor({'<STR_LIT>': '<STR_LIT>'})<EOL>endpoint = endpoint_content.strip().splitlines()[<NUM_LIT:0>]<EOL>auth = None<EOL>glogger.info("<STR_LIT>" + endpoint)<EOL><DEDENT>except:<EOL><INDENT>endpoint = static.DEFAULT_ENDPOINT<EOL>auth = (static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)<EOL>if auth == ('<STR_LIT:none>', '<STR_LIT:none>'):<EOL><INDENT>auth = None<EOL><DEDENT>glogger.warning("<STR_LIT>".format(endpoint))<EOL><DEDENT><DEDENT>return endpoint, auth<EOL>
|
Guesses the endpoint URI from (in this order):
- An endpoint parameter in the URL
- A #+endpoint decorator
- An endpoint.txt file in the repo
Otherwise, assigns a default endpoint.
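The three-step fallback above reads naturally as a priority chain; below is an illustrative, self-contained re-statement of that order (not the grlc implementation itself, whose string literals are anonymized in the body above):

```python
def resolve_endpoint(url_param, decorators, endpoint_txt, default):
    # 1. An endpoint parameter passed in the URL wins.
    if url_param:
        return url_param
    # 2. Otherwise, an #+endpoint decorator in the query metadata.
    if decorators and 'endpoint' in decorators:
        return decorators['endpoint']
    # 3. Otherwise, the first line of endpoint.txt in the repo.
    if endpoint_txt:
        return endpoint_txt.strip().splitlines()[0]
    # 4. Fall back to the configured default.
    return default

# Example: no URL parameter, no decorator, but the repo ships endpoint.txt.
resolve_endpoint(None, {}, "http://dbpedia.org/sparql\n", "http://default/sparql")
# -> 'http://dbpedia.org/sparql'
```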
|
f3384:m0
|
def count_query_results(query, endpoint):
|
<EOL>return <NUM_LIT:1000><EOL>
|
Returns the total number of results that query 'query' will generate.
WARNING: This is too expensive just for providing a number of result pages,
so a dummy count (1000) is returned for now.
|
f3384:m1
|
def _getDictWithKey(key, dict_list):
|
for d in dict_list:<EOL><INDENT>if key in d:<EOL><INDENT>return d<EOL><DEDENT><DEDENT>return None<EOL>
|
Returns the first dictionary in dict_list that contains the given key, or None if no dictionary does.
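A quick illustration with made-up data; note the helper returns the whole matching dictionary, not the value (the definition is restated here verbatim from the body above so the snippet runs stand-alone):

```python
def _getDictWithKey(key, dict_list):
    # Return the first dictionary containing the key, else None.
    for d in dict_list:
        if key in d:
            return d
    return None

dict_list = [{'it': 'ciao'}, {'en': 'hello', 'fr': 'salut'}]
_getDictWithKey('en', dict_list)   # -> {'en': 'hello', 'fr': 'salut'}
_getDictWithKey('de', dict_list)   # -> None
```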
|
f3384:m2
|
def get_parameters(rq, variables, endpoint, query_metadata, auth=None):
|
<EOL>internal_matcher = re.compile("<STR_LIT>")<EOL>variable_matcher = re.compile(<EOL>"<STR_LIT>")<EOL>parameters = {}<EOL>for v in variables:<EOL><INDENT>if internal_matcher.match(v):<EOL><INDENT>continue<EOL><DEDENT>match = variable_matcher.match(v)<EOL>if match:<EOL><INDENT>vname = match.group('<STR_LIT:name>')<EOL>vrequired = True if match.group('<STR_LIT>') == '<STR_LIT:_>' else False<EOL>vtype = '<STR_LIT:string>'<EOL>vcodes = get_enumeration(rq, vname, endpoint, query_metadata, auth)<EOL>vdefault = get_defaults(rq, vname, query_metadata)<EOL>vlang = None<EOL>vdatatype = None<EOL>vformat = None<EOL>mtype = match.group('<STR_LIT:type>')<EOL>muserdefined = match.group('<STR_LIT>')<EOL>if mtype in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT:string>']:<EOL><INDENT>vtype = mtype<EOL><DEDENT>elif mtype in ['<STR_LIT>']: <EOL><INDENT>vtype = '<STR_LIT:string>'<EOL>vformat = '<STR_LIT>'<EOL><DEDENT>elif mtype:<EOL><INDENT>vtype = '<STR_LIT:string>'<EOL>if mtype in static.XSD_DATATYPES:<EOL><INDENT>vdatatype = '<STR_LIT>'.format(mtype)<EOL><DEDENT>elif len(mtype) == <NUM_LIT:2>:<EOL><INDENT>vlang = mtype<EOL><DEDENT>elif muserdefined:<EOL><INDENT>vdatatype = '<STR_LIT>'.format(mtype, muserdefined)<EOL><DEDENT><DEDENT>parameters[vname] = {<EOL>'<STR_LIT>': '<STR_LIT>'.format(v),<EOL>'<STR_LIT>': vrequired,<EOL>'<STR_LIT:name>': vname,<EOL>'<STR_LIT:type>': vtype<EOL>}<EOL>if vcodes is not None:<EOL><INDENT>parameters[vname]['<STR_LIT>'] = sorted(vcodes)<EOL><DEDENT>if vlang is not None:<EOL><INDENT>parameters[vname]['<STR_LIT>'] = vlang<EOL><DEDENT>if vdatatype is not None:<EOL><INDENT>parameters[vname]['<STR_LIT>'] = vdatatype<EOL><DEDENT>if vformat is not None:<EOL><INDENT>parameters[vname]['<STR_LIT>'] = vformat<EOL><DEDENT>if vdefault is not None:<EOL><INDENT>parameters[vname]['<STR_LIT:default>'] = vdefault<EOL><DEDENT>glogger.info('<STR_LIT>'.format(parameters))<EOL><DEDENT><DEDENT>return parameters<EOL>
|
?_name The variable specifies a mandatory API parameter name. The value is incorporated into the query as a plain literal.
?__name The parameter is optional.
?_name_iri The variable is substituted with the parameter value as an IRI (also: number or literal).
?_name_en The parameter value is treated as a literal with the language tag 'en' (e.g., en, it, es, etc.).
?_name_integer The parameter value is treated as a literal and the XSD datatype 'integer' is added during substitution.
?_name_prefix_datatype The parameter value is treated as a literal and the datatype 'prefix:datatype' is added during substitution. The prefix must be specified according to the SPARQL syntax.
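Putting the convention together, a decorated query might look like the sketch below; the per-variable interpretations follow the rules just listed (they are not verified against the anonymized implementation above):

```python
# Hypothetical SPARQL query exercising the variable-naming convention.
query = """
SELECT ?s WHERE {
  ?s rdfs:label ?_label_en .      # required; literal with language tag "en"
  ?s a ?__type_iri .              # optional; substituted as an IRI
  ?s ex:year ?_year_integer .     # required; literal typed xsd:integer
}
"""
```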
|
f3384:m3
|
def get_defaults(rq, v, metadata):
|
glogger.debug("<STR_LIT>".format(metadata))<EOL>if '<STR_LIT>' not in metadata:<EOL><INDENT>return None<EOL><DEDENT>defaultsDict = _getDictWithKey(v, metadata['<STR_LIT>'])<EOL>if defaultsDict:<EOL><INDENT>return defaultsDict[v]<EOL><DEDENT>return None<EOL>
|
Returns the default value for a parameter, or None if no default is defined.
|
f3384:m4
|
def get_enumeration(rq, v, endpoint, metadata={}, auth=None):
|
<EOL>if '<STR_LIT>' not in metadata:<EOL><INDENT>return None<EOL><DEDENT>enumDict = _getDictWithKey(v, metadata['<STR_LIT>'])<EOL>if enumDict:<EOL><INDENT>return enumDict[v]<EOL><DEDENT>if v in metadata['<STR_LIT>']:<EOL><INDENT>return get_enumeration_sparql(rq, v, endpoint, auth)<EOL><DEDENT>return None<EOL>
|
Returns a list of enumerated values for variable 'v' in query 'rq'
|
f3384:m5
|
def get_enumeration_sparql(rq, v, endpoint, auth=None):
|
glogger.info('<STR_LIT>'.format(v))<EOL>vcodes = []<EOL>tpattern_matcher = re.compile("<STR_LIT>",<EOL>flags=re.DOTALL)<EOL>glogger.debug(rq)<EOL>tp_match = tpattern_matcher.match(rq)<EOL>if tp_match:<EOL><INDENT>vtpattern = tp_match.group('<STR_LIT>')<EOL>gnames = tp_match.group('<STR_LIT>')<EOL>glogger.debug("<STR_LIT>".format(gnames))<EOL>glogger.debug("<STR_LIT>".format(vtpattern))<EOL>glogger.debug("<STR_LIT>")<EOL>if gnames:<EOL><INDENT>codes_subquery = re.sub("<STR_LIT>",<EOL>"<STR_LIT>" + v + "<STR_LIT>" + gnames + "<STR_LIT>" + vtpattern + "<STR_LIT>", rq,<EOL>flags=re.DOTALL)<EOL><DEDENT>else:<EOL><INDENT>codes_subquery = re.sub("<STR_LIT>",<EOL>"<STR_LIT>" + v + "<STR_LIT>" + vtpattern + "<STR_LIT>", rq,<EOL>flags=re.DOTALL)<EOL><DEDENT>glogger.debug("<STR_LIT>".format(codes_subquery))<EOL>glogger.debug(endpoint)<EOL>codes_json = requests.get(endpoint, params={'<STR_LIT>': codes_subquery},<EOL>headers={'<STR_LIT>': static.mimetypes['<STR_LIT>'],<EOL>'<STR_LIT>': '<STR_LIT>'.format(static.ACCESS_TOKEN)}, auth=auth).json()<EOL>for code in codes_json['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>vcodes.append(list(code.values())[<NUM_LIT:0>]["<STR_LIT:value>"])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>glogger.debug("<STR_LIT>")<EOL><DEDENT>return vcodes<EOL>
|
Returns a list of enumerated values for variable 'v' in query 'rq', obtained by querying the SPARQL endpoint.
|
f3384:m6
|
def get_yaml_decorators(rq):
|
<EOL>if not rq:<EOL><INDENT>return None<EOL><DEDENT>if isinstance(rq, dict) and '<STR_LIT>' in rq: <EOL><INDENT>yaml_string = rq['<STR_LIT>']<EOL>query_string = rq<EOL><DEDENT>else: <EOL><INDENT>yaml_string = "<STR_LIT:\n>".join([row.lstrip('<STR_LIT>') for row in rq.split('<STR_LIT:\n>') if row.startswith('<STR_LIT>')])<EOL>query_string = "<STR_LIT:\n>".join([row for row in rq.split('<STR_LIT:\n>') if not row.startswith('<STR_LIT>')])<EOL><DEDENT>query_metadata = None<EOL>if type(yaml_string) == dict:<EOL><INDENT>query_metadata = yaml_string<EOL><DEDENT>elif type(yaml_string) == str:<EOL><INDENT>try: <EOL><INDENT>query_metadata = yaml.load(yaml_string)<EOL><DEDENT>except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:<EOL><INDENT>try:<EOL><INDENT>query_metadata = json.loads(yaml_string)<EOL><DEDENT>except json.JSONDecodeError:<EOL><INDENT>glogger.warning("<STR_LIT>")<EOL><DEDENT><DEDENT><DEDENT>if query_metadata is None:<EOL><INDENT>query_metadata = {}<EOL><DEDENT>query_metadata['<STR_LIT>'] = query_string<EOL>return query_metadata<EOL>
|
Returns only the YAML decorator metadata (this is needed by Triple Pattern Fragments)
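For reference, decorators are `#+`-prefixed comment lines carrying YAML, which get split off from the query body. A minimal sketch of that round trip (the `#+` marker and the decorator names are assumptions, since the dump anonymizes the literals):

```python
import yaml

rq = """#+ summary: Lists all labs
#+ endpoint: http://dbpedia.org/sparql
SELECT * WHERE { ?s ?p ?o }"""

# Split decorator lines from the query body, mirroring the logic above.
yaml_string = "\n".join(row.lstrip('#+') for row in rq.split('\n') if row.startswith('#+'))
query_string = "\n".join(row for row in rq.split('\n') if not row.startswith('#+'))

metadata = yaml.safe_load(yaml_string)
# -> {'summary': 'Lists all labs', 'endpoint': 'http://dbpedia.org/sparql'}
```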
|
f3384:m7
|
def get_metadata(rq, endpoint):
|
query_metadata = get_yaml_decorators(rq)<EOL>query_metadata['<STR_LIT:type>'] = '<STR_LIT>'<EOL>query_metadata['<STR_LIT>'] = rq<EOL>if isinstance(rq, dict): <EOL><INDENT>rq, proto, opt = SPARQLTransformer.pre_process(rq)<EOL>rq = rq.strip()<EOL>query_metadata['<STR_LIT>'] = proto<EOL>query_metadata['<STR_LIT>'] = opt<EOL>query_metadata['<STR_LIT>'] = rq<EOL><DEDENT>rq = enable_custom_function_prefix(rq, '<STR_LIT>')<EOL>rq = enable_custom_function_prefix(rq, '<STR_LIT>')<EOL>try:<EOL><INDENT>parsed_query = translateQuery(Query.parseString(rq, parseAll=True))<EOL>query_metadata['<STR_LIT:type>'] = parsed_query.algebra.name<EOL>if query_metadata['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>query_metadata['<STR_LIT>'] = parsed_query.algebra['<STR_LIT>']<EOL>query_metadata['<STR_LIT>'] = get_parameters(rq, parsed_query.algebra['<STR_LIT>'], endpoint, query_metadata)<EOL><DEDENT>elif query_metadata['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>query_metadata['<STR_LIT>'] = get_parameters(rq, parsed_query.algebra['<STR_LIT>'], endpoint, query_metadata)<EOL><DEDENT>else:<EOL><INDENT>glogger.warning(<EOL>"<STR_LIT>".format(query_metadata['<STR_LIT:type>']))<EOL><DEDENT><DEDENT>except ParseException as pe:<EOL><INDENT>glogger.warning(pe)<EOL>glogger.warning("<STR_LIT>")<EOL>try:<EOL><INDENT>glogger.info("<STR_LIT>")<EOL>parsed_query = UpdateUnit.parseString(rq, parseAll=True)<EOL>glogger.info(parsed_query)<EOL>query_metadata['<STR_LIT:type>'] = parsed_query[<NUM_LIT:0>]['<STR_LIT>'][<NUM_LIT:0>].name<EOL>if query_metadata['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>query_metadata['<STR_LIT>'] = {<EOL>'<STR_LIT:g>': {'<STR_LIT>': None, '<STR_LIT>': [], '<STR_LIT>': None, '<STR_LIT:name>': '<STR_LIT:g>', '<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': True, '<STR_LIT:type>': '<STR_LIT>'},<EOL>'<STR_LIT:data>': {'<STR_LIT>': None, '<STR_LIT>': [], '<STR_LIT>': None, '<STR_LIT:name>': '<STR_LIT:data>', '<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': True, '<STR_LIT:type>': '<STR_LIT>'}}<EOL><DEDENT>glogger.info("<STR_LIT>".format(query_metadata['<STR_LIT:type>']))<EOL><DEDENT>except:<EOL><INDENT>glogger.error("<STR_LIT>")<EOL>glogger.error(query_metadata['<STR_LIT>'])<EOL>glogger.error(traceback.print_exc())<EOL>pass<EOL><DEDENT><DEDENT>glogger.debug("<STR_LIT>".format(query_metadata['<STR_LIT:type>']))<EOL>glogger.debug("<STR_LIT>")<EOL>glogger.debug(pformat(query_metadata, indent=<NUM_LIT:32>))<EOL>return query_metadata<EOL>
|
Returns the metadata parsed from the raw query file 'rq'.
The metadata includes keys such as: 'endpoint', 'tags', 'summary', 'request', 'pagination', 'enumerate'
|
f3384:m9
|
def turtleize(swag):
|
swag_graph = Graph()<EOL>return swag_graph.serialize(format='<STR_LIT>')<EOL>
|
Transforms a JSON swagger object into an equivalent text/turtle LDA representation
|
f3386:m0
|
def getLoader(user, repo, sha=None, prov=None):
|
if user is None and repo is None:<EOL><INDENT>loader = LocalLoader()<EOL><DEDENT>else:<EOL><INDENT>loader = GithubLoader(user, repo, sha, prov)<EOL><DEDENT>return loader<EOL>
|
Build a fileLoader (LocalLoader or GithubLoader) for the given repository.
|
f3386:m1
|
def build_swagger_spec(user, repo, sha, serverName):
|
if user and repo:<EOL><INDENT>prov_g = grlcPROV(user, repo)<EOL><DEDENT>else:<EOL><INDENT>prov_g = None<EOL><DEDENT>swag = swagger.get_blank_spec()<EOL>swag['<STR_LIT:host>'] = serverName<EOL>try:<EOL><INDENT>loader = getLoader(user, repo, sha, prov_g)<EOL><DEDENT>except Exception as e:<EOL><INDENT>swag['<STR_LIT:info>'] = {<EOL>'<STR_LIT:title>': '<STR_LIT>',<EOL>'<STR_LIT:description>': str(e)<EOL>}<EOL>swag['<STR_LIT>'] = {}<EOL>return swag<EOL><DEDENT>prev_commit, next_commit, info, basePath =swagger.get_repo_info(loader, sha, prov_g)<EOL>swag['<STR_LIT>'] = prev_commit<EOL>swag['<STR_LIT>'] = next_commit<EOL>swag['<STR_LIT:info>'] = info<EOL>swag['<STR_LIT>'] = basePath<EOL>spec = swagger.build_spec(user, repo, sha, prov_g)<EOL>for item in spec:<EOL><INDENT>swag['<STR_LIT>'][item['<STR_LIT>']] = swagger.get_path_for_item(item)<EOL><DEDENT>if prov_g:<EOL><INDENT>prov_g.end_prov_graph()<EOL>swag['<STR_LIT>'] = prov_g.serialize(format='<STR_LIT>')<EOL><DEDENT>return swag<EOL>
|
Builds the grlc specification for the given GitHub user/repo in swagger format
|
f3386:m3
|
def __init__(self, user, repo):
|
self.user = user<EOL>self.repo = repo<EOL>self.prov_g = Graph()<EOL>prov_uri = URIRef("<STR_LIT>")<EOL>self.prov = Namespace(prov_uri)<EOL>self.prov_g.bind('<STR_LIT>', self.prov)<EOL>self.agent = URIRef("<STR_LIT>".format(static.SERVER_NAME))<EOL>self.entity_d = URIRef("<STR_LIT>".format(static.SERVER_NAME, self.user, self.repo))<EOL>self.activity = URIRef(self.entity_d + "<STR_LIT>")<EOL>self.init_prov_graph()<EOL>
|
Default constructor
|
f3387:c0:m0
|
def init_prov_graph(self):
|
try:<EOL><INDENT>repo_prov = check_output(<EOL>['<STR_LIT>', '<STR_LIT>'.format(self.user, self.repo),<EOL>'<STR_LIT>']).decode("<STR_LIT:utf-8>")<EOL>repo_prov = repo_prov[repo_prov.find('<STR_LIT:@>'):]<EOL>glogger.debug('<STR_LIT>')<EOL>with open('<STR_LIT>', '<STR_LIT:w>') as temp_prov:<EOL><INDENT>temp_prov.write(repo_prov)<EOL><DEDENT>self.prov_g.parse('<STR_LIT>', format='<STR_LIT>')<EOL><DEDENT>except Exception as e:<EOL><INDENT>glogger.error(e)<EOL>glogger.error("<STR_LIT>")<EOL>pass<EOL><DEDENT>self.prov_g.add((self.agent, RDF.type, self.prov.Agent))<EOL>self.prov_g.add((self.entity_d, RDF.type, self.prov.Entity))<EOL>self.prov_g.add((self.activity, RDF.type, self.prov.Activity))<EOL>self.prov_g.add((self.entity_d, self.prov.wasGeneratedBy, self.activity))<EOL>self.prov_g.add((self.entity_d, self.prov.wasAttributedTo, self.agent))<EOL>self.prov_g.add((self.activity, self.prov.wasAssociatedWith, self.agent))<EOL>self.prov_g.add((self.activity, self.prov.startedAtTime, Literal(datetime.now())))<EOL>
|
Initializes the PROV graph with everything we know at the start of the recording
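The bootstrap reduces to binding a PROV namespace and asserting the agent/entity/activity skeleton visible in the body above; a self-contained rdflib sketch of those triples (the URIs are placeholders, since the real ones are derived from static.SERVER_NAME, user, and repo):

```python
from datetime import datetime
from rdflib import Graph, Literal, Namespace, RDF, URIRef

PROV = Namespace("http://www.w3.org/ns/prov#")

g = Graph()
g.bind('prov', PROV)

# Placeholder URIs standing in for the server/user/repo-derived ones.
agent = URIRef("http://example.org/grlc")
entity = URIRef("http://example.org/api/user/repo")
activity = URIRef(str(entity) + "-activity")

g.add((agent, RDF.type, PROV.Agent))
g.add((entity, RDF.type, PROV.Entity))
g.add((activity, RDF.type, PROV.Activity))
g.add((entity, PROV.wasGeneratedBy, activity))
g.add((entity, PROV.wasAttributedTo, agent))
g.add((activity, PROV.wasAssociatedWith, agent))
g.add((activity, PROV.startedAtTime, Literal(datetime.now())))
```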
|
f3387:c0:m1
|
def add_used_entity(self, entity_uri):
|
entity_o = URIRef(entity_uri)<EOL>self.prov_g.add((entity_o, RDF.type, self.prov.Entity))<EOL>self.prov_g.add((self.activity, self.prov.used, entity_o))<EOL>
|
Adds the provided URI as an entity used by the logged activity
|
f3387:c0:m2
|
def end_prov_graph(self):
|
endTime = Literal(datetime.now())<EOL>self.prov_g.add((self.entity_d, self.prov.generatedAtTime, endTime))<EOL>self.prov_g.add((self.activity, self.prov.endedAtTime, endTime))<EOL>
|
Finalize prov recording with end time
|
f3387:c0:m3
|
def log_prov_graph(self):
|
glogger.debug("<STR_LIT>")<EOL>glogger.debug(self.prov_g.serialize(format='<STR_LIT>'))<EOL>
|
Log provenance graph so far
|
f3387:c0:m4
|
def serialize(self, format):
|
if PY3:<EOL><INDENT>return self.prov_g.serialize(format=format).decode('<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>return self.prov_g.serialize(format=format)<EOL><DEDENT>
|
Serialize provenance graph in the specified format
|
f3387:c0:m5
|
def data_from_nesta():
|
data = pd.read_csv(nesta_uk_url)<EOL>return data<EOL>
|
Read data from the GitHub repo.
|
f3391:m0
|
def get_labs(format):
|
ukmakerspaces_data = data_from_nesta()<EOL>ukmakerspaces = {}<EOL>for index, row in ukmakerspaces_data.iterrows():<EOL><INDENT>current_lab = UKMakerspace()<EOL>current_lab.address_1 = row["<STR_LIT>"].replace("<STR_LIT:\r>", "<STR_LIT:U+0020>")<EOL>current_lab.address_2 = row["<STR_LIT>"].replace("<STR_LIT:\r>", "<STR_LIT:U+0020>") + "<STR_LIT>" + row["<STR_LIT>"].replace("<STR_LIT:\r>", "<STR_LIT:U+0020>")<EOL>current_lab.city = "<STR_LIT>"<EOL>current_lab.county = "<STR_LIT>"<EOL>current_lab.email = row["<STR_LIT>"]<EOL>current_lab.latitude = "<STR_LIT>"<EOL>current_lab.longitude = "<STR_LIT>"<EOL>current_lab.links = "<STR_LIT>"<EOL>current_lab.name = row["<STR_LIT>"]<EOL>current_lab.phone = row["<STR_LIT>"]<EOL>current_lab.postal_code = row["<STR_LIT>"]<EOL>current_lab.url = row["<STR_LIT>"]<EOL>ukmakerspaces[current_lab.name] = current_lab<EOL><DEDENT>if format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = {}<EOL>for j in ukmakerspaces:<EOL><INDENT>output[j] = ukmakerspaces[j].__dict__<EOL><DEDENT><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>labs_list = []<EOL>for l in ukmakerspaces:<EOL><INDENT>single = ukmakerspaces[l].__dict__<EOL>single_lab = Feature(<EOL>type="<STR_LIT>",<EOL>geometry=Point((single["<STR_LIT>"], single["<STR_LIT>"])),<EOL>properties=single)<EOL>labs_list.append(single_lab)<EOL><DEDENT>output = dumps(FeatureCollection(labs_list))<EOL><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = {}<EOL>for j in ukmakerspaces:<EOL><INDENT>output[j] = ukmakerspaces[j].__dict__<EOL><DEDENT>output = pd.DataFrame.from_dict(output)<EOL>output = output.transpose()<EOL><DEDENT>elif format.lower() == "<STR_LIT:object>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = ukmakerspaces<EOL><DEDENT>else:<EOL><INDENT>output = ukmakerspaces<EOL><DEDENT>if format.lower() == "<STR_LIT>":<EOL><INDENT>output = json.dumps(output)<EOL><DEDENT>return output<EOL>
|
Gets current UK Makerspaces data as listed by NESTA.
|
f3391:m1
|
def labs_count():
|
ukmakerspaces = get_labs("<STR_LIT:object>")<EOL>return len(ukmakerspaces)<EOL>
|
Gets the number of current UK Makerspaces listed by NESTA.
|
f3391:m2
|
def data_from_repaircafe_org():
|
<EOL>browser = webdriver.Chrome()<EOL>browser.get("<STR_LIT>")<EOL>browser.maximize_window()<EOL>viewmore_button = True<EOL>while viewmore_button:<EOL><INDENT>try:<EOL><INDENT>viewmore = browser.find_element_by_id("<STR_LIT>")<EOL>browser.execute_script("<STR_LIT>", viewmore)<EOL>viewmore.click()<EOL><DEDENT>except:<EOL><INDENT>viewmore_button = False<EOL><DEDENT>sleep(<NUM_LIT:2>)<EOL><DEDENT>page_source = BeautifulSoup(browser.page_source, "<STR_LIT>")<EOL>browser.quit()<EOL>data = []<EOL>for h4 in page_source.find_all("<STR_LIT>"):<EOL><INDENT>for a in h4.find_all('<STR_LIT:a>', href=True):<EOL><INDENT>data.append({"<STR_LIT:name>": a.contents[<NUM_LIT:0>], "<STR_LIT:url>": a['<STR_LIT>']})<EOL><DEDENT><DEDENT>return data<EOL>
|
Gets data from repaircafe.org.
|
f3392:m0
|
def get_labs(format):
|
data = data_from_repaircafe_org()<EOL>repaircafes = {}<EOL>for i in data:<EOL><INDENT>current_lab = RepairCafe()<EOL>current_lab.name = i["<STR_LIT:name>"]<EOL>slug = i["<STR_LIT:url>"].replace("<STR_LIT>", "<STR_LIT>")<EOL>if slug.endswith("<STR_LIT:/>"):<EOL><INDENT>slug.replace("<STR_LIT:/>", "<STR_LIT>")<EOL><DEDENT>current_lab.slug = slug<EOL>current_lab.url = i["<STR_LIT:url>"]<EOL>page_request = requests.get(i["<STR_LIT:url>"])<EOL>if page_request.status_code == <NUM_LIT:200>:<EOL><INDENT>page_source = BeautifulSoup(page_request.text, "<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>output = "<STR_LIT>"<EOL><DEDENT>current_lab.links = {"<STR_LIT>": "<STR_LIT>", "<STR_LIT>": "<STR_LIT>"}<EOL>column = page_source.find_all("<STR_LIT>", class_="<STR_LIT>")<EOL>for j in column:<EOL><INDENT>for p in j.find_all('<STR_LIT:p>'):<EOL><INDENT>for a in p.find_all('<STR_LIT:a>', href=True):<EOL><INDENT>if "<STR_LIT>" in a['<STR_LIT>']:<EOL><INDENT>current_lab.links["<STR_LIT>"] = a['<STR_LIT>']<EOL><DEDENT>elif "<STR_LIT>" in a['<STR_LIT>']:<EOL><INDENT>current_lab.links["<STR_LIT>"] = a['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>current_lab.links[a['<STR_LIT>']] = a['<STR_LIT>']<EOL><DEDENT><DEDENT><DEDENT><DEDENT>column = page_source.find_all("<STR_LIT>", class_="<STR_LIT>")<EOL>for x in column:<EOL><INDENT>if x.string:<EOL><INDENT>print(x.string.strip())<EOL><DEDENT><DEDENT>exit()<EOL>repaircafes[slug] = current_lab<EOL><DEDENT>if format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = {}<EOL>for j in repaircafes:<EOL><INDENT>output[j] = repaircafes[j].__dict__<EOL><DEDENT><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>labs_list = []<EOL>for l in repaircafes:<EOL><INDENT>single = repaircafes[l].__dict__<EOL>single_lab = Feature(<EOL>type="<STR_LIT>",<EOL>geometry=Point((single["<STR_LIT>"], single["<STR_LIT>"])),<EOL>properties=single)<EOL>labs_list.append(single_lab)<EOL><DEDENT>output = dumps(FeatureCollection(labs_list))<EOL><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = {}<EOL>for j in repaircafes:<EOL><INDENT>output[j] = repaircafes[j].__dict__<EOL><DEDENT>output = pd.DataFrame.from_dict(output)<EOL>output = output.transpose()<EOL><DEDENT>elif format.lower() == "<STR_LIT:object>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = repaircafes<EOL><DEDENT>else:<EOL><INDENT>output = repaircafes<EOL><DEDENT>if format.lower() == "<STR_LIT>":<EOL><INDENT>output = json.dumps(output)<EOL><DEDENT>return output<EOL>
|
Gets Repair Cafe data from repaircafe.org.
|
f3392:m1
|
def labs_count():
|
repaircafes = data_from_repaircafe_org()<EOL>return len(repaircafes["<STR_LIT>"])<EOL>
|
Gets the number of current Repair Cafes registered on repaircafe.org.
|
f3392:m2
|
def get_lab_text(lab_slug, language):
|
if language == "<STR_LIT>" or language == "<STR_LIT>" or language == "<STR_LIT>" or language == "<STR_LIT>":<EOL><INDENT>language = "<STR_LIT>"<EOL><DEDENT>elif language == "<STR_LIT>" or language == "<STR_LIT>" or language == "<STR_LIT>" or language == "<STR_LIT>" or language == "<STR_LIT>":<EOL><INDENT>language = "<STR_LIT>"<EOL><DEDENT>else:<EOL><INDENT>language = "<STR_LIT>"<EOL><DEDENT>wiki = MediaWiki(makeinitaly__foundation_api_url)<EOL>wiki_response = wiki.call(<EOL>{'<STR_LIT:action>': '<STR_LIT>',<EOL>'<STR_LIT>': lab_slug + "<STR_LIT:/>" + language,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT:content>'})<EOL>for i in wiki_response["<STR_LIT>"]["<STR_LIT>"]:<EOL><INDENT>if "<STR_LIT>" in wiki_response["<STR_LIT>"]["<STR_LIT>"][i]:<EOL><INDENT>content = wiki_response["<STR_LIT>"]["<STR_LIT>"][i]["<STR_LIT>"][<NUM_LIT:0>]["<STR_LIT:*>"]<EOL><DEDENT>else:<EOL><INDENT>content = "<STR_LIT>"<EOL><DEDENT><DEDENT>newstr01 = content.replace("<STR_LIT>", "<STR_LIT>")<EOL>newstr02 = newstr01.replace("<STR_LIT>", "<STR_LIT>")<EOL>result = newstr02.rstrip("<STR_LIT>").split("<STR_LIT>")<EOL>return result[<NUM_LIT:0>]<EOL>
|
Gets the text description in English or Italian for a single lab from makeinitaly.foundation.
|
f3393:m0
|
def get_single_lab(lab_slug):
|
wiki = MediaWiki(makeinitaly__foundation_api_url)<EOL>wiki_response = wiki.call(<EOL>{'<STR_LIT:action>': '<STR_LIT>',<EOL>'<STR_LIT>': lab_slug,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT:content>'})<EOL>for i in wiki_response["<STR_LIT>"]["<STR_LIT>"]:<EOL><INDENT>content = wiki_response["<STR_LIT>"]["<STR_LIT>"][i]["<STR_LIT>"][<NUM_LIT:0>]["<STR_LIT:*>"]<EOL><DEDENT>newstr01 = content.replace("<STR_LIT>", "<STR_LIT>")<EOL>newstr02 = newstr01.replace("<STR_LIT>", "<STR_LIT>")<EOL>result = newstr02.rstrip("<STR_LIT>").split("<STR_LIT>")<EOL>current_lab = MILab()<EOL>for i in result:<EOL><INDENT>if "<STR_LIT>" in i:<EOL><INDENT>value = i.replace("<STR_LIT>", "<STR_LIT>")<EOL>current_lab.coordinates = value<EOL>latlong = []<EOL>if "<STR_LIT:U+002CU+0020>" in value:<EOL><INDENT>latlong = value.rstrip("<STR_LIT:U+002CU+0020>").split("<STR_LIT:U+002CU+0020>")<EOL><DEDENT>elif "<STR_LIT>" in value:<EOL><INDENT>latlong = value.rstrip("<STR_LIT>").split("<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>latlong = ["<STR_LIT>", "<STR_LIT>"]<EOL><DEDENT>current_lab.latitude = latlong[<NUM_LIT:0>]<EOL>current_lab.longitude = latlong[<NUM_LIT:1>]<EOL><DEDENT>elif "<STR_LIT>" in i:<EOL><INDENT>value = i.replace("<STR_LIT>", "<STR_LIT>")<EOL>current_lab.province = value.upper()<EOL><DEDENT>elif "<STR_LIT>" in i:<EOL><INDENT>value = i.replace("<STR_LIT>", "<STR_LIT>")<EOL>current_lab.region = value<EOL><DEDENT>elif "<STR_LIT>" in i:<EOL><INDENT>value = i.replace("<STR_LIT>", "<STR_LIT>")<EOL>current_lab.address = value<EOL><DEDENT>elif "<STR_LIT>" in i:<EOL><INDENT>value = i.replace("<STR_LIT>", "<STR_LIT>")<EOL>current_lab.city = value<EOL><DEDENT>elif "<STR_LIT>" in i:<EOL><INDENT>value = i.replace("<STR_LIT>", "<STR_LIT>")<EOL>current_lab.fablabsio = value<EOL><DEDENT>elif "<STR_LIT>" in i:<EOL><INDENT>value = i.replace("<STR_LIT>", "<STR_LIT>")<EOL>current_lab.website = value<EOL><DEDENT>elif "<STR_LIT>" in i:<EOL><INDENT>value = i.replace("<STR_LIT>", "<STR_LIT>")<EOL>current_lab.facebook = value<EOL><DEDENT>elif "<STR_LIT>" in i:<EOL><INDENT>value = i.replace("<STR_LIT>", "<STR_LIT>")<EOL>current_lab.twitter = value<EOL><DEDENT>elif "<STR_LIT>" in i:<EOL><INDENT>value = i.replace("<STR_LIT>", "<STR_LIT>")<EOL>current_lab.email = value<EOL><DEDENT>elif "<STR_LIT>" in i:<EOL><INDENT>value = i.replace("<STR_LIT>", "<STR_LIT>")<EOL>current_lab.manager = value<EOL><DEDENT>elif "<STR_LIT>" in i:<EOL><INDENT>value = i.replace("<STR_LIT>", "<STR_LIT>")<EOL>current_lab.birthyear = value<EOL><DEDENT><DEDENT>current_lab.text_en = get_lab_text(lab_slug=lab_slug, language="<STR_LIT>")<EOL>current_lab.text_it = get_lab_text(lab_slug=lab_slug, language="<STR_LIT>")<EOL>return current_lab<EOL>
|
Gets data from a single lab from makeinitaly.foundation.
|
f3393:m1
|
def get_labs(format):
|
labs = []<EOL>wiki = MediaWiki(makeinitaly__foundation_api_url)<EOL>wiki_response = wiki.call(<EOL>{'<STR_LIT:action>': '<STR_LIT>',<EOL>'<STR_LIT:list>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'})<EOL>if "<STR_LIT>" in wiki_response:<EOL><INDENT>nextpage = wiki_response[<EOL>"<STR_LIT>"]["<STR_LIT>"]["<STR_LIT>"]<EOL><DEDENT>urls = []<EOL>for i in wiki_response["<STR_LIT>"]["<STR_LIT>"]:<EOL><INDENT>urls.append(i["<STR_LIT:title>"].replace("<STR_LIT:U+0020>", "<STR_LIT:_>"))<EOL><DEDENT>for i in urls:<EOL><INDENT>current_lab = get_single_lab(i)<EOL>labs.append(current_lab)<EOL><DEDENT>while "<STR_LIT>" in wiki_response:<EOL><INDENT>wiki = MediaWiki(makeinitaly__foundation_api_url)<EOL>wiki_response = wiki.call({'<STR_LIT:action>': '<STR_LIT>',<EOL>'<STR_LIT:list>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>"<STR_LIT>": nextpage})<EOL>urls = []<EOL>for i in wiki_response["<STR_LIT>"]["<STR_LIT>"]:<EOL><INDENT>urls.append(i["<STR_LIT:title>"].replace("<STR_LIT:U+0020>", "<STR_LIT:_>"))<EOL><DEDENT>for i in urls:<EOL><INDENT>current_lab = get_single_lab(i, data_format)<EOL>labs.append(current_lab)<EOL><DEDENT>if "<STR_LIT>" in wiki_response:<EOL><INDENT>nextpage = wiki_response[<EOL>"<STR_LIT>"]["<STR_LIT>"]["<STR_LIT>"]<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>labs_dict = {}<EOL>for j, k in enumerate(labs):<EOL><INDENT>labs_dict[j] = k.__dict__<EOL><DEDENT>if format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = labs_dict<EOL><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>labs_list = []<EOL>for l in labs_dict:<EOL><INDENT>single = labs_dict[l].__dict__<EOL>single_lab = Feature(<EOL>type="<STR_LIT>",<EOL>geometry=Point((single["<STR_LIT>"], single["<STR_LIT>"])),<EOL>properties=single)<EOL>labs_list.append(single_lab)<EOL><DEDENT>output = dumps(FeatureCollection(labs_list))<EOL><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = {}<EOL>for j in labs_dict:<EOL><INDENT>output[j] = labs_dict[j].__dict__<EOL><DEDENT>output = pd.DataFrame.from_dict(output)<EOL>output = output.transpose()<EOL><DEDENT>elif format.lower() == "<STR_LIT:object>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = labs<EOL><DEDENT>else:<EOL><INDENT>output = labs<EOL><DEDENT>if format.lower() == "<STR_LIT>":<EOL><INDENT>output = json.dumps(labs_dict)<EOL><DEDENT>return output<EOL>
|
Gets data from all labs from makeinitaly.foundation.
|
f3393:m2
|
def labs_count():
|
labs = get_labs(data_format="<STR_LIT>")<EOL>return len(labs)<EOL>
|
Gets the number of current Labs registered on makeinitaly.foundation.
|
f3393:m3
|
def data_from_fablabs_io(endpoint):
|
data = requests.get(endpoint).json()<EOL>return data<EOL>
|
Gets data from fablabs.io.
|
f3394:m0
|
def get_labs(format):
|
fablabs_json = data_from_fablabs_io(fablabs_io_labs_api_url_v0)<EOL>fablabs = {}<EOL>for i in fablabs_json["<STR_LIT>"]:<EOL><INDENT>current_lab = FabLab()<EOL>current_lab.name = i["<STR_LIT:name>"]<EOL>current_lab.address_1 = i["<STR_LIT>"]<EOL>current_lab.address_2 = i["<STR_LIT>"]<EOL>current_lab.address_notes = i["<STR_LIT>"]<EOL>current_lab.avatar = i["<STR_LIT>"]<EOL>current_lab.blurb = i["<STR_LIT>"]<EOL>current_lab.capabilities = i["<STR_LIT>"]<EOL>if i["<STR_LIT>"].isupper():<EOL><INDENT>i["<STR_LIT>"] = i["<STR_LIT>"].title()<EOL><DEDENT>current_lab.city = i["<STR_LIT>"]<EOL>current_lab.country_code = i["<STR_LIT>"]<EOL>current_lab.county = i["<STR_LIT>"]<EOL>current_lab.description = i["<STR_LIT:description>"]<EOL>current_lab.email = i["<STR_LIT:email>"]<EOL>current_lab.id = i["<STR_LIT:id>"]<EOL>current_lab.phone = i["<STR_LIT>"]<EOL>current_lab.postal_code = i["<STR_LIT>"]<EOL>current_lab.slug = i["<STR_LIT>"]<EOL>current_lab.url = i["<STR_LIT:url>"]<EOL>current_lab.continent = country_alpha2_to_continent_code(i["<STR_LIT>"].upper())<EOL>current_country = pycountry.countries.get(alpha_2=i["<STR_LIT>"].upper())<EOL>current_lab.country_code = current_country.alpha_3<EOL>current_lab.country = current_country.name<EOL>if i["<STR_LIT>"] is not None:<EOL><INDENT>current_lab.longitude = i["<STR_LIT>"]<EOL><DEDENT>else:<EOL><INDENT>current_lab.longitude = <NUM_LIT:0.0><EOL><DEDENT>if i["<STR_LIT>"] is not None:<EOL><INDENT>current_lab.latitude = i["<STR_LIT>"]<EOL><DEDENT>else:<EOL><INDENT>current_lab.latitude = <NUM_LIT:0.0><EOL><DEDENT>current_lab.links = {"<STR_LIT>": "<STR_LIT>", "<STR_LIT>": "<STR_LIT>"}<EOL>for link in i["<STR_LIT>"]:<EOL><INDENT>if "<STR_LIT>" in link["<STR_LIT:url>"]:<EOL><INDENT>current_lab.links["<STR_LIT>"] = link["<STR_LIT:url>"]<EOL><DEDENT>elif "<STR_LIT>" in link["<STR_LIT:url>"]:<EOL><INDENT>current_lab.links["<STR_LIT>"] = link["<STR_LIT:url>"]<EOL><DEDENT>else:<EOL><INDENT>current_lab.links[link["<STR_LIT:id>"]] = link["<STR_LIT:url>"]<EOL><DEDENT><DEDENT>fablabs[i["<STR_LIT>"]] = current_lab<EOL><DEDENT>if format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = {}<EOL>for j in fablabs:<EOL><INDENT>output[j] = fablabs[j].__dict__<EOL><DEDENT><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>labs_list = []<EOL>for l in fablabs:<EOL><INDENT>single = fablabs[l].__dict__<EOL>single_lab = Feature(<EOL>type="<STR_LIT>",<EOL>geometry=Point((single["<STR_LIT>"], single["<STR_LIT>"])),<EOL>properties=single)<EOL>labs_list.append(single_lab)<EOL><DEDENT>output = dumps(FeatureCollection(labs_list))<EOL><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = {}<EOL>for j in fablabs:<EOL><INDENT>output[j] = fablabs[j].__dict__<EOL><DEDENT>output = pd.DataFrame.from_dict(output)<EOL>output = output.transpose()<EOL><DEDENT>elif format.lower() == "<STR_LIT:object>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = fablabs<EOL><DEDENT>else:<EOL><INDENT>output = fablabs<EOL><DEDENT>if format.lower() == "<STR_LIT>":<EOL><INDENT>output = json.dumps(output)<EOL><DEDENT>return output<EOL>
|
Gets Fab Lab data from fablabs.io.
|
f3394:m1
|
def labs_count():
|
fablabs = data_from_fablabs_io(fablabs_io_labs_api_url_v0)<EOL>return len(fablabs["<STR_LIT>"])<EOL>
|
Gets the number of current Fab Labs registered on fablabs.io.
|
f3394:m2
|
def get_projects(format):
|
projects_json = data_from_fablabs_io(fablabs_io_projects_api_url_v0)<EOL>projects = {}<EOL>project_url = "<STR_LIT>"<EOL>fablabs = get_labs(format="<STR_LIT:object>")<EOL>for i in projects_json["<STR_LIT>"]:<EOL><INDENT>i = i["<STR_LIT>"]<EOL>current_project = Project()<EOL>current_project.id = i["<STR_LIT:id>"]<EOL>current_project.title = i["<STR_LIT:title>"]<EOL>current_project.description = i["<STR_LIT:description>"]<EOL>current_project.github = i["<STR_LIT>"]<EOL>current_project.web = i["<STR_LIT>"]<EOL>current_project.dropbox = i["<STR_LIT>"]<EOL>current_project.bitbucket = i["<STR_LIT>"]<EOL>current_project.lab_id = i["<STR_LIT>"]<EOL>if i["<STR_LIT>"] is not None:<EOL><INDENT>for k in fablabs:<EOL><INDENT>if fablabs[k].id == i["<STR_LIT>"]:<EOL><INDENT>current_project.lab = fablabs[k]<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>current_project.lab = None<EOL><DEDENT>current_project.owner_id = i["<STR_LIT>"]<EOL>current_project.created_at = i["<STR_LIT>"]<EOL>current_project.updated_at = i["<STR_LIT>"]<EOL>current_project.vimeo = i["<STR_LIT>"]<EOL>current_project.flickr = i["<STR_LIT>"]<EOL>current_project.youtube = i["<STR_LIT>"]<EOL>current_project.drive = i["<STR_LIT>"]<EOL>current_project.twitter = i["<STR_LIT>"]<EOL>current_project.facebook = i["<STR_LIT>"]<EOL>current_project.googleplus = i["<STR_LIT>"]<EOL>current_project.instagram = i["<STR_LIT>"]<EOL>current_project.status = i["<STR_LIT:status>"]<EOL>current_project.version = i["<STR_LIT:version>"]<EOL>current_project.faq = i["<STR_LIT>"]<EOL>current_project.scope = i["<STR_LIT>"]<EOL>current_project.community = i["<STR_LIT>"]<EOL>current_project.lookingfor = i["<STR_LIT>"]<EOL>current_project.cover = i["<STR_LIT>"]<EOL>url = project_url + str(current_project.id)<EOL>current_project.url = url<EOL>projects[current_project.id] = current_project<EOL><DEDENT>if format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = {}<EOL>for j in projects:<EOL><INDENT>project_dict = projects[j].__dict__<EOL>if project_dict["<STR_LIT>"] is not None:<EOL><INDENT>project_dict["<STR_LIT>"] = project_dict["<STR_LIT>"].__dict__<EOL><DEDENT>output[j] = project_dict<EOL><DEDENT><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>projects_list = []<EOL>for p in projects:<EOL><INDENT>if projects[p].lab_id is not None:<EOL><INDENT>single_project = projects[p].__dict__<EOL>if projects[p].lab is not None:<EOL><INDENT>single_project["<STR_LIT>"] = single_project["<STR_LIT>"].__dict__<EOL><DEDENT>for l in fablabs:<EOL><INDENT>single_lab = fablabs[l].__dict__<EOL>if single_lab["<STR_LIT:id>"] == single_project["<STR_LIT>"]:<EOL><INDENT>project_lab = Feature(<EOL>type="<STR_LIT>",<EOL>geometry=Point((single_lab["<STR_LIT>"],<EOL>single_lab["<STR_LIT>"])),<EOL>properties=single_project)<EOL>projects_list.append(project_lab)<EOL><DEDENT><DEDENT>output = dumps(FeatureCollection(projects_list))<EOL><DEDENT><DEDENT><DEDENT>elif format.lower() == "<STR_LIT:object>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = projects<EOL><DEDENT>else:<EOL><INDENT>output = projects<EOL><DEDENT>if format.lower() == "<STR_LIT>":<EOL><INDENT>output = json.dumps(output)<EOL><DEDENT>return output<EOL>
|
Gets project data from fablabs.io.
|
f3394:m3
|
def projects_count():
|
projects = data_from_fablabs_io(fablabs_io_projects_api_url_v0)<EOL>return len(projects["<STR_LIT>"])<EOL>
|
Gets the number of current projects submitted on fablabs.io.
|
f3394:m4
|
def get_multiple_data():
|
<EOL>all_labs = {}<EOL>all_labs["<STR_LIT>"] = diybio_org.get_labs(format="<STR_LIT>")<EOL>all_labs["<STR_LIT>"] = fablabs_io.get_labs(format="<STR_LIT>")<EOL>all_labs["<STR_LIT>"] = makeinitaly_foundation.get_labs(<EOL>format="<STR_LIT>")<EOL>all_labs["<STR_LIT>"] = hackaday_io.get_labs(format="<STR_LIT>")<EOL>all_labs["<STR_LIT>"] = hackerspaces_org.get_labs(format="<STR_LIT>")<EOL>all_labs["<STR_LIT>"] = makery_info.get_labs(format="<STR_LIT>")<EOL>all_labs["<STR_LIT>"] = nesta.get_labs(format="<STR_LIT>")<EOL>return all_labs<EOL>
|
Get data from all the platforms listed in makerlabs.
|
f3395:m0
|
def get_timeline(source):
|
<EOL>timeline_format = ["<STR_LIT:name>", "<STR_LIT:type>", "<STR_LIT:source>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>", "<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>", "<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>", "<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>", "<STR_LIT>"]<EOL>timeline = pd.DataFrame(timeline_format)<EOL>if source.lower() == "<STR_LIT>":<EOL><INDENT>data = diybio_org.get_labs(format="<STR_LIT>")<EOL><DEDENT>elif source.lower() == "<STR_LIT>":<EOL><INDENT>data = fablabs_io.get_labs(format="<STR_LIT>")<EOL><DEDENT>elif source.lower() == "<STR_LIT>":<EOL><INDENT>data = makeinitaly_foundation.get_labs(format="<STR_LIT>")<EOL><DEDENT>elif source.lower() == "<STR_LIT>":<EOL><INDENT>data = hackaday_io.get_labs(format="<STR_LIT>")<EOL><DEDENT>elif source.lower() == "<STR_LIT>":<EOL><INDENT>data = hackerspaces_org.get_labs(format="<STR_LIT>")<EOL><DEDENT>elif source.lower() == "<STR_LIT>":<EOL><INDENT>data = makery_info.get_labs(format="<STR_LIT>")<EOL><DEDENT>elif source.lower() == "<STR_LIT>":<EOL><INDENT>data = nesta.get_labs(format="<STR_LIT>")<EOL><DEDENT>elif source.lower() == "<STR_LIT:all>":<EOL><INDENT>pass<EOL><DEDENT>for lab in labs_data:<EOL><INDENT>for link in lab.links:<EOL><INDENT>print(link)<EOL>if "<STR_LIT>" in link:<EOL><INDENT>print(link)<EOL><DEDENT>if "<STR_LIT>" in link:<EOL><INDENT>print(link)<EOL><DEDENT><DEDENT>lab_dataframe_dict = {"<STR_LIT:name>": lab.name,<EOL>"<STR_LIT:type>": lab.lab_type,<EOL>"<STR_LIT:source>": lab.source,<EOL>"<STR_LIT>": lab.country,<EOL>"<STR_LIT>": lab.city,<EOL>"<STR_LIT>": lab.latitude,<EOL>"<STR_LIT>": lab.longitude,<EOL>"<STR_LIT>": lab.url}<EOL>timeline.append(lab_dataframe_dict)<EOL>["<STR_LIT:name>", "<STR_LIT:type>", "<STR_LIT:source>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>", "<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>"]<EOL><DEDENT>return timeline<EOL>
|
Rebuild a timeline of the history of makerlabs.
|
f3395:m1
|
def get_single_lab(lab_slug, open_cage_api_key):
|
wiki = MediaWiki(hackerspaces_org_api_url)<EOL>wiki_response = wiki.call(<EOL>{'<STR_LIT:action>': '<STR_LIT>',<EOL>'<STR_LIT>': lab_slug,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT:content>'})<EOL>for i in wiki_response["<STR_LIT>"]["<STR_LIT>"]:<EOL><INDENT>content = wiki_response["<STR_LIT>"]["<STR_LIT>"][i]["<STR_LIT>"][<NUM_LIT:0>]["<STR_LIT:*>"]<EOL><DEDENT>current_lab = Hackerspace()<EOL>equipment_list = []<EOL>wikicode = mwparserfromhell.parse(content)<EOL>for k in wikicode.filter_templates():<EOL><INDENT>element_name = unicode(k.name)<EOL>if "<STR_LIT>" in element_name:<EOL><INDENT>for j in k.params:<EOL><INDENT>current_lab.name = lab_slug<EOL>j_value = unicode(j.value)<EOL>j_name = unicode(j.name)<EOL>if j_value[-<NUM_LIT:1>:] == "<STR_LIT:\n>" or j_value[:<NUM_LIT:1>] == "<STR_LIT:\n>":<EOL><INDENT>j_value = j_value.replace('<STR_LIT:\n>', '<STR_LIT>')<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.logo = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.founding = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>j_value = j_value.replace('<STR_LIT:">', '<STR_LIT>')<EOL>j_value = j_value.replace('<STR_LIT:N>', '<STR_LIT>')<EOL>j_value = j_value.replace('<STR_LIT:S>', '<STR_LIT>')<EOL>j_value = j_value.replace('<STR_LIT>', '<STR_LIT>')<EOL>j_value = j_value.replace('<STR_LIT:E>', '<STR_LIT>')<EOL>j_value = j_value.replace(u'<STR_LIT>', '<STR_LIT>')<EOL>j_value = j_value.replace('<STR_LIT:U+0020>', '<STR_LIT>')<EOL>address = get_location(query=j_value, format="<STR_LIT>", api_key=open_cage_api_key)<EOL>current_lab.city = address["<STR_LIT>"]<EOL>current_lab.county = address["<STR_LIT>"]<EOL>current_lab.state = address["<STR_LIT:state>"]<EOL>current_lab.postal_code = address["<STR_LIT>"]<EOL>current_lab.address_1 = address["<STR_LIT>"]<EOL>current_lab.country = address["<STR_LIT>"]<EOL>current_lab.country_code = address["<STR_LIT>"]<EOL>current_lab.continent = address["<STR_LIT>"]<EOL>current_lab.latitude = address["<STR_LIT>"]<EOL>current_lab.longitude = address["<STR_LIT>"]<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.membercount = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.fee = j_value<EOL><DEDENT>if j_name == "<STR_LIT:size>":<EOL><INDENT>current_lab.size = j_value<EOL><DEDENT>if j_name == "<STR_LIT:status>":<EOL><INDENT>current_lab.status = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.site = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.wiki = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.irc = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.jabber = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.phone = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.youtube = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.eventbrite = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.facebook = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.ustream = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.flickr = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.twitter = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.googleplus = j_value<EOL><DEDENT>if j_name == "<STR_LIT:email>":<EOL><INDENT>current_lab.email = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.maillist = j_value<EOL><DEDENT>if j_name == 
"<STR_LIT>":<EOL><INDENT>current_lab.ical = j_value<EOL><DEDENT>if j_name == "<STR_LIT>":<EOL><INDENT>current_lab.forum = j_value<EOL><DEDENT><DEDENT><DEDENT>elif "<STR_LIT>" in element_name:<EOL><INDENT>for j in k.params:<EOL><INDENT>equipment_list.append(j.replace("<STR_LIT>", "<STR_LIT>"))<EOL><DEDENT>current_lab.equipment = equipment_list<EOL><DEDENT><DEDENT>freetext = "<STR_LIT>"<EOL>for k in wikicode._nodes:<EOL><INDENT>try:<EOL><INDENT>test_value = k.name<EOL><DEDENT>except AttributeError:<EOL><INDENT>freetext += unicode(k)<EOL><DEDENT><DEDENT>current_lab.text = freetext<EOL>return current_lab<EOL>
|
Gets data from a single lab from hackerspaces.org.
|
f3397:m0
|
def get_labs(format, open_cage_api_key):
|
labs = []<EOL>wiki = MediaWiki(hackerspaces_org_api_url)<EOL>wiki_response = wiki.call(<EOL>{'<STR_LIT:action>': '<STR_LIT>',<EOL>'<STR_LIT:list>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'})<EOL>nextpage = wiki_response["<STR_LIT>"]["<STR_LIT>"]["<STR_LIT>"]<EOL>urls = []<EOL>for i in wiki_response["<STR_LIT>"]["<STR_LIT>"]:<EOL><INDENT>urls.append(i["<STR_LIT:title>"].replace("<STR_LIT:U+0020>", "<STR_LIT:_>"))<EOL><DEDENT>for i in urls:<EOL><INDENT>current_lab = get_single_lab(i, open_cage_api_key)<EOL>labs.append(current_lab)<EOL><DEDENT>while "<STR_LIT>" in wiki_response:<EOL><INDENT>wiki = MediaWiki(hackerspaces_org_api_url)<EOL>wiki_response = wiki.call({'<STR_LIT:action>': '<STR_LIT>',<EOL>'<STR_LIT:list>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>"<STR_LIT>": nextpage})<EOL>urls = []<EOL>for i in wiki_response["<STR_LIT>"]["<STR_LIT>"]:<EOL><INDENT>urls.append(i["<STR_LIT:title>"].replace("<STR_LIT:U+0020>", "<STR_LIT:_>"))<EOL><DEDENT>for i in urls:<EOL><INDENT>current_lab = get_single_lab(i, open_cage_api_key)<EOL>labs.append(current_lab)<EOL><DEDENT>if "<STR_LIT>" in wiki_response:<EOL><INDENT>nextpage = wiki_response[<EOL>"<STR_LIT>"]["<STR_LIT>"]["<STR_LIT>"]<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>labs_dict = {}<EOL>for j, k in enumerate(labs):<EOL><INDENT>labs_dict[j] = k.__dict__<EOL><DEDENT>if format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = labs_dict<EOL><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>labs_list = []<EOL>for l in labs_dict:<EOL><INDENT>single = labs_dict[l].__dict__<EOL>single_lab = Feature(<EOL>type="<STR_LIT>",<EOL>geometry=Point((single["<STR_LIT>"], single["<STR_LIT>"])),<EOL>properties=single)<EOL>labs_list.append(single_lab)<EOL><DEDENT>output = dumps(FeatureCollection(labs_list))<EOL><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = labs_dict<EOL>output = pd.DataFrame.from_dict(output)<EOL>output = output.transpose()<EOL>output = output.set_index(['<STR_LIT:name>'])<EOL><DEDENT>elif format.lower() == "<STR_LIT:object>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = labs<EOL><DEDENT>else:<EOL><INDENT>output = labs<EOL><DEDENT>if format.lower() == "<STR_LIT>":<EOL><INDENT>output = json.dumps(labs_dict)<EOL><DEDENT>return output<EOL>
|
Gets data from all labs from hackerspaces.org.
|
f3397:m1
|
def labs_count():
|
labs = get_labs()<EOL>return len(labs)<EOL>
|
Gets the number of current Hackerspaces registered on hackerspaces.org.
|
f3397:m2
|
def data_from_techshop_ws(tws_url):
|
r = requests.get(tws_url)<EOL>if r.status_code == <NUM_LIT:200>:<EOL><INDENT>data = BeautifulSoup(r.text, "<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>data = "<STR_LIT>"<EOL><DEDENT>return data<EOL>
|
Scrapes data from techshop.ws.
|
f3398:m0
|
def get_labs(format):
|
techshops_soup = data_from_techshop_ws(techshop_us_url)<EOL>techshops = {}<EOL>data = techshops_soup.findAll('<STR_LIT>', attrs={'<STR_LIT:id>': '<STR_LIT>'})<EOL>for element in data:<EOL><INDENT>links = element.findAll('<STR_LIT:a>')<EOL>hrefs = {}<EOL>for k, a in enumerate(links):<EOL><INDENT>if "<STR_LIT>" not in a['<STR_LIT>']:<EOL><INDENT>hrefs[k] = a['<STR_LIT>']<EOL><DEDENT><DEDENT>for k, v in hrefs.iteritems():<EOL><INDENT>if "<STR_LIT>" not in v:<EOL><INDENT>hrefs[k] = "<STR_LIT>" + v<EOL><DEDENT>else:<EOL><INDENT>hrefs[k] = v<EOL><DEDENT><DEDENT>for k, v in hrefs.iteritems():<EOL><INDENT>if "<STR_LIT>" in v:<EOL><INDENT>hrefs[k] = v.replace("<STR_LIT>", "<STR_LIT>")<EOL><DEDENT><DEDENT><DEDENT>hr = []<EOL>for key, value in hrefs.iteritems():<EOL><INDENT>if value not in hr:<EOL><INDENT>hr.append(value)<EOL><DEDENT><DEDENT>hrefs = hr<EOL>for page in hrefs:<EOL><INDENT>data = data_from_techshop_ws(page)<EOL>current_lab = Techshop()<EOL>name = data.title.contents[<NUM_LIT:0>].split('<STR_LIT>')[<NUM_LIT:1>].encode('<STR_LIT:utf-8>')<EOL>if "<STR_LIT>" not in name:<EOL><INDENT>name = "<STR_LIT>" + name<EOL><DEDENT>current_lab.name = name<EOL>current_lab.slug = name<EOL>current_lab.url = page<EOL>current_lab.links = {"<STR_LIT>": "<STR_LIT>", "<STR_LIT>": "<STR_LIT>"}<EOL>page_links = data.findAll('<STR_LIT:a>')<EOL>for link in page_links:<EOL><INDENT>if link.has_attr("<STR_LIT>"):<EOL><INDENT>if "<STR_LIT>" in link.attrs["<STR_LIT>"]:<EOL><INDENT>current_lab.links["<STR_LIT>"] = link.attrs["<STR_LIT>"]<EOL><DEDENT>if "<STR_LIT>" in link.attrs["<STR_LIT>"]:<EOL><INDENT>current_lab.links["<STR_LIT>"] = link.attrs["<STR_LIT>"]<EOL><DEDENT><DEDENT><DEDENT>iframes = data.findAll('<STR_LIT>')<EOL>if len(iframes) != <NUM_LIT:0>:<EOL><INDENT>for iframe in iframes:<EOL><INDENT>embed_url = iframe.attrs["<STR_LIT:src>"]<EOL>if "<STR_LIT>" in embed_url:<EOL><INDENT>two_d = embed_url.find("<STR_LIT>")<EOL>three_d = embed_url.find("<STR_LIT>")<EOL>longitude = embed_url[two_d:].split('<STR_LIT:!>')[<NUM_LIT:0>]<EOL>latitude = embed_url[three_d:].split('<STR_LIT:!>')[<NUM_LIT:0>]<EOL>longitude = longitude[<NUM_LIT:2>:]<EOL>latitude = latitude[<NUM_LIT:2>:]<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>page_links = data.findAll('<STR_LIT:a>')<EOL>for link in page_links:<EOL><INDENT>if "<STR_LIT>" in link.attrs["<STR_LIT>"]:<EOL><INDENT>embed_url = link.attrs["<STR_LIT>"]<EOL>if "<STR_LIT>" in embed_url:<EOL><INDENT>first_string = embed_url.split('<STR_LIT>')[<NUM_LIT:0>]<EOL>coordinates = first_string.split('<STR_LIT>')[<NUM_LIT:1>]<EOL>latitude = coordinates.split('<STR_LIT:U+002C>')[<NUM_LIT:0>]<EOL>longitude = coordinates.split('<STR_LIT:U+002C>')[<NUM_LIT:1>]<EOL><DEDENT><DEDENT>elif "<STR_LIT>" in link.attrs["<STR_LIT>"]:<EOL><INDENT>embed_url = link.attrs["<STR_LIT>"]<EOL>if "<STR_LIT>" in embed_url:<EOL><INDENT>one_d = embed_url.find("<STR_LIT>")<EOL>two_d = embed_url.find("<STR_LIT>")<EOL>longitude = embed_url[one_d:].split('<STR_LIT:!>')[<NUM_LIT:0>]<EOL>latitude = embed_url[two_d:].split('<STR_LIT:!>')[<NUM_LIT:0>]<EOL>longitude = longitude[<NUM_LIT:2>:]<EOL>latitude = latitude[<NUM_LIT:2>:]<EOL><DEDENT><DEDENT><DEDENT><DEDENT>current_lab.latitude = latitude<EOL>current_lab.longitude = longitude<EOL>current_lab.continent = "<STR_LIT>"<EOL>current_lab.country_code = "<STR_LIT>"<EOL>current_lab.country = "<STR_LIT>"<EOL>location = geolocator.reverse((latitude, longitude))<EOL>if "<STR_LIT>" in location.raw["<STR_LIT:address>"]:<EOL><INDENT>current_lab.county = 
location.raw["<STR_LIT:address>"]["<STR_LIT>"].encode(<EOL>'<STR_LIT:utf-8>')<EOL><DEDENT>if "<STR_LIT>" in location.raw["<STR_LIT:address>"]:<EOL><INDENT>current_lab.county = location.raw["<STR_LIT:address>"]["<STR_LIT>"].encode(<EOL>'<STR_LIT:utf-8>')<EOL><DEDENT>if "<STR_LIT:state>" in location.raw["<STR_LIT:address>"]:<EOL><INDENT>current_lab.state = location.raw["<STR_LIT:address>"]["<STR_LIT:state>"].encode(<EOL>'<STR_LIT:utf-8>')<EOL><DEDENT>if "<STR_LIT>" in location.raw["<STR_LIT:address>"]:<EOL><INDENT>current_lab.postal_code = location.raw["<STR_LIT:address>"][<EOL>"<STR_LIT>"].encode('<STR_LIT:utf-8>')<EOL><DEDENT>current_lab.address_1 = location.address.encode('<STR_LIT:utf-8>')<EOL>techshops[current_lab.slug] = current_lab<EOL><DEDENT>if format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = {}<EOL>for j in techshops:<EOL><INDENT>output[j] = techshops[j].__dict__<EOL><DEDENT><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>labs_list = []<EOL>for l in techshops:<EOL><INDENT>single = techshops[l].__dict__<EOL>single_lab = Feature(<EOL>type="<STR_LIT>",<EOL>geometry=Point((single["<STR_LIT>"], single["<STR_LIT>"])),<EOL>properties=single)<EOL>labs_list.append(single_lab)<EOL><DEDENT>output = dumps(FeatureCollection(labs_list))<EOL><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = {}<EOL>for j in techshops:<EOL><INDENT>output[j] = techshops[j].__dict__<EOL><DEDENT>output = pd.DataFrame.from_dict(output)<EOL>output = output.transpose()<EOL><DEDENT>elif format.lower() == "<STR_LIT:object>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = techshops<EOL><DEDENT>else:<EOL><INDENT>output = techshops<EOL><DEDENT>if format.lower() == "<STR_LIT>":<EOL><INDENT>output = json.dumps(output)<EOL><DEDENT>return output<EOL>
|
Gets Techshop data from techshop.ws.
|
f3398:m1
|
def labs_count():
|
techshops = get_labs("<STR_LIT:object>")<EOL>return len(techshops)<EOL>
|
Gets the number of current Techshops listed on techshop.ws.
|
f3398:m2
|
def data_from_hackaday_io(endpoint):
|
data = requests.get(endpoint).json()<EOL>return data<EOL>
|
Gets data from hackaday.io.
|
f3399:m0
|
def get_labs(format):
|
hackerspaces_json = data_from_hackaday_io(hackaday_io_labs_map_url)<EOL>hackerspaces = {}<EOL>for i in hackerspaces_json:<EOL><INDENT>current_lab = Hackerspace()<EOL>current_lab.id = i["<STR_LIT:id>"]<EOL>current_lab.url = "<STR_LIT>" + current_lab.id<EOL>current_lab.name = i["<STR_LIT:name>"]<EOL>if len(i["<STR_LIT:description>"]) != <NUM_LIT:0>:<EOL><INDENT>current_lab.description = i["<STR_LIT:description>"]<EOL><DEDENT>elif len(i["<STR_LIT>"]) != <NUM_LIT:0>:<EOL><INDENT>current_lab.description = i["<STR_LIT>"]<EOL><DEDENT>current_lab.created_at = i["<STR_LIT>"]["<STR_LIT>"]<EOL>if i["<STR_LIT>"] is not None:<EOL><INDENT>latlon = json.loads(i["<STR_LIT>"])<EOL>current_lab.latitude = latlon["<STR_LIT>"]<EOL>current_lab.longitude = latlon["<STR_LIT>"]<EOL>country = geolocator.reverse(<EOL>[latlon["<STR_LIT>"], latlon["<STR_LIT>"]])<EOL>current_lab.country = country.raw[<EOL>"<STR_LIT:address>"]["<STR_LIT>"]<EOL>current_lab.address = country.raw["<STR_LIT>"]<EOL>current_lab.address_1 = country.raw["<STR_LIT>"]<EOL>current_lab.country_code = country.raw[<EOL>"<STR_LIT:address>"]["<STR_LIT>"]<EOL>current_lab.county = country.raw[<EOL>"<STR_LIT:address>"]["<STR_LIT>"]<EOL>current_lab.city = country.raw[<EOL>"<STR_LIT:address>"]["<STR_LIT>"]<EOL>current_lab.postal_code = country.raw[<EOL>"<STR_LIT:address>"]["<STR_LIT>"]<EOL><DEDENT>else:<EOL><INDENT>current_lab.latitude = <NUM_LIT:0.0><EOL>current_lab.longitude = <NUM_LIT:0.0><EOL><DEDENT>hackerspaces[i["<STR_LIT:name>"]] = current_lab<EOL><DEDENT>if format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = {}<EOL>for j in hackerspaces:<EOL><INDENT>output[j] = hackerspaces[j].__dict__<EOL><DEDENT><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>labs_list = []<EOL>for l in hackerspaces:<EOL><INDENT>single = hackerspaces[l].__dict__<EOL>single_lab = Feature(<EOL>type="<STR_LIT>",<EOL>geometry=Point((single["<STR_LIT>"], single["<STR_LIT>"])),<EOL>properties=single)<EOL>labs_list.append(single_lab)<EOL><DEDENT>output = dumps(FeatureCollection(labs_list))<EOL><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = {}<EOL>for j in hackerspaces:<EOL><INDENT>output[j] = hackerspaces[j].__dict__<EOL><DEDENT>output = pd.DataFrame.from_dict(output)<EOL>output = output.transpose()<EOL><DEDENT>elif format.lower() == "<STR_LIT:object>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = hackerspaces<EOL><DEDENT>else:<EOL><INDENT>output = hackerspaces<EOL><DEDENT>if format.lower() == "<STR_LIT>":<EOL><INDENT>output = json.dumps(output)<EOL><DEDENT>return output<EOL>
|
Gets Hackerspaces data from hackaday.io.
|
f3399:m1
|
def labs_count():
|
hackerspaces = data_from_hackaday_io(hackaday_io_labs_api_url)<EOL>return len(hackerspaces["<STR_LIT>"])<EOL>
|
Gets the number of current Hackerspaces listed on hackaday.io.
|
f3399:m2
|
def data_from_makery_info(endpoint):
|
data = requests.get(endpoint).json()<EOL>return data<EOL>
|
Gets data from makery.info.
|
f3400:m0
|
def get_labs(format):
|
labs_json = data_from_makery_info(makery_info_labs_api_url)<EOL>labs = {}<EOL>for i in labs_json["<STR_LIT>"]:<EOL><INDENT>current_lab = MakeryLab()<EOL>current_lab.address_1 = i["<STR_LIT>"]<EOL>current_lab.address_2 = i["<STR_LIT>"]<EOL>current_lab.address_notes = i["<STR_LIT>"]<EOL>current_lab.avatar = i["<STR_LIT>"]<EOL>current_lab.blurb = i["<STR_LIT>"]<EOL>current_lab.capabilities = i["<STR_LIT>"]<EOL>current_lab.city = i["<STR_LIT>"]<EOL>current_lab.country_code = i["<STR_LIT>"]<EOL>current_lab.county = i["<STR_LIT>"]<EOL>current_lab.description = i["<STR_LIT:description>"]<EOL>current_lab.email = i["<STR_LIT:email>"]<EOL>current_lab.header_image_src = i["<STR_LIT>"]<EOL>current_lab.id = i["<STR_LIT:id>"]<EOL>current_lab.kind_name = i["<STR_LIT>"]<EOL>if i["<STR_LIT>"] is None or i["<STR_LIT>"] is None:<EOL><INDENT>address = i["<STR_LIT>"] + i["<STR_LIT>"] + i["<STR_LIT>"]<EOL>try:<EOL><INDENT>location = geolocator.geocode(address)<EOL>current_lab.latitude = location.latitude<EOL>current_lab.longitude = location.longitude<EOL><DEDENT>except:<EOL><INDENT>try:<EOL><INDENT>location = geolocator.geocode(i["<STR_LIT>"])<EOL>current_lab.latitude = location.latitude<EOL>current_lab.longitude = location.longitude<EOL><DEDENT>except:<EOL><INDENT>current_lab.latitude = <NUM_LIT:0.0><EOL>current_lab.longitude = <NUM_LIT:0.0><EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>current_lab.latitude = i["<STR_LIT>"]<EOL>current_lab.longitude = i["<STR_LIT>"]<EOL><DEDENT>current_lab.links = i["<STR_LIT>"]<EOL>current_lab.name = i["<STR_LIT:name>"]<EOL>current_lab.parent_id = i["<STR_LIT>"]<EOL>current_lab.phone = i["<STR_LIT>"]<EOL>current_lab.postal_code = i["<STR_LIT>"]<EOL>current_lab.slug = i["<STR_LIT>"]<EOL>current_lab.url = i["<STR_LIT:url>"]<EOL>labs[i["<STR_LIT>"]] = current_lab<EOL><DEDENT>if format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = {}<EOL>for j in labs:<EOL><INDENT>output[j] = labs[j].__dict__<EOL><DEDENT><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>labs_list = []<EOL>for l in labs:<EOL><INDENT>single = labs[l].__dict__<EOL>single_lab = Feature(<EOL>type="<STR_LIT>",<EOL>geometry=Point((single["<STR_LIT>"], single["<STR_LIT>"])),<EOL>properties=single)<EOL>labs_list.append(single_lab)<EOL><DEDENT>output = dumps(FeatureCollection(labs_list))<EOL><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = {}<EOL>for j in labs_list:<EOL><INDENT>output[j] = labs_list[j].__dict__<EOL><DEDENT>output = pd.DataFrame.from_dict(output)<EOL>output = output.transpose()<EOL><DEDENT>elif format.lower() == "<STR_LIT:object>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = labs<EOL><DEDENT>else:<EOL><INDENT>output = labs<EOL><DEDENT>if format.lower() == "<STR_LIT>":<EOL><INDENT>output = json.dumps(output)<EOL><DEDENT>return output<EOL>
|
Gets Lab data from makery.info.
|
f3400:m1
|
def labs_count():
|
labs = data_from_makery_info(makery_info_labs_api_url)<EOL>return len(labs["<STR_LIT>"])<EOL>
|
Gets the number of current Labs listed on makery.info.
|
f3400:m2
|
def get_location(query, format, api_key):
|
<EOL>sleep(<NUM_LIT:1>)<EOL>geolocator = OpenCage(api_key=api_key, timeout=<NUM_LIT:10>)<EOL>data = {"<STR_LIT>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT:state>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT>": None}<EOL>road = "<STR_LIT>"<EOL>number = "<STR_LIT>"<EOL>location_data = {"<STR_LIT>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT:state>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT>": None}<EOL>if format == "<STR_LIT>":<EOL><INDENT>if query is None or len(query) < <NUM_LIT:3>:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>location = geolocator.reverse(query)<EOL>if location is not None:<EOL><INDENT>location_data = location[<NUM_LIT:0>].raw[u'<STR_LIT>']<EOL>location_data["<STR_LIT>"] = location[<NUM_LIT:0>].raw[u'<STR_LIT>']["<STR_LIT>"]<EOL>location_data["<STR_LIT>"] = location[<NUM_LIT:0>].raw[u'<STR_LIT>']["<STR_LIT>"]<EOL><DEDENT><DEDENT><DEDENT>if format == "<STR_LIT>":<EOL><INDENT>if query is None or len(query) < <NUM_LIT:3>:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>location = geolocator.geocode(query)<EOL>if location is not None:<EOL><INDENT>location_data = location.raw[u'<STR_LIT>']<EOL>location_data["<STR_LIT>"] = location.raw[u'<STR_LIT>']["<STR_LIT>"]<EOL>location_data["<STR_LIT>"] = location.raw[u'<STR_LIT>']["<STR_LIT>"]<EOL><DEDENT><DEDENT><DEDENT>for component in location_data:<EOL><INDENT>if component == "<STR_LIT>" or component == "<STR_LIT>":<EOL><INDENT>data["<STR_LIT>"] = location_data[component]<EOL><DEDENT>if component == "<STR_LIT>":<EOL><INDENT>road = location_data[component]<EOL><DEDENT>if component == "<STR_LIT>":<EOL><INDENT>number = location_data[component]<EOL><DEDENT>if component == "<STR_LIT>":<EOL><INDENT>data["<STR_LIT>"] = location_data[component]<EOL><DEDENT>if component == "<STR_LIT>":<EOL><INDENT>data["<STR_LIT>"] = location_data[component]<EOL><DEDENT>if component == "<STR_LIT>":<EOL><INDENT>data["<STR_LIT>"] = location_data[component]<EOL><DEDENT>if component == "<STR_LIT:state>":<EOL><INDENT>data["<STR_LIT:state>"] = location_data[component]<EOL><DEDENT>if component == "<STR_LIT>":<EOL><INDENT>data["<STR_LIT>"] = location_data[component]<EOL><DEDENT><DEDENT>data["<STR_LIT>"] = unicode(road) + "<STR_LIT:U+0020>" + unicode(number)<EOL>data["<STR_LIT>"] = location_data["<STR_LIT>"]<EOL>data["<STR_LIT>"] = location_data["<STR_LIT>"]<EOL>try:<EOL><INDENT>country_data = transformations.cca2_to_ccn(data["<STR_LIT>"])<EOL>data["<STR_LIT>"] = transformations.ccn_to_cca3(country_data)<EOL><DEDENT>except:<EOL><INDENT>data["<STR_LIT>"] = None<EOL><DEDENT>try:<EOL><INDENT>country_data = transformations.cc_to_cn(data["<STR_LIT>"])<EOL>data["<STR_LIT>"] = transformations.cn_to_ctn(country_data)<EOL><DEDENT>except:<EOL><INDENT>data["<STR_LIT>"] = None<EOL><DEDENT>return data<EOL>
|
Get geographic data for a lab in a form that is consistent across all labs.
|
f3401:m0
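A minimal usage sketch for get_location above. The masked `format` literals appear to select between reverse and forward geocoding, and the returned dict appears to carry latitude/longitude keys; the strings 'direct', 'latitude', 'longitude' and the API key below are all assumptions.

data = get_location(query='Berlin, Germany', format='direct',
                    api_key='YOUR_OPENCAGE_KEY')  # hypothetical key
print(data['latitude'], data['longitude'])        # assumed key names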
|
def data_from_diybio_org():
|
r = requests.get(diy_bio_labs_url)<EOL>if r.status_code == <NUM_LIT:200>:<EOL><INDENT>data = BeautifulSoup(r.text.replace(u'<STR_LIT>', u'<STR_LIT>'), "<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>data = "<STR_LIT>"<EOL><DEDENT>return data<EOL>
|
Scrapes data from diybio.org.
|
f3402:m0
|
def get_labs(format, open_cage_api_key):
|
diybiolabs_soup = data_from_diybio_org()<EOL>diybiolabs = {}<EOL>rows_list = []<EOL>continents_dict = {}<EOL>continents_order = <NUM_LIT:0><EOL>ranges_starting_points = []<EOL>for row in diybiolabs_soup.select("<STR_LIT>"):<EOL><INDENT>cells = row.find_all('<STR_LIT>')<EOL>rows_list.append(cells)<EOL><DEDENT>for k, row in enumerate(rows_list):<EOL><INDENT>for col in row:<EOL><INDENT>if col.find('<STR_LIT>'):<EOL><INDENT>for h3 in col.findAll('<STR_LIT>'):<EOL><INDENT>ranges_starting_points.append(k)<EOL>continents_dict[continents_order] = h3.get_text()<EOL>continents_order += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT><DEDENT>ranges = {}<EOL>for k, j in enumerate(reversed(ranges_starting_points)):<EOL><INDENT>if k < len(ranges_starting_points) - <NUM_LIT:1>:<EOL><INDENT>ranges[k] = {"<STR_LIT:start>": ranges_starting_points[k],<EOL>"<STR_LIT:end>": ranges_starting_points[k + <NUM_LIT:1>]}<EOL><DEDENT>else:<EOL><INDENT>ranges[k] = {"<STR_LIT:start>": ranges_starting_points[k],<EOL>"<STR_LIT:end>": len(rows_list)}<EOL><DEDENT><DEDENT>for i in ranges:<EOL><INDENT>for j in range(ranges[i]["<STR_LIT:start>"] + <NUM_LIT:1>, ranges[i]["<STR_LIT:end>"]):<EOL><INDENT>rules = [len(n) == <NUM_LIT:0> for n in rows_list[j]]<EOL>if False in rules:<EOL><INDENT>current_lab = DiyBioLab()<EOL>current_lab.city = rows_list[j][<NUM_LIT:1>].contents[<NUM_LIT:0>].encode('<STR_LIT:utf-8>')<EOL>if continents_dict[i] == "<STR_LIT>" or continents_dict[<EOL>i] == "<STR_LIT>":<EOL><INDENT>current_lab.state = rows_list[j][<NUM_LIT:2>].contents[<NUM_LIT:0>].replace(<EOL>"<STR_LIT:U+0020>", "<STR_LIT>").encode('<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>current_lab.country_code = rows_list[j][<NUM_LIT:2>].contents[<EOL><NUM_LIT:0>].encode('<STR_LIT:utf-8>')<EOL><DEDENT>current_lab.url = rows_list[j][<NUM_LIT:3>].contents[<NUM_LIT:0>].attrs['<STR_LIT>']<EOL>slug = current_lab.url<EOL>if "<STR_LIT>" in slug:<EOL><INDENT>slug = slug.replace("<STR_LIT>", "<STR_LIT>")<EOL><DEDENT>elif "<STR_LIT>" in slug:<EOL><INDENT>slug = slug.replace("<STR_LIT>", "<STR_LIT>")<EOL><DEDENT>if "<STR_LIT>" in slug:<EOL><INDENT>slug = slug.replace("<STR_LIT>", "<STR_LIT>")<EOL><DEDENT>current_lab.name = slug<EOL>current_lab.slug = slug<EOL>if continents_dict[i] == "<STR_LIT>" or continents_dict[i] == "<STR_LIT>":<EOL><INDENT>current_lab.continent = "<STR_LIT>"<EOL>current_lab.country_code = "<STR_LIT>"<EOL>current_lab.country = "<STR_LIT>"<EOL>current_lab.state = us.states.lookup(<EOL>current_lab.state).name<EOL><DEDENT>address = get_location(query=current_lab.city, format="<STR_LIT>", api_key=open_cage_api_key)<EOL>current_lab.continent = address["<STR_LIT>"]<EOL>current_lab.latitude = address["<STR_LIT>"]<EOL>current_lab.longitude = address["<STR_LIT>"]<EOL>current_lab.address_1 = address["<STR_LIT>"]<EOL>current_lab.country = address["<STR_LIT>"]<EOL>current_lab.country_code = address["<STR_LIT>"]<EOL>current_lab.latitude = address["<STR_LIT>"]<EOL>current_lab.longitude = address["<STR_LIT>"]<EOL>current_lab.county = address["<STR_LIT>"]<EOL>current_lab.postal_code = address["<STR_LIT>"]<EOL>current_lab.state = address["<STR_LIT:state>"]<EOL>diybiolabs[slug] = current_lab<EOL>del current_lab<EOL><DEDENT><DEDENT><DEDENT>if format.lower() == "<STR_LIT>":<EOL><INDENT>output = {}<EOL>for j in diybiolabs:<EOL><INDENT>output[j] = diybiolabs[j].__dict__<EOL><DEDENT><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>labs_list = []<EOL>for l in diybiolabs:<EOL><INDENT>single = diybiolabs[l].__dict__<EOL>single_lab = Feature(<EOL>type="<STR_LIT>",<EOL>geometry=Point((single["<STR_LIT>"], single["<STR_LIT>"])),<EOL>properties=single)<EOL>labs_list.append(single_lab)<EOL><DEDENT>output = dumps(FeatureCollection(labs_list))<EOL><DEDENT>elif format.lower() == "<STR_LIT>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = {}<EOL>for j in diybiolabs:<EOL><INDENT>output[j] = diybiolabs[j].__dict__<EOL><DEDENT>output = pd.DataFrame.from_dict(output)<EOL>output = output.transpose()<EOL><DEDENT>elif format.lower() == "<STR_LIT:object>" or format.lower() == "<STR_LIT>":<EOL><INDENT>output = diybiolabs<EOL><DEDENT>else:<EOL><INDENT>output = diybiolabs<EOL><DEDENT>if format.lower() == "<STR_LIT>":<EOL><INDENT>output = json.dumps(output)<EOL><DEDENT>return output<EOL>
|
Gets DIYBio Lab data from diybio.org.
|
f3402:m1
|
def labs_count():
|
diybiolabs = get_labs("<STR_LIT:object>")<EOL>return len(diybiolabs)<EOL>
|
Gets the number of current DIYBio Labs listed on diybio.org.
|
f3402:m2
|
def __send_notification(self, message, title, title_link='<STR_LIT>', color='<STR_LIT>',<EOL>fields='<STR_LIT>', log_level=LogLv.INFO):
|
if log_level < self.log_level:<EOL><INDENT>return None<EOL><DEDENT>payload = self.__build_payload(message, title, title_link, color, fields)<EOL>try:<EOL><INDENT>response = self.__post(payload)<EOL><DEDENT>except Exception:<EOL><INDENT>raise Exception(traceback.format_exc())<EOL><DEDENT>return response<EOL>
|
Send a message to a channel.
Args:
message: Message body.
title: Message title.
title_link: Link of the message title.
color: Message line color on Slack. This parameter should be one of the following values: 'good', 'warning',
'danger' or any hex color code.
Returns:
response: Response of Slack API.
Raises:
Exception: If the request to the Slack API fails.
|
f3408:c1:m4
|
def load_data(flist, drop_duplicates=False):
|
if (len(flist['<STR_LIT:train>'])==<NUM_LIT:0>) or (len(flist['<STR_LIT:target>'])==<NUM_LIT:0>) or (len(flist['<STR_LIT:test>'])==<NUM_LIT:0>):<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>X_train = pd.DataFrame()<EOL>test = pd.DataFrame()<EOL>print('<STR_LIT>')<EOL>for i in flist['<STR_LIT:train>']:<EOL><INDENT>X_train = pd.concat([X_train, paratext.load_csv_to_pandas(PATH+i, allow_quoted_newlines=True)],axis=<NUM_LIT:1>)<EOL><DEDENT>print('<STR_LIT>')<EOL>print('<STR_LIT>')<EOL>y_train = paratext.load_csv_to_pandas(PATH+flist['<STR_LIT:target>'][<NUM_LIT:0>], allow_quoted_newlines=True)['<STR_LIT:target>']<EOL>print('<STR_LIT>')<EOL>for i in flist['<STR_LIT:test>']:<EOL><INDENT>test = pd.concat([test, paratext.load_csv_to_pandas(PATH+i, allow_quoted_newlines=True)],axis=<NUM_LIT:1>)<EOL><DEDENT>assert( (False in X_train.columns == test.columns) == False)<EOL>print('<STR_LIT>'.format(X_train.shape))<EOL>if drop_duplicates == True:<EOL><INDENT>unique_col = X_train.T.drop_duplicates().T.columns<EOL>X_train = X_train[unique_col]<EOL>test = test[unique_col]<EOL>assert( all(X_train.columns == test.columns))<EOL>print('<STR_LIT>'.format(X_train.shape))<EOL><DEDENT>return X_train, y_train, test<EOL>
|
Usage: set feature file paths under the 'train', 'target', and 'test' keys.
FEATURE_LIST_stage2 = {
'train':(
TEMP_PATH + 'v1_stage1_all_fold.csv',
TEMP_PATH + 'v2_stage1_all_fold.csv',
TEMP_PATH + 'v3_stage1_all_fold.csv',
),#target is not in 'train'
'target':(
INPUT_PATH + 'target.csv',
),#target is in 'target'
'test':(
TEMP_PATH + 'v1_stage1_test.csv',
TEMP_PATH + 'v2_stage1_test.csv',
TEMP_PATH + 'v3_stage1_test.csv',
),
}
|
f3410:m1
|
def __init__(self, name="<STR_LIT>", flist={}, params={}, kind='<STR_LIT:s>', fold_name=cv_id_name):
|
if BaseModel.problem_type == '<STR_LIT>':<EOL><INDENT>if not ((BaseModel.classification_type in classification_type_list)<EOL>and (BaseModel.eval_type in eval_type_list)):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif BaseModel.problem_type == '<STR_LIT>':<EOL><INDENT>if not BaseModel.eval_type in eval_type_list:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.name = name<EOL>self.flist = flist<EOL>self.params = params<EOL>self.kind = kind<EOL>self.fold_name = fold_name<EOL>assert(self.kind in ['<STR_LIT:s>', '<STR_LIT:t>', '<STR_LIT>', '<STR_LIT>'])<EOL>
|
name: Model name
flist: Feature list
params: Parameters
kind: Kind of run()
{'s': Stacking only. Saving an out-of-fold (oof) prediction ({}_all_fold.csv)
and the averaged test prediction from the fold-trained models ({}_test.csv).
't': Training on all data and predicting test ({}_TestInAllTrainingData.csv).
'st': Stacking, then training on all data and predicting test,
using the final model saved during cross-validation.
'cv': Cross-validation only, without saving the predictions.}
|
f3410:c0:m0
|
@classmethod<EOL><INDENT>def set_prob_type(cls, problem_type, classification_type, eval_type):<DEDENT>
|
assert problem_type in problem_type_list, '<STR_LIT>'<EOL>if problem_type == '<STR_LIT>':<EOL><INDENT>assert classification_type in classification_type_list,'<STR_LIT>'<EOL><DEDENT>assert eval_type in eval_type_list, '<STR_LIT>'<EOL>cls.problem_type = problem_type<EOL>cls.classification_type = classification_type<EOL>cls.eval_type = eval_type<EOL>if cls.problem_type == '<STR_LIT>':<EOL><INDENT>print('<STR_LIT>'.format(cls.problem_type,<EOL>cls.classification_type,<EOL>cls.eval_type))<EOL><DEDENT>elif cls.problem_type == '<STR_LIT>':<EOL><INDENT>print('<STR_LIT>'.format(cls.problem_type,<EOL>cls.eval_type))<EOL><DEDENT>return<EOL>
|
Set problem type
|
f3410:c0:m1
|
def make_multi_cols(self, num_class, name):
|
cols = ['<STR_LIT:c>' + str(i) + '<STR_LIT:_>' for i in range(num_class)]<EOL>cols = [x + name for x in cols]<EOL>return cols<EOL>
|
Make column names for multi-class predictions.
|
f3410:c0:m3
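A worked example for make_multi_cols above; the unmasked literal tokens show the prefix is 'c' and the separator '_', so the output is deterministic. `model` (a BaseModel instance) and the name 'xgb' are placeholders.

cols = model.make_multi_cols(num_class=3, name='xgb')
assert cols == ['c0_xgb', 'c1_xgb', 'c2_xgb']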
|
def load_data(self):
|
return load_data(self.flist, drop_duplicates=False )<EOL>
|
Passing a serialized object via flist would make this more efficient.
The data structure is taken into account here.
|
f3410:c0:m5
|
def datagram_received(self, data, addr):
|
channel = self.peer_to_channel.get(addr)<EOL>if channel:<EOL><INDENT>self.client_protocol._send(struct.pack('<STR_LIT>', channel, len(data)) + data,<EOL>self.client_address)<EOL><DEDENT>
|
Relay data from peer to client.
|
f3435:c0:m2
|
def error_response(self, request, code, message):
|
response = stun.Message(<EOL>message_method=request.message_method,<EOL>message_class=stun.Class.ERROR,<EOL>transaction_id=request.transaction_id)<EOL>response.attributes['<STR_LIT>'] = (code, message)<EOL>return response<EOL>
|
Build an error response for the given request.
|
f3435:c1:m7
|
def candidate_foundation(candidate_type, candidate_transport, base_address):
|
key = '<STR_LIT>' % (candidate_type, candidate_transport, base_address)<EOL>return hashlib.md5(key.encode('<STR_LIT:ascii>')).hexdigest()<EOL>
|
See RFC 5245 - 4.1.1.3. Computing Foundations
|
f3439:m0
|
def candidate_priority(candidate_component, candidate_type, local_pref=<NUM_LIT>):
|
if candidate_type == '<STR_LIT:host>':<EOL><INDENT>type_pref = <NUM_LIT><EOL><DEDENT>elif candidate_type == '<STR_LIT>':<EOL><INDENT>type_pref = <NUM_LIT><EOL><DEDENT>elif candidate_type == '<STR_LIT>':<EOL><INDENT>type_pref = <NUM_LIT:100><EOL><DEDENT>else:<EOL><INDENT>type_pref = <NUM_LIT:0><EOL><DEDENT>return (<NUM_LIT:1> << <NUM_LIT>) * type_pref +(<NUM_LIT:1> << <NUM_LIT:8>) * local_pref +(<NUM_LIT> - candidate_component)<EOL>
|
See RFC 5245 - 4.1.2.1. Recommended Formula
|
f3439:m1
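A worked sketch of the RFC 5245 4.1.2.1 formula computed above, assuming the masked type-preference constants are the RFC defaults (host=126, prflx=110, srflx=100, relay=0):

# priority = (2**24) * type_pref + (2**8) * local_pref + (256 - component)
host_priority = (1 << 24) * 126 + (1 << 8) * 65535 + (256 - 1)
assert host_priority == 2130706431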
|
@classmethod<EOL><INDENT>def from_sdp(cls, sdp):<DEDENT>
|
bits = sdp.split()<EOL>if len(bits) < <NUM_LIT:8>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>kwargs = {<EOL>'<STR_LIT>': bits[<NUM_LIT:0>],<EOL>'<STR_LIT>': int(bits[<NUM_LIT:1>]),<EOL>'<STR_LIT>': bits[<NUM_LIT:2>],<EOL>'<STR_LIT>': int(bits[<NUM_LIT:3>]),<EOL>'<STR_LIT:host>': bits[<NUM_LIT:4>],<EOL>'<STR_LIT:port>': int(bits[<NUM_LIT:5>]),<EOL>'<STR_LIT:type>': bits[<NUM_LIT:7>],<EOL>}<EOL>for i in range(<NUM_LIT:8>, len(bits) - <NUM_LIT:1>, <NUM_LIT:2>):<EOL><INDENT>if bits[i] == '<STR_LIT>':<EOL><INDENT>kwargs['<STR_LIT>'] = bits[i + <NUM_LIT:1>]<EOL><DEDENT>elif bits[i] == '<STR_LIT>':<EOL><INDENT>kwargs['<STR_LIT>'] = int(bits[i + <NUM_LIT:1>])<EOL><DEDENT>elif bits[i] == '<STR_LIT>':<EOL><INDENT>kwargs['<STR_LIT>'] = bits[i + <NUM_LIT:1>]<EOL><DEDENT>elif bits[i] == '<STR_LIT>':<EOL><INDENT>kwargs['<STR_LIT>'] = int(bits[i + <NUM_LIT:1>])<EOL><DEDENT><DEDENT>return Candidate(**kwargs)<EOL>
|
Parse a :class:`Candidate` from SDP.
.. code-block:: python
Candidate.from_sdp(
'6815297761 1 udp 659136 1.2.3.4 31102 typ host generation 0')
|
f3439:c0:m1
|
def to_sdp(self):
|
sdp = '<STR_LIT>' % (<EOL>self.foundation,<EOL>self.component,<EOL>self.transport,<EOL>self.priority,<EOL>self.host,<EOL>self.port,<EOL>self.type)<EOL>if self.related_address is not None:<EOL><INDENT>sdp += '<STR_LIT>' % self.related_address<EOL><DEDENT>if self.related_port is not None:<EOL><INDENT>sdp += '<STR_LIT>' % self.related_port<EOL><DEDENT>if self.tcptype is not None:<EOL><INDENT>sdp += '<STR_LIT>' % self.tcptype<EOL><DEDENT>if self.generation is not None:<EOL><INDENT>sdp += '<STR_LIT>' % self.generation<EOL><DEDENT>return sdp<EOL>
|
Return a string representation suitable for SDP.
|
f3439:c0:m2
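A hedged round-trip sketch combining from_sdp (using the docstring example above) with to_sdp; the exact masked format string is unknown, so the round-trip equality is an expectation rather than a guarantee.

line = '6815297761 1 udp 659136 1.2.3.4 31102 typ host generation 0'
c = Candidate.from_sdp(line)
print(c.to_sdp())  # expected to reproduce `line`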
|
def can_pair_with(self, other):
|
a = ipaddress.ip_address(self.host)<EOL>b = ipaddress.ip_address(other.host)<EOL>return (<EOL>self.component == other.component and<EOL>self.transport.lower() == other.transport.lower() and<EOL>a.version == b.version<EOL>)<EOL>
|
A local candidate is paired with a remote candidate if and only if
the two candidates have the same component ID, use the same transport
and have the same IP address version.
|
f3439:c0:m3
|
async def create_turn_endpoint(protocol_factory, server_addr, username, password,<EOL>lifetime=<NUM_LIT>, ssl=False, transport='<STR_LIT>'):
|
loop = asyncio.get_event_loop()<EOL>if transport == '<STR_LIT>':<EOL><INDENT>_, inner_protocol = await loop.create_connection(<EOL>lambda: TurnClientTcpProtocol(server_addr,<EOL>username=username,<EOL>password=password,<EOL>lifetime=lifetime),<EOL>host=server_addr[<NUM_LIT:0>],<EOL>port=server_addr[<NUM_LIT:1>],<EOL>ssl=ssl)<EOL><DEDENT>else:<EOL><INDENT>_, inner_protocol = await loop.create_datagram_endpoint(<EOL>lambda: TurnClientUdpProtocol(server_addr,<EOL>username=username,<EOL>password=password,<EOL>lifetime=lifetime),<EOL>remote_addr=server_addr)<EOL><DEDENT>protocol = protocol_factory()<EOL>transport = TurnTransport(protocol, inner_protocol)<EOL>await transport._connect()<EOL>return transport, protocol<EOL>
|
Create datagram connection relayed over TURN.
|
f3441:m2
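A hedged usage sketch for create_turn_endpoint: relaying a plain DatagramProtocol over a TURN server. The server address, credentials, and peer address are placeholders.

import asyncio

class EchoProtocol(asyncio.DatagramProtocol):
    def datagram_received(self, data, addr):
        print('received %r from %s' % (data, addr))

async def main():
    transport, protocol = await create_turn_endpoint(
        EchoProtocol,
        server_addr=('turn.example.com', 3478),   # placeholder server
        username='user', password='secret')       # placeholder credentials
    transport.sendto(b'hello', ('192.0.2.1', 40000))  # documentation peer
    transport.close()

asyncio.get_event_loop().run_until_complete(main())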
|
async def connect(self):
|
request = stun.Message(message_method=stun.Method.ALLOCATE,<EOL>message_class=stun.Class.REQUEST)<EOL>request.attributes['<STR_LIT>'] = self.lifetime<EOL>request.attributes['<STR_LIT>'] = UDP_TRANSPORT<EOL>try:<EOL><INDENT>response, _ = await self.request(request)<EOL><DEDENT>except exceptions.TransactionFailed as e:<EOL><INDENT>response = e.response<EOL>if response.attributes['<STR_LIT>'][<NUM_LIT:0>] == <NUM_LIT>:<EOL><INDENT>self.nonce = response.attributes['<STR_LIT>']<EOL>self.realm = response.attributes['<STR_LIT>']<EOL>self.integrity_key = make_integrity_key(self.username, self.realm, self.password)<EOL>request.transaction_id = random_transaction_id()<EOL>response, _ = await self.request(request)<EOL><DEDENT><DEDENT>self.relayed_address = response.attributes['<STR_LIT>']<EOL>logger.info('<STR_LIT>', self.relayed_address)<EOL>self.refresh_handle = asyncio.ensure_future(self.refresh())<EOL>return self.relayed_address<EOL>
|
Create a TURN allocation.
|
f3441:c1:m2
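The 401 retry above switches to the STUN long-term credential mechanism. A hedged sketch of the standard key derivation (RFC 5389, section 15.4), which the make_integrity_key helper referenced in the body presumably implements:

import hashlib

def make_integrity_key(username, realm, password):
    # key = MD5(username ':' realm ':' password)
    return hashlib.md5(':'.join([username, realm, password]).encode('utf8')).digest()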
|
async def delete(self):
|
if self.refresh_handle:<EOL><INDENT>self.refresh_handle.cancel()<EOL>self.refresh_handle = None<EOL><DEDENT>request = stun.Message(message_method=stun.Method.REFRESH,<EOL>message_class=stun.Class.REQUEST)<EOL>request.attributes['<STR_LIT>'] = <NUM_LIT:0><EOL>await self.request(request)<EOL>logger.info('<STR_LIT>', self.relayed_address)<EOL>if self.receiver:<EOL><INDENT>self.receiver.connection_lost(None)<EOL><DEDENT>
|
Delete the TURN allocation.
|
f3441:c1:m5
|
async def refresh(self):
|
while True:<EOL><INDENT>await asyncio.sleep(<NUM_LIT:5>/<NUM_LIT:6> * self.lifetime)<EOL>request = stun.Message(message_method=stun.Method.REFRESH,<EOL>message_class=stun.Class.REQUEST)<EOL>request.attributes['<STR_LIT>'] = self.lifetime<EOL>await self.request(request)<EOL>logger.info('<STR_LIT>', self.relayed_address)<EOL><DEDENT>
|
Periodically refresh the TURN allocation.
|
f3441:c1:m6
|
async def request(self, request):
|
assert request.transaction_id not in self.transactions<EOL>if self.integrity_key:<EOL><INDENT>self.__add_authentication(request)<EOL><DEDENT>transaction = stun.Transaction(request, self.server, self)<EOL>self.transactions[request.transaction_id] = transaction<EOL>try:<EOL><INDENT>return await transaction.run()<EOL><DEDENT>finally:<EOL><INDENT>del self.transactions[request.transaction_id]<EOL><DEDENT>
|
Execute a STUN transaction and return the response.
|
f3441:c1:m7
|
async def send_data(self, data, addr):
|
channel = self.peer_to_channel.get(addr)<EOL>if channel is None:<EOL><INDENT>channel = self.channel_number<EOL>self.channel_number += <NUM_LIT:1><EOL>self.channel_to_peer[channel] = addr<EOL>self.peer_to_channel[addr] = channel<EOL>await self.channel_bind(channel, addr)<EOL><DEDENT>header = struct.pack('<STR_LIT>', channel, len(data))<EOL>self._send(header + data)<EOL>
|
Send data to a remote host via the TURN server.
|
f3441:c1:m8
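A hedged sketch of the ChannelData framing used above (RFC 5766, section 11.4): a 4-byte header of 16-bit channel number and 16-bit payload length is prepended to the payload. The '!HH' struct format is an assumption, since the literal in the body is masked.

import struct

def frame_channel_data(channel, payload):
    return struct.pack('!HH', channel, len(payload)) + payload

assert frame_channel_data(0x4000, b'hi') == b'\x40\x00\x00\x02hi'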
|
def send_stun(self, message, addr):
|
logger.debug('<STR_LIT>', self, addr, message)<EOL>self._send(bytes(message))<EOL>
|
Send a STUN message to the TURN server.
|
f3441:c1:m9
|
def close(self):
|
asyncio.ensure_future(self.__inner_protocol.delete())<EOL>
|
Close the transport.
After the TURN allocation has been deleted, the protocol's
`connection_lost()` method will be called with None as its argument.
|
f3441:c4:m1
|
def get_extra_info(self, name, default=None):
|
if name == '<STR_LIT>':<EOL><INDENT>return self.__inner_protocol.transport.get_extra_info('<STR_LIT>')<EOL><DEDENT>elif name == '<STR_LIT>':<EOL><INDENT>return self.__relayed_address<EOL><DEDENT>return default<EOL>
|
Return optional transport information.
- `'related_address'`: the related address
- `'sockname'`: the relayed address
|
f3441:c4:m2
|
def sendto(self, data, addr):
|
asyncio.ensure_future(self.__inner_protocol.send_data(data, addr))<EOL>
|
Send the `data` bytes to the remote peer at `addr`.
This will bind a TURN channel as necessary.
|
f3441:c4:m3
|
def candidate_pair_priority(local, remote, ice_controlling):
|
G = ice_controlling and local.priority or remote.priority<EOL>D = ice_controlling and remote.priority or local.priority<EOL>return (<NUM_LIT:1> << <NUM_LIT:32>) * min(G, D) + <NUM_LIT:2> * max(G, D) + (G > D and <NUM_LIT:1> or <NUM_LIT:0>)<EOL>
|
See RFC 5245 - 5.7.2. Computing Pair Priority and Ordering Pairs
|
f3444:m0
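A worked example of the RFC 5245 5.7.2 formula computed above, using the RFC-default host and server-reflexive priorities for component 1 (see the candidate_priority example earlier):

# pair_priority = 2**32 * min(G, D) + 2 * max(G, D) + (1 if G > D else 0)
G = 2130706431  # controlling agent's candidate priority (host)
D = 1694498815  # controlled agent's candidate priority (server-reflexive)
pair = (1 << 32) * min(G, D) + 2 * max(G, D) + (1 if G > D else 0)
assert pair == (1 << 32) * D + 2 * G + 1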
|
def get_host_addresses(use_ipv4, use_ipv6):
|
addresses = []<EOL>for interface in netifaces.interfaces():<EOL><INDENT>ifaddresses = netifaces.ifaddresses(interface)<EOL>for address in ifaddresses.get(socket.AF_INET, []):<EOL><INDENT>if use_ipv4 and address['<STR_LIT>'] != '<STR_LIT:127.0.0.1>':<EOL><INDENT>addresses.append(address['<STR_LIT>'])<EOL><DEDENT><DEDENT>for address in ifaddresses.get(socket.AF_INET6, []):<EOL><INDENT>if use_ipv6 and address['<STR_LIT>'] != '<STR_LIT>' and '<STR_LIT:%>' not in address['<STR_LIT>']:<EOL><INDENT>addresses.append(address['<STR_LIT>'])<EOL><DEDENT><DEDENT><DEDENT>return addresses<EOL>
|
Get local IP addresses.
|
f3444:m1
|
async def server_reflexive_candidate(protocol, stun_server):
|
<EOL>loop = asyncio.get_event_loop()<EOL>stun_server = (<EOL>await loop.run_in_executor(None, socket.gethostbyname, stun_server[<NUM_LIT:0>]),<EOL>stun_server[<NUM_LIT:1>])<EOL>request = stun.Message(message_method=stun.Method.BINDING,<EOL>message_class=stun.Class.REQUEST)<EOL>response, _ = await protocol.request(request, stun_server)<EOL>local_candidate = protocol.local_candidate<EOL>return Candidate(<EOL>foundation=candidate_foundation('<STR_LIT>', '<STR_LIT>', local_candidate.host),<EOL>component=local_candidate.component,<EOL>transport=local_candidate.transport,<EOL>priority=candidate_priority(local_candidate.component, '<STR_LIT>'),<EOL>host=response.attributes['<STR_LIT>'][<NUM_LIT:0>],<EOL>port=response.attributes['<STR_LIT>'][<NUM_LIT:1>],<EOL>type='<STR_LIT>',<EOL>related_address=local_candidate.host,<EOL>related_port=local_candidate.port)<EOL>
|
Query STUN server to obtain a server-reflexive candidate.
|
f3444:m2
|
def sort_candidate_pairs(pairs, ice_controlling):
|
def pair_priority(pair):<EOL><INDENT>return -candidate_pair_priority(pair.local_candidate,<EOL>pair.remote_candidate,<EOL>ice_controlling)<EOL><DEDENT>pairs.sort(key=pair_priority)<EOL>
|
Sort a list of candidate pairs.
|
f3444:m3
|
async def request(self, request, addr, integrity_key=None, retransmissions=None):
|
assert request.transaction_id not in self.transactions<EOL>if integrity_key is not None:<EOL><INDENT>request.add_message_integrity(integrity_key)<EOL>request.add_fingerprint()<EOL><DEDENT>transaction = stun.Transaction(request, addr, self, retransmissions=retransmissions)<EOL>transaction.integrity_key = integrity_key<EOL>self.transactions[request.transaction_id] = transaction<EOL>try:<EOL><INDENT>return await transaction.run()<EOL><DEDENT>finally:<EOL><INDENT>del self.transactions[request.transaction_id]<EOL><DEDENT>
|
Execute a STUN transaction and return the response.
|
f3444:c1:m6
|
def send_stun(self, message, addr):
|
self.__log_debug('<STR_LIT>', addr, message)<EOL>self.transport.sendto(bytes(message), addr)<EOL>
|
Send a STUN message.
|
f3444:c1:m8
|
@property<EOL><INDENT>def local_candidates(self):<DEDENT>
|
return self._local_candidates[:]<EOL>
|
Local candidates, automatically set by :meth:`gather_candidates`.
|
f3444:c2:m1
|
@property<EOL><INDENT>def remote_candidates(self):<DEDENT>
|
return self._remote_candidates[:]<EOL>
|
Remote candidates, which you need to set.
Assigning this attribute will automatically signal end-of-candidates.
If you will be adding more remote candidates in the future, use the
:meth:`add_remote_candidate` method instead.
|
f3444:c2:m2
|
def add_remote_candidate(self, remote_candidate):
|
if self._remote_candidates_end:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if remote_candidate is None:<EOL><INDENT>self._prune_components()<EOL>self._remote_candidates_end = True<EOL>return<EOL><DEDENT>self._remote_candidates.append(remote_candidate)<EOL>for protocol in self._protocols:<EOL><INDENT>if (protocol.local_candidate.can_pair_with(remote_candidate) and<EOL>not self._find_pair(protocol, remote_candidate)):<EOL><INDENT>pair = CandidatePair(protocol, remote_candidate)<EOL>self._check_list.append(pair)<EOL><DEDENT><DEDENT>self.sort_check_list()<EOL>
|
Add a remote candidate or signal end-of-candidates.
To signal end-of-candidates, pass `None`.
|
f3444:c2:m4
|
async def gather_candidates(self):
|
if not self._local_candidates_start:<EOL><INDENT>self._local_candidates_start = True<EOL>addresses = get_host_addresses(use_ipv4=self._use_ipv4, use_ipv6=self._use_ipv6)<EOL>for component in self._components:<EOL><INDENT>self._local_candidates += await self.get_component_candidates(<EOL>component=component,<EOL>addresses=addresses)<EOL><DEDENT>self._local_candidates_end = True<EOL><DEDENT>
|
Gather local candidates.
You **must** call this coroutine before calling :meth:`connect`.
|
f3444:c2:m5
|
def get_default_candidate(self, component):
|
for candidate in sorted(self._local_candidates, key=lambda x: x.priority):<EOL><INDENT>if candidate.component == component:<EOL><INDENT>return candidate<EOL><DEDENT><DEDENT>
|
Gets the default local candidate for the specified component.
|
f3444:c2:m6
|
async def connect(self):
|
if not self._local_candidates_end:<EOL><INDENT>raise ConnectionError('<STR_LIT>')<EOL><DEDENT>if (self.remote_username is None or<EOL>self.remote_password is None):<EOL><INDENT>raise ConnectionError('<STR_LIT>')<EOL><DEDENT>for remote_candidate in self._remote_candidates:<EOL><INDENT>for protocol in self._protocols:<EOL><INDENT>if (protocol.local_candidate.can_pair_with(remote_candidate) and<EOL>not self._find_pair(protocol, remote_candidate)):<EOL><INDENT>pair = CandidatePair(protocol, remote_candidate)<EOL>self._check_list.append(pair)<EOL><DEDENT><DEDENT><DEDENT>self.sort_check_list()<EOL>self._unfreeze_initial()<EOL>for check in self._early_checks:<EOL><INDENT>self.check_incoming(*check)<EOL><DEDENT>self._early_checks = []<EOL>while True:<EOL><INDENT>if not self.check_periodic():<EOL><INDENT>break<EOL><DEDENT>await asyncio.sleep(<NUM_LIT>)<EOL><DEDENT>if self._check_list:<EOL><INDENT>res = await self._check_list_state.get()<EOL><DEDENT>else:<EOL><INDENT>res = ICE_FAILED<EOL><DEDENT>for check in self._check_list:<EOL><INDENT>if check.handle:<EOL><INDENT>check.handle.cancel()<EOL><DEDENT><DEDENT>if res != ICE_COMPLETED:<EOL><INDENT>raise ConnectionError('<STR_LIT>')<EOL><DEDENT>self._query_consent_handle = asyncio.ensure_future(self.query_consent())<EOL>
|
Perform ICE handshake.
This coroutine returns once a candidate pair has been successfully nominated
and raises an exception otherwise.
|
f3444:c2:m7
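A hedged end-to-end sketch around connect above: gather candidates, exchange them over an application-defined signaling channel (elided here as the hypothetical signal_send / signal_recv), then connect and exchange data.

async def establish(conn, signal_send, signal_recv):
    await conn.gather_candidates()
    await signal_send(conn.local_candidates)
    remote = await signal_recv()               # hypothetical payload
    conn.remote_username = remote['username']  # required before connect()
    conn.remote_password = remote['password']
    for candidate in remote['candidates']:
        conn.add_remote_candidate(candidate)
    conn.add_remote_candidate(None)            # end-of-candidates
    await conn.connect()
    await conn.send(b'ping')
    reply = await conn.recv()
    await conn.close()
    return reply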
|
async def close(self):
|
<EOL>if self._query_consent_handle and not self._query_consent_handle.done():<EOL><INDENT>self._query_consent_handle.cancel()<EOL>try:<EOL><INDENT>await self._query_consent_handle<EOL><DEDENT>except asyncio.CancelledError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>if self._check_list and not self._check_list_done:<EOL><INDENT>await self._check_list_state.put(ICE_FAILED)<EOL><DEDENT>self._nominated.clear()<EOL>for protocol in self._protocols:<EOL><INDENT>await protocol.close()<EOL><DEDENT>self._protocols.clear()<EOL>self._local_candidates.clear()<EOL>
|
Close the connection.
|
f3444:c2:m8
|
async def recv(self):
|
data, component = await self.recvfrom()<EOL>return data<EOL>
|
Receive the next datagram.
The return value is a `bytes` object representing the data received.
If the connection is not established, a `ConnectionError` is raised.
|
f3444:c2:m9
|
async def recvfrom(self):
|
if not len(self._nominated):<EOL><INDENT>raise ConnectionError('<STR_LIT>')<EOL><DEDENT>result = await self._queue.get()<EOL>if result[<NUM_LIT:0>] is None:<EOL><INDENT>raise ConnectionError('<STR_LIT>')<EOL><DEDENT>return result<EOL>
|
Receive the next datagram.
The return value is a `(bytes, component)` tuple where `bytes` is a
bytes object representing the data received and `component` is the
component on which the data was received.
If the connection is not established, a `ConnectionError` is raised.
|
f3444:c2:m10
|
async def send(self, data):
|
await self.sendto(data, <NUM_LIT:1>)<EOL>
|
Send a datagram on the first component.
If the connection is not established, a `ConnectionError` is raised.
|
f3444:c2:m11
|
async def sendto(self, data, component):
|
active_pair = self._nominated.get(component)<EOL>if active_pair:<EOL><INDENT>await active_pair.protocol.send_data(data, active_pair.remote_addr)<EOL><DEDENT>else:<EOL><INDENT>raise ConnectionError('<STR_LIT>')<EOL><DEDENT>
|
Send a datagram on the specified component.
If the connection is not established, a `ConnectionError` is raised.
|
f3444:c2:m12
|
def set_selected_pair(self, component, local_foundation, remote_foundation):
|
<EOL>protocol = None<EOL>for p in self._protocols:<EOL><INDENT>if (p.local_candidate.component == component and<EOL>p.local_candidate.foundation == local_foundation):<EOL><INDENT>protocol = p<EOL>break<EOL><DEDENT><DEDENT>remote_candidate = None<EOL>for c in self._remote_candidates:<EOL><INDENT>if c.component == component and c.foundation == remote_foundation:<EOL><INDENT>remote_candidate = c<EOL><DEDENT><DEDENT>assert (protocol and remote_candidate)<EOL>self._nominated[component] = CandidatePair(protocol, remote_candidate)<EOL>
|
Force the selected candidate pair.
If the remote party does not support ICE, you should use this
instead of calling :meth:`connect`.
|
f3444:c2:m13
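A hedged sketch for a peer that does not support ICE: skip connect() and force the nominated pair by foundation. Both foundation strings are placeholders.

conn.set_selected_pair(component=1,
                       local_foundation='1abcd',
                       remote_foundation='2efgh')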
|
def check_incoming(self, message, addr, protocol):
|
component = protocol.local_candidate.component<EOL>remote_candidate = None<EOL>for c in self._remote_candidates:<EOL><INDENT>if c.host == addr[<NUM_LIT:0>] and c.port == addr[<NUM_LIT:1>]:<EOL><INDENT>remote_candidate = c<EOL>assert remote_candidate.component == component<EOL>break<EOL><DEDENT><DEDENT>if remote_candidate is None:<EOL><INDENT>remote_candidate = Candidate(<EOL>foundation=random_string(<NUM_LIT:10>),<EOL>component=component,<EOL>transport='<STR_LIT>',<EOL>priority=message.attributes['<STR_LIT>'],<EOL>host=addr[<NUM_LIT:0>],<EOL>port=addr[<NUM_LIT:1>],<EOL>type='<STR_LIT>')<EOL>self._remote_candidates.append(remote_candidate)<EOL>self.__log_info('<STR_LIT>', remote_candidate)<EOL><DEDENT>pair = self._find_pair(protocol, remote_candidate)<EOL>if pair is None:<EOL><INDENT>pair = CandidatePair(protocol, remote_candidate)<EOL>pair.state = CandidatePair.State.WAITING<EOL>self._check_list.append(pair)<EOL>self.sort_check_list()<EOL><DEDENT>if pair.state in [CandidatePair.State.WAITING, CandidatePair.State.FAILED]:<EOL><INDENT>pair.handle = asyncio.ensure_future(self.check_start(pair))<EOL><DEDENT>if '<STR_LIT>' in message.attributes and not self.ice_controlling:<EOL><INDENT>pair.remote_nominated = True<EOL>if pair.state == CandidatePair.State.SUCCEEDED:<EOL><INDENT>pair.nominated = True<EOL>self.check_complete(pair)<EOL><DEDENT><DEDENT>
|
Handle a successful incoming check.
|
f3444:c2:m16
|
async def check_start(self, pair):
|
self.check_state(pair, CandidatePair.State.IN_PROGRESS)<EOL>request = self.build_request(pair)<EOL>try:<EOL><INDENT>response, addr = await pair.protocol.request(<EOL>request, pair.remote_addr,<EOL>integrity_key=self.remote_password.encode('<STR_LIT:utf8>'))<EOL><DEDENT>except exceptions.TransactionError as exc:<EOL><INDENT>if exc.response and exc.response.attributes.get('<STR_LIT>', (None, None))[<NUM_LIT:0>] == <NUM_LIT>:<EOL><INDENT>if '<STR_LIT>' in request.attributes:<EOL><INDENT>self.switch_role(ice_controlling=False)<EOL><DEDENT>elif '<STR_LIT>' in request.attributes:<EOL><INDENT>self.switch_role(ice_controlling=True)<EOL><DEDENT>return await self.check_start(pair)<EOL><DEDENT>else:<EOL><INDENT>self.check_state(pair, CandidatePair.State.FAILED)<EOL>self.check_complete(pair)<EOL>return<EOL><DEDENT><DEDENT>if addr != pair.remote_addr:<EOL><INDENT>self.__log_info('<STR_LIT>', pair)<EOL>self.check_state(pair, CandidatePair.State.FAILED)<EOL>self.check_complete(pair)<EOL>return<EOL><DEDENT>self.check_state(pair, CandidatePair.State.SUCCEEDED)<EOL>if self.ice_controlling or pair.remote_nominated:<EOL><INDENT>pair.nominated = True<EOL><DEDENT>self.check_complete(pair)<EOL>
|
Starts a check.
|
f3444:c2:m18
|
def check_state(self, pair, state):
|
self.__log_info('<STR_LIT>', pair, pair.state, state)<EOL>pair.state = state<EOL>
|
Updates the state of a check.
|
f3444:c2:m19
|
def _find_pair(self, protocol, remote_candidate):
|
for pair in self._check_list:<EOL><INDENT>if (pair.protocol == protocol and pair.remote_candidate == remote_candidate):<EOL><INDENT>return pair<EOL><DEDENT><DEDENT>return None<EOL>
|
Find a candidate pair in the check list.
|
f3444:c2:m20
|
def _prune_components(self):
|
seen_components = set(map(lambda x: x.component, self._remote_candidates))<EOL>missing_components = self._components - seen_components<EOL>if missing_components:<EOL><INDENT>self.__log_info('<STR_LIT>' % missing_components)<EOL>self._components = seen_components<EOL><DEDENT>
|
Remove components for which the remote party did not provide any candidates.
This can only be determined after end-of-candidates.
|
f3444:c2:m22
|
async def query_consent(self):
|
failures = <NUM_LIT:0><EOL>while True:<EOL><INDENT>await asyncio.sleep(CONSENT_INTERVAL * (<NUM_LIT> + <NUM_LIT> * random.random()))<EOL>for pair in self._nominated.values():<EOL><INDENT>request = self.build_request(pair)<EOL>try:<EOL><INDENT>await pair.protocol.request(<EOL>request, pair.remote_addr,<EOL>integrity_key=self.remote_password.encode('<STR_LIT:utf8>'),<EOL>retransmissions=<NUM_LIT:0>)<EOL>failures = <NUM_LIT:0><EOL><DEDENT>except exceptions.TransactionError:<EOL><INDENT>failures += <NUM_LIT:1><EOL><DEDENT>if failures >= CONSENT_FAILURES:<EOL><INDENT>self.__log_info('<STR_LIT>')<EOL>self._query_consent_handle = None<EOL>return await self.close()<EOL><DEDENT><DEDENT><DEDENT>
|
Periodically check consent (RFC 7675).
|
f3444:c2:m23
|
def parse_message(data, integrity_key=None):
|
if len(data) < HEADER_LENGTH:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>message_type, length, cookie, transaction_id = unpack('<STR_LIT>', data[<NUM_LIT:0>:HEADER_LENGTH])<EOL>if len(data) != HEADER_LENGTH + length:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>attributes = OrderedDict()<EOL>pos = HEADER_LENGTH<EOL>while pos <= len(data) - <NUM_LIT:4>:<EOL><INDENT>attr_type, attr_len = unpack('<STR_LIT>', data[pos:pos + <NUM_LIT:4>])<EOL>v = data[pos + <NUM_LIT:4>:pos + <NUM_LIT:4> + attr_len]<EOL>pad_len = <NUM_LIT:4> * ((attr_len + <NUM_LIT:3>) // <NUM_LIT:4>) - attr_len<EOL>if attr_type in ATTRIBUTES_BY_TYPE:<EOL><INDENT>_, attr_name, attr_pack, attr_unpack = ATTRIBUTES_BY_TYPE[attr_type]<EOL>if attr_unpack == unpack_xor_address:<EOL><INDENT>attributes[attr_name] = attr_unpack(v, transaction_id=transaction_id)<EOL><DEDENT>else:<EOL><INDENT>attributes[attr_name] = attr_unpack(v)<EOL><DEDENT>if attr_name == '<STR_LIT>':<EOL><INDENT>if attributes[attr_name] != message_fingerprint(data[<NUM_LIT:0>:pos]):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif attr_name == '<STR_LIT>':<EOL><INDENT>if (integrity_key is not None and<EOL>attributes[attr_name] != message_integrity(data[<NUM_LIT:0>:pos], integrity_key)):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>pos += <NUM_LIT:4> + attr_len + pad_len<EOL><DEDENT>return Message(<EOL>message_method=message_type & <NUM_LIT>,<EOL>message_class=message_type & <NUM_LIT>,<EOL>transaction_id=transaction_id,<EOL>attributes=attributes)<EOL>
|
Parses a STUN message.
If the ``integrity_key`` parameter is given, the message's HMAC will be verified.
|
f3445:m22
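A hedged sketch of the fixed 20-byte STUN header parsed above (RFC 5389, section 6): 16-bit message type, 16-bit body length, 32-bit magic cookie, and a 96-bit transaction id. The '!HHI12s' format string is an assumption; the literal in the body is masked.

import struct

HEADER_LENGTH = 20

def parse_header(data):
    message_type, length, cookie, transaction_id = struct.unpack(
        '!HHI12s', data[:HEADER_LENGTH])
    return message_type, length, cookie, transaction_id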
|