_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q41000
DCSVectorizer._semsim
train
def _semsim(self, c1, c2):
    """
    Computes the semantic similarity between two concepts.

    The semantic similarity is a combination of two sem sims:

    1. An "explicit" sem sim metric, that is, one which is directly
       encoded in the WordNet graph. Here it is just Wu-Palmer similarity.
    2. An "implicit" sem sim metric. See `_imp_semsim`.

    Note we can't use the NLTK Wu-Palmer similarity implementation because
    we need to incorporate the implicit sem sim, but it's fairly
    straightforward -- leaning on
    <http://www.nltk.org/_modules/nltk/corpus/reader/wordnet.html#Synset.wup_similarity>,
    see that for more info.

    Though...the formula in the paper includes an extra term in the
    denominator, which is wrong, so we leave it out.
    """
    # Identical concepts are maximally similar.
    if c1 == c2:
        return 1.
    # Memoized results are stored unordered: check both key orders.
    if (c1, c2) in self.concept_sims:
        return self.concept_sims[(c1, c2)]
    elif (c2, c1) in self.concept_sims:
        return self.concept_sims[(c2, c1)]
    else:
        # c1/c2 are assumed to be NLTK WordNet Synset objects -- the
        # private Synset APIs below depend on that. TODO confirm.
        need_root = c1._needs_root()
        subsumers = c1.lowest_common_hypernyms(c2, simulate_root=need_root)
        if not subsumers:
            # For relationships not in WordNet, fallback on just implicit semsim.
            return self._imp_semsim(c1, c2)
        subsumer = subsumers[0]
        # +1 because the root node is counted as depth 0 by max_depth().
        depth = subsumer.max_depth() + 1
        len1 = c1.shortest_path_distance(subsumer, simulate_root=need_root)
        len2 = c2.shortest_path_distance(subsumer, simulate_root=need_root)
        if len1 is None or len2 is None:
            # See above
            return self._imp_semsim(c1, c2)
        len1 += depth
        len2 += depth
        # Wu-Palmer-style ratio, with the implicit score folded into both
        # numerator and denominator.
        imp_score = self._imp_semsim(c1, c2)
        sim = (2.*depth + imp_score)/(len1 + len2 + imp_score)
        self.concept_sims[(c1, c2)] = sim
        return sim
python
{ "resource": "" }
q41001
DCSVectorizer._core_semantics
train
def _core_semantics(self, lex_chains, concept_weights): """ Returns the n representative lexical chains for a document. """ chain_scores = [self._score_chain(lex_chain, adj_submat, concept_weights) for lex_chain, adj_submat in lex_chains] scored_chains = zip(lex_chains, chain_scores) scored_chains = sorted(scored_chains, key=lambda x: x[1], reverse=True) thresh = (self.alpha/len(lex_chains)) * sum(chain_scores) return [chain for (chain, adj_mat), score in scored_chains if score >= thresh][:self.n_chains]
python
{ "resource": "" }
q41002
DCSVectorizer._extract_core_semantics
train
def _extract_core_semantics(self, docs): """ Extracts core semantics for a list of documents, returning them along with a list of all the concepts represented. """ all_concepts = [] doc_core_sems = [] for doc in docs: core_sems = self._process_doc(doc) doc_core_sems.append(core_sems) all_concepts += [con for con, weight in core_sems] return doc_core_sems, list(set(all_concepts))
python
{ "resource": "" }
q41003
DCSVectorizer._lexical_chains
train
def _lexical_chains(self, doc, term_concept_map):
    """
    Builds lexical chains, as an adjacency matrix, using a disambiguated
    term-concept map.

    Returns a list of (concept list, adjacency sub-matrix) tuples, one per
    connected component of the concept relation graph.
    """
    # Deduplicate the mapped concepts (values may repeat across terms).
    concepts = list({c for c in term_concept_map.values()})
    # Build an adjacency matrix for the graph
    # Using the encoding:
    # 1 = identity/synonymy, 2 = hypernymy/hyponymy, 3 = meronymy, 0 = no edge
    n_cons = len(concepts)
    adj_mat = np.zeros((n_cons, n_cons))
    for i, c in enumerate(concepts):
        # TO DO can only do i >= j since the graph is undirected
        for j, c_ in enumerate(concepts):
            edge = 0
            if c == c_:
                edge = 1
            # TO DO when should simulate root be True?
            # NOTE(review): relies on the private Synset API
            # _shortest_hypernym_paths -- assumes concepts are NLTK
            # WordNet Synsets. TODO confirm.
            elif c_ in c._shortest_hypernym_paths(simulate_root=False).keys():
                edge = 2
            elif c in c_._shortest_hypernym_paths(simulate_root=False).keys():
                edge = 2
            elif c_ in c.member_meronyms() + c.part_meronyms() + c.substance_meronyms():
                edge = 3
            elif c in c_.member_meronyms() + c_.part_meronyms() + c_.substance_meronyms():
                edge = 3
            adj_mat[i,j] = edge
    # Group connected concepts by labels
    # (connected_components presumably comes from scipy.sparse.csgraph;
    # [1] is the per-node label array.)
    concept_labels = connected_components(adj_mat, directed=False)[1]
    lexical_chains = [([], []) for i in range(max(concept_labels) + 1)]
    for i, concept in enumerate(concepts):
        label = concept_labels[i]
        lexical_chains[label][0].append(concept)
        lexical_chains[label][1].append(i)
    # Return the lexical chains as (concept list, adjacency sub-matrix) tuples
    return [(chain, adj_mat[indices][:,indices]) for chain, indices in lexical_chains]
python
{ "resource": "" }
q41004
DCSVectorizer._score_chain
train
def _score_chain(self, lexical_chain, adj_submat, concept_weights): """ Computes the score for a lexical chain. """ scores = [] # Compute scores for concepts in the chain for i, c in enumerate(lexical_chain): score = concept_weights[c] * self.relation_weights[0] rel_scores = [] for j, c_ in enumerate(lexical_chain): if adj_submat[i,j] == 2: rel_scores.append(self.relation_weights[1] * concept_weights[c_]) elif adj_submat[i,j] == 3: rel_scores.append(self.relation_weights[2] * concept_weights[c_]) scores.append(score + sum(rel_scores)) # The chain's score is just the sum of its concepts' scores return sum(scores)
python
{ "resource": "" }
q41005
DCSVectorizer._weight_concepts
train
def _weight_concepts(self, tokens, term_concept_map): """ Calculates weights for concepts in a document. This is just the frequency of terms which map to a concept. """ weights = {c: 0 for c in term_concept_map.values()} for t in tokens: # Skip terms that aren't one of the PoS we used if t not in term_concept_map: continue con = term_concept_map[t] weights[con] += 1 # TO DO paper doesn't mention normalizing these weights...should we? return weights
python
{ "resource": "" }
q41006
DCSVectorizer._description
train
def _description(self, concept):
    """
    Returns a "description" of a concept, as defined in the paper.

    The paper describes the description as a string, so this is a slight
    modification where we instead represent the definition as a list of
    tokens: the concept's lemma names, its gloss, and the glosses of all
    related concepts, minus stopwords.

    Results are memoized in self.descriptions.
    """
    if concept not in self.descriptions:
        lemmas = concept.lemma_names()
        gloss = self._gloss(concept)
        glosses = [self._gloss(rel) for rel in self._related(concept)]
        raw_desc = ' '.join(lemmas + [gloss] + glosses)
        # `stops` is presumably a module-level stopword collection -- TODO confirm.
        desc = [w for w in raw_desc.split() if w not in stops]
        self.descriptions[concept] = desc
    return self.descriptions[concept]
python
{ "resource": "" }
q41007
DCSVectorizer._related
train
def _related(self, concept): """ Returns related concepts for a concept. """ return concept.hypernyms() + \ concept.hyponyms() + \ concept.member_meronyms() + \ concept.substance_meronyms() + \ concept.part_meronyms() + \ concept.member_holonyms() + \ concept.substance_holonyms() + \ concept.part_holonyms() + \ concept.attributes() + \ concept.also_sees() + \ concept.similar_tos()
python
{ "resource": "" }
q41008
init_tasks
train
def init_tasks(): """ Performs basic setup before any of the tasks are run. All tasks needs to run this before continuing. It only fires once. """ # Make sure exist are set if "exists" not in env: env.exists = exists if "run" not in env: env.run = run if "cd" not in env: env.cd = cd if "max_releases" not in env: env.max_releases = 5 if "public_path" in env: public_path = env.public_path.rstrip("/") env.public_path = public_path run_hook("init_tasks")
python
{ "resource": "" }
q41009
setup
train
def setup():
    """
    Creates shared and upload directory then fires setup to recipes.

    Creates the shared (755), backup (750) and uploads (775) folders on
    the remote host via `env.run`, wrapped by before/after hooks.
    """
    init_tasks()
    run_hook("before_setup")
    # Create shared folder
    env.run("mkdir -p %s" % (paths.get_shared_path()))
    env.run("chmod 755 %s" % (paths.get_shared_path()))
    # Create backup folder
    env.run("mkdir -p %s" % (paths.get_backup_path()))
    env.run("chmod 750 %s" % (paths.get_backup_path()))
    # Create uploads folder
    env.run("mkdir -p %s" % (paths.get_upload_path()))
    env.run("chmod 775 %s" % (paths.get_upload_path()))
    run_hook("setup")
    run_hook("after_setup")
python
{ "resource": "" }
q41010
deploy
train
def deploy():
    """
    Performs a deploy by invoking copy, then generating next release name
    and invoking necessary hooks.

    Flow: validate preconditions -> before_deploy hook -> run the user
    "copy" hook into a millisecond-timestamped release dir -> run the
    "deploy" hook (rolling back on failure) -> symlink the new release as
    current (and as public_path if configured) -> prune old releases.
    """
    init_tasks()
    if not has_hook("copy"):
        return report("No copy method has been defined")
    if not env.exists(paths.get_shared_path()):
        return report("You need to run setup before running deploy")
    run_hook("before_deploy")
    # Millisecond timestamp keeps release names unique and sortable.
    release_name = int(time.time()*1000)
    release_path = paths.get_releases_path(release_name)
    env.current_release = release_path
    try:
        run_hook("copy")
    except Exception as e:
        return report("Error occurred on copy. Aborting deploy", err=e)
    if not env.exists(paths.get_source_path(release_name)):
        return report("Source path not found '%s'" % paths.get_source_path(release_name))
    try:
        run_hook("deploy")
    except Exception as e:
        # A failed deploy hook triggers an automatic rollback.
        message = "Error occurred on deploy, starting rollback..."
        logger.error(message)
        logger.error(e)
        run_task("rollback")
        return report("Error occurred on deploy")
    # Symlink current folder
    paths.symlink(paths.get_source_path(release_name), paths.get_current_path())
    # Clean older releases
    if "max_releases" in env:
        cleanup_releases(int(env.max_releases))
    run_hook("after_deploy")
    if "public_path" in env:
        paths.symlink(paths.get_source_path(release_name), env.public_path)
    logger.info("Deploy complete")
python
{ "resource": "" }
q41011
rollback
train
def rollback():
    """
    Rolls back to previous release.

    Removes the current release directory, then re-points the `current`
    symlink at what paths reports as the current release name afterwards.
    NOTE(review): assumes paths.get_current_release_name() resolves to the
    *previous* release once the newest dir is deleted -- TODO confirm.
    """
    init_tasks()
    run_hook("before_rollback")
    # Remove current version
    current_release = paths.get_current_release_path()
    if current_release:
        env.run("rm -rf %s" % current_release)
    # Restore previous version
    old_release = paths.get_current_release_name()
    if old_release:
        paths.symlink(paths.get_source_path(old_release), paths.get_current_path())
    run_hook("rollback")
    run_hook("after_rollback")
    logger.info("Rollback complete")
python
{ "resource": "" }
q41012
cleanup_releases
train
def cleanup_releases(limit=5):
    """
    Removes older releases.

    Keeps the `limit` newest release directories (by mtime, via `ls -dt`)
    and deletes the rest. NOTE(review): the releases path is interpolated
    into a shell pipeline unquoted -- safe only if it never contains
    spaces or shell metacharacters; verify against config sources.
    """
    init_tasks()
    # tail -n +N prints from line N onwards, so keep `limit` entries.
    max_versions = limit + 1
    env.run("ls -dt %s/*/ | tail -n +%s | xargs rm -rf" % (
        paths.get_releases_path(), max_versions)
    )
python
{ "resource": "" }
q41013
CachedView.from_db
train
def from_db(cls, db, force=False):
    """Make instance from database.

    For performance, this caches the episode types for the database.
    The `force` parameter can be used to bypass this.
    """
    cache = cls._cache
    if force or db not in cache:
        cache[db] = cls._new_from_db(db)
    return cache[db]
python
{ "resource": "" }
q41014
prune
train
def prune(tdocs):
    """
    Prune terms which are totally subsumed by a phrase.

    This could be better if it just removes the individual keywords that
    occur in a phrase for each time that phrase occurs.

    `tdocs` is a list of token lists; `gram_size` and `check_term` are
    helpers defined elsewhere in this module -- `check_term(d, term=t)`
    presumably returns True when `t` occurs *outside* any phrase in doc
    `d` (TODO confirm).
    """
    all_terms = set([t for toks in tdocs for t in toks])
    # Split vocabulary into single words vs multi-word phrases.
    terms = set()
    phrases = set()
    for t in all_terms:
        if gram_size(t) > 1:
            phrases.add(t)
        else:
            terms.add(t)
    # Identify candidates for redundant terms (1-gram terms found in a phrase)
    redundant = set()
    for t in terms:
        if any(t in ph for ph in phrases):
            redundant.add(t)
    # Search all documents to check that these terms occur
    # only in a phrase. If not, remove it as a candidate.
    # This could be more efficient
    cleared = set()
    for t in redundant:
        if any(check_term(d, term=t) for d in tdocs):
            cleared.add(t)
    redundant = redundant.difference(cleared)
    # Rebuild each doc without the redundant terms.
    pruned_tdocs = []
    for doc in tdocs:
        pruned_tdocs.append([t for t in doc if t not in redundant])
    return pruned_tdocs
python
{ "resource": "" }
q41015
build_CLASS
train
def build_CLASS(prefix): """ Function to dowwnload CLASS from github and and build the library """ # latest class version and download link args = (package_basedir, package_basedir, CLASS_VERSION, os.path.abspath(prefix)) command = 'sh %s/depends/install_class.sh %s %s %s' %args ret = os.system(command) if ret != 0: raise ValueError("could not build CLASS v%s" %CLASS_VERSION)
python
{ "resource": "" }
q41016
main
train
def main(filename): """ Creates a PDF by embedding the first page from the given image and writes some text to it. @param[in] filename The source filename of the image to embed. """ # Prepare font. font_family = 'arial' font = Font(font_family, bold=True) if not font: raise RuntimeError('No font found for %r' % font_family) # Initialize PDF document on a stream. with Document('output.pdf') as document: # Initialize a new page and begin its context. with document.Page() as ctx: # Open the image to embed. with Image(filename) as embed: # Set the media box for the page to the same as the # image to embed. ctx.box = embed.box # Embed the image. ctx.embed(embed) # Write some text. ctx.add(Text('Hello World', font, size=14, x=100, y=60))
python
{ "resource": "" }
q41017
User.login
train
def login(self, password):
    """Login to filemail as the current user.

    :param password: the account password (sent as a request parameter)
    :type password: ``str``
    :rtype: ``bool`` -- True on HTTP 200; otherwise ``hellraiser``
        is invoked with the response.
    """
    method, url = get_URL('login')
    payload = {
        'apikey': self.config.get('apikey'),
        'username': self.username,
        'password': password,
        'source': 'Desktop'
    }
    # `method` is the HTTP verb name looked up on the requests session.
    res = getattr(self.session, method)(url, params=payload)
    if res.status_code == 200:
        return True
    hellraiser(res)
python
{ "resource": "" }
q41018
User.logout
train
def logout(self): """Logout of filemail and closing the session.""" # Check if all transfers are complete before logout self.transfers_complete payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken') } method, url = get_URL('logout') res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: self.session.cookies['logintoken'] = None return True hellraiser(res)
python
{ "resource": "" }
q41019
User.transfers_complete
train
def transfers_complete(self):
    """Check if all transfers are completed.

    Raises a Filemail error (code 4003) via ``hellraiser`` as soon as an
    incomplete transfer is found; returns ``None`` otherwise.
    """
    for transfer in self.transfers:
        if transfer.is_complete:
            continue
        hellraiser({
            'errorcode': 4003,
            'errormessage': 'You must complete transfer before logout.'
        })
python
{ "resource": "" }
q41020
User.get_sent
train
def get_sent(self, expired=False, for_all=False):
    """Retrieve information on previously sent transfers.

    :param expired: Whether or not to return expired transfers.
    :param for_all: Get transfers for all users. Requires a Filemail
        Business account.
    :type for_all: bool
    :type expired: bool
    :rtype: ``list`` of :class:`pyfilemail.Transfer` objects
    """
    method, url = get_URL('get_sent')
    payload = {
        # BUG FIX: the api key lives in self.config, not in the session
        # cookies -- login() only stores 'logintoken' in cookies. This now
        # matches every other API call in this class.
        'apikey': self.config.get('apikey'),
        'logintoken': self.session.cookies.get('logintoken'),
        'getexpired': expired,
        'getforallusers': for_all
    }
    res = getattr(self.session, method)(url, params=payload)
    if res.status_code == 200:
        return self._restore_transfers(res)
    hellraiser(res.json())
python
{ "resource": "" }
q41021
User.get_user_info
train
def get_user_info(self, save_to_config=True):
    """Get user info and settings from Filemail.

    :param save_to_config: Whether or not to save settings to config file
    :type save_to_config: ``bool``
    :rtype: ``dict`` containing user information and default settings.
    """
    method, url = get_URL('user_get')
    payload = {
        'apikey': self.config.get('apikey'),
        'logintoken': self.session.cookies.get('logintoken')
    }
    res = getattr(self.session, method)(url, params=payload)
    if res.status_code == 200:
        settings = res.json()['user']
        if save_to_config:
            # Merge server-side settings into the local config object.
            self.config.update(settings)
        return settings
    hellraiser(res)
python
{ "resource": "" }
q41022
User.update_user_info
train
def update_user_info(self, **kwargs):
    """Update user info and settings.

    :param \*\*kwargs: settings to be merged with
        :func:`User.get_configfile` setings and sent to Filemail.
    :rtype: ``bool``

    NOTE(review): the entire config (including apikey and any
    credentials it holds) is sent as request parameters here.
    """
    if kwargs:
        self.config.update(kwargs)
    method, url = get_URL('user_update')
    res = getattr(self.session, method)(url, params=self.config)
    if res.status_code == 200:
        return True
    hellraiser(res)
python
{ "resource": "" }
q41023
User.get_received
train
def get_received(self, age=None, for_all=True):
    """Retrieve a list of transfers sent to you or your company from
    other people.

    :param age: between 1 and 90 days.
    :param for_all: If ``True`` will return received files for all users
        in the same business. (Available for business account members
        only).
    :type age: ``int``
    :type for_all: ``bool``
    :rtype: ``list`` of :class:`Transfer` objects.

    NOTE(review): the docstring says 1-90 but the validation accepts 0
    and its message says 0-90 -- the intended lower bound should be
    confirmed and made consistent.
    """
    method, url = get_URL('received_get')
    if age:
        if not isinstance(age, int) or age < 0 or age > 90:
            raise FMBaseError('Age must be <int> between 0-90')
        # Convert the relative age to an absolute UTC epoch timestamp.
        past = datetime.utcnow() - timedelta(days=age)
        age = timegm(past.utctimetuple())
    payload = {
        'apikey': self.config.get('apikey'),
        'logintoken': self.session.cookies.get('logintoken'),
        'getForAllUsers': for_all,
        'from': age
    }
    res = getattr(self.session, method)(url, params=payload)
    if res.status_code == 200:
        return self._restore_transfers(res)
    hellraiser(res)
python
{ "resource": "" }
q41024
User.get_contact
train
def get_contact(self, email):
    """Get Filemail contact based on email.

    :param email: address of contact
    :type email: ``str``, ``unicode``
    :rtype: ``dict`` with contact information
    :raises FMBaseError: if no contact matches the email
    """
    for contact in self.get_contacts():
        if contact['email'] == email:
            return contact
    msg = 'No contact with email: "{email}" found.'
    raise FMBaseError(msg.format(email=email))
python
{ "resource": "" }
q41025
User.get_group
train
def get_group(self, name):
    """Get contact group by name.

    :param name: name of group
    :type name: ``str``, ``unicode``
    :rtype: ``dict`` with group data
    :raises FMBaseError: if no group matches the name
    """
    for group in self.get_groups():
        if group['contactgroupname'] == name:
            return group
    msg = 'No group named: "{name}" found.'
    raise FMBaseError(msg.format(name=name))
python
{ "resource": "" }
q41026
User.delete_group
train
def delete_group(self, name):
    """Delete contact group.

    :param name: of group
    :type name: ``str``, ``unicode``
    :rtype: ``bool``
    """
    # Resolve the name to a group dict (raises if it doesn't exist).
    group = self.get_group(name)
    method, url = get_URL('group_delete')
    payload = {
        'apikey': self.config.get('apikey'),
        'logintoken': self.session.cookies.get('logintoken'),
        'contactgroupid': group['contactgroupid']
    }
    res = getattr(self.session, method)(url, params=payload)
    if res.status_code == 200:
        return True
    hellraiser(res)
python
{ "resource": "" }
q41027
User.rename_group
train
def rename_group(self, group, newname):
    """Rename contact group.

    :param group: group data or name of group
    :param newname: of group
    :type group: ``str``, ``unicode``, ``dict``
    :type newname: ``str``, ``unicode``
    :rtype: ``bool``
    """
    if isinstance(group, basestring):
        # BUG FIX: a group *name* must be resolved with get_group();
        # the previous get_contact() call looks contacts up by email and
        # would never return a dict containing 'contactgroupid'.
        group = self.get_group(group)
    method, url = get_URL('group_update')
    payload = {
        'apikey': self.config.get('apikey'),
        'logintoken': self.session.cookies.get('logintoken'),
        'contactgroupid': group['contactgroupid'],
        'name': newname
    }
    res = getattr(self.session, method)(url, params=payload)
    if res.status_code == 200:
        return True
    hellraiser(res)
python
{ "resource": "" }
q41028
User.add_contact_to_group
train
def add_contact_to_group(self, contact, group):
    """Add contact to group.

    :param contact: name or contact object
    :param group: name or group object
    :type contact: ``str``, ``unicode``, ``dict``
    :type group: ``str``, ``unicode``, ``dict``
    :rtype: ``bool``
    """
    # Accept either resolved dicts or plain names/emails.
    if isinstance(contact, basestring):
        contact = self.get_contact(contact)
    if isinstance(group, basestring):
        group = self.get_group(group)
    method, url = get_URL('contacts_add_to_group')
    payload = {
        'apikey': self.config.get('apikey'),
        'logintoken': self.session.cookies.get('logintoken'),
        'contactid': contact['contactid'],
        'contactgroupid': group['contactgroupid']
    }
    res = getattr(self.session, method)(url, params=payload)
    if res.status_code == 200:
        return True
    hellraiser(res)
python
{ "resource": "" }
q41029
User.get_company_info
train
def get_company_info(self):
    """Get company settings from Filemail.

    :rtype: ``dict`` with company data
    """
    method, url = get_URL('company_get')
    payload = {
        'apikey': self.config.get('apikey'),
        'logintoken': self.session.cookies.get('logintoken')
    }
    res = getattr(self.session, method)(url, params=payload)
    if res.status_code == 200:
        return res.json()['company']
    hellraiser(res)
python
{ "resource": "" }
q41030
User.update_company
train
def update_company(self, company):
    """Update company settings.

    :param company: updated settings
    :type company: ``dict``
    :rtype: ``bool``
    :raises AttributeError: if ``company`` is not a dict
    """
    if not isinstance(company, dict):
        raise AttributeError('company must be a <dict>')
    method, url = get_URL('company_update')
    payload = {
        'apikey': self.config.get('apikey'),
        'logintoken': self.session.cookies.get('logintoken')
    }
    # Company settings are merged into the auth payload and sent as-is.
    payload.update(company)
    res = getattr(self.session, method)(url, params=payload)
    if res.status_code == 200:
        return True
    hellraiser(res)
python
{ "resource": "" }
q41031
User.get_company_user
train
def get_company_user(self, email):
    """Get company user based on email.

    :param email: address of contact
    :type email: ``str``, ``unicode``
    :rtype: ``dict`` with contact information
    :raises FMBaseError: if no company user matches the email
    """
    for user in self.get_company_users():
        if user['email'] == email:
            return user
    msg = 'No user with email: "{email}" associated with this company.'
    raise FMBaseError(msg.format(email=email))
python
{ "resource": "" }
q41032
User.company_add_user
train
def company_add_user(self, email, name, password, receiver, admin):
    """Add a user to the company account.

    :param email:
    :param name:
    :param password: Pass without storing in plain text
    :param receiver: Can user receive files
    :param admin:
    :type email: ``str`` or ``unicode``
    :type name: ``str`` or ``unicode``
    :type password: ``str`` or ``unicode``
    :type receiver: ``bool``
    :type admin: ``bool``
    :rtype: ``bool``

    NOTE(review): the password travels as a request parameter; make sure
    the endpoint is HTTPS so it isn't exposed in transit/logs.
    """
    method, url = get_URL('company_add_user')
    payload = {
        'apikey': self.config.get('apikey'),
        'logintoken': self.session.cookies.get('logintoken'),
        'email': email,
        'name': name,
        'password': password,
        'canreceivefiles': receiver,
        'admin': admin
    }
    res = getattr(self.session, method)(url, params=payload)
    if res.status_code == 200:
        return True
    hellraiser(res)
python
{ "resource": "" }
q41033
User.update_company_user
train
def update_company_user(self, email, userdata):
    """Update a company users settings.

    :param email: current email address of user
    :param userdata: updated settings
    :type email: ``str`` or ``unicode``
    :type userdata: ``dict``
    :rtype: ``bool``
    :raises AttributeError: if ``userdata`` is not a dict
    """
    if not isinstance(userdata, dict):
        raise AttributeError('userdata must be a <dict>')
    method, url = get_URL('company_update_user')
    payload = {
        'apikey': self.config.get('apikey'),
        'logintoken': self.session.cookies.get('logintoken'),
        'useremail': email
    }
    # New settings override/extend the auth payload.
    payload.update(userdata)
    res = getattr(self.session, method)(url, params=payload)
    if res.status_code == 200:
        return True
    hellraiser(res)
python
{ "resource": "" }
q41034
home
train
def home(request):
    """This view generates the data for the home page.

    This login restricted view passes dictionaries containing the current
    cages, animals and strains as well as the totals for each. This data
    is passed to the template home.html
    """
    # "Current" = animals flagged Alive; lists without the filter are totals.
    cage_list = Animal.objects.values("Cage").distinct()
    cage_list_current = cage_list.filter(Alive=True)
    animal_list = Animal.objects.all()
    animal_list_current = animal_list.filter(Alive=True)
    strain_list = animal_list.values("Strain").distinct()
    strain_list_current = animal_list_current.values("Strain").distinct()
    return render(request, 'home.html', {
        'animal_list': animal_list,
        'animal_list_current': animal_list_current,
        'strain_list': strain_list,
        'strain_list_current': strain_list_current,
        'cage_list': cage_list,
        'cage_list_current': cage_list_current})
python
{ "resource": "" }
q41035
PrintTable.col_widths
train
def col_widths(self):
    # type: () -> defaultdict
    """Get MAX possible width of each column in the table.

    Scans the header row plus every data row and records the longest
    cell per column index.

    :return: defaultdict mapping column index -> max width
    """
    widths = defaultdict(int)
    for row in [self.headers] + list(self._rows):
        for idx, cell in enumerate(row):
            widths[idx] = max(widths[idx], len(cell))
    return widths
python
{ "resource": "" }
q41036
PrintTable._marker_line
train
def _marker_line(self): # type: () -> str """Generate a correctly sized marker line. e.g. '+------------------+---------+----------+---------+' :return: str """ output = '' for col in sorted(self.col_widths): line = self.COLUMN_MARK + (self.DASH * (self.col_widths[col] + self.PADDING * 2)) output += line output += self.COLUMN_MARK + '\n' return output
python
{ "resource": "" }
q41037
PrintTable._row_to_str
train
def _row_to_str(self, row): # type: (List[str]) -> str """Converts a list of strings to a correctly spaced and formatted row string. e.g. ['some', 'foo', 'bar'] --> '| some | foo | bar |' :param row: list :return: str """ _row_text = '' for col, width in self.col_widths.items(): _row_text += self.COLUMN_SEP l_pad, r_pad = self._split_int(width - len(row[col])) _row_text += '{0}{1}{2}'.format(' ' * (l_pad + self.PADDING), row[col], ' ' * (r_pad + self.PADDING)) _row_text += self.COLUMN_SEP + '\n' return _row_text
python
{ "resource": "" }
q41038
PrintTable._table_to_str
train
def _table_to_str(self): # type: () -> str """Return single formatted table string. :return: str """ _marker_line = self._marker_line() output = _marker_line + self._row_to_str(self.headers) + _marker_line for row in self._rows: output += self._row_to_str(row) output += _marker_line return output
python
{ "resource": "" }
q41039
sift4
train
def sift4(s1, s2, max_offset=5):
    """
    This is an implementation of general Sift4, an approximate string
    distance: larger return values mean more dissimilar strings.

    :param s1: first sequence
    :param s2: second sequence
    :param max_offset: how far ahead to search for a resync after a mismatch
    :return: approximate edit distance (int)
    """
    t1, t2 = list(s1), list(s2)
    l1, l2 = len(t1), len(t2)
    # Distance from an empty string is the other string's length.
    if not s1:
        return l2
    if not s2:
        return l1
    # Cursors for each string
    c1, c2 = 0, 0
    # Largest common subsequence
    lcss = 0
    # Local common substring
    local_cs = 0
    # Number of transpositions ('ab' vs 'ba')
    trans = 0
    # Offset pair array, for computing the transpositions
    offsets = []
    while c1 < l1 and c2 < l2:
        if t1[c1] == t2[c2]:
            local_cs += 1
            # Check if current match is a transposition
            is_trans = False
            i = 0
            while i < len(offsets):
                ofs = offsets[i]
                if c1 <= ofs['c1'] or c2 <= ofs['c2']:
                    is_trans = abs(c2-c1) >= abs(ofs['c2'] - ofs['c1'])
                    if is_trans:
                        trans += 1
                    elif not ofs['trans']:
                        # Count the stored pair's transposition once.
                        ofs['trans'] = True
                        trans += 1
                    break
                elif c1 > ofs['c2'] and c2 > ofs['c1']:
                    # This offset pair can no longer affect anything; drop it.
                    del offsets[i]
                else:
                    i += 1
            offsets.append({
                'c1': c1,
                'c2': c2,
                'trans': is_trans
            })
        else:
            # Mismatch: bank the local common substring and try to resync.
            lcss += local_cs
            local_cs = 0
            if c1 != c2:
                c1 = c2 = min(c1, c2)
            # Look ahead up to max_offset for the next matching character.
            for i in range(max_offset):
                if c1 + i >= l1 and c2 + i >= l2:
                    break
                elif c1 + i < l1 and s1[c1+i] == s2[c2]:
                    # -1 compensates for the unconditional +1 below.
                    c1 += i - 1
                    c2 -= 1
                    break
                elif c2 + i < l2 and s1[c1] == s2[c2 + i]:
                    c2 += i - 1
                    c1 -= 1
                    break
        c1 += 1
        c2 += 1
        if c1 >= l1 or c2 >= l2:
            lcss += local_cs
            local_cs = 0
            c1 = c2 = min(c1, c2)
    lcss += local_cs
    return round(max(l1, l2) - lcss + trans)
python
{ "resource": "" }
q41040
Cohort.save
train
def save(self, *args, **kwargs):
    '''The slug field is auto-populated during the save from the name field.'''
    # Only generate the slug on first save (no primary key yet) so that
    # renames don't silently change existing URLs.
    if not self.id:
        self.slug = slugify(self.name)
    super(Cohort, self).save(*args, **kwargs)
python
{ "resource": "" }
q41041
FilePicker.pick
train
def pick(self, filenames: Iterable[str]) -> str:
    """Pick one filename based on priority rules.

    Rules map an integer priority to a list of compiled regex patterns;
    higher priorities win, and within a priority the lexicographically
    largest matching filename wins (e.g., v2 before v1). When nothing
    matches, the lexicographically largest filename is returned.
    """
    candidates = sorted(filenames, reverse=True)  # e.g., v2 before v1
    for priority in sorted(self.rules, reverse=True):
        for pattern in self.rules[priority]:
            matches = [name for name in candidates if pattern.search(name)]
            if matches:
                return matches[0]
    return candidates[0]
python
{ "resource": "" }
q41042
count_tf
train
def count_tf(tokens_stream):
    """
    Count term frequencies for a single file.

    :param tokens_stream: iterable of token lists
    :return: defaultdict mapping token -> occurrence count
    """
    tf = defaultdict(int)
    for token in (t for tokens in tokens_stream for t in tokens):
        tf[token] += 1
    return tf
python
{ "resource": "" }
q41043
Cluster.validate_config
train
def validate_config(cls, config):
    """
    Validates a config dictionary parsed from a cluster config file.

    Checks that a discovery method is defined and that at least one of
    the balancers in the config are installed and available.

    :raises ValueError: if either check fails
    """
    if "discovery" not in config:
        raise ValueError("No discovery method defined.")
    # At least one configured balancer must actually be installed.
    installed_balancers = Balancer.get_installed_classes().keys()
    if not any([balancer in config for balancer in installed_balancers]):
        raise ValueError("No available balancer configs defined.")
python
{ "resource": "" }
q41044
Cluster.apply_config
train
def apply_config(self, config):
    """
    Sets the `discovery` and `meta_cluster` attributes, as well as the
    configured + available balancer attributes from a given validated
    config.
    """
    self.discovery = config["discovery"]
    # meta_cluster is optional; absent keys become None.
    self.meta_cluster = config.get("meta_cluster")
    # Only installed balancers that appear in the config become attributes.
    for balancer_name in Balancer.get_installed_classes().keys():
        if balancer_name in config:
            setattr(self, balancer_name, config[balancer_name])
python
{ "resource": "" }
q41045
command
train
def command(state, args):
    """Search Animanager database.

    Builds a SQL WHERE clause from the CLI flags (--watching/--available
    restrict to shows with a filename regexp; a free query becomes a
    LIKE on title), scans the watch directory once for video files, and
    prints the matching shows with their availability.
    """
    args = parser.parse_args(args[1:])
    where_queries = []
    params = {}
    if args.watching or args.available:
        where_queries.append('regexp IS NOT NULL')
    if args.query:
        where_queries.append('title LIKE :title')
        params['title'] = _compile_sql_query(args.query)
    if not where_queries:
        print('Must include at least one filter.')
        return
    where_query = ' AND '.join(where_queries)
    logger.debug('Search where %s with params %s', where_query, params)
    results = list()
    # Scan the watch dir once up front; each anime filters this list.
    all_files = [
        filename
        for filename in _find_files(state.config['anime'].getpath('watchdir'))
        if _is_video(filename)
    ]
    for anime in query.select.select(state.db, where_query, params):
        logger.debug('For anime %s with regexp %s', anime.aid, anime.regexp)
        if anime.regexp is not None:
            anime_files = AnimeFiles(anime.regexp, all_files)
            logger.debug('Found files %s', anime_files.filenames)
            # Persist the matched files for later commands.
            query.files.cache_files(state.db, anime.aid, anime_files)
            available = anime_files.available_string(anime.watched_episodes)
        else:
            available = ''
        # --available hides rows with nothing new to watch.
        if not args.available or available:
            results.append((
                anime.aid,
                anime.title,
                anime.type,
                '{}/{}'.format(anime.watched_episodes, anime.episodecount),
                'yes' if anime.complete else '',
                available,
            ))
    state.results['db'].set(results)
    state.results['db'].print()
python
{ "resource": "" }
q41046
_is_video
train
def _is_video(filepath) -> bool: """Check filename extension to see if it's a video file.""" if os.path.exists(filepath): # Could be broken symlink extension = os.path.splitext(filepath)[1] return extension in ('.mkv', '.mp4', '.avi') else: return False
python
{ "resource": "" }
q41047
_find_files
train
def _find_files(dirpath: str) -> 'Iterable[str]': """Find files recursively. Returns a generator that yields paths in no particular order. """ for dirpath, dirnames, filenames in os.walk(dirpath, topdown=True, followlinks=True): if os.path.basename(dirpath).startswith('.'): del dirnames[:] for filename in filenames: yield os.path.join(dirpath, filename)
python
{ "resource": "" }
q41048
Peer.current
train
def current(cls):
    """
    Helper method for getting the current peer of whichever host we're
    running on: resolves the local fully-qualified hostname and its IP,
    and builds a Peer from them (default port).
    """
    name = socket.getfqdn()
    ip = socket.gethostbyname(name)
    return cls(name, ip)
python
{ "resource": "" }
q41049
Peer.serialize
train
def serialize(self):
    """
    Serializes the Peer data as a simple JSON map string.

    Keys are sorted so the output is deterministic and comparable.
    """
    data = {"name": self.name, "ip": self.ip, "port": self.port}
    return json.dumps(data, sort_keys=True)
python
{ "resource": "" }
q41050
Peer.deserialize
train
def deserialize(cls, value):
    """
    Generates a Peer instance from a JSON string of the sort generated
    by `Peer.serialize`.

    The `name` and `ip` keys are required to be present in the JSON map;
    if the `port` key is not present the default is used.
    """
    parsed = json.loads(value)
    if "name" not in parsed:
        raise ValueError("No peer name.")
    if "ip" not in parsed:
        raise ValueError("No peer IP.")
    if "port" in parsed:
        port = parsed["port"]
    else:
        port = DEFAULT_PEER_PORT
    return cls(parsed["name"], parsed["ip"], port)
python
{ "resource": "" }
q41051
CryoEncoder.default
train
def default(self, obj):
    """
    If the input object is a ndarray it will be converted into a dict
    holding dtype, shape and the data base64 encoded. Sparse matrices
    additionally carry their indices/indptr arrays; any other object
    with a __dict__ is serialized as that dict.
    """
    if isinstance(obj, np.ndarray):
        # obj.data is the raw buffer; base64 makes it JSON-safe text.
        data_b64 = base64.b64encode(obj.data).decode('utf-8')
        return dict(__ndarray__=data_b64, dtype=str(obj.dtype), shape=obj.shape)
    elif sps.issparse(obj):
        # NOTE(review): reuses the '__ndarray__' key for sparse matrices,
        # so a decoder cannot distinguish the two by key alone -- confirm
        # the matching decoder handles this. Assumes CSR/CSC layout
        # (indices/indptr attributes); TODO confirm.
        data_b64 = base64.b64encode(obj.data).decode('utf-8')
        return dict(__ndarray__=data_b64, dtype=str(obj.dtype), shape=obj.shape,
                    indices=obj.indices, indptr=obj.indptr)
    elif hasattr(obj, '__dict__'):
        return obj.__dict__
    # Let the base class default method raise the TypeError
    return json.JSONEncoder.default(self, obj)
python
{ "resource": "" }
q41052
Package._extract_meta_value
train
def _extract_meta_value(self, tag): # type: (str, List[str]) -> str """Find a target value by `tag` from given meta data. :param tag: str :param meta_data: list :return: str """ try: return [l[len(tag):] for l in self.meta_data if l.startswith(tag)][0] except IndexError: return '* Not Found *'
python
{ "resource": "" }
q41053
get_markup_choices
train
def get_markup_choices():
    """
    Receives available markup options as list.

    Scans this module's directory for ``*_reader.py`` files, loads
    each reader via ``get_reader`` and keeps only those with
    ``enabled`` set to True.  Returns ``(markup, reader.name)`` tuples
    suitable for a Django-style choices list.
    """
    available_reader_list = []

    module_dir = os.path.realpath(os.path.dirname(__file__))
    module_names = filter(
        lambda x: x.endswith('_reader.py'), os.listdir(module_dir))

    for module_name in module_names:
        # The markup key is the filename prefix, e.g. "rst" from
        # "rst_reader.py".
        markup = module_name.split('_')[0]
        reader = get_reader(markup=markup)
        if reader.enabled is True:
            available_reader_list.append((markup, reader.name))
    return available_reader_list
python
{ "resource": "" }
q41054
ZookeeperDiscovery.apply_config
train
def apply_config(self, config):
    """
    Takes the given config dictionary and sets the hosts and base_path
    attributes.

    If the kazoo client connection is established, its hosts list is
    updated to the newly configured value.  A changed base path cannot
    be applied at runtime, so it is only logged as requiring a
    restart.
    """
    self.hosts = config["hosts"]

    old_base_path = self.base_path
    self.base_path = config["path"]

    if not self.connected.is_set():
        # No live connection yet; the new values take effect on connect.
        return

    logger.debug("Setting ZK hosts to %s", self.hosts)
    self.client.set_hosts(",".join(self.hosts))

    if old_base_path and old_base_path != self.base_path:
        logger.critical(
            "ZNode base path changed!" +
            " Lighthouse will need to be restarted" +
            " to watch the right znodes"
        )
python
{ "resource": "" }
q41055
ZookeeperDiscovery.connect
train
def connect(self):
    """
    Creates a new KazooClient and establishes a connection.

    Passes the client the `handle_connection_change` method as a
    callback to fire when the Zookeeper connection changes state.
    The connection attempt is asynchronous (`start_async`), so this
    returns before the session is necessarily established.
    """
    self.client = client.KazooClient(hosts=",".join(self.hosts))
    self.client.add_listener(self.handle_connection_change)
    self.client.start_async()
python
{ "resource": "" }
q41056
ZookeeperDiscovery.disconnect
train
def disconnect(self):
    """
    Stops and closes the kazoo connection.

    `stop()` ends the client's session handling; `close()` then
    releases the underlying handler resources.
    """
    logger.info("Disconnecting from Zookeeper.")
    self.client.stop()
    self.client.close()
python
{ "resource": "" }
q41057
ZookeeperDiscovery.handle_connection_change
train
def handle_connection_change(self, state):
    """
    Callback for handling changes in the kazoo client's connection
    state.

    If the connection becomes lost or suspended, the `connected` Event
    is cleared.  Other given states imply that the connection is
    established so `connected` is set.
    """
    if state == client.KazooState.LOST:
        if not self.shutdown.is_set():
            # A LOST session during shutdown is expected, so only log
            # it when it happens unexpectedly.
            logger.info("Zookeeper session lost!")
        self.connected.clear()
    elif state == client.KazooState.SUSPENDED:
        logger.info("Zookeeper connection suspended!")
        self.connected.clear()
    else:
        logger.info("Zookeeper connection (re)established.")
        self.connected.set()
python
{ "resource": "" }
q41058
ZookeeperDiscovery.start_watching
train
def start_watching(self, cluster, callback):
    """
    Initiates the "watching" of a cluster's associated znode.

    This is done via kazoo's ChildrenWatch object.  When a cluster's
    znode's child nodes are updated, a callback is fired and we update
    the cluster's `nodes` attribute based on the existing child znodes
    and fire a passed-in callback with no arguments once done.

    If the cluster's znode does not exist we wait for
    `NO_NODE_INTERVAL` seconds before trying again as long as no
    ChildrenWatch exists for the given cluster yet and we are not in
    the process of shutting down.
    """
    logger.debug("starting to watch cluster %s", cluster.name)
    wait_on_any(self.connected, self.shutdown)
    logger.debug("done waiting on (connected, shutdown)")
    znode_path = "/".join([self.base_path, cluster.name])
    self.stop_events[znode_path] = threading.Event()

    def should_stop():
        # True once this watch is cancelled (its stop event removed or
        # set) or the discovery is shutting down.
        return (
            znode_path not in self.stop_events or
            self.stop_events[znode_path].is_set() or
            self.shutdown.is_set()
        )

    while not should_stop():
        # Poll until the znode exists; a closed connection abandons
        # the wait entirely.
        try:
            if self.client.exists(znode_path):
                break
        except exceptions.ConnectionClosedError:
            break
        wait_on_any(
            self.stop_events[znode_path], self.shutdown,
            timeout=NO_NODE_INTERVAL
        )

    logger.debug("setting up ChildrenWatch for %s", znode_path)

    @self.client.ChildrenWatch(znode_path)
    def watch(children):
        if should_stop():
            # Returning False de-registers the kazoo watch.
            return False

        logger.debug("znode children changed! (%s)", znode_path)

        new_nodes = []
        for child in children:
            child_path = "/".join([znode_path, child])
            try:
                new_nodes.append(
                    Node.deserialize(self.client.get(child_path)[0])
                )
            except ValueError:
                # Skip malformed members rather than failing the whole
                # watch callback.
                logger.exception("Invalid node at path '%s'", child)
                continue

        cluster.nodes = new_nodes

        callback()
python
{ "resource": "" }
q41059
ZookeeperDiscovery.stop_watching
train
def stop_watching(self, cluster):
    """Signal the watcher for *cluster* to exit.

    Sets the stop event registered for the cluster's znode path, if
    one exists; a cluster that was never watched is a no-op.
    """
    path = "/".join([self.base_path, cluster.name])
    event = self.stop_events.get(path)
    if event is not None:
        event.set()
python
{ "resource": "" }
q41060
ZookeeperDiscovery.report_down
train
def report_down(self, service, port):
    """
    Reports the given service's present node as down by deleting the
    node's znode in Zookeeper if the znode is present.

    Waits for the Zookeeper connection to be established before
    further action is taken.
    """
    wait_on_any(self.connected, self.shutdown)

    node = Node.current(service, port)
    path = self.path_of(service, node)

    try:
        logger.debug("Deleting znode at %s", path)
        self.client.delete(path)
    except exceptions.NoNodeError:
        # Already gone -- nothing left to report down.
        pass
python
{ "resource": "" }
q41061
ZookeeperDiscovery.path_of
train
def path_of(self, service, node):
    """Return the znode path for *node* within *service*'s cluster."""
    return "%s/%s/%s" % (self.base_path, service.name, node.name)
python
{ "resource": "" }
q41062
rating_score
train
def rating_score(obj, user):
    """
    Returns the score a user has given an object.

    Returns False when the user is unauthenticated or `obj` carries no
    ratings descriptor, and None when the user has not rated `obj`.
    """
    if not user.is_authenticated() or not hasattr(obj, '_ratings_field'):
        return False

    # The model stores the name of its ratings descriptor attribute on
    # `_ratings_field`.
    ratings_descriptor = getattr(obj, obj._ratings_field)

    try:
        rating = ratings_descriptor.get(user=user).score
    except ratings_descriptor.model.DoesNotExist:
        rating = None

    return rating
python
{ "resource": "" }
q41063
rate_url
train
def rate_url(obj, score=1):
    """
    Generates a link to "rate" the given object with the provided
    score - this can be used as a form target or for POSTing via Ajax.

    The URL is resolved through the content-types framework, so any
    model instance works.
    """
    return reverse('ratings_rate_object', args=(
        ContentType.objects.get_for_model(obj).pk,
        obj.pk,
        score,
    ))
python
{ "resource": "" }
q41064
unrate_url
train
def unrate_url(obj):
    """
    Generates a link to "un-rate" the given object - this can be used
    as a form target or for POSTing via Ajax.

    The URL is resolved through the content-types framework, so any
    model instance works.
    """
    return reverse('ratings_unrate_object', args=(
        ContentType.objects.get_for_model(obj).pk,
        obj.pk,
    ))
python
{ "resource": "" }
q41065
command
train
def command(state, args):
    """Reset anime watched episodes.

    Parses the remaining CLI arguments, resolves the anime id (falling
    back to the 'db' results key) and resets its watched-episode count
    to the given episode.
    """
    args = parser.parse_args(args[1:])
    aid = state.results.parse_aid(args.aid, default_key='db')
    query.update.reset(state.db, aid, args.episode)
python
{ "resource": "" }
q41066
cancel_job
train
def cancel_job(agent, project_name, job_id):
    """
    cancel a job. If the job is pending, it will be removed. If the job
    is running, it will be terminated.
    """
    prevstate = agent.cancel(project_name, job_id)['prevstate']
    if prevstate == 'pending':
        # A pending job never ran, so drop its extra-info row as well.
        sqllite_agent.execute(ScrapydJobExtInfoSQLSet.DELETE_BY_ID, (job_id,))
python
{ "resource": "" }
q41067
get_job_amounts
train
def get_job_amounts(agent, project_name, spider_name=None):
    """
    Get amounts that pending job amount, running job amount, finished
    job amount.

    When *spider_name* is given, only jobs belonging to that spider
    are counted.
    """
    jobs_by_state = agent.get_job_list(project_name)

    job_amounts = {}
    for state in ('pending', 'running', 'finished'):
        jobs = jobs_by_state[state]
        if spider_name is not None:
            jobs = [job for job in jobs if job['spider'] == spider_name]
        job_amounts[state] = len(jobs)
    return job_amounts
python
{ "resource": "" }
q41068
corba_name_to_string
train
def corba_name_to_string(name):
    '''Convert a CORBA CosNaming.Name to a string.

    Each component is rendered as "id" or "id.kind" and the components
    are joined with '/'.  Anything other than a non-empty list or
    tuple raises NotCORBANameError.
    '''
    if type(name) not in (list, tuple):
        raise NotCORBANameError(name)
    if not name:
        raise NotCORBANameError(name)

    parts = []
    for component in name:
        if component.kind:
            parts.append('{0}.{1}'.format(component.id, component.kind))
        else:
            parts.append(component.id)
    return '/'.join(parts)
python
{ "resource": "" }
q41069
Directory.reparse
train
def reparse(self):
    '''Reparse all children of this directory.

    This effectively rebuilds the tree below this node.

    This operation takes an unbounded time to complete; if there are a
    lot of objects registered below this directory's context, they
    will all need to be parsed.
    '''
    # Drop the cached subtree, then walk the naming context again.
    self._remove_all_children()
    self._parse_context(self._context, self.orb)
python
{ "resource": "" }
q41070
Directory.unbind
train
def unbind(self, name):
    '''Unbind an object from the context represented by this directory.

    Warning: this is a dangerous operation. You may unlink an entire
    section of the tree and be unable to recover it. Be careful what
    you unbind.

    The name should be in the format used in paths. For example,
    'manager.mgr' or 'ConsoleIn0.rtc'.

    Raises BadPathError if no binding with that name exists.
    '''
    with self._mutex:
        # Split "id.kind" on the last dot; a dotless name is all id.
        id, sep, kind = name.rpartition('.')
        if not id:
            id = kind
            kind = ''
        name = CosNaming.NameComponent(id=str(id), kind=str(kind))
        try:
            self.context.unbind([name])
        except CosNaming.NamingContext.NotFound:
            raise exceptions.BadPathError(name)
python
{ "resource": "" }
q41071
LemmaTokenizer.tokenize
train
def tokenize(self, docs):
    """
    Tokenizes a document, using a lemmatizer.

    Args:
        | docs -- the text documents to process.

    Returns:
        | list -- one token list per input document.

    Runs serially when ``n_jobs == 1``, otherwise fans the work out
    across workers.
    """
    if self.n_jobs == 1:
        return list(map(self._tokenize, docs))
    return parallel(self._tokenize, docs, self.n_jobs)
python
{ "resource": "" }
q41072
command
train
def command(state, args):
    """Register watching regexp for an anime.

    With positional query words, joins them with '.*' to form the
    regexp; otherwise derives a default pattern from the anime's title
    plus an episode-number capture group named 'ep'.
    """
    args = parser.parse_args(args[1:])
    aid = state.results.parse_aid(args.aid, default_key='db')
    if args.query:
        # Use regexp provided by user.
        regexp = '.*'.join(args.query)
    else:
        # Make default regexp.
        title = query.select.lookup(state.db, aid, fields=['title']).title
        # Replace non-word, non-whitespace with whitespace.
        regexp = re.sub(r'[^\w\s]', ' ', title)
        # Split on whitespace and join with wildcard regexp.
        regexp = '.*?'.join(re.escape(x) for x in regexp.split())
        # Append episode matching regexp.
        regexp = '.*?'.join((
            regexp,
            r'\b(?P<ep>[0-9]+)(v[0-9]+)?',
        ))
    query.files.set_regexp(state.db, aid, regexp)
python
{ "resource": "" }
q41073
AlterTable.from_definition
train
def from_definition(self, table: Table, version: int):
    """Add all columns from the table added in the specified version.

    Registers *table* on this statement, queues every column whose
    version matches, and returns self for chaining.
    """
    self.table(table)
    added = table.columns.get_with_version(version)
    self.add_columns(*added)
    return self
python
{ "resource": "" }
q41074
CamCrypt.keygen
train
def keygen(self, keyBitLength, rawKey):
    """
    This must be called on the object before any encryption or
    decryption can take place.

    Provide it the key bit length, which must be 128, 192, or 256, and
    the key, which may be a sequence of bytes or a simple string.

    Does not return any value.  Raises an exception if the arguments
    are not sane.
    """
    if keyBitLength not in ACCEPTABLE_KEY_LENGTHS:
        raise Exception("keyBitLength must be 128, 192, or 256")
    self.bitlen = keyBitLength

    if len(rawKey) <= 0 or len(rawKey) > self.bitlen/8:
        raise Exception("rawKey must be less than or equal to keyBitLength/8 (%d) characters long" % (self.bitlen/8))
    # Short keys are padded out with NUL bytes to the full key length.
    rawKey = zero_pad(rawKey, self.bitlen/8)

    # The C library expands the raw key into a key table buffer reused
    # by every subsequent encrypt/decrypt call.
    keytable = ctypes.create_string_buffer(TABLE_BYTE_LEN)
    self.ekeygen(self.bitlen, rawKey, keytable)

    self.keytable = keytable
    self.initialized = True
python
{ "resource": "" }
q41075
CamCrypt.encrypt
train
def encrypt(self, plainText):
    """Encrypt an arbitrary-length block of data.

    NOTE: This function formerly worked only on 16-byte blocks of
    `plainText`.  code that assumed this should still work fine, but
    can optionally be modified to call `encrypt_block` instead.

    Args:
        plainText (str): data to encrypt.  If the data is not a
            multiple of 16 bytes long, it will be padded with null
            (0x00) bytes until it is.

    Returns:
        encrypted data.  Note that this will always be a multiple of
        16 bytes long.
    """
    # Collect per-block ciphertext and join once at the end; the
    # previous repeated string += was quadratic for large inputs.
    encrypted = []
    for index in range(0, len(plainText), BLOCK_SIZE):
        block = plainText[index:index + BLOCK_SIZE]

        # Pad to required length if needed
        if len(block) < BLOCK_SIZE:
            block = zero_pad(block, BLOCK_SIZE)

        encrypted.append(self.encrypt_block(block))

    return ''.join(encrypted)
python
{ "resource": "" }
q41076
CamCrypt.decrypt
train
def decrypt(self, cipherText):
    """Decrypt an arbitrary-length block of data.

    NOTE: This function formerly worked only on 16-byte blocks of
    `cipherText`.  code that assumed this should still work fine, but
    can optionally be modified to call `decrypt_block` instead.

    Args:
        cipherText (str): data to decrypt.  If the data is not a
            multiple of 16 bytes long, it will be padded with null
            (0x00) bytes until it is.  WARNING: This is almost
            certainty never need to happen for correctly-encrypted
            data.

    Returns:
        decrypted data.  Note that this will always be a multiple of
        16 bytes long.  If the original data was not a multiple of 16
        bytes, the result will contain trailing null bytes, which can
        be removed with `.rstrip('\\x00')`
    """
    # Collect per-block plaintext and join once at the end; the
    # previous repeated string += was quadratic for large inputs.
    decrypted = []
    for index in range(0, len(cipherText), BLOCK_SIZE):
        block = cipherText[index:index + BLOCK_SIZE]

        # Pad to required length if needed
        if len(block) < BLOCK_SIZE:
            block = zero_pad(block, BLOCK_SIZE)

        decrypted.append(self.decrypt_block(block))

    return ''.join(decrypted)
python
{ "resource": "" }
q41077
CamCrypt.encrypt_block
train
def encrypt_block(self, plainText):
    """Encrypt a 16-byte block of data.

    NOTE: This function was formerly called `encrypt`, but was changed
    when support for encrypting arbitrary-length strings was added.

    Args:
        plainText (str): 16-byte data.

    Returns:
        16-byte str.

    Raises:
        TypeError if CamCrypt object has not been initialized.
        ValueError if `plainText` is not BLOCK_SIZE (i.e. 16) bytes.
    """
    if not self.initialized:
        raise TypeError("CamCrypt object has not been initialized")
    if len(plainText) != BLOCK_SIZE:
        raise ValueError("plainText must be %d bytes long (received %d bytes)" %
                         (BLOCK_SIZE, len(plainText)))
    # Output buffer the C routine writes the ciphertext into.
    cipher = ctypes.create_string_buffer(BLOCK_SIZE)
    self.encblock(self.bitlen, plainText, self.keytable, cipher)
    return cipher.raw
python
{ "resource": "" }
q41078
CamCrypt.decrypt_block
train
def decrypt_block(self, cipherText):
    """Decrypt a 16-byte block of data.

    NOTE: This function was formerly called `decrypt`, but was changed
    when support for decrypting arbitrary-length strings was added.

    Args:
        cipherText (str): 16-byte data.

    Returns:
        16-byte str.

    Raises:
        TypeError if CamCrypt object has not been initialized.
        ValueError if `cipherText` is not BLOCK_SIZE (i.e. 16) bytes.
    """
    if not self.initialized:
        raise TypeError("CamCrypt object has not been initialized")
    if len(cipherText) != BLOCK_SIZE:
        raise ValueError("cipherText must be %d bytes long (received %d bytes)" %
                         (BLOCK_SIZE, len(cipherText)))
    # Output buffer the C routine writes the plaintext into.
    plain = ctypes.create_string_buffer(BLOCK_SIZE)
    self.decblock(self.bitlen, cipherText, self.keytable, plain)
    return plain.raw
python
{ "resource": "" }
q41079
HAProxyControl.restart
train
def restart(self):
    """
    Performs a soft reload of the HAProxy process.

    Builds the haproxy command line, adds the local peer name on
    versions that support it (>= 1.5.0), and hands the old PID over
    via `-sf` so existing connections are finished gracefully before
    the old process exits.
    """
    version = self.get_version()

    command = [
        "haproxy",
        "-f", self.config_file_path,
        "-p", self.pid_file_path
    ]
    if version and version >= (1, 5, 0):
        # The -L (local peer name) flag arrived with peers support in
        # HAProxy 1.5.0.
        command.extend(["-L", self.peer.name])

    if os.path.exists(self.pid_file_path):
        with open(self.pid_file_path) as fd:
            command.extend(["-sf", fd.read().replace("\n", "")])

    try:
        output = subprocess.check_output(command)
    except subprocess.CalledProcessError as e:
        logger.error("Failed to restart HAProxy: %s", str(e))
        return

    if output:
        # Was `logging.error` (the root logger) -- use this module's
        # logger like every other call in the class.
        logger.error("haproxy says: %s", output)

    logger.info("Gracefully restarted HAProxy.")
python
{ "resource": "" }
q41080
HAProxyControl.get_version
train
def get_version(self):
    """
    Returns a tuple representing the installed HAProxy version.

    The value of the tuple is (<major>, <minor>, <patch>), e.g. if
    HAProxy version 1.5.3 is installed, this will return `(1, 5, 3)`.
    Returns None if the version cannot be determined.
    """
    command = ["haproxy", "-v"]

    try:
        output = subprocess.check_output(command)
        # check_output returns bytes on Python 3; decode before the
        # str split/regex below (previously raised TypeError there).
        if isinstance(output, bytes):
            output = output.decode()
        version_line = output.split("\n")[0]
    except subprocess.CalledProcessError as e:
        logger.error("Could not get HAProxy version: %s", str(e))
        return None

    match = version_re.match(version_line)
    if not match:
        logger.error("Could not parse version from '%s'", version_line)
        return None

    version = (
        int(match.group("major")),
        int(match.group("minor")),
        int(match.group("patch"))
    )
    logger.debug("Got HAProxy version: %s", version)
    return version
python
{ "resource": "" }
q41081
HAProxyControl.get_info
train
def get_info(self):
    """
    Parses the output of a "show info" HAProxy command and returns a
    simple dictionary of the results.

    HAProxy's CamelCase labels are converted to snake_case keys; an
    empty response yields an empty dict.
    """
    info_response = self.send_command("show info")
    if not info_response:
        return {}

    def convert_camel_case(string):
        # e.g. "CurrConns" -> "curr_conns", via the module-level
        # first_cap_re/all_cap_re patterns.
        return all_cap_re.sub(
            r'\1_\2',
            first_cap_re.sub(r'\1_\2', string)
        ).lower()

    return dict(
        (convert_camel_case(label), value)
        for label, value in [
            line.split(": ")
            for line in info_response.split("\n")
        ]
    )
python
{ "resource": "" }
q41082
HAProxyControl.get_active_nodes
train
def get_active_nodes(self): """ Returns a dictionary of lists, where the key is the name of a service and the list includes all active nodes associated with that service. """ # the -1 4 -1 args are the filters <proxy_id> <type> <server_id>, # -1 for all proxies, 4 for servers only, -1 for all servers stats_response = self.send_command("show stat -1 4 -1") if not stats_response: return [] lines = stats_response.split("\n") fields = lines.pop(0).split(",") # the first field is the service name, which we key off of so # it's not included in individual node records fields.pop(0) active_nodes = collections.defaultdict(list) for line in lines: values = line.split(",") service_name = values.pop(0) active_nodes[service_name].append( dict( (fields[i], values[i]) for i in range(len(fields)) ) ) return active_nodes
python
{ "resource": "" }
q41083
HAProxyControl.enable_node
train
def enable_node(self, service_name, node_name):
    """
    Enables a given node name for the given service name via the
    "enable server" HAProxy command.
    """
    logger.info("Enabling server %s/%s", service_name, node_name)
    command = "enable server %s/%s" % (service_name, node_name)
    return self.send_command(command)
python
{ "resource": "" }
q41084
HAProxyControl.disable_node
train
def disable_node(self, service_name, node_name):
    """
    Disables a given node name for the given service name via the
    "disable server" HAProxy command.
    """
    logger.info("Disabling server %s/%s", service_name, node_name)
    command = "disable server %s/%s" % (service_name, node_name)
    return self.send_command(command)
python
{ "resource": "" }
q41085
HAProxyControl.send_command
train
def send_command(self, command):
    """
    Sends a given command to the HAProxy control socket.

    Returns the response from the socket as a string.  If a known
    error response (e.g. "Permission denied.") is given then the
    appropriate exception is raised.  Returns None when the socket
    connection is refused.
    """
    logger.debug("Connecting to socket %s", self.socket_file_path)
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        sock.connect(self.socket_file_path)
    except IOError as e:
        if e.errno == errno.ECONNREFUSED:
            logger.error("Connection refused. Is HAProxy running?")
            return
        else:
            raise

    sock.sendall((command + "\n").encode())

    response = b""
    while True:
        try:
            chunk = sock.recv(SOCKET_BUFFER_SIZE)
            if chunk:
                response += chunk
            else:
                # Empty read: HAProxy closed its end of the socket.
                break
        except IOError as e:
            # Retry on transient errors; anything else propagates.
            if e.errno not in (errno.EAGAIN, errno.EINTR):
                raise

    sock.close()

    return self.process_command_response(command, response)
python
{ "resource": "" }
q41086
HAProxyControl.process_command_response
train
def process_command_response(self, command, response):
    """
    Translate a raw HAProxy socket *response* for *command*.

    Known error sentinels raise the matching exception; anything else
    is decoded and returned without trailing newlines.
    """
    if response.startswith(b"Unknown command."):
        raise UnknownCommandError(command)
    if response == b"Permission denied.\n":
        raise PermissionError(command)
    if response == b"No such backend.\n":
        raise UnknownServerError(command)

    return response.decode().rstrip("\n")
python
{ "resource": "" }
q41087
alphafilter
train
def alphafilter(request, queryset, template):
    """
    Render the template with the filtered queryset.

    Only the first GET parameter containing '__istartswith' is applied
    as a filter; the unfiltered queryset is also passed to the
    template (for building the filter links).
    """
    qs_filter = {}
    for key in list(request.GET.keys()):
        if '__istartswith' in key:
            qs_filter[str(key)] = request.GET[key]
            break

    return render_to_response(
        template,
        {'objects': queryset.filter(**qs_filter),
         'unfiltered_objects': queryset},
        context_instance=RequestContext(request)
    )
python
{ "resource": "" }
q41088
RawVolume.to_array
train
def to_array(self, channels=2):
    """Return the array of multipliers for the dynamic.

    Mono output is a single column; stereo duplicates the curve into
    two identical columns.
    """
    if channels == 1:
        return self.volume_frames.reshape(-1, 1)
    if channels == 2:
        stacked = np.tile(self.volume_frames, (2, 1))
        return stacked.T
    raise Exception(
        "RawVolume doesn't know what to do with %s channels" % channels)
python
{ "resource": "" }
q41089
SimpleCrawler.generate_simhash
train
def generate_simhash(self, item):
    """
    Generate simhash based on title, description, keywords, p_texts
    and links_text.
    """
    # Gather every text feature into one flat list (`+` yields a new
    # list, so item['p_texts'] itself is not modified).
    texts = item['p_texts'] + item['links_text']
    texts.append(item['title'])
    texts.append(item['description'])
    texts.append(item['keywords'])
    return Simhash(','.join(texts).strip()).hash
python
{ "resource": "" }
q41090
train_phrases
train
def train_phrases(paths, out='data/bigram_model.phrases', tokenizer=word_tokenize, **kwargs):
    """
    Train a bigram phrase model on a list of files.

    Args:
        paths: list of text file paths to stream sentences from.
        out: path the trained Phrases model is saved to.
        tokenizer: callable splitting a sentence into tokens.
        **kwargs: overrides for the Phrases defaults below.
    """
    n = 0
    for path in paths:
        print('Counting lines for {0}...'.format(path))
        n += sum(1 for line in open(path, 'r'))
    print('Processing {0} lines...'.format(n))

    # Phrases defaults, overridable by the caller.  (The previous
    # `{...}.update(kwargs)` assigned None -- dict.update returns
    # None -- which then crashed the `**` expansion below.)
    options = {
        # Change to use less memory. Default is 40m.
        'max_vocab_size': 40000000,
        'threshold': 8.,
    }
    options.update(kwargs)

    print('Training bigrams...')
    # Pass the caller-supplied tokenizer through; it was previously
    # ignored in favor of the hard-coded word_tokenize.
    bigram = Phrases(_phrase_doc_stream(paths, n, tokenizer=tokenizer), **options)

    print('Saving...')
    bigram.save(out)
python
{ "resource": "" }
q41091
_phrase_doc_stream
train
def _phrase_doc_stream(paths, n, tokenizer=word_tokenize):
    """
    Generator to feed sentences to the phrase model.

    Streams `paths` line by line, printing progress against the known
    total line count `n`, and yields one token list per sentence.
    """
    i = 0
    p = Progress()
    for path in paths:
        with open(path, 'r') as f:
            for line in f:
                i += 1
                p.print_progress(i/n)
                # Lowercase before sentence-splitting, then tokenize
                # each sentence separately.
                for sent in sent_tokenize(line.lower()):
                    tokens = tokenizer(sent)
                    yield tokens
python
{ "resource": "" }
q41092
_default_hashfunc
train
def _default_hashfunc(content, hashbits): """ Default hash function is variable-length version of Python's builtin hash. :param content: data that needs to hash. :return: return a decimal number. """ if content == "": return 0 x = ord(content[0]) << 7 m = 1000003 mask = 2 ** hashbits - 1 for c in content: x = ((x * m) ^ ord(c)) & mask x ^= len(content) if x == -1: x = -2 return x
python
{ "resource": "" }
q41093
_default_tokenizer_func
train
def _default_tokenizer_func(content, keyword_weight_pair):
    """
    Default tokenizer function that uses jieba tokenizer.

    :param content: text to tokenize.
    :param keyword_weight_pair: maximum pair number of the keyword-weight list.
    :return: return keyword-weight list. Example:
        [('Example',0.4511233019962264),('Hello',0.25548051420382073),...].
    """
    seg_list = jieba.lcut_for_search(content)
    # Extract keyword-weight list by TF-IDF algorithms and by sorted maximum weight
    return jieba.analyse.extract_tags("".join(seg_list), topK=keyword_weight_pair, withWeight=True)
python
{ "resource": "" }
q41094
Simhash.simhash
train
def simhash(self, content):
    """
    Select policies for simhash on the different types of content.

    None -> sentinel hash of -1; str -> tokenize then build from
    features; other iterables -> build from features directly;
    int -> used verbatim as the hash.  Anything else raises.
    """
    # `collections.Iterable` was removed in Python 3.10; use the
    # collections.abc location instead.
    import collections.abc

    if content is None:
        self.hash = -1
        return

    if isinstance(content, str):
        features = self.tokenizer_func(content, self.keyword_weight_pari)
        self.hash = self.build_from_features(features)
    elif isinstance(content, collections.abc.Iterable):
        self.hash = self.build_from_features(content)
    elif isinstance(content, int):
        self.hash = content
    else:
        raise Exception("Unsupported parameter type %s" % type(content))
python
{ "resource": "" }
q41095
Simhash.is_equal
train
def is_equal(self, another, limit=0.8):
    """
    Determine two simhash are similar or not similar.

    :param another: another simhash.
    :param limit: a limit of the similarity.
    :return: if similarity greater than limit return true and else return false.
    """
    if another is None:
        raise Exception("Parameter another is null")

    if isinstance(another, int):
        distance = self.hamming_distance(another)
    elif isinstance(another, Simhash):
        assert self.hash_bit_number == another.hash_bit_number
        distance = self.hamming_distance(another.hash)
    else:
        raise Exception("Unsupported parameter type %s" % type(another))

    similarity = float(self.hash_bit_number - distance) / self.hash_bit_number
    return similarity > limit
python
{ "resource": "" }
q41096
Simhash.hamming_distance
train
def hamming_distance(self, another):
    """
    Compute hamming distance, hamming distance is a total number of
    different bits of two binary numbers.

    :param another: another simhash value.
    :return: a hamming distance that current simhash and another simhash.
    """
    mask = (1 << self.hash_bit_number) - 1
    diff = (self.hash ^ another) & mask
    count = 0
    while diff:
        diff &= diff - 1  # clear the lowest set bit
        count += 1
    return count
python
{ "resource": "" }
q41097
_validate_date_str
train
def _validate_date_str(str_): """Validate str as a date and return string version of date""" if not str_: return None # Convert to datetime so we can validate it's a real date that exists then # convert it back to the string. try: date = datetime.strptime(str_, DATE_FMT) except ValueError: msg = 'Invalid date format, should be YYYY-MM-DD' raise argparse.ArgumentTypeError(msg) return date.strftime(DATE_FMT)
python
{ "resource": "" }
q41098
_parse_args
train
def _parse_args():
    """Parse sys.argv arguments.

    The access token may be given via --token or stored in
    ~/.nikeplus_access_token; exits with an error when neither is
    available.  Returns the parsed arguments as a dict.
    """
    token_file = os.path.expanduser('~/.nikeplus_access_token')

    parser = argparse.ArgumentParser(description='Export NikePlus data to CSV')

    parser.add_argument('-t', '--token', required=False, default=None,
                        help=('Access token for API, can also store in file %s'
                              ' to avoid passing via command line' % (token_file)))

    parser.add_argument('-s', '--since', type=_validate_date_str,
                        help=('Only process entries starting with YYYY-MM-DD '
                              'and newer'))

    args = vars(parser.parse_args())

    if args['token'] is None:
        try:
            with open(token_file, 'r') as _file:
                access_token = _file.read().strip()
        except IOError:
            # print() call form works on both Python 2 and 3 (the
            # previous bare print statement was Python-2-only syntax).
            print('Must pass access token via command line or store in file %s' % (
                token_file))
            sys.exit(-1)

        args['token'] = access_token

    return args
python
{ "resource": "" }
q41099
EntKeySimilarity.similarity
train
def similarity(self, d, d_):
    """
    Compute a similarity score for two documents.

    Optionally pass in a `term_sim_ref` dict-like,
    which should be able to take `term1, term2` as args
    and return their similarity.

    The score combines an idf-weighted entity-overlap score and an
    idf-weighted token score, each normalized by a size-balancing
    weight.  Returns their sum.
    """
    es = set([e.name for e in d.entities])
    es_ = set([e.name for e in d_.entities])
    # Weight balances unequal entity-set sizes: equals the smaller
    # set's size, i.e. min(len(es), len(es_)).
    e_weight = (len(es) + len(es_) - abs(len(es) - len(es_)))/2
    e_score = sum(self.idf_entity[t] for t in es & es_)

    toks = set(d.tokens)
    toks_ = set(d_.tokens)
    t_weight = (len(toks) + len(toks_) - abs(len(toks) - len(toks_)))/2

    # If no term similarity reference is passed,
    # look only at surface form overlap (i.e. exact overlap)
    shared_toks = toks & toks_
    overlap = [(t, t, self.idf[t]) for t in shared_toks]
    t_score = sum(self.idf[t] for t in shared_toks)
    if self.term_sim_ref is not None:
        # Double-count exact overlaps b/c we are
        # comparing bidirectional term pairs here
        t_score *= 2

        # For each unshared token (in both directions), find its best
        # fuzzy match in the other document and credit the average idf
        # scaled by that similarity.
        for toks1, toks2 in [(toks, toks_), (toks_, toks)]:
            for t in toks1 - shared_toks:
                best_match = max(toks2, key=lambda t_: self.term_sim_ref[t, t_])
                sim = self.term_sim_ref[t, best_match]
                t_score += sim * ((self.idf[t] + self.idf[best_match])/2)
                if sim > 0:
                    overlap.append((t, best_match, sim * ((self.idf[t] + self.idf[best_match])/2)))

    # Adjust term weight
    #t_weight /= 2

    # Invert the weights so larger shared vocabularies don't dominate;
    # zero weights stay zero to avoid division by zero.
    t_weight = 1/t_weight if t_weight != 0 else 0
    e_weight = 1/e_weight if e_weight != 0 else 0
    t_score *= t_weight
    e_score *= e_weight

    if self.debug:
        print('\n-------------------------')
        print((d.id, d_.id))
        print('DOC:', d.id)
        print('DOC:', d_.id)
        print('\tEntities:')
        print('\t', es)
        print('\t', es_)
        print('\t\tEntity overlap:', es & es_)
        print('\t\tEntity weight:', e_weight)
        print('\t\tEntity score:', e_score)
        print('\tTokens:')
        print('\t\t', toks)
        print('\t\t', toks_)
        print('\t\tToken overlap:', overlap)
        print('\t\tToken weight:', t_weight)
        print('\t\tToken score:', t_score)
        print('\tTotal score:', t_score + e_score)

    return t_score + e_score
python
{ "resource": "" }