Dataset columns: code (string, 4 to 4.48k chars), docstring (string, 1 to 6.45k chars), _id (string, 24 chars).
def test_attr(self): <NEW_LINE> <INDENT> ev = self._ev() <NEW_LINE> self.assertRaises(AttributeError, lambda: ev.foo) <NEW_LINE> self.assertRaises(KeyError, lambda: ev['foo']) <NEW_LINE> ev.foo = 'bar' <NEW_LINE> self.assertEqual('bar', ev.foo) <NEW_LINE> self.assertEqual('bar', ev['foo']) <NEW_LINE> self.assertRaises(AttributeError, lambda: ev.bar) <NEW_LINE> self.assertRaises(KeyError, lambda: ev['bar']) <NEW_LINE> ev['bar'] = 'foo' <NEW_LINE> self.assertEqual('foo', ev.bar) <NEW_LINE> self.assertEqual('foo', ev['bar'])
Attributes and indexed keys are equivalent.
625941bd6fb2d068a760ef8d
def daterange(start, stop, steps): <NEW_LINE> <INDENT> delta = (stop - start) / steps <NEW_LINE> current = start <NEW_LINE> while current + delta <= stop: <NEW_LINE> <INDENT> yield current, current + delta <NEW_LINE> current += delta
A generator for stepping through time.
625941bdb57a9660fec33774
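A brief usage sketch for the daterange generator above; the dates and step count are illustrative, not from the source:

import datetime as dt

start = dt.datetime(2024, 1, 1)
stop = dt.datetime(2024, 1, 2)

# Split one day into four six-hour windows.
for window_start, window_end in daterange(start, stop, 4):
    print(window_start, "->", window_end)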
def test_bytes_rw(self): <NEW_LINE> <INDENT> jobj = self.read_file("testBytes.ser") <NEW_LINE> pobj = javaobj.loads(jobj) <NEW_LINE> _logger.debug("Read bytes: %s", pobj) <NEW_LINE> self.assertEqual(pobj, "HelloWorld") <NEW_LINE> self._try_marshalling(jobj, pobj)
Reads testBytes.ser and checks the serialization process
625941bd76d4e153a657ea23
def combine_moves(board_state_val, x, y, new_x, new_y, x2, y2, new_x2, new_y2): <NEW_LINE> <INDENT> board_state = copy.deepcopy(board_state_val) <NEW_LINE> player_val = board_state[x][y] <NEW_LINE> ai_val = board_state[x2][y2] <NEW_LINE> if new_x == new_x2 and new_y == new_y2: <NEW_LINE> <INDENT> piece_type1 = board_state[x][y] <NEW_LINE> piece_type2 = board_state[x2][y2] <NEW_LINE> if piece_type1 == "p" and piece_type2 == "P": <NEW_LINE> <INDENT> board_state[x][y] = "W" <NEW_LINE> board_state[x2][y2] = "W" <NEW_LINE> <DEDENT> elif piece_type1 == "k" and piece_type2 == "K": <NEW_LINE> <INDENT> board_state[x][y] = "W" <NEW_LINE> board_state[x2][y2] = "W" <NEW_LINE> <DEDENT> elif piece_type1 == "p" and piece_type2 == "K": <NEW_LINE> <INDENT> board_state[x][y] = "W" <NEW_LINE> board_state[new_x2][new_y2] = board_state[x2][y2] <NEW_LINE> board_state[x2][y2] = "W" <NEW_LINE> <DEDENT> elif piece_type1 == "k" and piece_type2 == "P": <NEW_LINE> <INDENT> board_state[x2][y2] = "W" <NEW_LINE> board_state[new_x][new_y] = board_state[x][y] <NEW_LINE> board_state[x][y] = "W" <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> board_state[new_x][new_y] = player_val <NEW_LINE> board_state[x][y] = "W" <NEW_LINE> board_state[new_x2][new_y2] = ai_val <NEW_LINE> board_state[x2][y2] = "W" <NEW_LINE> <DEDENT> if ai_val == "P" and new_x2 == 4: <NEW_LINE> <INDENT> board_state[new_x2][new_y2] = "K" <NEW_LINE> <DEDENT> if player_val == "p" and new_x == 0: <NEW_LINE> <INDENT> board_state[new_x][new_y] = "k" <NEW_LINE> <DEDENT> return board_state
Combines two moves onto a given board state without any drawing functionality. Uses the rules of simultaneous movement in Apocalypse when combining the moves :param board_state_val: **multi-dimensional list** Board state :param x: **int** current x coord of the first piece to move :param y: **int** current y coord of the first piece to move :param new_x: **int** new x coord of the first piece to move :param new_y: **int** new y coord of the first piece to move :param x2: **int** current x coord of the second piece to move :param y2: **int** current y coord of the second piece to move :param new_x2: **int** new x coord of the second piece to move :param new_y2: **int** new y coord of the second piece to move :return: **multi-dimensional list** Board state with the moves combined
625941bdde87d2750b85fc82
def index_queryset(self, using=None): <NEW_LINE> <INDENT> return Test.objects.filter(pub_date__lte=datetime.datetime.now())
Used when the entire index for model is updated.
625941bd16aa5153ce36236b
def createProfile(colorSpace, colorTemp=-1): <NEW_LINE> <INDENT> if colorSpace not in ["LAB", "XYZ", "sRGB"]: <NEW_LINE> <INDENT> raise PyCMSError("Color space not supported for on-the-fly profile creation (%s)" % colorSpace) <NEW_LINE> <DEDENT> if colorSpace == "LAB": <NEW_LINE> <INDENT> if type(colorTemp) == type(5000.0): <NEW_LINE> <INDENT> colorTemp = int(colorTemp + 0.5) <NEW_LINE> <DEDENT> if type (colorTemp) != type (5000): <NEW_LINE> <INDENT> raise PyCMSError("Color temperature must be a positive integer, \"%s\" not valid" % colorTemp) <NEW_LINE> <DEDENT> <DEDENT> try: <NEW_LINE> <INDENT> return core.createProfile(colorSpace, colorTemp) <NEW_LINE> <DEDENT> except (TypeError, ValueError) as v: <NEW_LINE> <INDENT> raise PyCMSError(v)
ImageCms.createProfile(colorSpace, [colorTemp]) Returns a CmsProfile class object colorSpace = string, the color space of the profile you wish to create. Currently only "LAB", "XYZ", and "sRGB" are supported. colorTemp = positive integer for the white point for the profile, in degrees Kelvin (e.g. 5000, 6500, 9600, etc.). The default is the D50 illuminant if omitted (5000 K). colorTemp is ONLY applied to LAB profiles, and is ignored for XYZ and sRGB. If colorSpace is not in ["LAB", "XYZ", "sRGB"], a PyCMSError is raised. If using LAB and colorTemp is not a positive integer, a PyCMSError is raised. If an error occurs while creating the profile, a PyCMSError is raised. Use this function to create common profiles on-the-fly instead of having to supply a profile on disk and knowing the path to it. It returns a normal CmsProfile object that can be passed to ImageCms.buildTransformFromOpenProfiles() to create a transform to apply to images.
625941bdb5575c28eb68def1
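For orientation, a short sketch of the public Pillow API this entry mirrors; ImageCms.createProfile is a real Pillow function, and the 6500 K value is just an example:

from PIL import ImageCms

# On-the-fly LAB profile with a 6500 K white point; colorTemp is ignored for sRGB.
lab_profile = ImageCms.createProfile("LAB", 6500)
srgb_profile = ImageCms.createProfile("sRGB")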
def load_model(self, verbose, dummy, ratio): <NEW_LINE> <INDENT> if self.initialized: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> self.verbose = verbose <NEW_LINE> if self.verbose: <NEW_LINE> <INDENT> print("Initializing keras model...") <NEW_LINE> <DEDENT> keras_graph = Graph() <NEW_LINE> with keras_graph.as_default(): <NEW_LINE> <INDENT> config = ConfigProto() <NEW_LINE> if ratio: <NEW_LINE> <INDENT> config.gpu_options.per_process_gpu_memory_fraction = ratio <NEW_LINE> <DEDENT> self.session = Session(config=config) <NEW_LINE> with self.session.as_default(): <NEW_LINE> <INDENT> self.model = keras.models.load_model( self.model_path, custom_objects={'TorchBatchNorm2D': TorchBatchNorm2D}) <NEW_LINE> self.model.predict(dummy) <NEW_LINE> <DEDENT> <DEDENT> keras_graph.finalize() <NEW_LINE> self.initialized = True
Load the Keras Model
625941bd8e71fb1e9831d69d
def search(self, locationpath_string, details=False): <NEW_LINE> <INDENT> locationpath = parse(locationpath_string) <NEW_LINE> if details: <NEW_LINE> <INDENT> return [(str(x.locationpath), x.dump()) for x in self._search(locationpath)] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return [x.dump() for x in self._search(locationpath)]
Search target path and get json data list. Args: locationpath_string(str): XPath format search string. details(bool): Return searched path with value, default: False (value only). Returns: list: List of json data at target path (details=False). With details True, list of tuples like (target_path (str), dict or list at target path). Note: This method must be implemented in RootBranch only.
625941bdec188e330fd5a697
def __init__(self, movie, ad_reel=None): <NEW_LINE> <INDENT> self.movie = movie <NEW_LINE> self.ad_reel = ad_reel
movie and ad_reel are initialised
625941bd44b2445a33931f92
def database_load(name): <NEW_LINE> <INDENT> accessfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), '.mdcs') <NEW_LINE> if os.path.isfile(accessfile): <NEW_LINE> <INDENT> with open(accessfile) as fp: <NEW_LINE> <INDENT> access_info = json.load(fp) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError('No stored access information file found') <NEW_LINE> <DEDENT> if name in access_info: <NEW_LINE> <INDENT> host = access_info[name]['host'] <NEW_LINE> user = access_info[name]['user'] <NEW_LINE> pswd = access_info[name]['pswd'] <NEW_LINE> cert = access_info[name]['cert'] <NEW_LINE> return MDCS(host, user, pswd=pswd, cert=cert) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError('No access info stored as ' + name)
Loads saved database system information based on the database's assigned name. If password is not saved, a prompt will ask for it. Parameters ---------- name : str The name assigned to stored access information. Returns ------- MDCS An MDCS object with the loaded database access information.
625941bd460517430c39407f
@blueprint.route('/ajax/editor/save', methods=['GET', 'POST']) <NEW_LINE> @PageView.logged <NEW_LINE> def save_post(): <NEW_LINE> <INDENT> data = request.get_json() <NEW_LINE> path = data['path'] <NEW_LINE> prefixes = current_app.config['WEB_EDITOR_PREFIXES'] <NEW_LINE> if prefixes == []: <NEW_LINE> <INDENT> raise Exception("Web editing is not configured") <NEW_LINE> <DEDENT> if prefixes is not None: <NEW_LINE> <INDENT> if not any([path.startswith(prefix) for prefix in prefixes]): <NEW_LINE> <INDENT> return json.dumps({'msg': ("Your post path must begin with one of {}").format(prefixes), 'success': False}) <NEW_LINE> <DEDENT> <DEDENT> kp = None <NEW_LINE> if path in current_repo: <NEW_LINE> <INDENT> kp = current_repo.post(path) <NEW_LINE> if g.user.username not in kp.headers['authors'] and g.user.username not in current_repo.config.editors: <NEW_LINE> <INDENT> return json.dumps({'msg': ("Post with path {} already exists and you are not an author!" "\nPlease try a different path").format(path), 'success': False}) <NEW_LINE> <DEDENT> <DEDENT> kp = kp or KnowledgePost(path=path) <NEW_LINE> headers = {} <NEW_LINE> headers['created_at'] = datetime.strptime(data['created_at'], '%Y-%m-%d').date() <NEW_LINE> headers['updated_at'] = datetime.strptime(data['updated_at'], '%Y-%m-%d').date() <NEW_LINE> headers['title'] = data['title'] <NEW_LINE> headers['path'] = data['path'] <NEW_LINE> headers['thumbnail'] = data.get('feed_image', '') <NEW_LINE> headers['authors'] = [auth.strip() for auth in data['author']] <NEW_LINE> headers['tldr'] = data['tldr'] <NEW_LINE> headers['tags'] = [tag.strip() for tag in data.get('tags', [])] <NEW_LINE> if 'proxy' in data: <NEW_LINE> <INDENT> headers['proxy'] = data['proxy'] <NEW_LINE> <DEDENT> kp.write(urlunquote(data['markdown']), headers=headers) <NEW_LINE> current_repo.add(kp, update=True, message=headers['title']) <NEW_LINE> update_index() <NEW_LINE> return json.dumps({'path': path})
Save the post
625941bda79ad161976cc038
def rainbow(strip, wait_ms=55, iterations=1): <NEW_LINE> <INDENT> for j in range(256*iterations): <NEW_LINE> <INDENT> for i in range(strip.count): <NEW_LINE> <INDENT> strip.setPixelColor(i, wheel((i+j) & 255)) <NEW_LINE> <DEDENT> strip.show() <NEW_LINE> time.sleep(wait_ms/1000.0)
Draw rainbow that fades across all pixels at once.
625941bd38b623060ff0ace1
def get_or_create_user(self, cas_data, **overrides): <NEW_LINE> <INDENT> user_model = get_user_model() <NEW_LINE> username = cas_data['username'] <NEW_LINE> try: <NEW_LINE> <INDENT> return user_model.objects.get(username=username) <NEW_LINE> <DEDENT> except user_model.DoesNotExist: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> if settings.get('auto_create_user'): <NEW_LINE> <INDENT> return self.create_user(cas_data, **overrides)
Get user. ``cas_data`` must contain a 'username' key. If the corresponding user already exists, it will be returned as is; if it doesn't, a new user record will be created and returned. .. note:: The ``CAS.auto_create_user`` setting can be set to ``False`` to disable the auto-creation of users. ``overrides`` are passed through to :meth:`create_user`.
625941bd50812a4eaa59c217
def play(self): <NEW_LINE> <INDENT> open_movies_page(self.movies)
Use fresh_tomatoes.open_movies_page to generate the page, and open it in browser.
625941bd5fcc89381b1e15b0
def coords2Bearing(self, coordsA, coordsB): <NEW_LINE> <INDENT> startLat = math.radians(float(coordsA[0])) <NEW_LINE> startLong = math.radians(float(coordsA[1])) <NEW_LINE> endLat = math.radians(float(coordsB[0])) <NEW_LINE> endLong = math.radians(float(coordsB[1])) <NEW_LINE> dLong = endLong - startLong <NEW_LINE> dPhi = math.log(math.tan(endLat / 2.0 + math.pi / 4.0) / math.tan(startLat / 2.0 + math.pi / 4.0)) <NEW_LINE> if abs(dLong) > math.pi: <NEW_LINE> <INDENT> if dLong > 0.0: <NEW_LINE> <INDENT> dLong = -(2.0 * math.pi - dLong) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dLong = (2.0 * math.pi + dLong) <NEW_LINE> <DEDENT> <DEDENT> return ((math.degrees(math.atan2(dLong, dPhi)) + 360.0) % 360.0)
Get a bearing given two sets of GPS coords, assuming A is the beginning coordinate in the line segment and B is the last coordinate received. Returns the bearing in degrees as a float in [0, 360).
625941bd377c676e9127209d
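A usage sketch for coords2Bearing, assuming the function above is available at module scope; the Nav wrapper and the Paris/London coordinates are invented for illustration:

import math  # needed by coords2Bearing

class Nav:
    coords2Bearing = coords2Bearing  # bind the function above as a method

# Rhumb-line bearing from Paris (48.8566, 2.3522) to London (51.5074, -0.1278).
bearing = Nav().coords2Bearing((48.8566, 2.3522), (51.5074, -0.1278))
print(round(bearing, 1))  # roughly 330 degrees, i.e. north-west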
def calculate_spo2(self): <NEW_LINE> <INDENT> self.ir_ac2_sum += self.ir_ac ** 2 <NEW_LINE> self.red_ac2_sum += self.red_ac ** 2 <NEW_LINE> self.samples_recorded += 1 <NEW_LINE> if self.beats_detected == SPO2_N_BEATS: <NEW_LINE> <INDENT> self.ac_sq_ratio = 100.0 * math.log(self.red_ac2_sum/self.samples_recorded) / math.log(self.ir_ac2_sum/self.samples_recorded) <NEW_LINE> index = 0 <NEW_LINE> if self.ac_sq_ratio > 66: <NEW_LINE> <INDENT> index = self.ac_sq_ratio - 66 <NEW_LINE> <DEDENT> elif self.ac_sq_ratio > 50: <NEW_LINE> <INDENT> index = self.ac_sq_ratio - 50 <NEW_LINE> <DEDENT> self.samples_recorded = 0 <NEW_LINE> self.ir_ac2_sum = 0.0 <NEW_LINE> self.red_ac2_sum = 0.0 <NEW_LINE> self.beats_detected = 0.0 <NEW_LINE> self.spo2 = 0.0 <NEW_LINE> self.spo2 = spo2_lut[int(index)] <NEW_LINE> <DEDENT> return self.spo2
Calculates the SPO2 value (Not sure how reliable this is) Source: https://github.com/oxullo/Arduino-MAX30100
625941bd2eb69b55b151c79f
def run_kmeans(term_freq_matrix, num_clusters, dist_metric, term_cond='centroids', num_iter=None): <NEW_LINE> <INDENT> centroids = term_freq_matrix[ ( numpy.random.choice(term_freq_matrix.shape[0], num_clusters, False) ), :] <NEW_LINE> iteration = 0 <NEW_LINE> terminate = False <NEW_LINE> assigned_clusters = None <NEW_LINE> while not terminate: <NEW_LINE> <INDENT> iteration += 1 <NEW_LINE> prev_centroid = centroids <NEW_LINE> distances = get_center_distances(term_freq_matrix, centroids, dist_metric) <NEW_LINE> assigned_clusters = numpy.array([dist.argmin() for dist in distances]) <NEW_LINE> centroids = get_cluster_centers(term_freq_matrix, assigned_clusters, num_clusters) <NEW_LINE> if iteration % 10 == 0: <NEW_LINE> <INDENT> logger.debug("Finished iteration {}".format(iteration)) <NEW_LINE> <DEDENT> if (centroids != prev_centroid).nnz == 0: <NEW_LINE> <INDENT> terminate = True <NEW_LINE> <DEDENT> <DEDENT> return assigned_clusters, iteration
Performs k means clustering on the term frequency matrix :param csr_matrix term_freq_matrix: the term frequency matrix :param int num_clusters: the number of article clusters :param str dist_metric: the distance metric to use (`euclidean`, `cosine`, or `jaccard`) :param str term_cond: the termination condition (`centroids`, `sse`, `iter`) - `centroids`: terminate when there is no change in centroid position - `sse`: terminate when the SSE value increases in the next iteration - `iter`: terminate when the maximum preset value of iterations is complete :param int num_iter: the number of iterations to terminate at if ``term_cond`` is set to `iter` :return: the cluster assignments, number of iterations taken to complete :rtype: numpy.ndarray, int
625941bd6fece00bbac2d62f
def __ne__(self, other: 'ConfigCACors') -> bool: <NEW_LINE> <INDENT> return not self == other
Return `true` when self and other are not equal, false otherwise.
625941bd287bf620b61d395a
def on_ffmpeg(self, event): <NEW_LINE> <INDENT> with wx.FileDialog(self, _("Choose the {} " "executable").format(self.ffmpeg), "", "", f"ffmpeg binary (*{self.ffmpeg})|*{self.ffmpeg}| " f"All files (*.*)|*.*", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as dlgfile: <NEW_LINE> <INDENT> if dlgfile.ShowModal() == wx.ID_OK: <NEW_LINE> <INDENT> if os.path.basename(dlgfile.GetPath()) == self.ffmpeg: <NEW_LINE> <INDENT> self.ffmpegTxt.Clear() <NEW_LINE> ffmpegpath = PageThree.GETPATH(dlgfile.GetPath()) <NEW_LINE> self.ffmpegTxt.write(ffmpegpath) <NEW_LINE> self.parent.ffmpeg = ffmpegpath
Open filedialog to locate ffmpeg executable
625941bd498bea3a759b99a3
def key_pressed(self, key: str): <NEW_LINE> <INDENT> super().key_pressed(key) <NEW_LINE> if self.stage == Stage.character_selection: <NEW_LINE> <INDENT> if key == "KEY_RIGHT": <NEW_LINE> <INDENT> self.monsters[0].set_selected(False) <NEW_LINE> self.rotate_monsters(1) <NEW_LINE> self.monsters[0].set_selected(True) <NEW_LINE> <DEDENT> elif key == "KEY_LEFT": <NEW_LINE> <INDENT> self.monsters[0].set_selected(False) <NEW_LINE> self.rotate_monsters(-1) <NEW_LINE> self.monsters[0].set_selected(True) <NEW_LINE> <DEDENT> elif key == "\n" or key == " ": <NEW_LINE> <INDENT> self.select_monster(self.currently_selecting, self.monsters[0]) <NEW_LINE> <DEDENT> <DEDENT> elif self.stage == Stage.restart: <NEW_LINE> <INDENT> if key == "KEY_RIGHT": <NEW_LINE> <INDENT> self.restart_dialog.next_selection() <NEW_LINE> <DEDENT> if key == "KEY_LEFT": <NEW_LINE> <INDENT> self.restart_dialog.next_selection() <NEW_LINE> <DEDENT> if key == "\n" or key == " ": <NEW_LINE> <INDENT> if self.restart_dialog.selection == "Yes": <NEW_LINE> <INDENT> self.set_stage(Stage.character_selection) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> menu_scene = Scenes.Menu.MenuScene.MenuScene() <NEW_LINE> self.change_scene(menu_scene)
If we're in character selection, move the selection right or left based on user input. If we're in the restart section, move the selection in the restart dialog. :param key: Key that the user pressed. :return: None
625941bd7c178a314d6ef34d
def test_successful(self): <NEW_LINE> <INDENT> url = '/%s/jobs/' % self.api <NEW_LINE> response = self.client.generic('GET', url) <NEW_LINE> self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) <NEW_LINE> result = json.loads(response.content) <NEW_LINE> self.assertEqual(len(result['results']), 3) <NEW_LINE> for entry in result['results']: <NEW_LINE> <INDENT> expected = None <NEW_LINE> if entry['id'] == self.job1.id: <NEW_LINE> <INDENT> expected = self.job1 <NEW_LINE> <DEDENT> elif entry['id'] == self.job2.id: <NEW_LINE> <INDENT> expected = self.job2 <NEW_LINE> <DEDENT> elif entry['id'] == self.job3.id: <NEW_LINE> <INDENT> expected = self.job3 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.fail('Found unexpected result: %s' % entry['id']) <NEW_LINE> <DEDENT> self.assertEqual(entry['job_type']['name'], expected.job_type.name) <NEW_LINE> self.assertEqual(entry['job_type_rev']['job_type']['id'], expected.job_type.id) <NEW_LINE> self.assertEqual(entry['is_superseded'], expected.is_superseded)
Tests successfully calling the jobs view.
625941bdfff4ab517eb2f32d
def set_scint_fraction(self, isotope, fraction): <NEW_LINE> <INDENT> if isotope in self._fractions: <NEW_LINE> <INDENT> self._fractions[isotope] = fraction <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("Isotope not part of the Te set.")
Set the fraction of an isotope in the scintillator.
625941bd9c8ee82313fbb668
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Requires: Python 3+") <NEW_LINE> def test_dictionary_nested(): <NEW_LINE> <INDENT> encoded = bencode({'foo': 42, 'bar': {'sketch': 'parrot', 'foobar': 23}}) <NEW_LINE> assert encoded == 'd3:bard6:foobari23e6:sketch6:parrote3:fooi42ee'.encode('utf-8')
Test the handling of nested dictionaries.
625941bd3317a56b86939b59
def remove_chroms(inbam, outbam, rmchroms, log): <NEW_LINE> <INDENT> treatment = AlignmentFile(inbam, 'rb') <NEW_LINE> header = treatment.header <NEW_LINE> new_chroms = [] <NEW_LINE> chrnames = [] <NEW_LINE> tid_map = [-1 for i in range(len(header['SQ']))] <NEW_LINE> N = 0 <NEW_LINE> chr_to_remove_reason = {} <NEW_LINE> for i, seq in enumerate(header['SQ']): <NEW_LINE> <INDENT> keep = True <NEW_LINE> for chrom in rmchroms: <NEW_LINE> <INDENT> if chrom in seq['SN']: <NEW_LINE> <INDENT> keep = False <NEW_LINE> chr_to_remove_reason[seq['SN']] = chrom <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> if keep: <NEW_LINE> <INDENT> tid_map[i] = N <NEW_LINE> N += 1 <NEW_LINE> new_chroms.append(seq) <NEW_LINE> chrnames.append(seq['SN']) <NEW_LINE> <DEDENT> <DEDENT> new_header = {'SQ': new_chroms} <NEW_LINE> bam_writer = AlignmentFile(outbam, 'wb', header=new_header) <NEW_LINE> log_content = {chrom: 0 for chrom in rmchroms} <NEW_LINE> log_content['remaining'] = 0 <NEW_LINE> log_content['unmapped'] = 0 <NEW_LINE> log_content['total'] = 0 <NEW_LINE> for aln in treatment.fetch(until_eof=True): <NEW_LINE> <INDENT> log_content['total'] += 1 <NEW_LINE> if aln.is_unmapped: <NEW_LINE> <INDENT> log_content['unmapped'] += 1 <NEW_LINE> continue <NEW_LINE> <DEDENT> if aln.reference_name in chrnames: <NEW_LINE> <INDENT> aln.reference_id = tid_map[aln.reference_id] <NEW_LINE> if aln.is_paired and aln.is_proper_pair: <NEW_LINE> <INDENT> aln.next_reference_id = tid_map[aln.next_reference_id] <NEW_LINE> <DEDENT> bam_writer.write(aln) <NEW_LINE> log_content['remaining'] += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> log_content[chr_to_remove_reason[aln.reference_name]] += 1 <NEW_LINE> <DEDENT> <DEDENT> bam_writer.close() <NEW_LINE> treatment.close() <NEW_LINE> with open(log, 'w') as f: <NEW_LINE> <INDENT> f.write('Readgroup\tcounts\n') <NEW_LINE> for icnt in log_content: <NEW_LINE> <INDENT> f.write('{}\t{}\n'.format(icnt, log_content[icnt]))
This function takes a bam-file and outputs a bam-file in which the specified chromosomes have been removed. The function searches for matching chromosomes by substring matching. For example, rmchroms=['chrM', '_random'] would remove 'chrM' as well as all random chromosomes, e.g. chr1_KI270706v1_random.
625941bd45492302aab5e1b4
def getX(self): <NEW_LINE> <INDENT> return self.x
Returns the x-coordinate of Alien
625941bd5fc7496912cc3871
def get_file_list(dir): <NEW_LINE> <INDENT> rtn_list = [] <NEW_LINE> num_files = sum( (len(file_list) for _, _, file_list in os.walk(dir)) ) <NEW_LINE> logger.info('Scanning directory %s' % dir) <NEW_LINE> logger.info('Files to scan: %d' % num_files) <NEW_LINE> n = 0 <NEW_LINE> for dir_name, subdir_list, file_list in os.walk(dir): <NEW_LINE> <INDENT> for fname in file_list: <NEW_LINE> <INDENT> abs_fname = os.path.abspath(os.path.join(dir_name, fname)) <NEW_LINE> fstat = os.stat(abs_fname) <NEW_LINE> fhash = hash_file(abs_fname) <NEW_LINE> f = FileInfo(filename = abs_fname, mtime = fstat.st_mtime, size = fstat.st_size, hash = fhash) <NEW_LINE> rtn_list.append(f) <NEW_LINE> n += 1 <NEW_LINE> if n%100 == 0: <NEW_LINE> <INDENT> logger.info('Scan progress: %d / %d (%.1f%%)' % (n, num_files, n/num_files*100.)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> logger.info('Scanning complete.') <NEW_LINE> return rtn_list
Scan directory tree and generate a list of files. File information is saved in objects of class FileInfo.
625941bd0a50d4780f666d83
def _validate_predefined_entity_value(self, entity, entity_name, allowed_property_map, required_properties): <NEW_LINE> <INDENT> _assert_condition(not entity.HasField('key'), 'The %s entity has a key.' % entity_name) <NEW_LINE> property_map = {} <NEW_LINE> for prop in entity.property: <NEW_LINE> <INDENT> property_name = prop.name <NEW_LINE> _assert_condition(property_name in allowed_property_map, 'The %s entity property "%s" is not allowed.' % (entity_name, property_name)) <NEW_LINE> value = prop.value <NEW_LINE> hasser = '%s_value' % allowed_property_map[property_name] <NEW_LINE> _assert_condition( value.HasField(hasser), ('The %s entity property "%s" is the wrong type.' % (entity_name, property_name))) <NEW_LINE> _assert_condition(not value.HasField('meaning'), 'The %s entity property "%s" has a meaning.' % (entity_name, property_name)) <NEW_LINE> _assert_condition(not value.indexed, 'The %s entity property "%s" is indexed.' % (entity_name, property_name)) <NEW_LINE> property_map[property_name] = value <NEW_LINE> <DEDENT> for required_property_name in required_properties: <NEW_LINE> <INDENT> _assert_condition(required_property_name in property_map, 'The %s entity is missing required property "%s".' % (entity_name, required_property_name)) <NEW_LINE> <DEDENT> return property_map
Validates a predefined entity (e.g. a user or a point). Args: entity: the predefined entity (an entity_v4_pb.Entity) entity_name: the name of the entity (used in error messages) allowed_property_map: a dict whose keys are property names allowed in the entity and values are the expected types of these properties required_properties: a list of required property names Returns: a dict of entity_v4_pb2.Value objects keyed by property name Raises: ValidationError: if the entity is invalid
625941bd63f4b57ef0001013
def reverseList(self, head): <NEW_LINE> <INDENT> if head is None or head.next is None: <NEW_LINE> <INDENT> return head <NEW_LINE> <DEDENT> node = self.reverseList(head.next) <NEW_LINE> head.next.next = head <NEW_LINE> head.next = None <NEW_LINE> return node
:type head: ListNode :rtype: ListNode
625941bd31939e2706e4cd61
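A self-contained check of the recursive reverseList above; the minimal ListNode class is assumed, mirroring the usual LeetCode definition:

class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

class Solution:
    reverseList = reverseList  # bind the function above as a method

# Build 1 -> 2 -> 3, reverse it, and read it back.
head = ListNode(1, ListNode(2, ListNode(3)))
node = Solution().reverseList(head)
out = []
while node:
    out.append(node.val)
    node = node.next
print(out)  # [3, 2, 1]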
def error_408(message = None): <NEW_LINE> <INDENT> if not isinstance(message, str) or len(message) < 1: <NEW_LINE> <INDENT> message = 'Request Timeout' <NEW_LINE> <DEDENT> return _error(code = 408, message = message)
JSON error handler for HTTP error 408
625941bdd164cc6175782c41
@should_show.command("fetch-data") <NEW_LINE> @click.option("--db-url", envvar="PGDATABASE") <NEW_LINE> @click.argument("filename", type=click.File("r")) <NEW_LINE> @click.argument("output", type=click.File("w")) <NEW_LINE> def fetch_training_data(filename, output, db_url=None): <NEW_LINE> <INDENT> r2dt.write_training_data(filename, db_url, output)
This builds a CSV file of training data to use for the model building. I keep it separate so I can build a training csv and play with it interactively before committing the final model-building logic to the pipeline.
625941bd07f4c71912b1137a
def test_add_needs_release(self): <NEW_LINE> <INDENT> bug = Bug("1", "kanban", MEDIUM, FIX_COMMITTED, "A title", merge_proposal="url", merge_proposal_status=MERGED, tags=["verified"]) <NEW_LINE> kanban_board = self.create_test_class() <NEW_LINE> kanban_board.add(bug) <NEW_LINE> self.assertEqual([bug], kanban_board.bugs) <NEW_LINE> self.assertEqual([], kanban_board.queued) <NEW_LINE> self.assertEqual([], kanban_board.in_progress) <NEW_LINE> self.assertEqual([], kanban_board.needs_review) <NEW_LINE> self.assertEqual([], kanban_board.needs_testing) <NEW_LINE> self.assertEqual([bug], kanban_board.needs_release) <NEW_LINE> self.assertEqual([], kanban_board.released)
A L{Bug} in the 'Needs release' category is stored in the L{BugCollectionMixin.bugs} and the L{Story.needs_release} lists, in the default story.
625941bdff9c53063f47c0e8
def __init__(self, timestamp, fel, warehouse, name=""): <NEW_LINE> <INDENT> self.timestamp = timestamp <NEW_LINE> self.fel = fel <NEW_LINE> self.warehouse = warehouse <NEW_LINE> self.name = name
Initializes event object :param timestamp: time at which the event occurs :param fel: future event list the event is scheduled on :param warehouse: warehouse the event operates on :param name: optional event name
625941bd097d151d1a222d4f
def driverHandle(self): <NEW_LINE> <INDENT> return QVariant()
QVariant Solid.AudioInterface.driverHandle()
625941bd50812a4eaa59c218
def reset_session(self): <NEW_LINE> <INDENT> if not self.is_open(): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if self._active_result is not None: <NEW_LINE> <INDENT> self._active_result.fetch_all() <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> self.protocol.send_reset() <NEW_LINE> <DEDENT> except (InterfaceError, OperationalError) as err: <NEW_LINE> <INDENT> _LOGGER.warning("Warning: An error occurred while attempting to " "reset the session: {}".format(err))
Reset a successfully authenticated session.
625941bd0fa83653e4656eb0
def count_attribute_set(self, attr): <NEW_LINE> <INDENT> attr_present = 0 <NEW_LINE> for item in self.data: <NEW_LINE> <INDENT> if attr in item: <NEW_LINE> <INDENT> if item[attr]['values']: <NEW_LINE> <INDENT> attr_present += 1 <NEW_LINE> continue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.unset[attr] += 1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return attr_present
count the number of items in the data where the attribute `attr` is set :param attr: str identifying a property
625941bd8da39b475bd64e64
def test_explicitly_set_in_schema_subqueryload(self): <NEW_LINE> <INDENT> u1, u2, u3, p11, p12, p21, p22, cm11, cm12, cm21, cm22, cm_empty = self._seed() <NEW_LINE> self.query_count = 0 <NEW_LINE> res = Comment.smart_query( filters=dict(post___public=True, post___user___name__like='Bi%'), schema={ 'post': { 'comments': SUBQUERY } } ).all() <NEW_LINE> self.assertEqual(self.query_count, 2) <NEW_LINE> _ = res[0].post <NEW_LINE> self.assertEqual(self.query_count, 2) <NEW_LINE> _ = res[0].post.user <NEW_LINE> self.assertEqual(self.query_count, 2) <NEW_LINE> _ = res[0].post.comments <NEW_LINE> self.assertEqual(self.query_count, 2)
here we explicitly set in schema that we additionally want to load post___comments
625941bdf9cc0f698b1404f1
@cbook.deprecated("3.2") <NEW_LINE> def mx2num(mxdates): <NEW_LINE> <INDENT> scalar = False <NEW_LINE> if not np.iterable(mxdates): <NEW_LINE> <INDENT> scalar = True <NEW_LINE> mxdates = [mxdates] <NEW_LINE> <DEDENT> ret = epoch2num([m.ticks() for m in mxdates]) <NEW_LINE> if scalar: <NEW_LINE> <INDENT> return ret[0] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return ret
Convert mx :class:`datetime` instance (or sequence of mx instances) to the new date format.
625941bda05bb46b383ec718
def set_working_volume(self, mount: Mount, tip_volume: int) -> None: <NEW_LINE> <INDENT> ...
Inform the hardware how much volume a pipette can aspirate. This will set the limit of aspiration for the pipette, and is necessary for backwards compatibility.
625941bd82261d6c526ab38f
def test_write_only(self): <NEW_LINE> <INDENT> name = self.mktemp() <NEW_LINE> f = File(name, 'w') <NEW_LINE> f.close() <NEW_LINE> with self.assertRaises(ValueError): <NEW_LINE> <INDENT> f = h5py.File(name, 'r', userblock_size=512) <NEW_LINE> <DEDENT> with self.assertRaises(ValueError): <NEW_LINE> <INDENT> f = h5py.File(name, 'r+', userblock_size=512)
User block only allowed for write
625941bd3346ee7daa2b2c5d
def get_env(self): <NEW_LINE> <INDENT> env = {} <NEW_LINE> env['SSH_AUTH_SOCK'] = self._get_filename() <NEW_LINE> return env
Helper for the environment under Unix :return: a dict containing the ``SSH_AUTH_SOCK`` environment variable
625941bd236d856c2ad446cd
def populateBbox(self, margin=0.1): <NEW_LINE> <INDENT> glist = self.getxmlelement('geolocationGrid/geolocationGridPointList') <NEW_LINE> lat = [] <NEW_LINE> lon = [] <NEW_LINE> for child in glist: <NEW_LINE> <INDENT> lat.append( float(child.find('latitude').text)) <NEW_LINE> lon.append( float(child.find('longitude').text)) <NEW_LINE> <DEDENT> self.product.bbox = [min(lat) - margin, max(lat) + margin, min(lon) - margin, max(lon) + margin] <NEW_LINE> print(self.product.bbox) <NEW_LINE> return
Populate the bounding box from metadata.
625941bd283ffb24f3c557fe
def connect_handler(data): <NEW_LINE> <INDENT> post_data = { 'username': username, 'password': password, 'connection_info': data} <NEW_LINE> resp = requests.post(GOALFEED_AUTH_ENDPOINT, post_data, timeout=30).json() <NEW_LINE> channel = pusher.subscribe('private-goals', resp['auth']) <NEW_LINE> channel.bind('goal', goal_handler)
Handle connection.
625941bd2ae34c7f2600d025
def handle(req): <NEW_LINE> <INDENT> if req=='train\n': <NEW_LINE> <INDENT> train() <NEW_LINE> print('Modelo entrenado') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> binary = os.fsencode(req) <NEW_LINE> image_64_decode = base64.decodebytes(binary) <NEW_LINE> result_file = 'image' <NEW_LINE> with open(result_file, 'wb') as file_handler: <NEW_LINE> <INDENT> file_handler.write(image_64_decode) <NEW_LINE> <DEDENT> Image.open(result_file).save(result_file + '.png', 'PNG') <NEW_LINE> os.remove(result_file) <NEW_LINE> image = Image.open(result_file + '.png') <NEW_LINE> image = np.asarray(image) <NEW_LINE> input_image=np.array([np.reshape(image,np.prod(image.shape)),]) <NEW_LINE> res = get_number(input_image) <NEW_LINE> print('La imagen tiene el numero: ',res) <NEW_LINE> <DEDENT> pass
handle a request to the function Args: req (str): request body
625941bd1d351010ab855a10
@given('two positive integer values 0 and 600') <NEW_LINE> def given1(context): <NEW_LINE> <INDENT> context.range_int = (0, 600)
Given two positive integer values 0 and 600.
625941bdac7a0e7691ed3fcc
def train_model_validation(filename_train_validation_set, filename_labels_train_validation_set, filter_density, dropout, input_shape, output_shape, file_path_model, filename_log, channel=1): <NEW_LINE> <INDENT> filenames_train, Y_train, filenames_validation, Y_validation, filenames_features, Y_train_validation = load_data(filename_labels_train_validation_set) <NEW_LINE> model_0 = model_switcher(filter_density, dropout, input_shape, output_shape) <NEW_LINE> batch_size = 256 <NEW_LINE> patience = 15 <NEW_LINE> model_train_validation(model_0, batch_size, patience, input_shape, filename_train_validation_set, filenames_train, Y_train, filenames_validation, Y_validation, file_path_model, filename_log, channel)
train model with validation
625941bd099cdd3c635f0b50
def repo(): <NEW_LINE> <INDENT> if 'SUDO_USER' in os.environ: <NEW_LINE> <INDENT> return os.path.expanduser('~{0}/.blueprints.git'. format(os.environ['SUDO_USER'])) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return os.path.expanduser('~/.blueprints.git')
Return the full path to the Git repository.
625941bd32920d7e50b280c1
def get_base_upgrade_order(packages): <NEW_LINE> <INDENT> upgrade_order = pisilinux.operations.upgrade.upgrade_base <NEW_LINE> order = upgrade_order(packages) <NEW_LINE> return list(order)
Return a list of packages of the system.base component that need to be upgraded or installed in install order -> list_of_strings All the packages of the system.base component must be installed on the system @param packages: list of package names -> list_of_strings
625941bd56ac1b37e62640c8
def copy_from(self, other): <NEW_LINE> <INDENT> if (self.parent is None) != (other.parent is None): <NEW_LINE> <INDENT> raise ValueError('cannot copy scopes of different structures') <NEW_LINE> <DEDENT> if other.parent is not None: <NEW_LINE> <INDENT> self.parent.copy_from(other.parent) <NEW_LINE> <DEDENT> self.isolated = other.isolated <NEW_LINE> self.modified = copy.copy(other.modified) <NEW_LINE> self.used = copy.copy(other.used) <NEW_LINE> self.params = copy.copy(other.params) <NEW_LINE> self.returned = copy.copy(other.returned)
Recursively copies the contents of this scope from another scope.
625941bdd4950a0f3b08c245
def checkDirs(self): <NEW_LINE> <INDENT> for folder in [MCBUP_LOGDIR,MCBUP_MASTER_STORE,MCBUP_WORKING_STORE]: <NEW_LINE> <INDENT> if not os.path.exists(folder): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> os.makedirs(folder) <NEW_LINE> <DEDENT> except OSError as e: <NEW_LINE> <INDENT> print("Unable to create logging directory {0:s} ".format(folder)) <NEW_LINE> sys.exit(1)
Checks if the required directory structure exists, and creates it if not.
625941bdc432627299f04b37
def create_nite(data): <NEW_LINE> <INDENT> col = 'caldat' <NEW_LINE> if not len(data): return np.array([],dtype='S8') <NEW_LINE> dtype ='S%i'%(len(max(data[col], key=len))) <NEW_LINE> nite = data[col].values.astype(dtype) <NEW_LINE> nite = np.char.replace(nite,'-','') <NEW_LINE> return nite
Convert 'caldat' to 'nite'. This is the faster option since it relies on NOAO to calculate the nite.
625941bdc4546d3d9de72925
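A usage sketch; the entry indexes data['caldat'] and calls .values, so a pandas DataFrame is assumed here (the dates are invented):

import numpy as np   # create_nite relies on np
import pandas as pd

df = pd.DataFrame({'caldat': ['2016-01-31', '2016-02-01']})
print(create_nite(df))  # -> [b'20160131' b'20160201']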
def shoot(self, **kwargs): <NEW_LINE> <INDENT> self._validate_shoot_args() <NEW_LINE> options = self._lua.globals.util.serialize( self._lua.table(**self._parse_shoot_args(**kwargs))) <NEW_LINE> if not kwargs.get('stream', True): <NEW_LINE> <INDENT> return self._shoot_nonstreaming( options, wait=kwargs.get('wait', True), download=kwargs.get('download_after', False), remove=kwargs.get('remove_after', False)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self._shoot_streaming(options, dng=kwargs.get('dng', False))
Shoot a picture For all arguments where `None` is a legal type, it signifies that the current value from the camera should be used and not be overridden. :param shutter_speed: Shutter speed in APEX96 (default: None) :type shutter_speed: int/float/None :param real_iso: Canon 'real' ISO (default: None) :type real_iso: int/float/None :param market_iso: Canon 'market' ISO (default: None) :type market_iso: int/float/None :param aperture: Aperture value in APEX96 (default: None) :type aperture: int/float/None :param isomode: Must conform to ISO value in Canon UI, shooting mode must have manual ISO (default: None) :type isomode: int/None :param nd_filter: Toggle Neutral Density filter (default: None) :type nd_filter: boolean/None :param distance: Subject distance. If specified as an integer, the value is interpreted as the distance in millimeters. You can also pass a string that contains a number followed by one of the following units: 'mm', 'cm', 'm', 'ft' or 'in' (default: None) :type distance: str/unicode/int :param dng: Dump raw framebuffer in DNG format (default: False) :type dng: boolean :param wait: Wait for capture to complete (default: True) :type wait: boolean :param download_after: Download and return image data after capture (default: False) :type download_after: boolean :param remove_after: Remove image data after shooting (default: False) :type remove_after: boolean :param stream: Stream and return image data directly from device (will not be saved on camera storage) (default: True) :type stream: boolean
625941bda4f1c619b28aff33
def mirror_targets(self, mirrorlist): <NEW_LINE> <INDENT> if not type(mirrorlist) is list: <NEW_LINE> <INDENT> mirrorlist = [mirrorlist] <NEW_LINE> <DEDENT> elif len(mirrorlist) > 3: <NEW_LINE> <INDENT> raise AttributeError("Mirror list has greater than 3 items") <NEW_LINE> <DEDENT> self['mirror_targets'] = mirrorlist <NEW_LINE> return self
Will set/update the mirror target list of the object. Arguments: mirrorlist -- a list of IPs that can have up to 3 items
625941bd55399d3f055885a7
def push(self, node: SearchNode, placed_by: SearchNode = None) -> bool: <NEW_LINE> <INDENT> if node.name in self.visited: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> cost = 0.0 <NEW_LINE> if placed_by: <NEW_LINE> <INDENT> pcost = self.path_cost(placed_by) <NEW_LINE> wcost = placed_by.weight(node.name) <NEW_LINE> cost = pcost + wcost <NEW_LINE> <DEDENT> self.structure.append((node, placed_by, cost)) <NEW_LINE> self.structure.sort(key=lambda tup: tup[2]) <NEW_LINE> return True
Push node onto priority queue :param node: :param placed_by: :return:
625941bd30bbd722463cbcb7
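A minimal driving sketch for the push method above; SearchNode and the Frontier queue are invented stand-ins that supply only what push touches (name, weight, visited, structure, path_cost):

class SearchNode:
    def __init__(self, name, weights=None):
        self.name = name
        self._weights = weights or {}
    def weight(self, other_name):
        return self._weights[other_name]

class Frontier:
    push = push  # bind the method above
    def __init__(self):
        self.visited = set()
        self.structure = []  # (node, placed_by, cost), kept sorted by cost
    def path_cost(self, node):
        # cost recorded when `node` itself was pushed
        return next(c for n, _, c in self.structure if n is node)

f = Frontier()
a = SearchNode("a", {"b": 2.0, "c": 5.0})
f.push(a)                             # root, cost 0.0
f.push(SearchNode("c"), placed_by=a)
f.push(SearchNode("b"), placed_by=a)
print([(n.name, c) for n, _, c in f.structure])  # [('a', 0.0), ('b', 2.0), ('c', 5.0)]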
def connect(self): <NEW_LINE> <INDENT> self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) <NEW_LINE> server_address = (self.host, self.port) <NEW_LINE> try: <NEW_LINE> <INDENT> self.sock.settimeout(self.connect_timeout) <NEW_LINE> self.sock.connect(server_address) <NEW_LINE> <DEDENT> except socket.error: <NEW_LINE> <INDENT> _LOGGER.debug('Connection timeout: %s:%s', self.host, self.port) <NEW_LINE> return False <NEW_LINE> <DEDENT> self.sock.settimeout(None) <NEW_LINE> self.stream = self.sock.makefile(mode='rwb') <NEW_LINE> service_name = gssapi.Name( self.service_name, name_type=gssapi.NameType.hostbased_service ) <NEW_LINE> self.ctx = gssapi.SecurityContext(name=service_name, usage='initiate') <NEW_LINE> in_token = None <NEW_LINE> while not self.ctx.complete: <NEW_LINE> <INDENT> out_token = self.ctx.step(in_token) <NEW_LINE> if out_token: <NEW_LINE> <INDENT> out_encoded = base64.standard_b64encode(out_token) <NEW_LINE> self._write_line(out_encoded) <NEW_LINE> <DEDENT> if self.ctx.complete: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> in_encoded = self._read_line() <NEW_LINE> in_token = base64.standard_b64decode(in_encoded) <NEW_LINE> if not in_token: <NEW_LINE> <INDENT> raise GSSError('No response from server.') <NEW_LINE> <DEDENT> <DEDENT> _LOGGER.debug('Successfully authenticated.') <NEW_LINE> return True
Connect and authenticate to the server.
625941bd6fb2d068a760ef8e
def insert(self, key, val): <NEW_LINE> <INDENT> self._dict[key] = val
:type key: str :type val: int :rtype: void
625941bda8370b7717052794
def sum_them_all(number): <NEW_LINE> <INDENT> pass
2. Given a number, return the sum of all digits from zero to the number (inclusive).
625941bdb7558d58953c4e0d
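The entry above is an exercise stub (pass). A hedged completion, reading "sum of all digits from zero to the number" as the plain sum of the integers 0..number; the commented variant covers the stricter digit-by-digit reading:

def sum_them_all(number):
    # Sum of the integers 0..number (inclusive).
    return sum(range(number + 1))
    # Stricter reading: sum every decimal digit of every integer 0..number:
    # return sum(int(d) for i in range(number + 1) for d in str(i))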
def minPathSum(self, grid: List[List[int]]) -> int: <NEW_LINE> <INDENT> m, n = len(grid), len(grid[0]) <NEW_LINE> dp = [[None]*n for _ in range(m)] <NEW_LINE> for i in range(m): <NEW_LINE> <INDENT> for j in range(n): <NEW_LINE> <INDENT> if i==0 and j==0: dp[i][j]=grid[i][j] <NEW_LINE> else: <NEW_LINE> <INDENT> dp[i][j] = min(dp[i-1][j] if i>=1 else float('inf'), dp[i][j-1] if j>=1 else float('inf')) + grid[i][j] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return dp[-1][-1]
The aim in this question is to find the shortest path sum. We break it down cell by cell, take whichever neighbour is cheaper to arrive from, the one on the left or the one above, and build up to the answer.
625941bd7b180e01f3dc46f7
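A worked check of the DP above on the classic grid; the Solution wrapper mirrors the LeetCode harness:

from typing import List  # the entry's type hints assume this import

class Solution:
    minPathSum = minPathSum  # bind the function above

grid = [[1, 3, 1],
        [1, 5, 1],
        [4, 2, 1]]
print(Solution().minPathSum(grid))  # 7, via the path 1 -> 3 -> 1 -> 1 -> 1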
def set_memory_limit(self, memory): <NEW_LINE> <INDENT> self._validate_memory_string(memory) <NEW_LINE> self.memory_limit = memory
Set memory limit (maximum) for this operator. Args: memory: a string which can be a number or a number followed by one of "E", "P", "T", "G", "M", "K".
625941bde1aae11d1e749ba9
def __init__(self, plantype): <NEW_LINE> <INDENT> self.plantype = plantype
Constructor
625941bd0c0af96317bb80dc
@with_setup(pretest, posttest) <NEW_LINE> @retry_on_except() <NEW_LINE> def test_iter_overhead_hard(): <NEW_LINE> <INDENT> total = int(1e5) <NEW_LINE> with closing(MockIO()) as our_file: <NEW_LINE> <INDENT> a = 0 <NEW_LINE> with trange(total, file=our_file, leave=True, miniters=1, mininterval=0, maxinterval=0) as t: <NEW_LINE> <INDENT> with relative_timer() as time_tqdm: <NEW_LINE> <INDENT> for i in t: <NEW_LINE> <INDENT> a += i <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> assert a == (total * total - total) / 2.0 <NEW_LINE> a = 0 <NEW_LINE> with relative_timer() as time_bench: <NEW_LINE> <INDENT> for i in _range(total): <NEW_LINE> <INDENT> a += i <NEW_LINE> our_file.write(("%i" % a) * 40) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> assert_performance(125, 'trange', time_tqdm(), 'range', time_bench())
Test overhead of iteration based tqdm (hard)
625941bdf8510a7c17cf95ef
def colorPicker(self, event=None): <NEW_LINE> <INDENT> dlg = PsychoColorPicker(None) <NEW_LINE> dlg.ShowModal() <NEW_LINE> dlg.Destroy() <NEW_LINE> if event is not None: <NEW_LINE> <INDENT> event.Skip()
Open color-picker, sets clip-board to string [r,g,b]. Note: units are psychopy -1..+1 rgb units to three decimal places, preserving 24-bit color.
625941bd6aa9bd52df036c97
def knoepfe_menü(user): <NEW_LINE> <INDENT> alle = { 'index': ('/', 'Startseite'), 'olymp': (reverse('Wettbewerbe:index'), 'Wettbewerbe'), 'ehemalige': (reverse('Ehemalige:index'), 'Ehemalige'), 'impressum': (reverse('impressum'), 'Impressum'), 'db': ('https://olymp.piokg.de/static/db.pdf', 'Datenbanklayout'), 'todo': ('/todo/', 'ToDo-Liste'), } <NEW_LINE> if user.username == 'admin': <NEW_LINE> <INDENT> return [alle[name] for name in ('index', 'olymp', 'ehemalige', 'todo', 'db')] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return [alle[name] for name in ('index', 'olymp', 'db', 'impressum')]
returns buttons for the menu bar as a list of tuples
625941bdd10714528d5ffbd4
def _get_permutations_draw(draw, length): <NEW_LINE> <INDENT> result = itertools.permutations(draw, length) <NEW_LINE> return list(result)
Helper to get all permutations of a draw (list of letters), hint: use itertools.permutations (order of letters matters)
625941bd5fc7496912cc3872
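For illustration, with an invented draw (assumes itertools is imported where the helper lives):

print(_get_permutations_draw('abc', 2))
# [('a', 'b'), ('a', 'c'), ('b', 'a'), ('b', 'c'), ('c', 'a'), ('c', 'b')]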
def draw_step_and_sigmoid(): <NEW_LINE> <INDENT> draw_function(f=lambda x: 1 / (1 + np.exp(-x)), save=False) <NEW_LINE> draw_function(f=lambda x: np.abs(x) / (2 * x) + 1 / 2, save=True, name="step and sigmoid", clear_plot=False)
Plot a smooth curve of the sigmoid and step function on the same figure.
625941bd187af65679ca5012
def get_oci_account(name, user_id): <NEW_LINE> <INDENT> response = handle_request( current_app.config['DATABASE_API_URL'], 'oci_accounts/', 'get', job_data={'name': name, 'user_id': user_id} ) <NEW_LINE> account = response.json() <NEW_LINE> if not account: <NEW_LINE> <INDENT> raise MashException( 'OCI account {account} not found. '.format( account=name ) ) <NEW_LINE> <DEDENT> return account
Get OCI account for given user.
625941bd9f2886367277a784
def wells_from(self, start, num, columnwise=False): <NEW_LINE> <INDENT> start = self.robotize(start) <NEW_LINE> if columnwise: <NEW_LINE> <INDENT> row, col = self.decompose(start) <NEW_LINE> num_rows = self.container_type.row_count() <NEW_LINE> start = col * num_rows + row <NEW_LINE> <DEDENT> return WellGroup(self.all_wells(columnwise).wells[start:start + num])
Return a WellGroup of Wells belonging to this Container starting from the index indicated (in integer or string form) and including the number of succeeding wells specified. Wells are counted from the starting well rowwise unless columnwise is True. Parameters ---------- start : Well, int, str Starting well specified as a Well object, a human-readable well index or an integer well index num : int Number of wells to include columnwise : bool, optional Specifies whether the wells included should be counted columnwise instead of the default rowwise.
625941bd3d592f4c4ed1cf6a
def binary_erosion(x, radius=3): <NEW_LINE> <INDENT> mask = disk(radius) <NEW_LINE> x = _binary_erosion(x, selem=mask) <NEW_LINE> return x
Return binary morphological erosion of an image, see `skimage.morphology.binary_erosion <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.binary_erosion>`__. Parameters ----------- x : 2D array A binary image. radius : int For the radius of mask. Returns ------- numpy.array A processed binary image.
625941bdeab8aa0e5d26da52
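A runnable sketch of the wrapper above; the underscored imports match how the entry aliases scikit-image (the selem keyword is the older skimage API that this entry targets):

import numpy as np
from skimage.morphology import binary_erosion as _binary_erosion, disk

x = np.zeros((9, 9), dtype=bool)
x[1:8, 1:8] = True                        # a 7x7 solid square
print(binary_erosion(x, radius=1).sum())  # 25 pixels survive (a 5x5 core)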
def test_token_null(self): <NEW_LINE> <INDENT> payload = {"token":'', 'oldPassword': '123456', 'newPassword': '654321'} <NEW_LINE> r2 = self.s.post(self.base_url, data=payload) <NEW_LINE> self.result = r2.json() <NEW_LINE> self.assertEqual(self.result['result'], False) <NEW_LINE> pwd = test_data.ua_emp_search(value="PASSWORD", type='β') <NEW_LINE> self.assertEqual(pwd, '508df4cb2f4d8f80519256258cfb975f')
Empty token
625941bd85dfad0860c3ad4e
def set_thread_priority(self, *args, **kwargs): <NEW_LINE> <INDENT> return _howto_swig.chan_info_parser_sptr_set_thread_priority(self, *args, **kwargs)
set_thread_priority(chan_info_parser_sptr self, int priority) -> int
625941bd66673b3332b91f85
def p_multelsif(p): <NEW_LINE> <INDENT> getRule(p,'multelsif')
multelsif : elsif expr pthen compstmt multelsif | empty
625941bd293b9510aa2c318d
def extract_data(self, GT_mask, s_idx): <NEW_LINE> <INDENT> data = np.array([]).reshape((-1, self.cn + 2)) <NEW_LINE> for i in range(1, self.cn + 1): <NEW_LINE> <INDENT> mask = (GT_mask == i) <NEW_LINE> idx = np.where(mask.flatten())[0].reshape((-1, 1)) <NEW_LINE> T1 = self.T1[s_idx, :, :][mask].reshape((-1, 1)) <NEW_LINE> T2 = self.T2[s_idx, :, :][mask].reshape((-1, 1)) <NEW_LINE> PD = self.PD[s_idx, :, :][mask].reshape((-1, 1)) <NEW_LINE> label = np.ones(T1.shape) * i <NEW_LINE> one_class = np.hstack((idx, T1, T2, PD, label)) <NEW_LINE> data = np.vstack((data, one_class)) <NEW_LINE> <DEDENT> new_idx = np.random.permutation(data.shape[0]) <NEW_LINE> return data[new_idx, :]
EXTRACT_DATA On the basis of the given slice indices, extract point values from T1, T2 and PD to form the feature matrix. Each point has 5 dimensions of information: index (position in slice), T1 value, T2 value, PD value, label (1 for CSF, 2 for GM, 3 for WM).
625941bd71ff763f4b54957b
def compare_yv(yv, control_yv, mode, is_control=False): <NEW_LINE> <INDENT> y, y_var = yv <NEW_LINE> if mode == 'ratio': <NEW_LINE> <INDENT> if is_control: <NEW_LINE> <INDENT> return np.ones_like(y), np.zeros_like(y_var) <NEW_LINE> <DEDENT> control_y, control_y_var = (1, 0) if control_yv is None else control_yv <NEW_LINE> return divide_var(y, y_var, control_y, control_y_var) <NEW_LINE> <DEDENT> elif mode == 'difference': <NEW_LINE> <INDENT> if is_control: <NEW_LINE> <INDENT> return np.zeros_like(y), np.zeros_like(y_var) <NEW_LINE> <DEDENT> control_y, control_y_var = (0, 0) if control_yv is None else control_yv <NEW_LINE> return minus_var(y, y_var, control_y, control_y_var) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise TypeError(f"Unsupported compare mode {mode}")
:return: c, c_var
625941bd925a0f43d2549d68
def load_latest_checkpoint_from_bucket(tensorboard_run, bucket, train_dir): <NEW_LINE> <INDENT> import numpy as np <NEW_LINE> checkpoints = gsutil_ls(bucket, filter=tensorboard_run) <NEW_LINE> if "BucketNotFoundException" in checkpoints: <NEW_LINE> <INDENT> raise ValueError( "ERROR: bucket not found, path={}".format(bucket)) <NEW_LINE> <DEDENT> if not checkpoints: <NEW_LINE> <INDENT> raise ValueError("Checkpoint not found, tensorboard_run={}".format(tensorboard_run)) <NEW_LINE> <DEDENT> steps = [re.findall(".*\.(\d+)\.zip$", f)[0] for f in checkpoints ] <NEW_LINE> if not steps: <NEW_LINE> <INDENT> raise ValueError("Checkpoint not found, tensorboard_run={}".format(tensorboard_run)) <NEW_LINE> <DEDENT> latest_step = np.max(np.asarray(steps).astype(int)) <NEW_LINE> if not latest_step: <NEW_LINE> <INDENT> raise ValueError("Checkpoint not found, tensorboard_run={}".format(tensorboard_run)) <NEW_LINE> <DEDENT> latest_checkpoint = [f for f in checkpoints if latest_step.astype(str) in f ] <NEW_LINE> print("latest checkpoint found, checkpoint={}".format(latest_checkpoint[0])) <NEW_LINE> zip_filename = os.path.basename(latest_checkpoint[0]) <NEW_LINE> return load_from_bucket(zip_filename, bucket, train_dir)
find latest zipped 'checkpoint' in bucket and download similar to tf.train.latest_checkpoint() Args: tensorboard_run: filter for zip files from the same run e.g. "my-tensorboard-run" for "my-tensorboard-run.6000.zip" bucket: "gs://[bucket]" train_dir: a directory path to restore the checkpoint files, usually TRAIN_LOG, e.g. "/my-project/log/my-tensorboard-run" Return: checkpoint_name, e.g. `/my-project/log/my-tensorboard-run/model.ckpt-6000`
625941bd1d351010ab855a11
def populateMethods(self): <NEW_LINE> <INDENT> self.retrieval_models = {} <NEW_LINE> all_doc_methods = None <NEW_LINE> if self.exp.get("doc_methods", None): <NEW_LINE> <INDENT> all_doc_methods = getDictOfTestingMethods(self.exp["doc_methods"]) <NEW_LINE> if self.exp["full_corpus"]: <NEW_LINE> <INDENT> all_files = ["ALL_FILES"] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> all_files = list(self.files_dict.keys()) <NEW_LINE> <DEDENT> self.generateRetrievalModels(all_doc_methods, all_files) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> all_doc_methods = self.files_dict["ALL_FILES"]["doc_methods"] <NEW_LINE> <DEDENT> if self.exp["full_corpus"]: <NEW_LINE> <INDENT> for model in self.files_dict["ALL_FILES"]["tfidf_models"]: <NEW_LINE> <INDENT> self.retrieval_models[model["method"]] = self.retrieval_class( model["actual_dir"], model["method"], logger=None, use_default_similarity=self.exp["use_default_similarity"], max_results=self.exp["max_results_recall"], save_terms=self.save_terms, multi_match_type=all_doc_methods[model["method"]].get("multi_match_type")) <NEW_LINE> <DEDENT> <DEDENT> self.main_all_doc_methods = all_doc_methods
Fills dict with all the test methods, parameters and options, including the retrieval instances
625941bd7d43ff24873a2b92
def test_url_remote_http(self): <NEW_LINE> <INDENT> url = 'http://somewhere' <NEW_LINE> is_remote = is_remote_url(url) <NEW_LINE> self.assertTrue(is_remote)
verify that a remote http url is identified.
625941bdbe7bc26dc91cd4f9
def __init__(self): <NEW_LINE> <INDENT> self.GatewayId = None
:param GatewayId: Gateway instance ID. The gateway instance types currently supported are: Direct Connect gateway instance IDs, of the form `dcg-ltjahce6`; NAT gateway instance IDs, of the form `nat-ltjahce6`; VPN gateway instance IDs, of the form `vpn-ltjahce6`. :type GatewayId: str
625941bd8a349b6b435e8068
def test_stage_single_bundle(self): <NEW_LINE> <INDENT> bundle = self.create_run_bundle( state=State.STAGED, metadata=dict(request_memory="0", request_time="", request_cpus=1, request_gpus=0), ) <NEW_LINE> self.save_bundle(bundle) <NEW_LINE> self.mock_worker_checkin(cpus=1, user_id=self.user_id) <NEW_LINE> self.bundle_manager._schedule_run_bundles() <NEW_LINE> bundle = self.bundle_manager._model.get_bundle(bundle.uuid) <NEW_LINE> self.assertEqual(bundle.state, State.STARTING)
When a worker with the right specs is available, a bundle should be staged.
625941bd21a7993f00bc7bdf
def _update(self, data): <NEW_LINE> <INDENT> for k, v in data.iteritems(): <NEW_LINE> <INDENT> new_value = v <NEW_LINE> if isinstance(v, dict): <NEW_LINE> <INDENT> new_value = type(self)(v) <NEW_LINE> <DEDENT> elif isinstance(v, list): <NEW_LINE> <INDENT> new_value = [(type(self)(e) if isinstance(e, dict) else e) for e in v] <NEW_LINE> <DEDENT> setattr(self, k, new_value)
Update the object with new data.
625941bd21a7993f00bc7be0
def submit_gradient(self, from_addr, model_id, grad): <NEW_LINE> <INDENT> ipfs_address = self.ipfs.store(grad) <NEW_LINE> self.get_transaction(from_addr).addGradient( model_id, IPFSAddress().to_ethereum(ipfs_address)) <NEW_LINE> return self.call.getNumGradientsforModel(model_id) - 1
This accepts gradients for a model from syft.nn and uploads them to the blockchain (via IPFS), linked to a model by its id. TODO: modify syft.nn to actually have a "getGradients()" method call so that there can be checks that keep people from uploading junk. Currently any python object could be uploaded (which is obviously dangerous).
625941bd29b78933be1e55a5
def function(self, data): <NEW_LINE> <INDENT> deg = sp.diags(a1ifmat(data.sum(1))**-.5, 0) <NEW_LINE> lap = deg @ data @ deg <NEW_LINE> return lap
Normalized Symmetric Graph Laplacian Parameters ---------- data : :obj:`sp.csr_matrix` or :obj:`np.ndarray` Graph affinity/similarity matrix. Returns ------- :obj:`sp.csr_matrix` Sparse representation of a symmetric graph laplacian matrix
625941bd091ae35668666e58
def print_warning(content): <NEW_LINE> <INDENT> print(COLOR_YELLOW_FORMAT % (WARNING_INFO % content))
Print warning information to screen
625941bd4c3428357757c21f
def part(self, channel): <NEW_LINE> <INDENT> self.irc.send(self.encode('PART {0}'.format(channel)))
Leaves channel
625941bd24f1403a92600a5e
@register.inclusion_tag('cclikes/inclusion_tags/cclikes_extender.html', takes_context=True) <NEW_LINE> def likes(context, obj, template=None): <NEW_LINE> <INDENT> if template is None: <NEW_LINE> <INDENT> template = 'cclikes/inclusion_tags/cclikes.html' <NEW_LINE> <DEDENT> request = context['request'] <NEW_LINE> import_js = False <NEW_LINE> if not hasattr(request, '_django_likes_js_imported'): <NEW_LINE> <INDENT> setattr(request, '_django_likes_js_imported', 1) <NEW_LINE> import_js = True <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> model_name = obj._meta.model_name <NEW_LINE> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> model_name = obj._meta.module_name <NEW_LINE> <DEDENT> context.update({ 'template': template, 'content_obj': obj, 'likes_enabled': likes_enabled(obj, request), 'can_vote': can_vote(obj, request.user, request), 'content_type': "-".join((obj._meta.app_label, model_name)), 'import_js': import_js }) <NEW_LINE> return context <NEW_LINE> def inclusion_tag(self, filename, func=None, takes_context=None, name=None): <NEW_LINE> <INDENT> def dec(func): <NEW_LINE> <INDENT> params, varargs, varkw, defaults = getargspec(func) <NEW_LINE> function_name = (name or getattr(func, '_decorated_function', func).__name__) <NEW_LINE> @functools.wraps(func) <NEW_LINE> def compile_func(parser, token): <NEW_LINE> <INDENT> bits = token.split_contents()[1:] <NEW_LINE> args, kwargs = parse_bits( parser, bits, params, varargs, varkw, defaults, takes_context, function_name, ) <NEW_LINE> return InclusionNode( func, takes_context, args, kwargs, filename, ) <NEW_LINE> <DEDENT> self.tag(function_name, compile_func) <NEW_LINE> return func <NEW_LINE> <DEDENT> return dec
Register a callable as an inclusion tag: @register.inclusion_tag('results.html') def show_results(poll): choices = poll.choice_set.all() return {'choices': choices}
625941bd94891a1f4081b99d
def start_interpreter(self, namespace): <NEW_LINE> <INDENT> self.clear() <NEW_LINE> if self.interpreter is not None: <NEW_LINE> <INDENT> self.interpreter.closing() <NEW_LINE> <DEDENT> self.interpreter = Interpreter( namespace, self.exitfunc, SysOutput, WidgetProxy, self.debug ) <NEW_LINE> self.interpreter.stdout_write.data_avail.connect(self.stdout_avail) <NEW_LINE> self.interpreter.stderr_write.data_avail.connect(self.stderr_avail) <NEW_LINE> self.interpreter.widget_proxy.sig_set_readonly.connect(self.setReadOnly) <NEW_LINE> self.interpreter.widget_proxy.sig_new_prompt.connect(self.new_prompt) <NEW_LINE> self.interpreter.widget_proxy.sig_edit.connect(self.edit_script) <NEW_LINE> self.interpreter.widget_proxy.sig_wait_input.connect(self.wait_input) <NEW_LINE> if self.multithreaded: <NEW_LINE> <INDENT> self.interpreter.start() <NEW_LINE> <DEDENT> banner = create_banner(self.message) <NEW_LINE> self.write(banner, prompt=True) <NEW_LINE> for cmd in self.commands: <NEW_LINE> <INDENT> self.run_command(cmd, history=False, new_prompt=False) <NEW_LINE> <DEDENT> self.new_prompt(self.interpreter.p1) <NEW_LINE> self.refresh.emit() <NEW_LINE> return self.interpreter
Start Python interpreter
625941bd796e427e537b04b8
def issue_date(self): <NEW_LINE> <INDENT> pattern = r'^\s*dated as of (.*)\s*$' <NEW_LINE> m = re.search(pattern, self.content, flags=re.IGNORECASE | re.MULTILINE) <NEW_LINE> if m: <NEW_LINE> <INDENT> print('date string', m.group(1)) <NEW_LINE> return dateparser.parse(m.group(1)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None
Parse the issue date from a line such as 'DATED AS OF SEPTEMBER 13, 1994'.
625941bd656771135c3eb761
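The regex and dateparser call can be exercised directly on the docstring's example line (dateparser is a third-party package):

import re
import dateparser  # third-party: pip install dateparser

content = 'DATED AS OF SEPTEMBER 13, 1994'
m = re.search(r'^\s*dated as of (.*)\s*$', content,
              flags=re.IGNORECASE | re.MULTILINE)
print(dateparser.parse(m.group(1)))  # 1994-09-13 00:00:00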
def _create_event_ch(self, events, n_samples=None): <NEW_LINE> <INDENT> n_dropped = len(events[:, 0]) - len(set(events[:, 0])) <NEW_LINE> if n_dropped > 0: <NEW_LINE> <INDENT> warn(str(n_dropped) + " events will be dropped because they " "occur on the same time sample as another event. " "`mne.io.Raw` objects store events on an event channel, " "which cannot represent two events on the same sample. You " "can extract the original event structure using " "`mne.io.eeglab.read_events_eeglab`. Then, you can e.g. " "subset the extracted events for constructing epochs.") <NEW_LINE> <DEDENT> if n_samples is None: <NEW_LINE> <INDENT> n_samples = self.last_samp - self.first_samp + 1 <NEW_LINE> <DEDENT> events = np.array(events, int) <NEW_LINE> if events.ndim != 2 or events.shape[1] != 3: <NEW_LINE> <INDENT> raise ValueError("[n_events x 3] shaped array required") <NEW_LINE> <DEDENT> self._event_ch = _synthesize_stim_channel(events, n_samples)
Create the event channel.
625941bd6e29344779a62509
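A rough sketch of what synthesizing a stim channel entails — zeros everywhere except at event onsets, where the event code is written. This approximates `_synthesize_stim_channel` and is an assumption, not MNE's exact implementation; note how two events on the same sample would overwrite each other, which is exactly why the record warns about dropped events:

import numpy as np

def synthesize_stim_channel(events, n_samples):
    # events: [n_events x 3] rows of (sample, previous_code, code)
    stim = np.zeros(n_samples)
    stim[events[:, 0]] = events[:, 2]  # one code per sample; duplicates overwrite
    return stim

events = np.array([[2, 0, 1], [5, 0, 3]])
print(synthesize_stim_channel(events, 8))  # [0. 0. 1. 0. 0. 3. 0. 0.]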
def map_(f): <NEW_LINE> <INDENT> return lambda xs: list(map(f, xs))
The list obtained by applying f to each element of xs.
625941bd29b78933be1e55a6
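Usage is direct: `map_` curries `map` and forces the lazy iterator into a list:

double = map_(lambda x: x * 2)
print(double([1, 2, 3]))  # [2, 4, 6]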
def user_input(connection: connect_server.GameConnection, game_state: connectfour.GameState) -> connectfour.GameState: <NEW_LINE> <INDENT> user_command = input() <NEW_LINE> while c4_shared_function.game_move(game_state, user_command) is None: <NEW_LINE> <INDENT> user_command = input() <NEW_LINE> <DEDENT> connect_server.input_user_command(connection, user_command) <NEW_LINE> game_state = c4_shared_function.game_move(game_state, user_command) <NEW_LINE> print(c4_shared_function.board(game_state)) <NEW_LINE> return game_state
Asks for the user command. If the user command is invalid, no changes are made to the game state. If valid, sends the user command to the server, updates the game state, and prints the updated game board. The user is prompted continuously until the command is valid.
625941bdde87d2750b85fc84
def parseIntegratedTestScriptCommands(source_path, keywords): <NEW_LINE> <INDENT> keywords_re = re.compile( to_bytes("(%s)(.*)\n" % ("|".join(re.escape(k) for k in keywords),))) <NEW_LINE> f = open(source_path, 'rb') <NEW_LINE> try: <NEW_LINE> <INDENT> data = f.read() <NEW_LINE> if not data.endswith(to_bytes('\n')): <NEW_LINE> <INDENT> data = data + to_bytes('\n') <NEW_LINE> <DEDENT> line_number = 1 <NEW_LINE> last_match_position = 0 <NEW_LINE> for match in keywords_re.finditer(data): <NEW_LINE> <INDENT> match_position = match.start() <NEW_LINE> line_number += data.count(to_bytes('\n'), last_match_position, match_position) <NEW_LINE> last_match_position = match_position <NEW_LINE> keyword,ln = match.groups() <NEW_LINE> yield (line_number, to_string(keyword.decode('utf-8')), to_string(ln.decode('utf-8'))) <NEW_LINE> <DEDENT> <DEDENT> finally: <NEW_LINE> <INDENT> f.close()
parseIntegratedTestScriptCommands(source_path) -> commands Parse the commands in an integrated test script file into a list of (line_number, command_type, line).
625941bd1f037a2d8b9460f3
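Fed a small script, the generator yields (line_number, keyword, rest-of-line) tuples. A quick check with a temporary file, assuming the record's function is in scope (`to_bytes`/`to_string` are lit utility shims, stubbed here for Python 3):

import os, tempfile

to_bytes = lambda s: s.encode('utf-8')   # stand-ins for lit.util helpers
to_string = lambda s: s

src = b"// RUN: echo hi\n// nothing here\n// CHECK: hi\n"
with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(src)
try:
    for cmd in parseIntegratedTestScriptCommands(f.name, ['RUN:', 'CHECK:']):
        print(cmd)   # (1, 'RUN:', ' echo hi') then (3, 'CHECK:', ' hi')
finally:
    os.remove(f.name)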
def part1() -> int: <NEW_LINE> <INDENT> coords = list(all_coords) <NEW_LINE> def calculate_areas( coords: List[Tuple[int, int]], min_x: int, min_y: int, max_x: int, max_y: int) -> Dict[Tuple[int, int], int]: <NEW_LINE> <INDENT> coords_and_area = {c: 0 for c in coords} <NEW_LINE> for x in range(min_x, max_x): <NEW_LINE> <INDENT> for y in range(min_y, max_y): <NEW_LINE> <INDENT> if (x, y) in coords: <NEW_LINE> <INDENT> coords_and_area[x, y] += 1 <NEW_LINE> continue <NEW_LINE> <DEDENT> manhattan_distances = [] <NEW_LINE> for cx, cy in coords: <NEW_LINE> <INDENT> manhattan_distances.append( (cx, cy, abs(x - cx) + abs(y - cy))) <NEW_LINE> <DEDENT> manhattan_distances.sort(key=lambda c: c[2]) <NEW_LINE> if manhattan_distances[0][2] != manhattan_distances[1][2]: <NEW_LINE> <INDENT> cx, cy = manhattan_distances[0][:2] <NEW_LINE> coords_and_area[cx, cy] += 1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return coords_and_area <NEW_LINE> <DEDENT> areas = calculate_areas(coords, min_x, min_y, max_x, max_y) <NEW_LINE> areas2 = calculate_areas(coords, min_x, min_y, max_x + 1, max_y + 1) <NEW_LINE> for (x, y), area in areas2.items(): <NEW_LINE> <INDENT> if area != areas[x, y]: <NEW_LINE> <INDENT> coords.remove((x, y)) <NEW_LINE> <DEDENT> <DEDENT> _min_x, _min_y, _max_x, _max_y = calculate_grid_size(coords) <NEW_LINE> areas3 = calculate_areas(coords, _min_x - 1, _min_y - 1, _max_x, _max_y) <NEW_LINE> for (x, y), area in areas3.items(): <NEW_LINE> <INDENT> if area != areas[x, y]: <NEW_LINE> <INDENT> coords.remove((x, y)) <NEW_LINE> <DEDENT> <DEDENT> area_sizes = [areas[x, y] for x, y in coords] <NEW_LINE> return max(area_sizes)
Using only the Manhattan distance, determine the area around each coordinate by counting the number of integer X,Y locations that are closest to that coordinate (and aren't tied in distance to any other coordinate). What is the size of the largest area that isn't infinite?
625941bd7047854f462a1301
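The core primitive of the solution is the Manhattan-distance tie-break: each grid point is claimed by its uniquely closest coordinate, and ties count for nobody. That inner decision as a standalone sketch:

def closest_coord(point, coords):
    # Returns the uniquely closest coordinate by Manhattan distance, or None on a tie.
    x, y = point
    dists = sorted((abs(x - cx) + abs(y - cy), (cx, cy)) for cx, cy in coords)
    if len(dists) > 1 and dists[0][0] == dists[1][0]:
        return None
    return dists[0][1]

coords = [(1, 1), (5, 5)]
print(closest_coord((2, 2), coords))  # (1, 1): distance 2 vs 6
print(closest_coord((3, 3), coords))  # None: distance 4 to both -> tie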
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, expected): <NEW_LINE> <INDENT> total_size_1 = 1 <NEW_LINE> total_size_2 = 1 <NEW_LINE> for s in tensor_in_sizes: <NEW_LINE> <INDENT> total_size_1 *= s <NEW_LINE> <DEDENT> for s in filter_in_sizes: <NEW_LINE> <INDENT> total_size_2 *= s <NEW_LINE> <DEDENT> x1 = np.array([f for f in range(1, total_size_1 + 1)]) <NEW_LINE> x1 = x1.astype(np.uint8).reshape(tensor_in_sizes) <NEW_LINE> x1_min = 0.0 <NEW_LINE> x1_max = 255.0 <NEW_LINE> x2 = np.array([f for f in range(1, total_size_2 + 1)]).astype(np.uint8) <NEW_LINE> x2 = x2.astype(np.uint8).reshape(filter_in_sizes) <NEW_LINE> x2_min = 0.0 <NEW_LINE> x2_max = 255.0 <NEW_LINE> with self.test_session(use_gpu=False) as sess: <NEW_LINE> <INDENT> t1 = tf.constant(x1, shape=tensor_in_sizes, dtype=tf.quint8) <NEW_LINE> t2 = tf.constant(x2, shape=filter_in_sizes, dtype=tf.quint8) <NEW_LINE> conv = tf.contrib.quantization.quantized_conv2d(t1, t2, out_type=tf.qint32, strides=[1, stride, stride, 1], padding=padding, min_input=x1_min, max_input=x1_max, min_filter=x2_min, max_filter=x2_max) <NEW_LINE> value = sess.run(conv) <NEW_LINE> <DEDENT> quantized_output = value[0] <NEW_LINE> output_min = value[1] <NEW_LINE> output_max = value[2] <NEW_LINE> float_output = self._QuantizedOutputToFloat(quantized_output, output_min, output_max) <NEW_LINE> self.assertArrayNear(expected, float_output.flatten(), 1.0) <NEW_LINE> self.assertEqual(value[0].shape, conv[0].get_shape())
Verifies the output values of the convolution function. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth]. stride: Stride. padding: Padding type. expected: An array containing the expected operation outputs.
625941bdfbf16365ca6f60b3
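`_QuantizedOutputToFloat` in the final step is, in essence, an affine dequantization. A generic sketch of that mapping (TF's actual helper may treat the signed 32-bit range differently, so this is an assumption, not the test's exact arithmetic):

import numpy as np

def dequantize(q, q_min_val, q_max_val, out_min, out_max):
    # Map integer codes in [q_min_val, q_max_val] linearly onto [out_min, out_max].
    scale = (out_max - out_min) / float(q_max_val - q_min_val)
    return out_min + (q.astype(np.float64) - q_min_val) * scale

codes = np.array([0, 128, 255], dtype=np.uint8)
print(dequantize(codes, 0, 255, 0.0, 255.0))  # [  0. 128. 255.]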
def _test_delete_subnet_with_ports(self, mode): <NEW_LINE> <INDENT> slaac_network = self.create_network() <NEW_LINE> subnet_slaac = self.create_subnet(slaac_network, **{'ipv6_ra_mode': mode, 'ipv6_address_mode': mode}) <NEW_LINE> port = self.create_port(slaac_network) <NEW_LINE> self.assertIsNotNone(port['fixed_ips'][0]['ip_address']) <NEW_LINE> self.subnets_client.delete_subnet(subnet_slaac['id']) <NEW_LINE> self.subnets.pop() <NEW_LINE> subnets = self.subnets_client.list_subnets() <NEW_LINE> subnet_ids = [subnet['id'] for subnet in subnets['subnets']] <NEW_LINE> self.assertNotIn(subnet_slaac['id'], subnet_ids, "Subnet wasn't deleted") <NEW_LINE> self.assertRaisesRegex( lib_exc.Conflict, "There are one or more ports still in use on the network", self.networks_client.delete_network, slaac_network['id'])
Create a subnet and delete it while ports still exist on the network
625941bd63d6d428bbe443e4
def max(self) -> Key: <NEW_LINE> <INDENT> if self.is_empty(): <NEW_LINE> <INDENT> raise NoSuchElementException("Priority queue underflow") <NEW_LINE> <DEDENT> assert self._pq[1] is not None <NEW_LINE> return self._pq[1]
Returns a largest key on this priority queue. :return: a largest key on the priority queue :raises NoSuchElementException: if this priority queue is empty
625941bd4e696a04525c9341
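The `self._pq[1]` access works because the queue stores a 1-indexed binary max-heap, so the root is always the largest key. The invariant in miniature (the array layout is the standard textbook one, assumed here):

pq = [None, 9, 7, 8, 3, 5, 6, 2]   # 1-indexed binary max-heap; pq[0] is unused

# Heap property: every parent dominates both children.
n = len(pq) - 1
assert all(pq[i] >= pq[2 * i] for i in range(1, n + 1) if 2 * i <= n)
assert all(pq[i] >= pq[2 * i + 1] for i in range(1, n + 1) if 2 * i + 1 <= n)
print(pq[1])  # 9 -- the maximum, retrieved in O(1)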
def find_operands(entity_list, lemma): <NEW_LINE> <INDENT> if lemma.name in all_axioms.keys(): <NEW_LINE> <INDENT> operands = lemma.infer_operands(entity_list=entity_list) <NEW_LINE> if operands is None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> elif operands is False: <NEW_LINE> <INDENT> perms = list(itertools.permutations(entity_list, r=lemma.input_no)) <NEW_LINE> random.shuffle(perms) <NEW_LINE> return perms <NEW_LINE> <DEDENT> elif isinstance(operands, list): <NEW_LINE> <INDENT> random.shuffle(operands) <NEW_LINE> return operands <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise NotImplementedError <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise NotImplementedError
Given the lemma and the entity list, find operands to which the given lemma can be applied. :param entity_list: entities available as candidate operands :param lemma: the lemma whose operands are sought :return: operands for the lemma
625941bda79ad161976cc03a
def update_search_parameters(self, selected_gender, selected_category, selected_subcategory): <NEW_LINE> <INDENT> self.model.set_gender(selected_gender) <NEW_LINE> self.model.set_category(selected_category) <NEW_LINE> self.model.set_subcategory(selected_subcategory) <NEW_LINE> self.model.fetch_results()
Pass the parameters selected by the user to the model and fetch the results. :return: N/A
625941bd090684286d50ebd7
def __init__(__self__, *, destination_vault_arn: pulumi.Input[str], lifecycle: Optional[pulumi.Input['PlanRuleCopyActionLifecycleArgs']] = None): <NEW_LINE> <INDENT> pulumi.set(__self__, "destination_vault_arn", destination_vault_arn) <NEW_LINE> if lifecycle is not None: <NEW_LINE> <INDENT> pulumi.set(__self__, "lifecycle", lifecycle)
:param pulumi.Input[str] destination_vault_arn: An Amazon Resource Name (ARN) that uniquely identifies the destination backup vault for the copied backup. :param pulumi.Input['PlanRuleCopyActionLifecycleArgs'] lifecycle: The lifecycle defines when a protected resource is copied over to a backup vault and when it expires. Fields documented above.
625941bd57b8e32f5248338e
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False): <NEW_LINE> <INDENT> w, b = initialize_with_zeros(num_px * num_px * 3) <NEW_LINE> parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost) <NEW_LINE> w = parameters["w"] <NEW_LINE> b = parameters["b"] <NEW_LINE> Y_prediction_test = predict(w, b, X_test) <NEW_LINE> Y_prediction_train = predict(w, b, X_train) <NEW_LINE> print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100)) <NEW_LINE> print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100)) <NEW_LINE> d = {"costs": costs, "Y_prediction_test": Y_prediction_test, "Y_prediction_train" : Y_prediction_train, "w" : w, "b" : b, "learning_rate" : learning_rate, "num_iterations": num_iterations} <NEW_LINE> return d
Builds the logistic regression model by calling the function you've implemented previously Arguments: X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train) Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train) X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test) Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test) num_iterations -- hyperparameter representing the number of iterations to optimize the parameters learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize() print_cost -- Set to true to print the cost every 100 iterations Returns: d -- dictionary containing information about the model.
625941bd5fcc89381b1e15b2
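The accuracy prints above rely on the fact that for 0/1 labels, np.abs(pred - y) is 1 exactly on the misclassified examples, so its mean is the error rate. In isolation:

import numpy as np

y_true = np.array([[1, 0, 1, 1]])
y_pred = np.array([[1, 1, 1, 0]])
accuracy = 100 - np.mean(np.abs(y_pred - y_true)) * 100
print(accuracy)  # 50.0 -- two of the four predictions match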
def __init__(self, root_directory, sha1_chunk=10): <NEW_LINE> <INDENT> self._root_dir = os.path.abspath(os.path.expanduser(root_directory)) <NEW_LINE> self._sha1_chunk = sha1_chunk <NEW_LINE> self._log.debug("Initializing FileSet under root dir: %s", self._root_dir) <NEW_LINE> self._element_map = {} <NEW_LINE> self._element_map_lock = multiprocessing.RLock() <NEW_LINE> self._discover_data_elements() <NEW_LINE> self._new_elem_added = False
Initialize a new or existing file set from a root directory. :param root_directory: Directory that this file set is based in. For relative path resolution, see the ``work_relative`` parameter description. :type root_directory: str :param sha1_chunk: Number of segments to split data element SHA1 sum into when saving element serializations. :type sha1_chunk: int
625941bd2eb69b55b151c7a1
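The `sha1_chunk` parameter presumably controls how a content hash is split into nested directory segments so no single folder grows huge. A sketch of that splitting (the layout is an assumption about this class's serialization paths, not verified behavior):

import hashlib
import os

def chunked_path(root, data, chunk=10):
    sha1 = hashlib.sha1(data).hexdigest()               # 40 hex characters
    parts = [sha1[i:i + chunk] for i in range(0, len(sha1), chunk)]
    return os.path.join(root, *parts)

print(chunked_path('/data/fileset', b'hello'))
# /data/fileset/aaf4c61ddc/c5e8a2dabe/de0f3b482c/d9aea9434d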
def get_activations( self, x: np.ndarray, layer: Union[int, str], batch_size: int = 128, framework: bool = False ) -> np.ndarray: <NEW_LINE> <INDENT> raise NotImplementedError
Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and `nb_layers - 1`) or by name. The number of layers can be determined by counting the results returned by calling `layer_names`. :param x: Input for computing the activations. :param layer: Layer for computing the activations. :param batch_size: Size of batches. :param framework: If true, return the intermediate tensor representation of the activation. :return: The output of `layer`, where the first dimension is the batch size corresponding to `x`. :raises `NotImplementedError`: This method is not supported for detector-classifiers.
625941bd7c178a314d6ef34f
def get_reddit_data(subreddit, date): <NEW_LINE> <INDENT> titles = [] <NEW_LINE> url = "https://web.archive.org/web/" + date + "/reddit.com/" + subreddit <NEW_LINE> print(url) <NEW_LINE> driver.get(url) <NEW_LINE> try: <NEW_LINE> <INDENT> sitetable = driver.find_element_by_id("siteTable") <NEW_LINE> posts = sitetable.find_elements_by_tag_name("div") <NEW_LINE> for post in posts: <NEW_LINE> <INDENT> if len(post.find_elements_by_class_name("title")) > 0: <NEW_LINE> <INDENT> title = post.find_element_by_class_name("title").text <NEW_LINE> titles.append(title) <NEW_LINE> <DEDENT> <DEDENT> titles = set(titles) <NEW_LINE> return titles <NEW_LINE> <DEDENT> except NoSuchElementException: <NEW_LINE> <INDENT> return ['0'] * 26
Gets the top 26 front-page titles from 'subreddit' on 'date'. :param subreddit: ex: 'r/bitcoin' :param date: in 'YYYYMMDD' :return titles: a set of title strings, or a list of 26 '0' placeholders if the page could not be parsed
625941bd956e5f7376d70d64
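The URL the scraper builds follows the Wayback Machine's web.archive.org/web/<timestamp>/<original-url> scheme, which redirects to the snapshot nearest the timestamp. Constructing one without Selenium:

def wayback_url(subreddit, date):
    # date in 'YYYYMMDD'; the archive resolves it to its nearest snapshot.
    return 'https://web.archive.org/web/' + date + '/reddit.com/' + subreddit

print(wayback_url('r/bitcoin', '20170101'))
# https://web.archive.org/web/20170101/reddit.com/r/bitcoin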