Columns: query (string, lengths 9 to 9.05k), document (string, lengths 10 to 222k), negatives (list, lengths 19 to 20), metadata (dict)
Check that custom delimiters can be used to start a section.
def test_custom_delimiters__sections(self): template = '{{=[[ ]]=}}[[#foo]]bar[[/foo]]' context = {'foo': True} self._assert_render(u'bar', template, context)
[ "def test_prologue_bad_delimiter():\n # Use an empty delimiter\n with pytest.raises(PrologueError) as excinfo:\n Prologue(delimiter=\"\")\n assert \"Delimiter should be at least one character\" in str(excinfo.value)\n # Use just whitespace\n with pytest.raises(PrologueError) as excinfo:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that changing custom delimiters back is not "retroactive."
def test_custom_delimiters__not_retroactive(self): expected = u' {{foo}} ' self._assert_render(expected, '{{=$ $=}} {{foo}} ') self._assert_render(expected, '{{=$ $=}} {{foo}} $={{ }}=$') # was yielding u' '.
[ "def test_filter_invalid_delimiters(self, utility):\n log = self._prepare_logger()\n\n utility.return_value = {styles: [\n 'no_delimiter',\n 'valid delimiter|Valid delimiter',\n 'too|much|delimiters',\n ]}\n terms = self._get_terms()\n\n # Apart fr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test dot notation with missing keys or attributes. Check that if a key or attribute in a dotted name does not exist, then the tag renders as the empty string.
def test_dot_notation__missing_attributes_or_keys(self): template = """I cannot see {{person.name}}'s age: {{person.age}}. Nor {{other_person.name}}'s: .""" expected = u"""I cannot see Biggles's age: . Nor Mr. Bradshaw's: .""" context = {'person': {'name': 'Biggles'}, ...
[ "def test_dot_notation__missing_part_terminates_search(self):\n template = '{{a.b}} :: ({{#c}}{{a}} :: {{a.b}}{{/c}})'\n context = {'a': {'b': 'A.B'}, 'c': {'a': 'A'} }\n self._assert_render(u'A.B :: (A :: )', template, context)", "def test_dot_dict_raises_key_error_on_missing_key():\n d =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test dot notation with multiple levels.
def test_dot_notation__multiple_levels(self): template = """Hello, Mr. {{person.name.lastname}}. I see you're back from {{person.travels.last.country.city}}. I'm missing some of your details: {{person.details.private.editor}}.""" expected = u"""Hello, Mr. Pither. I see you're bac...
[ "def Dot():\r\n return Leaf(token.DOT, \".\")", "def test_multilevel_attributes():\n prefix = Prefix(os.sep + \"usr\" + os.sep)\n\n assert prefix.share.man == os.sep + os.path.join(\"usr\", \"share\", \"man\")\n assert prefix.man.man8 == os.sep + os.path.join(\"usr\", \"man\", \"man8\")\n assert pr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that dotted-name resolution terminates when a later part is not found. Check that if a later dotted-name part is not found in the result of the previous resolution, then name resolution terminates rather than restarting the search with the next element of the context stack. From the spec (interpolation section) 5) I...
def test_dot_notation__missing_part_terminates_search(self): template = '{{a.b}} :: ({{#c}}{{a}} :: {{a.b}}{{/c}})' context = {'a': {'b': 'A.B'}, 'c': {'a': 'A'} } self._assert_render(u'A.B :: (A :: )', template, context)
[ "def test_dot_prefixed_label_expansion(self):\n self._test(\"3.2.5 Label Expansion with Dot-Prefix\")", "def resolve_dotted_name(name: str) -> typing.Any:\n if not isinstance(name, str):\n return name # already an object\n names = name.split(\".\")\n used = names.pop(0)\n found = __impo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
pack weight matrices into a single vector
def pack_weights(w_hid, b_hid, w_out, b_out, params): n_in, n_hid, n_out = params[0], params[1], params[2] g_j = hstack((reshape(w_hid,(1,n_in*n_hid)), reshape(b_hid,(1,n_hid)), reshape(w_out,(1,n_hid*n_out)), reshape(b_out,(1,n_out))))[0] g_j = array(g_j[0,:])[0] return g_j
[ "def model_weights_as_vector(model):\r\n weights_vector = []\r\n\r\n for layer in model.layers: # model.get_weights():\r\n if layer.trainable:\r\n layer_weights = layer.get_weights()\r\n for l_weights in layer_weights:\r\n vector = numpy.reshape(l_weights, newshape=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
unpack weights from single vector into weight matrices
def unpack_weights(x, params): n_in, n_hid, n_out = params[0], params[1], params[2] pat_in, pat_out = params[3], params[4] n_pat = shape(pat_in)[0] i1,i2 = 0,n_in*n_hid w_hid = reshape(x[i1:i2], (n_in,n_hid)) i1,i2 = i2,i2+n_hid b_hid = reshape(x[i1:i2],(1,n_hid)) i1,i2 = i2,i2+(n_hid*n_out) w_out = reshape(x[...
[ "def apply(self, vector):\n weights = self.load()\n result = [0, 0]\n for i in xrange(len(vector)):\n for j in (0, 1):\n result[j] = result[j] + (vector[i][j] * weights[i][j])\n return result", "def normalizeWeightsData(weights):\n def normalize(wts):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs the given FirefoxRunner with the given Profile, waits for completion, then returns the process exit code
def run(self, profile=None, timeout=PROCESS_TIMEOUT, env=None, args=None): if profile is None: profile = Profile() self.profile = profile if self.binary is None and self.url: self.binary = self.download_build() if self.runner is None: self....
[ "def RunCygprofileTests(self):\n device_path = '/data/local/tmp/cygprofile_unittests'\n self._device.PushChangedFiles([(self._cygprofile_tests, device_path)])\n try:\n self._device.RunShellCommand(device_path, check_return=True)\n except (device_errors.CommandFailedError,\n device_errors...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read the map file and return the x, y coordinates of every piece of the map: two lists of (x, y) tuples for the walls and ports, and two (x, y) tuples for the exit port and the bot.
def get_map(self, map_name): # ports = [] # exit_ports = [] # bot = [] # print("hello") y = 0 with open("cartes/" + map_name + ".txt") as f: for line in f: x = 0 for char in line: if char == CART_PEACES["wal...
[ "def read_mask():\n \n diri = '/disks/arctic5_raid/abarrett/ETOPO1'\n fili = 'etopo1_land_ocean_mask.nc'\n\n ds = xr.open_dataset(os.path.join(diri,fili))\n tmp = ds['__xarray_dataarray_variable__'].values\n lat = ds['lat'].values\n lon = ds['lon'].values\n ds.close()\n \n xs = tmp[lat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the stuff under test. This abstract property exists so that an IDE (like PyCharm) can detect an abstract method to implement, reminding the developer to implement this property. Furthermore, this is an opportunity for the developer to specify the type of IStuffUnderTest for her purpose. In...
def ut(self) -> "IStuffUnderTest": pass
[ "def _get_tests(self):\n return self.__tests", "def distribute_test_kits(self):\n raise NotImplementedError(\"Subclass must implement abstract method\")", "def my_abstract_property(self):", "def test_method_basics():\n @abstractmethod\n def footer(self):\n \"\"\"Return self. Abstract.\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the test environment. This abstract property exists so that an IDE (like PyCharm) can detect an abstract method to implement, reminding the developer to implement this property. Furthermore, this is an opportunity for the developer to specify the type of ITestEnvironment for her purpose. In...
def te(self) -> "ITestEnvironment": pass
[ "def test_environment(self):\n pass", "def environment(self) -> rl_environment.Environment:\n return self._environment", "def environnement(self):\n return self.__environnement", "def env(self) -> \"OperatorEnv\":\n if self._env is None:\n self._env = OperatorEnv()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
like query_by_mask, but we automatically check that there is only one result and, if so, we return it
def query_by_finding_mask(self, m: "ITestContextMask") -> "ITestContextRepoView": result = list(self.query_by_mask(m)) if len(result) != 1: logging.critical("We obtained {} elements:\ntest context mask: {}\nelements:{}".format(len(result), str(m), "\n".join(map(str, result)))) ra...
[ "def get_one(cur, query):\n\tnummatches = cur.execute(query)\n\treturn cur.fetchone()", "def query_one(self, conditions):\n rows = self.query_all(conditions, limit=1)\n try:\n return next(rows)\n except StopIteration:\n return None, None", "async def queryone(self, stm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
initializing iupac 2 letter nucleotide dictionary
def initialize_2nucl(): bases = defaultdict(dict) bases['A']['A'] = 'A' bases['T']['T'] = 'T' bases['G']['G'] = 'G' bases['C']['C'] = 'C' bases['N']['N'] = 'N' bases['A']['T'] = 'W' bases['T']['A'] = 'W' bases['A']['G'] = 'R' bases['G']['A'] = 'R' bases['A']['C'] = 'M' ba...
[ "def initialize_1nucl():\n bases = defaultdict(dict)\n bases['A'] = ['A', 'A']\n bases['T'] = ['T', 'T']\n bases['G'] = ['G', 'G']\n bases['C'] = ['C', 'C']\n bases['N'] = ['N', 'N']\n bases['W'] = ['A', 'T']\n bases['R'] = ['A', 'G']\n bases['M'] = ['A', 'C']\n bases['K'] = ['G', 'T']...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
initializing iupac nucleotide dictionary
def initialize_1nucl(): bases = defaultdict(dict) bases['A'] = ['A', 'A'] bases['T'] = ['T', 'T'] bases['G'] = ['G', 'G'] bases['C'] = ['C', 'C'] bases['N'] = ['N', 'N'] bases['W'] = ['A', 'T'] bases['R'] = ['A', 'G'] bases['M'] = ['A', 'C'] bases['K'] = ['G', 'T'] bases['Y']...
[ "def initialize_2nucl():\n bases = defaultdict(dict)\n bases['A']['A'] = 'A'\n bases['T']['T'] = 'T'\n bases['G']['G'] = 'G'\n bases['C']['C'] = 'C'\n bases['N']['N'] = 'N'\n bases['A']['T'] = 'W'\n bases['T']['A'] = 'W'\n bases['A']['G'] = 'R'\n bases['G']['A'] = 'R'\n bases['A']['...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
initialize baseCount dictionary for each hapmap entry
def initialize_baseCount(): baseCount = defaultdict(dict) baseCount['A'] = 0 baseCount['T'] = 0 baseCount['G'] = 0 baseCount['C'] = 0 baseCount['N'] = 0 return baseCount
[ "def init_count():\n count = {}\n count['posts_attempted'] = 0\n count['already_in_db'] = 0\n count['already_in_errors'] = 0\n count['rows_added'] = 0\n count['errors_added'] = 0\n return count", "def __init__(\n self,\n observation_space,\n action_space,\n paralle...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
just add an indexNode for frequency f to the indexNode list
def addindexnode(self, f): node = indexNode(f) self.indexmap[f] = node if not self.head and not self.tail: self.head = node self.tail = node return if f < self.head.f: self.head.pre = node node.next = self.head ...
[ "def freq(node):\n return node.value[0]", "def new_frequency_node(self, node):\n temp = FrequencyNode()\n temp.value = node.value + 1\n temp.prev = node\n temp.next = node.next\n node.next = temp\n temp.next.prev = temp\n return temp", "def add_to_freq...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
when the cnt equals the capacity and we add a new key to the LFUCache, we need to remove the first indexNode's first Node; at the same time we return an int f. This f is used for lazy deletion: addindexnode will need f's indexNode, so after we addindexnode we can use f to adjustIndexNo...
def removenode(self): if not self.head: return -1 else: indexnode = self.head node = indexnode.head indexnode.cnt -= 1 if node.next: indexnode.head = indexnode.head.next indexnode.head.pre = None ...
[ "def adjustIndexNode(self,f):\r\n if not self.indexmap.has_key(f):\r\n return\r\n inode = self.indexmap[f]\r\n if inode.cnt == 0:\r\n self.removeindexnode(f)", "def addindexnode(self, f):\r\n node = indexNode(f)\r\n self.indexmap[f] = node\r\n\r\n if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
delete the f indexNode if the indexnode's cnt == 0
def adjustIndexNode(self,f): if not self.indexmap.has_key(f): return inode = self.indexmap[f] if inode.cnt == 0: self.removeindexnode(f)
[ "def deleteAtIndex(self, index: int) -> None:\n if index == 0:\n node = self.head\n self.head = node.nextNode\n else:\n cur = self.head\n i=0\n prevNode = None\n while cur is not None:\n if i==index-1:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
according to node.f, move the node to the node.f indexNode; if there is no node.f indexNode, then addindexnode(f)
def forward(self, node): f = node.f if not self.indexmap.has_key(f): self.addindexnode(f) indexnode = self.indexmap[f] indexnode.cnt += 1 if not indexnode.head and not indexnode.tail: indexnode.head = node indexnode.tail = node ...
[ "def adjustIndexNode(self,f):\r\n if not self.indexmap.has_key(f):\r\n return\r\n inode = self.indexmap[f]\r\n if inode.cnt == 0:\r\n self.removeindexnode(f)", "def addindexnode(self, f):\r\n node = indexNode(f)\r\n self.indexmap[f] = node\r\n\r\n if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate quadrature rule (points, weights) for given shape that will integrate a polynomial of order 'degree' exactly.
def create_quadrature(shape, degree, scheme="default"): # FIXME: KBO: Can this be handled more elegantly? # Handle point case if isinstance(shape, int) and shape == 0 or cellname2dim[shape] == 0: return ([()], array([1.0,])) if scheme == "default": if shape == "tetrahedron": ...
[ "def quadrilateral_scheme(degree):\n\n if degree == 0 or degree == 1:\n # Scheme from FIAT\n x = np.array([[1.0/2.0, 1.0/2.0]])\n w = np.array([1.0])\n elif degree == 2 or degree == 3:\n # Scheme from FIAT\n x = np.array([[0.2113248654051871, 0.2113248654051871]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get quadrature scheme from FIAT interface
def _fiat_scheme(shape, degree): # Number of points per axis for exact integration num_points_per_axis = (degree + 1 + 1) / 2 # Create and return FIAT quadrature rule return fiat_create_quadrature(shape, num_points_per_axis)
[ "def _quadrilateral_scheme(degree):\n\n if degree == 0 or degree == 1:\n # Scheme from FIAT\n x = np.array([[1.0/2.0, 1.0/2.0]])\n w = np.array([1.0])\n elif degree == 2 or degree == 3:\n # Scheme from FIAT\n x = np.array([[0.2113248654051871, 0.2113248654051871]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a quadrature scheme on a triangle of specified order. Falls back on canonical rule for higher orders.
def _triangle_scheme(degree): if degree == 0 or degree == 1: # Scheme from Zienkiewicz and Taylor, 1 point, degree of precision 1 x = array([ [1.0/3.0, 1.0/3.0] ]) w = array([0.5]) elif degree == 2: # Scheme from Strang and Fix, 3 points, degree of precision 2 x = array(...
[ "def create_quadrature(shape, degree, scheme=\"default\"):\n\n # FIXME: KBO: Can this be handled more elegantly?\n # Handle point case\n if isinstance(shape, int) and shape == 0 or cellname2dim[shape] == 0:\n return ([()], array([1.0,]))\n\n if scheme == \"default\":\n if shape == \"tetrah...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a quadrature scheme on a tetrahedron of specified degree. Falls back on canonical rule for higher orders
def _tetrahedron_scheme(degree): if degree == 0 or degree == 1: # Scheme from Zienkiewicz and Taylor, 1 point, degree of precision 1 x = array([ [1.0/4.0, 1.0/4.0, 1.0/4.0] ]) w = array([1.0/6.0]) elif degree == 2: # Scheme from Zienkiewicz and Taylor, 4 points, degree of precis...
[ "def tetrahedron_scheme(degree):\n\n if degree == 0 or degree == 1:\n # Scheme from Zienkiewicz and Taylor, 1 point, degree of precision 1\n x = np.array([[1.0/4.0, 1.0/4.0, 1.0/4.0]])\n w = np.array([1.0/6.0])\n elif degree == 2:\n # Scheme from Zienkiewicz and Taylor, 4 points, d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles a single client connection.
def handle_client(self, client, addr): ip, port = addr port = str(port) while True: try: msg = client.recv(1024).decode() except: return if msg == "connect": # initial message for when a client attempts to connec...
[ "def handle(self):\n #Send connection confirmation to client\n msg = \"CONNECTED {}\".format(self.client_address[0])\n self.queue.put(msg)\n while True:\n msg = self.queue.get(block=True)\n self.request.sendall(msg)", "def _handle_client_requests(self, client_obj,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
connects to the cluster on the local machine, creates the keyspace sparkifydb, and creates a session on the keyspace sparkifydb
def create_keyspace(): cluster = Cluster(['127.0.0.1']) session = cluster.connect() session.execute("""CREATE KEYSPACE IF NOT EXISTS sparkifydb WITH REPLICATION = { 'class': 'SimpleStrategy', 'replication_factor' : 1}""") session.set_keyspace('sparkifydb') return session, cluster
[ "def cassandra_connection():\n cluster = Cluster(['127.0.0.1'], port=9042)\n session = cluster.connect()\n session.execute(\"\"\"CREATE KEYSPACE IF NOT EXISTS songs WITH REPLICATION ={ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }\"\"\")\n session.set_keyspace('songs')\n return session, clus...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load annotated labels from csv file.
def load_labels(csv_file): with open(csv_file, "r") as f: labels = f.read() labels = labels.split(",") labels = [int(label) for label in labels] return labels
[ "def read_labels(csv_file):\n image_names, tags = read_csv(csv_file)\n label_list = labels_unique()\n y = tags2onehot(tags, label_list)\n \n return y, label_list, image_names, tags", "def loadLabels(self, filename):\n file = \"/Users/Septien/Documents/Tesis/code/data/\" + filename + \".label...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Target delay in RNN or LSTM.
def delay(labels, target_delay): delay_labels = torch.zeros(target_delay) labels = torch.cat((delay_labels, labels)) return labels[: labels.shape[0] - target_delay]
[ "def delay_time(self) -> ConfigNodePropertyInteger:\n return self._delay_time", "def delay(source: Observable) -> Observable:\n return observable_delay_timespan(source, duetime, scheduler)", "def target_duration(self) -> float:\n return self._target_duration", "def get_max_delay():\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Store minimal output in an HDF5 file (output unique to OUTCAR)
def to_hdf_minimal(self, hdf, group_name="outcar"): with hdf.open(group_name) as hdf5_output: for k, v in self.to_dict_minimal().items(): hdf5_output[k] = v
[ "def writeHD5():\n global Data1\n\n store = HDFStore('.\\store.h5')\n store['listCrisis'] = Data1\n store.close()", "def open_halo_output(self):\n \n \n try:\n self.halo_output_file = h5.File(self.HDF_output_filepath, \"w\")\n\n except OSError:\n for obj in ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the forces and positions for every ionic step from the OUTCAR file
def get_positions_and_forces(self, filename="OUTCAR", lines=None, n_atoms=None): if n_atoms is None: n_atoms = self.get_number_of_atoms(filename=filename, lines=lines) trigger_indices, lines = _get_trigger( lines=lines, filename=filename, trigger="TOTAL-FORCE (eV/Angst)" ...
[ "def parse_OUTCAR():\n for index, fullpath in enumerate(Path.cwd().rglob(\"OUTCAR\")):\n specie = fullpath.parts[-2]\n \n with open(fullpath, \"r\") as fo:\n for line in fo:\n if line.startswith(\" free energy TOTEN\"):\n toten = re.search(pat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the forces for every ionic step from the OUTCAR file
def get_forces(self, filename="OUTCAR", lines=None, n_atoms=None): if n_atoms is None: n_atoms = self.get_number_of_atoms(filename=filename, lines=lines) trigger_indices, lines = _get_trigger( lines=lines, filename=filename, trigger="TOTAL-FORCE (eV/Angst)" ) retu...
[ "def parse_OUTCAR():\n for index, fullpath in enumerate(Path.cwd().rglob(\"OUTCAR\")):\n specie = fullpath.parts[-2]\n \n with open(fullpath, \"r\") as fo:\n for line in fo:\n if line.startswith(\" free energy TOTEN\"):\n toten = re.search(pat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to extract the irreducible kpoints from the OUTCAR file
def get_irreducible_kpoints( filename="OUTCAR", reciprocal=True, weight=True, planewaves=True, lines=None ): kpoint_lst = [] weight_lst = [] planewaves_lst = [] trigger_number_str = "Subroutine IBZKPT returns following result:" trigger_plane_waves_str = "k-point 1 :"...
[ "def read_kpts_info(self, kpt=0, spin=0, mode='eigenvalues'):\n # output may look like this (or without occupation entries); 8 entries per line:\n #\n # Eigenvalues (hartree) for nkpt= 20 k points:\n # kpt# 1, nband= 3, wtk= 0.01563, kpt= 0.0625 0.0625 0.0625 (reduced coord)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the total energy for every ionic step from the OUTCAR file
def get_total_energies(filename="OUTCAR", lines=None): def get_total_energies_from_line(line): return float(_clean_line(line.strip()).split()[-2]) trigger_indices, lines = _get_trigger( lines=lines, filename=filename, trigger="FREE ENERGIE OF THE ION-ELE...
[ "def extract_final_electronic_energy(filename):\n\n for line in reversed(open(filename, 'r').readlines()):\n if 'FINAL SINGLE POINT ENERGY' in line:\n return Constants.ha_to_j * Constants.n_a * float(line.split()[4])\n\n raise Exception('Final electronic energy not found')", "def parse_OUT...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Getting the Fermi level (Kohn-Sham) from the OUTCAR file
def get_fermi_level(filename="OUTCAR", lines=None): trigger = "E-fermi :" trigger_indices, lines = _get_trigger( lines=lines, filename=filename, trigger=trigger ) if len(trigger_indices) != 0: try: return float(lines[trigger_indices[-1]].split(trig...
[ "def read_fermi(self):\n E_f=None\n filename = self.label + '.txt'\n text = open(filename).read().lower()\n assert 'error' not in text\n for line in iter(text.split('\\n')):\n if line.rfind('fermi (or homo) energy (hartree) =') > -1:\n E_f = float(line.sp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the individual components of the free energy for every electronic step from the OUTCAR file: alpha Z PSCENC = 0.19957337 Ewald energy TEWEN = 73.03212173 Hartree energ DENC = 0.10933240 exchange EXHF = 0.00000000 V(xc)+E(xc) XCENC = 26.17018410 PAW double counting = 168.82497547 136.88269783 entropy TS EENTR...
def get_energy_components(filename="OUTCAR", lines=None): ind_ionic_lst, lines = _get_trigger( trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)", filename=filename, lines=lines, return_lines=True, ) ind_elec_lst = _get_trigger( tri...
[ "def extract_final_electronic_energy(filename):\n\n for line in reversed(open(filename, 'r').readlines()):\n if 'FINAL SINGLE POINT ENERGY' in line:\n return Constants.ha_to_j * Constants.n_a * float(line.split()[4])\n\n raise Exception('Final electronic energy not found')", "def read_free...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the lines where a specific trigger appears.
def _get_trigger(trigger, filename=None, lines=None, return_lines=True): lines = _get_lines_from_file(filename=filename, lines=lines) trigger_indicies = [i for i, line in enumerate(lines) if trigger in line.strip()] if return_lines: return trigger_indicies, lines else: return trigger_ind...
[ "def get_trigger_set_line_up(self):\n ## WATCH AND SEE THAT WE COULD CACHE HERE AND GAIN A LOT OF SPEED during construction\n ## if self.__dict__.has_key(\"NONSENSE\"): \n ## self.NONSENSE += 1\n ## print \"## called\", self.NONSENSE\n ## else:\n ## self.NONSENSE =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If lines is None, read the lines from the file named filename.
def _get_lines_from_file(filename, lines=None): if lines is None: with open(filename, "r") as f: lines = f.readlines() return lines
[ "def read_file(filename):\n\n infile = open(filename, 'r')\n lines = infile.readlines()\n infile.close()\n \n return lines", "def read_file_lines(file_name):\n reading_file = io.open(file_name, 'r', encoding='utf8')\n\n lines = reading_file.readlines()\n reading_file.close()\n return lines",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Launch vi and allow editing the configuration file. vi is the default text editor on Unix systems.
def editConfig(): subprocess.call(['vi', CONFIG_FILE_NAME])
[ "def edit(inventory):\n EDITOR = os.environ.get('EDITOR', 'vim')\n editor = shlex.split(EDITOR)\n editor.append(os.path.join(inventory_path, inventory))\n\n from subprocess import call\n\n try:\n call(editor)\n except OSError:\n raise Exception(\"Failed to open editor (%s): %s\" % (E...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load image from a given path and pad it on the sides, so that each side is divisible by 32 (network requirement)
def load_image(path, pad=True): img = path if not pad: return img height, width, _ = img.shape if height % 32 == 0: y_min_pad = 0 y_max_pad = 0 else: y_pad = 32 - height % 32 y_min_pad = int(y_pad / 2) y_max_pad = y_pad - y_min_pad if width % 3...
[ "def padding_image_square(image, padd_value=(0,0,0)):\r\n width, height = image.size\r\n long_edge_size = width if width >= height else height\r\n\r\n img_padd = Image.new('RGB', (long_edge_size, long_edge_size), padd_value)\r\n if width > height:\r\n h_st = int((long_edge_size - height)/2)\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates sidebar for both new and old reddit.
def update_sidebar(): if not UPDATE_SIDEBAR: return print(f"{os.path.basename(__file__)}: Updating sidebar @ {datetime.now().strftime('%H:%M')}") standings = get_standings() # Old Reddit old_reddit_sidebar = reddit.subreddit(TARGET_SUB).wiki['config/sidebar'].content_md record_regex...
[ "def sidebar(self, subreddit, text, section):\n sub = self.get(\n 'http://www.reddit.com/r/{}/wiki/config/sidebar.json'.format(subreddit))['data']\n regex = r'''{}.*?{}'''.format(re.escape(section['start']), re.escape(section['stop']))\n text = section['start'] + text + section['stop...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replaces destination IP addresses with the helper IP address
def _replace_dst_ip_with_helper_ip_address(path=None): if path is None: path = ['10.0.0.3', '00:00:00:00:00:00:00:1b', '00:00:00:00:00:00:00:09', '00:00:00:00:00:00:00:11', '00:00:00:00:00:00:00:0a', '00:00:00:00:00:00:00:13', '00:00:00:00:00:00:00:0b', '00:00:00:00:00:00:00:...
[ "def reverse_IP(self):\n pass", "def test_ip_addresses_update(self):\n pass", "def test_replace(self):\n\n intf_pfxs = [[], [], [], []]\n\n # add prefixes to each of the interfaces\n for i in range(len(self.pg_interfaces)):\n intf = self.pg_interfaces[i]\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Modbus server constructor.
def __init__(self, host='localhost', port=const.MODBUS_PORT, no_block=False, ipv6=False, register_width=16): # public self.host = host self.port = port self.no_block = no_block self.ipv6 = ipv6 self.register_width = register_width # private self._running =...
[ "def __init__(self, host='', port=4329):\n\n # create the SBP server\n self.conn = Server(host, port)\n\n # read the ID\n self.id = self.conn.receive()\n\n # CP2K should be ready now\n self.get_ready()", "def __init__(self):\n # Create a TCP/IP socket\n self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start the server. Do nothing if server is already running. This function will block if no_block is not set to True.
def start(self): if not self.is_run: # set class attribute ThreadingTCPServer.address_family = socket.AF_INET6 if self.ipv6 else socket.AF_INET ThreadingTCPServer.daemon_threads = True # init server self._service = ThreadingTCPServer((self.host, self.p...
[ "def start(self, blocking=True):\n self.setup_zmq()\n if blocking:\n self.serve()\n else:\n eventlet.spawn(self.serve)\n # ensure that self.serve runs now as calling code will\n # expect start() to have started the server even non-blk\n eve...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an existing Policy resource's state with the given name, id, and optional extra properties used to qualify the lookup.
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'Policy': opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = PolicyArgs.__new__(PolicyArgs) __props__.__dict__["arn"] = None __...
[ "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n desired_state: Optional[pulumi.Input[str]] = None,\n properties: Optional[pulumi.Input[str]] = None,\n role_arn: Optional[pulumi.Input[str]] = None,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the capabilities payload response
def __init__(self, conf=deepcopy(default_conf)): self.conf = LurkConf(conf) self.struct = LURKCapabilitiesResponsePayload self.struct_name = 'LURKCapabilitiesResponsePayload'
[ "def capabilities(self):\n logger.info(f'Collecting Capabilities...')\n\n try:\n gnmi_message_request = CapabilityRequest()\n\n if self.__debug:\n print(\"gNMI request:\\n------------------------------------------------\")\n print(gnmi_message_reques...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Swaps the first two elements of the tuple.
def swap(arg): if len(arg) < 2: raise IndexError("swap() tuple too short") def index(x): return 1 - x if x < 2 else x return tuple(arg[index(n)] for n in range(len(arg)))
[ "def _swap(list_, a, b):\n list_[a], list_[b] = list_[b], list_[a]", "def reverse_tuple(a):\n pass", "def swap(nums: List[int], i: int, j: int) -> None:\n nums[i], nums[j] = nums[j], nums[i]", "def _swap(array, a, b):\n\n if len(array) and a != b:\n tmp = array[a]\n array[a] ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the first falsy value among the arguments, or the final argument if all values are truthy. If no arguments are provided, True is returned.
def and_(*xs): final = True for x in xs: if not x: return x final = x return final
[ "def _first_true(iterable, default=False, pred=None):\n # first_true([a,b,c], x) --> a or b or c or x\n # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x\n return next(filter(pred, iterable), default)", "def get_optional_boolean_arg(args: Dict, argument_name: str) -> Optional[bool]:\n argu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the AND, OR and NOT operators
def parse_query( query ) -> dict: token_value = dict() # If not operators defined in the query if (not (QUERY_SYMBOLS.OR in query)) and (not (QUERY_SYMBOLS.AND in query) and (not (QUERY_SYMBOLS.NOT in query))): print("No operator in usage") tokens = [ pipeline_input(token...
[ "def parsequery(q, implicitOr=False):\n\n if implicitOr:\n return orParser.parse(q)\n else:\n # implicit AND\n return andParser.parse(q)", "def logical_and(self):\n\n expr = self.equality()\n while self.match(TokenType.AND):\n operator = self.previous()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursively validates a payload of a message.
def validate_payload(payload: Any, payload_specification: Dict, full_specification: Dict) -> bool: if "type" not in payload_specification: # no type in specification -> look for $ref or oneOf if "$ref" in payload_specification: payload_specification = dereference(pa...
[ "def validate_payload(self, payload):\n for p in self.required:\n if p not in payload:\n message = \"Incoming payload is missing the required field, %s.\" % (\n p)\n raise RuntimeError(message)\n return True", "def _recursive_validity_check...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Used to reset resources during unit testing of the server
def _reset_resources(self): UnitTestServer._resources = ResourcePool()
[ "def tearDown(self):\n self.mock_server.shutdown()", "def tearDown(self):\n del self.portal\n del self.credFactory\n del self.protectedResource", "def ResetResources(self):\n self.logger.info('Resetting all resource knowledge')\n for n in self.nodes.values():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper that checks for a valid authorization token. If this is a namespace rule test request, the authorization token will be ignored and True will always be returned.
def _authorization_valid(self, auth_token: str, dyn_objects: list) -> bool: if NAMESPACE_RULE_RESOURCE not in dyn_objects: if auth_token is not None and auth_token == VALID_UNIT_TEST_TOKEN: return True return False return True
[ "def check_authorization(self):\n self.token", "def check_token(self, token=None):\r\n if token is None:\r\n token = self.token\r\n resp, resp_body = self.method_head(\"tokens/%s\" % token, admin=True)\r\n if resp.status_code in (401, 403):\r\n raise exc.Authoriza...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Splits an endpoint string into a list of dynamic objects.
def _get_dyn_objects(self, endpoint) -> list: try: vals = endpoint.split('/') return vals[1:] except: raise UnknownRequest()
[ "def retrieveObjects(self, data_string) :\n\t\ttokens = data_string.split()\n\t\tdata_objects = []\n\n\t\tfor token in tokens:\n\t\t\tif token == '{': data_object = []\n\t\t\telif token == '}': data_objects.append(data_object)\n\t\t\telif token == ':': continue\n\t\t\telse: data_object.append(token)\n\t\treturn dat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the number of features where goal_explained_variance is reached. Double checks to see if any remaining features
def set_n_feats_by_intrinsic_dimensionality(self, goal_explained_variance = 0.95, check_if_excluded_features_correlate_or_have_interactions = False): pass
[ "def n_features(self) -> int:\n n_deleted = len([f for f in self.features.values() if f.deleted])\n return len(self.features.keys()) - n_deleted", "def number_of_features(self):\n\n if self.feature_list is None: return 0\n return len(self.feature_list)", "def explained_variance(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write config_file with values from args if they are specified, otherwise use the defaults. If sections are specified, write values from args only to those sections, use the defaults on the remaining ones.
def write(config_file, args=None, sections=None): config = configparser.ConfigParser() for section in SECTIONS: config.add_section(section) for name, opts in SECTIONS[section].items(): if args and sections and section in sections and hasattr(args, name.replace('-', '_')): ...
[ "def write(config_file, args=None, sections=None):\n config = configparser.ConfigParser()\n for section in SECTIONS:\n config.add_section(section)\n for name, opts in SECTIONS[section].items():\n if args and sections and section in sections and hasattr(args, name.replace('-', '_')):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the content of config_file into the HDF raw data file, with values from args if they are specified, otherwise use the defaults. If sections are specified, write values from args only to those sections; use the defaults on the remaining ones.
def write_hdf(config_file, args=None, sections=None): if not args.dx_update: log.warning(" *** Not saving log data to the projection HDF file.") return with h5py.File(args.file_name,'r+') as hdf_file: #If the group we will write to already exists, remove it if hdf_file.get('/pro...
[ "def write(config_file, args=None, sections=None):\n config = configparser.ConfigParser()\n for section in SECTIONS:\n config.add_section(section)\n for name, opts in SECTIONS[section].items():\n if args and sections and section in sections and hasattr(args, name.replace('-', '_')):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log all values set in the args namespace. Arguments are grouped according to their section and logged alphabetically using the DEBUG log level, thus verbose mode is required.
def log_values(args): args = args.__dict__ log.warning('tomopy-cli status start') for section, name in zip(SECTIONS, NICE_NAMES): entries = sorted((k for k in args.keys() if k.replace('_', '-') in SECTIONS[section])) # print('log_values', section, name, entries) if entries: ...
[ "def log_args(args, level=INFO):\n for arg, value in sorted(vars(args).items()):\n logger.log(level, \"arg %s = %r\", arg, value)", "def add_args_to_log(all_args):\n log.info('input_file='+all_args.in_file)\n log.info('taxonomic_filter_level='+all_args.taxo_lvl)\n log.info('output_file_path='+a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
compute a list of influence values for each combination of rule x data. rules: list of rule. datas: list of data.
def databyrule2infs(self, rules, datas): data2infs = defaultdict(list) rule2infs = defaultdict(list) rule2datas = defaultdict(list) for idx, data in enumerate(datas): for rule in rules: filtered = rule.filter(data) infs = self.compute_infs(idx, filtered) rul...
[ "def _update_acc_by_rules(self, mut_influence: np.ndarray):\n for rule, coeff in self.rules.items():\n acc_delta = rule(self, mut_influence) # can't call self.rule\n self._update_acc(acc_delta, coeff)", "def apply_each_rule_till_no_change(rules, x, max_iterations, debug=False):\n y ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a SurfDaq from a bunch of calibration files with calFilePrefix (followed by lab number.npz).
def __init__(self, calFilePrefix="proto_cal"): self.dev = surf_board.do() self.pedestals=np.zeros((12, lab_length)) self.dev.labc.reset_fifo() self.dev.labc.testpattern_mode(0) self.dev.labc.readout_testpattern_mode(0) self.dev.set_vped(2500) if calFilePrefix is ...
[ "def import_das1_sip(filename, **kwargs):\n\n if 'corr_array' in kwargs:\n corr_array = kwargs.get('corr_array')\n else:\n corr_array = [0, 0, 0, 0]\n\n d_start, d_end = 0, 0\n # deduce the data block here\n # look for lines with #data_start/#data_end\n\n with open(filename, 'r') as ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the number of events for every trigger.
def eventsPerTrigger(self, per): if per < 1 or per > 3: print "must be between 1 and 3" return self.dev.labc.repeat_count(per-1)
[ "def set_trigger_count(self, count):\n self.count = count", "def update_event_numbers(self):\r\n for key, event in list(self.events.items()):\r\n event.set_event_number(key)", "def setup(self, aggregator, count, evCfg, db):\n self.aggregator = aggregator\n self.events = []\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enables (if enable=True) or disables (if enable=False) the external trigger
def enableExtTrigger(self, enable): self.dev.extTrig(enable)
[ "def _enable_ext_trigger_in(self):\n self._log(\"Enable 'TRIG IN' external trigger.\")\n if not self._ext_trig_enabled:\n self._write_ppk_cmd([RTTCommand.EXT_TRIG_IN_TOGGLE])\n self._ext_trig_enabled = True", "def ultrasonic_enable(self, enable):\n self.comm('ultrasonic_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a dataset of force triggers, returned raw.
def getRawForceTriggerData(self,count=1000): dat = self.dev.dma_events(count, 1024, force_trig=True) print "Acquisition complete." return surf_dataset.buildDataset(dat, count)
[ "def getTriggerField(self) -> \"SoField *\":\n return _coin.SoDataSensor_getTriggerField(self)", "def getStrippedForceTriggerData(self,count=10000):\n dataset = self.getRawForceTriggerData(count)\n return self.processStripHeaders(dataset)", "def get_triggers(self):\n\n trigger_axis =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a dataset of force triggers, headers stripped.
def getStrippedForceTriggerData(self,count=1000): dataset = self.getRawForceTriggerData(count) print "Stripping headers." return self.processStripHeaders(dataset)
[ "def getStrippedForceTriggerData(self,count=10000):\n dataset = self.getRawForceTriggerData(count)\n return self.processStripHeaders(dataset)", "def getRawForceTriggerData(self,count=1000):\n dat = self.dev.dma_events(count, 1024, force_trig=True)\n print \"Acquisition complete.\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a dataset of force triggers, pedestal subtracted.
def getSubtractedForceTriggerData(self,count=1000): dataset = self.getStrippedForceTriggerData(count) print "Subtracting pedestals." return self.processSubtractPedestals(dataset)
[ "def getSubtractedForceTriggerData(self,count=10000):\n dataset = self.getStrippedForceTriggerData(count)\n return self.processSubtractPedestals(dataset)", "def getRawForceTriggerData(self,count=1000):\n dat = self.dev.dma_events(count, 1024, force_trig=True)\n print \"Acquisition comp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
strip headers from dataset
def processStripHeaders(self, dataset): headers = stripHeaders(dataset['data']) # This is now an int16 (so it can be pedestal subtracted) dataset['data'] = dataset['data'].view('int16') dataset['headers'] = headers return dataset
[ "def dropColumnsByHeader(dataset, headerNameToRemove):\n\theaders = dataset.header;\n\tdata = dataset.data;\n\tindexesToRemove = list();\n\tfor toRemove in headerNameToRemove:\n\t\tindexesToRemove.extend(np.where(headers == toRemove)[0]);\n\tdata = np.delete(data, indexesToRemove, 1);\n\tdata = np.asarray(data, dty...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
subtract pedestals from dataset
def processSubtractPedestals(self, dataset): count = dataset['count'] # keep track of where we are eventNumber=0 # find out what's the first buffer we have. startBuffer = (dataset['headers'][0] & 0xC000) >> 14 # special-case less than 4 events if count < 4: ...
[ "def getSubtractedForceTriggerData(self,count=1000):\n dataset = self.getStrippedForceTriggerData(count)\n print \"Subtracting pedestals.\"\n return self.processSubtractPedestals(dataset)", "def getSubtractedForceTriggerData(self,count=10000):\n dataset = self.getStrippedForceTriggerDa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
reorder windows in dataset
def processOrderWindows(self, dataset): count = dataset['count'] # get headers, recast into an event array headers = np.reshape(dataset['headers'], (count,8)) # get data, recast into an event array data = np.reshape(dataset['data'],(count,1024)) # create empty wi...
[ "def make_time_windows(dataset, w):\n num_participants, full_length, _ = np.shape(dataset)\n time_windows = []\n\n for i in list(range(num_participants)): # i = participant's position in dataset\n\n for j in list(range(full_length-w+1)): # j = row number of first row in window\n time_wind...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a dataset of force triggers, headers stripped.
def getStrippedForceTriggerData(self,count=10000): dataset = self.getRawForceTriggerData(count) return self.processStripHeaders(dataset)
[ "def getStrippedForceTriggerData(self,count=1000):\n dataset = self.getRawForceTriggerData(count)\n print \"Stripping headers.\"\n return self.processStripHeaders(dataset)", "def getRawForceTriggerData(self,count=1000):\n dat = self.dev.dma_events(count, 1024, force_trig=True)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a dataset of force triggers, pedestal subtracted.
def getSubtractedForceTriggerData(self,count=10000): dataset = self.getStrippedForceTriggerData(count) return self.processSubtractPedestals(dataset)
[ "def getSubtractedForceTriggerData(self,count=1000):\n dataset = self.getStrippedForceTriggerData(count)\n print \"Subtracting pedestals.\"\n return self.processSubtractPedestals(dataset)", "def getRawForceTriggerData(self,count=1000):\n dat = self.dev.dma_events(count, 1024, force_tri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
convenience function for reshaping datasets into events
def datasetEvents(self, dataset, eventSize=1024): return np.reshape(dataset['data'], (dataset['count'], eventSize))
[ "def reshape_events(self, debug=False):\n # The events file #\n path = self.input.paths.events\n # Optionally make a copy #\n if debug: path.copy(path.prefix_path + '_wide.csv')\n # Load it as a dataframe #\n wide = pandas.read_csv(str(path))\n # Reshape it #\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convenience function to update trims on a LabDaq.
def update_trims_one(daq, lab, trims, fb=None): daq.stopAcq() for i in xrange(128): daq.dev.labc.l4reg(lab,256+i,trims[i]) if fb is not None: daq.dev.labc.l4reg(lab,11, fb) daq.startAcq()
[ "def update_all_trims(daq, trims, fbs=None):\n daq.stopAcq()\n for lab in xrange(12):\n for cell in xrange(128):\n daq.dev.labc.l4reg(lab,256+cell, trims[lab][cell])\n if fbs is not None:\n for lab in xrange(12):\n print \"set vtrimfb %d to %d\" % (lab, fbs[lab])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convenience function to update all trims on a SurfDaq.
def update_all_trims(daq, trims, fbs=None): daq.stopAcq() for lab in xrange(12): for cell in xrange(128): daq.dev.labc.l4reg(lab,256+cell, trims[lab][cell]) if fbs is not None: for lab in xrange(12): print "set vtrimfb %d to %d" % (lab, fbs[lab]) daq.dev.l...
[ "def update_trims_one(daq, lab, trims, fb=None):\n daq.stopAcq()\n for i in xrange(128):\n daq.dev.labc.l4reg(lab,256+i,trims[i])\n if fb is not None:\n daq.dev.labc.l4reg(lab,11, fb)\n daq.startAcq()", "def run_update(self, **kw):\n\n for task in self.preprocessed_task_list:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cleanup trim DAC pass. This function cleans up any outlier samples in the feedback tuning. This is usually just the slow sample. "Outliers" are times which are further than ~2 standard deviations or 10 picoseconds, whichever is larger. Note that the feedback sample is allowed to be a bit larger (3 standard deviations) ...
def tuneOutliers(daq, iterations, trims, fb, samples=10000, frequency=235.e6): lab = daq.lab it = 0 outliers=128 while it < iterations and outliers > 0: times = daq.getTimes(samples) stdev = np.std(times[0:127]) print "pass %d stdev %f" % (it, stdev) if stdev < 5: ...
[ "def remove_outliers():\n \n return", "def cleanSample(sample, maxReading, padLength):\n\treturn resample(taper(sample, maxReading), padLength)", "def _trim_adapters(self, sample):\n message = '{}: Trimming adapters'.format(sample.basename)\n command = 'cutadapt -q 20 -m 10 -j 18 -b file:{0}...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Queries the Imgur API with GET requests for images, given a valid Imgur url. This method is called to extract image urls from albums/galleries of images.
def get_imgur(url): img_urls = [] imgur_hash = url.strip('/').split('/')[-1].split('?')[0] # Unpredictable API endpoint api_album = f'https://api.imgur.com/3/album/{imgur_hash}/images' api_gallery = f'https://api.imgur.com/3/gallery/album/{imgur_hash}' api_kwargs...
[ "def scraper(url : str) -> list:\n album_id = url.split('/')[-1]\n image_links = []\n raw_response = get_link_list(album_id)\n if raw_response[0] != 200:\n raise thereIsSomeProblemException\n for i in raw_response[1]['data']:\n image_links.append(i['link'])\n return image_links", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize a wallpaper subreddit scraper instance with feed options. Stores wallpaper urls in a 'wallpapers' instance attribute. Threaded extraction of urls allows for immediate access of the wallpapers attribute while full extraction runs in the background.
def __init__(self, sort='top', time_filter='week', limit=1000): self.wallpapers = queue.Queue() self.done = False self.end = False with Reddit(**self.reddit_oath) as R: r_wallpapers = R.subreddit('wallpapers') # sort_options = {'controversial...
[ "def _get_wallpaper_url(self, r_submission):\n if isinstance(r_submission, Submission):\n if utils.is_img_ext(r_submission.url):\n self.wallpapers.put((r_submission.url, r_submission.thumbnail))\n elif 'imgur' in r_submission.url[:15]: # No need to search entire string\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract wallpaper urls from reddit submissions/posts. Appends to the instance's 'wallpapers' attribute.
def _get_wallpaper_url(self, r_submission): if isinstance(r_submission, Submission): if utils.is_img_ext(r_submission.url): self.wallpapers.put((r_submission.url, r_submission.thumbnail)) elif 'imgur' in r_submission.url[:15]: # No need to search entire string ...
[ "def _get_all_wallpaper_url(self, r_wallpapers):\n for r_submission in r_wallpapers:\n if not self.end:\n self._get_wallpaper_url(r_submission)\n else:\n raise StopIteration\n self.done = True", "def permalink_film(activity):\n if activity.film_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs a fully looped extraction of the r_wallpapers generator, calling the helper function self._get_wallpaper_url
def _get_all_wallpaper_url(self, r_wallpapers): for r_submission in r_wallpapers: if not self.end: self._get_wallpaper_url(r_submission) else: raise StopIteration self.done = True
[ "def auto_wallpaper(loop):\n # while True:\n _change_wallpaper()\n loop.call_later(1800, auto_wallpaper, loop)", "def _get_wallpaper_url(self, r_submission):\n if isinstance(r_submission, Submission):\n if utils.is_img_ext(r_submission.url):\n self.wallpapers.put((r_submi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transforms a column into new columns based on the unique values in the column. All unique values show up as new columns in the data. This transformation is helpful for calculating a meaningful similarity coefficient. Also see encode_feature
def transform_feature(store, featureName): unique_values = list(pd.unique(store[featureName])) unique_mappings = dict(enumerate(unique_values)) inv_unique_mappings = {v: k for k, v in unique_mappings.items()} features = [featureName + "_" + str(suffix) for suffix in list(unique_mappings.keys())] s...
[ "def _calculate_unique_in_columns(self):\n unique_in_col = pd.Series(np.empty(self._X.shape[1]), index=self._X.columns.values)\n for col in self._X.columns:\n unique_in_col[col] = self._X[col].unique()\n self._unique_in_columns = unique_in_col", "def transform_categorical(df):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Avoids an unnecessary copy of the store object. Scales the feature as per min-max scaling.
def min_max_scaling(store, featureName): store[featureName] = (store[featureName] - store[featureName].min()) / (store[featureName].max() - store[featureName].min()) return store
[ "def feature_transform(self):\n if self.log_transform_flg:\n self.data['LOG10_'+self.feature_name] = np.log10(self.data[self.feature_name] - self.data[self.feature_name].min()+1)\n else:\n self.data['LOG10_'+self.feature_name] = self.data[self.feature_name] #not actually log tra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a controlfield node with given tag inside a record.
def get_controlfield(record, tag): path = "controlfield[@tag='" + tag + "']" try: controlfield = record.find(path).text except AttributeError: controlfield = np.nan return controlfield
[ "def field(self, tag):\n return self[self.index(tag)]", "def get_ds_field(record, tag, code, take_first=True):\n path = \"datafield[@tag='\" + tag + \"']\" + \"/subfield[@code='\" + code + \"']\"\n\n if take_first:\n try:\n ds_field = record.find(path).text\n except Attribute...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a datafield/subfield node with given tag and code inside a record. If take_first is True, only first subfield with given code is returned.
def get_ds_field(record, tag, code, take_first=True): path = "datafield[@tag='" + tag + "']" + "/subfield[@code='" + code + "']" if take_first: try: ds_field = record.find(path).text except AttributeError: ds_field = np.nan return ds_field else: ds_...
[ "def get_subfield(datafield, code, take_first=True):\n path = \"subfield[@code='\" + code + \"']\"\n\n if take_first:\n try:\n subfield = datafield.find(path).text\n except AttributeError:\n subfield = np.nan\n\n return subfield\n\n else:\n subfield = []\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get content of subfield nodes with given code inside a datafield. If take_first is True, only first subfield with given code is returned.
def get_subfield(datafield, code, take_first=True): path = "subfield[@code='" + code + "']" if take_first: try: subfield = datafield.find(path).text except AttributeError: subfield = np.nan return subfield else: subfield = [] for element in ...
[ "def get_ds_field(record, tag, code, take_first=True):\n path = \"datafield[@tag='\" + tag + \"']\" + \"/subfield[@code='\" + code + \"']\"\n\n if take_first:\n try:\n ds_field = record.find(path).text\n except AttributeError:\n ds_field = np.nan\n\n return ds_field\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the training data from MongoDB. Returns two numpy arrays
def get_data(): mongo_data = training_data.find() tweets, labels = [], [] for entry in mongo_data: #print pc.remove_punct(pc.remove_links(pc.remove_identifiers(pc.clean_tweet(entry['content'])))) #TODO Remove tweets that have spaces between every word #TODO such as "للَّهَ وَمَلَائِكَتَهُ يُصَلُّون عَلَى النَ...
[ "def getTrainingData(self):", "def _convert_training_data_to_numpy(request):\n user_ratings_train = _process_data[\"user_ratings_train\"]\n als_movie_ids = _process_data[\"als_movie_ids\"]\n als_user_ids = _process_data[\"als_user_ids\"]\n movie_medians = _process_data[\"movie_medians\"]\n\n # find...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sum the values of two numbers.
def sum(a, b): return a + b
[ "def sum(num1, num2):\n\treturn num1 + num2", "def suma_dos_numeros(a, b):\n resultado = a + b\n return resultado", "def sum_points(a, b):\n return a[0] + b[0], a[1] + b[1]", "def sumValues(a1,a2):\n x = np.add(a1,a2)\n print(x)\n return x", "def sum_ints(a: int, b: int) -> int:\n\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the report_options of this CreateReportSpecification.
def report_options(self, report_options): self._report_options = report_options
[ "def init_report_options(self):\n\n if self.category == CATEGORY_BOOK: # a Book Report has no \"menu\"\n for key in self.option_class.options_dict:\n self.options_dict[key] = self.option_class.options_dict[key]\n self.options_help[key] = \\\n self.o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the report_type of this CreateReportSpecification.
def report_type(self, report_type): if report_type is None: raise ValueError("Invalid value for `report_type`, must not be `None`") # noqa: E501 self._report_type = report_type
[ "def SetReportType(self, *args):\n return _wiimote.wiimote_SetReportType(self, *args)", "def define_report_type(report_type):\n\n def wrap(func):\n setattr(func, '_report_type', report_type)\n return func\n\n return wrap", "def setDriver_type(self, driver_type):\n\n if driver_t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the data_start_time of this CreateReportSpecification.
def data_start_time(self, data_start_time): self._data_start_time = data_start_time
[ "def start_time(self, start_time: str):\n\n self._start_time = start_time", "def setStartTime(self, startTime):\n self.startTime = startTime", "def start_time(self, start_time):\n self.__start = start_time", "def start_datetime(self, start_datetime: datetime):\n\n self._start_datet...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the data_end_time of this CreateReportSpecification.
def data_end_time(self, data_end_time): self._data_end_time = data_end_time
[ "def setEndTime(self, endTime):\n self.endTime = endTime", "def end_dts(self, end_dts):\n\n self._end_dts = end_dts", "def end_datetime(self, end_datetime: datetime):\n\n self._end_datetime = end_datetime", "def max_end_date(self, max_end_date):\n\n self._max_end_date = max_end_dat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the marketplace_ids of this CreateReportSpecification.
def marketplace_ids(self, marketplace_ids): if marketplace_ids is None: raise ValueError("Invalid value for `marketplace_ids`, must not be `None`") # noqa: E501 self._marketplace_ids = marketplace_ids
[ "def betting_market_types(self, betting_market_types):\n\n self._betting_market_types = betting_market_types", "def betting_period_types(self, betting_period_types):\n\n self._betting_period_types = betting_period_types", "def set_ids(self, ids):\n log.debug(\"Ids for the descriptions set: ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verifies that specifying an empty milestone name results in the milestone being correctly detected as nonexistent.
def test_new_milestone_empty_name(self): milestone = Milestone(self.env, '') self.assertEqual(False, milestone.exists) self.assertEqual(None, milestone.name) self.assertEqual(0, milestone.due) self.assertEqual(0, milestone.completed) self.assertEqual('', milestone.descrip...
[ "def test_new_milestone_empty_name(self):\n milestone = Milestone(self.env, '')\n self.assertEqual(False, milestone.exists)\n self.assertEqual(None, milestone.name)\n self.assertEqual(None, milestone.due)\n self.assertEqual(None, milestone.completed)\n self.assertEqual('', ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find and return the index of target in sequence xs. Linear search algorithm.
def search_linear(xs, target): for (i, v) in enumerate(xs): if v == target: # Is referred to as a probe. return i return -1
[ "def linearSearch(target, my_list):\n \n indexOfItem = sentinel\n \n for idx, item in enumerate(my_list):\n #print(str(idx) + \", \")\n if (item == target):\n indexOfItem = idx\n break\n \n return indexOfItem", "def indexOf(self, target):\n index = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Daemonize the process. UNIX double-fork mechanism.
def daemonize(self): try: pid = os.fork() if pid > 0: # exit first parent sys.exit(0) except OSError as err: sys.stderr.write('fork #1 failed: {0}\...
[ "def _ForkWatchdog(self, new_process_group, emu_args, emu_env, emu_wd,\n services_dir):\n assert os.path.exists(emu_wd)\n assert os.path.exists(services_dir)\n assert os.path.exists(emu_args[0])\n\n fork_result = os.fork()\n if fork_result != 0:\n return fork_result\n else:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given two data vectors (x and y) and the max_parts and parts returned from calling cm, this function returns two arrays with scalars to draw the lines that separate clusters in x and y.
def get_cm_line_points(x, y, max_parts, parts): # get the ccc partitions that maximize the coefficient x_max_part = parts[0][max_parts[0]] x_unique_k = {} for k in np.unique(x_max_part): data = x[x_max_part == k] x_unique_k[k] = data.min(), data.max() x_unique_k = sorted(x_unique_k.i...
[ "def formLines(x,y):\r\n m = []\r\n c = []\r\n mpx = []\r\n mpy = []\r\n for i in range(len(x)):\r\n for j in range(i+1,len(y)):\r\n if (x[j]-x[i]) == 0:\r\n slope = 'inf'\r\n C = x[i]\r\n else:\r\n slope = (y[j]-y[i])/(x[j]-x...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format the label for an office into something we like for Twitter.
def format_office_label(office, division_label): if office.body: if office.body.slug == "senate": return "the Senate in {}".format(division_label) else: if office.division.code == "00": return "the House seat in {} at-large district".format( ...
[ "def short_format_office_label(office, division_label):\n if office.body:\n if office.body.slug == \"senate\":\n return \"{} Senate\".format(division_label)\n else:\n if office.division.code == \"00\":\n return \"{} at-large district\".format(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format the label for an office into something we like for Twitter.
def short_format_office_label(office, division_label): if office.body: if office.body.slug == "senate": return "{} Senate".format(division_label) else: if office.division.code == "00": return "{} at-large district".format( possessive_state(...
[ "def format_office_label(office, division_label):\n if office.body:\n if office.body.slug == \"senate\":\n return \"the Senate in {}\".format(division_label)\n else:\n if office.division.code == \"00\":\n return \"the House seat in {} at-large district\".format(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the compile options to use, as derived from flag values.
def get_compile_options(): compile_options = None if FLAGS.jax_dump_hlo_graph is not None: compile_options = get_xla_client().CompileOptions() compile_options.generate_hlo_graph = FLAGS.jax_dump_hlo_graph if FLAGS.jax_hlo_profile: compile_options = compile_options or get_xla_client().CompileOptions() ...
[ "def GetPossibleBuildFlags(self):\n return self.possible_build_flags_", "def get_pip_cli_options(self):\n options = [\n \"--python-version\",\n self.python_version,\n \"--implementation\",\n self.implementation,\n ]\n for abi in self.abis:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configures and returns a handle to the XLA client.
def _get_xla_client(backend_name, platform_name, replica_count): global _platform_name xla_client.initialize_replica_count(replica_count) if backend_name == 'xla': if platform_name: xla_client.initialize_platform_name(platform_name) _platform_name = platform_name else: try: xla_c...
[ "def init_client(self, conf):\n if conf.password is not None:\n account = MyPlexAccount(conf.user, conf.password)\n return account.resource(conf.host).connect()\n else:\n return PlexServer(conf.host, conf.token)", "async def get_client(conf):\n from proxmoxer impo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert from a dtype to a canonical dtype based on FLAGS.jax_enable_x64.
def canonicalize_dtype(dtype): dtype = onp.dtype(dtype) if FLAGS.jax_enable_x64: return str(dtype) else: return str(_dtype_to_32bit_dtype.get(str(dtype), dtype))
[ "def _normalize_type(dtype: DTypeLike) -> \"numpy.dtype[Any]\":\n return numpy.dtype(dtype)", "def _to_dtype(numpy_dtype):\r\n if numpy_dtype == np.float32:\r\n return trt.tensorrt.DataType.FLOAT\r\n elif numpy_dtype == np.int32:\r\n return trt.tensorrt.DataType.INT32\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an xla_client.Shape, return a new instance with canonical dtypes.
def canonicalize_shape(shape): if shape.is_tuple(): return Shape.tuple_shape(tuple( canonicalize_shape(s) for s in shape.tuple_shapes())) else: return Shape.array_shape( canonicalize_dtype(shape.element_type()), shape.dimensions(), shape.minor_to_major())
[ "def to_ctypes(dshape):\n if len(dshape) == 1:\n if dshape == coretypes.int8:\n return ctypes.c_int8\n elif dshape == coretypes.int16:\n return ctypes.c_int16\n elif dshape == coretypes.int32:\n return ctypes.c_int32\n elif dshape == coretypes.int64:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Translate constant `py_val` to a constant for this ComputationBuilder.
def Constant(self, py_val): py_type = type(py_val) if py_type in _constant_handlers: return _constant_handlers[py_type](self, py_val) else: raise TypeError("No constant handler for type: {}".format(py_type))
[ "def __call__(self, value):\n from llvmlite.ir import Constant\n return Constant(self, value)", "def constant(self, val, ty):\n if isinstance(val, VexValue) and not isinstance(val, IRExpr):\n raise Exception(\"Constant cannot be made from VexValue or IRExpr\")\n rdt = self.i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constant handler for ndarray literals, handling zero-size strides. This function essentially calls c.NumpyArrayConstant(val) except it has …
def _ndarray_constant_handler(c, val): # TODO(mattjj): revise this to use c.BroadcastInDim rather than Transpose if onp.any(onp.equal(0, val.strides)) and val.size > 0: zero_stride_axes, = onp.where(onp.equal(0, val.strides)) other_axes, = onp.where(onp.not_equal(0, val.strides)) collapsed_val = val[tup...
[ "def constant(const, axes=None, dtype=None):\n graph_label_type = \"<Const({})>\".format(const)\n val = AssignableTensorOp(axes=axes, constant=True, persistent=True, trainable=False,\n graph_label_type=graph_label_type, dtype=dtype)\n nptensor = np.asarray(const, dtype=val.dtype...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a time list of [hours, mins, secs] to an HH:MM:SS string.
def time_to_string(time: list) -> str: assert len(time) == 3, "expect [hours, mins, secs]" time_string = '' for t in time: t_str = str(t) if len(t_str) == 1: t_str = '0' + t_str time_string += t_str + ':' return time_string[:-1]
[ "def format_split_times(split_times: List) -> str:\n return ', '.join([to_s(st['control_code']) + ': ' + to_s(st['time']) for st in split_times])", "def time_to_str(time: Timestamp) -> str:\n return time.strftime('%H:%M:%S')", "def time_str(self):\n return f'{datetime_to_str(self.time)}'", "def c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }