query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict)
|---|---|---|---|
Lists the output modules. | def ListOutputModules(self):
self.PrintHeader(u'Output Modules')
manager = output_manager.OutputManager
for name, description in manager.GetOutputs():
self.PrintColumnValue(name, description, 10)
self.PrintSeparatorLine() | [
"def list_output_modules(self):\n try:\n return self._send_command(self._client.list_output_modules)\n except AttributeError:\n return ()\n except speechd.SSIPCommandError:\n return ()",
"def _list_modules(self):\n print(list(self.contents.keys()))",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get processes which are running on a given path or a sub-path of that path. | def get_processes_by_path(path):
pinfos = []
for proc in psutil.process_iter():
pinfo = proc.as_dict(attrs=['pid', 'name', 'exe', 'cwd', 'open_files'])
using_paths = []
if pinfo['exe']:
using_paths.append(pinfo['exe'])
if pinfo['cwd']:
using_paths.append(... | [
"def findProcesses(self):\n procFinder = ChildProcs(self.process)\n procFinder()\n return procFinder[1:]",
"def find_processes(search_string):\n procs = []\n for p in psutil.process_iter():\n try:\n if search_string in ' '.join(p.cmdline()):\n procs.appe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieves the gnomAD vcf row corresponding to the given chrom, pos, ref, alt, and extracts the column values listed in NEEDED_GNOMAD_FIELDS | def get_gnomad_column_values(gnomad_f, chrom, pos, ref, alt):
if chrom == 'MT':
return GNOMAD_EMPTY_COLUMN_VALUES
counts['total_clinvar_variants'] += 1
# retrieve gnomAD variant - pysam.fetch(..) sometimes returns more than 1 vcf record, so need to filter here
position_found = False
gnoma... | [
"def read_HGVD(args, db):\n db[\"hgvd\"] = {}\n dbsnpfiles = [\"/\" + db[\"hgvd_freqfile\"]]\n for dbsnpfile in dbsnpfiles:\n with open(dbsnpfile, \"r\") as fin:\n for line in fin:\n allele = {}\n line_l = line.strip().split()\n chrom, pos, rs,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The BOSH URL for connections to the ejabberd chat server. | def chat_url(self):
return str(
'https://%s:80/http-bind/' % EJABBERD_CHAT_SERVER_IP_ADDRESS) | [
"def admin_url(self):\n return str('https://%s:5285/rest/' % EJABBERD_CHAT_SERVER_IP_ADDRESS)",
"def chat_bot_uri(self) -> str:\n return pulumi.get(self, \"chat_bot_uri\")",
"def server_url(self):\n pass",
"def bolt_uri(self):\n host, port = self.bolt_address\n return \"bolt... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The mod_rest endpoint for the ejabberd chat server admin console. | def admin_url(self):
return str('https://%s:5285/rest/' % EJABBERD_CHAT_SERVER_IP_ADDRESS) | [
"def get():\n result = ctx.admin_api_srv()\n return _result_to_resource(result)",
"def main():\n\n # Set default logging level to debug\n logger.setLevel(logging.DEBUG)\n\n # Initialize paths\n setPaths(modulePath())\n\n # Parse command line options\n apipar... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The name of the ejabberd chat server used for registering users. | def server_name(self):
return 'localhost' | [
"def room_server_name(self):\n return 'conference.localhost'",
"def nameserver(self):\n return self.pyre_nameserver",
"def name_server_set(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"name_server_set\")",
"def name_server_set(self) -> Optional[pulumi.Input[str]]:\n retu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The name of the multiuser chat room server. | def room_server_name(self):
return 'conference.localhost' | [
"def server_name(self):\n return 'localhost'",
"def nameserver(self):\n return self.pyre_nameserver",
"def world_name(self):\n\n\t\treturn self.server_config.get('level-name')",
"def server_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"server_group_name\")",
"def s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of user_ids occupying a particular room. | def get_room_occupants(self, room_id):
response = self._make_post_request(
'get_room_occupants %s %s' % (room_id, self.room_server_name))
occupants = response.split('\n')
return [occupant[:occupant.find('@')] for occupant in occupants] | [
"def get_all_room_users(self) -> QueryType[User]:\n return self.users.all().order_by('membership__id')",
"def get_userlist(self, room):\n users = \"\"\n with Chat.lock:\n for user in room.users:\n users += \" * {}\".format(user)\n if user == self.name:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Import the Stookalert platform into a config entry. | async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
LOGGER.warning(
"Configuration of the Stookalert platform in YAML is deprecated and will be "
"removed in Home... | [
"async def async_forward_platform_entry_setups_to_ectoplasm(\n hass: HomeAssistant,\n entry: ConfigEntry,\n async_add_entities: AddEntitiesCallback,\n platform: Platform,\n) -> None:\n LOGGER.debug(\"Setting up Spook ectoplasm platform: %s\", platform)\n\n for module_file in Path(__file__).parent.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set up Stookalert binary sensor from a config entry. | async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
client = hass.data[DOMAIN][entry.entry_id]
async_add_entities([StookalertBinarySensor(client, entry)], update_before_add=True) | [
"def __init__(self, tesla_device, controller, config_entry):\n super().__init__(tesla_device, controller, config_entry)\n self._target_temperature = None\n self._temperature = None",
"def load(self, config):\n sensor = SunSensor(self.mudpi, config)\n if sensor:\n self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method calculates the pdf and cdf and errors from a dataframe and L shell filtering. | def calc_cdf_pdf(self, df, L_lower, L_upper, bin_width=100):
self.bin_width = bin_width
self.filtered_catalog = df[(df.Lm_OPQ > L_lower) & (df.Lm_OPQ < L_upper)]
# Map to the magnetic equator
self.filtered_catalog.loc[:, 'd_equator'] = np.array([
self.map2equator(row.... | [
"def create_df(self):\n alldf= pd.DataFrame()\n pdffiles= glob.glob(self.input_lib+'/**/*.pdf', recursive=True)\n for pdf_file in pdffiles:\n pdf_page_count= self.count_pages(pdf_file)\n for pg in range(1,pdf_page_count+1):\n pg = str(pg)\n cm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given the number of equatorial detections, n, for n bins, calculate the PDF and CDF errors assuming Poisson statistics. The n array is normalized by an array of weights passed as w. | def _calc_errors(self, n, w, n_trials=10_000):
sqrt_n = np.sqrt(n) # Poisson error from the actual number of observations
# w*np.sqrt(n) term scales the error by the normalization.
pdf_std = w*sqrt_n/(self.bin_width*sum(n*w)) # Now normalize it to an actual PDF.
# Calculate the standard... | [
"def fNL_bispectrum_pdf(self, fNLlist, mean=-100, sigma=100):\n return np.array([self.pdf_fold(fNL, mean=mean, sigma=sigma) for fNL in fNLlist])",
"def calc_star_class_pdf_binned(flux_model,obj_catalog,flux_ratio_names,label):\n\n # Calculates the pdf for each obj in the obj_catalog by\n # using the ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load the equatorial normalization file and rebin if the bin_width is not equal to the index difference. | def _load_norm(self, bin_width):
norm_dir = '/home/mike/research/ac6_microburst_scale_sizes/data/norm'
norm_name = 'equatorial_norm.csv'
norm_path = os.path.join(norm_dir, norm_name)
self.norm = pd.read_csv(norm_path, index_col=0)
sep_min = self.norm.index.min()
sep_max =... | [
"def normalize_bins(self):\n self.norm_bin = np.ones(self.nbins)\n for i in range(self.nbins):\n f = lambda z: self.raw_dndz_bin(z, i)\n\n norm = integrate.simps(f(np.linspace(self.z_min,self.z_max,1000)), x=np.linspace(self.z_min,self.z_max,1000))\n\n \n se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function to make the 1000 various adjustments to the subplots | def _label_and_adjust_subplots(self):
self.ax[0].legend()
self.ax[0].set_xlim(left=1, right=self.norm.index[-3])
self.ax[0].set_ylim(bottom=0)
self.ax[1].set_ylim(bottom=0)
self.ax[0].set_title('AC6 equatorial separation distribution of > 35 keV microbursts')
self.ax[0].s... | [
"def __set_subplots(self):\n self.logger.debug(\"running\")\n if len(self.__plot_names) < 1:\n return\n r = len(self.__plot_names)\n c = 1\n for i in range(0, r):\n self.__plots[self.__plot_names[i]] = [(r, c, i + 1), True]\n self.logger.debug(\"done\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Maps to the magnetic equator assuming d is solely in latitude. | def map2equator(self, lat, lon, alt, time, d):
# Define the coordinates of the two spacecraft.
dLat = self.deltaLat(d, alt)
X1 = {'x1':alt, 'x2':lat-dLat, 'x3':lon, 'dateTime':time}
X2 = {'x1':alt, 'x2':lat+dLat, 'x3':lon, 'dateTime':time}
# Run IRBEM
X1_equator = self.mo... | [
"def magnetic(self):\n return self.__magnetic",
"def magnetization(self):\n return np.abs(np.sum(self.system) / self.size ** 2)",
"def magnetic_field(self, xy, field=\"secondary\"):\n sig = self.sigma_hat # (n_freq, )\n f = self.frequency\n w = 2*np.pi*f\n k = np.sqrt(-... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Manually modify the Ti profile to be more realistic. CER measures C+6, the temperature of which can differ significantly from the main ion species in the edge. | def modify_ti(self, sol_points = None, max_psin = 1.1, decay_length = 0.015,
rad_loc_for_exp_decay = 1.0, reduce_ti = True, ti_min = 1, plotit = False):
tiexp = self.data['pedData']['fitVals']['tisplpsi']['y']
tiexppsi = self.data['pedData']['fitVals']['tisplpsi']['x']
ti_mod... | [
"def generate_idealized_temp_profile(SST, plevs, Tstrat=200):\r\n solution = sp.odeint(pseudoadiabat, SST, np.flip(plevs))\r\n temp = solution.reshape(-1)\r\n temp[np.where(temp<Tstrat)] = Tstrat\r\n return np.flip(temp) # need to re-invert the pressure axis\r",
"def photometric_Teff(apogee_cluster_da... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the prof files from OMFIT fits (saved as prof*.txt files) and produces fits. These were made using 'OMFIT_tools.py' (they're just text files with individual fitted profiles). Only plots if the profiles are remapped | def getProfsOMFIT(self, prof_folder, prof_filename_prefix, min_npsi = 100,
psiMax=1.05, plotit = False):
# Read in the prof*.txt files
if os.path.isfile(os.path.join(prof_folder,prof_filename_prefix + '_T_D.txt')):
profs = ['n_e', 'T_e', 'n_12C6', 'T_D']
... | [
"def fitProfiles(self, monitor, showProfiles=False,\n skipShots=1, plot=True):\n # Uncomment to plot beam profiles\n # showProfiles=True\n gitterFiles = glob.glob((self.monitorPath+monitor+'/*'))\n gitterFiles.sort()\n positionFits = []\n envelopeFits = [... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call b2plot to get the locations of each grid cell in psin space. Saves the values to dictionaries in self.data['solpsData'] | def calcPsiVals(self, plotit = False):
from scipy import interpolate
"""
Find grid corners first:
0: lower left
1: lower right
2: upper left
3: upper right
Average location of cells 0 and 2 for middle of 'top' surface,
which is the top l... | [
"def plot_bvalue_maps_grid(dataset, plotspecs):\n grid_dims = plotspecs[\"grid_dims\"]\n for t in range(len(dataset.topologies)):\n names = dataset.top_names[t]\n for n in range(len(names)):\n # Plot whatever for a protein\n pairs = dataset.pairs[t][n]\n N = data... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calls b2plot to get the particle flux profiles | def getSOLPSfluxProfs(self, plotit = False):
# x variable is identical for all of these
x_fTot, fluxTot = sut.B2pl("fnay za m* 0 0 sumz sy m/ writ jxa f.y")
dummy, fluxConv = sut.B2pl("na za m* vlay m* 0 0 sumz sy m/ writ jxa f.y")
dummy, na = sut.B2pl("na 0 0 sumz writ jxa f.y")
... | [
"def plot(self): # coverage: ignore\n import matplotlib.pyplot as plt\n\n with quantity_support():\n plt.figure()\n plt.scatter(self.bias.to(u.V), self.current.to(u.mA), marker=\".\", color=\"k\")\n plt.title(\"Probe characteristic\")",
"def plot_wall_fluxes(self,cb... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot the upstream profiles from SOLPS compared to the experiment along with the corresponding updated transport coefficients | def plotXportCoef(self, ti_mod_used = True):
dnew_ratio = self.data['solpsData']['xportCoef']['dnew_ratio']
dnew_flux = self.data['solpsData']['xportCoef']['dnew_flux']
kenew_ratio = self.data['solpsData']['xportCoef']['kenew_ratio']
kenew_flux = self.data['solpsData']['xportCoef']['ken... | [
"def plot_profiles(self):\n # if 'xportCoef' not in self.data['solpsData']:\n # print('Transport coefficients not yet calculated!! Calculating them using defaults')\n # self.calcXportCoef(plotit = False,debug_plots = False)\n\n headroom = 1.04\n \n # Load SOLPS prof... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot the upstream profiles from SOLPS compared to the experiment | def plot_profiles(self):
# if 'xportCoef' not in self.data['solpsData']:
# print('Transport coefficients not yet calculated!! Calculating them using defaults')
# self.calcXportCoef(plotit = False,debug_plots = False)
headroom = 1.04
# Load SOLPS profiles and tra... | [
"def psf_plot(initial_psf, current_psf, new_psf, lower, parms):\n shrink = 0.7\n ws, hs = 0.05, 0.05\n mn, mx = -0.15, 0.15\n f = pl.figure(figsize=(10, 10))\n pl.subplots_adjust(wspace=ws, hspace=hs)\n\n ax = pl.subplot(221)\n pl.imshow(initial_psf, interpolation='nearest', origin='lower',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Performs task-specific attention reduction, using a learned attention context vector (constant within the task of interest). | def task_specific_attention(inputs, output_size,
initializer=layers.xavier_initializer(),
activation_fn=tf.tanh, scope=None):
assert len(inputs.get_shape()) == 3 and inputs.get_shape()[-1].value is not None
with tf.variable_scope(scope or 'attention') as ... | [
"def inference(tasks, name, convnet_model, convnet_weight_path, input_patch_size,\n output_patch_size, output_patch_overlap, output_crop_margin, patch_num,\n num_output_channels, dtype, framework, batch_size, bump, mask_output_chunk,\n mask_myelin_threshold, input_chunk_name, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
An op to compute the length of a sequence from input shape of [batch_size, n_step(max), n_features], it can be used when the features of padding (on right hand side) are all zeros. | def retrieve_seq_length_op(data):
with tf.name_scope('GetLength'):
## TF 1.0 change reduction_indices to axis
used = tf.sign(tf.reduce_max(tf.abs(data), 2))
length = tf.reduce_sum(used, 1)
## TF < 1.0
# used = tf.sign(tf.reduce_max(tf.abs(data), reduction_indices=2))
... | [
"def length(self, sequence):\n\n\t\tused = tf.sign(tf.reduce_max(tf.abs(sequence), reduction_indices=2))\n\t\tlength = tf.reduce_sum(used, reduction_indices=1)\n\t\tlength = tf.cast(length, tf.int32)\n\t\treturn length",
"def train_sequence_length(self) -> int:\n pass",
"def compute_sequence_lengths(inputs, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
An op to compute the length of a sequence, from input shape of [batch_size, n_step(max)], it can be used when the features of padding (on right hand side) are all zeros. | def retrieve_seq_length_op2(data):
return tf.reduce_sum(tf.cast(tf.greater(data, tf.zeros_like(data)), tf.int32), 1) | [
"def retrieve_seq_length_op(data):\n with tf.name_scope('GetLength'):\n ## TF 1.0 change reduction_indices to axis\n used = tf.sign(tf.reduce_max(tf.abs(data), 2))\n length = tf.reduce_sum(used, 1)\n ## TF < 1.0\n # used = tf.sign(tf.reduce_max(tf.abs(data), reduction_indices=2... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing the get of a single client for having the correct status and returning the correct objects. | def test_get_client(self):
response = client.get("/api/client/{}".format(str(self.clients["burning_man"].id)))
client_object = Client.objects.get(id=self.clients["burning_man"].id)
serializer = ClientSerializer(client_object)
self.assertEqual(response.data, serializer.data)
self... | [
"def test_get_all_clients(self):\n\n response = client.get(\"/api/client\")\n self.assertEqual(len(response.data), 3)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def get(self, args):\n\n if len(args) == 0:\n return {\"status\": \"no query provided\"}, 400... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing the get of all clients for having the correct status and returning the correct number of objects. | def test_get_all_clients(self):
response = client.get("/api/client")
self.assertEqual(len(response.data), 3)
self.assertEqual(response.status_code, status.HTTP_200_OK) | [
"def count(cls, client) :\n try :\n obj = service()\n option_ = options()\n option_.count = True\n response = obj.get_resources(client, option_)\n if response :\n return response[0].__dict__['___count']\n return 0\n excep... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing the get of all events for having the correct status and returning the correct number of objects. | def test_get_all_events(self):
response = client.get("/api/event")
self.assertEqual(len(response.data), 2)
self.assertEqual(response.status_code, status.HTTP_200_OK) | [
"def test_07_api_can_get_all_events(self):\n response = self.app.get('/api/events', headers=headers)\n data = json.loads(response.get_data())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(data['events']), 2)",
"def test_10_api_can_get_all_free_events(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Setting up test data for the customer endpoints. | def setUp(self):
self.customers = dict()
self.customers["james_bowen"] = Customer.objects.create(
name='James Bowen')
self.customers["amanda-arias"] = Customer.objects.create(
name='Amanda Arias')
self.customers["beau-jeppesen"] = Customer.objects.create(
... | [
"def setUp(self):\n User = get_user_model()\n user = User.objects.create_user(\n email='testUser@email.com',\n password='testpassword123',\n )\n self.customer = CustomerProfile.objects.create(\n user=user,\n first_name='Mohamed',\n l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing the get of a single customer for having the correct status and returning the correct objects. | def test_get_customer(self):
# get API response
response = client.get("/api/customer/{}".format(str(self.customers["james_bowen"].id)))
# get data from db
customer_object = Customer.objects.get(id=self.customers["james_bowen"].id)
serializer = CustomerSerializer(customer_object)... | [
"def test_get_customer_single(self):\n user = User.objects.create_user(email='jacob@…', password='top_secret')\n customer = Customer.objects.create(user=user, store_linked=self.vendor)\n\n self.assertEqual(user.get_customer(), customer)",
"def test_list_customers(self):\n\n add_custome... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing the get of all customers for having the correct status and returning the correct number of objects. | def test_get_all_customers(self):
response = client.get("/api/customer")
self.assertEqual(len(response.data), 3)
self.assertEqual(response.status_code, status.HTTP_200_OK) | [
"def test_list_customers(self):\n\n add_customer(**test_customer)\n\n inactive_customer = {'customer_id': 321,\n 'customer_name': 'Dwayne',\n 'customer_last_name': 'Johnson',\n 'customer_address': '321 Not-Fake Street'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing the get of a single ticket for having the correct status and returning the correct objects. | def test_get_ticket(self):
response = client.get("/api/ticket/{}".format(
str(self.tickets["burning_man_2018_james_bowen"].id)
))
ticket_object = Ticket.objects.get(id=self.tickets["burning_man_2018_james_bowen"].id)
serializer = TicketSerializer(ticket_object)
self.... | [
"def test_get_ticket(self):\n res = self.client().get(\"/api/v1/events/tickets/1\")\n self.assertEqual(res.status_code, 200)",
"def get_ticket_status(request, ticket_id):\n if request.method == \"GET\":\n try:\n ticket = Ticket.objects.get(id=ticket_id)\n except Ticket.Do... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing the get of all tickets for having the correct status and returning the correct number of objects. | def test_get_all_tickets(self):
response = client.get("/api/ticket")
self.assertEqual(len(response.data), 4)
self.assertEqual(response.status_code, status.HTTP_200_OK) | [
"def test_view_all_tickets(app):\n for i in range(10):\n tick = Ticket(\n title=\"Ticket {}\".format(i),\n text=\"Text {}\".format(i),\n creator=\"creator{}@gmail.com\".format(i),\n assignee=\"assignee{}@gmail.com\".format(i),\n status=TicketStatus.Pr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests the embed and include field functionality of the DynamicModelSerializer | def test_embed_and_include(self):
response = client.get("/api/ticket/{}?embed_fields=event&include_fields=customer"
.format(str(self.tickets["burning_man_2019_james_bowen"].id)))
event_object = Event.objects.get(id=self.events["burning_man_2019"].id)
event_serializ... | [
"def test_nested_dynamic_fields():\n widget = Widget(name=\"test widget\", quantity=11)\n serializer = NestedTestSerializer(\n instance={\n \"widget\": widget,\n \"owner_name\": \"test_name\",\n },\n fields=\"widget_created,widget_name,widget_quantity\",\n )\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize main cloud controller given a cloud. Most times one is expected to access a controller from inside the | def __init__(self, cloud):
self.cloud = cloud
self._conn = None
# Initialize compute controller.
assert issubclass(self.ComputeController, BaseComputeController)
self.compute = self.ComputeController(self)
# Initialize DNS controller.
if self.DnsController is n... | [
"def init_controller(component_name, *args, **kwargs):\n return import_controllers(component_name)(*args, **kwargs)",
"def _do_startup(self, cloud):\n\n #self.logger.info(\"_do_startup(%s)\" % cloud)\n startCloud = self._init_cloud(cloud)\n m_type = self.config_parser.get(startCloud.cloud_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Preparse keyword arguments to `self.add`. This is called by `self.add` when adding a new cloud, in order to apply preprocessing to the given params. Any subclass that requires any special preprocessing of the params passed to `self.add` SHOULD override this method. | def _add__preparse_kwargs(self, kwargs):
return | [
"def pre_create(cls, **kwargs):\n return kwargs",
"def init_params(self, params):\n args = {k: v for k, v in params.items() if k != 'name'}\n logger.debug('init_args: {}'.format(args))\n self.init_args(**args)",
"def _parse_args(self):\n self._verify(self.args + list(self.kwar... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Preparse keyword arguments to `self.update`. This is called by `self.update` when updating a cloud and it is also indirectly called during `self.add`, in order to apply preprocessing to the given params. Any subclass that requires any special preprocessing of the params passed to `self.update` SHOULD override this meth... | def _update__preparse_kwargs(self, kwargs):
return | [
"def prepair_params(self, prepair_params):\n\n self._prepair_params = prepair_params",
"def init_params(self, params):\n args = {k: v for k, v in params.items() if k != 'name'}\n logger.debug('init_args: {}'.format(args))\n self.init_args(**args)",
"def _parse_args(self):\n se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete a Cloud. By default the corresponding mongodb document is not actually deleted, but rather marked as deleted. | def delete(self, expire=False):
self.cloud.deleted = datetime.datetime.utcnow()
self.cloud.save()
if expire:
# FIXME: Circular dependency.
from mist.api.machines.models import Machine
Machine.objects(cloud=self.cloud).delete()
self.cloud.delete() | [
"def delete(self,cn):\n try:\n collection.delete_one({\"_id\": str(cn)})\n except:\n print(\"Kan de data niet verwijderen\")",
"def delete(gvar):\n\n mandatory = ['-cn']\n required = []\n optional = ['-g', '-H', '-h', '-s', '-v',... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cat's size is increased after eating | def test_CatSizeIncreaseShoud_increasesizeBy1(self):
name = "Cat Name"
fed = False
sleepy = False
size = 0
cat = Cat(name)
cat.eat()
self.assertEqual(size + 1, cat.size) | [
"def updateCatSize(self): \n self.size=(Window.size[0]*1/4,Window.size[1]*2/3)",
"def update_size(self):\n self.size = len(self.nodes)",
"def _UpdateLength(self):\n self._len = reduce(lambda x,y: len(y)+x,self.children,1)",
"def test_shrink_obj2cat():\n atom = ATOMClassifier(X10_str2, y10,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cat is fed after eating | def test_CatFedAfterEating(self):
name = "Cat Name"
cat = Cat(name)
cat.eat()
self.assertTrue(cat.fed) | [
"def feed(self, food):\n nutrition = food.nutrition\n print(f\"{self.name} ate it so fast it was hardly there\")\n if food is RiceBall:\n self.modify_hunger(nutrition-10)\n elif food is Medicine:\n self.modify_health(nutrition*(-1))\n else:\n self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cat cannot eat if already fed, raises an error | def test_CatCannotEatAfterFedShoud_raise_error(self):
name = "Cat Name"
cat = Cat(name)
cat.eat()
with self.assertRaises(Exception) as context:
cat.eat()
self.assertIsNotNone(context.exception) | [
"def test_CatIsNotSleepyAfterSleeping(self):\r\n name = \"Cat Name\"\r\n cat = Cat(name)\r\n cat.eat()\r\n cat.sleep()\r\n self.assertFalse(cat.sleepy)",
"def test_CatFedAfterEating(self):\r\n name = \"Cat Name\"\r\n cat = Cat(name)\r\n cat.eat()\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cat is not sleepy after sleeping | def test_CatIsNotSleepyAfterSleeping(self):
name = "Cat Name"
cat = Cat(name)
cat.eat()
cat.sleep()
self.assertFalse(cat.sleepy) | [
"def _sleep(self):\n self.kill()",
"def sleep(self):\n # Put the creature to sleep\n self.is_sleeping = True\n self.tiredness -=3\n self.boredom -=2\n print(\"Zzzzzzzzzzzzzz........Zzzzzzzzzzzzzz........Zzzzzzzzzzzzzz\")\n\n # If tiredness and boredome is less than... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function determines the Klocwork version number. | def get_version_number(klocwork_path):
try:
# Set the path, if necessary
if klocwork_path == '':
call_string = 'which kwinject'
my_env = os.environ.copy()
subprocess.call(call_string, shell=True, env=my_env)
proc = subprocess.Popen(call_string, shell=... | [
"def get_build_version():\r\n\r\n prefix = \"MSC v.\"\r\n i = string.find(sys.version, prefix)\r\n if i == -1:\r\n return 6\r\n i = i + len(prefix)\r\n s, rest = sys.version[i:].split(\" \", 1)\r\n majorVersion = int(s[:-2]) - 6\r\n minorVersion = int(s[2:3]) / 10.0\r\n # I don't thin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function returns the ltoken value found in the file ltoken_file. | def get_ltoken_data(ltoken_file):
# Read in the contents of the ltoken file
with open(ltoken_file, 'r') as input_fh:
input_data = input_fh.read()
# Get the ltoken data
ltoken_split = list(filter(None, re.split(';', input_data.strip())))
username = ltoken_split[-2]
ltoken = ltoken_split... | [
"def get_token():\n f = open(\"token.txt\", \"r\")\n return f.read()",
"def load_token(self):\n\t\tif file_exists(TOKEN_FILE):\n\t\t\tload_token_file_handler = open(TOKEN_FILE,\"r\")\n\t\t\ttoken = load_token_file_handler.read()\n\t\t\treturn token\n\t\telse:\n\t\t\t## Return empty string if no token exists... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function finds the build ID within the appropriate log file. | def get_build_id(log_file):
# Initialize variables
build_id = None
# Read in the first line of the log file
with open(log_file, 'r') as input_fh:
log_line = input_fh.readline()
# Split the line
line_split = filter(None, re.split('[" ]', log_line))
# Find the build ID parameter
... | [
"def get_buildlog_file(self) -> str:\n buildlog_paths = glob.glob(os.path.join(self.build_dir, \"build.log\"))\n if len(buildlog_paths) != 1:\n raise BuildError(\"Missing/multiple build.log file.\")\n return buildlog_paths[0]",
"def get_log_id(self) -> str:\n pass",
"def g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Displays up to date list of pets. | def display_pets_list():
pets = Pet.query.all()
return render_template('pet_listing.html',
pets=pets) | [
"def show_pets():\n\n pets = Pet.query.all()\n\n return render_template(\"pet-list.html\", pets=pets)",
"def list_pets():\n pets = Pet.query.all()\n return render_template('list.html', pets=pets)",
"def plants_list():\n return render_template('plants_list.html', plants=plants.find())",
"def sho... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Displays form to add a new pet. | def display_add_pet_form():
form = AddPetForm()
if form.validate_on_submit():
pet = Pet(
name=form.name.data,
species=form.species.data,
photo_url=form.photo_url.data,
age=form.age.data,
notes=form.notes.data)
db.session.add(pet)
... | [
"def show_and_handle_new_pet_form():\n\n form = AddPetForm()\n\n if form.validate_on_submit():\n name = form.name.data\n species = form.species.data\n img = form.img.data or None\n age = form.age.data\n notes = form.notes.data\n\n\n new_pet = Pet(name=name,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Shows pet details and edit pet form | def display_pet_details_and_edit_form(pet_id):
pet = Pet.query.get_or_404(pet_id)
form = EditPetForm(obj=pet)
if form.validate_on_submit():
print("*!*!*!*!*! IT WORKED !*!!"*10)
pet.photo_url=form.photo_url.data
pet.notes=form.notes.data
pet.available=form.available.data
... | [
"def edit_pet_details(pet_id):\n\n pet = Pet.query.get_or_404(pet_id)\n\n form = EditPetForm(obj=pet)\n\n if form.validate_on_submit():\n pet.img = form.img.data or None\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.commit()\n\n flash(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns whether this node accepts node 'child' as child | def acceptsAsChild(self, child):
return False | [
"def is_child(self, parent):\n return self in parent.children(class_name = self.class_name())",
"def isChildPermitted(self, child: 'SoNode') -> \"SbBool\":\n return _coin.SoNodeKitListPart_isChildPermitted(self, child)",
"def is_child(self, letter: Text) -> bool:\n return letter in self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns all editable attributes | def getEditableAttributes(self):
return ["active"] | [
"def get_attributes(self):\r\n return self.__atributos",
"def getAttrs(self):\n\t\treturn self._attributes",
"def get_attrs(self):\n return self.ms.get_attrs()",
"def all_attributes (self):\n attrs = []\n for sup in self.super:\n sup_attrs = sup.all_attributes ()\n if len (sup_at... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a Source holding the DR12 combined sample randoms file for the specified sample and redshift bin | def read_randoms(sample, zbin):
# load the catalog
dirname = '%s/combined_sample/Randoms' %CSCRATCH
path = os.path.join(dirname, 'random0_DR12v5_CMASSLOWZTOT_%s.fits' %sample)
s = FITSCatalog(path, use_cache=True)
# add the Position column
s['Position'] = transform.SkyToCartesion(s['RA'], s['DE... | [
"def get_sample_source(prob_label):\n\n if prob_label not in label2fname:\n raise ValueError('Unknown problem label. Need to be one of %s'%str(list(label2fname.keys())) )\n fname = label2fname[prob_label]\n tst_data, n = load_nips_TSTData(fname)\n ss = data.SSResample(tst_data)\n return ss, n"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a frozen mapping view of the items in this mapped collection. Unlike simply doing `dict(mapping)`, this may rely on internal detail around how the data is being stored to allow for a more efficient copy. This may look like calling `dict.copy`. !!! note Unlike `MappedCollection.copy`, this should return a pure ma... | def freeze(self) -> typing.MutableMapping[KeyT, ValueT]: | [
"def invert_mapping(\n mapping: Mapping[K, V]\n) -> Mapping[V, K]:\n if len(set(mapping.values())) < len(mapping):\n raise ValueError('Mapping is not a bijection, since there are duplicate values!')\n return {\n v: k\n for k, v in mapping.items()\n }",
"def copy(self):\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create cpts for variables with no cpt assigned | def make_cpts(variables,parents):
for var in variables:
if var.cpt is None:
set = [var]
if parents.has_key(var) and parents[var] is not None:
for pa in parents[var]:
set.append(pa)
var.cpt = Factor(set) | [
"def create_critic_variables(self) -> Dict[str, Dict[str, snt.Module]]:",
"def generate_copt_file(style):\n with open(relative_filename(style.filename()), \"w\") as f:\n f.write(style.docstring())\n f.write(\"\\n\")\n for var_name, arg_list in sorted(COPT_VARS.items()):\n f.write(\"\\n\")\n f.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Eval class incremental learning on miniImageNet | def eval_mini_imagenet_cl(
model: PrototypicalNetworkCL,
root_dir: str,
n_classes_per_task: int = 1,
k: int = 30,
test_shots_per_class: int = 30,
runs: int = 100,
image_resize_hw: Tuple[int, int] = (84, 84),
device: str = "cpu",
):
n_meta_test_classes = 20
dataset = MiniImagenet(... | [
"def training_pool(self):",
"def train(self, images, labels, load):\n \n PATH='./trained.pickle'\n\n if os.path.isfile(PATH) and load:\n print 'Loading already existing training values from ' + PATH\n with open('trained.pickle') as f:\n self.classes, self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
From given backends create and return engine, filename and extension indexes. | def scan_backends(self, backends):
engines = OrderedDict()
filenames = OrderedDict()
extensions = OrderedDict()
for item in backends:
engines[item._kind_name] = item
filenames[item._default_filename] = item._kind_name
extensions[item._file_extension] ... | [
"def create_backends_from_conf(cls):\n backends = []\n for bname in CONF.hnas.enabled_backends:\n CONF.register_opts(hnas_drivers_common_opts, bname)\n backend = getattr(CONF, bname)\n\n svcs_pool = [backend.hnas_svc0_pool_name,\n backend.hnas_s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Try to find an existing settings filename from the base directory using the default filename from available engines. The first filename found from the available engines wins, so the registered engines' order matters. | def guess_filename(self, basedir, kind=None):
if kind:
filepath = os.path.join(basedir,
self.engines[kind]._default_filename)
if os.path.exists(filepath):
return filepath, self.engines[kind]
for filename, kind in self.filenames... | [
"def search(self, filepath=None, basedir=None, kind=None):\n # None values would cause trouble with path joining\n if filepath is None:\n filepath = \"\"\n if basedir is None:\n basedir = \".\"\n\n if not basedir and not filepath:\n msg = \"Either basedir... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Search for a settings file. | def search(self, filepath=None, basedir=None, kind=None):
# None values would cause trouble with path joining
if filepath is None:
filepath = ""
if basedir is None:
basedir = "."
if not basedir and not filepath:
msg = "Either basedir or filepath is re... | [
"def findSettingInSettings(self, setting_names):\n for s in setting_names:\n if s in self.settings.keys():\n return self.settings[s]\n # return self.settings['foreground']\n raise KeyError(\"none of these settings found in a theme: {0}\"\n .format... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Starts the first step of a test event. 1. Attempts to build the firmware. 2. Uploads the built firmware onto the target board. | def start_test(self):
self.state = "starting_fw_prep"
self.log.write(
f"Preparing Firmware..."
)
try:
fw_build_dir = cirpy_actions.build_fw(
self.board_name,
self.log,
self.clone_dir_path.resolve()
)
... | [
"def startTest(self, event):\r\n self._setupLoghandler()",
"def run_initial_setup(self):\r\n last_initial_setup_step = ServerConfig.objects.conf('last_initial_setup_step')\r\n if not last_initial_setup_step:\r\n # None is only returned if the config does not exist,\r\n #... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs the second step of a test event, by calling ``pytest`` to run the tests located in the circuitpython repository. Since the location pointed to is the version we're testing, any new test scripts or changes will be used. | def run_tests(self):
rosie_tests_dir = str(
self.clone_dir_path.resolve()
/ "tests"
/ "circuitpython"
/ "rosie_tests"
)
pytest.main([rosie_tests_dir], plugins=[RosieTestController(self)]) | [
"def run_tests():\n print blue(\"Running tests suites\")\n with fabtools.python.virtualenv(env.virtualenv):\n with cd(env.new_release_path):\n run(\"python -m unittest discover -s %s -p '*.py' -v\" % env.tests_package)",
"def smoke_tests(self):\n #\n # run \"octopus --version... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Identify the core distance (minimum value of epsilon) for each point in an array of integers. | def core_distances(array, min_points):
# groups must contain at least two points
assert min_points > 1
# offset for indexing because the minimum points includes itself
offset = min_points - 1
length = len(array)
lower = array[0:length - offset]
upper = array[offset:length]
eps_values = ... | [
"def _fork_epsilon(array, min_points):\n if len(array) <= min_points:\n # no forks possible because all points must have the same eps\n return None\n\n offset = min_points - 1\n\n # calculate split eps using the 2d method\n eps_values = array[offset:] - array[:-offs... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if an array is sorted in ascending order. | def _sorted_ascending(array):
return np.sum(array[1:] - array[:-1] < 0) == 0 | [
"def is_sorted(array): # real signature unknown; restored from __doc__\n pass",
"def ascending(array):\n for index in range(array.size() - 1):\n if arr[index] >= arr[index + 1]:\n return False\n\n return True",
"def is_sorted(a):\n if type(a) != np.ndarray:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Combines overlapping slices assuming half-open intervals. | def _melt_slices(slices):
lowers, uppers = slices['lower'], slices['upper']
lowers.sort()
uppers.sort()
splits = np.append(np.array([False]),
uppers[:-1] <= lowers[1:]) # True means gap between
def _melter(lowers, uppers, splits):
l = lowe... | [
"def _split_arrays_to_start_ends(data: np.ndarray, peak_indices: np.ndarray, periods=3):\n left = peak_indices - periods\n left[left < 0] = 0\n right = peak_indices + (periods + 1)\n right[right >= len(right)] = len(right)\n all_ = np.concatenate([left, right])\n all_.sort()\n return np.concate... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate subclusters of an array and return their slice indices. The input array must be sorted in ascending order. If n is greater than the length of the array, an empty array is returned. | def _subcluster(array, min_points, epsilon):
assert DBICAN._sorted_ascending(array)
offset = min_points - 1
upper = array[offset:]
lower = array[:-offset]
selected = upper - lower <= epsilon
lower_index = np.arange(0, len(lower))[selected]
upper_index = np.arange... | [
"def _cluster(array, min_points, epsilon):\n # sorted-ascending checked in method _subcluster\n slices = DBICAN._subcluster(array, min_points, epsilon)\n if len(slices) > 1:\n slices = DBICAN._melt_slices(slices)\n return slices",
"def clusters(self):\n return (self.i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate clusters of an array and return their slice indices. The input array must be sorted in ascending order. If n is greater than the length of the array, an empty array is returned. | def _cluster(array, min_points, epsilon):
# sorted-ascending checked in method _subcluster
slices = DBICAN._subcluster(array, min_points, epsilon)
if len(slices) > 1:
slices = DBICAN._melt_slices(slices)
return slices | [
"def cluster(array, n_clusters, max_mask_pts=np.infty):\n\n array = np.array(array)\n \n assert array.ndim == 2\n\n coord = np.where(array > 0)\n y = coord[0].reshape((-1, 1))\n x = coord[1].reshape((-1, 1))\n c = np.concatenate((y, x), axis=1)\n if len(c) == 0:\n centroids = np.array... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return values from the input array grouped into clusters. Points classified as noise are not returned. | def clusters(self):
return (self.input_array[lower:upper]
for lower, upper in self.slices) | [
"def cluster(array, n_clusters, max_mask_pts=np.infty):\n\n array = np.array(array)\n \n assert array.ndim == 2\n\n coord = np.where(array > 0)\n y = coord[0].reshape((-1, 1))\n x = coord[1].reshape((-1, 1))\n c = np.concatenate((y, x), axis=1)\n if len(c) == 0:\n centroids = np.array... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Identify eps 'splits' in an array by calculating the epsilon of the gaps between values in the array. Identifies the minimum epsilon of a cluster prior to it forking into child clusters. If the cluster does not fork into children then None is returned. Note that in this case the minimum epsilon of the cluster is then equal ... | def _fork_epsilon(array, min_points):
if len(array) <= min_points:
# no forks possible because all points must have the same eps
return None
offset = min_points - 1
# calculate split eps using the 2d method
eps_values = array[offset:] - array[:-offset]
e... | [
"def select_epsilon_leaf(condensed_tree, n_clusters):\n # Use an epsilon value that produces the right number of clusters.\n # The condensed tree of HDBSCAN has this information.\n # Extract the lambda levels (=1/distance) from the condensed tree\n lambdas = condensed_tree._raw_tree['lambda_val']\n #... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Traverse a tree of nested density clusters and recursively identify clusters based on their area. | def _traverse_cluster_tree(self,
local_points,
local_max_eps):
# Values of epsilon bellow which the cluster forks
fork_epsilon = self._fork_epsilon(local_points['value'],
self.min_points)
if ... | [
"def _walk_netcdf_tree(top):\n values = top.groups.values()\n yield values\n for value in top.groups.values():\n for children in _walk_netcdf_tree(value):\n yield children",
"def create_KD_tree(data_list,deep,father_node):\n l = len(data_list)\n if l == 0:\n return\n # d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Docker filesysteminfo plugin scan. | def scan(container, output_queue):
res = {}
_log.info('Staring {} Plugin ...'.format(_plugin_name_))
driveinfo = {
"MOUNT": {
"cmd": "mount",
"msg": "Mount results",
"results": []
},
"FSTAB": {
"cmd": "cat /etc/fstab",
"ms... | [
"def monitor_filesystem():\n\n # Begin logging\n logging.info('Beginning filesystem monitoring.')\n\n # Get path, directories and files in system and count files in all directories\n settings = get_config()\n filesystem = settings['filesystem']\n outputs_dir = os.path.join(settings['outputs'], 'mo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests the to fixed function. | def test_to_fixed(self):
# creates the the same value using an "infinite"
# (repeating decimal) approach (through the 0.33)
# and using the final and fixed value, this will create
# problems in a normal float comparison
infinite_float_value = 0.33 + 0.11 - 0.09 - 0.33
... | [
"def test_fdwrap(self):\n\n for func in self.functions:\n msg = \"\\nfdwrap alters function behavior\"\n f=forcebalance.finite_difference.fdwrap(func[0], [0]*3, 0)\n self.logger.debug(\"Checking to make sure fdwrap returns a function\\n\")\n self.assertEqual(type(f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the issue object at index. | def getIssue(self, index):
# type: (int) -> Issue
if 0 <= index < len(self.issues):
return self.issues[index]
return self.issues[0] | [
"def getObject(self, index: long) -> object:\n ...",
"def get_issue(key, issues):\n return filter((lambda issue: issue.key == key), issues)[0]",
"def get_issue(self, title: str) -> Issue:\n for issue in self.repository.issues:\n if issue.title == title:\n return issue\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds the issue to the list of issues. | def addIssue(self, issue):
# type: (Issue) -> ()
# let's worry about manual indexing later?
self.issues.append(issue)
self.fireTableDataChanged() | [
"def _add(self, issue: Issue) -> None:\n self._issueList.append(issue)\n if isinstance(issue, LocalizedSourceIssue):\n index = issue.line\n else:\n index = 0\n if index not in self._issuesAtLine:\n self._issuesAtLine[index] = []\n self._issuesAtLin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes the issue at index from the list of issues. | def removeIssue(self, index):
# type: (int) -> ()
if 0 <= index < len(self.issues):
del self.issues[index]
self.fireTableDataChanged()
# otherwise do nothing.
| [
"def remove(self, index=-1):\n del(self.crystal_list[index])",
"def removeIndex(self, index):\r\n item = self.queue.pop(index)\r\n return item",
"def remove(self, index: 'int const') -> \"void\":\n return _coin.SbPList_remove(self, index)",
"def remove_index(index):\n raise ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the value of the first column of the table row that was clicked. This is not the same as the row index because the table can be sorted. | def getClickedIndex(self, event):
# get the event source, the table in this case.
tbl = event.getSource()
# get the clicked row
row = tbl.convertRowIndexToModel(tbl.getSelectedRow())
# get the first value of clicked row
return tbl.getValueAt(row, 0)
# retur... | [
"def get_selected_row(event):",
"def getClickedRow(self, event):\r\n tbl = event.getSource()\r\n mdl = tbl.getModel()\r\n row = tbl.convertRowIndexToModel(tbl.getSelectedRow())\r\n assert isinstance(mdl, IssueTableModel)\r\n return mdl.getIssue(row)\r\n # return tbl.getMo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the complete clicked row. | def getClickedRow(self, event):
tbl = event.getSource()
mdl = tbl.getModel()
row = tbl.convertRowIndexToModel(tbl.getSelectedRow())
assert isinstance(mdl, IssueTableModel)
return mdl.getIssue(row)
# return tbl.getModel().getDataVector().elementAt()
| [
"def get_selected_row(event):",
"def _get_selectedRow(self) -> \"int\" :\n return _core.TableCommandInput__get_selectedRow(self)",
"def row(self, index):\n return self.rows[index]",
"def tableClick(self, x, y):\n self.selectRow(x)",
"def top_row(self):\n return self._row",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read the results, such as energy, forces, eigenvalues, etc. | def read_results(self):
FileIOCalculator.read(self, self.label)
if not os.path.isfile(self.label + '.out'):
raise ReadError
parser = MopacParser(self.label)
self.parser = parser
self.atoms = parser.atoms
self.results = parser.get_properties() | [
"def read_data( filename ):\n\n # read first word at first line\n with open( filename, 'r' ) as f:\n lattice = f.readline().split()[0] \n\n\n # read volumen and energy results \n data = np.loadtxt(filename, skiprows=1) \n\n return lattice, factor[lattice]*data[:,0]**3, data[:,1]",
"def _r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
>>> c = np.ones(2) * 0.5 >>> r = 1 >>> thetas = np.array([10, 20, 30]) * np.pi / 180 >>> points = r * np.vstack((np.cos(thetas), np.sin(thetas))) + c.reshape(1, 1) >>> [round(f, 2) for f in revolute_params_analytic(points)] [0.5, 0.5, 1.0, 0.17, 0.17] >>> c = np.random.rand(2) >>> r = np.random.rand(1) >>> omega = np.random.... | def revolute_params_analytic(points):
# | x_i - c |^2 = r^2, for all i
# for all i, minimize ||c' c - 2 x_i' c - r^2 ||
#
## for 2D
# take any pair of points
# xm_ij = (x_i + x_j)/2
# (x_i - xm_ij)' (c - xm_ij) = 0
# (x_j - xm_ij)' (c - xm_ij) = 0
# [x_i' - xm_ij'] c = (x_i -xm_ij)'... | [
"def test_mean_radial_velocity_vs_r_vs_brute_force_pure_python():\n\n npts = 99\n\n with NumpyRNGContext(fixed_seed):\n sample1 = np.random.random((npts, 3))\n sample2 = np.random.random((npts, 3))\n velocities1 = np.random.uniform(-10, 10, npts * 3).reshape((npts, 3))\n velocities... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method is called at the end of the job. It should do final cleanup, e.g. close all open files. evt: event data object; env: environment object. | def endjob( self, evt, env ) :
logging.info( "dump_evr.endjob() called" ) | [
"def endjob( self, evt, env ) : \n logging.info( \"image_save_in_file.endjob() called\" )",
"def close(self):\n self.env = None",
"def close_env(self) -> None:\n self.env.close()",
"def __exit__(self, *args):\r\n\t\tself.io_buffer.close()",
"def __exit__(self, *args: Any) -> None... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all label names (either from one or all subdirectories if subdatasets are defined) and check consistency of names. | def get_label_names(
self,
dataset_paths: List[str],
):
tmp_label_names = []
for dataset_path in dataset_paths:
dataset_label_names = []
if self.args.add_null_class:
dataset_label_names.append(const.NULL_CLASS_LABEL)
for name in s... | [
"def load_additional_labels(dataset_name, label_name=\"\"):\n in_name = {\n \"NEURON_1K\": \"scRNA/neuron_1k_multi_labels\",\n # ['graph_based_cluster', 'umi']\n \"HEART_1K\": \"scRNA/heart_1k_multi_labels\",\n # ['graph_based_cluster', 'umi']\n \"PBMC_1K\": \"scRNA/pbmc_1k_mul... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Count number of samples in dataset. | def count_samples(
self,
samples: List,
) -> int:
num_samples = len(samples)
with utils.format_text("yellow", ["underline"]) as fmt:
self.log.info(fmt(f"number of data: {num_samples}"))
return num_samples | [
"def count_data(self):\n num_data = 0\n for cur_file_name in self.file_names:\n cur_file_features, cur_file_labels = self.load_data(cur_file_name)\n num_data += self.get_num_samples( cur_file_features )\n return num_data",
"def count_samples(measurement_df):\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Doing oversampling based on labels. | def oversampling(self, data, labels):
assert self.args.oversampling_ratio is not None, (
"When `--do_oversampling` is set, it also needs a proper value for `--oversampling_ratio`.")
samples_of_label = defaultdict(list)
for sample, label in zip(data, labels):
samples_of_l... | [
"def oversample(self, *args, **kwargs):\n if args:\n assert len(args) in [1, len(self)]\n elif kwargs:\n for name in self.names:\n if name not in kwargs:\n kwargs[name] = 1\n factors = self._args_kwargs_to_list(*args, **kwargs)\n ne... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds all non-static methods and randomly chooses one of them | def init_make_method_static(self):
refactoring_main = make_method_static_2.main
params = {"udb_path": self.udb_path}
candidates = self._methods
params.update(random.choice(candidates))
return refactoring_main, params | [
"def init_make_method_non_static(self):\n refactoring_main = make_method_non_static_2.main\n params = {\"udb_path\": self.udb_path}\n candidates = self._static_methods\n params.update(random.choice(candidates))\n return refactoring_main, params",
"def test_get_standard_methods()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds all static methods and randomly chooses one of them | def init_make_method_non_static(self):
refactoring_main = make_method_non_static_2.main
params = {"udb_path": self.udb_path}
candidates = self._static_methods
params.update(random.choice(candidates))
return refactoring_main, params | [
"def init_make_method_static(self):\n refactoring_main = make_method_static_2.main\n params = {\"udb_path\": self.udb_path}\n candidates = self._methods\n params.update(random.choice(candidates))\n return refactoring_main, params",
"def static_method():\n print('Called st... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds fields with a class to move | def init_move_field(self):
refactoring_main = move_field.main
params = {"udb_path": str(Path(self.udb_path))}
random_field = random.choice(self._variables)
params.update(random_field)
random_class = random.choice(self.get_all_class_entities()).longname().split(".")
target... | [
"def get_field_names(self):\n return [\n item\n for item in self.__dict__.keys()\n if isinstance(getattr(self,item),DestinationField)\n ]",
"def move_fields(from_model, to_model, **fields):\n for from_attr, to_attr in fields.items():\n setattr(to_model, to_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds a method and a class to move it to | def init_move_method(self):
refactoring_main = move_method.main
params = {"udb_path": str(Path(self.udb_path))}
random_method = random.choice(self._methods)
params.update(random_method)
random_class = random.choice(self.get_all_class_entities()).longname().split(".")
targ... | [
"def get_methods_by_class(self, clazz):\n range_start = 0\n range_end = self.num_methods\n split_idx = 0\n test_method = None\n\n clazz = ghidra_utils.SymbolDescriptor(clazz).to_java()\n\n while(range_end >= range_start):\n split_idx = range_start + ((range_end-r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Query OVS REST API for flows for a datapath given some filter criteria in `fields`. | def get_flows(datapath, fields, ip=DEV_VM_URL):
url = "http://%s:%d/stats/flow/%s" % (ip, OF_REST_PORT, datapath)
data = json.dumps(fields)
return _ovs_api_request('POST', url, data=data)[datapath] | [
"def search(\n endpoint,\n in_filter={},\n exclude_filter={},\n fields=[],\n expand=[],\n typ='dataframe',\n method='GET',\n):\n\n try:\n assert typ.lower() in ['json', 'dataframe']\n except (AttributeError, AssertionError):\n raise ValueError(\n 'typ should be a ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a flowentry to OVS; flowentry info such as datapath, table_id, match, actions, etc. is stored in `fields` | def add_flowentry(fields, ip=DEV_VM_URL):
url = "http://%s:%d/stats/flowentry/add" % (ip, OF_REST_PORT)
data = json.dumps(fields)
return _ovs_api_request('POST', url, data=data, return_json=False) | [
"def add(self, flow):\n if hasattr(self, 'request'):\n del self.request\n #print(flow)\n self.odl.prepare(self.__app, '/' + flow['node']['@type'] + '/' + \n flow['node']['@id'] + '/' + flow['name'] + '/')\n headers = {'Content-type': 'application/json'}\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes a flowentry using the OVS REST API; the flowentry is matched based on the information in `fields` | def delete_flowentry(fields, ip=DEV_VM_URL):
url = "http://%s:%d/stats/flowentry/delete_strict" % (ip, OF_REST_PORT)
data = json.dumps(fields)
return _ovs_api_request('POST', url, data=data, return_json=False) | [
"def delete(self, node_id, flow_name):\n if hasattr(self, 'request'):\n del self.request\n\n self.odl.prepare(self.__app, '/' + 'OF/' + node_id + '/' + \n flow_name + '/')\n self.request = requests.delete(url=self.odl.url, auth=self.odl.auth)\n\n # note... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test if pipelined is started and a datapath is initialized. This means the endpoint is reachable and a list containing at least one datapath id is returned (for tests it's actually exactly one entry). | def get_datapath_state(ip=DEV_VM_URL) -> Tuple[bool, bool]:
url = "http://%s:%d/stats/switches" % (ip, OF_REST_PORT)
try:
datapath_list = _ovs_api_request('GET', url)
except requests.ConnectionError:
# Check if datapath is initialized failed: pipelined not reachable.
return False, Fa... | [
"def is_endpoint_external(self):\n return self.endpoint in objects.EXTERNAL_END_POINTS",
"def has_demultiplexing_started_locally(self) -> bool:\n return self.demultiplexing_started_path.exists()",
"def started(self):\n\n return self.__depot_handle != None",
"def test_datasource():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send a generic OVS REST API request and retry if the request fails. Returns the JSON-decoded message | def _ovs_api_request(
method, url, data=None, max_retries=MAX_RETRIES,
return_json=True,
):
for _ in range(MAX_RETRIES):
response = requests.request(method, url, data=data)
if response.status_code == 200:
if return_json:
return response.json()
else:
... | [
"def _send_request(\n self,\n method,\n path_params,\n query_params=None,\n headers=None,\n data=None,\n stream=False,\n files=None,\n ):\n base_path_params = [\"hopsworks-api\", \"api\"]\n f_url = furl.furl(self._base_url)\n f_url.path... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plots n sample images for both values of y (labels). | def plot_sample_images(X, y,target_names,n=10):
for label in target_names.keys():
# grab the first n images with the corresponding y values equal to label
images = X[np.argwhere(y == label)]
n_images = images[:n]
columns_n = 10
rows_n = int(n/ columns_n)
... | [
"def plot_image(num):\n\n #labels = dict(zip(samples.columns.tolist()[:-1],samples.loc[num].tolist()[:-1]))\n #plt.title(f\"sample #{num+1}- {list(labels.keys())[0]}: {ethnicities[list(labels.values())[0]]}, {list(labels.keys())[1]}: {genders[list(labels.values())[1]]}, {list(labels.keys())[2]}: {AgeGroups[li... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Grab a WKT version of an EPSG code. Usage: getPRJwkt(4326) | def getPRJwkt(epsg):
import urllib
f=urllib.urlopen("http://spatialreference.org/ref/epsg/{0}/prettywkt/".format(epsg))
return (f.read()) | [
"def getPRJwkt(epsg):\n\n import urllib\n f=urllib.urlopen(\"http://spatialreference.org/ref/epsg/{0}/prettywkt/\".format(epsg))\n return f.read()",
"def get_esriwkt(epsg):\n try:\n with urllib.request.urlopen(\"http://spatialreference.org/ref/epsg/{0}/esriwkt/\".format(epsg)) as response:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Whether a given tree is complete | def isCompleteTree(self, root: TreeNode) -> bool:
if not root:
return True
stack = [root]
self.empty = False
while stack:
n = stack.pop(0)
if not n:
self.empty = True
if n is None and len(stack) > 0 and stack[-1] is not Non... | [
"def is_complete(self):\n return self.dot >= len(self.tree.children)",
"def is_complete(self) -> bool:\n node_ids = set(self.graph.nx_graph_flat().nodes)\n return self.has_error() or all((k in self.executed for k in node_ids))",
"def check_all_complete(nodes, completed_nodes):\n status =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the prefixSum matrix, where prefix_sum[i][j] is the sum of submatrix mat[0...i][0...j] | def getPrefixSum(mat):
m, n = len(mat), len(mat[0])
x = [[0] * n for _ in range(m)]
for i in range(m):
for j in range(n):
if i == 0 and j == 0:
x[i][j] = mat[0][0]
elif i == 0:
x[i][j] = mat[0][j] + x[i][j-1]
elif j == 0:
... | [
"def array_prefix_sum_mutable(arr):\n for I in range(1, len(arr)):\n arr[I] = arr[I] + arr[I - 1]\n return arr",
"def reduce_coo_matrix(mat, mask):\n G = wgraph_from_coo_matrix(mat)\n K = G.subgraph(mask)\n if K is None:\n return None\n return K.to_coo_matrix()",
"def sum_of_matr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Correct the directory name so it has 0's in front of single-digit numbers | def correct(directory_name):
add_zeros = lambda string: '{0:02d}'.format(int(string))
elements = directory_name.split('_')
return '{0}_{1}_{2}_{3}_{4}_{5}_{6}'.format(elements[0], elements[1], add_zeros(elements[2]), add_zeros(elements[3]), add_zeros(elements[4]), add_zeros(elements[5]), add_zeros(elements[... | [
"def clean_dir_name(name: str) -> str:\n return name.replace(\"/\", \"_\")",
"def make_foldername(name):\n result = \"\"\n for c in name:\n if c.isdigit() or c.isalpha() or c == \",\" or c == \" \":\n result += c\n elif c == ':':\n result += \".\"\n elif c == '-... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
We're running on the AMI instance, so actually do the work: find the files and move them to S3 | def process_ami():
delete_delay_ago = datetime.datetime.now() - datetime.timedelta(days=float(ARC_BOINC_STATISTICS_DELAY))
LOG.info('delete_delay_ago: {0}'.format(delete_delay_ago))
s3helper = S3Helper()
for directory_name in glob.glob(os.path.join(POGS_BOINC_PROJECT_ROOT, 'html/stats_archive/*')):
... | [
"def move_files_to_s3():\n if settings.S3_USE:\n for filename in os.listdir(settings.STORAGE_ROOT_DIR):\n local_filepath = os.path.join(settings.STORAGE_ROOT_DIR, filename)\n try:\n transfer_obj = Transfer.objects.get(machine_file_path=local_filepath)\n exce... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Merging multiple XML files is nontrivial and must be done in subclasses. | def merge(split_files: List[str], output_file: str) -> None:
if len(split_files) > 1:
raise NotImplementedError(
"Merging multiple XML files is non-trivial and must be implemented for each XML type"
)
# For one file only, use base class method (move/copy)
... | [
"def merge(self):",
"def merge_x2p(self, to_merge):\n merge_tree = ET.parse(to_merge)\n # input_root = merge_tree.getroot()\n paths = set(self._file_paths(self.tree))\n for elem in XPathCommand._files(merge_tree):\n path = elem.attrib[\"path\"]\n if path not in pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checking for keyword 'Collection' or 'Image' in the first 200 lines. >>> from galaxy.datatypes.sniff import get_test_fname >>> fname = get_test_fname('1.dzi') >>> Dzi().sniff(fname) True >>> fname = get_test_fname('megablast_xml_parser_test1.blastxml') >>> Dzi().sniff(fname) False | def sniff_prefix(self, file_prefix: FilePrefix) -> bool:
for line in file_prefix.line_iterator():
line = line.lower()
if line.find("<collection") >= 0 or line.find("<image") >= 0:
return True
return False | [
"def test_fits_summary_when_keywords_are_not_subset(self, triage_setup):\n ic = ImageFileCollection(triage_setup.test_dir,\n keywords=['imagetyp', 'exposure'])\n n_files = len(ic.files)\n files_missing_this_key = ic.files_filtered(imagetyp='*',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checking for keyword '<sbml' in the first 200 lines. | def sniff_prefix(self, file_prefix: FilePrefix) -> bool:
return file_prefix.search(SBML_MARKER) | [
"def isTag(self, line):\n\t\n\t\treturn re.match('^\\s*(LÄNK|BILD|RUBRIK|STYCKE|LISTA|NUMMER|CITAT|TEXT|HTML|KOD|RUTA)', line)",
"def has_30k_or_fewer_records(medline_xml, parser=None, tree=None):",
"def testB(self, b, u):\n if not u.find(r'\\b ') == -1: print('\\\\b tag with text content in: ' + b)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The function exports the current statistics, including a timestamp, to a txt file. | def export():
now = datetime.datetime.now()
with open("Statistik_BlackJack.txt", "a") as open_file:
open_file.write("\n\nDatum und Zeit: " + str(now.strftime("%d.%m.%Y %H:%M:%S"))
+ "\nPlayer: " + str(statistik.stat_player)
+ "\nDealer: " + str(statistik.... | [
"def _init_txt_(self):\n self.rates_info_txt_file = 'db/rates_info.nfo'\n self.encoding = 'utf-8'",
"def exportStatistics(self, filename):\n\t\ttimepoint = scripting.visualizer.getTimepoint()\n\t\tself.writeToFile(filename, self.dataUnit, timepoint)",
"def writeControlFile(self, name, t, mapDirect... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return normal path of joined components with old prefix replaced by new. | def _path(*components, **oldnew):
old, new = oldnew.get('old', ''), oldnew.get('new', '')
return os.path.normpath(re.sub('^' + re.escape(old), new,
os.path.join(*components))) | [
"def simplify_path(self, old_path):\n path = re.sub(r\"//+\", \"/\", old_path)\n path = re.sub(r\"/\\./+\", \"/\", path)\n new_path = re.sub(r\"/[^/]+/\\.\\./\", \"/\", path)\n while (new_path != path):\n path = new_path\n new_path = re.sub(r\"/[^/]+/\\.\\./\", \"/\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return list of paths below abs_top containing any of defaults. | def _collect_paths(abs_top, defaults):
paths, top_path = list(), pathlib.Path(abs_top)
for default in defaults:
glob = os.path.join(r'**/', default)
paths += list(sorted(Webpages._path(os.path.dirname(p))
for p in top_path.glob(glob)))
return pat... | [
"def get_relevant_paths():\n paths = []\n for f in all_paths():\n for regexp in PATH_PATTERNS:\n if re.match(regexp, f):\n paths.append(f)\n break\n\n paths.extend(EXPLICIT_PATHS)\n return paths",
"def find_path_above(*names):\n\n path = '.'\n\n while os.path.split(os.path.abspath(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |