| query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict) |
|---|---|---|---|
Modifies the board representation using the specified move and piece | def place_piece(self, move, piece):
if len(move) > 1:
self.board[move[1][0]][move[1][1]] = ' '
self.board[move[0][0]][move[0][1]] = piece | [
"def execute_move(self, move: Tuple[int, int, Piece], player: int):\n\n (x, y, p) = move\n\n # Placing in empty square\n assert self[x][y] == 0\n # Piece placed is not already used\n assert p not in self.used_pieces\n # Not placing in middle cross\n assert x != self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add particle container to the file. | def _add_particles(self, particles, cuba_keys):
name = particles.name
particles_root = self._root.particle
group = tables.Group(particles_root, name=name, new=True)
h5_particles = H5Particles(group)
h5_particles.data = particles.data
if cuba_keys is not None:
... | [
"def add_particle(self, particle):\n self.particles_.append(particle)",
"def addContainer(self, nwbfile):\n nwbfile.add_device(self.dev1)\n nwbfile.add_stimulus(self.optical_series)",
"def particle_to_yml(self, particles, filename):\n # open write append, if you want to start from sc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add lattice to the file. | def _add_lattice(self, lattice, cuba_keys):
name = lattice.name
lattice_root = self._root.lattice
group = tables.Group(lattice_root, name=name, new=True)
h5_lattice = H5Lattice.create_new(
group, lattice.primitive_cell, lattice.size, lattice.origin)
h5_lattice.data =... | [
"def write_lat_file(self):\n\n # If the lattice file exists, remove it and start over\n if os.path.isfile(self.filename):\n os.remove(self.filename)\n\n lat = open(self.filename, 'w')\n\n header = '? VERSION = 1.0\\n'\n header += '? UNITLENGTH = ' + str(self.unit_length... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get lattice from file. The returned lattice can be used to query and change the related data stored in the file. If the file has been closed then the lattice should no longer be used. | def _get_lattice(self, name):
group = self._root.lattice._f_get_child(name)
return H5Lattice(group) | [
"def load_lattice(filename):\n lattice = np.load(filename)\n print (\"SOM lattice loaded from %s\" %filename)\n return lattice",
"def __init__(self, lattice_file):\n\n super().__init__()\n\n # No log conversion by default. \"None\" means the lattice file uses\n # linear probabilities... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete lattice from file. | def _remove_lattice(self, name):
node = self._root.lattice._f_get_child(name)
node._f_remove(recursive=True) | [
"def _ClearTriageLinkFile(self) -> None:\n open(self._triage_link_file, 'w').close()",
"def remove(self, file):\n pass",
"def delete(self):\n self.gridfs.delete(self.file_id)",
"def delete_file(self):\n os.remove(self.full_path())\n self.size = 0",
"def delete(task_file):\n\t\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns an iterator over a subset or all of the particle containers. | def _iter_particles(self, names=None):
if names is None:
for node in self._root.particle._f_iter_nodes():
yield self._get_particles(node._v_name)
else:
for name in names:
if name in self._get_child_names(self._root.particle):
yi... | [
"def _iter_particles(self, ids=None):\n if ids is None:\n return iter(self._particles)\n else:\n return self._particles.itersequence(ids)",
"def particles(self, selection_func=None):\n if selection_func is None:\n return self.particles_\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns an iterator over a subset or all of the meshes. | def _iter_meshes(self, names=None):
if names is None:
for mesh_node in self._root.mesh._f_iter_nodes():
yield self._get_mesh(mesh_node._v_name)
else:
for name in names:
if name in self._get_child_names(self._root.mesh):
yield se... | [
"def __iter__(self):\n return self.subset_loader.__iter__()",
"def __iter__(self):\r\n for shape in self.__shapes:\r\n yield shape",
"def iter(root=None, **kwargs):\n # type: (om2.MObject, Dict) -> Generator[om2.MObject]\n return idag(root, filter_type=om2.MFn.kMesh, **kwa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new bullet | def new_bullet(name, rotation, width, x, y, velocity_x, velocity_y, speed, batch):
angle_radians = -math.radians(rotation)
ship_radius = width
bullet_x = x + math.cos(angle_radians) * ship_radius
bullet_y = y + math.sin(angle_radians) * ship_radius
from version2.game.Bullet import Bullet
_new_bu... | [
"def add_bullet(self):\n self.game_objects.append(Bullet(self.player.heading, self.player.position))",
"def _create_bullet(self, size, velocity, color):\n shape = pygame.Rect(self._rect.centerx,\n self._rect.centery, size[0], size[1])\n bullet = {'velocity': velocit... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new AWS S3 Bucket Policy. | def create(self, params):
return self.make_client_call(
'put_bucket_policy',
params,
fatal_handled_exceptions=ParamValidationError) | [
"def create_bucket(name, policy=None):\n s3 = boto3.client('s3')\n\n s3.create_bucket(Bucket=bucket)\n print(\"S3 bucket %s created.\" % bucket)\n\n if policy:\n s3.put_bucket_policy(\n Bucket=bucket,\n Policy=json.dumps(bucketPolicy)\n )\n print(\"Policy attac... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes an existing AWS S3 Bucket Policy. | def delete(self, params=None):
self.logger.debug('Deleting %s with parameters: %s'
% (self.type_name, params))
self.client.delete_bucket_policy(**params) | [
"def delete_bucket():\n\n s3 = session.resource('s3')\n\n try:\n bucket = s3.Bucket(f\"lambda-source-{os.environ['AWS_ACCOUNT']}\")\n bucket.objects.all().delete()\n bucket.delete()\n print('Deleted S3 bucket!')\n\n except Exception as e:\n print(f\"Error deleting S3 buck... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns true if a user is logged in, else false. | def is_logged_in():
return 'username' in session | [
"def is_logged_in():\n return 'user_id' in session",
"def is_logged_in(self):\n return self.get_secure_cookie(\"userid\") != None",
"def is_loggedin():\n \n if \"user_id\" in session:\n loggedin = \"True\"\n else:\n loggedin = \"False\"\n return loggedin",
"def checkUserAut... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build the elasticsearch mapping bits. | def elastic_mapping_builder(obj):
super(Citations, Citations).elastic_mapping_builder(obj)
obj['journal_id'] = obj['journal_volume'] = \
obj['journal_issue'] = {'type': 'integer'}
obj['abstract_text'] = obj['xml_text'] = \
obj['page_range'] = obj['release_authorization_id... | [
"def generate_docs_mapping(self):\n m = {}\n m['n_words'] = {'type': 'integer'}\n m['n_sents'] = {'type': 'integer'}\n if len(self.settings['languages']) > 1:\n for lang in self.settings['languages']:\n m['n_words_' + lang] = {'type': 'integer'}\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert the citation fields to a serializable hash. | def to_hash(self, recursion_depth=1):
obj = super(Citations, self).to_hash(recursion_depth)
obj['_id'] = int(self.id)
obj['article_title'] = unicode_type(self.article_title)
obj['abstract_text'] = unicode_type(self.abstract_text)
obj['xml_text'] = unicode_type(self.xml_text)
... | [
"def from_hash(self, obj):\n super(Citations, self).from_hash(obj)\n self._set_only_if('_id', obj, 'id', lambda: int(obj['_id']))\n self._set_only_if('journal_id', obj, 'journal',\n lambda: Journals.get(Journals.id == int(obj['journal_id'])))\n for key in ['journ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert the object into the citation object fields. | def from_hash(self, obj):
super(Citations, self).from_hash(obj)
self._set_only_if('_id', obj, 'id', lambda: int(obj['_id']))
self._set_only_if('journal_id', obj, 'journal',
lambda: Journals.get(Journals.id == int(obj['journal_id'])))
for key in ['journal_volume'... | [
"def elastic_mapping_builder(obj):\n super(Citations, Citations).elastic_mapping_builder(obj)\n obj['journal_id'] = obj['journal_volume'] = \\\n obj['journal_issue'] = {'type': 'integer'}\n obj['abstract_text'] = obj['xml_text'] = \\\n obj['page_range'] = obj['release_auth... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
We set the adapted methods in the object's dict | def __init__(self, obj, adapted_methods):
self.obj = obj
self.__dict__.update(adapted_methods)
for key in self.__dict__:
print(key,self.__dict__[key]) | [
"def __init__(self, obj, **adapted_methods):\n self.object = obj\n self.__dict__.update(adapted_methods)",
"def _include_redis_methods(self, redis):\n for attr in dir(redis):\n value = getattr(redis, attr)\n if attr.startswith('_') or not callable(value):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the CryBlend properties of a materialname as a dict, or None if the name is invalid. | def extract_cryblend_properties(materialname):
if is_cryblend_material(materialname):
groups = re.findall("(.+)__([0-9]+)__(.*)__(phys[A-Za-z0-9]+)", materialname)
properties = {}
properties["ExportNode"] = groups[0][0]
properties["Number"] = int(groups[0][1])
propertie... | [
"def material_name(self):\n try:\n self._material_name = self._layer.GetMaterial()\n except:\n pass\n return self._material_name",
"def export_material_property(self, name='', flags=0x0001,\n ambient=(1.0, 1.0, 1.0), diffuse=(1.0, 1.0, 1.0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Simple SSE loss over the generated image and the content image | def content_loss(noise: torch.Tensor, image: torch.Tensor):
return 1/2. * torch.sum(torch.pow(noise - image, 2)) | [
"def get_loss(generation_img):\n generation_img = K.reshape(generation_img, [1, 300, 400, 3])\n return fn([generation_img])[0].astype('float64')",
"def mse_amm_img(args):\n x_, x_fake_, attn_map = args\n return K.mean(K.square((x_ - x_fake_) * attn_map), axis=[1,2,3], keepdims=False)",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handles app logic for a user leaving a room. Must be passed either a Room and Account object, or a RoomList object. Examples: leave_room(room=room_obj, account=account_obj); leave_room(session=roomlist_obj) | def leave_room(room=None, account=None, session=None):
if room is not None and account is not None:
session = RoomList.all().filter('room =', room).filter('account =', account).get()
elif session is not None:
room = session.room
account = session.account
else:
raise TypeError... | [
"def on_leave(data):\n username = session[\"login\"][0]\n room = find_room(data[\"bookgroup_id\"], data.get(\"chapter_number\"))\n leave_room(room)\n\n emit('leave_status', {'msg': username + \" has left room \" + str(room)}, room=room)",
"async def leave_room(self, chat_rooms: Union[List[str], str]):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create curriculum integration object. | def create_integration(self, topic, number, lessons=None, curriculum_areas=None):
integration = CurriculumIntegration(
topic=topic,
slug="integration-{}".format(number),
name="Integration {}".format(number),
number=number,
content="<p>Content for integ... | [
"def new_curriculum(req):\r\n\treturn direct_to_template(req, 'lesson/curriculum.html')",
"def create_integrator(self, model, inputs, t_eval=None, use_event_switch=False):\n pybamm.logger.debug(\"Creating CasADi integrator\")\n\n # Use grid if t_eval is given\n use_grid = t_eval is not None\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create curriculum area object. | def create_curriculum_area(self, number, parent=None):
area = CurriculumArea(
slug="area-{}".format(number),
name="Area {}".format(number),
colour="colour-{}".format(number),
number=number,
parent=parent,
languages=["en"],
)
... | [
"def new_area(self):\n pass",
"def __init__(self, area_points, reversed_counting_logic):\n super(AreaCounter, self).__init__(reversed_counting_logic)\n if len(area_points) > 2:\n self.area_polygon = area_points\n else:\n print(\"[Counter] Invalid counting area set... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create programming language object. | def create_programming_language(self, number):
language = ProgrammingChallengeLanguage(
slug="language-{}".format(number),
name="Language {}".format(number),
number=number,
languages=["en"],
)
language.save()
return language | [
"def create_test_language(name: str, code: str) -> Language:\r\n lang = Language(name=name, code=code)\r\n lang.full_clean()\r\n lang.save()\r\n return lang",
"def fol_language():\n def make_symbols(start):\n \"\"\"E.g., if start='a', then returns ['a1', ..., 'a9', 'b1', ..., 'c9'].\"\"\"\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create programming challenge object. | def create_programming_challenge(self, topic, number,
difficulty,
challenge_set_number=1,
challenge_number=1,
content="<p>Example content.</p>",
... | [
"def create_programming_challenge_implementation(self, topic,\n language,\n challenge,\n expected_result=\"<p>Example result.</p>\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create programming challenge implementation object. | def create_programming_challenge_implementation(self, topic,
language,
challenge,
expected_result="<p>Example result.</p>",
... | [
"def create_programming_challenge(self, topic, number,\n difficulty,\n challenge_set_number=1,\n challenge_number=1,\n content=\"<p>Example content.</p>\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create learning outcome object. | def create_learning_outcome(self, number):
outcome = LearningOutcome(
slug="outcome-{}".format(number),
text="Outcome {}".format(number),
languages=["en"],
)
outcome.save()
return outcome | [
"def sample_outcome(self, state: State, action: Action):\n pass",
"def learn(self, reward, observation):",
"def set_outcome(self, outcome):\n self.outcome = outcome\n self.y = self._makey()",
"def add_outcomes(self):\n study_outcomes = StudyOutcomes()\n for outcome_type in (... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create classroom resource object. | def create_classroom_resource(self, number):
resource = ClassroomResource(
slug="resource-{}".format(number),
description="Resource {}".format(number),
languages=["en"],
)
resource.save()
return resource | [
"def _create_new_classroom(\n classroom: classroom_config_domain.Classroom\n) -> None:\n classroom.validate()\n classroom_models.ClassroomModel.create(\n classroom.classroom_id,\n classroom.name,\n classroom.url_fragment,\n classroom.course_details,\n classroom.topic_list... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add relationship between challenge and lesson objects. | def add_challenge_lesson_relationship(self, challenge, lesson, set_number, number):
relationship = ProgrammingChallengeNumber(
programming_challenge=challenge,
lesson=lesson,
challenge_set_number=set_number,
challenge_number=number,
)
relationship.... | [
"def add_lesson_resource_relationship(self, lesson, resource, number):\n relationship = ResourceDescription(\n lesson=lesson,\n resource=resource,\n description=\"Description {}\".format(number),\n )\n relationship.save()",
"def add_rel_person(self, added):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create glossary term object. | def create_glossary_term(self, number):
term = GlossaryTerm(
slug="term-{}".format(number),
term="Term {}".format(number),
definition="Defintion for term {}".format(number),
)
term.save()
return term | [
"def test_glossary_term_create(self):\n pass",
"def _create_term_definition(self, active_ctx, local_ctx, term, defined):\n if term in defined:\n # term already defined\n if defined[term]:\n return\n # cycle detected\n raise JsonLdError(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add relationship between lesson and resource objects. | def add_lesson_resource_relationship(self, lesson, resource, number):
relationship = ResourceDescription(
lesson=lesson,
resource=resource,
description="Description {}".format(number),
)
relationship.save() | [
"def add_challenge_lesson_relationship(self, challenge, lesson, set_number, number):\n relationship = ProgrammingChallengeNumber(\n programming_challenge=challenge,\n lesson=lesson,\n challenge_set_number=set_number,\n challenge_number=number,\n )\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
=========== This is for reading in apr3 hdf (HDF5 updated 2/21/18) files from OLYMPEX and returning them all in one dictionary =========== filename = filename of the apr3 file | def apr3read(filename):
apr = {}
flag = 0
    ##Radar variables in hdf file found by hdf.datasets
radar_freq = 'zhh14' #Ku
radar_freq2 = 'zhh35' #Ka
radar_freq3 = 'z95s' #W
radar_freq4 = 'ldr14' #LDR
vel_str = 'vel14' #Doppler
##
hdf = h5py.File(filename,"r")
... | [
"def apr3read(filename):\n \n apr = {}\n flag = 0\n ##Radar varibles in hdf file found by hdf.datasets\n radar_freq = 'zhh14' #Ku\n radar_freq2 = 'zhh35' #Ka\n radar_freq3 = 'zvv95' #W\n radar_freq4 = 'ldr14' #LDR\n vel_str = 'vel14' #Doppler\n ##\n\n hdf = SD(filename, SDC.READ)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is a function to load and combine the PSD into 1 matrix with dimensions (time,bin) | def PSD_load(psd_filename_2DS,psd_filename_HVPS,day=1,month=12):
##Load in UIOPS
#Select bins for each probe
#2DS
index1 = 4
index2 = 18
#
#HVPS
index3 = 5
index4 = 28
#
#2DS
data = netCDF4.Dataset(psd_filename_2DS,'r')
time1 = data['time'][:]
ND1 = data... | [
"def load_p_beam_2s():\n # get start time\n start_time = get_start_time()\n \n # instantiate array to hold the resulting data, empty and single column \n # at first, for data to be successively stacked\n p_beam_data = np.empty((0,2), float)\n \n # loop through the files and load the data\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
=========== Calculate the density of the ice sphere/spheroid. return_ice = bool, returns iwc with the rho | def rho_e(midpoints,binwidth,ND,MD,aspect,mass,twc,return_ice=False):
flag1 = False
##Determine Volume function based on input
if aspect == 1:
def volume(D,N,dD):
vol = vol_1(D,N,dD)
return vol
elif aspect == 2:
def volume(D,N,dD):
vol = vol_2(D,N,dD... | [
"def rho(self):\n\n if \"rho\" not in self.ds:\n var = xroms.density(self.ds.temp, self.ds.salt, self.ds.z_rho)\n self.ds[\"rho\"] = var\n\n return self.ds.rho",
"def resistivity(self):\n A = self. w * self.t # Cross-sectional area of cpw\n rho = self.resistance()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function provides a filter for APR3 data to determine if there exists a precip echo in the column. Only performs it on one scan at a time. Could easily go up to all scans. | def precip_echo_filt(ku):
precip_yn = np.zeros(ku.shape[1])
for j in np.arange(0,ku.shape[1]):
flag1 = 0
c1 = -1
i = -1
start_flag = 0
while flag1 == 0:
i = i + 1
if c1 >= 3:
precip_yn[j] = 1
break
if i... | [
"def filter_pfcp_ngap(imsi,file_name):\r\n\tfilter_patten = '\\\"pfcp && e212.imsi == ' +imsi+ '\\\"'\r\n\tTfield = ' -Tfields -e pfcp.seqno'\r\n\tcmd = '\"C:\\\\Program Files\\\\wireshark\\\\tshark.exe\\\" -n -r \\\"' + file_name +'\\\" -2 -R ' +filter_patten + Tfield +' 2>null'\r\n\tprint(\"\\n\",cmd,\"\\n\")\r\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Overload so len() simply returns the number of nucleotides stored within the instance of the class. | def __len__(self):
return(len(self.nucleotides)) | [
"def __len__(self):\n return len(self.organisms)",
"def __len__(self):\n\t\tret = 0\n\t\tfor elem in self:\n\t\t\tret += 1\n\t\treturn ret",
"def __len__(self) -> \"int\":\n return _coin.SoMField___len__(self)",
"def __len__(self) -> \"int\":\n return _coin.SoGroup___len__(self)",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is a private method that, given a string of nucleotides, calls the current BIGSI test site and parses the results, given the target species_name and species_min_amount. | def _interrogate_bigsi(self,nucleotides_string):
# create an empty list to store the list of sample numbers from the Short Read Archive
sra_samples={}
# define the URL of the BIGSI instance
url_front="http://api.bigsi.io/search?seq="
url_end="&threshold=1.0"
query_stri... | [
"def test_species(self):\n\n # test node.species\n\n species_tree = PhyloTree(\n \"((Felis_catus_1:1, (Homo_sapiens_1:1, Pan_troglodytes_1:1), Saccharomyces_cerevisiae_1:1));\",\n format=1)\n species_tree.set_species_naming_function(lambda n: n.name.split(\"_\")[1] if \"_\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a Pandas dataframe that lists all the mutations and their occurrences, one per line | def _create_dataframe(self):
data_dict={}
# create a Boolean array of only those positions where sequences have been identified
positive_elements=self.arrays["number_genomes"]>0
for key in ['amino_acid_position','original_triplet','new_triplet','number_nucleotide_changes','mutation','... | [
"def get_mutation_records(self):\n\n yield from self._mutation_records",
"def extract_avenio_mutations(\n columns=[\"Allele Fraction\", \"No. Mutant Molecules per mL\", \"CNV Score\"]\n) -> Dict[str, pd.DataFrame]:\n # Load data from spreadsheet.\n patient_mutations = pd.read_excel(\"variant_list_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns data including only lines containing SNPs. | def vcf_snp_prune(self, in_data=None):
snps_data = []
print "\nPruning non-SNP entries..."
bar = progressbar.ProgressBar(redirect_stdout=True)
for i in bar(range(len(in_data))):
file_line = in_data[i]
cols = file_line.split('\t')
# If the second chara... | [
"def filter_snps( snp_list ):\n return [s for s in snp_list if s.S1 != 'N']",
"def all_snpedia_rsids():\n snpedia = MediaWiki(url=\"https://bots.snpedia.com/api.php\")\n\n # pull list of all available SNP markers\n all_snps = snpedia.categorymembers(\"Is a snp\", results=None, subcategories=False)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of variants from a reference sequence. Intended for use with FASTA input, but will work with any AlignIO object or list of sequence data. | def variants_from_sequence(self):
print "\nSetting up sequence difference sets..."
firstseq = self.sequence[0]
diffsets = []
for i in range(len(firstseq)):
diffsets.append(set())
bar = progressbar.ProgressBar(redirect_stdout=True)
for i in bar(range(len(self.s... | [
"def get_bam_ref_sequences(filename, headsize=64000):\n with open(filename, 'rb') as bamfile:\n head = bamfile.read(headsize)\n\n # read a bunch of fields, where the final field is the size of the block\n # minus 1, then decompress the block\n fields = struct.unpack_from('<4BI2BH2B2H', head)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Strips out nonsegregating sites from a sequence alignment. Uses self.variantset, which must be filled first. | def prune_non_seg(self):
self.fullsequence = self.sequence # First back up the original sequence
self.fullvariantset = self.variantset
self.fullvariants = self.variants
self.sequence = MultipleSeqAlignment([]) # Blank the sequence to be worked on
print "\nPruning non-segregati... | [
"def remove_unalignable(self):\n if self.unalignable is None or self.data is None:\n self.logger.warn(\"Both unalignable regions and RE data must be \"\n \"loaded prior to running this function\")\n return None\n self.logger.info(\"Removing unalignable... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Output a REJECTOR2 input file | def rej_infile(self):
rejfilename = self.filebase + "-rej.txt"
rejfile = open(rejfilename, 'w')
rejfile.write("/--Data\n")
rejfile.write("Vnaught 0\n\n")
rejfile.write("Loci\tSNP\n")
rejfile.write("Ancestral\t-1\n")
rejfile.write("RecombRt\t0\n")
rejfile... | [
"def output(self, file: 'FILE *const') -> \"void\":\n return _coin.SoNotRec_output(self, file)",
"def check_2bit(self):\n print(self.temp_file_dir)\n print(self.temp_file)\n print(self.output_file)\n if self.target[-4:] != \"2bit\" or self.query[-4:] != \"2bit\":\n pr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes input tree file or sequence data. | def input_tree(self):
if self.starttreename:
if self.starttreename[-3:] == 'xml':
self.starttree = Phylo.read(self.starttreename, "phyloxml")
elif self.starttreename[-6:] == 'newick':
self.starttree = Phylo.read(self.starttreename, "newick")
prin... | [
"def parse_input():\n\n with open('input.txt', 'r') as txt:\n tree = txt.read().strip().split(' ')\n\n return tree",
"def parse_files(preorder=None, postorder=None, inorder=None):\n data_is_sufficient = (preorder and inorder) or (\n postorder and inorder)\n if not data_is_suf... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find all parents of all children in the tree. | def all_parents(tree):
parents = {}
for clade in tree.find_clades(order='level'):
for child in clade:
parents[child] = clade
return parents | [
"def walk_parents(self):\n active = self.parent_datasets[:]\n while active:\n d = active.pop()\n yield d\n active += d.parent_datasets",
"def parents(self, rev):\n self._scanparents(rev)\n return [r for _c, r in sorted(self._parents.get(rev, []))]",
"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Collect neighbors of minimum sized monophyletic clade including term. | def neighbors_by_mono(self, term, ctree, parents, tsize):
neighbors = set()
monn = set()
monn.add(term)
curnode = term
while len(neighbors) < tsize:
if curnode not in parents: # will not go past the root
break
curparent = parents[curnode]
... | [
"def neighbors_by_hops(self, term, ctree, parents, tsize):\n workneighbors = set()\n neighbors = set()\n monn = set()\n monn.add(term)\n\n height = 0\n while len(workneighbors) <= tsize:\n curnode = term\n for i in xrange(height):\n if c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Collect neighbors up to a maximum number of hops along tree. | def neighbors_by_hops(self, term, ctree, parents, tsize):
workneighbors = set()
neighbors = set()
monn = set()
monn.add(term)
height = 0
while len(workneighbors) <= tsize:
curnode = term
for i in xrange(height):
if curnode not in p... | [
"def get_neighbours(self):\n return self.__neighbours",
"def _calculate_nb_neighbors(self, target_node):\n # if number of neighbors was calculated at least once\n # skips calculating the distance\n if target_node.nb_neighbors != -1:\n # only check if there are dead nodes\n all_neighbors ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs a tree via maximum parsimony using Biopython's ParsimonyTreeConstructor. | def parsimony_tree(self):
print "Generating maximum parsimony tree.."
if runs > 0 or boot > 0:
print "ERROR: Bootstrap and multiple runs not compatible with -tree pars option."
exit()
cpus = multiprocessing.cpu_count()
if cpus > maxthreads:
cpus = maxt... | [
"def construct_maxheap_recursive(arr):\n if len(arr) > 0:\n v = arr.pop()\n heap = BTNode(v)\n else:\n return None\n heap.left = construct_maxheap_recursive(arr)\n heap.right = construct_maxheap_recursive(arr)\n fix_maxheap(heap, heap.value)\n return heap",
"def __init__(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs a tree via maximum likelihood by invoking external software PhyML. See docs for PhyML installation and setup. | def phyml_tree(self):
print "Invoking PhyML..."
if runs > 0 or boot > 0:
print "ERROR: Bootstrap and multiple runs not yet implemented for PhyML."
print "Try using RAxML."
exit()
# Output sequence to a temp FASTA file
tempfastafile = self.indata.fileba... | [
"def max_tree(max_depth = None, out_file = None):\n\n data = np.loadtxt(\"fourier/energy.txt\", delimiter=\",\")\n\n X = []\n y = []\n for row in data:\n y.append(int(row[1]))\n X.append(map(int,row[2:]))\n\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, tes... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets up imputation function for all terminal nodes. | def impute(self):
if ncollect == 'hops':
print "HOPS"
elif ncollect == 'distance':
print "DISTANCE"
else:
print "ROOTWARD"
terms = self.phytree.tree.get_terminals() # Get all internal nodes on tree. These are the ones with samples.
if boot >... | [
"def initialize(self):\n\n for n in self.nodes():\n # if self.nodes[n].get(\"init_fn\") is not None:\n if n in self.input_functions:\n self.nodes[n][\"value\"] = self.nodes[n][\"init_fn\"]()\n self.update()",
"def imputation(self):\n return self._imputatio... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Output imputed sequence and auxiliary files. | def output_imputed(self, limpout):
for imputed in self.imputelist:
if indata.orig_vcf_pos:
imputed[1] = str(indata.orig_vcf_pos[int(imputed[1])])
else:
imputed[1] = str(imputed[1])
if verbose:
if len(self.imputelist) > 0:
... | [
"def _gen_aux(self, ssym):\n\n for asym in self.aux:\n if self.auxout > 0:\n self.hmm_file_ofp.write(\"%d\\t%d\\t%s\\t%s\\n\" % (ssym, ssym, asym, asym))\n else:\n self.hmm_file_ofp.write(\"%d\\t%d\\t%s\\t%s\\n\" % (ssym, ssym, self.eps, asym))\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build path with endpoint and args | def _build_path(self, *args):
return '/'.join(chain((self.endpoint,), map(str, args))) | [
"def __url_builder(self, endpoint: str, **kwargs: dict) -> str:\n\n endpoint = self.__clean_endpoints_string(endpoint)\n if kwargs != {}:\n endpoint = endpoint.format(**kwargs)\n elif type(endpoint) == tuple:\n endpoint = endpoint[0]\n endpoint += \"&api_key={}\".fo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a set of pings, calculate the echometrics values based on the depth bin provided. | def calculate_echometrics(frame_buffers, depth_to_bin):
depth_to_bin = int(depth_to_bin)
print 'depth to bin is ...', depth_to_bin
num_beams = len(frame_buffers)
num_samples = frame_buffers[0].num_samples[0]
max_range = frame_buffers[0].range_max_m[0]
min_range = frame_buffers[0].range_min_m[0]
... | [
"def sumBinEnergies( binnedPoints, factor=1000 ):\n\n energies = []\n\n for tBin in binnedPoints:\n if tBin.size > 0:\n energies.append(sum(tBin[:,3]*factor))\n else:\n energies.append(0)\n\n return energies",
"def calc_phase_hist_data(property, snap, bin_nums=100):\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add command line options | def add_command_line_options():
AddOption('--preprocess',
dest='preprocess',
action='store_true',
default=False,
help='Preprocess selected files for profiling')
AddOption('--no-rpath',
dest='no_rpath',
action='store_true',
... | [
"def add_opts(self, optparser):\n return",
"def _setCommandLineOptions(self):\n self._commandLineParser.add_option(\"-f\", \"--file\", dest=\"filename\", help=\"open a FILE\", metavar=\"FILE\")\n self._commandLineParser.add_option(\"-l\", \"--loglevel\", dest=\"loglevel\", help=\"set LOGLEVEL... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the version (and release) in the RPM spec file | def update_rpm_version(version, tag):
# pylint: disable=consider-using-f-string
spec = open("utils/rpms/daos.spec", "r").readlines() # pylint: disable=consider-using-with
current_version = 0
release = 0
for line_num, line in enumerate(spec):
if line.startswith("Version:"):
curr... | [
"def _update_version_file(self):\n file_path = os.path.join(self.repo.working_tree_dir, self.version_file)\n self._update_version_numbers(file_path)\n return self._commit_file(\n self.version_file,\n 'Version updated for release {}.{}.{}{}.'.format(\n self.major, self.minor, self.p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function for importing custom scons file. Making this a function allows us to export 'env' without namespace pollution in the parent. | def load_local(env_script, env):
# pylint: disable=unused-argument
SConscript(env_script, exports=['env']) | [
"def get_env():\n env.output_prefix = False\n run('export | sed -e \"s/declare -x/export/g\"')",
"def environInject(shellName):",
"def extend_env(extra_env):\n env = os.environ.copy()\n env.update(extra_env)\n return env",
"def get_environment(py_src):\n env_src = \".\".join((os.path.splitex... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the user's timeline with the list of tweets in the following format and aggregate into one document. | def aggregate_tweets(self, timeline, lang=None):
if lang is None:
twt_doc = ' '.join([t['text'] for t in timeline['tweets']])
else:
twt_doc = ' '.join([t['text'] for t in timeline['tweets'] if t['lang'] == lang])
return {'user_id': timeline['user_id'], 'all_tweets': twt_d... | [
"def get_user_tweets(self):\n tweets = []\n for status in tweepy.Cursor(self.api.user_timeline).items():\n tweets.append(status)\n return tweets",
"def tweeterise_timeline(self, request, blasts):\n tweets = []\n for blast in blasts:\n tweets.append(self.twe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get timelines for all friends (following) for this twitter account and return tweets aggregated for each user. | def get_timelines_for_parent(self, parent_name):
db = self.db
cursor = db.tweets.find({'parent_account': parent_name})
friends_tweets = []
for tl in range(cursor.count()):
friends_tweets.append(self.aggregate_tweets(cursor.next()))
return friends_tweets | [
"def get_friends_tweets(self):\n tweets = []\n for friend in self.friends:\n for tweet in tweepy.Cursor(self.API.user_timeline).items():\n tweets.append(tweet._json)\n print(tweets,\"\\n\")\n \n return tweets",
"def get_user_tweets(self):\n tweet... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Base class for an initialiser with a distribution between [low,high] | def __init__(self, low=0, high=1):
self.low = low
self.high = high | [
"def __init__(self, a, b, the_seed=None):\n super(UniformRNS, self).__init__(the_seed)\n self.upper_bound = a\n self.lower_bound = b\n self.width = self.upper_bound - self.lower_bound",
"def __init__(self) :\n\t\t#later may want to pass in something that effects the probability of cert... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A common function for ensuring that two observables contain derivatives with respect to the same force field parameters, and for refactoring these derivatives into more easily manipulable dictionaries. | def _compatible_gradients(
self, other: T
) -> Tuple[
Dict[ParameterGradientKey, ParameterGradient],
Dict[ParameterGradientKey, ParameterGradient],
]:
self_gradients = {gradient.key: gradient for gradient in self._gradients}
other_gradients = {gradient.key: gradient for ... | [
"def compute_observations_and_derivatives(self, state1, state2, dt): \n data1 = self.compute_observations(state1)\n data2 = self.compute_observations(state2)\n \n # For each sensor type (e.g., sensor_type = 'rangefinder')\n for sensor_type in self.config.sensor_types.keys()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clears all gradient information. | def clear_gradients(self):
self._gradients = [] | [
"def clear_gradients(self):\n for observable in self._observables.values():\n observable.clear_gradients()",
"def clear(self):\n # Clear all terms!\n self.set_point = 0.0\n self.Pterm = 0.0\n self.Iterm = 0.0\n self.Dterm = 0.0\n self.last_error = 0.0\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts the subset of the values stored for this observable at the specified indices. | def subset(self, indices: Iterable[int]) -> "ObservableArray":
return self.__class__(
value=self._value[indices],
gradients=[
ParameterGradient(key=gradient.key, value=gradient.value[indices])
for gradient in self._gradients
],
) | [
"def get_subset(self, indices):\n return MLData(self.data[indices], self.target[indices])",
"def slice(self, indices):\n indices = np.asarray(indices)\n max_index = indices.max()\n n_total = len(self)\n if max_index >= len(self):\n raise ValueError(\"Invalid index %d ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validates whether a key is either an `ObservableType` or a string representation of an `ObservableType`. A `KeyError` is raised if any other types are passed as an key, or if the `str` cannot be converted to an `ObservableType` | def _validate_key(key: Union[str, ObservableType]) -> ObservableType:
key_error_message = (
"The key must either be an `ObservableType` object or a "
"string representation of an `ObservableType` object."
)
if isinstance(key, str):
try:
key = ... | [
"def _validate_key(key):\n key_error_message = (\n \"The key must either be an ObservableType or a \"\n \"string representation of an ObservableType\"\n )\n\n if isinstance(key, str):\n\n try:\n key = ObservableType(key)\n except ValueE... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates an observable frame from the CSV output of an OpenMM simulation. | def from_openmm(
cls, file_path: str, pressure: unit.Quantity = None
) -> "ObservableFrame":
with open(file_path, "r") as file:
file_contents = file.read()
if len(file_contents) < 1:
return cls()
file_contents = file_contents[1:]
file... | [
"def format_bom_mjo(src_file, target_file):\n\n cols = [0, 1, 2, 3, 4, 5, 6]\n names = ['year', 'month', 'day', 'RMM1', 'RMM2', 'phase', 'amplitude']\n na_values = ['1E+36', '1.E+36', '999', 999,\n '9.99999962E+35', 9.99999962E+35]\n\n logging.info('Reading source file: %s', src_file)\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Joins multiple observable frames together in the order that they appear in the args list. | def join(cls, *observable_frames: "ObservableFrame") -> "ObservableFrame":
if len(observable_frames) < 2:
raise ValueError("At least two observable frames must be provided.")
expected_observables: Set[ObservableType] = {*observable_frames[0]}
# Ensure the observable frames contain... | [
"def prep_animations(self,*animations,args=None):\n for animation in animations:\n if not args is None:\n animation.pass_obs=args\n self.held_animations.append(animation)\n animation.prep_queue()",
"def zip(*args: Union[Iterable[Any], ObservableBase], # pyli... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clears all gradient information for each observable in the frame. | def clear_gradients(self):
for observable in self._observables.values():
observable.clear_gradients() | [
"def clear_gradients(self):\n self._gradients = []",
"def zero_grad(self):\n\t\tfor i in range(len(self.Layers)):\n\t\t\tself.Layers[i].zero_grad()",
"def clearAll(self):\n\t\tself.faceSnapShot = None #This is the state of the HappyFace to which all the expressions are compared\n\t\tself.expressionLibrar... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Bootstrapping a set of observables to compute the average value of the observables as well as the standard error in the average. | def bootstrap(
bootstrap_function: Callable,
iterations: int = 200,
relative_sample_size: float = 1.0,
sub_counts: Iterable[int] = None,
**observables: ObservableArray,
) -> Observable:
if len(observables) == 0:
raise ValueError("There are no observables to bootstrap")
# Ensure tha... | [
"def computeMeansErrors(*arrays):\n workMat = stack(arrays)\n return workMat.mean(axis=0), workMat.std(axis=0)",
"def _compute_bootstrapped_statistics(\n measured_values,\n measured_stds,\n estimated_values,\n estimated_stds,\n statistics=None,\n percentile=0.95,\n bootstrap_iterations=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for me_get | def test_me_get(self):
pass | [
"def test_hirststonge_using_get(self):\n pass",
"def test_chores_get(self):\n pass",
"def test_musicals_get(self):\n pass",
"def test_hirststonge_using_get1(self):\n pass",
"def test_hirststonge_using_get2(self):\n pass",
"def test_greenalgas_get(self):\n pass",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for me_get_list | def test_me_get_list(self):
pass | [
"def test_list_using_get1(self):\n pass",
"def test_get_list(self):\n\t\tinput = get_list('./tests/sample.json')\n\t\tassert isinstance(input, list)",
"def test_list_operations(self):\n pass",
"def test_get_items_in_list(self):\n\n list_name = 'travel'\n item1 = 'cake'\n ite... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for me_get_pay_ins | def test_me_get_pay_ins(self):
pass | [
"def test_pay_ins_universal_pay_universal_pay_get_payment(self):\n pass",
"def test_payment_methods_get(self):\n pass",
"def test__account_instruments(self, mock_req):\n tid = \"_v3_account_by_accountID_instruments\"\n resp, data, params = fetchTestData(responses, tid)\n r = a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Here, we check to see if every endpoint documented in the OpenAPI documentation actually exists in urls.py and thus in actual code. | def check_for_non_existent_openapi_endpoints(self) -> None:
openapi_paths = set(get_openapi_paths())
undocumented_paths = openapi_paths - self.checked_endpoints
undocumented_paths -= self.buggy_documentation_endpoints
undocumented_paths -= self.pending_endpoints
try:
... | [
"def test_openapi_arguments(self) -> None:\n\n from zproject import urls as urlconf\n\n # We loop through all the API patterns, looking in particular\n # for those using the rest_dispatch decorator; we then parse\n # its mapping of (HTTP_METHOD -> FUNCTION).\n for p in urlconf.v1_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print a VERY clear and verbose error message for when the types (between the OpenAPI documentation and the function declaration) don't match. | def render_openapi_type_exception(
self,
function: Callable[..., HttpResponse],
openapi_params: Set[Tuple[str, Union[type, Tuple[type, object]]]],
function_params: Set[Tuple[str, Union[type, Tuple[type, object]]]],
diff: Set[Tuple[str, Union[type, Tuple[type, object]]]],
) ->... | [
"def _type_error_message(func: callable, expected: type,\n got: object) -> str:\n\n return ('{} should return a {}, but returned {}' +\n '.').format(func.__name__, expected.__name__, got)",
"def test_function_args(self):\n reporter = SimpleReporter(\n pkgs=[P... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
We construct for both the OpenAPI data and the function's definition a set of tuples of the form (var_name, type) and then compare those sets to see if the OpenAPI data defines a different type than that actually accepted by the function. Otherwise, we print out the exact differences for convenient debugging and raise ... | def check_argument_types(
self, function: Callable[..., HttpResponse], openapi_parameters: List[Dict[str, Any]]
) -> None:
openapi_params: Set[Tuple[str, Union[type, Tuple[type, object]]]] = set()
json_params: Dict[str, Union[type, Tuple[type, object]]] = {}
for element in openapi_pa... | [
"def render_openapi_type_exception(\n self,\n function: Callable[..., HttpResponse],\n openapi_params: Set[Tuple[str, Union[type, Tuple[type, object]]]],\n function_params: Set[Tuple[str, Union[type, Tuple[type, object]]]],\n diff: Set[Tuple[str, Union[type, Tuple[type, object]]]]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This end-to-end API documentation test compares the arguments defined in the actual code using @has_request_variables and REQ(), with the arguments declared in our API documentation for every API endpoint in Zulip. First, we import the fancy Django version of zproject/urls.py; by doing this, each has_request_variables wrapper around each importe... | def test_openapi_arguments(self) -> None:
from zproject import urls as urlconf
# We loop through all the API patterns, looking in particular
# for those using the rest_dispatch decorator; we then parse
# its mapping of (HTTP_METHOD -> FUNCTION).
for p in urlconf.v1_api_and_json... | [
"def test_URL_kwargs(self):\n self.request_method_test('matchdict')",
"def test_iomanager_kwargs_collected(self):\n iomanager_kwargs = dict(\n required=object(),\n optional=object(),\n unlimited=object(),\n returns=object(),\n )\n view_kw... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test to make sure the request validator works properly. The tests cover both cases: catching valid requests marked as invalid, and making sure invalid requests are marked properly. | def test_validator(self) -> None:
# `/users/me/subscriptions` doesn't require any parameters
validate_request("/users/me/subscriptions", "get", {}, {}, False, "200")
with self.assertRaises(SchemaError):
# `/messages` POST does not work on an empty response
validate_reques... | [
"def validate(self, request):\n\t\treturn True",
"def test_invalid_action_in_requests(action):\n req = {\n 'dataset': {'database': 'ABC', 'name': 'XYZ'},\n 'fetch': {},\n 'action': action\n }\n with pytest.raises(ValidationError):\n validator.validate(req)",
"def test_confor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets all the Bittrex markets and filters them based on the main market filter | def get_markets(self, main_market_filter=None):
markets = self.bittrex.get_markets()
if not markets["success"]:
error_str = print("market", True)
logger.error(error_str)
exit()
markets = markets["result"]
#return markets
markets = list(map(lam... | [
"def get_active_markets():\n b = Bittrex(None, None)\n response = b.get_markets()\n if response['success']:\n markets = response['result']\n active_markets = []\n for market in markets:\n if market['IsActive']:\n active_markets.append(Market(market['MarketName... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply patches to tweak SDK build system. | def apply_patches():
with open(os.path.join(os.getcwd(), 'utils', 'sdk.patch'), 'r') as fin:
subprocess.call(['patch', '-p2'], stdin=fin, cwd=DESTDIR)
with open(os.path.join(SRCDIR, 's-video_sgx.patch'), 'r') as fin:
subprocess.call(['patch', '-p2'], stdin=fin, cwd=DESTDIR) | [
"def apply_patch(self, patch=None):\n if not any([patch, self.patch]):\n return\n if not patch:\n patch = self.patch\n self.cmd(f\"patch -p1 < {self.project.patch}/{self.ver}/{patch}\")",
"def apply_patch(self, patch=None):\n if not any([patch, self.patch]):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Quantum ESPRESSO Symmetry class =============================== This class contains all the info about Quantum ESPRESSO symmetry data. It is used to wrap symmetries into the Quantum ESPRESSO Fortran subroutines. Starting from a set of symmetry operations and the structure of the system, it builds all the QE symmetry ope... | def __init__(self, structure, threshold = 1e-5):
if not structure.has_unit_cell:
raise ValueError("Error, symmetry operation can be initialize only if the structure has a unit cell")
self.structure = structure
self.threshold = np.float64(threshold)
... | [
"def DialsSymmetry(DriverType=None):\n\n DriverInstance = DriverFactory.Driver(DriverType)\n\n class DialsSymmetryWrapper(DriverInstance.__class__):\n \"\"\"A wrapper for dials.symmetry\"\"\"\n\n def __init__(self):\n # generic things\n super().__init__()\n\n sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method just prints the symmetries on stdout. | def PrintSymmetries(self):
print()
print("Number of symmetries: {}".format(self.QE_nsym))
syms = self.GetSymmetries()
for i in range(self.QE_nsym):
print(" Symmetry {}".format(i+1))
for j in range(3):
print(" {:3.0f}{:3.0f}{:3.0f} | {:6.3f}".for... | [
"def OutputAllSymbols():\n new_symbols_file = os.path.join(ROOT_DIR, MODULE + \"-symbols.txt\")\n with open(new_symbols_file, 'w', encoding='utf-8') as out:\n for symbol in sorted(AllSymbols.keys()):\n out.write(symbol + \"\\n\")",
"def sym_print_tree(symb):\n\n assert type(symb) == str... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DIVIDE THE Q POINTS IN STARS ============================ This method divides the given q point list into the star. Remember, you need to pass the whole number of q points | def SetupQStar(self, q_tot, supergroup = False):
# Setup the symmetries
#self.SetupQPoint()
# Lets copy the q list (we are going to pop items from it)
q_list = q_tot[:]
q_stars = []
count_qstar = 0
count_q = 0
q_indices = np.zero... | [
"def ApplyQStar(self, fcq, q_point_group):\n \n nq = np.shape(q_point_group)[0]\n final_fc = np.zeros(np.shape(fcq), dtype = np.complex128)\n \n # Setup all the symmetries\n self.SetupQPoint()\n \n new_dyn = np.zeros( (3 * self.QE_nat, 3*self.QE_nat), dtype = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SYMMETRIZE A RANK3 TENSOR ========================== This subroutine uses the current symmetries to symmetrize a rank3 tensor. This tensor must be in the supercell space. The v3 argument will be overwritten. | def ApplySymmetryToTensor3(self, v3, initialize_symmetries = True):
if initialize_symmetries:
self.SetupFromSPGLIB()
# Apply the permutation symmetry
symph.permute_v3(v3)
# Apply the translational symmetries
symph.trans_v3(v3, self.QE_translations_irt)
# Ap... | [
"def _sym3x3(T):\n T[1,0], T[2,0], T[2,1] = T[0,1], T[0,2], T[1,2]",
"def TransformSymmetricSecondRankTensor(self, *args) -> \"itkVariableLengthVectorD\":\n return _itkCompositeTransformPython.itkCompositeTransformD3_TransformSymmetricSecondRankTensor(self, *args)",
"def symCrossMat3x3( v ):\n\n A ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SYMMETRIZE EFFECTIVE CHARGES ============================ This subroutine applies the symmetries to the effective charges. As always, the eff_charges will be modified by this subroutine. | def ApplySymmetryToEffCharge(self, eff_charges):
nat, cart1, cart2 = np.shape(eff_charges)
assert cart1 == cart2
assert cart1 == 3
assert nat == self.QE_nat, "Error, the structure and effective charges are not compatible"
# Apply the sum rule
tot_sum = np.sum(eff... | [
"def apply_symmetrisation(self):\n\n # get the values to be symmetrised\n for sym_set in self.molecule.symm_hs.values():\n charges, sigmas, epsilons = [], [], []\n for atom_set in sym_set:\n for atom in atom_set:\n charges.append(float(self.non_b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SYMMETRIZE RAMAN TENSOR ============================ This subroutine applies the symmetries to the Raman tensor. As always, the raman_tensor will be modified by this subroutine. | def ApplySymmetryToRamanTensor(self, raman_tensor):
pol1, pol2, at_cart = np.shape(raman_tensor)
assert pol1 == pol2
assert pol2 == 3
assert at_cart == 3*self.QE_nat, "Error, the structure and effective charges are not compatible"
# Apply the permutation on the electric f... | [
"def to_symmetric(self,sym):\n # Throw error if tensor is not loaded\n if not self.in_mem: raise ValueError('GEN_TEN not in memory for operation to_symmetric')\n\n # Return a copy of self if already a symtensor\n if self.is_symmetric:\n return self.copy()\n\n # Convert ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SYMMETRIZE A RANK4 TENSOR ========================== This subroutine uses the current symmetries to symmetrize a rank4 tensor. This tensor must be in the supercell space. The v4 argument will be overwritten. | def ApplySymmetryToTensor4(self, v4, initialize_symmetries = True):
if initialize_symmetries:
self.SetupFromSPGLIB()
# Apply the permutation symmetry
symph.permute_v4(v4)
# Apply the translational symmetries
symph.trans_v4(v4, self.QE_translations_irt)
# Ap... | [
"def SymmetrizeVector(self, vector):\n\n # Apply Translations if any\n self.ApplyTranslationsToVector(vector)\n \n # Prepare the real vector\n tmp_vector = np.zeros( (3, self.QE_nat), dtype = np.float64, order = \"F\")\n \n for i in range(self.QE_nat):\n t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
APPLY THE Q STAR SYMMETRY ========================= Given the fc matrix at each q in the star, it applies the symmetries in between them. | def ApplyQStar(self, fcq, q_point_group):
nq = np.shape(q_point_group)[0]
final_fc = np.zeros(np.shape(fcq), dtype = np.complex128)
# Setup all the symmetries
self.SetupQPoint()
new_dyn = np.zeros( (3 * self.QE_nat, 3*self.QE_nat), dtype = np.complex128... | [
"def SymmetrizeFCQ(self, fcq, q_stars, verbose = False, asr = \"simple\"):\n nqirr = len(q_stars)\n nq = np.sum([len(x) for x in q_stars])\n \n # Get the q_points vector\n q_points = np.zeros( (nq, 3), dtype = np.float64)\n sigma = 0\n for i in range(nqirr):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use the current structure to impose symmetries on a complete dynamical matrix in q space. Also the simple sum rule at Gamma is imposed | def SymmetrizeFCQ(self, fcq, q_stars, verbose = False, asr = "simple"):
nqirr = len(q_stars)
nq = np.sum([len(x) for x in q_stars])
# Get the q_points vector
q_points = np.zeros( (nq, 3), dtype = np.float64)
sigma = 0
for i in range(nqirr):
for q_vec ... | [
"def SymmetrizeDynQ(self, dyn_matrix, q_point):\n \n # TODO: implement hermitianity to speedup the conversion\n \n #Prepare the array to be passed to the fortran code\n QE_dyn = np.zeros( (3, 3, self.QE_nat, self.QE_nat), dtype = np.complex128, order = \"F\")\n \n # ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
QE SUM RULE =========== This subroutine imposes the acoustic sum rule on the given force constant matrix | def ImposeSumRule(self, force_constant, asr = "simple", axis = 1, zeu = None):
QE_fc = np.zeros( (3, 3, self.QE_nat, self.QE_nat), order ="F", dtype = np.complex128)
# Fill the effective charges if required
if zeu is not None:
# Convert in the correct indexing and u... | [
"def _reduceSum(self,tensor):\n if self._envType == \"DDP\":\n dist.reduce(tensor,0)\n return tensor",
"def test_large_sum(self):\r\n for n in [10, 20, 30, 40, 50]:\r\n A = matrix(range(n*n), (n,n))\r\n x = Variable(n,n)\r\n p = Problem(Minimize(at.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
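For intuition, here is a hedged, pure-numpy sketch of the "simple" acoustic sum rule named above (the row itself delegates to compiled QE routines): the diagonal 3x3 block of each atom is redefined so that a rigid translation of the whole crystal produces no force.

```python
import numpy as np

def impose_simple_asr(fc):
    # fc: (3*nat, 3*nat) real-space force constant matrix.
    fc = np.asarray(fc, dtype=float).copy()
    nat = fc.shape[0] // 3
    for i in range(nat):
        for a in range(3):
            for b in range(3):
                row_sum = sum(fc[3 * i + a, 3 * j + b] for j in range(nat))
                fc[3 * i + a, 3 * i + b] -= row_sum  # fix the self term
    return fc

fc = 0.5 * (lambda m: m + m.T)(np.random.rand(6, 6))  # symmetric test matrix
fc_asr = impose_simple_asr(fc)
for i in range(2):
    for a in range(3):
        for b in range(3):
            assert abs(sum(fc_asr[3 * i + a, 3 * j + b] for j in range(2))) < 1e-12
```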
USE SPGLIB TO SETUP THE SYMMETRIZATION ====================================== This function uses spglib to find symmetries, recognize the supercell and set up all the variables to perform the symmetrization inside the supercell. | def SetupFromSPGLIB(self):
if not __SPGLIB__:
raise ImportError("Error, this function works only if spglib is available")
# Get the symmetries
spg_syms = spglib.get_symmetry(self.structure.get_ase_atoms(), symprec = self.threshold)
symmetries = GetSymmetriesFromSPGLIB(spg_sy... | [
"def ApplyTranslationsToSupercell(fc_matrix, super_cell_structure, supercell):\n\n natsc = super_cell_structure.N_atoms\n\n # Check the consistency of the passed options\n natsc3, _ = np.shape(fc_matrix)\n assert natsc == int(natsc3 / 3), \"Error, wrong number of atoms in the supercell structure\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
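A short usage sketch of the spglib entry point this row relies on (requires the spglib package; the two-atom cubic cell below is made up for illustration). `get_symmetry` takes a `(lattice, fractional_positions, atomic_numbers)` tuple and returns a dict with "rotations" (integer 3x3 matrices in crystal coordinates) and "translations" (fractional vectors):

```python
import numpy as np
import spglib

lattice = np.eye(3) * 4.0                       # hypothetical cubic cell
positions = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]  # fractional coordinates
numbers = [55, 17]                              # e.g. a CsCl-like pair
sym = spglib.get_symmetry((lattice, positions, numbers), symprec=1e-5)
print(len(sym["rotations"]), "symmetry operations found")
```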
This subroutine applies the translations to the given vector. To be used only if the structure is a supercell and the symmetries have been initialized with SPGLIB. | def ApplyTranslationsToVector(self, vector):
nat = self.QE_nat
assert vector.shape[0] == nat
assert vector.shape[1] == 3
# Nothing to do if no translations are present
if self.QE_translation_nr <= 1:
return
sum_all = np.zeros((nat, 3), dtype = type(vector[0,0... | [
"def translate(self, vector):\n for atom in self.atoms:\n atom.translate(vector)",
"def translate(self, vector):\n seg2 = [ x.translated(vector) for x in self.asSegments()]\n self.activeRepresentation = SegmentRepresentation(self, seg2)\n return self",
"def translate(self, vector):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
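Conceptually, applying the pure translations means averaging the per-atom vector over the copies obtained by permuting equivalent atoms. A minimal sketch, assuming each translation is encoded as an `irt` atom permutation like in the row above:

```python
import numpy as np

def apply_translations(vector, translations_irt):
    # vector: (nat, 3); translations_irt: list of atom permutations.
    v = np.asarray(vector, dtype=float)
    acc = np.zeros_like(v)
    for irt in translations_irt:
        acc += v[irt, :]
    return acc / len(translations_irt)

v = np.array([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0]])
irts = [np.array([0, 1]), np.array([1, 0])]   # identity + atom swap
print(apply_translations(v, irts))            # both rows become the average
```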
This function initializes the QE symmetries from the symmetries expressed in the Cellconstructor format, i.e. a list of 3x4 numpy arrays where the last column is the fractional translation. | def InitFromSymmetries(self, symmetries, q_point = np.array([0,0,0])):
nsym = len(symmetries)
self.QE_nsymq = np.intc(nsym)
self.QE_nsym = self.QE_nsymq
for i, sym in enumerate(symmetries):
self.QE_s[:,:, i] = np.transpose(sym[:, :3])
... | [
"def generate_init_eqn(self):\n\n self.init_asn = OrderedDict() # assignment-type initialization\n self.init_itn = OrderedDict() # iterative initialization\n self.init_itn_vars = OrderedDict() # variables corr. to iterative vars\n self.init_jac = OrderedDict()\n\n for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GET SYMMETRIES FROM QE ====================== This method returns the symmetries in the CellConstructor format from the ones elaborated here. | def GetSymmetries(self, get_irt=False):
syms = []
for i in range(self.QE_nsym):
s_rot = np.zeros( (3, 4))
s_rot[:, :3] = np.transpose(self.QE_s[:, :, i])
s_rot[:, 3] = self.QE_ft[:, i]
syms.append(s_rot)
if not get_ir... | [
"def symbols(self):\r\n return [symbolData.symbol for symbolData in self.symbolData]",
"def InitFromSymmetries(self, symmetries, q_point = np.array([0,0,0])):\n \n nsym = len(symmetries)\n \n self.QE_nsymq = np.intc(nsym)\n self.QE_nsym = self.QE_nsymq\n \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SYMMETRIZE A VECTOR =================== This is the simplest symmetrization of a generic vector. Note that fractional and generic translations are not imposed, because this symmetrization acts on displacements and forces. | def SymmetrizeVector(self, vector):
# Apply Translations if any
self.ApplyTranslationsToVector(vector)
# Prepare the real vector
tmp_vector = np.zeros( (3, self.QE_nat), dtype = np.float64, order = "F")
for i in range(self.QE_nat):
tmp_vector[0, i] ... | [
"def ApplySymmetryToVector(symmetry, vector, unit_cell, irt):\n \n # Get the vector in crystalline coordinate\n nat, dumb = np.shape(vector)\n work = np.zeros( (nat, 3))\n sym = symmetry[:, :3]\n\n v1 = Methods.covariant_coordinates(unit_cell, vector)\n w1 = sym.dot(v1.T).T\n\n # Return in c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DYNAMICAL MATRIX SYMMETRIZATION =============================== Use the Quantum ESPRESSO fortran code to symmetrize the dynamical matrix at the given q point. | def SymmetrizeDynQ(self, dyn_matrix, q_point):
# TODO: implement hermitianity to speedup the conversion
#Prepare the array to be passed to the fortran code
QE_dyn = np.zeros( (3, 3, self.QE_nat, self.QE_nat), dtype = np.complex128, order = "F")
# Get the crysta... | [
"def getSymmetryMatrix(*args, **kwargs):\n \n pass",
"def SymmetrizeFCQ(self, fcq, q_stars, verbose = False, asr = \"simple\"):\n nqirr = len(q_stars)\n nq = np.sum([len(x) for x in q_stars])\n \n # Get the q_points vector\n q_points = np.zeros( (nq, 3), dtype = np.flo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
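The code above leaves hermiticity as a TODO; for reference, that step is a one-liner (a generic sketch, not part of the row's QE call):

```python
import numpy as np

def hermitize(dyn):
    # A dynamical matrix at q must satisfy D = D^dagger.
    dyn = np.asarray(dyn, dtype=complex)
    return 0.5 * (dyn + dyn.conj().T)

d = np.random.rand(6, 6) + 1j * np.random.rand(6, 6)
d_h = hermitize(d)
assert np.allclose(d_h, d_h.conj().T)
```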
GET THE Q STAR ============== Given a vector in q space, get the whole star. We use the Quantum ESPRESSO subroutine. | def GetQStar(self, q_vector):
self.SetupQPoint()
nq_new, sxq, isq, imq = symph.star_q(q_vector, self.QE_at, self.QE_bg,
self.QE_nsymq, self.QE_s, self.QE_invs, 0)
#print ("STAR IMQ:", imq)
if imq != 0:
total_star = np.zeros( (nq_new, 3), dtype = np.float6... | [
"def SetupQStar(self, q_tot, supergroup = False):\n \n # Setup the symmetries\n #self.SetupQPoint()\n \n # Lets copy the q list (we are going to pop items from it)\n q_list = q_tot[:]\n q_stars = []\n \n count_qstar = 0\n count_q = 0\n q_i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
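The star of q is the set of distinct images of q under the point-group rotations, counted modulo a reciprocal lattice vector. A hedged sketch in fractional coordinates (the row above instead calls the compiled QE `star_q` routine; here the rotations are assumed to be given directly in reciprocal crystal coordinates):

```python
import numpy as np

def get_q_star(q_frac, rotations, tol=1e-6):
    q = np.asarray(q_frac, dtype=float)
    star = []
    for rot in rotations:
        sq = (np.asarray(rot, dtype=float) @ q) % 1.0  # fold into [0, 1)
        if not any(np.allclose(sq, p, atol=tol) for p in star):
            star.append(sq)
    return np.array(star)

rots = [np.eye(3, dtype=int), -np.eye(3, dtype=int)]  # identity + inversion
print(get_q_star([0.25, 0.0, 0.0], rots))             # q and -q
```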
GET ONLY THE IRREDUCIBLE Q POINTS ================================= This method selects only the irreducible q points given a list of total q points for the structure. | def SelectIrreducibleQ(self, q_vectors):
qs = np.array(q_vectors)
nq = np.shape(qs)[0]
q_irr = [qs[x, :].copy() for x in range(nq)]
for i in range(nq):
if i >= len(q_irr):
break
q_stars = self.GetQStar(q_irr[i])
n_star = ... | [
"def GetQIrr(self, supercell):\n\n # Get all the q points\n q_points = GetQGrid(self.QE_at.T, supercell)\n\n # Delete the irreducible ones\n q_irr = self.SelectIrreducibleQ(q_points)\n\n return q_irr",
"def liste_Qx (self):\n liste_matrices_Qx = [self.Qx(self.liste_J[self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
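The selection logic below mirrors the loop in the row above: keep the first q point, drop every later point that lies in its star, and repeat. This is an illustrative reimplementation, with star generation inlined as in the GetQStar sketch earlier:

```python
import numpy as np

def select_irreducible(q_list, rotations, tol=1e-6):
    def star(q):
        out = []
        for rot in rotations:
            sq = (np.asarray(rot, dtype=float) @ q) % 1.0
            if not any(np.allclose(sq, p, atol=tol) for p in out):
                out.append(sq)
        return out

    remaining = [np.asarray(q, dtype=float) % 1.0 for q in q_list]
    irreducible = []
    while remaining:
        q = remaining.pop(0)
        irreducible.append(q)
        q_star = star(q)
        remaining = [p for p in remaining
                     if not any(np.allclose(p, s, atol=tol) for s in q_star)]
    return irreducible

rots = [np.eye(3, dtype=int), -np.eye(3, dtype=int)]
qs = [[0.25, 0, 0], [0.75, 0, 0], [0.5, 0, 0]]
print(len(select_irreducible(qs, rots)))  # 2: 0.25 and 0.75 form one star
```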
GET THE LIST OF IRREDUCIBLE Q POINTS ==================================== This method returns a list of irreducible q points given the supercell size. | def GetQIrr(self, supercell):
# Get all the q points
q_points = GetQGrid(self.QE_at.T, supercell)
# Delete the irreducible ones
q_irr = self.SelectIrreducibleQ(q_points)
return q_irr | [
"def GetQGrid_old(unit_cell, supercell_size):\n \n q_list = []\n # Get the recirpocal lattice vectors\n bg = Methods.get_reciprocal_vectors(unit_cell)\n \n # Get the supercell\n supercell = np.tile(supercell_size, (3, 1)).transpose() * unit_cell\n \n # Get the lattice vectors of the super... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
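In fractional coordinates the commensurate grid of an (N1, N2, N3) supercell is simply q = (i/N1, j/N2, k/N3); the row above builds the Cartesian version of this grid before filtering it down to the irreducible wedge. A tiny sketch:

```python
import numpy as np
from itertools import product

def get_q_grid_frac(supercell):
    n1, n2, n3 = supercell
    return np.array([[i / n1, j / n2, k / n3]
                     for i, j, k in product(range(n1), range(n2), range(n3))])

print(len(get_q_grid_frac((2, 2, 2))))  # 8 q points for a 2x2x2 supercell
```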
APPLY THE SYMMETRIES TO A 2RANK TENSOR ======================================= This subroutine applies the symmetries to a rank-2 tensor. Useful when working with supercells. | def ApplySymmetriesToV2(self, v2, apply_translations = True):
# Apply the Permutation symmetry
v2[:,:] = 0.5 * (v2 + v2.T)
# First, let's recall that the fortran subroutines
# take the input as (3,3,nat,nat)
new_v2 = np.zeros( (3,3, self.QE_nat, self.QE_nat), dtype = np.double, ... | [
"def to_symmetric(self,sym):\n # Throw error if tensor is not loaded\n if not self.in_mem: raise ValueError('GEN_TEN not in memory for operation to_symmetric')\n\n # Return a copy of self if already a symtensor\n if self.is_symmetric:\n return self.copy()\n\n # Convert ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
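The first line of the document above is the permutation symmetry; spelled out as a standalone sketch, it encodes the fact that second derivatives of the energy commute:

```python
import numpy as np

def permutation_symmetry(v2):
    # d2E/(du_a du_b) = d2E/(du_b du_a)  =>  the matrix must be symmetric.
    v2 = np.asarray(v2, dtype=float)
    return 0.5 * (v2 + v2.T)

v2_sym = permutation_symmetry(np.random.rand(6, 6))  # 3*nat x 3*nat, nat = 2
assert np.allclose(v2_sym, v2_sym.T)
```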
This function returns a matrix containing the symmetries from the given ITA code of the Group. The corresponding ITA/group label can be found on the Bilbao Crystallographic Server. | def get_symmetries_from_ita(ita, red=False):
if ita <= 0:
raise ValueError("Error, ITA group %d is not valid." % ita)
filename="%s/SymData/%d.dat" % (CURRENT_DIR, ita)
if red:
filename="%s/SymData/%d_red.dat" % (CURRENT_DIR, ita)
if not os.path.exists(filename):
... | [
"def SSpcGroup(SGData,SSymbol):\n \n def fixMonoOrtho():\n mod = ''.join(modsym).replace('1/2','0').replace('1','0')\n if SGData['SGPtGrp'] in ['2','m']: #OK\n if mod in ['a00','0b0','00g']:\n result = [i*-1 for i in SGData['SSGKl']]\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
CONVERT THE SYMMETRIES ====================== This function converts the symmetries from the spglib format. | def GetSymmetriesFromSPGLIB(spglib_sym, regolarize = False):
# Check if the type is correct
if not "translations" in spglib_sym:
raise ValueError("Error, your symmetry dict has no 'translations' key.")
if not "rotations" in spglib_sym:
raise ValueError("Error, your symmetry dic... | [
"def list2sym(lst):\n ...",
"def retr_symmetry_operations(struct,ini):\n ini[\"symgen\"] = struct.get_symmetry_operations()\n return ini",
"def retr_symmetry_generators(struct,ini):\n #hall = struct.spacegroup_hall()\n ini[\"symgen\"] = struct.get_symmetry_generators()\n return ini",
"def _a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
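The conversion the row above performs can be summarized in a few lines: pack each spglib rotation (3x3) and fractional translation (length 3) into one 3x4 matrix. A self-contained sketch, ignoring the regularization option:

```python
import numpy as np

def symmetries_from_spglib(spglib_sym):
    if "rotations" not in spglib_sym or "translations" not in spglib_sym:
        raise ValueError("Expected a spglib symmetry dict.")
    rots = np.asarray(spglib_sym["rotations"], dtype=float)
    trans = np.asarray(spglib_sym["translations"], dtype=float)
    return [np.hstack((r, t[:, None])) for r, t in zip(rots, trans)]

fake = {"rotations": [np.eye(3)], "translations": [[0.5, 0.0, 0.0]]}
print(symmetries_from_spglib(fake)[0].shape)  # (3, 4)
```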
GET IRT ======= Get the irt array. It is the array of atom indices that the symmetry operation swaps: the y-th element of the array (irt[y]) is the index in the original structure, while y is the index of the equivalent atom after the symmetry is applied. | def GetIRT(structure, symmetry, timer = Timer.Timer(), debug = False):
new_struct = structure.copy()
if timer is None:
new_struct.fix_coords_in_unit_cell(delete_copies = False, debug = debug)
else:
timer.execute_timed_function(new_struct.fix_coords_in_unit_cell, delete_copies = Fal... | [
"def GetSymmetries(self, get_irt=False):\n \n syms = []\n for i in range(self.QE_nsym):\n s_rot = np.zeros( (3, 4))\n s_rot[:, :3] = np.transpose(self.QE_s[:, :, i])\n s_rot[:, 3] = self.QE_ft[:, i]\n \n syms.append(s_rot)\n \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
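A hedged sketch of how an irt map can be computed from scratch: apply the operation to every atom in fractional coordinates, fold into [0, 1), and match each image against the original positions with a periodic tolerance (the row above obtains the same map through the structure object):

```python
import numpy as np

def get_irt(frac_positions, sym_3x4, tol=1e-5):
    pos = np.asarray(frac_positions, dtype=float) % 1.0
    rot, trans = np.asarray(sym_3x4)[:, :3], np.asarray(sym_3x4)[:, 3]
    new_pos = (pos @ rot.T + trans) % 1.0
    irt = np.zeros(len(pos), dtype=int)
    for y, p in enumerate(new_pos):
        delta = np.abs(pos - p)
        delta = np.minimum(delta, 1.0 - delta)  # periodic image distance
        matches = np.where(np.all(delta < tol, axis=1))[0]
        assert len(matches) == 1, "operation does not map structure onto itself"
        irt[y] = matches[0]
    return irt

identity = np.hstack((np.eye(3), np.zeros((3, 1))))
print(get_irt([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]], identity))  # [0 1]
```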
APPLY SYMMETRY ============== Apply the symmetry to the given vector of displacements. Translations are neglected. | def ApplySymmetryToVector(symmetry, vector, unit_cell, irt):
# Get the vector in crystalline coordinate
nat, dumb = np.shape(vector)
work = np.zeros( (nat, 3))
sym = symmetry[:, :3]
v1 = Methods.covariant_coordinates(unit_cell, vector)
w1 = sym.dot(v1.T).T
# Return in cartesian coordi... | [
"def ApplySymmetriesToVector(symmetries, vector, unit_cell, irts):\n \n # Get the vector in crystalline coordinate\n nat, dumb = np.shape(vector)\n n_sym = len(symmetries)\n\n assert n_sym == len(irts)\n\n work = np.zeros( (n_sym, nat, 3), dtype = np.double, order = \"C\")\n \n # Pass to cry... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
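To make the coordinate gymnastics concrete, here is a standalone sketch under the same conventions (cell rows are lattice vectors; that irt maps the original index to the transformed one is an assumption on my part):

```python
import numpy as np

def apply_symmetry_to_vector(sym_3x4, vector, unit_cell, irt):
    rot = np.asarray(sym_3x4)[:, :3]
    cell = np.asarray(unit_cell, dtype=float)
    # Cartesian -> fractional ("covariant") coordinates: v = f @ cell.
    v_frac = np.linalg.solve(cell.T, np.asarray(vector, dtype=float).T).T
    w_frac = v_frac @ rot.T        # rotate each atom's displacement
    w_cart = w_frac @ cell         # back to Cartesian
    out = np.zeros_like(w_cart)
    out[irt, :] = w_cart           # reassign vectors to the mapped atoms
    return out

cell = np.eye(3) * 3.0
sym = np.hstack((np.eye(3), np.zeros((3, 1))))  # identity operation
v = np.array([[0.1, 0.0, 0.0], [0.0, 0.2, 0.0]])
print(apply_symmetry_to_vector(sym, v, cell, np.array([0, 1])))
```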
APPLY SYMMETRY ============== Apply the symmetry to the given vector of displacements. Translations are neglected. | def ApplySymmetriesToVector(symmetries, vector, unit_cell, irts):
# Get the vector in crystalline coordinate
nat, dumb = np.shape(vector)
n_sym = len(symmetries)
assert n_sym == len(irts)
work = np.zeros( (n_sym, nat, 3), dtype = np.double, order = "C")
# Pass to crystalline coordina... | [
"def ApplySymmetryToVector(symmetry, vector, unit_cell, irt):\n \n # Get the vector in crystalline coordinate\n nat, dumb = np.shape(vector)\n work = np.zeros( (nat, 3))\n sym = symmetry[:, :3]\n\n v1 = Methods.covariant_coordinates(unit_cell, vector)\n w1 = sym.dot(v1.T).T\n\n # Return in c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prepare a FIND SYM input file ============================= This method can be used to prepare a suitable input file for the ISOTROPY findsym program. | def PrepareISOTROPYFindSymInput(structure, path_to_file = "findsym.in",
title = "Prepared with Cellconstructor",
latticeTolerance = 1e-5, atomicPositionTolerance = 0.001):
lines = GetISOTROPYFindSymInput(structure, title, latticeTolerance, atomicP... | [
"def createNewInput(self,currentInputFiles,oriInputFiles,samplerType,**Kwargs): \n import DecayParser\n import FissionYieldParser\n import QValuesParser\n import MaterialParser\n import PathParser\n \n keyWordDict = {}\n \n directoryFiles = ['path','library_fiss','input_dpl']\n #print ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
As the method PrepareISOTROPYFindSymInput, but the input is returned as a list of strings (lines). | def GetISOTROPYFindSymInput(structure, title = "Prepared with Cellconstructor",
latticeTolerance = 1e-5, atomicPositionTolerance = 0.001):
# Check if the structure has a unit cell
if not structure.has_unit_cell:
raise ValueError("Error, the given structure has not a valid uni... | [
"def PrepareISOTROPYFindSymInput(structure, path_to_file = \"findsym.in\",\n title = \"Prepared with Cellconstructor\",\n latticeTolerance = 1e-5, atomicPositionTolerance = 0.001):\n \n lines = GetISOTROPYFindSymInput(structure, title, latticeToler... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |