text
stringlengths 1
93.6k
|
|---|
elif values[0] == 'vt':
|
#texture coordinate (u, v)
|
self.texcoords.append([float(a) for a in values[1:3] ])
|
elif values[0] == 'f':
|
#face description
|
face_vertices = []
|
face_texcoords = []
|
for v in values[1:]:
|
w = v.split('/')
|
face_vertices.append(int(w[0]))
|
if len(w) >= 2 and len(w[1]) > 0:
|
face_texcoords.append(int(w[1]))
|
else:
|
color_fixed = True
|
face_texcoords.append(0)
|
self.faces.append([face_vertices, face_texcoords])
|
for f in self.faces:
|
if not color_fixed:
|
f.append(three_d_object.decide_face_color(f[-1], self.texture, self.texcoords))
|
else:
|
f.append((50, 50, 50)) #default color
|
# cv2.imwrite('texture_marked.png', self.texture)
|
def decide_face_color(hex_color, texture, textures):
    """Pick a flat color for a face by sampling the texture image at the
    mean of the face's texture coordinates.

    Note: this does not perform real texture mapping; it takes a single
    pixel at the averaged (u, v) of the face.

    Args:
        hex_color: 1-based OBJ indices into ``textures`` for this face's
            texture coordinates. (Name kept for interface compatibility —
            these are indices, not a hex color string.)
        texture: H x W x C image array (e.g. as loaded by ``cv2.imread``).
        textures: sequence of (u, v) texture-coordinate pairs in [0, 1].

    Returns:
        Tuple of ints — the sampled pixel's channel values (BGR order if
        the image was loaded with OpenCV).
    """
    h, w, _ = texture.shape
    all_us = []
    all_vs = []
    for idx in hex_color:
        t = textures[idx - 1]  # OBJ indices are 1-based
        # Map normalized (u, v) to pixel coordinates; the v axis is
        # flipped because image row 0 is the top. The small epsilon keeps
        # the edge values u == 1.0 / v == 0.0 inside the image.
        all_us.append(int(w * t[0] - 0.0001))
        all_vs.append(int(h * (1 - t[1]) - 0.0001))
    if not all_us:
        # Degenerate face with no texture coordinates: fall back to the
        # loader's default face color instead of dividing by zero.
        return (50, 50, 50)
    u = int(sum(all_us) / len(all_us))
    v = int(sum(all_vs) / len(all_vs))
    # Clamp to the image bounds so out-of-range texture coordinates
    # (e.g. tiled coords > 1) can't wrap around via negative indexing
    # or raise IndexError.
    u = min(max(u, 0), w - 1)
    v = min(max(v, 0), h - 1)
    return tuple(int(channel) for channel in texture[v, u])
|
# <FILESEP>
|
#! /usr/bin/env python
|
import argparse
|
import os
|
import pysam
|
import pandas as pd
|
import numpy as np
|
from scipy.optimize import nnls
|
import scipy.sparse
|
import multiprocessing as mp
|
from hashed_read_genome_array import HashedReadBAMGenomeArray, ReadKeyMapFactory, read_length_nmis, get_hashed_counts
|
from plastid.genomics.roitools import SegmentChain, positionlist_to_segments
|
import sys
|
from time import strftime
|
parser = argparse.ArgumentParser(description='Use linear regression to identify likely sites of translation. Regression will be performed for ORFs '
|
'defined by find_orfs_and_types.py using a metagene profile constructed from annotated CDSs. If '
|
'multiple ribosome profiling datasets are to be analyzed separately (e.g. if they were collected under '
|
'different drug treatments), then this program should be run separately for each, ideally in separate '
|
'subfolders indicated by SUBDIR.')
|
parser.add_argument('bamfiles', nargs='+', help='Path to transcriptome-aligned BAM file(s) for read data')
|
parser.add_argument('--subdir', default=os.path.curdir,
|
help='Convenience argument when dealing with multiple datasets. In such a case, set SUBDIR to an appropriate name (e.g. HARR, '
|
'CHX) to avoid file conflicts. (Default: current directory)')
|
parser.add_argument('--restrictbystarts', nargs='+',
|
help='Subdirectory/subdirectories or filename(s) containing regression output to use to restrict ORFs for regression. If a '
|
'directory or list of directories, file(s) of name REGRESSFILE (regression.h5 by default) will be searched for within them. '
|
'For use to restrict regression on e.g. CHX or no-drug data based only on positive hits from e.g. HARR or LTM data. '
|
'Value(s) of MINWSTART indicate the minimum W statistic to require. If multiple directories/files are provided, start '
|
'sites will be taken from their union.')
|
parser.add_argument('--minwstart', type=float, nargs='+', default=[0],
|
help='Minimum W_start statistic to require for regression output in RESTRICTBYSTARTS. If only one value is given, it will be '
|
'assumed to apply to all; if multiple values are given, the number of values must match the number of values provided for '
|
'RESTRICTBYSTARTS. Ignored if RESTRICTBYSTARTS not included. (Default: 0)')
|
parser.add_argument('--orfstore', default='orf.h5',
|
help='Path to pandas HDF store containing ORFs to regress; generated by find_orfs_and_types.py (Default: orf.h5)')
|
parser.add_argument('--inbed', default='transcripts.bed', help='Transcriptome BED-file (Default: transcripts.bed)')
|
parser.add_argument('--offsetfile', default='offsets.txt',
|
help='Path to 2-column tab-delimited file with 5\' offsets for variable P-site mappings. First column indicates read length, '
|
'second column indicates offset to apply. Read lengths are calculated after trimming up to MAX5MIS 5\' mismatches. Accepted '
|
'read lengths are defined by those present in the first column of this file. If SUBDIR is set, this file is assumed to be '
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.