Dataset schema (⌀ = column contains nulls):

| Column | Type | Range / values | Nullable (⌀) |
|---|---|---|---|
| hexsha | string | lengths 40–40 | |
| size | int64 | 5 – 2.06M | |
| ext | string | 10 classes | |
| lang | string | 1 class | |
| max_stars_repo_path | string | lengths 3–248 | |
| max_stars_repo_name | string | lengths 5–125 | |
| max_stars_repo_head_hexsha | string | lengths 40–78 | |
| max_stars_repo_licenses | list | lengths 1–10 | |
| max_stars_count | int64 | 1 – 191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 | ⌀ |
| max_issues_repo_path | string | lengths 3–248 | |
| max_issues_repo_name | string | lengths 5–125 | |
| max_issues_repo_head_hexsha | string | lengths 40–78 | |
| max_issues_repo_licenses | list | lengths 1–10 | |
| max_issues_count | int64 | 1 – 67k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 | ⌀ |
| max_forks_repo_path | string | lengths 3–248 | |
| max_forks_repo_name | string | lengths 5–125 | |
| max_forks_repo_head_hexsha | string | lengths 40–78 | |
| max_forks_repo_licenses | list | lengths 1–10 | |
| max_forks_count | int64 | 1 – 105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 | ⌀ |
| content | string | lengths 5 – 2.06M | |
| avg_line_length | float64 | 1 – 1.02M | |
| max_line_length | int64 | 3 – 1.03M | |
| alphanum_fraction | float64 | 0 – 1 | |
| count_classes | int64 | 0 – 1.6M | |
| score_classes | float64 | 0 – 1 | |
| count_generators | int64 | 0 – 651k | |
| score_generators | float64 | 0 – 1 | |
| count_decorators | int64 | 0 – 990k | |
| score_decorators | float64 | 0 – 1 | |
| count_async_functions | int64 | 0 – 235k | |
| score_async_functions | float64 | 0 – 1 | |
| count_documentation | int64 | 0 – 1.04M | |
| score_documentation | float64 | 0 – 1 | |
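A minimal usage sketch, assuming the split above has already been loaded into a pandas DataFrame named `df` (the name, threshold, and column choice are illustrative, not part of the dataset):

import pandas as pd  # assumed environment; any DataFrame with the columns above works

# Keep well-documented Python files that define at least one class.
subset = df[(df["score_documentation"] > 0.2) & (df["count_classes"] > 0)]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "score_documentation"]].head())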
f31c461ea88a83b782769751389f56772c713d60 | 1,457 | py | Python | pyof/v0x05/asynchronous/table_status.py | mhaji007/python-openflow | 25f032d660e648501d1e732969b6f91357ef5b66 | ["MIT"] | null | null | null | pyof/v0x05/asynchronous/table_status.py | mhaji007/python-openflow | 25f032d660e648501d1e732969b6f91357ef5b66 | ["MIT"] | null | null | null | pyof/v0x05/asynchronous/table_status.py | mhaji007/python-openflow | 25f032d660e648501d1e732969b6f91357ef5b66 | ["MIT"] | null | null | null |
"""Defines an Table Status Message."""
# System imports
from enum import IntEnum
# Local source tree imports
from pyof.foundation.base import GenericMessage, GenericStruct
from pyof.foundation.basic_types import BinaryData, FixedTypeList, UBInt16, UBInt8, UBInt32, UBInt64, Pad
from pyof.v0x05.common.header import Header, Type
from pyof.v0x05.controller2switch.multipart_reply import TableDesc
# Third-party imports
__all__ = ('TableStatus', 'TableReason')
# Enums
class TableReason(IntEnum):
"""What changed about the table."""
#: Vacancy down threshold event
OFPTR_VACANCY_DOWN = 3
#: Vacancy up threshold event
OFPTR_VACANCY_UP = 4
# Classes
class TableStatus(GenericMessage):
"""OpenFlow TableStatus Message OFPT_TABLE_STATUS.
A table config has changed in the datapath
"""
#: :class:`~pyof.v0x05.common.action.ActionHeader`: OpenFlow Header
header = Header(message_type=Type.OFPT_TABLE_STATUS)
#: One of OFPTR_.*
reason = UBInt8(enum_ref=TableReason)
#: Pad to 64 bits
pad = Pad(7)
#: New table config
table = TableDesc()
def __init__(self, xid=None, reason=None, table=None):
"""Create a message with the optional parameters below.
Args:
xid (int): xid to be used on the message header.
elements: List of elements - 0 or more
"""
super().__init__(xid)
self.reason = reason
self.table = table
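# Usage sketch (illustrative, not part of the original module): building a table-status
# message; pack() is an assumption based on how other python-openflow GenericMessage
# subclasses are serialized.
status = TableStatus(xid=1, reason=TableReason.OFPTR_VACANCY_UP, table=TableDesc())
wire_bytes = status.pack()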
| 26.490909 | 105 | 0.691833 | 968 | 0.664379 | 0 | 0 | 0 | 0 | 0 | 0 | 662 | 0.454358 |
f31ce1a1719984d1cf324a95ea4f226d430436e1 | 361 | py | Python | DEQModel/utils/debug.py | JunLi-Galios/deq | 80eb6b598357e8e01ad419126465fa3ed53b12c7 | ["MIT"] | 548 | 2019-09-05T04:25:21.000Z | 2022-03-22T01:49:35.000Z | DEQModel/utils/debug.py | JunLi-Galios/deq | 80eb6b598357e8e01ad419126465fa3ed53b12c7 | ["MIT"] | 21 | 2019-10-04T16:36:05.000Z | 2022-03-24T02:20:28.000Z | DEQModel/utils/debug.py | JunLi-Galios/deq | 80eb6b598357e8e01ad419126465fa3ed53b12c7 | ["MIT"] | 75 | 2019-09-05T22:40:32.000Z | 2022-03-31T09:40:44.000Z |
import torch
from torch.autograd import Function


class Identity(Function):

    @staticmethod
    def forward(ctx, x, name):
        ctx.name = name
        return x.clone()

    def backward(ctx, grad):
        import pydevd
        pydevd.settrace(suspend=False, trace_only_current_thread=True)
        grad_temp = grad.clone()
        return grad_temp, None
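# Usage sketch (illustrative, not part of the original file): Function subclasses are
# invoked through .apply(); backward() then runs during autograd, where pydevd.settrace
# hooks an attached pydevd/PyCharm debugger into the backward pass.
x = torch.randn(4, requires_grad=True)
y = Identity.apply(x, "watch-this-tensor")
y.sum().backward()  # runs Identity.backward for this node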
| 24.066667 | 70 | 0.65928 | 310 | 0.858726 | 0 | 0 | 93 | 0.257618 | 0 | 0 | 0 | 0 |
f31cf93ef20fe7554b80d699b5aa26fadaf86834 | 13,629 | py | Python | feature_track_visualizer/visualizer.py | jfvilaro/rpg_feature_tracking_analysis | 4c29a64cc07db44b43c12ff66c71d5c7da062c79 | ["MIT"] | null | null | null | feature_track_visualizer/visualizer.py | jfvilaro/rpg_feature_tracking_analysis | 4c29a64cc07db44b43c12ff66c71d5c7da062c79 | ["MIT"] | null | null | null | feature_track_visualizer/visualizer.py | jfvilaro/rpg_feature_tracking_analysis | 4c29a64cc07db44b43c12ff66c71d5c7da062c79 | ["MIT"] | null | null | null |
from os.path import isfile
import os
import cv2
from os.path import join
import numpy as np
import tqdm
import random
from big_pun.tracker_utils import filter_first_tracks, getTrackData
def component():
return random.randint(0, 255)
class FeatureTracksVisualizer:
def __init__(self, gt_file,track_file, dataset, params):
self.params = params
self.dataset = dataset
self.tracks, self.feature_ids, self.colors, self.colors_id = self.loadFeatureTracks(gt_file,track_file)
self.min_time_between_screen_refresh_ms = 5
self.max_time_between_screen_refresh_ms = 100
self.is_paused = False
self.is_looped = False
self.marker = params["marker"]
self.computeMinMaxSpeed()
self.updateDisplayRate()
self.times = np.linspace(self.min_stamp, self.max_stamp, int(self.params['framerate'] * (self.max_stamp - self.min_stamp)))
self.time_index = 0
self.cv2_window_name = 'tracks'
cv2.namedWindow(self.cv2_window_name, cv2.WINDOW_NORMAL)
def cropGT(self, gt, predictions):
gt = {i: g for i,g in gt.items() if i in predictions}
predictions = {i: p for i,p in predictions.items() if i in gt}
for i, gt_track in gt.items():
prediction_track = predictions[i]
t_max = prediction_track[-1,0]
gt_track = gt_track[gt_track[:,0]<=t_max]
gt[i] = gt_track
return gt
def discardOnThreshold(self, predictions, gt, thresh):
assert set(gt.keys()) == set(predictions.keys())
for i, gt_track in gt.items():
pred_track = predictions[i]
x_p_interp = np.interp(gt_track[:,0], pred_track[:,0], pred_track[:,1])
y_p_interp = np.interp(gt_track[:,0], pred_track[:,0], pred_track[:,2])
error = np.sqrt((x_p_interp-gt_track[:,1])**2 + (y_p_interp-gt_track[:,2])**2)
idxs = np.where(error > thresh)[0]
if len(idxs) == 0:
continue
t_max = gt_track[idxs[0],0]
gt[i] = gt_track[:idxs[0]]
predictions[i] = pred_track[pred_track[:,0]<t_max]
return predictions, gt
def loadFeatureTracks(self, gt_file, track_file, method="estimation", color=[0, 255, 0], gt_color=[255, 0, 255]):
tracks = {}
colors = {}
colors_id = {}
color = [r for r in reversed(color)]
if self.params['visualisation_mode'] == "estimation":
# load track
colors["estimation"] = color
data = np.genfromtxt(track_file, delimiter=" ")
first_len_tracks = len(data)
valid_ids, data = filter_first_tracks(data, filter_too_short=True)
track_data = {i: data[data[:, 0] == i, 1:] for i in valid_ids}
tracks[method] = track_data
for i in valid_ids: # Define a different random color for each id.
colors_id[i] = [component(), component(), component()]
if len(track_data) < first_len_tracks:
print("WARNING: This package only supports evaluation of tracks which have been initialized at the same"
"time. All tracks except the first have been discarded.")
# load gt
tracks_csv = join(gt_file)
if isfile(tracks_csv):
gt = getTrackData(tracks_csv)
colors["gt"] = gt_color
# if true, crop all tracks from gt to have the same length as the predictions.
if self.params["crop_to_predictions"]:
gt = self.cropGT(gt, tracks[method])
if self.params["error_threshold"] > 0:
tracks[method], gt = self.discardOnThreshold(tracks[method], gt, self.params["error_threshold"])
tracks["gt"] = gt
elif self.params['visualisation_mode'] == "gt":
# load gt
data = np.genfromtxt(gt_file, delimiter=" ")
first_len_tracks = len(data)
valid_ids, data = filter_first_tracks(data, filter_too_short=True)
track_data = {i: data[data[:, 0] == i, 1:] for i in valid_ids}
tracks["gt"] = track_data
for i in valid_ids: # Define a different random color for each id.
colors_id[i] = [component(), component(), component()]
if len(track_data) < first_len_tracks:
print("WARNING: This package only supports evaluation of tracks which have been initialized at the same"
"time. All tracks except the first have been discarded.")
colors["gt"] = gt_color
elif self.params['visualisation_mode'] == "track":
# load track
data = np.genfromtxt(track_file, delimiter=" ")
first_len_tracks = len(data)
valid_ids, data = filter_first_tracks(data, filter_too_short=True)
track_data = {i: data[data[:, 0] == i, 1:] for i in valid_ids}
tracks["track"] = track_data
for i in valid_ids: # Define a different random color for each id.
colors_id[i] = [component(), component(), component()]
if len(track_data) < first_len_tracks:
print("WARNING: This package only supports evaluation of tracks which have been initialized at the same"
"time. All tracks except the first have been discarded.")
colors["track"] = gt_color
feature_ids = {label: list(tracks_dict.keys()) for label, tracks_dict in tracks.items()}
max_stamp = -1
min_stamp = 10**1000
for label, tracks_dict in tracks.items():
for i, track in tracks_dict.items():
min_stamp = min([min_stamp, min(track[:,0])])
max_stamp = max([max_stamp, max(track[:,0])])
self.min_stamp = min_stamp
self.max_stamp = max_stamp
return tracks, feature_ids, colors, colors_id
def pause(self):
self.is_paused = True
def unpause(self):
self.is_paused= False
def togglePause(self):
self.is_paused = not self.is_paused
def toggleLoop(self):
self.is_looped = not self.is_looped
def forward(self, num_timesteps = 1):
if self.is_looped:
self.time_index = (self.time_index + 1) % len(self.times)
else:
self.time_index = min(self.time_index + num_timesteps, len(self.times) - 1)
def backward(self, num_timesteps = 1):
self.time_index = max(self.time_index - num_timesteps, 0)
def goToBegin(self):
self.time_index = 0
def goToEnd(self):
self.time_index = len(self.times) - 1
def increaseTrackHistoryLength(self):
self.params['track_history_length'] = self.params['track_history_length'] * 1.25
def decreaseTrackHistoryLength(self):
self.params['track_history_length'] = self.params['track_history_length'] / 1.25
def computeMinMaxSpeed(self):
self.max_speed = 1000.0 / (self.min_time_between_screen_refresh_ms * self.params['framerate'])
self.min_speed = 1000.0 / (self.max_time_between_screen_refresh_ms * self.params['framerate'])
def updateDisplayRate(self):
self.params['speed'] = np.clip(self.params['speed'], self.min_speed, self.max_speed)
self.time_between_screen_refresh_ms = int(1000.0 / (self.params['speed'] * self.params['framerate']))
def writeToVideoFile(self, f):
height, width, _ = self.dataset.images[0].shape
height, width = int(self.params["scale"]*height), int(self.params["scale"]*width)
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
self.video_writer = cv2.VideoWriter(f, fourcc, self.params['framerate']*self.params["speed"], (width, height))
for t in tqdm.tqdm(self.times):
image_to_display = self.update(t)
self.video_writer.write(image_to_display)
self.cleanup()
def visualizationLoop(self):
while True:
t = self.times[self.time_index]
image_to_display = self.update(t)
cv2.imshow(self.cv2_window_name, image_to_display)
if not self.is_paused:
self.forward(1)
c = cv2.waitKey(self.time_between_screen_refresh_ms)
key = chr(c & 255)
if c == 27: # 'q' or 'Esc': Quit
break
elif key == 'r': # 'r': Reset
self.goToBegin()
self.unpause()
elif key == 'p' or c == 32: # 'p' or 'Space': Toggle play/pause
self.togglePause()
elif key == "a": # 'Left arrow': Go backward
self.backward(1)
self.pause()
elif key == "d": # 'Right arrow': Go forward
self.forward(1)
self.pause()
elif key == "s": # 'Down arrow': Go to beginning
self.goToBegin()
self.pause()
elif key == "w": # 'Up arrow': Go to end
self.goToEnd()
self.pause()
elif key == "e":
self.increaseTrackHistoryLength()
elif key == "q":
self.decreaseTrackHistoryLength()
elif key == 'l': # 'l': Toggle looping
self.toggleLoop()
self.cleanup()
def cleanup(self):
cv2.destroyAllWindows()
if hasattr(self, 'video_writer'):
self.video_writer.release()
def update(self, t, track_history_length = None):
if track_history_length == None:
track_history_length = self.params['track_history_length']
return self.plotBetween(t - track_history_length, t)
def getImageClosestTo(self, t):
image_index = np.searchsorted(self.dataset.times, t, side="left") - 1
return self.dataset.images[image_index]
def drawMarker(self, img, x, y, color):
c = int(3 * self.params["scale"])
t = int(1 * self.params["scale"])
if self.marker == "cross":
cv2.line(img, (x-c, y), (x+c, y), color, thickness=t)
cv2.line(img, (x, y-c), (x, y+c), color, thickness=t)
elif self.marker == "circle":
cv2.circle(img, center=(x, y), radius=c, color=color, thickness=t)
def drawLegend(self, image, legend, size):
s = self.params["scale"]
off_x = int(size[1])
t = int(10 * s)
n = int(70 *s)
for label, color in legend.items():
if self.params['visualisation_mode'] == "gt" or label == "gt":
label = "ground truth"
if self.params['visualisation_mode'] == "track" or label == "track":
label = "track"
cv2.putText(image, label, (off_x-n, t), cv2.FONT_HERSHEY_COMPLEX, int(s/4), color)
t += int(10 *s)
return image
def plotBetween(self, t0, t1):
image = self.getImageClosestTo(t1).copy()
# resize
h,w,_ = image.shape
s = self.params["scale"]
image = cv2.resize(image, dsize=(int(w*s), int(h*s)))
image = self.drawLegend(image, self.colors, image.shape[:2])
for label, tracks_dict in self.tracks.items():
for feature_id, track in tracks_dict.items():
t = track[:,0]
track_segment = track[(t<=t1) & (t>=t0)]
if len(track_segment) > 0:
for point in track_segment[:-1]:
_, x, y = (s*point).astype(int)
#trail_marker = "cross" if label == "gt" else "dot"
trail_marker = "dot"
if self.params["visualisation_mode"] == "gt" or self.params["visualisation_mode"] == "track":
self.drawTrail(image, x, y, self.colors_id[feature_id], marker=trail_marker)
else:
self.drawTrail(image, x, y, self.colors[label], marker=trail_marker)
_, x, y = (s*track_segment[-1]).astype(int)
if self.params["visualisation_mode"] == "gt" or self.params["visualisation_mode"] == "track":
self.drawMarker(image, x, y, self.colors_id[feature_id])
else:
self.drawMarker(image, x, y, self.colors[label])
return image
def drawTrail(self, img, x, y, entry, marker="dot"):
c = 0*self.params["scale"]
if marker=="dot":
x_min = int(max([x - c, 0]))
x_max = int(min([x + c+self.params["scale"], img.shape[1]]))
y_min = int(max([y - c, 0]))
y_max = int(min([y + c+self.params["scale"], img.shape[0]]))
img[y_min:y_max,x_min:x_max,:] = np.array(entry)
elif marker=="cross":
c = int(2 * self.params['scale'])
t = int(.5 * self.params["scale"])
x_min = max([x - c, 0])
x_max = int(min([x + c+self.params["scale"], img.shape[1]]))
y_min = max([y - c, 0])
y_max = int(min([y + c+self.params["scale"], img.shape[0]]))
xmi =x-t/2
ymi =y-t/2
xma =x + t / 2
yma =y + t / 2
if x < 0 or x > img.shape[1]-1 or y < 0 or x > img.shape[0]-1:
return
img[y_min:y_max,xmi:xma, :] = np.array(entry)
img[ymi:yma,x_min:x_max, :] = np.array(entry)
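# Usage sketch (illustrative, not part of the original file): the keys below are the ones
# FeatureTracksVisualizer actually reads from `params`; `dataset` is assumed to be an
# object exposing `images` (list of H x W x 3 arrays) and `times` (sorted timestamps).
params = {
    "framerate": 30, "speed": 1.0, "scale": 2.0, "marker": "circle",
    "track_history_length": 0.2, "visualisation_mode": "estimation",
    "crop_to_predictions": True, "error_threshold": 10,
}
viz = FeatureTracksVisualizer("gt.txt", "tracks.txt", dataset, params)
viz.visualizationLoop()            # interactive playback window
# viz.writeToVideoFile("out.avi")  # or render straight to a video file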
| 37.035326 | 131 | 0.556387 | 13,385 | 0.982097 | 0 | 0 | 0 | 0 | 0 | 0 | 1,751 | 0.128476 |
f31e643bb5106928ddc94996a97d51a1aa497458 | 12,163 | py | Python | Polygon2-2.0.7/Polygon/IO.py | tangrams/landgrab | 217699e4730a1bdb7c9e03bfd9c2c0c31950eb7c | ["MIT"] | 20 | 2015-02-26T15:55:42.000Z | 2021-07-30T00:19:31.000Z | Polygon2-2.0.7/Polygon/IO.py | tangrams/landgrab | 217699e4730a1bdb7c9e03bfd9c2c0c31950eb7c | ["MIT"] | 1 | 2018-04-02T12:13:30.000Z | 2021-10-04T00:59:38.000Z | Polygon2-2.0.7/Polygon/IO.py | tangrams/landgrab | 217699e4730a1bdb7c9e03bfd9c2c0c31950eb7c | ["MIT"] | 5 | 2015-03-03T23:31:39.000Z | 2018-01-17T03:13:34.000Z |
# -*- coding: utf-8 -*-
"""
This module provides functions for reading and writing Polygons in different
formats.
The following write-methods will accept different argument types for the
output. If ofile is None, the method will create and return a StringIO-object.
If ofile is a string, a file with that name will be created. If ofile is a
file, it will be used for writing.
The following read-methods will accept different argument types for the
input. A file or StringIO object will be used directly. If the argument is a
string, the function tries to read a file with that name. If it fails, it
will evaluate the string directly.
"""
from cPolygon import Polygon
from types import StringTypes
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from xml.dom.minidom import parseString, Node
from struct import pack, unpack, calcsize
try:
import reportlab
hasPDFExport = True
except:
hasPDFExport = False
try:
import Imaging
hasPILExport = True
except:
hasPILExport = False
## some helpers
def __flatten(s):
for a in s:
for b in a:
yield b
def __couples(s):
for i in range(0, len(s), 2):
yield s[i], s[i+1]
def __unpack(f, b):
s = calcsize(f)
return unpack(f, b[:s]), b[s:]
class __RingBuffer:
def __init__(self, seq):
self.s = seq
self.i = 0
self.l = len(seq)
def __call__(self):
o = self.s[self.i]
self.i += 1
if self.i == self.l:
self.i = 0
return o
def getWritableObject(ofile):
"""try to make a writable file-like object from argument"""
if ofile is None:
return StringIO(), False
elif type(ofile) in StringTypes:
return open(ofile, 'w'), True
elif type(ofile) in (file, StringIO):
return ofile, False
else:
raise Exception("Can't make a writable object from argument!")
def getReadableObject(ifile):
"""try to make a readable file-like object from argument"""
if type(ifile) in StringTypes:
try:
return open(ifile, 'r'), True
except:
return StringIO(ifile), True
elif type(ifile) in (file, StringIO):
return ifile, False
else:
raise Exception("Can't make a readable object from argument!")
def decodeBinary(bin):
"""
Create Polygon from a binary string created with encodeBinary(). If the string
is not valid, the whole thing may break!
:Arguments:
- s: string
:Returns:
new Polygon
"""
nC, b = __unpack('!I', bin)
p = Polygon()
for i in range(nC[0]):
x, b = __unpack('!l', b)
if x[0] < 0:
isHole = 1
s = -2*x[0]
else:
isHole = 0
s = 2*x[0]
flat, b = __unpack('!%dd' % s, b)
p.addContour(tuple(__couples(flat)), isHole)
return p
def encodeBinary(p):
"""
Encode Polygon p to a binary string. The binary string will be in a standard
format with network byte order and should be rather machine independent.
There's no redundancy in the string, any damage will make the whole polygon
information unusable.
:Arguments:
- p: Polygon
:Returns:
string
"""
l = [pack('!I', len(p))]
for i, c in enumerate(p):
l.append(pack('!l', len(c)*(1,-1)[p.isHole(i)]))
l.append(pack('!%dd' %(2*len(c)), *__flatten(c)))
return "".join(l)
def writeGnuplot(ofile, polylist):
"""
Write a list of Polygons to a gnuplot file, which may be plotted using the
command ``plot "ofile" with lines`` from gnuplot.
:Arguments:
- ofile: see above
- polylist: sequence of Polygons
:Returns:
ofile object
"""
f, cl = getWritableObject(ofile)
for p in polylist:
for vl in p:
for j in vl:
f.write('%g %g\n' % tuple(j))
f.write('%g %g\n\n' % tuple(vl[0]))
if cl: f.close()
return f
def writeGnuplotTriangles(ofile, polylist):
"""
Converts a list of Polygons to triangles and writes the triangle data to a
gnuplot file, which may be plotted using the command
``plot "ofile" with lines`` from gnuplot.
:Arguments:
- ofile: see above
- polylist: sequence of Polygons
:Returns:
ofile object
"""
f, cl = getWritableObject(ofile)
for p in polylist:
for vl in p.triStrip():
j = 0
for j in range(len(vl)-2):
f.write('%g %g \n %g %g \n %g %g \n %g %g\n\n' %
tuple(vl[j]+vl[j+1]+vl[j+2]+vl[j]))
f.write('\n')
if cl: f.close()
f.close()
def writeSVG(ofile, polylist, width=None, height=None, fill_color=None,
fill_opacity=None, stroke_color=None, stroke_width=None):
"""
Write a SVG representation of the Polygons in polylist, width and/or height
will be adapted if not given. fill_color, fill_opacity, stroke_color and
stroke_width can be sequences of the corresponding SVG style attributes to use.
:Arguments:
- ofile: see above
- polylist: sequence of Polygons
- optional width: float
- optional height: height
- optional fill_color: sequence of colors (3-tuples of floats: RGB)
- optional fill_opacity: sequence of colors
- optional stroke_color: sequence of colors
- optional stroke_width: sequence of floats
:Returns:
ofile object
"""
f, cl = getWritableObject(ofile)
pp = [Polygon(p) for p in polylist] # use clones only
[p.flop(0.0) for p in pp] # adopt to the SVG coordinate system
bbs = [p.boundingBox() for p in pp]
bbs2 = zip(*bbs)
minx = min(bbs2[0])
maxx = max(bbs2[1])
miny = min(bbs2[2])
maxy = max(bbs2[3])
xdim = maxx-minx
ydim = maxy-miny
if not (xdim or ydim):
raise Error("Polygons have no extent in one direction!")
a = ydim / xdim
if not width and not height:
if a < 1.0:
width = 300
else:
height = 300
if width and not height:
height = width * a
if height and not width:
width = height / a
npoly = len(pp)
fill_color = __RingBuffer(fill_color or ((255,0,0), (0,255,0), (0,0,255), (255,255,0)))
fill_opacity = __RingBuffer(fill_opacity or (1.0,))
stroke_color = __RingBuffer(stroke_color or ((0,0,0),))
stroke_width = __RingBuffer(stroke_width or (1.0,))
s = ['<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>',
'<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">',
'<svg xmlns="http://www.w3.org/2000/svg" width="%d" height="%d">' % (width, height)]
for i in range(npoly):
p = pp[i]
bb = bbs[i]
p.warpToBox(width*(bb[0]-minx)/xdim, width*(bb[1]-minx)/xdim,
height*(bb[2]-miny)/ydim, height*(bb[3]-miny)/ydim)
subl = ['<path style="fill:rgb%s;fill-opacity:%s;fill-rule:evenodd;stroke:rgb%s;stroke-width:%s;" d="' %
(fill_color(), fill_opacity(), stroke_color(), stroke_width())]
for c in p:
subl.append('M %g, %g %s z ' % (c[0][0], c[0][1], ' '.join([("L %g, %g" % (a,b)) for a,b in c[1:]])))
subl.append('"/>')
s.append(''.join(subl))
s.append('</svg>')
f.write('\n'.join(s))
if cl: f.close()
return f
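# Usage sketch (illustrative, not part of the original module): writing two polygons to an
# SVG file with alternating fill colors drawn from the fill_color ring buffer.
tri = Polygon(((0.0, 0.0), (2.0, 0.0), (1.0, 2.0)))
box = Polygon(((3.0, 0.0), (5.0, 0.0), (5.0, 2.0), (3.0, 2.0)))
writeSVG('polys.svg', [tri, box], width=400, fill_color=((255, 0, 0), (0, 0, 255)))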
def writeXML(ofile, polylist, withHeader=False):
"""
Write a readable representation of the Polygons in polylist to a XML file.
A simple header can be added to make the file parsable.
:Arguments:
- ofile: see above
- polylist: sequence of Polygons
- optional withHeader: bool
:Returns:
ofile object
"""
f, cl = getWritableObject(ofile)
if withHeader:
f.write('<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>\n')
for p in polylist:
l = ['<polygon contours="%d" area="%g" xMin="%g" xMax="%g" yMin="%g" yMax="%g">' % ((len(p), p.area())+p.boundingBox())]
for i, c in enumerate(p):
l.append(' <contour points="%d" isHole="%d" area="%g" xMin="%g" xMax="%g" yMin="%g" yMax="%g">' \
% ((len(c), p.isHole(i), p.area(i))+p.boundingBox(i)))
for po in c:
l.append(' <p x="%g" y="%g"/>' % po)
l.append(' </contour>')
l.append('</polygon>\n')
f.write('\n'.join(l))
if cl: f.close()
return f
def readXML(ifile):
"""
Read a list of Polygons from a XML file which was written with writeXML().
:Arguments:
- ofile: see above
:Returns:
list of Polygon objects
"""
f, cl = getReadableObject(ifile)
d = parseString(f.read())
if cl: f.close()
plist = []
for pn in d.getElementsByTagName('polygon'):
p = Polygon()
plist.append(p)
for sn in pn.childNodes:
if not sn.nodeType == Node.ELEMENT_NODE:
continue
assert sn.tagName == 'contour'
polist = []
for pon in sn.childNodes:
if not pon.nodeType == Node.ELEMENT_NODE:
continue
polist.append((float(pon.getAttribute('x')), float(pon.getAttribute('y'))))
assert int(sn.getAttribute('points')) == len(polist)
p.addContour(polist, int(sn.getAttribute('isHole')))
assert int(pn.getAttribute('contours')) == len(p)
return plist
if hasPDFExport:
def writePDF(ofile, polylist, pagesize=None, linewidth=0, fill_color=None):
"""
*This function is only available if the reportlab package is installed!*
Write a the Polygons in polylist to a PDF file.
:Arguments:
- ofile: see above
- polylist: sequence of Polygons
- optional pagesize: 2-tuple of floats
- optional linewidth: float
- optional fill_color: color
:Returns:
ofile object
"""
from reportlab.pdfgen import canvas
from reportlab.lib.colors import red, green, blue, yellow, black, white
if not pagesize:
from reportlab.lib.pagesizes import A4
pagesize = A4
can = canvas.Canvas(ofile, pagesize=pagesize)
can.setLineWidth(linewidth)
pp = [Polygon(p) for p in polylist] # use clones only
bbs = [p.boundingBox() for p in pp]
bbs2 = zip(*bbs)
minx = min(bbs2[0])
maxx = max(bbs2[1])
miny = min(bbs2[2])
maxy = max(bbs2[3])
xdim = maxx-minx
ydim = maxy-miny
if not (xdim or ydim):
raise Error("Polygons have no extent in one direction!")
a = ydim / xdim
width, height = pagesize
if a > (height/width):
width = height / a
else:
height = width * a
npoly = len(pp)
fill_color = __RingBuffer(fill_color or (red, green, blue, yellow))
for i in range(npoly):
p = pp[i]
bb = bbs[i]
p.warpToBox(width*(bb[0]-minx)/xdim, width*(bb[1]-minx)/xdim,
height*(bb[2]-miny)/ydim, height*(bb[3]-miny)/ydim)
for poly in pp:
solids = [poly[i] for i in range(len(poly)) if poly.isSolid(i)]
can.setFillColor(fill_color())
for c in solids:
p = can.beginPath()
p.moveTo(c[0][0], c[0][1])
for i in range(1, len(c)):
p.lineTo(c[i][0], c[i][1])
p.close()
can.drawPath(p, stroke=1, fill=1)
holes = [poly[i] for i in range(len(poly)) if poly.isHole(i)]
can.setFillColor(white)
for c in holes:
p = can.beginPath()
p.moveTo(c[0][0], c[0][1])
for i in range(1, len(c)):
p.lineTo(c[i][0], c[i][1])
p.close()
can.drawPath(p, stroke=1, fill=1)
can.showPage()
can.save()
| 31.840314 | 128 | 0.564499 | 254 | 0.020883 | 151 | 0.012415 | 0 | 0 | 0 | 0 | 4,512 | 0.370961 |
f3200d5d53315321e6ef6c3cef5d42425590c96b | 743 | py | Python | strings/reverse_string.py | ahcode0919/python-ds-algorithms | 0d617b78c50b6c18da40d9fa101438749bfc82e1 | ["MIT"] | null | null | null | strings/reverse_string.py | ahcode0919/python-ds-algorithms | 0d617b78c50b6c18da40d9fa101438749bfc82e1 | ["MIT"] | null | null | null | strings/reverse_string.py | ahcode0919/python-ds-algorithms | 0d617b78c50b6c18da40d9fa101438749bfc82e1 | ["MIT"] | 3 | 2020-10-07T20:24:45.000Z | 2020-12-16T04:53:19.000Z |
from typing import List, Optional


def reverse_string(string: str) -> str:
    return string[::-1]


def reverse_string_in_place(string: [str]):
    index = 0
    length = len(string)
    middle = length / 2
    while index < middle:
        string[index], string[length - 1 - index] = string[length - 1 - index], string[index]
        index += 1


def reverse_string_with_list_comprehension(string: str) -> str:
    return ''.join([string[i] for i in range(len(string) - 1, -1, -1)])


def reverse_string_with_loop(string: str) -> str:
    reversed_str: List[Optional[str]] = [None] * len(string)
    for index in range(len(string) - 1, -1, -1):
        reversed_str[len(string) - 1 - index] = string[index]
    return ''.join(reversed_str)
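# Usage sketch (illustrative, not part of the original file): the in-place variant expects
# a mutable list of characters; the other three return a new string.
assert reverse_string("hello") == "olleh"
chars = list("hello")
reverse_string_in_place(chars)
assert chars == list("olleh")
assert reverse_string_with_loop("hello") == "olleh"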
| 28.576923 | 93 | 0.643338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.005384 |
f3212e189d04ba2e4747e03dc77f4721f12f30e5 | 14,706 | py | Python | qnarre/prep/tokens/realm.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | ["MIT"] | null | null | null | qnarre/prep/tokens/realm.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | ["MIT"] | null | null | null | qnarre/prep/tokens/realm.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | ["MIT"] | null | null | null |
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import collections
import os
import unicodedata
from ...tokens.utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from ...tokens.base import BatchEncoding
from ...utils import PaddingStrategy
VOCAB_FS = {"vocab_file": "vocab.txt"}
VOCAB_MAP = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt",
"google/realm-cc-news-pretrained-encoder": "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt",
"google/realm-cc-news-pretrained-scorer": "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt",
"google/realm-cc-news-pretrained-openqa": "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt",
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
}
}
INPUT_CAPS = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
def load_vocab(vocab_file):
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n")
vocab[token] = index
return vocab
def whitespace_tokenize(text):
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class Tokenizer(PreTrainedTokenizer):
vocab_fs = VOCAB_FS
vocab_map = VOCAB_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
input_caps = INPUT_CAPS
def __init__(
self,
vocab_file,
do_lower_case=True,
do_basic_tokenize=True,
never_split=None,
unk="[UNK]",
sep="[SEP]",
pad="[PAD]",
cls="[CLS]",
msk="[MASK]",
tokenize_chinese_chars=True,
strip_accents=None,
**kw,
):
super().__init__(
do_lower_case=do_lower_case,
do_basic_tokenize=do_basic_tokenize,
never_split=never_split,
unk=unk,
sep=sep,
pad=pad,
cls=cls,
msk=msk,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
**kw,
)
if not os.path.isfile(vocab_file):
raise ValueError(
f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = RealmTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
)
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()]
)
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(
do_lower_case=do_lower_case,
never_split=never_split,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk=self.unk)
@property
def do_lower_case(self):
return self.basic_tokenizer.do_lower_case
@property
def s_vocab(self):
return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def _tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
# If the token is part of the never_split set
if token in self.basic_tokenizer.never_split:
split_tokens.append(token)
else:
split_tokens += self.wordpiece_tokenizer.tokenize(token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.vocab.get(token, self.vocab.get(self.unk))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.ids_to_tokens.get(index, self.unk)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
out_string = " ".join(tokens).replace(" ##", "").strip()
return out_string
def batch_encode_candidates(self, text, **kw):
kw["padding"] = PaddingStrategy.MAX_LENGTH
batch_text = text
batch_text_pair = kw.pop("text_pair", None)
return_tensors = kw.pop("return_tensors", None)
output_data = {
"input_ids": [],
"attention_mask": [],
"token_type_ids": [],
}
for idx, candidate_text in enumerate(batch_text):
if batch_text_pair is not None:
candidate_text_pair = batch_text_pair[idx]
else:
candidate_text_pair = None
encoded_candidates = super().__call__(
candidate_text, candidate_text_pair, return_tensors=None, **kw
)
encoded_input_ids = encoded_candidates.get("input_ids")
encoded_attention_mask = encoded_candidates.get("attention_mask")
encoded_token_type_ids = encoded_candidates.get("token_type_ids")
if encoded_input_ids is not None:
output_data["input_ids"].append(encoded_input_ids)
if encoded_attention_mask is not None:
output_data["attention_mask"].append(encoded_attention_mask)
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(encoded_token_type_ids)
output_data = dict((key, item) for key, item in output_data.items() if len(item) != 0)
return BatchEncoding(output_data, tensor_type=return_tensors)
def build_inputs_with_special_tokens(self, toks_0, toks_1=None):
if toks_1 is None:
return [self.cls_token_id] + toks_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + toks_0 + sep + toks_1 + sep
def get_special_tokens_mask(
self,
toks_0,
toks_1=None,
has_specials=False,
):
if has_specials:
return super().get_special_tokens_mask(toks_0=toks_0, toks_1=toks_1, has_specials=True)
if toks_1 is not None:
return [1] + ([0] * len(toks_0)) + [1] + ([0] * len(toks_1)) + [1]
return [1] + ([0] * len(toks_0)) + [1]
def create_token_type_ids_from_sequences(self, toks_0, toks_1=None):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if toks_1 is None:
return len(cls + toks_0 + sep) * [0]
return len(cls + toks_0 + sep) * [0] + len(toks_1 + sep) * [1]
def save_vocabulary(self, dir, pre=None):
index = 0
if os.path.isdir(dir):
vocab_file = os.path.join(
dir,
(pre + "-" if pre else "") + VOCAB_FS["vocab_file"],
)
else:
vocab_file = (pre + "-" if pre else "") + dir
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!"
)
index = token_index
writer.write(token + "\n")
index += 1
return (vocab_file,)
class BasicTokenizer(object):
def __init__(
self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None
):
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = set(never_split)
self.tokenize_chinese_chars = tokenize_chinese_chars
self.strip_accents = strip_accents
def tokenize(self, text, never_split=None):
never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
text = self._clean_text(text)
if self.tokenize_chinese_chars:
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if token not in never_split:
if self.do_lower_case:
token = token.lower()
if self.strip_accents is not False:
token = self._run_strip_accents(token)
elif self.strip_accents:
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token, never_split))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text, never_split=None):
if never_split is not None and text in never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
def _clean_text(self, text):
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xFFFD or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
def __init__(self, vocab, unk, max_input_chars_per_word=100):
self.vocab = vocab
self.unk = unk
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk)
else:
output_tokens.extend(sub_tokens)
return output_tokens
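# Usage sketch (illustrative, not part of the original file): greedy longest-match-first
# WordPiece on a toy vocabulary.
toy_vocab = {"[UNK]": 0, "un": 1, "##aff": 2, "##able": 3}
wp = WordpieceTokenizer(vocab=toy_vocab, unk="[UNK]")
print(wp.tokenize("unaffable"))  # ['un', '##aff', '##able']
print(wp.tokenize("xyz"))        # ['[UNK]'] -- no subword of "xyz" is in the vocabulary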
| 37.707692 | 141 | 0.589963 | 11,313 | 0.769278 | 0 | 0 | 151 | 0.010268 | 0 | 0 | 3,155 | 0.214538 |
f323bb4c6d1d42af8adea82f66966d109724eba9 | 29,495 | py | Python | api/fileupload.py | subhendu01/Audio-FIle-Server | 6c7f9a093e41f0750a0a8c4c1f0e48608215c8a6 | ["MIT"] | 5 | 2021-05-12T18:18:49.000Z | 2022-01-06T12:35:35.000Z | api/fileupload.py | subhendu01/Audio-FIle-Server | 6c7f9a093e41f0750a0a8c4c1f0e48608215c8a6 | ["MIT"] | null | null | null | api/fileupload.py | subhendu01/Audio-FIle-Server | 6c7f9a093e41f0750a0a8c4c1f0e48608215c8a6 | ["MIT"] | null | null | null |
import datetime, os, base64
from flask import Flask, jsonify, request, Blueprint
from dbstore import dbconf
import json
from bson import json_util
# process kill
# lsof -i tcp:3000
file_upload = Blueprint('uploadAPI', __name__)
app = Flask(__name__)
def song_upload(val):
try:
# content = request.get_json()
curs = dbconf.file_store.find().sort( [("_id", -1)] ).limit(1)
if curs.count() > 0:
for rec in curs:
id_val = rec["audioFileMetadata"]["id"]
id = id_val + 1
else:
id = 1
audio_file_id = int(val["audio_file_id"])
cursor_file_id = dbconf.file_store.find({'audioFileMetadata.audio_file_id': audio_file_id})
if cursor_file_id.count() == 0:
song_name = str(val['song_name'])
duration_sec = int(val['duration_sec'])
upload_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if len(song_name) != 0 and len(song_name) <= 100:
if duration_sec >= 0:
msg = "Successful"
response = {
"status": 200,
"msg": msg,
"id": id,
"song_name": song_name,
"duration_sec": duration_sec,
"upload_time": upload_time,
"audio_file_id": audio_file_id
}
else:
msg = "Duration should be positive integer number"
response = {
"status": 400,
"msg": msg,
"upload_time": upload_time
}
else:
msg = "Song name should be between 0 to 100 characters"
response = {
"status": 400,
"msg": msg,
"upload_time": upload_time
}
else:
msg = "Duplicate audio id found."
response = {
"status": 400,
"msg": msg
}
return response
except Exception as e:
print(str(e))
response = {
"status": 500,
"msg": "Something went wrong."
}
return response
def podcast_upload(val):
try:
curs = dbconf.file_store.find().sort( [("_id", -1)] ).limit(1)
if curs.count() > 0:
for rec in curs:
id_val = rec["audioFileMetadata"]["id"]
id = id_val + 1
else:
id = 1
audio_file_id = int(val["audio_file_id"])
cursor_file_id = dbconf.file_store.find({'audioFileMetadata.audio_file_id': audio_file_id})
if cursor_file_id.count() == 0:
podcast_name = str(val['podcast_name'])
duration_sec = int(val['duration_sec'])
upload_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
host = str(val['host'])
participant = val['participant']
# print(id, podcast_name, duration_sec, upload_time, host, participant)
if len(podcast_name) <= 100:
if duration_sec >= 0:
exceed_leng = [ x for x in participant if len(x) >= 100]
if len(participant) <= 10 and len(exceed_leng) == 0:
if len(host) <= 100:
msg = "sucessful"
response = {
"status": 200,
"msg": msg,
"id": id,
"podcast_name": podcast_name,
"duration_sec": duration_sec,
"upload_time": upload_time,
"host": host,
"participant": participant,
"audio_file_id": audio_file_id
}
else:
msg = "Host cannot be larger than 100 characters."
response = {
"status": 400,
"msg": msg,
"upload_time": upload_time
}
else:
msg = "Each string cannot be larger than 100 characters, maximum of 10 participants possible"
response = {
"status": 400,
"msg": msg,
"upload_time": upload_time
}
else:
msg = "Duration should be positive integer number"
response = {
"status": 400,
"msg": msg,
"upload_time": upload_time
}
else:
msg = "Name of the podcast cannot be larger than 100 characters."
response = {
"status": 400,
"msg": msg,
"upload_time": upload_time
}
else:
msg = "Duplicate audio id found."
response = {
"status": 400,
"msg": msg
}
return response
except Exception as e:
print(str(e))
response = {
"status": 500,
"msg": "Something went wrong."
}
return response
def audiobook_upload(val):
try:
# content = request.get_json()
curs = dbconf.file_store.find().sort( [("_id", -1)]).limit(1)
if curs.count() > 0:
for rec in curs:
id_val = rec["audioFileMetadata"]["id"]
id = id_val + 1
else:
id = 1
audio_file_id = int(val["audio_file_id"])
cursor_file_id = dbconf.file_store.find({'audioFileMetadata.audio_file_id': audio_file_id})
if cursor_file_id.count() == 0:
audiobook_title = str(val['audiobook_title'])
author_title = str(val['author_title'])
narrator = str(val['narrator'])
duration_sec = int(val['duration_sec'])
upload_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if len(audiobook_title) <= 100 and len(audiobook_title) != 0:
if len(author_title) <= 100 and len(author_title) != 0:
if len(narrator) <=100 and len(narrator) != 0:
if duration_sec >= 0:
msg = "sucessful"
response = {
"status": 200,
"msg": msg,
"id": id,
"audiobook_title": audiobook_title,
"author_title": author_title,
"narrator": narrator,
"duration_sec": duration_sec,
"upload_time": upload_time,
"audio_file_id": audio_file_id
}
else:
msg = "Duration should be positive integer number"
response = {
"status": 400,
"msg": msg,
"upload_time": upload_time
}
else:
msg = "Narrator should be between 0 to 100 characters."
response = {
"status": 400,
"msg": msg,
"upload_time": upload_time
}
else:
msg = "Author title should be between 0 to 100 characters."
response = {
"status": 400,
"msg": msg,
"upload_time": upload_time
}
else:
msg = "Audiobook should be between 0 to 100 characters."
response = {
"status": 400,
"msg": msg,
"upload_time": upload_time
}
else:
msg = "Duplicate audio id found."
response = {
"status": 400,
"msg": msg
}
return response
except Exception as e:
print(str(e))
msg = "Something went wrong."
response = {
"status": 500,
"msg": msg
}
return response
@file_upload.route('api/_create', methods= ['POST'])
def create():
try:
if request.method == "POST":
#getting all the parameters
content = request.get_json()
# print(content)
audioFileType = content['audioFileType']
#for song type
if audioFileType.lower() == 'song':
audioFileMetadata = "song"
#calling the song-upload method for song type
func_call = song_upload(content['audioFileMetadata'])
if func_call["status"] == 200:
audioFileMetadata = {
"duration_sec": func_call["duration_sec"],
"id": func_call["id"],
"song_name": func_call['song_name'],
"upload_time": func_call['upload_time'],
"audio_file_id": func_call['audio_file_id']
}
rec = {
"audioFileType": audioFileType.lower(),
"audioFileMetadata": audioFileMetadata
}
# insert the data into collection
data = json.loads(json_util.dumps(rec))
dbconf.file_store.insert(rec)
response = {
"status": func_call["status"],
"msg": func_call["msg"],
"record": data
}
# print(response)
elif func_call["status"] == 400:
response = {
"status": func_call["status"],
"msg": func_call["msg"]
}
elif func_call["status"] == 500:
response = {
"status": func_call["status"],
"msg": func_call["msg"]
}
#for podcast type
elif audioFileType.lower() == 'podcast':
audioFileMetadata = "podcast"
func_call = podcast_upload(content['audioFileMetadata'])
if func_call["status"] == 200:
audioFileMetadata = {
"podcast_name": func_call["podcast_name"],
"id": func_call["id"],
"duration_sec": func_call["duration_sec"],
"host": func_call['host'],
"upload_time": func_call['upload_time'],
"participant": func_call["participant"],
"audio_file_id": func_call['audio_file_id']
}
rec = {
"audioFileType": audioFileType.lower(),
"audioFileMetadata": audioFileMetadata
}
data = json.loads(json_util.dumps(rec))
dbconf.file_store.insert(rec)
response = {
"status": func_call["status"],
"msg": func_call["msg"],
"record": data
}
# print(response)
elif func_call["status"] == 400:
response = {
"status": func_call["status"],
"msg": func_call["msg"]
}
elif func_call["status"] == 500:
response = {
"status": func_call["status"],
"msg": func_call["msg"]
}
#for audiobook type
elif audioFileType.lower() == 'audiobook':
audioFileMetadata = "audiobook"
func_call = audiobook_upload(content['audioFileMetadata'])
if func_call["status"] == 200:
audioFileMetadata = {
"audiobook_title": func_call["audiobook_title"],
"id": func_call["id"],
"duration_sec": func_call["duration_sec"],
"author_title": func_call['author_title'],
"upload_time": func_call['upload_time'],
"narrator": func_call["narrator"],
"audio_file_id": func_call['audio_file_id']
}
rec = {
"audioFileType": audioFileType.lower(),
"audioFileMetadata": audioFileMetadata
}
data = json.loads(json_util.dumps(rec))
dbconf.file_store.insert(rec)
response = {
"status": func_call["status"],
"msg": func_call["msg"],
"record": data
}
# print(response)
elif func_call["status"] == 400:
response = {
"status": func_call["status"],
"msg": func_call["msg"]
}
elif func_call["status"] == 500:
response = {
"status": func_call["status"],
"msg": func_call["msg"]
}
# print(response)
else:
response = {
"status": 400,
"msg": "Bad request."
}
else:
response = {
"status": 400,
"msg": "Bad request."
}
return jsonify(response)
except Exception as e:
print(str(e))
response = {
"status": 500,
"msg": "Something went wrong."
}
return jsonify(response)
@file_upload.route('api/_delete/<string:audioFileType>/<int:audioFileID>', methods= ['DELETE'])
def delete_(audioFileType, audioFileID):
try:
if request.method == "DELETE":
cursor = dbconf.file_store.find({"audioFileType": audioFileType.lower(), 'audioFileMetadata.audio_file_id': audioFileID})
if cursor.count() != 0:
dbconf.file_store.remove({"audioFileType": audioFileType.lower(), 'audioFileMetadata.audio_file_id': audioFileID})
response = {
"status": 200,
"msg": "Sucessfull.",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
else:
response = {
"status": 400,
"msg": "audio file ID is not found.",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
else:
response = {
"status": 400,
"msg": "Bad request."
}
return jsonify(response)
except Exception as e:
print(str(e))
response = {
"status": 500,
"msg": "Something went wrong."
}
return jsonify(response)
@file_upload.route('api/_update/<string:audioFileType>/<int:audioFileID>', methods= ['PUT'])
def update(audioFileType, audioFileID):
try:
if request.method == "PUT":
content = request.json
cursor = dbconf.file_store.find({"audioFileType": audioFileType.lower(), 'audioFileMetadata.audio_file_id': audioFileID})
if cursor.count() != 0:
#song type
if audioFileType.lower() == 'song':
song_name = content["audioFileMetadata"]["song_name"]
duration_sec = content["audioFileMetadata"]["duration_sec"]
if len(song_name) != 0 and len(song_name) <= 100:
if duration_sec >= 0:
myquery = {"audioFileType": audioFileType.lower(), 'audioFileMetadata.audio_file_id': audioFileID}
newvalues = { "$set": {
"audioFileMetadata.duration_sec": duration_sec,
"audioFileMetadata.song_name": song_name,
"audioFileMetadata.upload_time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}}
dbconf.file_store.update_one(myquery, newvalues)
response = {
"status": 200,
"msg": "Sucessfull.",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
#for duration
else:
response = {
"status": 400,
"msg": "Duration should be positive integer number",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
#for song name
else:
response = {
"status": 400,
"msg": "Song name should be between 0 to 100 characters",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
#podcast type
elif audioFileType.lower() == 'podcast':
podcast_name = content["audioFileMetadata"]["podcast_name"]
duration_sec = content["audioFileMetadata"]["duration_sec"]
host = content["audioFileMetadata"]["host"]
participant = content["audioFileMetadata"]["participant"]
if len(podcast_name) != 0 and len(podcast_name) <= 100:
if duration_sec >= 0:
exceed_leng = [ x for x in participant if len(x) >= 100]
if len(participant) <= 10 and len(exceed_leng) == 0:
if len(host) != 0 and len(host) <= 100:
myquery = {"audioFileType": audioFileType.lower(), 'audioFileMetadata.audio_file_id': audioFileID}
newvalues = { "$set": {
"audioFileMetadata.podcast_name": podcast_name,
"audioFileMetadata.duration_sec": duration_sec,
"audioFileMetadata.host": host,
"audioFileMetadata.participant": participant,
"audioFileMetadata.upload_time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}}
dbconf.file_store.update_one(myquery, newvalues)
response = {
"status": 200,
"msg": "Sucessfull.",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
#for host
else:
response = {
"status": 400,
"msg": "Host should be between 0 to 100 characters",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
#participant
else:
response = {
"status": 400,
"msg": "Each string cannot be larger than 100 characters, maximum of 10 participants possible",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
#duration
else:
response = {
"status": 400,
"msg": "Duration should be positive integer number",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
#podcast_name
else:
response = {
"status": 400,
"msg": "Name of the podcast should be between 0 to 100 characters",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
#audiobook type
elif audioFileType.lower() == 'audiobook':
audiobook_title = content["audioFileMetadata"]["audiobook_title"]
duration_sec = content["audioFileMetadata"]["duration_sec"]
author_title = content["audioFileMetadata"]["author_title"]
narrator = content["audioFileMetadata"]["narrator"]
if len(audiobook_title) != 0 and len(audiobook_title) <= 100:
if len(author_title) != 0 and len(author_title) <= 100:
if len(narrator) != 0 and len(narrator) <=100:
if duration_sec >= 0:
myquery = {"audioFileType": audioFileType.lower(), 'audioFileMetadata.audio_file_id': audioFileID}
newvalues = { "$set": {
"audioFileMetadata.audiobook_title": audiobook_title,
"audioFileMetadata.duration_sec": duration_sec,
"audioFileMetadata.author_title": author_title,
"audioFileMetadata.narrator": narrator,
"audioFileMetadata.upload_time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}}
dbconf.file_store.update_one(myquery, newvalues)
response = {
"status": 200,
"msg": "Sucessfull.",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
else:
response = {
"status": 400,
"msg": "Duration should be positive integer number",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
else:
response = {
"status": 400,
"msg": "Narrator should be between 0 to 100 characters.",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
else:
response = {
"status": 400,
"msg": "Author title should be between 0 to 100 characters.",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
else:
response = {
"status": 400,
"msg": "Audiobook should be between 0 to 100 characters.",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
else:
response = {
"status": 400,
"msg": "audio file ID is not found.",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
else:
response = {
"status": 400,
"msg": "Bad request."
}
return jsonify(response)
except Exception as e:
print(str(e))
response = {
"status": 500,
"msg": "Something went wrong."
}
return jsonify(response)
@file_upload.route("api/_getapi/<audioFileType>", methods=["GET"], defaults={"audioFileID": None})
@file_upload.route('api/_getapi/<string:audioFileType>/<int:audioFileID>', methods= ['GET'])
def getapi(audioFileType, audioFileID):
try:
if request.method == 'GET':
if audioFileID is not None:
cursor = dbconf.file_store.find({"audioFileType": audioFileType.lower(), 'audioFileMetadata.audio_file_id': audioFileID})
if cursor.count() != 0:
for rec in cursor:
if rec["audioFileType"] == 'song':
audio_file = rec["audioFileMetadata"]["song_name"]
if rec["audioFileType"] == 'podcast':
audio_file= rec["audioFileMetadata"]["podcast_name"]
if rec["audioFileType"] == 'audiobook':
audio_file= rec["audioFileMetadata"]["audiobook_title"]
response = {
"status": 200,
"msg": "Sucessfull.",
"audioFileType": audioFileType,
"audio_file": audio_file
}
else:
response = {
"status": 400,
"msg": "audio file ID is not found.",
"audioFileType": audioFileType,
"audioFileID": audioFileID
}
else:
cursor = dbconf.file_store.find({"audioFileType": str(audioFileType.lower())})
if cursor.count() != 0:
audio_list = []
for rec in cursor:
if rec["audioFileType"] == 'song':
audio_list.append(rec["audioFileMetadata"]["song_name"])
if rec["audioFileType"] == 'podcast':
audio_list.append(rec["audioFileMetadata"]["podcast_name"])
if rec["audioFileType"] == 'audiobook':
audio_list.append(rec["audioFileMetadata"]["audiobook_title"])
response = {
"status": 200,
"msg": "Sucessfull.",
"audioFileType": audioFileType,
"audio_list": audio_list
}
else:
response = {
"status": 400,
"msg": "Audio files not found.",
"audioFileType": audioFileType
}
else:
response = {
"status": 400,
"msg": "Bad request."
}
return jsonify(response)
except Exception as e:
print(str(e))
response = {
"status": 500,
"msg": "Something went wrong."
}
return jsonify(response)
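# Usage sketch (illustrative, not part of the original module): the JSON body the
# api/_create route expects for a "song" upload, based on the fields song_upload() reads;
# the id, name, duration, and host below are made up.
payload = {
    "audioFileType": "song",
    "audioFileMetadata": {
        "audio_file_id": 101,
        "song_name": "Example Track",
        "duration_sec": 240,
    },
}
# e.g. with the requests library:
#   requests.post("http://localhost:5000/api/_create", json=payload)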
| 45.376923 | 137 | 0.398644 | 0 | 0 | 0 | 0 | 20,390 | 0.691304 | 0 | 0 | 6,818 | 0.231158 |
f324f6cba05e902a8556f523455c852d7fd15d3d | 2,542 | py | Python | dna/zfec/zfec/cmdline_zunfec.py | bobbae/examples | 6c998e2af9a48f7173a0b6b1ff0176df7edceda5 | ["Unlicense"] | null | null | null | dna/zfec/zfec/cmdline_zunfec.py | bobbae/examples | 6c998e2af9a48f7173a0b6b1ff0176df7edceda5 | ["Unlicense"] | null | null | null | dna/zfec/zfec/cmdline_zunfec.py | bobbae/examples | 6c998e2af9a48f7173a0b6b1ff0176df7edceda5 | ["Unlicense"] | null | null | null |
#!/usr/bin/env python
# zfec -- a fast C implementation of Reed-Solomon erasure coding with
# command-line, C, and Python interfaces
from __future__ import print_function
import os, sys, argparse
from zfec import filefec
from zfec import __version__ as libversion
__version__ = libversion
def main():
if '-V' in sys.argv or '--version' in sys.argv:
print("zfec library version: ", libversion)
print("zunfec command-line tool version: ", __version__)
return 0
parser = argparse.ArgumentParser(description="Decode data from share files.")
parser.add_argument('-o', '--outputfile', required=True, help='file to write the resulting data to, or "-" for stdout', type=str, metavar='OUTF')
parser.add_argument('sharefiles', nargs='*', help='shares file to read the encoded data from', type=str, metavar='SHAREFILE')
parser.add_argument('-v', '--verbose', help='print out messages about progress', action='store_true')
    parser.add_argument('-f', '--force', help='overwrite any file which already exists in place of the output file', action='store_true')
parser.add_argument('-V', '--version', help='print out version number and exit', action='store_true')
args = parser.parse_args()
if len(args.sharefiles) < 2:
print("At least two sharefiles are required.")
return 1
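    # Open the output file; unless --force is given, refuse to overwrite an existing file.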
if args.force:
outf = open(args.outputfile, 'wb')
else:
try:
flags = os.O_WRONLY|os.O_CREAT|os.O_EXCL | (hasattr(os, 'O_BINARY') and os.O_BINARY)
outfd = os.open(args.outputfile, flags)
except OSError:
print("There is already a file named %r -- aborting. Use --force to overwrite." % (args.outputfile,))
return 2
outf = os.fdopen(outfd, "wb")
sharefs = []
# This sort() actually matters for performance (shares with numbers < k
# are much faster to use than the others), as well as being important for
# reproducibility.
args.sharefiles.sort()
for fn in args.sharefiles:
sharefs.append(open(fn, 'rb'))
try:
filefec.decode_from_files(outf, sharefs, args.verbose)
except filefec.InsufficientShareFilesError as e:
print(str(e))
return 3
finally:
outf.close()
for f in sharefs:
f.close()
return 0
# zfec -- fast forward error correction library with Python interface
#
# Copyright (C) 2007 Allmydata, Inc.
# Author: Zooko Wilcox-O'Hearn
#
# This file is part of zfec.
#
# See README.rst for licensing information.
| 35.305556
| 149
| 0.663257
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,104
| 0.434304
|
f327633efe0ce2c9e557f60f7f82ada184c4948d
| 576
|
py
|
Python
|
bottomline/blweb/migrations/0012_vehicleconfig_color.py
|
mcm219/BottomLine
|
db82eef403c79bffa3864c4db6bc336632abaca5
|
[
"MIT"
] | null | null | null |
bottomline/blweb/migrations/0012_vehicleconfig_color.py
|
mcm219/BottomLine
|
db82eef403c79bffa3864c4db6bc336632abaca5
|
[
"MIT"
] | 1
|
2021-06-14T02:20:40.000Z
|
2021-06-14T02:20:40.000Z
|
bottomline/blweb/migrations/0012_vehicleconfig_color.py
|
mcm219/BottomLine
|
db82eef403c79bffa3864c4db6bc336632abaca5
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.2 on 2021-07-10 03:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blweb', '0011_vehiclecolor'),
]
operations = [
migrations.AddField(
model_name='vehicleconfig',
name='color',
field=models.ForeignKey(blank=True, default=None, help_text='The chosen color for this config', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='color', to='blweb.vehiclecolor'),
),
]
| 28.8
| 211
| 0.663194
| 450
| 0.78125
| 0
| 0
| 0
| 0
| 0
| 0
| 156
| 0.270833
|
b823df535990bd76d900f1381be1d7cc948408cf
| 11,634
|
py
|
Python
|
src/acs_3dpsf.py
|
davidharvey1986/rrg
|
26b4658f14279af21af1a61d57e9936daf315a71
|
[
"MIT"
] | 2
|
2019-11-18T12:51:09.000Z
|
2019-12-11T03:13:51.000Z
|
src/acs_3dpsf.py
|
davidharvey1986/rrg
|
26b4658f14279af21af1a61d57e9936daf315a71
|
[
"MIT"
] | 5
|
2017-06-09T10:06:27.000Z
|
2019-07-19T11:28:18.000Z
|
src/acs_3dpsf.py
|
davidharvey1986/rrg
|
26b4658f14279af21af1a61d57e9936daf315a71
|
[
"MIT"
] | 2
|
2017-07-19T15:48:33.000Z
|
2017-08-09T16:07:20.000Z
|
import numpy as np
from . import acs_map_xy as acs_map
def acs_3dpsf_basisfunctions( degree, x, y, focus ):
# Generate relevant basis functions
n_stars=np.max( np.array([len(x),len(y),len(focus)]))
basis_function_order=np.zeros((1,3)) # All zeros
for k in range(degree[2]+1):
for j in range(degree[1]+1):
for i in range(degree[0]+1):
if (i+j+k > 0) & ((i+j) <= np.max(degree[0:2])):
basis_function_order=np.vstack((basis_function_order, [i,j,k]))
n_basis_functions= basis_function_order.shape[0]
basis_function_value = np.zeros( (n_basis_functions, n_stars))
for i in range(n_basis_functions):
basis_function_value[i,:] = x**basis_function_order[i,0]*\
y**basis_function_order[i,1] * \
focus**basis_function_order[i,2]
return basis_function_value
# **********************************************************************
# **********************************************************************
# **********************************************************************
def acs_3dpsf_fit( scat, degree=np.array([3,2,2]),
mag_cut=np.array([20.5,22]),
e_cut=1, size_cut=np.array([-np.inf,3]), verbose=False
):
# Fit the PSF from data in a SCAT catalogue
# F814 I magnitude catalogue cut
degree = np.array(degree)
if len(degree) < 3 :
print("DEGREE must be 3D")
degree[ degree > 0 ] = np.min(degree[ degree > 0 ])
# Find the line dividing CCDs 1 and 2
ccd_boundary = acs_map.acs_map_xy( np.array([0, 4095, 0, 4095]),
np.array([2047, 2047, 2048, 2048]),
pixel_scale=scat.pixscale)
x1=np.mean([ccd_boundary.x[0],ccd_boundary.x[2]])
x2=np.mean([ccd_boundary.x[1],ccd_boundary.x[3]])
y1=np.mean([ccd_boundary.y[0],ccd_boundary.y[2]])
y2=np.mean([ccd_boundary.y[1],ccd_boundary.y[3]])
ccd_boundary_x1=np.mean([ccd_boundary.x[0],ccd_boundary.x[2]])
ccd_boundary_x2=np.mean([ccd_boundary.x[1],ccd_boundary.x[3]])
ccd_boundary_y1=np.mean([ccd_boundary.y[0],ccd_boundary.y[2]])
ccd_boundary_y2=np.mean([ccd_boundary.y[1],ccd_boundary.y[3]])
ccd_boundary_m=(ccd_boundary_y2-ccd_boundary_y1)/(ccd_boundary_x2-ccd_boundary_x1)
ccd_boundary_c=ccd_boundary_y1-ccd_boundary_m*ccd_boundary_x1
# Find the centre of each CCD
ccd_centre = acs_map.acs_map_xy( np.array([2048,2048]),
np.array([3072,1024]), pixel_scale=scat.pixscale)
# Select only the well-behaved stars
good= np.isfinite(scat.field_focus[0][scat.field_id[0]]) & \
np.isfinite(scat.e1_uncor_unrot[0]) & \
np.isfinite(scat.e2_uncor_unrot[0]) & \
np.isfinite(scat.xx_uncor[0]) & \
np.isfinite(scat.xy_uncor[0]) & \
np.isfinite(scat.yy_uncor[0]) & \
np.isfinite(scat.xxxx_uncor[0]) & \
np.isfinite(scat.xxxy_uncor[0]) & \
np.isfinite(scat.xxyy_uncor[0]) & \
np.isfinite(scat.xyyy_uncor[0]) & \
np.isfinite(scat.yyyy_uncor[0])
n_good = len(np.arange( len( good ))[good])
if verbose:
print("Found a total of "+str(len(scat.x[0]))+" real stars, of which "+str(n_good)+" look well-behaved")
# Store quantities to be fitted in local variables
x=scat.x[0][good]
y=scat.y[0][good]
focus=scat.field_focus[0][scat.field_id[0]][good]
ixx=scat.xx_uncor[0][good]
ixy=scat.xy_uncor[0][good]
iyy=scat.yy_uncor[0][good]
ixxxx=scat.xxxx_uncor[0][good]
ixxxy=scat.xxxy_uncor[0][good]
ixxyy=scat.xxyy_uncor[0][good]
ixyyy=scat.xyyy_uncor[0][good]
iyyyy=scat.yyyy_uncor[0][good]
e1=scat.e1_uncor_unrot[0][good]
e2=scat.e2_uncor_unrot[0][good]
# Work on each CCD separately
init_coeffs_flag = True
for ccd in range(2):
# Report which CCD is being considered
if ccd +1 == 1:
in_ccd = np.arange(len(y))[ y >= ccd_boundary_m*x+ccd_boundary_c]
n_in_CCD = len(in_ccd)
if ccd + 1 == 2:
in_ccd = np.arange(len( y))[ y < ccd_boundary_m*x+ccd_boundary_c]
n_in_CCD = len(in_ccd)
if n_in_CCD > 0:
#Compute matrix necessary for matrix inversion
if verbose:
print("Fitting moments of "+str(n_in_CCD)+" real stars in CCD#"+str(ccd+1))
basis_function_value=acs_3dpsf_basisfunctions(degree,
x[in_ccd]-ccd_centre.x[ccd],
y[in_ccd]-ccd_centre.y[ccd],
focus[in_ccd])
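            # Least-squares operator (B B^T)^-1 B; applied below to fit each moment.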
ls_matrix = np.dot( np.linalg.inv(np.dot(basis_function_value, basis_function_value.T)), basis_function_value)
# Create global arrays to contain the answers
n_basis_functions=np.shape(np.array(ls_matrix))[0]
if init_coeffs_flag:
acs_3dpsf_coeffs=basis_coeffs( ccd_centre,
ccd_boundary_m, ccd_boundary_c,
n_basis_functions, degree )
init_coeffs_flag = False
# Fit data to basis functions using least-squares inversion
#these are all matrices
acs_3dpsf_coeffs.ixx_fit[ccd, :] = np.dot(ls_matrix ,ixx[in_ccd])
acs_3dpsf_coeffs.ixy_fit[ccd, :] = np.dot(ls_matrix , ixy[in_ccd])
acs_3dpsf_coeffs.iyy_fit[ccd, :] = np.dot(ls_matrix , iyy[in_ccd])
acs_3dpsf_coeffs.ixxxx_fit[ccd, :] = np.dot(ls_matrix , ixxxx[in_ccd])
acs_3dpsf_coeffs.ixxxy_fit[ccd, :] = np.dot(ls_matrix , ixxxy[in_ccd])
acs_3dpsf_coeffs.ixxyy_fit[ccd, :] = np.dot(ls_matrix , ixxyy[in_ccd])
acs_3dpsf_coeffs.ixyyy_fit[ccd, :] = np.dot(ls_matrix , ixyyy[in_ccd])
acs_3dpsf_coeffs.iyyyy_fit[ccd, :] = np.dot(ls_matrix , iyyyy[in_ccd])
acs_3dpsf_coeffs.e1_fit[ccd, :] = np.dot(ls_matrix , e1[in_ccd])
acs_3dpsf_coeffs.e2_fit[ccd, :] = np.dot(ls_matrix , e2[in_ccd])
return acs_3dpsf_coeffs
# **********************************************************************
# **********************************************************************
# **********************************************************************
def acs_3dpsf_reconstruct( acs_3dpsf_coeffs, x, y, focus, radius=None, verbose=False):
# Create arrays to contain the final answer
n_galaxies=np.max( np.array([len(x), len(y), len(focus)]) )
if len(focus) == 1:
        focus_local = np.zeros(n_galaxies) + focus
else:
focus_local=focus
if verbose:
print("Found a total of "+str(n_galaxies)+" galaxies")
if radius is None:
        radius = np.zeros(n_galaxies) + 6
moms=moments( x, y, radius[:n_galaxies],
acs_3dpsf_coeffs.degree )
for ccd in range(2):
#Report which CCD is being considered
if ccd +1 == 1:
in_ccd = np.arange(len( y))[ y >= acs_3dpsf_coeffs.ccd_boundary_m*x+acs_3dpsf_coeffs.ccd_boundary_c]
n_in_CCD = len(in_ccd)
if ccd + 1 == 2:
in_ccd = np.arange(len( y))[ y < acs_3dpsf_coeffs.ccd_boundary_m*x+acs_3dpsf_coeffs.ccd_boundary_c]
n_in_CCD = len(in_ccd)
if n_in_CCD > 0:
if verbose:
print("Interpolating model PSF moments to the position of "+str(n_in_CCD)+" galaxies in CCD#"+str(ccd+1))
#Fit the PSF
basis_function_value=acs_3dpsf_basisfunctions(acs_3dpsf_coeffs.degree[0], \
x[in_ccd]-acs_3dpsf_coeffs.ccd_centre.x[ccd], \
y[in_ccd]-acs_3dpsf_coeffs.ccd_centre.y[ccd], \
focus_local[in_ccd] )
moms.xx[in_ccd] = np.dot(acs_3dpsf_coeffs.ixx_fit[ccd, :], basis_function_value)
moms.xy[in_ccd] = np.dot(acs_3dpsf_coeffs.ixy_fit[ccd, :], basis_function_value)
moms.yy[in_ccd] = np.dot(acs_3dpsf_coeffs.iyy_fit[ccd, :], basis_function_value)
moms.xxxx[in_ccd] = np.dot(acs_3dpsf_coeffs.ixxxx_fit[ccd, :], basis_function_value)
moms.xxxy[in_ccd] = np.dot(acs_3dpsf_coeffs.ixxxy_fit[ccd, :], basis_function_value)
moms.xxyy[in_ccd] = np.dot(acs_3dpsf_coeffs.ixxyy_fit[ccd, :], basis_function_value)
moms.xyyy[in_ccd] = np.dot(acs_3dpsf_coeffs.ixyyy_fit[ccd, :], basis_function_value)
moms.yyyy[in_ccd] = np.dot(acs_3dpsf_coeffs.iyyyy_fit[ccd, :], basis_function_value)
moms.e1[in_ccd] = np.dot(acs_3dpsf_coeffs.e1_fit[ccd, :], basis_function_value)
moms.e2[in_ccd] = np.dot(acs_3dpsf_coeffs.e2_fit[ccd, :], basis_function_value)
else:
print("No galaxies in CCD#"+str(ccd))
# Work out PSF ellipticities at positions of galaxies properly. Tsk!
moms.e1 = (moms.xx-moms.yy)/(moms.xx+moms.yy)
moms.e2 = 2*moms.xy/(moms.xx+moms.yy)
return moms
# **********************************************************************
# **********************************************************************
# **********************************************************************
def acs_3dpsf( x, y, focus, radius, scat,
acs_3dpsf_coeffs=None,
degree=np.array([3,2,2])):
# Fit the PSF
if acs_3dpsf_coeffs is None:
acs_3dpsf_coeffs=acs_3dpsf_fit(scat, degree=degree)
#Reconstruct the PSF
acs_moms=acs_3dpsf_reconstruct(acs_3dpsf_coeffs, x, y, focus, radius)
return acs_moms
class basis_coeffs:
def __init__( self, ccd_centre, ccd_boundary_m, \
ccd_boundary_c, n_basis_functions, degree ):
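        # Note: the trailing comma below stores degree as a 1-tuple; callers read it back via .degree[0].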
self.degree = degree,
self.ccd_centre = ccd_centre
self.ccd_boundary_m = ccd_boundary_m
self.ccd_boundary_c = ccd_boundary_c
self.ixx_fit = np.zeros((2,n_basis_functions))
self.ixy_fit = np.zeros((2,n_basis_functions))
self.iyy_fit = np.zeros((2,n_basis_functions))
self.ixxxx_fit = np.zeros((2,n_basis_functions))
self.ixxxy_fit = np.zeros((2,n_basis_functions))
self.ixxyy_fit = np.zeros((2,n_basis_functions))
self.ixyyy_fit = np.zeros((2,n_basis_functions))
self.iyyyy_fit = np.zeros((2,n_basis_functions))
self.e1_fit = np.zeros((2,n_basis_functions))
self.e2_fit = np.zeros((2,n_basis_functions))
class moments( dict ):
def __init__(self, x, y, radius, degree ):
n_objects = len(x)
self.__dict__['x'] = x
self.__dict__['y'] = y
self.__dict__['e1']=np.zeros(n_objects)
self.__dict__['e2']=np.zeros(n_objects)
self.__dict__['xx']=np.zeros(n_objects)
self.__dict__['xy']=np.zeros(n_objects)
self.__dict__['yy']=np.zeros(n_objects)
self.__dict__['xxxx']=np.zeros(n_objects)
self.__dict__['xxxy']=np.zeros(n_objects)
self.__dict__['xxyy']=np.zeros(n_objects)
self.__dict__['xyyy']=np.zeros(n_objects)
self.__dict__['yyyy']=np.zeros(n_objects)
self.__dict__['radius'] = radius
self.__dict__['degree'] = degree
def keys(self):
return list(self.__dict__.keys())
def __getitem__(self, key):
return self.__dict__[key]
| 39.979381
| 122
| 0.565068
| 1,715
| 0.147413
| 0
| 0
| 0
| 0
| 0
| 0
| 1,674
| 0.143889
|
b824108791760c3044be86fca8557a92a30f2d41
| 27,400
|
py
|
Python
|
gsf/function_class.py
|
mtakahiro/gsf
|
c09c5d32a45b0277c469d2d3cb2f8c11f1fc0278
|
[
"MIT"
] | 9
|
2019-08-23T19:00:54.000Z
|
2022-02-23T17:57:41.000Z
|
gsf/function_class.py
|
mtakahiro/gsf
|
c09c5d32a45b0277c469d2d3cb2f8c11f1fc0278
|
[
"MIT"
] | 17
|
2020-05-22T17:41:15.000Z
|
2022-03-20T03:32:48.000Z
|
gsf/function_class.py
|
mtakahiro/gsf
|
c09c5d32a45b0277c469d2d3cb2f8c11f1fc0278
|
[
"MIT"
] | 1
|
2020-02-01T22:55:37.000Z
|
2020-02-01T22:55:37.000Z
|
import numpy as np
import sys
import scipy.interpolate as interpolate
import asdf
from .function import *
from .basic_func import Basic
class Func:
'''
The list of (possible) `Func` attributes is given below:
Attributes
----------
'''
def __init__(self, MB, dust_model=0):
'''
Parameters
----------
dust_model : int
0 for Calzetti.
'''
self.ID = MB.ID
self.ZZ = MB.Zall
self.age = MB.age
self.AA = MB.nage
self.tau0 = MB.tau0
self.MB = MB
self.dust_model = dust_model
self.DIR_TMP = MB.DIR_TMP
if MB.f_dust:
self.Temp = MB.Temp
try:
self.filts = MB.filts
self.DIR_FIL = MB.DIR_FILT
except:
pass
# Already Read or not;
self.f_af = False
self.f_af0 = False
def demo(self):
ZZ = self.ZZ
AA = self.AA
return ZZ, AA
#############################
# Load template in obs range.
#############################
def open_spec_fits(self, fall=0, orig=False):
        '''
        Load template in obs range.
        '''
ID0 = self.MB.ID
tau0= self.MB.tau0 #[0.01,0.02,0.03]
from astropy.io import fits
ZZ = self.ZZ
AA = self.AA
bfnc = self.MB.bfnc #Basic(ZZ)
# ASDF;
if fall == 0:
app = ''
hdu0 = self.MB.af['spec']
elif fall == 1:
app = 'all_'
hdu0 = self.MB.af['spec_full']
DIR_TMP = self.DIR_TMP
for pp in range(len(tau0)):
for zz in range(len(ZZ)):
Z = ZZ[zz]
NZ = bfnc.Z2NZ(Z)
if zz == 0 and pp == 0:
nr = hdu0['colnum']
xx = hdu0['wavelength']
lib = np.zeros((len(nr), 2+len(AA)*len(ZZ)*len(tau0)), dtype='float')
lib[:,0] = nr[:]
lib[:,1] = xx[:]
for aa in range(len(AA)):
coln = int(2 + aa)
if orig:
colname = 'fspec_orig_' + str(zz) + '_' + str(aa) + '_' + str(pp)
else:
colname = 'fspec_' + str(zz) + '_' + str(aa) + '_' + str(pp)
colnall = int(2 + pp*len(ZZ)*len(AA) + zz*len(AA) + aa) # 2 takes account of wavelength and AV columns.
lib[:,colnall] = hdu0[colname]
return lib
def open_spec_dust_fits(self, fall=0):
'''
Loads dust template in obs range.
'''
ID0 = self.MB.ID
tau0= self.MB.tau0 #[0.01,0.02,0.03]
from astropy.io import fits
ZZ = self.ZZ
AA = self.AA
bfnc = self.MB.bfnc #Basic(ZZ)
self.MB.af = asdf.open(self.DIR_TMP + 'spec_all_' + self.ID + '.asdf')
self.MB.af0 = asdf.open(self.DIR_TMP + 'spec_all.asdf')
if fall == 0:
app = ''
hdu0 = self.MB.af['spec_dust']
elif fall == 1:
app = 'all_'
hdu0 = self.MB.af['spec_dust_full']
DIR_TMP = self.DIR_TMP
nr = hdu0['colnum']
xx = hdu0['wavelength']
lib = np.zeros((len(nr), 2+len(self.Temp)), dtype='float')
lib[:,0] = nr[:]
lib[:,1] = xx[:]
for aa in range(len(self.Temp)):
coln = int(2 + aa)
colname = 'fspec_' + str(aa)
colnall = int(2 + aa) # 2 takes account of wavelength and AV columns.
lib[:,colnall] = hdu0[colname]
if fall==1 and False:
import matplotlib.pyplot as plt
plt.close()
plt.plot(lib[:,1],lib[:,coln],linestyle='-')
plt.show()
return lib
def open_spec_fits_dir(self, nage, nz, kk, Av00, zgal, A00):
'''
Load template in obs range.
But for weird template.
'''
from astropy.io import fits
tau0= self.tau0 #[0.01,0.02,0.03]
ZZ = self.ZZ
AA = self.AA
bfnc = self.MB.bfnc #Basic(ZZ)
self.MB.af = asdf.open(self.DIR_TMP + 'spec_all_' + self.ID + '.asdf')
self.MB.af0 = asdf.open(self.DIR_TMP + 'spec_all.asdf')
app = 'all'
hdu0 = self.MB.af['spec_full']
DIR_TMP = self.DIR_TMP #'./templates/'
pp = 0
zz = nz
# Luminosity
mshdu = self.MB.af0['ML']
Ls = mshdu['Ls_%d'%nz]
xx = hdu0['wavelength'] # at RF;
nr = np.arange(0,len(xx),1) #hdu0.data['colnum']
lib = np.zeros((len(nr), 2+1), dtype='float')
lib[:,0] = nr[:]
lib[:,1] = xx[:]
aa = nage
coln = int(2 + aa)
colname = 'fspec_' + str(zz) + '_' + str(aa) + '_' + str(pp)
yy0 = hdu0[colname]/Ls[aa]
yy = flamtonu(xx, yy0)
lib[:,2] = yy[:]
if self.dust_model == 0: # Calzetti
yyd, xxd, nrd = dust_calz(xx, yy, Av00, nr)
elif self.dust_model == 1: # MW
yyd, xxd, nrd = dust_mw(xx, yy, Av00, nr)
elif self.dust_model == 2: # LMC
yyd, xxd, nrd = dust_gen(xx, yy, Av00, nr, Rv=4.05, gamma=-0.06, Eb=2.8)
elif self.dust_model == 3: # SMC
yyd, xxd, nrd = dust_gen(xx, yy, Av00, nr, Rv=4.05, gamma=-0.42, Eb=0.0)
elif self.dust_model == 4: # Kriek&Conroy with gamma=-0.2
yyd, xxd, nrd = dust_kc(xx, yy, Av00, nr, Rv=4.05, gamma=-0.2)
else:
print('No entry. Dust model is set to Calzetti')
yyd, xxd, nrd = dust_calz(xx, yy, Av00, nr)
xxd *= (1.+zgal)
nrd_yyd = np.zeros((len(nrd),3), dtype='float')
nrd_yyd[:,0] = nrd[:]
nrd_yyd[:,1] = yyd[:]
nrd_yyd[:,2] = xxd[:]
b = nrd_yyd
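        # Sort rows back into the original pixel-index order (nrd is the primary sort key).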
nrd_yyd_sort = b[np.lexsort(([-1,1]*b[:,[1,0]]).T)]
yyd_sort = nrd_yyd_sort[:,1]
xxd_sort = nrd_yyd_sort[:,2]
return A00 * yyd_sort, xxd_sort
def get_template(self, lib, Amp=1.0, T=1.0, Av=0.0, Z=0.0, zgal=1.0, f_bb=False):
'''
Gets an element template given a set of parameters.
Not necessarily the most efficient way, but easy to use.
        Parameters
        ----------
lib : dict
library dictionary.
Amp : float
Amplitude of the target template. Note that each template has Lbol = 1e10Lsun.
T : float
Age, in Gyr.
Av : float
Dust attenuation, in mag.
Z : float
Metallicity, in log(Z/Zsun).
zgal : float
Redshift.
f_bb: bool
If calculate bb photometry for the spectrum requested.
        Returns
        -------
        flux : float array
            Flux in Fnu.
        wavelength : float array
            Wavelength in AA.
        fil_flux, fil_cen : float arrays
            Broadband fluxes and filter central wavelengths, returned only when f_bb is True.
'''
bfnc = self.MB.bfnc
DIR_TMP = self.MB.DIR_TMP
NZ = bfnc.Z2NZ(Z)
pp0 = np.random.uniform(low=0, high=len(self.tau0), size=(1,))
pp = int(pp0[0])
if pp>=len(self.tau0):
pp += -1
nmodel = np.argmin(np.abs(T-self.age[:]))
if T - self.age[nmodel] != 0:
print('T=%.2f is not found in age library. T=%.2f is used.'%(T,self.age[nmodel]))
coln= int(2 + pp*len(self.ZZ)*len(self.AA) + NZ*len(self.AA) + nmodel)
nr = lib[:, 0]
xx = lib[:, 1] # This is OBSERVED wavelength range at z=zgal
yy = lib[:, coln]
if self.dust_model == 0:
yyd, xxd, nrd = dust_calz(xx/(1.+zgal), yy, Av, nr)
elif self.dust_model == 1:
yyd, xxd, nrd = dust_mw(xx/(1.+zgal), yy, Av, nr)
elif self.dust_model == 2: # LMC
yyd, xxd, nrd = dust_gen(xx/(1.+zgal), yy, Av, nr, Rv=4.05, gamma=-0.06, Eb=2.8)
elif self.dust_model == 3: # SMC
yyd, xxd, nrd = dust_gen(xx/(1.+zgal), yy, Av, nr, Rv=4.05, gamma=-0.42, Eb=0.0)
elif self.dust_model == 4: # Kriek&Conroy with gamma=-0.2
yyd, xxd, nrd = dust_kc(xx/(1.+zgal), yy, Av, nr, Rv=4.05, gamma=-0.2)
else:
yyd, xxd, nrd = dust_calz(xx/(1.+zgal), yy, Av, nr)
xxd *= (1.+zgal)
nrd_yyd = np.zeros((len(nrd),3), dtype='float')
nrd_yyd[:,0] = nrd[:]
nrd_yyd[:,1] = yyd[:]
nrd_yyd[:,2] = xxd[:]
b = nrd_yyd
nrd_yyd_sort = b[np.lexsort(([-1,1]*b[:,[1,0]]).T)]
yyd_sort = nrd_yyd_sort[:,1]
xxd_sort = nrd_yyd_sort[:,2]
if f_bb:
#fil_cen, fil_flux = filconv(self.filts, xxd_sort, Amp * yyd_sort, self.DIR_FIL)
fil_cen, fil_flux = filconv_fast(self.MB, xxd_sort, Amp * yyd_sort)
return Amp * yyd_sort, xxd_sort, fil_flux, fil_cen
else:
return Amp * yyd_sort, xxd_sort
def tmp03(self, A00, Av00, nmodel, Z, zgal, lib):
        '''
        Make a single model template for age index nmodel and metallicity Z, with dust attenuation applied.
        '''
tau0= self.tau0 #[0.01,0.02,0.03]
ZZ = self.ZZ
AA = self.AA
bfnc = self.MB.bfnc #Basic(ZZ)
DIR_TMP = self.MB.DIR_TMP #'./templates/'
NZ = bfnc.Z2NZ(Z)
pp0 = np.random.uniform(low=0, high=len(tau0), size=(1,))
pp = int(pp0[0])
if pp>=len(tau0):
pp += -1
coln= int(2 + pp*len(ZZ)*len(AA) + NZ*len(AA) + nmodel)
nr = lib[:,0]
xx = lib[:,1] # This is OBSERVED wavelength range at z=zgal
yy = lib[:,coln]
if self.dust_model == 0:
yyd, xxd, nrd = dust_calz(xx/(1.+zgal), yy, Av00, nr)
elif self.dust_model == 1:
yyd, xxd, nrd = dust_mw(xx/(1.+zgal), yy, Av00, nr)
elif self.dust_model == 2: # LMC
yyd, xxd, nrd = dust_gen(xx/(1.+zgal), yy, Av00, nr, Rv=4.05, gamma=-0.06, Eb=2.8)
elif self.dust_model == 3: # SMC
yyd, xxd, nrd = dust_gen(xx/(1.+zgal), yy, Av00, nr, Rv=4.05, gamma=-0.42, Eb=0.0)
elif self.dust_model == 4: # Kriek&Conroy with gamma=-0.2
yyd, xxd, nrd = dust_kc(xx/(1.+zgal), yy, Av00, nr, Rv=4.05, gamma=-0.2)
else:
yyd, xxd, nrd = dust_calz(xx/(1.+zgal), yy, Av00, nr)
xxd *= (1.+zgal)
nrd_yyd = np.zeros((len(nrd),3), dtype='float')
nrd_yyd[:,0] = nrd[:]
nrd_yyd[:,1] = yyd[:]
nrd_yyd[:,2] = xxd[:]
b = nrd_yyd
nrd_yyd_sort = b[np.lexsort(([-1,1]*b[:,[1,0]]).T)]
yyd_sort = nrd_yyd_sort[:,1]
xxd_sort = nrd_yyd_sort[:,2]
return A00 * yyd_sort, xxd_sort
def tmp04(self, par, f_Alog=True, nprec=1, f_val=False, lib_all=False, f_nrd=False):
'''
Makes model template with a given param set.
Also dust attenuation.
Parameters
----------
nprec : int
Precision when redshift is refined.
'''
ZZ = self.ZZ
AA = self.AA
bfnc = self.MB.bfnc
Mtot = 0
if f_val:
par = par.params
if self.MB.fzmc == 1:
try:
zmc = par['zmc'].value
except:
zmc = self.MB.zgal
else:
zmc = self.MB.zgal
pp = 0
# AV limit;
if par['Av'] < self.MB.Avmin:
par['Av'] = self.MB.Avmin
if par['Av'] > self.MB.Avmax:
par['Av'] = self.MB.Avmax
Av00 = par['Av']
for aa in range(len(AA)):
if self.MB.ZEVOL==1 or aa == 0:
Z = par['Z'+str(aa)]
NZ = bfnc.Z2NZ(Z)
else:
pass
# Check limit;
if par['A'+str(aa)] < self.MB.Amin:
par['A'+str(aa)] = self.MB.Amin
if par['A'+str(aa)] > self.MB.Amax:
par['A'+str(aa)] = self.MB.Amax
# Z limit:
if aa == 0 or self.MB.ZEVOL == 1:
if par['Z%d'%aa] < self.MB.Zmin:
par['Z%d'%aa] = self.MB.Zmin
if par['Z%d'%aa] > self.MB.Zmax:
par['Z%d'%aa] = self.MB.Zmax
# Is A in logspace?
if f_Alog:
A00 = 10**par['A'+str(aa)]
else:
A00 = par['A'+str(aa)]
coln = int(2 + pp*len(ZZ)*len(AA) + NZ*len(AA) + aa)
sedpar = self.MB.af['ML'] # For M/L
mslist = sedpar['ML_'+str(NZ)][aa]
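            # Accumulate the total stellar mass: log amplitude plus log(M/L) of this template.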
Mtot += 10**(par['A%d'%aa] + np.log10(mslist))
if lib_all:
if aa == 0:
nr = self.MB.lib_all[:, 0]
xx = self.MB.lib_all[:, 1] # This is OBSERVED wavelength range at z=zgal
yy = A00 * self.MB.lib_all[:, coln]
else:
yy += A00 * self.MB.lib_all[:, coln]
else:
if aa == 0:
nr = self.MB.lib[:, 0]
xx = self.MB.lib[:, 1] # This is OBSERVED wavelength range at z=zgal
yy = A00 * self.MB.lib[:, coln]
else:
yy += A00 * self.MB.lib[:, coln]
self.MB.logMtmp = np.log10(Mtot)
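        # If the sampled redshift differs from zgal at the requested precision, shift the wavelength grid and re-interpolate.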
if round(zmc,nprec) != round(self.MB.zgal,nprec):
xx_s = xx / (1+self.MB.zgal) * (1+zmc)
fint = interpolate.interp1d(xx, yy, kind='nearest', fill_value="extrapolate")
yy_s = fint(xx_s)
else:
xx_s = xx
yy_s = yy
xx = xx_s
yy = yy_s
if self.dust_model == 0:
yyd, xxd, nrd = dust_calz(xx/(1.+zmc), yy, Av00, nr)
elif self.dust_model == 1:
yyd, xxd, nrd = dust_mw(xx/(1.+zmc), yy, Av00, nr)
elif self.dust_model == 2: # LMC
yyd, xxd, nrd = dust_gen(xx/(1.+zmc), yy, Av00, nr, Rv=4.05, gamma=-0.06, Eb=2.8)
elif self.dust_model == 3: # SMC
yyd, xxd, nrd = dust_gen(xx/(1.+zmc), yy, Av00, nr, Rv=4.05, gamma=-0.42, Eb=0.0)
elif self.dust_model == 4: # Kriek&Conroy with gamma=-0.2
yyd, xxd, nrd = dust_kc(xx/(1.+zmc), yy, Av00, nr, Rv=4.05, gamma=-0.2)
else:
yyd, xxd, nrd = dust_calz(xx/(1.+zmc), yy, Av00, nr)
xxd *= (1.+zmc)
nrd_yyd = np.zeros((len(nrd),3), dtype='float')
nrd_yyd[:,0] = nrd[:]
nrd_yyd[:,1] = yyd[:]
nrd_yyd[:,2] = xxd[:]
nrd_yyd_sort = nrd_yyd[nrd_yyd[:,0].argsort()]
if not f_nrd:
return nrd_yyd_sort[:,1],nrd_yyd_sort[:,2]
else:
return nrd_yyd_sort[:,0],nrd_yyd_sort[:,1],nrd_yyd_sort[:,2]
def tmp04_dust(self, par, nprec=1):
'''
        Makes model template with a given param set.
Also dust attenuation.
'''
tau0= self.tau0
ZZ = self.ZZ
AA = self.AA
bfnc = self.MB.bfnc
DIR_TMP = self.MB.DIR_TMP
try:
m_dust = par['MDUST']
t_dust = par['TDUST']
except: # This is exception for initial minimizing;
m_dust = -99
t_dust = 0
nr = self.MB.lib_dust[:,0]
xx = self.MB.lib_dust[:,1] # This is OBSERVED wavelength range at z=zgal
coln= 2+int(t_dust+0.5)
yy = 10**m_dust * self.MB.lib_dust[:,coln]
if self.MB.fzmc == 1:
zmc = par.params['zmc'].value
else:
zmc = self.MB.zgal
# How much does this cost in time?
if round(zmc,nprec) != round(self.MB.zgal,nprec):
xx_s = xx / (1+self.MB.zgal) * (1+zmc)
fint = interpolate.interp1d(xx, yy, kind='nearest', fill_value="extrapolate")
yy_s = fint(xx_s)
else:
xx_s = xx
yy_s = yy
return yy_s, xx_s
class Func_tau:
    '''
    Same as `Func`, but for tau-model templates indexed by metallicity, tau, and age.
    '''
def __init__(self, MB, dust_model=0):
'''
        Parameters
        ----------
dust_model : int
0 for Calzetti. 1 for MW. 4 for Kriek Conroy
'''
self.MB = MB
self.ID = MB.ID
self.ZZ = MB.Zall
self.AA = MB.nage
self.tau = MB.tau
self.dust_model = dust_model
self.DIR_TMP = MB.DIR_TMP
if MB.f_dust:
self.Temp = MB.Temp
try:
self.filts = MB.filts
self.DIR_FIL = MB.DIR_FILT
except:
pass
# Already Read or not;
self.f_af = False
self.f_af0 = False
def demo(self):
ZZ = self.ZZ
AA = self.AA
return ZZ, AA
def open_spec_fits(self, fall=0, orig=False):
'''
Loads template in obs range.
'''
ID0 = self.MB.ID
from astropy.io import fits
ZZ = self.ZZ
AA = self.AA
bfnc = self.MB.bfnc
# ASDF;
if fall == 0:
app = ''
hdu0 = self.MB.af['spec']
elif fall == 1:
app = 'all_'
hdu0 = self.MB.af['spec_full']
DIR_TMP = self.DIR_TMP
NZ = len(ZZ)
NT = self.MB.ntau
NA = self.MB.nage
for zz,Z in enumerate(ZZ):
for tt,TT in enumerate(self.MB.tau):
for ss,TA in enumerate(self.MB.ageparam):
if zz == 0 and tt == 0 and ss == 0:
nr = hdu0['colnum']
xx = hdu0['wavelength']
coln = int(2 + NZ * NT * NA) # + self.MB.ntau * self.MB.nage + NA)
lib = np.zeros((len(nr), coln), dtype='float')
lib[:,0] = nr[:]
lib[:,1] = xx[:]
if orig:
colname = 'fspec_orig_' + str(zz) + '_' + str(tt) + '_' + str(ss)
else:
colname = 'fspec_' + str(zz) + '_' + str(tt) + '_' + str(ss)
colnall = int(2 + zz * NT * NA + tt * NA + ss) # 2 takes account of wavelength and AV columns.
lib[:,colnall] = hdu0[colname]
return lib
def open_spec_dust_fits(self, fall=0):
'''
Load dust template in obs range.
'''
ID0 = self.MB.ID
tau0= self.MB.tau0 #[0.01,0.02,0.03]
from astropy.io import fits
ZZ = self.ZZ
AA = self.AA
bfnc = self.MB.bfnc #Basic(ZZ)
self.MB.af = asdf.open(self.DIR_TMP + 'spec_all_' + self.ID + '.asdf')
self.MB.af0 = asdf.open(self.DIR_TMP + 'spec_all.asdf')
if fall == 0:
app = ''
hdu0 = self.MB.af['spec_dust']
elif fall == 1:
app = 'all_'
hdu0 = self.MB.af['spec_dust_full']
DIR_TMP = self.DIR_TMP
nr = hdu0['colnum']
xx = hdu0['wavelength']
lib = np.zeros((len(nr), 2+len(self.Temp)), dtype='float')
lib[:,0] = nr[:]
lib[:,1] = xx[:]
for aa in range(len(self.Temp)):
coln = int(2 + aa)
colname = 'fspec_' + str(aa)
colnall = int(2 + aa) # 2 takes account of wavelength and AV columns.
lib[:,colnall] = hdu0[colname]
if fall==1 and False:
import matplotlib.pyplot as plt
plt.close()
plt.plot(lib[:,1],lib[:,coln],linestyle='-')
plt.show()
return lib
def open_spec_fits_dir(self, nage, nz, kk, Av00, zgal, A00):
'''
Loads template in obs range.
But for weird template.
'''
from astropy.io import fits
tau0= self.tau0 #[0.01,0.02,0.03]
ZZ = self.ZZ
AA = self.AA
bfnc = self.MB.bfnc #Basic(ZZ)
self.MB.af = asdf.open(self.DIR_TMP + 'spec_all_' + self.ID + '.asdf')
self.MB.af0 = asdf.open(self.DIR_TMP + 'spec_all.asdf')
app = 'all'
hdu0 = self.MB.af['spec_full']
DIR_TMP = self.DIR_TMP #'./templates/'
pp = 0
zz = nz
# Luminosity
mshdu = self.MB.af0['ML']
Ls = mshdu['Ls_%d'%nz]
xx = hdu0['wavelength'] # at RF;
nr = np.arange(0,len(xx),1) #hdu0.data['colnum']
lib = np.zeros((len(nr), 2+1), dtype='float')
lib[:,0] = nr[:]
lib[:,1] = xx[:]
aa = nage
coln = int(2 + aa)
colname = 'fspec_' + str(zz) + '_' + str(aa) + '_' + str(pp)
yy0 = hdu0[colname]/Ls[aa]
yy = flamtonu(xx, yy0)
lib[:,2] = yy[:]
if self.dust_model == 0: # Calzetti
yyd, xxd, nrd = dust_calz(xx, yy, Av00, nr)
elif self.dust_model == 1: # MW
yyd, xxd, nrd = dust_mw(xx, yy, Av00, nr)
elif self.dust_model == 2: # LMC
yyd, xxd, nrd = dust_gen(xx, yy, Av00, nr, Rv=4.05, gamma=-0.06, Eb=2.8)
elif self.dust_model == 3: # SMC
yyd, xxd, nrd = dust_gen(xx, yy, Av00, nr, Rv=4.05, gamma=-0.42, Eb=0.0)
elif self.dust_model == 4: # Kriek&Conroy with gamma=-0.2
yyd, xxd, nrd = dust_kc(xx, yy, Av00, nr, Rv=4.05, gamma=-0.2)
else:
print('No entry. Dust model is set to Calzetti')
yyd, xxd, nrd = dust_calz(xx, yy, Av00, nr)
xxd *= (1.+zgal)
nrd_yyd = np.zeros((len(nrd),3), dtype='float')
nrd_yyd[:,0] = nrd[:]
nrd_yyd[:,1] = yyd[:]
nrd_yyd[:,2] = xxd[:]
b = nrd_yyd
nrd_yyd_sort = b[np.lexsort(([-1,1]*b[:,[1,0]]).T)]
yyd_sort = nrd_yyd_sort[:,1]
xxd_sort = nrd_yyd_sort[:,2]
return A00 * yyd_sort, xxd_sort
def tmp04(self, par, f_Alog=True, nprec=1, f_val=False, check_bound=False, lib_all=False, f_nrd=False):
'''
Makes model template with a given param set.
Also dust attenuation.
        Parameters
        ----------
nprec : int
Precision when redshift is refined.
'''
ZZ = self.ZZ
AA = self.AA
bfnc = self.MB.bfnc
Mtot = 0
pp = 0
if f_val:
par = par.params
if self.MB.fzmc == 1:
try:
zmc = par['zmc'].value
except:
zmc = self.MB.zgal
else:
zmc = self.MB.zgal
if check_bound:
# AV limit;
if par['Av'] < self.MB.Avmin:
par['Av'] = self.MB.Avmin
if par['Av'] > self.MB.Avmax:
par['Av'] = self.MB.Avmax
Av00 = par['Av']
for aa in range(self.MB.npeak):
if self.MB.ZEVOL==1 or aa == 0:
if check_bound:
# Z limit:
if par['Z%d'%aa] < self.MB.Zmin:
par['Z%d'%aa] = self.MB.Zmin
if par['Z%d'%aa] > self.MB.Zmax:
par['Z%d'%aa] = self.MB.Zmax
Z = par['Z%d'%aa]
else:
pass
if check_bound:
# A
if par['A'+str(aa)] < self.MB.Amin:
par['A'+str(aa)] = self.MB.Amin
if par['A'+str(aa)] > self.MB.Amax:
par['A'+str(aa)] = self.MB.Amax
if par['TAU'+str(aa)] < self.MB.taumin:
par['TAU'+str(aa)] = self.MB.taumin
if par['TAU'+str(aa)] > self.MB.taumax:
par['TAU'+str(aa)] = self.MB.taumax
if par['AGE'+str(aa)] < self.MB.agemin:
par['AGE'+str(aa)] = self.MB.agemin
if par['AGE'+str(aa)] > self.MB.agemax:
par['AGE'+str(aa)] = self.MB.agemax
# Is A in logspace?
if f_Alog:
A00 = 10**par['A'+str(aa)]
else:
A00 = par['A'+str(aa)]
tau,age = par['TAU%d'%aa],par['AGE%d'%aa]
NZ, NT, NA = bfnc.Z2NZ(Z,tau,age)
coln = int(2 + NZ*self.MB.ntau*self.MB.nage + NT*self.MB.nage + NA)
mslist = self.MB.af['ML']['ML_'+str(NZ)+'_'+str(NT)][NA]
Mtot += 10**(par['A%d'%aa] + np.log10(mslist))
if lib_all:
if aa == 0:
nr = self.MB.lib_all[:, 0]
xx = self.MB.lib_all[:, 1] # This is OBSERVED wavelength range at z=zgal
yy = A00 * self.MB.lib_all[:, coln]
else:
yy += A00 * self.MB.lib_all[:, coln]
else:
if aa == 0:
nr = self.MB.lib[:, 0]
xx = self.MB.lib[:, 1] # This is OBSERVED wavelength range at z=zgal
yy = A00 * self.MB.lib[:, coln]
else:
yy += A00 * self.MB.lib[:, coln]
# Keep logM
self.MB.logMtmp = np.log10(Mtot)
# Redshift refinement;
if round(zmc,nprec) != round(self.MB.zgal,nprec): # Not sure how much this costs in time.
xx_s = xx / (1+self.MB.zgal) * (1+zmc)
fint = interpolate.interp1d(xx, yy, kind='nearest', fill_value="extrapolate")
yy_s = fint(xx_s)
else:
xx_s = xx
yy_s = yy
xx = xx_s
yy = yy_s
if self.dust_model == 0:
yyd, xxd, nrd = dust_calz(xx/(1.+zmc), yy, Av00, nr)
elif self.dust_model == 1:
yyd, xxd, nrd = dust_mw(xx/(1.+zmc), yy, Av00, nr)
elif self.dust_model == 2: # LMC
yyd, xxd, nrd = dust_gen(xx/(1.+zmc), yy, Av00, nr, Rv=4.05, gamma=-0.06, Eb=2.8)
elif self.dust_model == 3: # SMC
yyd, xxd, nrd = dust_gen(xx/(1.+zmc), yy, Av00, nr, Rv=4.05, gamma=-0.42, Eb=0.0)
elif self.dust_model == 4: # Kriek&Conroy with gamma=-0.2
yyd, xxd, nrd = dust_kc(xx/(1.+zmc), yy, Av00, nr, Rv=4.05, gamma=-0.2)
else:
yyd, xxd, nrd = dust_calz(xx/(1.+zmc), yy, Av00, nr)
xxd *= (1.+zmc)
if self.dust_model == 0:
if not f_nrd:
return yyd,xxd
else:
return nrd,yyd,xxd
else:
nrd_yyd = np.zeros((len(nrd),3), dtype='float')
nrd_yyd[:,0] = nrd[:]
nrd_yyd[:,1] = yyd[:]
nrd_yyd[:,2] = xxd[:]
nrd_yyd_sort = nrd_yyd[nrd_yyd[:,0].argsort()]
if not f_nrd:
return nrd_yyd_sort[:,1],nrd_yyd_sort[:,2]
else:
return nrd_yyd_sort[:,0],nrd_yyd_sort[:,1],nrd_yyd_sort[:,2]
def tmp04_dust(self, par, nprec=1):
'''
        Makes model template with a given param set.
Also dust attenuation.
'''
bfnc = self.MB.bfnc #Basic(ZZ)
DIR_TMP = self.MB.DIR_TMP #'./templates/'
try:
m_dust = par['MDUST']
t_dust = par['TDUST']
except: # This is exception for initial minimizing;
m_dust = -99
t_dust = 0
nr = self.MB.lib_dust[:,0]
xx = self.MB.lib_dust[:,1] # This is OBSERVED wavelength range at z=zgal
coln= 2+int(t_dust+0.5)
yy = 10**m_dust * self.MB.lib_dust[:,coln]
if self.MB.fzmc == 1:
zmc = par.params['zmc'].value
else:
zmc = self.MB.zgal
# How much does this cost in time?
if round(zmc,nprec) != round(self.MB.zgal,nprec):
xx_s = xx / (1+self.MB.zgal) * (1+zmc)
fint = interpolate.interp1d(xx, yy, kind='nearest', fill_value="extrapolate")
yy_s = fint(xx_s)
else:
xx_s = xx
yy_s = yy
return yy_s, xx_s
| 31.823461
| 123
| 0.464964
| 27,254
| 0.994672
| 0
| 0
| 0
| 0
| 0
| 0
| 4,941
| 0.180328
|
b825f9f00f6901c5d7cf23cfa47cb3197933eecd
| 1,855
|
py
|
Python
|
loadbalanceRL/utils/exceptions.py
|
fqzhou/LoadBalanceControl-RL
|
689eec3b3b27e121aa45d2793e411f1863f6fc0b
|
[
"MIT"
] | 11
|
2018-10-29T06:50:43.000Z
|
2022-03-28T14:26:09.000Z
|
loadbalanceRL/utils/exceptions.py
|
fqzhou/LoadBalanceControl-RL
|
689eec3b3b27e121aa45d2793e411f1863f6fc0b
|
[
"MIT"
] | 1
|
2022-03-01T13:46:25.000Z
|
2022-03-01T13:46:25.000Z
|
loadbalanceRL/utils/exceptions.py
|
fqzhou/LoadBalanceControl-RL
|
689eec3b3b27e121aa45d2793e411f1863f6fc0b
|
[
"MIT"
] | 6
|
2019-02-05T20:01:53.000Z
|
2020-09-04T12:30:00.000Z
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Definition of all Rainman2 exceptions
"""
__author__ = 'Ari Saha (arisaha@icloud.com), Mingyang Liu(liux3941@umn.edu)'
__date__ = 'Wednesday, February 14th 2018, 11:38:08 am'
class FileOpenError(IOError):
"""
Exception raised when a file couldn't be opened.
"""
pass
class AgentNotSupported(Exception):
"""
Exception raised when agent is not valid for requested algorithm
"""
pass
class AgentMethodNotImplemented(NotImplementedError):
"""
Exception raised when trying to access a private method of an agent
that is not implemented yet.
"""
pass
class AlgorithmNotImplemented(NotImplementedError):
"""
Exception raised when trying to access algorithm that is not
implemented yet.
"""
pass
class AlgorithmMethodNotImplemented(NotImplementedError):
"""
Exception raised when trying to access a private method of an algorithm
that is not implemented yet.
"""
pass
class ClientNotImplemented(NotImplementedError):
"""
Exception raised when trying to access client that is not
implemented yet.
"""
pass
class ClientMethodNotImplemented(NotImplementedError):
"""
Exception raised when trying to access a private method of a client
that is not implemented yet.
"""
pass
class EnvironmentNotImplemented(NotImplementedError):
"""
Exception raised when trying to access Environment that is not
implemented yet.
"""
pass
class EnvironmentMethodNotImplemented(NotImplementedError):
"""
Exception raised when trying to access a private method of an environment
that is not implemented yet.
"""
pass
class ExternalServerError(Exception):
"""
Exception raised when external server is not accessible
"""
pass
| 21.079545
| 77
| 0.698652
| 1,595
| 0.859838
| 0
| 0
| 0
| 0
| 0
| 0
| 1,177
| 0.634501
|
b826697289acc6bb7f13171d32f3b15f39b8d6bc
| 411
|
py
|
Python
|
mundo-1/ex-014.py
|
guilhermesm28/python-curso-em-video
|
50ab4e76b1903e62d4daa579699c5908329b26c8
|
[
"MIT"
] | null | null | null |
mundo-1/ex-014.py
|
guilhermesm28/python-curso-em-video
|
50ab4e76b1903e62d4daa579699c5908329b26c8
|
[
"MIT"
] | null | null | null |
mundo-1/ex-014.py
|
guilhermesm28/python-curso-em-video
|
50ab4e76b1903e62d4daa579699c5908329b26c8
|
[
"MIT"
] | null | null | null |
# Write a program that reads a temperature in degrees Celsius and converts it to degrees Fahrenheit.
print('-' * 100)
print('{: ^100}'.format('EXERCÍCIO 014 - CONVERSOR DE TEMPERATURAS'))
print('-' * 100)
c = float(input('Informe a temperatura em ºC: '))
f = ((9 * c) / 5) + 32
print(f'A temperatura de {c:.2f}ºC corresponde a {f:.2f}ºF.')
print('-' * 100)
input('Pressione ENTER para sair...')
| 27.4
| 111
| 0.6691
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 292
| 0.703614
|
b828874e2b78ad751bb04188c59615f7f159fd1a
| 848
|
py
|
Python
|
access_apps/controllers/main.py
|
aaltinisik/access-addons
|
933eef8b7abd5d2ac0b07b270271cb5aed3b23b6
|
[
"MIT"
] | null | null | null |
access_apps/controllers/main.py
|
aaltinisik/access-addons
|
933eef8b7abd5d2ac0b07b270271cb5aed3b23b6
|
[
"MIT"
] | null | null | null |
access_apps/controllers/main.py
|
aaltinisik/access-addons
|
933eef8b7abd5d2ac0b07b270271cb5aed3b23b6
|
[
"MIT"
] | 1
|
2021-02-15T03:14:52.000Z
|
2021-02-15T03:14:52.000Z
|
from odoo import SUPERUSER_ID, http
from odoo.http import request
from odoo.addons.web_settings_dashboard.controllers.main import WebSettingsDashboard
class WebSettingsDashboardCustom(WebSettingsDashboard):
@http.route("/web_settings_dashboard/data", type="json", auth="user")
def web_settings_dashboard_data(self, **kw):
has_access_to_apps = request.env["res.users"].has_group(
"access_apps.group_allow_apps"
)
# issue: due to unknown reason has_group is always invoked with superuser as uid param in new API
# has_access_to_apps = request.env.user.has_group('access_apps.group_allow_apps')
request.env.uid = SUPERUSER_ID
res = super(WebSettingsDashboardCustom, self).web_settings_dashboard_data(**kw)
res["has_access_to_apps"] = has_access_to_apps
return res
| 44.631579
| 105
| 0.740566
| 693
| 0.817217
| 0
| 0
| 633
| 0.746462
| 0
| 0
| 281
| 0.331368
|
b829ed55de73d723e9907e52986b8d92ed93231d
| 686
|
py
|
Python
|
dev/test.py
|
SmartBadge/SmartBadge
|
7bddc1ec230bcf5fa6185999b0b0c0e448528629
|
[
"MIT"
] | null | null | null |
dev/test.py
|
SmartBadge/SmartBadge
|
7bddc1ec230bcf5fa6185999b0b0c0e448528629
|
[
"MIT"
] | null | null | null |
dev/test.py
|
SmartBadge/SmartBadge
|
7bddc1ec230bcf5fa6185999b0b0c0e448528629
|
[
"MIT"
] | null | null | null |
import game as g
import time as t
def start_game():
r.add_sprite(player1, 0,0)
r.add_sprite(player2, 5,0)
r.add_sprite(ball, 3,3)
r.print()
def wait(length_of_time):
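    # Busy-wait until length_of_time seconds have elapsed.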
inital = t.time()
x = False
while(not(x)):
current = t.time()
x = (current - inital) > length_of_time
def move_ball():
    r.move_sprite(ball, 1, 0)
r = g.Game(6,6, debugger = False)
player1 = g.Sprite("Player", 1, 2)
player2 = g.Sprite("Player", 1, 2)
ball = g.Sprite("ball", 1, 1)
start_game()
wait(4)
r.move_sprite(ball,-1,-1)
r.move_sprite(player1, 0,-2)
r.move_sprite(player1, 0, 3)
r.print()
while(ball.x < 7):
r.move_sprite(ball, 1,1)
print("oi")
wait(4)
| 17.589744
| 47
| 0.610787
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 26
| 0.037901
|
b82a954625c33b4891411d888f3fa383b4a7acc9
| 662
|
py
|
Python
|
itermembers.py
|
hanshuaigithub/pyrogram_project
|
539ebbfa00d5381b4495450580f9c77ee8be9d11
|
[
"MIT"
] | null | null | null |
itermembers.py
|
hanshuaigithub/pyrogram_project
|
539ebbfa00d5381b4495450580f9c77ee8be9d11
|
[
"MIT"
] | null | null | null |
itermembers.py
|
hanshuaigithub/pyrogram_project
|
539ebbfa00d5381b4495450580f9c77ee8be9d11
|
[
"MIT"
] | null | null | null |
from pyrogram import Client
import json
api_id = 2763716
api_hash = "d4c2d2e53efe8fbb71f0d64deb84b3da"
app = Client("+639277144517", api_id, api_hash)
target = "cnsex8" # Target channel/supergroup sigui588 cnsex8
with app:
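    # iter_chat_members() yields members lazily; materialise the generator so it can be counted and indexed.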
    members = list(app.iter_chat_members(target))
    print(f"Channel member count: {len(members)}")
members_arr = []
for i in range(0,len(members)):
member = members[i]
members_arr.append({'id':member.user.id, 'first_name':member.user.first_name})
members_json_str = json.dumps(members_arr)
members_open = open('members.json', 'w')
members_open.write(members_json_str)
members_open.close()
| 26.48
| 86
| 0.706949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 174
| 0.26284
|
b82b18f5c487a5e8f40d5acca12f69514df44f14
| 590
|
py
|
Python
|
FisherExactTest/__version__.py
|
Ae-Mc/Fisher
|
166e3ac68e304ed7418393d6a7717dd6f7032c15
|
[
"MIT"
] | null | null | null |
FisherExactTest/__version__.py
|
Ae-Mc/Fisher
|
166e3ac68e304ed7418393d6a7717dd6f7032c15
|
[
"MIT"
] | null | null | null |
FisherExactTest/__version__.py
|
Ae-Mc/Fisher
|
166e3ac68e304ed7418393d6a7717dd6f7032c15
|
[
"MIT"
] | null | null | null |
__title__ = "FisherExactTest"
__version__ = "1.0.1"
__author__ = "Ae-Mc"
__author_email__ = "ae_mc@mail.ru"
__description__ = "Two tailed Fisher's exact test wrote in pure Python"
__url__ = "https://github.com/Ae-Mc/Fisher"
__classifiers__ = [
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Utilities"
]
| 34.705882
| 71
| 0.661017
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 429
| 0.727119
|
b82b81bc5dbddba7f6dc9e8f6bf26affa5968f16
| 875
|
py
|
Python
|
mimosa/pylib/mimosa_reader.py
|
rafelafrance/traiter_mimosa
|
7a248b610747d5d0e5ce5473953cbdc90d336aae
|
[
"MIT"
] | null | null | null |
mimosa/pylib/mimosa_reader.py
|
rafelafrance/traiter_mimosa
|
7a248b610747d5d0e5ce5473953cbdc90d336aae
|
[
"MIT"
] | null | null | null |
mimosa/pylib/mimosa_reader.py
|
rafelafrance/traiter_mimosa
|
7a248b610747d5d0e5ce5473953cbdc90d336aae
|
[
"MIT"
] | null | null | null |
"""Parse PDFs about mimosas."""
from tqdm import tqdm
from . import mimosa_pipeline
from . import sentence_pipeline
from .parsed_data import Datum
def read(args):
with open(args.in_text) as in_file:
lines = in_file.readlines()
if args.limit:
lines = lines[: args.limit]
nlp = mimosa_pipeline.pipeline()
sent_nlp = sentence_pipeline.pipeline()
data = []
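    # Split each line into sentences, run the trait pipeline on each sentence,
    # and shift trait offsets back into line coordinates.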
for ln in tqdm(lines):
ln = ln.strip()
sent_doc = sent_nlp(ln)
for sent in sent_doc.sents:
doc = nlp(sent.text)
traits = []
for ent in doc.ents:
trait = ent._.data
trait["start"] += sent.start_char
trait["end"] += sent.start_char
traits.append(trait)
data.append(Datum(text=sent.text, traits=traits, reject=doc._.reject))
return data
| 25
| 82
| 0.584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 43
| 0.049143
|
b82ba735b06701323afbbc1adb2108b231b98638
| 1,647
|
py
|
Python
|
CxMetrics/calcMetrics.py
|
Danielhiversen/pyCustusx
|
5a7fca51d885ad30f4db46ab725485d86fb2d17a
|
[
"MIT"
] | null | null | null |
CxMetrics/calcMetrics.py
|
Danielhiversen/pyCustusx
|
5a7fca51d885ad30f4db46ab725485d86fb2d17a
|
[
"MIT"
] | null | null | null |
CxMetrics/calcMetrics.py
|
Danielhiversen/pyCustusx
|
5a7fca51d885ad30f4db46ab725485d86fb2d17a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 24 11:39:42 2015
@author: dahoiv
"""
import numpy as np
def loadMetrics(filepath):
mr_points=dict()
us_points=dict()
with open(filepath, 'r') as file :
for line in file.readlines():
            line = line.replace('"', '')
data= line.split()
if not "pointMetric" in data[0]:
continue
key= data[1][-2:]
point = data[6:9]
if "_mr_" in data[1] and not "us" in data[2].lower():
mr_points[key]=[float(point[0]),float(point[1]),float(point[2])]
if "_us_" in data[1] and "us" in data[2].lower():
us_points[key]=[float(point[0]),float(point[1]),float(point[2])]
return (mr_points,us_points)
def calcDist(mr_points,us_points):
k=0
dist=[]
for key in mr_points.keys():
if not key in us_points.keys():
print key, " missing in us"
continue
diff = np.array(mr_points[key])-np.array(us_points[key])
dist.append((diff[0]**2 +diff[1]**2 +diff[2]**2)**0.5)
        print(key, dist[-1])
k=k+1
print "mean; ", np.mean(dist)
print "var: ", np.var(dist)
if __name__ == '__main__':
filePath1="/home/dahoiv/disk/data/brainshift/079_Tumor.cx3/Logs/metrics_a.txt"
(mr_points_1,us_points_1)=loadMetrics(filePath1)
calcDist(mr_points_1,us_points_1)
filePath2="/home/dahoiv/disk/data/brainshift/079_Tumor.cx3/Logs/metrics_b.txt"
(mr_points_2,us_points_2)=loadMetrics(filePath2)
calcDist(mr_points_2,us_points_2)
| 32.294118
| 82
| 0.571342
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 300
| 0.182149
|
b82dae5c13359feb72d2a0825f3801687d516058
| 118
|
py
|
Python
|
twodspec/extern/__init__.py
|
hypergravity/songcn
|
e2b071c932720d02e5f085884c83c46baba7802d
|
[
"MIT"
] | null | null | null |
twodspec/extern/__init__.py
|
hypergravity/songcn
|
e2b071c932720d02e5f085884c83c46baba7802d
|
[
"MIT"
] | null | null | null |
twodspec/extern/__init__.py
|
hypergravity/songcn
|
e2b071c932720d02e5f085884c83c46baba7802d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__all__ = ['interpolate', 'polynomial', 'SmoothSpline']
from .interpolate import SmoothSpline
| 29.5
| 55
| 0.70339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 62
| 0.525424
|
b82f6fabf22a5cbcfa7dd2e7ea076e9e772feb3f
| 3,286
|
py
|
Python
|
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Weights/Correlations/Transport/tube.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Weights/Correlations/Transport/tube.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Weights/Correlations/Transport/tube.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
## @ingroup Methods-Weights-Correlations-Tube_Wing
# tube.py
#
# Created: Jan 2014, A. Wendorff
# Modified: Feb 2014, A. Wendorff
# Feb 2016, E. Botero
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from SUAVE.Core import Units
# ----------------------------------------------------------------------
# Tube
# ----------------------------------------------------------------------
## @ingroup Methods-Weights-Correlations-Tube_Wing
def tube(vehicle, fuse, wt_wing, wt_propulsion):
""" Calculate the weight of a fuselage in the state tube and wing configuration
Assumptions:
fuselage in a standard wing and tube configuration
Source:
N/A
Inputs:
fuse.areas.wetted - fuselage wetted area [meters**2]
fuse.differential_pressure- Maximum fuselage pressure differential [Pascal]
fuse.width - width of the fuselage [meters]
fuse.heights.maximum - height of the fuselage [meters]
fuse.lengths.total - length of the fuselage [meters]
vehicle.envelope.limit_load - limit load factor at zero fuel weight of the aircraft [dimensionless]
vehicle.mass_properties.max_zero_fuel - zero fuel weight of the aircraft [kilograms]
wt_wing - weight of the wing of the aircraft [kilograms]
wt_propulsion - weight of the entire propulsion system of the aircraft [kilograms]
vehicle.wings.main_wing.chords.root - wing root chord [meters]
Outputs:
weight - weight of the fuselage [kilograms]
Properties Used:
N/A
"""
# unpack inputs
diff_p = fuse.differential_pressure / (Units.force_pound / Units.ft ** 2) # Convert Pascals to lbs/ square ft
width = fuse.width / Units.ft # Convert meters to ft
height = fuse.heights.maximum / Units.ft # Convert meters to ft
# setup
length = fuse.lengths.total - vehicle.wings.main_wing.chords.root / 2.
length = length / Units.ft # Convert meters to ft
weight = (vehicle.mass_properties.max_zero_fuel - wt_wing - wt_propulsion) / Units.lb # Convert kg to lbs
area = fuse.areas.wetted / Units.ft ** 2 # Convert square meters to square ft
# process
# Calculate fuselage indices
I_p = 1.5 * 10 ** -3. * diff_p * width
I_b = 1.91 * 10 ** -4. * vehicle.envelope.limit_load * weight * length / height ** 2.
if I_p > I_b:
I_f = I_p
else:
I_f = (I_p ** 2. + I_b ** 2.) / (2. * I_b)
    # Calculate the fuselage weight
fuselage_weight = ((1.051 + 0.102 * I_f) * area) * Units.lb # Convert from lbs to kg
return fuselage_weight
| 45.013699
| 123
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,411
| 0.733719
|
b82ff818b8e67f8cae3f7360326180bd7e14f756
| 3,837
|
py
|
Python
|
Dependencies/02_macOS/40_gtk+/x64/lib/gobject-introspection/giscanner/annotationmain.py
|
bognikol/Eleusis
|
ee518ede31893689eb6d3c5539e0bd757aeb0294
|
[
"MIT"
] | 4
|
2019-05-31T19:55:23.000Z
|
2020-10-27T10:00:32.000Z
|
Dependencies/02_macOS/40_gtk+/x64/lib/gobject-introspection/giscanner/annotationmain.py
|
bognikol/Eleusis
|
ee518ede31893689eb6d3c5539e0bd757aeb0294
|
[
"MIT"
] | null | null | null |
Dependencies/02_macOS/40_gtk+/x64/lib/gobject-introspection/giscanner/annotationmain.py
|
bognikol/Eleusis
|
ee518ede31893689eb6d3c5539e0bd757aeb0294
|
[
"MIT"
] | 3
|
2019-04-29T14:09:38.000Z
|
2020-10-27T10:00:33.000Z
|
# -*- Mode: Python -*-
# GObject-Introspection - a framework for introspecting GObject libraries
# Copyright (C) 2010 Johan Dahlin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import optparse
import codecs
from contextlib import contextmanager
from giscanner import message
from giscanner.annotationparser import GtkDocCommentBlockParser, GtkDocCommentBlockWriter
from giscanner.scannermain import (get_preprocessor_option_group,
create_source_scanner,
process_packages)
@contextmanager
def encode_stdout(encoding):
"""Force stdout into a specific encoding."""
# Python 2 does not encode stdout writes so wrap it with 'encoding' encoded writer.
# Python 3 uses a io.TextIOBase wrapped stdout with the system default encoding.
# Re-wrap the underlying buffer with a new writer with the given 'encoding'.
# See: https://docs.python.org/3/library/sys.html#sys.stdout
old_stdout = sys.stdout
if sys.version_info.major < 3:
binary_stdout = sys.stdout
else:
binary_stdout = sys.stdout.buffer
sys.stdout = codecs.getwriter(encoding)(binary_stdout)
yield
sys.stdout = old_stdout
def annotation_main(args):
parser = optparse.OptionParser('%prog [options] sources')
group = optparse.OptionGroup(parser, "Tool modes, one is required")
group.add_option("-e", "--extract",
action="store_true", dest="extract",
help="Extract annotations from the input files")
parser.add_option_group(group)
group = get_preprocessor_option_group(parser)
group.add_option("-L", "--library-path",
action="append", dest="library_paths", default=[],
help="directories to search for libraries")
group.add_option("", "--pkg",
action="append", dest="packages", default=[],
help="pkg-config packages to get cflags from")
parser.add_option_group(group)
options, args = parser.parse_args(args)
if not options.extract:
raise SystemExit("ERROR: Nothing to do")
if options.packages:
process_packages(options, options.packages)
logger = message.MessageLogger.get(namespace=None)
ss = create_source_scanner(options, args)
if options.extract:
parser = GtkDocCommentBlockParser()
writer = GtkDocCommentBlockWriter(indent=False)
blocks = parser.parse_comment_blocks(ss.get_comments())
with encode_stdout('utf-8'):
print('/' + ('*' * 60) + '/')
print('/* THIS FILE IS GENERATED DO NOT EDIT */')
print('/' + ('*' * 60) + '/')
print('')
for block in sorted(blocks.values()):
print(writer.write(block))
print('')
print('')
print('/' + ('*' * 60) + '/')
print('/* THIS FILE IS GENERATED DO NOT EDIT */')
print('/' + ('*' * 60) + '/')
return 0
| 36.542857
| 89
| 0.65963
| 0
| 0
| 644
| 0.167839
| 660
| 0.172009
| 0
| 0
| 1,600
| 0.416992
|
b830ed284183da0f588ffc8416e532df6cb6e5aa
| 1,219
|
py
|
Python
|
src/tools/json2db.py
|
chobocho/ChoboMemo2
|
d3883e20d7c69c48477d1178120e0e32c062b27f
|
[
"MIT"
] | null | null | null |
src/tools/json2db.py
|
chobocho/ChoboMemo2
|
d3883e20d7c69c48477d1178120e0e32c062b27f
|
[
"MIT"
] | null | null | null |
src/tools/json2db.py
|
chobocho/ChoboMemo2
|
d3883e20d7c69c48477d1178120e0e32c062b27f
|
[
"MIT"
] | null | null | null |
import os
import sys
import json
from manager import dbmanager
def loadJson(filename):
print("loadJson: " + filename)
memoList = []
try:
if os.path.isfile(filename):
file = open(filename, 'r', encoding="UTF-8")
lines = file.readlines()
file.close()
idx = 0
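            # Skip the first line; each remaining line holds one JSON-encoded memo record.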
for line in lines[1:]:
memo = json.loads(line)
idx += 1
item = {}
item['id'] = memo["id"]
item['memo'] = memo["memo"]
item['index'] = str(idx)
memoList.append(item)
print("Success to load " + filename)
return memoList
except:
print("Loading failed:" + filename)
return []
def save2DB(data, db_name):
db = dbmanager.DBManager(db_name)
print(len(data))
# for item in data:
# db.insert([item['id'], item['memo']])
# db.printDB()
def main(filenames):
json_file = filenames[0]
db_file = filenames[1]
data = loadJson(json_file)
save2DB(data, db_file)
if __name__ == '__main__':
if len(sys.argv) < 3:
print("Usage: json2db json_file db_file")
else:
main(sys.argv[1:])
| 23
| 56
| 0.525021
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 203
| 0.16653
|
b8317e86fff68e0107933de518fdf61bc7534d00
| 171
|
py
|
Python
|
Configuration/ProcessModifiers/python/trackingMkFitTobTecStep_cff.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
Configuration/ProcessModifiers/python/trackingMkFitTobTecStep_cff.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
Configuration/ProcessModifiers/python/trackingMkFitTobTecStep_cff.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
# This modifier replaces the default pattern recognition with mkFit for tobTecStep
trackingMkFitTobTecStep = cms.Modifier()
| 34.2
| 87
| 0.836257
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 87
| 0.508772
|
b8327398b4c50fa047db432d9765d37a5dd0095d
| 3,772
|
py
|
Python
|
config/config.py
|
rossja/Docker-Minecraft-Overviewer
|
bb2285f5af723a74b1365bbcbe284b9e5ce85245
|
[
"MIT"
] | 12
|
2019-12-14T13:58:44.000Z
|
2022-03-12T10:43:43.000Z
|
config/config.py
|
rossja/Docker-Minecraft-Overviewer
|
bb2285f5af723a74b1365bbcbe284b9e5ce85245
|
[
"MIT"
] | 2
|
2019-02-04T09:46:10.000Z
|
2019-02-05T10:05:56.000Z
|
config/config.py
|
rossja/Docker-Minecraft-Overviewer
|
bb2285f5af723a74b1365bbcbe284b9e5ce85245
|
[
"MIT"
] | 5
|
2020-01-29T20:38:35.000Z
|
2021-12-18T19:56:49.000Z
|
# My config.py script for overviewer:
worlds["pudel"] = "/tmp/server/world/"
worlds["pudel_nether"] = "/tmp/server/world_nether/"
texturepath = "/tmp/overviewer/client.jar"
processes = 2
outputdir = "/tmp/export/"
my_cave = [Base(), EdgeLines(), Cave(only_lit=True), DepthTinting()]
my_nowater = [Base(), EdgeLines(), NoFluids()]
defaultzoom = 5
my_crop = (-1200, -1600, 900, 400)
def playerIcons(poi):
if poi['id'] == 'Player':
poi['icon'] = "https://mc.marc.tv/assets/steve.png"
return "Last known location for %s" % poi['EntityId']
def playerSpawns(poi):
if poi['id']=='PlayerSpawn':
poi['icon'] = "https://mc.marc.tv/assets/bed.png"
return "Spawn for %s" % poi['EntityId']
def signFilter(poi):
if poi['id'] == 'Sign' or poi['id'] == 'minecraft:sign':
poi['icon'] = "https://mc.marc.tv/assets/sign.png"
text = "\n".join([poi['Text1'], poi['Text2'], poi['Text3'], poi['Text4']])
        if '...' in text:
            return text.replace('...', '')
def chestFilter(poi):
if poi['id'] == 'Chest' or poi['id'] == 'minecraft:chest':
return "Chest with %d items" % len(poi['Items'])
thingsToMaker = [
dict(name="Players", filterFunction=playerIcons),
dict(name="Beds", filterFunction=playerSpawns),
dict(name="Signs", filterFunction=signFilter),
#dict(name="Chests", filterFunction=chestFilter)
]
renders["day_complete_smooth"] = {
'world': 'pudel',
'title': 'Day',
'rendermode': 'smooth_lighting',
"dimension": "overworld",
'markers': thingsToMaker
}
renders["night_complete"] = {
'world': 'pudel',
'title': 'Night',
'rendermode': 'smooth_night',
"dimension": "overworld",
'markers': thingsToMaker
}
renders["cave_complete"] = {
'world': 'pudel',
'title': 'Cave',
'rendermode': my_cave,
"dimension": "overworld",
'markers': thingsToMaker
}
# Railoverlay
renders["rails"] = {
'world': 'pudel',
'title': 'Rails',
"dimension": "overworld",
'rendermode': [ClearBase(),
MineralOverlay(minerals=[
(66, (255,0,0)),
(27, (255,0,0)),
(28, (255,0,0))
]), EdgeLines()],
"overlay": ["day_complete_smooth","night_complete","cave_complete"]
}
'''
# Pistons and Observer
renders["farms"] = {
'world': 'pudel',
'title': 'Farms',
"dimension": "overworld",
'rendermode': [ClearBase(),
MineralOverlay(minerals=[
(29, (255,0,0)),
(33, (255,0,0)),
(34, (255,0,0)),
(154, (255,0,0)),
(218, (255,0,0))
]), EdgeLines()],
"overlay": ["day_complete_smooth","night_complete","cave_complete"]
}
'''
'''
renders["nether"] = {
"world": "pudel_nether",
"title": "Nether",
"rendermode": "nether",
"dimension": "nether",
'crop': (-200, -200, 200, 200)
}
'''
# Import the Observers
from .observer import MultiplexingObserver, ProgressBarObserver, JSObserver
# Construct the ProgressBarObserver
pbo = ProgressBarObserver()
# Construct a basic JSObserver
jsObserver = JSObserver(outputdir, 30)
# Set the observer to a MultiplexingObserver
observer = MultiplexingObserver(pbo, jsObserver)
'''
renders["day_smooth"] = {
'world': 'pudel',
'title': 'Day',
'rendermode': 'smooth_lighting',
"dimension": "overworld",
'crop': my_crop,
'markers': thingsToMaker
}
renders["night_smooth"] = {
'world': 'pudel',
'title': 'Night',
'rendermode': 'smooth_night',
"dimension": "overworld",
'crop': my_crop,
'markers': thingsToMaker
}
renders["cave"] = {
'world': 'pudel',
'title': 'Cave',
'rendermode': my_cave,
"dimension": "overworld",
'crop': my_crop,
'markers': thingsToMaker
}
'''
| 26.013793
| 82
| 0.593054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,272
| 0.602333
|
b832db34004caeef160a328496546197b3b692d7
| 1,764
|
py
|
Python
|
SurveyManager/survey/models.py
|
javiervar/SurveyManager
|
bbe2ed356654c32586c587f58c609c8ce014e96b
|
[
"MIT"
] | null | null | null |
SurveyManager/survey/models.py
|
javiervar/SurveyManager
|
bbe2ed356654c32586c587f58c609c8ce014e96b
|
[
"MIT"
] | null | null | null |
SurveyManager/survey/models.py
|
javiervar/SurveyManager
|
bbe2ed356654c32586c587f58c609c8ce014e96b
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Encuesta(models.Model):
nombre=models.CharField(max_length=150)
descripcion=models.TextField()
estructura=models.TextField()
fecha = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.nombre)
class Pregunta(models.Model):
descripcion=models.CharField(max_length=150)
encuesta = models.ForeignKey('Encuesta', on_delete=models.CASCADE)
tipo=models.IntegerField(null=True)
numero=models.IntegerField(default=1)
json_id=models.CharField(max_length=50,null=True)
def __str__(self):
return str(self.descripcion)
class Respuesta(models.Model):
valor=models.CharField(max_length=150)
pregunta = models.ForeignKey('Pregunta', on_delete=models.CASCADE)
json_id=models.CharField(max_length=50,null=True)
def __str__(self):
return str(self.valor)
class Carrera(models.Model):
nombre=models.CharField(max_length=150)
generacion = models.ForeignKey('Generacion', on_delete=models.CASCADE,null=True,blank=True)
def __str__(self):
return "%s %s" % (self.nombre, self.generacion)
class Generacion(models.Model):
generacion=models.CharField(max_length=150,null=True,blank=True)
def __str__(self):
return str(self.generacion)
class Alumno(models.Model):
email=models.CharField(max_length=100)
nombre=models.CharField(max_length=100)
apellidos=models.CharField(max_length=100)
carrera = models.ForeignKey('Carrera', on_delete=models.CASCADE)
def __str__(self):
return str(self.nombre)
class RespuestaPregunta(models.Model):
respuesta = models.ForeignKey('Respuesta', on_delete=models.CASCADE)
pregunta = models.ForeignKey('Pregunta', on_delete=models.CASCADE)
alumno=models.ForeignKey('Alumno',on_delete=models.CASCADE,blank=True,null=True)
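# Illustrative query sketch (not part of the original models), relying on Django's
# default reverse accessors (<model>_set) created by the ForeignKey fields above:
#     encuesta = Encuesta.objects.first()
#     for pregunta in encuesta.pregunta_set.all():
#         respuestas = pregunta.respuesta_set.all()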
| 30.413793
| 92
| 0.786848
| 1,692
| 0.959184
| 0
| 0
| 0
| 0
| 0
| 0
| 103
| 0.05839
|
b8361f78932036e9f23fbe61c22ab2ba8ac449f7
| 3,150
|
py
|
Python
|
pythainlp/corpus/__init__.py
|
petetanru/pythainlp
|
83fa999336ce8c7f7b5431fc2fc41c53c5cb7604
|
[
"Apache-2.0"
] | 1
|
2018-10-10T19:01:43.000Z
|
2018-10-10T19:01:43.000Z
|
pythainlp/corpus/__init__.py
|
Khawoat6/pythainlp
|
05979c0ac9a596bb7957fb8a050a32c87ea098e8
|
[
"Apache-2.0"
] | null | null | null |
pythainlp/corpus/__init__.py
|
Khawoat6/pythainlp
|
05979c0ac9a596bb7957fb8a050a32c87ea098e8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import,unicode_literals
from pythainlp.tools import get_path_db,get_path_data
from tinydb import TinyDB,Query
from future.moves.urllib.request import urlopen
from tqdm import tqdm
import requests
import os
import math
from nltk.corpus import names
#__all__ = ["thaipos", "thaiword","alphabet","tone","country","wordnet"]
path_db_=get_path_db()
def get_file(name):
db=TinyDB(path_db_)
temp = Query()
if len(db.search(temp.name==name))>0:
path= get_path_data(db.search(temp.name==name)[0]['file'])
db.close()
if not os.path.exists(path):
download(name)
return path
def download_(url, dst):
"""
@param: url to download file
@param: dst place to put the file
"""
file_size = int(urlopen(url).info().get('Content-Length', -1))
if os.path.exists(dst):
first_byte = os.path.getsize(dst)
else:
first_byte = 0
if first_byte >= file_size:
return file_size
header = {"Range": "bytes=%s-%s" % (first_byte, file_size)}
pbar = tqdm(
total=file_size, initial=first_byte,
unit='B', unit_scale=True, desc=url.split('/')[-1])
req = requests.get(url, headers=header, stream=True)
with(open(get_path_data(dst), 'wb')) as f:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar.update(1024)
pbar.close()
#return file_size
def download(name,force=False):
db=TinyDB(path_db_)
temp = Query()
data=requests.get("https://raw.githubusercontent.com/PyThaiNLP/pythainlp-corpus/master/db.json")
data_json=data.json()
if name in list(data_json.keys()):
temp_name=data_json[name]
print("Download : "+name)
if len(db.search(temp.name==name))==0:
print(name+" "+temp_name['version'])
download_(temp_name['download'],temp_name['file_name'])
db.insert({'name': name, 'version': temp_name['version'],'file':temp_name['file_name']})
else:
            if len(db.search((temp.name==name) & (temp.version==temp_name['version'])))==0:
print("have update")
print("from "+name+" "+db.search(temp.name==name)[0]['version']+" update to "+name+" "+temp_name['version'])
yes_no="y"
if force==False:
yes_no=str(input("y or n : ")).lower()
if "y"==yes_no:
download_(temp_name['download'],temp_name['file_name'])
db.update({'version':temp_name['version']},temp.name==name)
else:
print("re-download")
print("from "+name+" "+db.search(temp.name==name)[0]['version']+" update to "+name+" "+temp_name['version'])
yes_no="y"
if force==False:
yes_no=str(input("y or n : ")).lower()
if "y"==yes_no:
download_(temp_name['download'],temp_name['file_name'])
db.update({'version':temp_name['version']},temp.name==name)
db.close()
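# Illustrative usage sketch (not part of the original module). The corpus name
# below is taken from the commented-out __all__ list above; whether it exists
# depends on the remote db.json index:
#     path = get_file("thaiword")        # downloads on first use, then returns the local path
#     download("thaiword", force=True)   # refresh to the latest version without prompting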
| 40.384615
| 124
| 0.586032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 645
| 0.204762
|
b83a4b8131231e8ffeccb27881d8404fa73c602e
| 649
|
py
|
Python
|
dynamic programming/python/leetcode303_Range_Sum_Query_Immutable.py
|
wenxinjie/leetcode
|
c459a01040c8fe0783e15a16b8d7cca4baf4612a
|
[
"Apache-2.0"
] | null | null | null |
dynamic programming/python/leetcode303_Range_Sum_Query_Immutable.py
|
wenxinjie/leetcode
|
c459a01040c8fe0783e15a16b8d7cca4baf4612a
|
[
"Apache-2.0"
] | null | null | null |
dynamic programming/python/leetcode303_Range_Sum_Query_Immutable.py
|
wenxinjie/leetcode
|
c459a01040c8fe0783e15a16b8d7cca4baf4612a
|
[
"Apache-2.0"
] | null | null | null |
# Given an integer array nums, find the sum of the elements between indices i and j (i ≤ j), inclusive.
# Example:
# Given nums = [-2, 0, 3, -5, 2, -1]
# sumRange(0, 2) -> 1
# sumRange(2, 5) -> -1
# sumRange(0, 5) -> -3
class NumArray:
def __init__(self, nums):
"""
:type nums: List[int]
"""
self.array = [0]
for num in nums:
self.array.append(self.array[-1] + num)
def sumRange(self, i, j):
"""
:type i: int
:type j: int
:rtype: int
"""
return self.array[j+1] - self.array[i]
# Time: O(n)
# Space: O(n)
# Difficulty: easy
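# Minimal usage sketch (not part of the original solution), using the example
# input from the problem statement above:
if __name__ == "__main__":
    na = NumArray([-2, 0, 3, -5, 2, -1])
    assert na.sumRange(0, 2) == 1
    assert na.sumRange(2, 5) == -1
    assert na.sumRange(0, 5) == -3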
| 20.935484
| 103
| 0.497689
| 378
| 0.580645
| 0
| 0
| 0
| 0
| 0
| 0
| 381
| 0.585253
|
b83d0a4d0944019fd7f267fd6043e0bc64496350
| 8,257
|
py
|
Python
|
py/garage/garage/asyncs/messaging/reqrep.py
|
clchiou/garage
|
446ff34f86cdbd114b09b643da44988cf5d027a3
|
[
"MIT"
] | 3
|
2016-01-04T06:28:52.000Z
|
2020-09-20T13:18:40.000Z
|
py/garage/garage/asyncs/messaging/reqrep.py
|
clchiou/garage
|
446ff34f86cdbd114b09b643da44988cf5d027a3
|
[
"MIT"
] | null | null | null |
py/garage/garage/asyncs/messaging/reqrep.py
|
clchiou/garage
|
446ff34f86cdbd114b09b643da44988cf5d027a3
|
[
"MIT"
] | null | null | null |
__all__ = [
'Terminated',
'Unavailable',
'client',
'server',
]
import logging
import time
import curio
import nanomsg as nn
from garage import asyncs
from garage.assertions import ASSERT
from garage.asyncs import futures
from garage.asyncs import queues
LOG = logging.getLogger(__name__)
class Terminated(Exception):
"""Client agent is terminated."""
class Unavailable(Exception):
"""Service is unavailable."""
def _transform_error(exc):
if isinstance(exc, curio.TaskTimeout):
new_exc = Unavailable()
new_exc.__cause__ = exc
return new_exc
elif isinstance(exc, (nn.EBADF, queues.Closed)):
new_exc = Terminated()
new_exc.__cause__ = exc
return new_exc
else:
return exc
async def client(graceful_exit, sockets, request_queue, timeout=None):
"""Act as client-side in the reqrep protocol.
NOTE: Because we want end-to-end functionality (non-raw sockets), a
socket can only handle one request at a time; to overcome this, we
use a pool of sockets.
    In addition to handling requests, this waits for the graceful exit
event and then clean up itself.
When cleaning up, it:
* Close socket so that pump_requests will not send any further
requests.
* Close the queue so that upstream will not enqueue any further
requests.
The requests still in the queue will be "processed", with their
result being set to EBADF, since the socket is closed. This signals
and unblocks all blocked upstream tasks.
"""
for socket in sockets:
ASSERT.equal(socket.options.nn_domain, nn.AF_SP)
ASSERT.equal(socket.options.nn_protocol, nn.NN_REQ)
async def pump_requests(socket):
LOG.info('client: start sending requests to: %s', socket)
while True:
try:
request, response_promise = await request_queue.get()
except queues.Closed:
break
if not response_promise.set_running_or_notify_cancel():
LOG.debug('client: drop request: %r', request)
continue
try:
async with curio.timeout_after(timeout):
await socket.send(request)
with await socket.recv() as message:
response = bytes(message.as_memoryview())
except Exception as exc:
if response_promise.cancelled():
LOG.exception(
'client: err but request is cancelled: %r',
request,
)
else:
response_promise.set_exception(_transform_error(exc))
else:
response_promise.set_result(response)
LOG.info('client: stop sending requests to: %s', socket)
async with asyncs.TaskStack() as stack:
for socket in sockets:
await stack.spawn(pump_requests(socket))
stack.sync_callback(request_queue.close)
for socket in sockets:
stack.sync_callback(socket.close)
await stack.spawn(graceful_exit.wait())
await (await stack.wait_any()).join()
async def server(
graceful_exit,
socket,
request_queue,
timeout=None,
error_handler=None):
"""Act as server-side in the reqrep protocol.
NOTE: error_handler is not asynchronous because you should probably
send back error messages without being blocked indefinitely.
    In addition to handling requests, this waits for the graceful exit
event and then clean up itself.
When cleaning up, it:
* Close socket so that the pump_requests will not recv new requests
and will exit.
* Close the queue so that downstream will not dequeue any request.
The requests still in the queue will be dropped (since socket is
closed, their response cannot be sent back to the client).
"""
ASSERT.equal(socket.options.nn_domain, nn.AF_SP_RAW)
ASSERT.equal(socket.options.nn_protocol, nn.NN_REP)
if error_handler is None:
error_handler = lambda *_: None
async def pump_requests(handlers):
LOG.info('server: start receiving requests from: %s', socket)
while True:
try:
message = await socket.recvmsg()
except nn.EBADF:
break
with message:
response_message = nn.Message()
# NOTE: It is important to set control header in the
# response message from the request so that response can
# be correctly routed back to the right sender.
response_message.adopt_control(*message.disown_control())
request = bytes(message.as_memoryview())
# Enqueue request here rather than in handle_request so that
# pump_requests may apply back pressure to socket.
begin_time = time.perf_counter()
try:
response_future = futures.Future()
async with curio.timeout_after(timeout):
await request_queue.put((
request,
response_future.promise(),
))
except Exception as exc:
await on_error(exc, request, response_message)
continue
await handlers.spawn(handle_request(
begin_time,
request,
response_future,
response_message,
))
LOG.info('server: stop receiving requests from: %s', socket)
async def handle_request(
begin_time, request, response_future, response_message):
if timeout is not None:
remaining_time = timeout - (time.perf_counter() - begin_time)
if remaining_time <= 0:
response_future.cancel()
await on_error(
Unavailable(), request, response_message,
exc_info=False,
)
return
else:
remaining_time = None
try:
async with curio.timeout_after(remaining_time), response_future:
response = await response_future.result()
except Exception as exc:
await on_error(exc, request, response_message)
else:
await send_response(request, response, response_message)
async def on_error(exc, request, response_message, *, exc_info=True):
if isinstance(exc, curio.TaskTimeout):
            # Timeout is very common in distributed systems; whether it
# is an error should be decided at application level, and we
# will just log a warning here.
log = LOG.warning
else:
log = LOG.error
log(
'server: err when processing request: %r',
request, exc_info=exc_info,
)
error_response = error_handler(request, _transform_error(exc))
if error_response is not None:
await send_response(request, error_response, response_message)
async def send_response(request, response, response_message):
response_message.adopt_message(response, len(response), False)
try:
await socket.sendmsg(response_message)
except nn.EBADF:
LOG.debug('server: drop response: %r, %r', request, response)
async def join_handlers(handlers):
async for handler in handlers:
if handler.exception:
LOG.error(
'server: err in request handler',
exc_info=handler.exception,
)
def close_queue():
num_dropped = len(request_queue.close(graceful=False))
if num_dropped:
LOG.info('server: drop %d requests', num_dropped)
async with asyncs.TaskSet() as handlers, asyncs.TaskStack() as stack:
await stack.spawn(join_handlers(handlers))
await stack.spawn(pump_requests(handlers))
stack.sync_callback(close_queue)
stack.sync_callback(socket.close)
await stack.spawn(graceful_exit.wait())
await (await stack.wait_any()).join()
| 33.294355
| 76
| 0.608696
| 129
| 0.015623
| 0
| 0
| 0
| 0
| 7,479
| 0.905777
| 2,217
| 0.268499
|
b83f80c89541762b358261a94161b094315b1f52
| 1,412
|
py
|
Python
|
fasm_utils/segbits.py
|
antmicro/quicklogic-fasm-utils
|
83c867e3269e1186b9bcd71767bb810c82b3905d
|
[
"Apache-2.0"
] | null | null | null |
fasm_utils/segbits.py
|
antmicro/quicklogic-fasm-utils
|
83c867e3269e1186b9bcd71767bb810c82b3905d
|
[
"Apache-2.0"
] | 1
|
2021-06-25T15:38:43.000Z
|
2021-06-25T15:38:43.000Z
|
fasm_utils/segbits.py
|
antmicro/quicklogic-fasm-utils
|
83c867e3269e1186b9bcd71767bb810c82b3905d
|
[
"Apache-2.0"
] | 1
|
2020-05-18T12:04:40.000Z
|
2020-05-18T12:04:40.000Z
|
from collections import namedtuple
Bit = namedtuple('Bit', 'x y isset')
def parsebit(val: str):
"""Parses bit notation for .db files to Bit class.
Parameters
----------
val: str
A string containing .db bit notation, i.e. "!012_23" => (12, 23, False)
Returns
-------
Bit: A named tuple Bit with parsed word column, word bit and value
"""
isset = True
# Default is 0. Skip explicit call outs
if val[0] == '!':
isset = False
val = val[1:]
# 28_05 => 28, 05
seg_word_column, word_bit_n = val.split('_')
return Bit(
x=int(seg_word_column),
y=int(word_bit_n),
isset=isset,
)
def read_segbits_line(line: str):
'''Parses segbits from line.'''
linestrip = line.strip()
if linestrip:
parts = linestrip.split(' ')
assert len(parts) > 1
return parts[0], [parsebit(val) for val in parts[1:]]
def read_segbits_file(filepath: str):
"""Parses bits from the lines of the .db file.
Parameters
----------
f: str
A path to .db file.
Returns
-------
dict of str: Bit: Dictionary containing parsed .db file.
"""
segbits = {}
with open(filepath, 'r') as f:
for l in f:
# CLBLM_L.SLICEL_X1.ALUT.INIT[10] 29_14
name, bits = read_segbits_line(l)
segbits[name] = bits
return segbits
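# Minimal usage sketch (not part of the original module), based on the notation
# described in the docstrings above:
if __name__ == "__main__":
    assert parsebit("!29_14") == Bit(x=29, y=14, isset=False)
    name, bits = read_segbits_line("CLBLM_L.SLICEL_X1.ALUT.INIT[10] 29_14")
    assert name == "CLBLM_L.SLICEL_X1.ALUT.INIT[10]"
    assert bits == [Bit(x=29, y=14, isset=True)]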
| 21.723077
| 79
| 0.563739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 643
| 0.455382
|
b84015aceb9a117ef3d45102bccf99b010e44535
| 927
|
py
|
Python
|
docs/_api/_build/delira/logging/visdom_backend.py
|
gedoensmax/delira
|
545e2ccbe56ed382d300cf3d00317e9a0e3ab5f6
|
[
"BSD-2-Clause"
] | 1
|
2019-10-03T21:00:20.000Z
|
2019-10-03T21:00:20.000Z
|
docs/_api/_build/delira/logging/visdom_backend.py
|
gedoensmax/delira
|
545e2ccbe56ed382d300cf3d00317e9a0e3ab5f6
|
[
"BSD-2-Clause"
] | null | null | null |
docs/_api/_build/delira/logging/visdom_backend.py
|
gedoensmax/delira
|
545e2ccbe56ed382d300cf3d00317e9a0e3ab5f6
|
[
"BSD-2-Clause"
] | null | null | null |
import tensorboardX
from threading import Event
from queue import Queue
from delira.logging.writer_backend import WriterLoggingBackend
class VisdomBackend(WriterLoggingBackend):
"""
A Visdom Logging backend
"""
def __init__(self, writer_kwargs: dict = None,
abort_event: Event = None, queue: Queue = None):
"""
Parameters
----------
writer_kwargs : dict
arguments to initialize a writer
abort_event : :class:`threading.Event`
the abortion event
queue : :class:`queue.Queue`
the queue holding all logging tasks
"""
if writer_kwargs is None:
writer_kwargs = {}
super().__init__(
tensorboardX.visdom_writer.VisdomWriter,
writer_kwargs,
abort_event,
queue)
@property
def name(self):
return "VisdomBackend"
| 23.769231
| 65
| 0.593312
| 788
| 0.850054
| 0
| 0
| 60
| 0.064725
| 0
| 0
| 346
| 0.373247
|
b842118c3400dc6b3842e04f1499ebec381bda43
| 7,706
|
py
|
Python
|
node/substitute.py
|
treverson/coin-buildimage
|
a868250733f65140a6d11a5fbd3b4a7e1509f8d5
|
[
"MIT"
] | 1
|
2018-09-28T11:51:06.000Z
|
2018-09-28T11:51:06.000Z
|
node/substitute.py
|
treverson/coin-buildimage
|
a868250733f65140a6d11a5fbd3b4a7e1509f8d5
|
[
"MIT"
] | null | null | null |
node/substitute.py
|
treverson/coin-buildimage
|
a868250733f65140a6d11a5fbd3b4a7e1509f8d5
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3.5
# vim:ts=4:sw=4:ai:et:si:sts=4
import argparse
import json
import re
import os
import uuid
import shutil
import sys
import requests
filterRe = re.compile(r'(?P<block>^%=(?P<mode>.)?\s+(?P<label>.*?)\s+(?P<value>[^\s\n$]+)(?:\s*.*?)?^(?P<section>.*?)^=%.*?$)', re.M | re.S)
subItemRe = re.compile(r'@_@')
def convertConfig(config):
keys = list(config.keys())
regexes = list(map(lambda x: re.compile(r"@%s@" % x, re.I), keys))
values = list(config.values())
subst = zip(keys, regexes, values)
subst = {key: {'regex': regex, 'value': value}
for (key, regex, value) in subst}
return subst
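# Illustrative sketch (not part of the original script) of what convertConfig()
# returns: one case-insensitive regex per key, keyed by the original name, e.g.
#     convertConfig({"rpcport": 1234})
#     -> {"rpcport": {"regex": re.compile("@rpcport@", re.I), "value": 1234}}
# so substituteFile() below can replace every "@rpcport@" placeholder with "1234".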
def substituteFile(infile, outfile, subst):
if infile == "stdin":
text = sys.stdin.read()
else:
with open(infile, "r") as f:
text = f.read()
print("Subtituting from %s to %s" % (infile, outfile))
for item in subst.values():
regex = item.get('regex', None)
repl = item.get('value', None)
if regex is None or repl is None:
continue
text = regex.sub(str(repl), text)
blocks = filterRe.findall(text)
for (block, mode, label, value, section) in blocks:
subvalue = subst.get(label.lower(), {}).get('value', None)
print(mode, label, value, subvalue)
if mode == '+' or mode == '':
if subvalue is not None and str(subvalue) != value:
section = ""
elif mode == '-':
if subvalue is None or str(subvalue) != value:
section = ""
elif mode == '?':
if subvalue is None:
section = ""
elif mode == '!':
if subvalue is not None:
section = ""
sections = ''
if not isinstance(subvalue, list):
subvalue = [subvalue]
for subval in subvalue:
sections += subItemRe.sub(str(subval), section)
text = text.replace(block, sections)
with open(outfile, "w") as f:
f.write(text)
def copyfile(coin, infile, outfile=None):
if not os.path.exists(infile):
return
if not outfile:
outfile = infile
outfile = os.path.join("build", coin, outfile)
print("Copying %s to %s" % (infile, outfile))
shutil.copyfile(infile, outfile)
parser = argparse.ArgumentParser(description="Substitute in variables")
parser.add_argument('--coin', '-c', required=True, help="Which coin")
parser.add_argument('--nodaemon', '-D', action="store_false", dest="daemon",
help="Don't copy daemon")
parser.add_argument('--pool', '-p', action="store_true",
help="Grab pool wallet")
parser.add_argument('--explorer', '-e', action="store_true",
help="Use explorer")
args = parser.parse_args()
buildDir = os.path.join("build", args.coin)
# First read the config file
with open("config/%s.json" % args.coin, "r") as f:
config = json.load(f)
config = {key.lower(): value for (key, value) in config.items()}
if args.pool:
config["poolnode"] = 1
config.pop("grabwallet", None)
if args.explorer:
config['useexplorer'] = 1
else:
config['useexplorer'] = 0
subst = convertConfig(config)
if args.coin == 'coiniumserv' or args.coin == 'yiimp':
result = requests.get("http://169.254.169.254/latest/meta-data/local-ipv4")
subst.update(convertConfig({"hostip": result.text}))
else:
# Create a config file
outconfig = {
"daemon": 1,
"dns": 1,
"server": 1,
"listen": 1,
"rpcport": config['rpcport'],
"rpcuser": "%srpc" % config['coinname'],
}
if not args.pool:
rpcallowip = "127.0.0.1"
rpcpassword = str(uuid.uuid4())
else:
rpcallowip = ["127.0.0.1", "172.17.0.*"]
rpcpassword = "pool-%s" % args.coin
outconfig["rpcallowip"] = rpcallowip
outconfig["rpcpassword"] = rpcpassword
addnodes = config.get('addnodes', [])
if not isinstance(addnodes, list):
addnodes = [addnodes]
if addnodes:
outconfig['addnode'] = addnodes
# Add the config setting to the mapping
subst.update(convertConfig(outconfig))
conffile = os.path.join(buildDir, "%s.conf" % config['coinname'])
with open(conffile, "w") as f:
for (key, values) in sorted(outconfig.items()):
if not isinstance(values, list):
values = [values]
for value in values:
f.write("%s=%s\n" % (key, value))
# Create the Dockerfile
if args.coin == 'coiniumserv':
infile = "Dockerfile.coiniumserv.in"
elif args.coin == 'yiimp':
infile = "Dockerfile.yiimp.in"
else:
infile = "Dockerfile.in"
outfile = os.path.join(buildDir, "Dockerfile")
substituteFile(infile, outfile, subst)
# Create the node run Dockerfile
infile = "Dockerfile.node.in"
if args.pool:
outfile = os.path.join(buildDir, "Dockerfile.pool")
elif args.explorer:
outfile = os.path.join(buildDir, "Dockerfile.explorer")
else:
outfile = os.path.join(buildDir, "Dockerfile.node")
substituteFile(infile, outfile, subst)
# Create the startup script
if args.coin == 'coiniumserv':
infile = "startup.sh-coiniumserv.in"
elif args.coin == 'yiimp':
infile = "startup.sh-yiimp.in"
else:
infile = "startup.sh.in"
if args.pool:
suffix = "-pool.sh"
else:
suffix = "-node.sh"
outfile = os.path.join(buildDir, "startup%s" % suffix)
substituteFile(infile, outfile, subst)
# Create the ports file
ports = []
port = config.get('p2pport', None)
if port:
ports.append(port)
port = config.get('explorerport', None)
useexplorer = config.get('useexplorer', None)
if port and useexplorer:
ports.append(port)
port = config.get('p2poolport', None)
usep2pool = config.get('usep2pool', None)
if port and usep2pool:
ports.append(port)
port = config.get('poolport', None)
if port:
ports.append(port)
if args.pool:
port = config.get("rpcport", None)
if port:
ports.append(port)
poolports = config.get('stratumports', None)
if poolports:
if not isinstance(poolports, list):
poolports = [poolports]
ports.extend(poolports)
ports = list(map(lambda x: "-p %s:%s" % (x, x), ports))
links = config.get('links', None)
if links:
links = list(map(lambda x: "--link %s" % x, links))
ports.extend(links)
ports = " ".join(ports)
outfile = os.path.join(buildDir, "ports.txt")
with open(outfile, "w") as f:
f.write(ports)
# Copy over the daemon
if args.daemon and args.coin != 'coiniumserv' and args.coin != 'yiimp':
infile = os.path.join("..", "build", "artifacts", config["coinname"],
"linux", config['daemonname'])
copyfile(args.coin, infile, config['daemonname'])
if config.get('installexplorer', False):
# Create the Explorer settings file
infile = "explorer-settings.json.in"
outfile = os.path.join(buildDir, "explorer-settings.json")
substituteFile(infile, outfile, subst)
# Create the Explorer layout template
infile = "explorer-layout.jade.in"
outfile = os.path.join(buildDir, "explorer-layout.jade")
substituteFile(infile, outfile, subst)
# Copy over the mongo init script and the crontab for explorer
copyfile(args.coin, "explorer.mongo")
copyfile(args.coin, "explorer-crontab")
## Copy the nodejs archive
copyfile(args.coin, "build/cache/node-v8.7.0-linux-x64.tar.xz",
"node-v8.7.0-linux-x64.tar.xz")
# Copy the sudoers.d file
copyfile(args.coin, "sudoers-coinnode")
# Copy the coin-cli script
copyfile(args.coin, "coin-cli")
if config.get('copyawscreds', False):
copyfile(args.coin, os.path.expanduser("~/.aws/credentials"),
"aws-credentials")
| 29.189394
| 140
| 0.616922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,061
| 0.267454
|
b842ca4df0f85a27ac428ca98c508bc0fd8473bb
| 379
|
py
|
Python
|
pages/page1.py
|
kalimuthu123/dash-app
|
90bf4c570abb1770ea0f082989e8f97d62b98346
|
[
"MIT"
] | null | null | null |
pages/page1.py
|
kalimuthu123/dash-app
|
90bf4c570abb1770ea0f082989e8f97d62b98346
|
[
"MIT"
] | null | null | null |
pages/page1.py
|
kalimuthu123/dash-app
|
90bf4c570abb1770ea0f082989e8f97d62b98346
|
[
"MIT"
] | null | null | null |
import dash_html_components as html
from utils import Header
def create_layout(app):
# Page layouts
return html.Div(
[
html.Div([Header(app)]),
# page 1
# add your UI here, and callbacks go at the bottom of app.py
# assets and .js go in assets folder
# csv or images go in data folder
],
)
| 25.266667
| 72
| 0.564644
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 151
| 0.398417
|
b8431428845abd267d2447bb2c266f7ad3458a5b
| 318
|
py
|
Python
|
polrev/offices/admin/office_admin.py
|
polrev-github/polrev-django
|
99108ace1a5307b14c3eccb424a9f9616e8c02ae
|
[
"MIT"
] | 1
|
2021-12-10T05:54:16.000Z
|
2021-12-10T05:54:16.000Z
|
polrev/offices/admin/office_admin.py
|
polrev-github/polrev-django
|
99108ace1a5307b14c3eccb424a9f9616e8c02ae
|
[
"MIT"
] | null | null | null |
polrev/offices/admin/office_admin.py
|
polrev-github/polrev-django
|
99108ace1a5307b14c3eccb424a9f9616e8c02ae
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from offices.models import OfficeType, Office
class OfficeTypeAdmin(admin.ModelAdmin):
search_fields = ['title']
admin.site.register(OfficeType, OfficeTypeAdmin)
'''
class OfficeAdmin(admin.ModelAdmin):
search_fields = ['title']
admin.site.register(Office, OfficeAdmin)
'''
| 22.714286
| 48
| 0.77044
| 70
| 0.220126
| 0
| 0
| 0
| 0
| 0
| 0
| 123
| 0.386792
|
b84346e9d501185aa45dba40c444e9fe20860224
| 6,511
|
py
|
Python
|
tests/spec/test_schema_parser.py
|
tclh123/aio-openapi
|
7c63eb628b7735501508aea6c83e458715fb070b
|
[
"BSD-3-Clause"
] | 19
|
2019-03-04T22:50:38.000Z
|
2022-03-02T09:28:17.000Z
|
tests/spec/test_schema_parser.py
|
tclh123/aio-openapi
|
7c63eb628b7735501508aea6c83e458715fb070b
|
[
"BSD-3-Clause"
] | 4
|
2019-03-04T23:03:08.000Z
|
2022-01-16T11:32:54.000Z
|
tests/spec/test_schema_parser.py
|
tclh123/aio-openapi
|
7c63eb628b7735501508aea6c83e458715fb070b
|
[
"BSD-3-Clause"
] | 3
|
2020-05-20T17:43:08.000Z
|
2021-10-06T10:47:41.000Z
|
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from typing import Dict, List
import pytest
from openapi.data.fields import (
as_field,
bool_field,
data_field,
date_time_field,
number_field,
)
from openapi.exc import InvalidSpecException, InvalidTypeException
from openapi.spec import SchemaParser
def test_get_schema_ref():
@dataclass
class MyClass:
str_field: str = data_field(description="String field")
parser = SchemaParser()
schema_ref = parser.get_schema_info(MyClass)
assert schema_ref == {"$ref": "#/components/schemas/MyClass"}
assert "MyClass" in parser.schemas_to_parse
def test_schema2json():
@dataclass
class OtherClass:
str_field: str = data_field(description="String field")
@dataclass
class MyClass:
"""Test data"""
str_field: str = data_field(
required=True, format="uuid", description="String field"
)
int_field: int = data_field(format="uint64", description="Int field")
float_field: float = number_field(description="Float field")
boolean_field: bool = bool_field(description="Bool field")
map_field: Dict[str, int] = data_field(description="Dict field")
free_field: Dict[str, str] = data_field(description="Free field")
datetime_field: datetime = date_time_field(description="Datetime field")
ref_field: OtherClass = field(
metadata={"required": True, "description": "Ref field"}, default=None
)
list_ref_field: List[OtherClass] = data_field(description="List field")
parser = SchemaParser()
schema_json = parser.schema2json(MyClass)
expected = {
"type": "object",
"description": "Test data",
"properties": {
"str_field": {
"type": "string",
"format": "uuid",
"description": "String field",
},
"int_field": {
"type": "integer",
"format": "uint64",
"description": "Int field",
},
"float_field": {
"type": "number",
"format": "float",
"description": "Float field",
},
"boolean_field": {"type": "boolean", "description": "Bool field"},
"map_field": {
"type": "object",
"additionalProperties": {"type": "integer", "format": "int32"},
"description": "Dict field",
},
"free_field": {
"type": "object",
"additionalProperties": {"type": "string"},
"description": "Free field",
},
"datetime_field": {
"type": "string",
"format": "date-time",
"description": "Datetime field",
},
"ref_field": {
"$ref": "#/components/schemas/OtherClass",
"description": "Ref field",
},
"list_ref_field": {
"type": "array",
"items": {"$ref": "#/components/schemas/OtherClass"},
"description": "List field",
},
},
"required": ["str_field", "ref_field"],
"additionalProperties": False,
}
assert schema_json == expected
def test_field2json():
parser = SchemaParser([])
str_json = parser.field2json(str)
int_json = parser.field2json(int)
float_json = parser.field2json(float)
bool_json = parser.field2json(bool)
datetime_json = parser.field2json(datetime)
assert str_json == {"type": "string"}
assert int_json == {"type": "integer", "format": "int32"}
assert float_json == {"type": "number", "format": "float"}
assert bool_json == {"type": "boolean"}
assert datetime_json == {"type": "string", "format": "date-time"}
def test_field2json_format():
parser = SchemaParser([])
str_json = parser.field2json(as_field(str, format="uuid"))
int_json = parser.field2json(as_field(int, format="int64"))
assert str_json == {"type": "string", "format": "uuid"}
assert int_json == {"type": "integer", "format": "int64"}
def test_field2json_invalid_type():
class MyType:
pass
parser = SchemaParser()
with pytest.raises(InvalidTypeException):
parser.field2json(MyType)
def test_field2json_missing_description():
@dataclass
class MyClass:
desc_field: str = data_field(description="Valid field")
no_desc_field: str = data_field()
parser = SchemaParser(validate_docs=True)
with pytest.raises(InvalidSpecException):
parser.schema2json(MyClass)
def test_enum2json():
class MyEnum(Enum):
FIELD_1 = 0
FIELD_2 = 1
FIELD_3 = 2
parser = SchemaParser([])
json_type = parser.field2json(MyEnum)
assert json_type == {"type": "string", "enum": ["FIELD_1", "FIELD_2", "FIELD_3"]}
def test_list2json() -> None:
@dataclass
class MyClass:
list_field: List[str]
parser = SchemaParser()
info = parser.get_schema_info(MyClass)
assert info == {"$ref": "#/components/schemas/MyClass"}
assert len(parser.schemas_to_parse) == 1
parsed = parser.parsed_schemas()
myclass = parsed["MyClass"]
list_json = myclass["properties"]["list_field"]
assert list_json["type"] == "array"
assert list_json["items"] == {"type": "string"}
def test_field2json_again():
@dataclass
class MyClass:
str_field: str = field(
metadata={"format": "uuid", "description": "String field"}
)
int_field: int = number_field(
min_value=0, max_value=100, description="Int field"
)
parser = SchemaParser([])
fields = MyClass.__dataclass_fields__
str_json = parser.field2json(fields["str_field"])
int_json = parser.field2json(fields["int_field"])
assert str_json == {
"type": "string",
"format": "uuid",
"description": "String field",
}
assert int_json == {
"type": "integer",
"format": "int32",
"minimum": 0,
"maximum": 100,
"description": "Int field",
}
def test_non_string_keys():
@dataclass
class MyClass:
map_field: Dict[int, str] = data_field(description="Map field")
parser = SchemaParser()
with pytest.raises(InvalidTypeException):
parser.schema2json(MyClass)
| 30.283721
| 85
| 0.587775
| 1,562
| 0.239902
| 0
| 0
| 1,562
| 0.239902
| 0
| 0
| 1,605
| 0.246506
|
b8437331efb5465038081e91c134bce49f22a468
| 394
|
py
|
Python
|
models/losses/MSE.py
|
johnrachwan123/SNIP-it
|
a578a0693318f261492331298b6602de225fe21f
|
[
"MIT"
] | null | null | null |
models/losses/MSE.py
|
johnrachwan123/SNIP-it
|
a578a0693318f261492331298b6602de225fe21f
|
[
"MIT"
] | null | null | null |
models/losses/MSE.py
|
johnrachwan123/SNIP-it
|
a578a0693318f261492331298b6602de225fe21f
|
[
"MIT"
] | 1
|
2021-11-08T16:34:45.000Z
|
2021-11-08T16:34:45.000Z
|
import torch
from torch import nn
from models.GeneralModel import GeneralModel
class MSE(GeneralModel):
def __init__(self, device, l1_reg=0, lp_reg=0, **kwargs):
super(MSE, self).__init__(device, **kwargs)
self.loss = nn.MSELoss()
def forward(self, output=None, target=None, weight_generator=None, **kwargs):
return self.loss.forward(output, target.float())
| 28.142857
| 81
| 0.700508
| 312
| 0.791878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b845201c7741d5e90f7173c09fe9315087e66057
| 2,046
|
py
|
Python
|
svca_limix/limix/core/covar/test/test_categorical.py
|
DenisSch/svca
|
bd029c120ca8310f43311253e4d7ce19bc08350c
|
[
"Apache-2.0"
] | 65
|
2015-01-20T20:46:26.000Z
|
2021-06-27T14:40:35.000Z
|
svca_limix/limix/core/covar/test/test_categorical.py
|
DenisSch/svca
|
bd029c120ca8310f43311253e4d7ce19bc08350c
|
[
"Apache-2.0"
] | 29
|
2015-02-01T22:35:17.000Z
|
2017-08-07T08:18:23.000Z
|
svca_limix/limix/core/covar/test/test_categorical.py
|
DenisSch/svca
|
bd029c120ca8310f43311253e4d7ce19bc08350c
|
[
"Apache-2.0"
] | 35
|
2015-02-01T17:26:50.000Z
|
2019-09-13T07:06:16.000Z
|
"""LMM testing code"""
import unittest
import scipy as sp
import numpy as np
from limix.core.covar import CategoricalCov
from limix.utils.check_grad import mcheck_grad
class TestCategoricalLowRank(unittest.TestCase):
"""test class for CategoricalCov cov"""
def setUp(self):
sp.random.seed(1)
self.n = 30
categories = sp.random.choice(['a', 'b', 'c'], self.n)
        self.rank = 2
        self.C = CategoricalCov(categories, self.rank)
self.name = 'categorical'
self.C.setRandomParams()
def test_grad(self):
def func(x, i):
self.C.setParams(x)
return self.C.K()
def grad(x, i):
self.C.setParams(x)
return self.C.K_grad_i(i)
x0 = self.C.getParams()
err = mcheck_grad(func, grad, x0)
np.testing.assert_almost_equal(err, 0., decimal = 6)
# def test_param_activation(self):
# self.assertEqual(len(self.C.getParams()), 8)
# self.C.act_X = False
# self.assertEqual(len(self.C.getParams()), 0)
#
# self.C.setParams(np.array([]))
# with self.assertRaises(ValueError):
# self.C.setParams(np.array([0]))
#
# with self.assertRaises(ValueError):
# self.C.K_grad_i(0)
class TestCategoricalFreeForm(unittest.TestCase):
"""test class for Categorical cov"""
def setUp(self):
sp.random.seed(1)
self.n = 30
categories = sp.random.choice(['a', 'b', 'c'], self.n)
        self.rank = None
        self.C = CategoricalCov(categories, self.rank)
self.name = 'categorical'
self.C.setRandomParams()
def test_grad(self):
def func(x, i):
self.C.setParams(x)
return self.C.K()
def grad(x, i):
self.C.setParams(x)
return self.C.K_grad_i(i)
x0 = self.C.getParams()
err = mcheck_grad(func, grad, x0)
np.testing.assert_almost_equal(err, 0., decimal = 6)
if __name__ == '__main__':
unittest.main()
| 26.921053
| 62
| 0.580645
| 1,826
| 0.892473
| 0
| 0
| 0
| 0
| 0
| 0
| 500
| 0.244379
|
b8459a275062134e5f40c7584623582d09c9aa02
| 4,274
|
py
|
Python
|
code/stephen/005/005.py
|
Stephen0910/python-practice-for-game-tester
|
e17b2666d18a51e5bff31ad0355ad4a6775191a4
|
[
"MIT"
] | 29
|
2019-03-07T03:03:42.000Z
|
2021-12-25T04:55:58.000Z
|
code/stephen/005/005.py
|
Stephen0910/python-practice-for-game-tester
|
e17b2666d18a51e5bff31ad0355ad4a6775191a4
|
[
"MIT"
] | null | null | null |
code/stephen/005/005.py
|
Stephen0910/python-practice-for-game-tester
|
e17b2666d18a51e5bff31ad0355ad4a6775191a4
|
[
"MIT"
] | 19
|
2019-03-11T02:40:37.000Z
|
2021-09-24T08:57:04.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/3/13 0013 3:44 PM
# @Author : Stephen
# @Site :
# @File : 005.py
# @Purpose :
# @Software : PyCharm
# @Copyright: (c) Stephen 2019
# @Licence : <your licence>
import os, re, time
from pyecharts import Line
"""
配置游戏包名和名字
"""
package_name = "archery.elite.shooting.free.game.android" # 配置测试包名
game_name = "游戏名字"
class Phone():
def __init__(self):
self.mem = os.popen("adb shell dumpsys meminfo %s" % package_name)
self.cpu = os.popen("adb shell cat /proc/6410/stat")
for i in self.mem.readlines():
if "MEMINFO" in i:
self.pid_info = i
break
self.mem.close()
try:
self.pid = int(self.pid_info.split(" ")[4])
print("pid:", self.pid)
except:
            raise IOError("Check the adb connection and launch the game")
f = os.popen("adb shell cat /proc/stat")
data = f.readlines()
f.close()
count = 0
for i in data:
if "cpu" in i:
count += 1
else:
count -= 1
break
self.count = count
print("进程数:", self.count)
def cpu_test(self):
"""
        Measure CPU usage; the overall figure is scaled by the number of cores.
:return:
"""
def cpu_time():
f = os.popen("adb shell cat /proc/stat")
data = f.readlines()
f.close()
time_list = map(lambda x: int(x), data[0].split(" ")[2:-1])
return sum(time_list)
def thread_time():
z = os.popen("adb shell cat /proc/%s/stat" % self.pid)
data = z.readlines()[0].split(" ")
z.close()
processCPUtime = sum(map(lambda x: int(x), [data[13], data[14], data[15], data[16]]))
return processCPUtime
cpu_time1, thread_time1 = cpu_time(), thread_time()
cpu_time2, thread_time2 = cpu_time(), thread_time()
cpu_usage = 100 * (thread_time2 - thread_time1) / (cpu_time2 - cpu_time1)
print(cpu_usage)
return cpu_usage * self.count
def total_test(self, test_time, duration):
"""
        Measure PSS and CPU usage.
        :param test_time: test duration in seconds
        :param duration: refresh interval between samples
        :return: lists of PssTotal values, timestamps and CPU values
"""
i = 0
time_init = int(time.time())
time_end = time_init + test_time
current_time = int(time.time())
psslist, time_list, cpu_list = [], [], []
while current_time < time_end:
t = os.popen("adb shell dumpsys meminfo %s" % self.pid)
content = t.readlines()
t.close()
for item in content:
if "TOTAL" in item:
pss_info = item
break
cpu_info = float("%.2f" % self.cpu_test())
pss = float(re.findall("\d+\d|\d", pss_info)[0]) / 1000
psstotal = float("%.2f" % pss)
current_time = int(time.time())
# print ("测试倒计时:%s秒"%(current_time-time_init))
time_test = time.strftime("%H:%M:%S")
# time_test = time.strftime("%Y-%m-%d %H:%M:%S")
print(time_test, "PssTotal=", psstotal, "CPU=", cpu_info)
psslist.append(psstotal)
time_list.append(time_test)
cpu_list.append(cpu_info)
time.sleep(duration)
i += 1
maxlist = sorted(psslist, reverse=True)
average_pss = sum(psslist) / i
print("平均PssTotal", average_pss)
print("最高PssTotal", maxlist[0])
print("最低PSSTotal", maxlist[-1])
return [psslist, time_list, cpu_list]
def graphic(self, test_time=600, duration=2):
"""
        Plot the results and run the measurement.
        :param test_time: test duration in seconds
        :param duration: refresh interval
:return:
"""
pss_list = self.total_test(test_time=test_time, duration=duration)
attr = pss_list[1]
v1 = pss_list[0]
v2 = pss_list[2]
line = Line(game_name)
line.add("PSS_total(M)", attr, v1, mark_point=["max"])
line.add("CPU(%)", attr, v2, mark_point=["max"])
line.render()
p = Phone()
p.graphic(20, 1)
# p.cpu_test()
# z = os.popen("adb shell cat /proc/15402/stat")
# print(z.readlines())
| 30.312057
| 97
| 0.523631
| 3,918
| 0.874554
| 0
| 0
| 0
| 0
| 0
| 0
| 1,353
| 0.302009
|
b846bfa9679bd871993a1750e2cf6c621e13bfac
| 13,470
|
py
|
Python
|
sdk/opendp/smartnoise/synthesizers/pytorch/nn/dpctgan.py
|
Tecnarca/whitenoise-system
|
9dfc1425bca77f6e30afe1eea253a6b580bfa847
|
[
"MIT"
] | 1
|
2021-12-30T15:21:54.000Z
|
2021-12-30T15:21:54.000Z
|
sdk/opendp/smartnoise/synthesizers/pytorch/nn/dpctgan.py
|
Tecnarca/whitenoise-system
|
9dfc1425bca77f6e30afe1eea253a6b580bfa847
|
[
"MIT"
] | null | null | null |
sdk/opendp/smartnoise/synthesizers/pytorch/nn/dpctgan.py
|
Tecnarca/whitenoise-system
|
9dfc1425bca77f6e30afe1eea253a6b580bfa847
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
from torch import optim
from torch.nn import functional
import torch.nn as nn
import torch.utils.data
from torch.nn import BatchNorm1d, Dropout, LeakyReLU, Linear, Module, ReLU, Sequential,Sigmoid
from torch.nn import functional as F
from opendp.smartnoise.synthesizers.base import SDGYMBaseSynthesizer
import ctgan
from ctgan.transformer import DataTransformer
from ctgan.conditional import ConditionalGenerator
from ctgan.models import Generator
from ctgan.sampler import Sampler
from ctgan import CTGANSynthesizer
import opacus
from opacus import autograd_grad_sample
from opacus import PrivacyEngine, utils
class Discriminator(Module):
def calc_gradient_penalty(self, real_data, fake_data, device='cpu', pac=10, lambda_=10):
alpha = torch.rand(real_data.size(0) // pac, 1, 1, device=device)
alpha = alpha.repeat(1, pac, real_data.size(1))
alpha = alpha.view(-1, real_data.size(1))
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
disc_interpolates = self(interpolates)
gradients = torch.autograd.grad(
outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size(), device=device),
create_graph=True, retain_graph=True, only_inputs=True
)[0]
gradient_penalty = ((
gradients.view(-1, pac * real_data.size(1)).norm(2, dim=1) - 1
) ** 2).mean() * lambda_
return gradient_penalty
def __init__(self, input_dim, dis_dims, loss, pack):
super(Discriminator, self).__init__()
torch.cuda.manual_seed(0)
torch.manual_seed(0)
dim = input_dim * pack
# print ('now dim is {}'.format(dim))
self.pack = pack
self.packdim = dim
seq = []
for item in list(dis_dims):
seq += [Linear(dim, item), LeakyReLU(0.2), Dropout(0.5)]
dim = item
seq += [Linear(dim, 1)]
if loss == 'cross_entropy':
seq += [Sigmoid()]
self.seq = Sequential(*seq)
def forward(self, input):
assert input.size()[0] % self.pack == 0
return self.seq(input.view(-1, self.packdim))
# custom for calcuate grad_sample for multiple loss.backward()
def _custom_create_or_extend_grad_sample(
param: torch.Tensor, grad_sample: torch.Tensor, batch_dim: int
) -> None:
"""
Create a 'grad_sample' attribute in the given parameter, or accumulate it
if the 'grad_sample' attribute already exists.
This custom code will not work when using optimizer.virtual_step()
"""
#print ("now this happen")
if hasattr(param, "grad_sample"):
param.grad_sample = param.grad_sample + grad_sample
#param.grad_sample = torch.cat((param.grad_sample, grad_sample), batch_dim)
else:
param.grad_sample = grad_sample
class DPCTGAN(CTGANSynthesizer):
"""Differential Private Conditional Table GAN Synthesizer
This code adds Differential Privacy to CTGANSynthesizer from https://github.com/sdv-dev/CTGAN
"""
def __init__(self,
embedding_dim=128,
gen_dim=(256, 256),
dis_dim=(256, 256),
l2scale=1e-6,
batch_size=500,
epochs=300,
pack=1,
log_frequency=True,
disabled_dp=False,
target_delta=None,
sigma = 5,
max_per_sample_grad_norm=1.0,
epsilon = 1,
verbose=True,
loss = 'cross_entropy'):
# CTGAN model specific parameters
self.embedding_dim = embedding_dim
self.gen_dim = gen_dim
self.dis_dim = dis_dim
self.l2scale = l2scale
self.batch_size = batch_size
self.epochs = epochs
self.pack=pack
self.log_frequency = log_frequency
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# opacus parameters
self.sigma = sigma
self.disabled_dp = disabled_dp
self.target_delta = target_delta
self.max_per_sample_grad_norm = max_per_sample_grad_norm
self.epsilon = epsilon
self.epsilon_list = []
self.alpha_list = []
self.loss_d_list = []
self.loss_g_list = []
self.verbose=verbose
self.loss=loss
if self.loss != "cross_entropy":
# Monkeypatches the _create_or_extend_grad_sample function when calling opacus
opacus.supported_layers_grad_samplers._create_or_extend_grad_sample = _custom_create_or_extend_grad_sample
def train(self, data, categorical_columns=None, ordinal_columns=None, update_epsilon=None):
if update_epsilon:
self.epsilon = update_epsilon
self.transformer = DataTransformer()
self.transformer.fit(data, discrete_columns=categorical_columns)
train_data = self.transformer.transform(data)
data_sampler = Sampler(train_data, self.transformer.output_info)
data_dim = self.transformer.output_dimensions
self.cond_generator = ConditionalGenerator(train_data, self.transformer.output_info, self.log_frequency)
self.generator = Generator(
self.embedding_dim + self.cond_generator.n_opt,
self.gen_dim,
data_dim).to(self.device)
discriminator = Discriminator(
data_dim + self.cond_generator.n_opt,
self.dis_dim,
self.loss,
self.pack).to(self.device)
optimizerG = optim.Adam(
self.generator.parameters(), lr=2e-4, betas=(0.5, 0.9), weight_decay=self.l2scale)
optimizerD = optim.Adam(discriminator.parameters(), lr=2e-4, betas=(0.5, 0.9))
privacy_engine = opacus.PrivacyEngine(
discriminator,
batch_size=self.batch_size,
sample_size=train_data.shape[0],
alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
noise_multiplier=self.sigma,
max_grad_norm=self.max_per_sample_grad_norm,
clip_per_layer=True
)
if not self.disabled_dp:
privacy_engine.attach(optimizerD)
one = torch.tensor(1, dtype=torch.float).to(self.device)
mone = one * -1
REAL_LABEL = 1
FAKE_LABEL = 0
criterion = nn.BCELoss()
assert self.batch_size % 2 == 0
mean = torch.zeros(self.batch_size, self.embedding_dim, device=self.device)
std = mean + 1
steps_per_epoch = len(train_data) // self.batch_size
for i in range(self.epochs):
for id_ in range(steps_per_epoch):
fakez = torch.normal(mean=mean, std=std)
condvec = self.cond_generator.sample(self.batch_size)
if condvec is None:
c1, m1, col, opt = None, None, None, None
real = data_sampler.sample(self.batch_size, col, opt)
else:
c1, m1, col, opt = condvec
c1 = torch.from_numpy(c1).to(self.device)
m1 = torch.from_numpy(m1).to(self.device)
fakez = torch.cat([fakez, c1], dim=1)
perm = np.arange(self.batch_size)
np.random.shuffle(perm)
real = data_sampler.sample(self.batch_size, col[perm], opt[perm])
c2 = c1[perm]
fake = self.generator(fakez)
fakeact = self._apply_activate(fake)
real = torch.from_numpy(real.astype('float32')).to(self.device)
if c1 is not None:
fake_cat = torch.cat([fakeact, c1], dim=1)
real_cat = torch.cat([real, c2], dim=1)
else:
real_cat = real
fake_cat = fake
optimizerD.zero_grad()
if self.loss == 'cross_entropy':
y_fake = discriminator(fake_cat)
# print ('y_fake is {}'.format(y_fake))
label_fake = torch.full((int(self.batch_size/self.pack),), FAKE_LABEL, dtype=torch.float, device=self.device)
# print ('label_fake is {}'.format(label_fake))
errD_fake = criterion(y_fake, label_fake)
errD_fake.backward()
optimizerD.step()
# train with real
label_true = torch.full((int(self.batch_size/self.pack),), REAL_LABEL, dtype=torch.float, device=self.device)
y_real = discriminator(real_cat)
errD_real = criterion(y_real, label_true)
errD_real.backward()
optimizerD.step()
loss_d = errD_real + errD_fake
else:
y_fake = discriminator(fake_cat)
mean_fake = torch.mean(y_fake)
mean_fake.backward(one)
y_real = discriminator(real_cat)
mean_real = torch.mean(y_real)
mean_real.backward(mone)
optimizerD.step()
loss_d = -(mean_real - mean_fake)
max_grad_norm = []
for p in discriminator.parameters():
param_norm = p.grad.data.norm(2).item()
max_grad_norm.append(param_norm)
#pen = calc_gradient_penalty(discriminator, real_cat, fake_cat, self.device)
#pen.backward(retain_graph=True)
#loss_d.backward()
#optimizerD.step()
fakez = torch.normal(mean=mean, std=std)
condvec = self.cond_generator.sample(self.batch_size)
if condvec is None:
c1, m1, col, opt = None, None, None, None
else:
c1, m1, col, opt = condvec
c1 = torch.from_numpy(c1).to(self.device)
m1 = torch.from_numpy(m1).to(self.device)
fakez = torch.cat([fakez, c1], dim=1)
fake = self.generator(fakez)
fakeact = self._apply_activate(fake)
if c1 is not None:
y_fake = discriminator(torch.cat([fakeact, c1], dim=1))
else:
y_fake = discriminator(fakeact)
#if condvec is None:
cross_entropy = 0
#else:
# cross_entropy = self._cond_loss(fake, c1, m1)
if self.loss=='cross_entropy':
label_g = torch.full((int(self.batch_size/self.pack),), REAL_LABEL,
dtype=torch.float, device=self.device)
#label_g = torch.full(int(self.batch_size/self.pack,),1,device=self.device)
loss_g = criterion(y_fake, label_g)
loss_g = loss_g + cross_entropy
else:
loss_g = -torch.mean(y_fake) + cross_entropy
optimizerG.zero_grad()
loss_g.backward()
optimizerG.step()
if not self.disabled_dp:
#if self.loss == 'cross_entropy':
# autograd_grad_sample.clear_backprops(discriminator)
#else:
for p in discriminator.parameters():
if hasattr(p, "grad_sample"):
del p.grad_sample
if self.target_delta is None:
self.target_delta = 1/train_data.shape[0]
epsilon, best_alpha = optimizerD.privacy_engine.get_privacy_spent(self.target_delta)
self.epsilon_list.append(epsilon)
self.alpha_list.append(best_alpha)
#if self.verbose:
if not self.disabled_dp:
if self.epsilon < epsilon:
break
self.loss_d_list.append(loss_d)
self.loss_g_list.append(loss_g)
if self.verbose:
print("Epoch %d, Loss G: %.4f, Loss D: %.4f" %
(i + 1, loss_g.detach().cpu(), loss_d.detach().cpu()),
flush=True)
print ('epsilon is {e}, alpha is {a}'.format(e=epsilon, a = best_alpha))
return self.loss_d_list, self.loss_g_list, self.epsilon_list, self.alpha_list
def generate(self, n):
self.generator.eval()
#output_info = self.transformer.output_info
steps = n // self.batch_size + 1
data = []
for i in range(steps):
mean = torch.zeros(self.batch_size, self.embedding_dim)
std = mean + 1
fakez = torch.normal(mean=mean, std=std).to(self.device)
condvec = self.cond_generator.sample_zero(self.batch_size)
if condvec is None:
pass
else:
c1 = condvec
c1 = torch.from_numpy(c1).to(self.device)
fakez = torch.cat([fakez, c1], dim=1)
fake = self.generator(fakez)
fakeact = self._apply_activate(fake)
data.append(fakeact.detach().cpu().numpy())
data = np.concatenate(data, axis=0)
data = data[:n]
return self.transformer.inverse_transform(data, None)
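# Illustrative usage sketch (not part of the original module); `df` is assumed to
# be a pandas DataFrame and the column names are hypothetical:
#     synth = DPCTGAN(epochs=100, epsilon=1.0)
#     synth.train(df, categorical_columns=["gender", "city"])
#     samples = synth.generate(1000)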
| 36.307278
| 129
| 0.566592
| 12,153
| 0.902227
| 0
| 0
| 0
| 0
| 0
| 0
| 1,459
| 0.108315
|
b846da72c1b90ad2cd7931c2938c866fd817d9f6
| 1,814
|
py
|
Python
|
client/client.py
|
MasonDiGi/chat_server
|
2100eb012f8bce359b51e0dc8684a82949ba1c17
|
[
"MIT"
] | null | null | null |
client/client.py
|
MasonDiGi/chat_server
|
2100eb012f8bce359b51e0dc8684a82949ba1c17
|
[
"MIT"
] | null | null | null |
client/client.py
|
MasonDiGi/chat_server
|
2100eb012f8bce359b51e0dc8684a82949ba1c17
|
[
"MIT"
] | null | null | null |
import socket
import threading
import time
# Create constants
HEADER = 64
PORT = 5050
FORMAT = 'utf-8'
DC_MSG = "!DISCONNECT"
SERVER = "localhost"
ADDR = (SERVER, PORT)
# Set up client var and connect to the server
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(ADDR)
erase = '\x1b[1A\x1b[K'
# Handles sending a message to the server
def send(sendMsg):
# Encode and create header
message = sendMsg.encode(FORMAT)
msg_len = len(message)
send_len = str(msg_len).encode(FORMAT)
send_len += b' ' * (HEADER - len(send_len))
client.send(send_len)
# Send the actual text
client.send(message)
# A thread to handle receiving messages broadcast from the server
def recvThread():
try:
# Wait for a message from the server and then decode and print it, while keeping the prompt on the same line
while True:
msg_len = client.recv(HEADER).decode(FORMAT)
if msg_len:
msg_len = int(msg_len)
recvMsg = client.recv(msg_len).decode(FORMAT)
print(f"\n{erase}{recvMsg}\n[{uname}]: ", end="")
except Exception as e:
return e
# Main thread
try:
# Send initial message to set up username
uname = input("Enter a username: ")
send(uname)
# Start handling received messages
RECVTHREAD = threading.Thread(target=recvThread)
RECVTHREAD.start()
# Handle the prompt and sending messages
while True:
msg = input(f"[{uname}]: ")
send(msg)
print("\x1b[A\x1b[K", end="")
if msg == DC_MSG:
break
# Close everything when the client exits (e.g. after ctrl+c or a disconnect)
finally:
send(DC_MSG)
time.sleep(0.5)
client.close()
print("\ngoodbye")
exit()
| 26.676471
| 117
| 0.615215
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 651
| 0.358875
|
b8481e8c9248a5d340e323d7d3c83d87b3a95b6f
| 9,183
|
py
|
Python
|
src/cogs/ide/dialogs/edit_view.py
|
osam7a/Jarvide
|
9a4424c293ae40b21968b5118f60862860ff5247
|
[
"MIT"
] | null | null | null |
src/cogs/ide/dialogs/edit_view.py
|
osam7a/Jarvide
|
9a4424c293ae40b21968b5118f60862860ff5247
|
[
"MIT"
] | null | null | null |
src/cogs/ide/dialogs/edit_view.py
|
osam7a/Jarvide
|
9a4424c293ae40b21968b5118f60862860ff5247
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import disnake
from disnake.ext import commands
from typing import TYPE_CHECKING
from src.utils.utils import EmbedFactory, ExitButton, SaveButton, add_lines, get_info
if TYPE_CHECKING:
from src.utils import File
def clear_codeblock(content: str):
    content = content.strip("\n")
    if content.startswith("```"):
        content = "\n".join(content.splitlines()[1:])
    if content.endswith("```"):
        content = content[:-3]
    if "`" in content:
        content = content.replace("`", "\u200b")
    return content
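# Illustrative behaviour sketch (not part of the original module):
#     clear_codeblock("```py\nprint('hi')\n```")  ->  "print('hi')\n"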
def update_buttons(cls: EditView):
if cls.page == 1:
cls.previous_button.disabled = True
else:
cls.previous_button.disabled = False
if cls.page == len(cls.pages) - 2:
cls.next_button.disabled = True
else:
cls.next_button.disabled = False
class EditView(disnake.ui.View):
async def interaction_check(self, interaction: disnake.MessageInteraction) -> bool:
return (
interaction.author == self.ctx.author
and interaction.channel == self.ctx.channel
)
def __init__(
self,
ctx,
file_: "File",
bot_message=None,
file_view=None,
lines: list[str] = None,
):
super().__init__()
self.ctx = ctx
self.bot = ctx.bot
self.file = file_
self.content = file_.content
self.bot_message = bot_message
self.file_view = file_view
self.undo = self.file_view.file.undo
self.redo = self.file_view.file.redo
self.pages = [lines[x : x + 50] for x in range(0, len(lines), 50)]
self.page = 0
self.SUDO = self.ctx.me.guild_permissions.manage_messages
self.add_item(ExitButton(ctx, bot_message, row=3))
self.add_item(SaveButton(ctx, bot_message, file_, row=2))
async def edit(self, inter):
await inter.response.defer()
await self.bot_message.edit(
embed=EmbedFactory.code_embed(
self.ctx,
"".join(add_lines(self.file_view.file.content)),
self.file.filename,
),
)
@disnake.ui.button(label="Write", style=disnake.ButtonStyle.gray)
async def write_button(
self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
):
...
@disnake.ui.button(label="Replace", style=disnake.ButtonStyle.gray)
async def replace_button(
self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
):
await interaction.response.send_message(
"**Format:**\n[line number]\n```py\n<code>\n```**Example:**"
"\n12-25\n```py\nfor i in range(10):\n\tprint('foo')\n```"
"\n`[Click save to see the result]`",
ephemeral=True,
)
content: str = (
await self.ctx.bot.wait_for(
"message",
check=lambda m: m.author == interaction.author
and m.channel == interaction.channel,
)
).content
if content[0].isdigit():
line_no = content.splitlines()[0]
if "-" in line_no:
from_, to = (
int(line_no.split("-")[0]) - 1,
int(line_no.split("-")[1]) - 1,
)
else:
from_, to = int(line_no) - 1, int(line_no) - 1
code = clear_codeblock("\n".join(content.splitlines()[1:]))
else:
from_, to = 0, len(self.file_view.file.content) - 1
code = clear_codeblock(content)
self.undo.append(self.content)
sliced = self.file_view.file.content.splitlines()
del sliced[from_ : to + 1]
sliced.insert(from_, code)
self.file_view.file.content = "\n".join(sliced)
@disnake.ui.button(label="Append", style=disnake.ButtonStyle.gray)
async def append_button(
self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
):
await interaction.response.send_message(
"Type something... (This will append your code with a new line) `[Click save to see the result]`",
ephemeral=True,
)
self.undo.append(self.file_view.file.content)
self.file_view.file.content += "\n" + clear_codeblock(
(
await self.ctx.bot.wait_for(
"message",
check=lambda m: m.author == interaction.author
and m.channel == interaction.channel,
)
).content
)
@disnake.ui.button(label="Rename", style=disnake.ButtonStyle.grey)
async def rename_button(
self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
):
await interaction.response.send_message(
"What would you like the filename to be?", ephemeral=True
)
filename = await self.bot.wait_for(
"message",
check=lambda m: self.ctx.author == m.author
and m.channel == self.ctx.channel,
)
if len(filename.content) > 12:
if self.SUDO:
await filename.delete()
return await interaction.channel.send(
"That filename is too long! The maximum limit is 12 character"
)
        file_ = File(filename=filename.content, content=self.file.content, bot=self.bot)
description = await get_info(file_)
self.file = file_
self.extension = file_.filename.split(".")[-1]
embed = EmbedFactory.ide_embed(self.ctx, description)
await self.bot_message.edit(embed=embed)
@disnake.ui.button(label="Prev", style=disnake.ButtonStyle.blurple, row=2)
async def previous_button(
self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
):
await interaction.response.defer()
update_buttons(self)
self.page -= 1
embed = (
disnake.Embed(
description=f"```py\n{''.join(self.pages[self.page])}\n```\nPage: {self.page + 1}/{len(self.pages)}",
timestamp=self.ctx.message.created_at,
)
.set_author(
name=f"{self.ctx.author.name}'s automated paginator for {self.file.filename}",
icon_url=self.ctx.author.avatar.url,
)
.set_footer(text="The official jarvide text editor and ide")
)
await self.bot_message.edit(embed=embed, view=self)
@disnake.ui.button(label="Next", style=disnake.ButtonStyle.blurple, row=2)
async def next_button(
self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
):
await interaction.response.defer()
update_buttons(self)
self.page += 1
embed = (
disnake.Embed(
description=f"```py\n{''.join(self.pages[self.page])}\n```\nPage: {self.page + 1}/{len(self.pages)}",
timestamp=self.ctx.message.created_at,
)
.set_author(
name=f"{self.ctx.author.name}'s automated paginator for {self.file.filename}",
icon_url=self.ctx.author.avatar.url,
)
.set_footer(text="The official jarvide text editor and ide")
)
await self.bot_message.edit(embed=embed, view=self)
@disnake.ui.button(label="Undo", style=disnake.ButtonStyle.blurple, row=2)
async def undo_button(
self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
):
if not self.undo:
return await interaction.response.send_message(
"You have made no changes and have nothing to undo!", ephemeral=True
)
self.redo.append(self.file_view.file.content)
self.file_view.file.content = self.undo.pop(-1)
await self.edit(interaction)
@disnake.ui.button(label="Redo", style=disnake.ButtonStyle.blurple, row=2)
async def redo_button(
self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
):
if not self.redo:
return await interaction.response.send_message(
"You have made no changes and have nothing to undo!", ephemeral=True
)
self.undo.append(self.file_view.file.content)
self.file_view.file.content = self.redo.pop(-1)
await self.edit(interaction)
@disnake.ui.button(label="Clear", style=disnake.ButtonStyle.danger, row=3)
async def clear_button(
self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
):
self.undo.append(self.file_view.file.content)
self.file_view.file.content = ""
await self.edit(interaction)
@disnake.ui.button(label="Back", style=disnake.ButtonStyle.danger, row=3)
async def settings_button(
self, button: disnake.ui.Button, interaction: disnake.MessageInteraction
):
embed = EmbedFactory.ide_embed(self.ctx, await get_info(self.file))
self.undo = []
self.redo = []
await self.bot_message.edit(embed=embed, view=self.file_view)
def setup(bot: commands.Bot):
pass
| 35.871094
| 117
| 0.597299
| 8,302
| 0.904062
| 0
| 0
| 6,932
| 0.754873
| 6,689
| 0.728411
| 1,024
| 0.11151
|
b8493d2511af44620ab30010ea879f211db8a17b
| 11,878
|
py
|
Python
|
modules/administrator.py
|
Gaeta/Delta
|
c76e149d0c17e025fe2648964e2512440fc0b4c7
|
[
"MIT"
] | 1
|
2021-07-04T10:34:11.000Z
|
2021-07-04T10:34:11.000Z
|
modules/administrator.py
|
Gaeta/Delta
|
c76e149d0c17e025fe2648964e2512440fc0b4c7
|
[
"MIT"
] | null | null | null |
modules/administrator.py
|
Gaeta/Delta
|
c76e149d0c17e025fe2648964e2512440fc0b4c7
|
[
"MIT"
] | null | null | null |
import discord, sqlite3, asyncio, utils, re
from discord.ext import commands
from datetime import datetime
TIME_REGEX = re.compile(r"(?:(\d{1,5})\s?(h|hours|hrs|hour|hr|s|seconds|secs|sec|second|m|mins|minutes|minute|min|d|days|day))+?")
TIME_DICT = {"h": 3600, "s": 1, "m": 60, "d": 86400}
class TimeConverter(commands.Converter):
async def convert(self, argument):
if argument is None:
return 0
args = argument.lower()
matches = re.findall(TIME_REGEX, args)
time = 0
for v, k in matches:
try:
for key in ("h", "s", "m", "d"):
if k.startswith(key):
k = key
break
time += TIME_DICT[k]*float(v)
except KeyError:
raise commands.BadArgument("{} is an invalid time-key! h/m/s/d are valid!".format(k))
except ValueError:
raise commands.BadArgument("{} is not a number!".format(v))
return time
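# Example: await TimeConverter().convert("1h 30m") resolves to 5400.0 seconds.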
class AdministratorCommands(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(usage="poll <ping> <question> | <answer 1> | <answer2...>")
@utils.guild_only()
@utils.is_admin()
@commands.bot_has_permissions(manage_roles=True)
@commands.cooldown(1, 60, commands.BucketType.guild)
async def poll(self, ctx, ping_member, *, args):
"""Creates a poll with up to 5 answers."""
ping = ping_member.lower()
if ping not in ("yes", "no", "true", "false", "y", "n", "t", "f"):
return await utils.embed(ctx, discord.Embed(title="Poll Failed", description=f"Sorry, the `ping_member` argument should be \"Yes\" or \"No\". Please use `{self.bot.config.prefix}help poll` for more information."), error=True)
if ping in ("yes", "y", "true", "t"):
ping = True
if ping in ("no", "n", "no", "n"):
ping = False
ques_ans = args.split(" | ")
if len(ques_ans) <= 2:
return await utils.embed(ctx, discord.Embed(title="Poll Failed", description=f"Sorry, the `args` argument should be follow this syntax: `question | answer 1 | answer 2...`."), error=True)
question = ques_ans[0]
answers = ques_ans[1:6]
channel_id = self.bot.config.channels.announcements
channel = self.bot.get_channel(channel_id)
if channel is None:
return await utils.embed(ctx, discord.Embed(title="Poll Failed", description=f"Sorry, the `announcements` channel hasn't been configured."), error=True)
reactions = []
text = ""
i = 1
for answer in answers:
react = {1: "1\u20e3", 2: "2\u20e3", 3: "3\u20e3", 4: "4\u20e3", 5: "5\u20e3"}[i]
reactions.append(react)
text += f"{react} {answers[i-1]}\n\n"
i += 1
embed = await utils.embed(ctx, discord.Embed(timestamp=datetime.utcnow(), title="Server Poll", description=f"**{question}**\n\n{text}").set_footer(text=f"Poll by {ctx.author}"), send=False)
if ping:
ping_role = utils.get_ping_role(ctx)
if ping_role != ctx.guild.default_role:
if not ping_role.mentionable:
edited = False
try:
await ping_role.edit(mentionable=True)
edited = True
except discord.Forbidden:
return await utils.embed(ctx, discord.Embed(title="Poll Failed", description=f"I do not have permission to **edit** {ping_role.mention}."), error=True)
try:
message = await channel.send(ping_role.mention, embed=embed)
await utils.embed(ctx, discord.Embed(title="Poll Created", description=f"Your poll was successfully posted in {channel.mention}."), error=True)
for r in reactions:
await message.add_reaction(r)
except:
if channel.permissions_for(ctx.guild.me).add_reactions is False:
issue = f"I do not have permission to **add reactions** in <#{channel.mention}>."
if channel.permissions_for(ctx.guild.me).send_messages is False:
issue = f"I do not have permission to **send messages** in <#{channel.mention}>."
return await utils.embed(ctx, discord.Embed(title="Poll Failed", description=issue), error=True)
if edited:
await ping_role.edit(mentionable=False)
return
try:
message = await channel.send(content="@everyone" if ping else None, embed=embed)
await utils.embed(ctx, discord.Embed(title="Poll Created", description=f"Your poll was successfully posted in {channel.mention}."), error=True)
for r in reactions:
await message.add_reaction(r)
except:
if channel.permissions_for(ctx.guild.me).add_reactions is False:
issue = f"I do not have permission to **add reactions** in <#{channel.mention}>."
if channel.permissions_for(ctx.guild.me).send_messages is False:
issue = f"I do not have permission to **send messages** in <#{channel.mention}>."
await utils.embed(ctx, discord.Embed(title="Poll Failed", description=issue), error=True)
@commands.command(usage="announce <ping> <announcement>")
@utils.guild_only()
@utils.is_admin()
async def announce(self, ctx, ping_member, *, announcement):
"""Creates an announcement."""
ping = ping_member.lower()
if ping not in ("yes", "no", "true", "false", "y", "n", "t", "f"):
return await utils.embed(ctx, discord.Embed(title="Announcement Failed", description=f"Sorry, the `ping_member` argument should be \"Yes\" or \"No\". Please use `{self.bot.config.prefix}help announce` for more information."), error=True)
if ping in ("yes", "y", "true", "t"):
ping = True
if ping in ("no", "n", "no", "n"):
ping = False
channel_id = self.bot.config.channels.announcements
channel = self.bot.get_channel(channel_id)
if channel is None:
return await utils.embed(ctx, discord.Embed(title="Announcement Failed", description=f"Sorry, the `announcements` channel hasn't been configured."), error=True)
if ping:
ping_role = utils.get_ping_role(ctx)
if ping_role != ctx.guild.default_role:
if not ping_role.mentionable:
edited = False
try:
await ping_role.edit(mentionable=True)
edited = True
except discord.Forbidden:
return await utils.embed(ctx, discord.Embed(title="Announcement Failed", description=f"I do not have permission to **edit** {ping_role.mention}."), error=True)
try:
await channel.send(f"{ping_role.mention}\n{announcement}")
await utils.embed(ctx, discord.Embed(title="Announcement Sent", description=f"Your announcement was successfully posted in {channel.mention}."), error=True)
except:
if channel.permissions_for(ctx.guild.me).send_messages is False:
issue = f"I do not have permission to **send messages** in <#{channel.mention}>."
return await utils.embed(ctx, discord.Embed(title="Announcement Failed", description=issue), error=True)
if edited:
await ping_role.edit(mentionable=False)
return
try:
await channel.send("@everyone\n" if ping else "" + announcement)
await utils.embed(ctx, discord.Embed(title="Announcement Sent", description=f"Your announcement was successfully posted in {channel.mention}."), error=True)
except:
if channel.permissions_for(ctx.guild.me).send_messages is False:
issue = f"I do not have permission to **send messages** in <#{channel.mention}>."
await utils.embed(ctx, discord.Embed(title="Poll Failed", description=issue), error=True)
@commands.command(aliases=["resetcase"], usage="resetid")
@utils.guild_only()
@utils.is_admin()
async def resetid(self, ctx):
"""Resets the case ID."""
with sqlite3.connect(self.bot.config.database) as db:
db.cursor().execute("UPDATE Settings SET Case_ID='0'")
db.cursor().execute("DELETE FROM Cases")
db.commit()
await utils.embed(ctx, discord.Embed(timestamp=datetime.utcnow(), title="Data Wiped", description="All case data has been successfully cleared."))
@commands.command(aliases=["reloadconfig"], usage="reload")
@utils.guild_only()
@utils.is_admin()
async def reload(self, ctx):
"""Reloads the config file."""
del self.bot.config
self.bot.config = utils.Config()
await utils.embed(ctx, discord.Embed(timestamp=datetime.utcnow(), title="Config Reloaded", description="All config data has been successfully reloaded."))
@commands.command(usage="lockdown [time]")
@utils.guild_only()
@commands.bot_has_permissions(manage_channels=True)
@utils.is_admin()
async def lockdown(self, ctx, *, time=None):
"""Locks or unlocks a channel for a specified amount of time."""
member_role = utils.get_member_role(ctx)
ows = ctx.channel.overwrites_for(member_role)
if ows.read_messages is False:
return await utils.embed(ctx, discord.Embed(timestamp=datetime.utcnow(), title="Lockdown Failed", description=f"Sorry, I can only lock channels that can be seen by {member_role.mention if member_role != ctx.guild.default_role else member_role}."), error=True)
if ows.send_messages is False:
await ctx.channel.set_permissions(member_role, send_messages=None)
await ctx.channel.set_permissions(ctx.guild.me, send_messages=None)
return await utils.embed(ctx, discord.Embed(timestamp=datetime.utcnow(), title="Lockdown Deactivated", description=f"Lockdown has been lifted by **{ctx.author}**."))
if ows.send_messages in (True, None):
seconds = await TimeConverter().convert(time)
await ctx.channel.set_permissions(member_role, send_messages=False)
await ctx.channel.set_permissions(ctx.guild.me, send_messages=True)
if seconds < 1:
return await utils.embed(ctx, discord.Embed(timestamp=datetime.utcnow(), title="Lockdown Activated", description=f"Lockdown has been activated by **{ctx.author}**."))
await utils.embed(ctx, discord.Embed(timestamp=datetime.utcnow(), title="Lockdown Activated", description=f"Lockdown has been activated by **{ctx.author}** for {utils.display_time(round(seconds), 4)}."))
await asyncio.sleep(seconds)
ows = ctx.channel.overwrites_for(member_role)
if ows.send_messages is False:
await ctx.channel.set_permissions(member_role, send_messages=None)
await ctx.channel.set_permissions(ctx.guild.me, send_messages=None)
return await utils.embed(ctx, discord.Embed(timestamp=datetime.utcnow(), title="Lockdown Deactivated", description=f"Lockdown has been lifted."))
def setup(bot):
bot.add_cog(AdministratorCommands(bot))
| 46.217899
| 272
| 0.594124
| 11,509
| 0.968934
| 0
| 0
| 10,583
| 0.890975
| 10,596
| 0.892069
| 3,039
| 0.255851
|
b84a30c58e64eb7a73321b156d6da42908f33f1f
| 23,650
|
py
|
Python
|
models/feature_extraction/gcn_resnest.py
|
hoangtuanvu/rad_chestxray
|
b29c2bf98ae41d85258b21674e8826847a0cc647
|
[
"MIT"
] | 2
|
2020-09-07T00:06:41.000Z
|
2020-09-29T07:08:24.000Z
|
models/feature_extraction/gcn_resnest.py
|
hoangtuanvu/rad_chestxray
|
b29c2bf98ae41d85258b21674e8826847a0cc647
|
[
"MIT"
] | 7
|
2020-09-25T22:12:53.000Z
|
2021-08-25T16:06:24.000Z
|
models/feature_extraction/gcn_resnest.py
|
hoangtuanvu/rad_chestxray
|
b29c2bf98ae41d85258b21674e8826847a0cc647
|
[
"MIT"
] | null | null | null |
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## Email: zhanghang0704@gmail.com
## Copyright (c) 2020
##
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""ResNet variants"""
import os
import math
import torch
import numpy as np
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
from models.attention_map import SEModule, SpatialCGNL, SAModule
from models.feature_extraction.splat import SplAtConv2d
from models.utils import gen_adj_num, gen_adj
from models.common import conv1x1
_url_format = 'https://hangzh.s3.amazonaws.com/encoding/models/{}-{}.pth'
_model_sha256 = {name: checksum for checksum, name in
[('528c19ca', 'resnest50'), ('22405ba7', 'resnest101'), ('75117900', 'resnest200'),
('0cc87c48', 'resnest269'), ]}
def short_hash(name):
if name not in _model_sha256:
raise ValueError('Pretrained model for {name} is not available.'.format(name=name))
return _model_sha256[name][:8]
resnest_model_urls = {name: _url_format.format(name, short_hash(name)) for name in
_model_sha256.keys()}
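# e.g. resnest_model_urls['resnest50'] ->
# 'https://hangzh.s3.amazonaws.com/encoding/models/resnest50-528c19ca.pth'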
__all__ = ['ResNet', 'Bottleneck']
class DropBlock2D(object):
def __init__(self, *args, **kwargs):
raise NotImplementedError
class Bottleneck(nn.Module):
"""ResNet Bottleneck
"""
# pylint: disable=unused-argument
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, radix=1, cardinality=1,
bottleneck_width=64, avd=False, avd_first=False, dilation=1, is_first=False,
rectified_conv=False, rectify_avg=False, norm_layer=None, dropblock_prob=0.0,
last_gamma=False, use_se=False):
super(Bottleneck, self).__init__()
group_width = int(planes * (bottleneck_width / 64.)) * cardinality
self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False)
self.bn1 = norm_layer(group_width)
self.dropblock_prob = dropblock_prob
self.radix = radix
self.avd = avd and (stride > 1 or is_first)
self.avd_first = avd_first
if self.avd:
self.avd_layer = nn.AvgPool2d(3, stride, padding=1)
stride = 1
if dropblock_prob > 0.0:
self.dropblock1 = DropBlock2D(dropblock_prob, 3)
if radix == 1:
self.dropblock2 = DropBlock2D(dropblock_prob, 3)
self.dropblock3 = DropBlock2D(dropblock_prob, 3)
if radix >= 1:
self.conv2 = SplAtConv2d(group_width, group_width, kernel_size=3, stride=stride,
padding=dilation, dilation=dilation, groups=cardinality,
bias=False, radix=radix, rectify=rectified_conv,
rectify_avg=rectify_avg, norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
elif rectified_conv:
from rfconv import RFConv2d
self.conv2 = RFConv2d(group_width, group_width, kernel_size=3, stride=stride,
padding=dilation, dilation=dilation, groups=cardinality,
bias=False, average_mode=rectify_avg)
self.bn2 = norm_layer(group_width)
else:
self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride,
padding=dilation, dilation=dilation, groups=cardinality,
bias=False)
self.bn2 = norm_layer(group_width)
self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False)
self.bn3 = norm_layer(planes * 4)
if last_gamma:
from torch.nn.init import zeros_
zeros_(self.bn3.weight)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.dilation = dilation
self.stride = stride
self.use_se = use_se
if use_se:
self.se = SEModule(planes * 4)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
if self.dropblock_prob > 0.0:
out = self.dropblock1(out)
out = self.relu(out)
if self.avd and self.avd_first:
out = self.avd_layer(out)
out = self.conv2(out)
if self.radix == 0:
out = self.bn2(out)
if self.dropblock_prob > 0.0:
out = self.dropblock2(out)
out = self.relu(out)
if self.avd and not self.avd_first:
out = self.avd_layer(out)
out = self.conv3(out)
out = self.bn3(out)
if self.dropblock_prob > 0.0:
out = self.dropblock3(out)
if self.downsample is not None:
residual = self.downsample(x)
if self.use_se:
out = self.se(out) + residual
else:
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
"""ResNet Variants
Parameters
----------
block : Block
Class for the residual block. Options are BasicBlockV1, BottleneckV1.
layers : list of int
Numbers of layers in each block
classes : int, default 1000
Number of classification classes.
dilated : bool, default False
Applying dilation strategy to pretrained ResNet yielding a stride-8 model,
typically used in Semantic Segmentation.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.nn.BatchNorm`;
        for Synchronized Cross-GPU BatchNormalization).
Reference:
- He, Kaiming, et al. "Deep residual learning for image recognition." Proceedings of the IEEE conference on computer vision and pattern recognition. 2016.
- Yu, Fisher, and Vladlen Koltun. "Multi-scale context aggregation by dilated convolutions."
"""
# pylint: disable=unused-variable
def __init__(self, block, layers, radix=1, groups=1, bottleneck_width=64, num_classes=1000,
dilated=False, dilation=1, deep_stem=False, stem_width=64, avg_down=False,
rectified_conv=False, rectify_avg=False, avd=False, avd_first=False,
final_drop=0.0, dropblock_prob=0, last_gamma=False, use_se=False, in_channels=300,
word_file='/workspace/Projects/cxr/models/feature_extraction/diseases_embeddings.npy',
# word_file='diseases_embeddings.npy',
# word_file='/home/hoangvu/Projects/cxr/models/feature_extraction/diseases_embeddings.npy',
extract_fields='0,1,2,3,4,5', agree_rate=0.5, csv_path='',
norm_layer=nn.BatchNorm2d):
self.cardinality = groups
self.bottleneck_width = bottleneck_width
# ResNet-D params
self.inplanes = stem_width * 2 if deep_stem else 64
self.avg_down = avg_down
self.last_gamma = last_gamma
# ResNeSt params
self.radix = radix
self.avd = avd
self.avd_first = avd_first
self.use_se = use_se
super(ResNet, self).__init__()
self.rectified_conv = rectified_conv
self.rectify_avg = rectify_avg
if rectified_conv:
from rfconv import RFConv2d
conv_layer = RFConv2d
else:
conv_layer = nn.Conv2d
conv_kwargs = {'average_mode': rectify_avg} if rectified_conv else {}
if deep_stem:
self.conv1 = nn.Sequential(
conv_layer(3, stem_width, kernel_size=3, stride=2, padding=1, bias=False,
**conv_kwargs), norm_layer(stem_width), nn.ReLU(inplace=True),
conv_layer(stem_width, stem_width, kernel_size=3, stride=1, padding=1, bias=False,
**conv_kwargs), norm_layer(stem_width), nn.ReLU(inplace=True),
conv_layer(stem_width, stem_width * 2, kernel_size=3, stride=1, padding=1,
bias=False, **conv_kwargs), )
else:
self.conv1 = conv_layer(3, 64, kernel_size=7, stride=2, padding=3, bias=False,
**conv_kwargs)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer, is_first=False)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
if dilated or dilation == 4:
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2,
norm_layer=norm_layer, dropblock_prob=dropblock_prob)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4,
norm_layer=norm_layer, dropblock_prob=dropblock_prob)
elif dilation == 2:
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilation=1,
norm_layer=norm_layer, dropblock_prob=dropblock_prob)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=2,
norm_layer=norm_layer, dropblock_prob=dropblock_prob)
else:
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
self.drop = nn.Dropout(final_drop) if final_drop > 0.0 else None
# self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, norm_layer):
m.weight.data.fill_(1)
m.bias.data.zero_()
num_classes = len(extract_fields.split(','))
_adj = gen_adj_num(labels=extract_fields, agree_rate=agree_rate, csv_path=csv_path)
self.adj = Parameter(torch.from_numpy(_adj).float())
if not os.path.exists(word_file):
word = np.random.randn(num_classes, 300)
print('graph input: random')
else:
with open(word_file, 'rb') as point:
word = np.load(point)
print('graph input: loaded from {}'.format(word_file))
self.word = Parameter(torch.from_numpy(word).float())
self.gc0 = GraphConvolution(in_channels, 128, bias=True)
self.gc1 = GraphConvolution(128, 256, bias=True)
self.gc2 = GraphConvolution(256, 512, bias=True)
self.gc3 = GraphConvolution(512, 1024, bias=True)
self.gc4 = GraphConvolution(1024, 2048, bias=True)
self.gc_relu = nn.LeakyReLU(0.2)
self.gc_tanh = nn.Tanh()
self.merge_conv0 = nn.Conv2d(num_classes, 128, kernel_size=1, stride=1, bias=False)
self.merge_conv1 = nn.Conv2d(num_classes, 256, kernel_size=1, stride=1, bias=False)
self.merge_conv2 = nn.Conv2d(num_classes, 512, kernel_size=1, stride=1, bias=False)
self.merge_conv3 = nn.Conv2d(num_classes, 1024, kernel_size=1, stride=1, bias=False)
self.conv1x1 = conv1x1(in_channels=2048, out_channels=num_classes, bias=True)
# self.spatial_attention = SAModule(2048)
# self.spatial_attention = SpatialCGNL(2048, 1024)
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=None,
dropblock_prob=0.0, is_first=True):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
down_layers = []
if self.avg_down:
if dilation == 1:
down_layers.append(
nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True,
count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True,
count_include_pad=False))
down_layers.append(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=1,
bias=False))
else:
down_layers.append(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride,
bias=False))
down_layers.append(norm_layer(planes * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if dilation == 1 or dilation == 2:
layers.append(
block(self.inplanes, planes, stride, downsample=downsample, radix=self.radix,
cardinality=self.cardinality, bottleneck_width=self.bottleneck_width,
avd=self.avd, avd_first=self.avd_first, dilation=1, is_first=is_first,
rectified_conv=self.rectified_conv, rectify_avg=self.rectify_avg,
norm_layer=norm_layer, dropblock_prob=dropblock_prob,
last_gamma=self.last_gamma, use_se=self.use_se))
elif dilation == 4:
layers.append(
block(self.inplanes, planes, stride, downsample=downsample, radix=self.radix,
cardinality=self.cardinality, bottleneck_width=self.bottleneck_width,
avd=self.avd, avd_first=self.avd_first, dilation=2, is_first=is_first,
rectified_conv=self.rectified_conv, rectify_avg=self.rectify_avg,
norm_layer=norm_layer, dropblock_prob=dropblock_prob,
last_gamma=self.last_gamma, use_se=self.use_se))
else:
raise RuntimeError("=> unknown dilation size: {}".format(dilation))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(self.inplanes, planes, radix=self.radix, cardinality=self.cardinality,
bottleneck_width=self.bottleneck_width, avd=self.avd,
avd_first=self.avd_first, dilation=dilation,
rectified_conv=self.rectified_conv, rectify_avg=self.rectify_avg,
norm_layer=norm_layer, dropblock_prob=dropblock_prob,
last_gamma=self.last_gamma, use_se=self.use_se))
return nn.Sequential(*layers)
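    # forward(): the label word embeddings flow through the stacked GCN layers
    # (gc0..gc4); after each CNN stage the intermediate GCN output is folded back
    # into the feature maps via merge_gcn_residual, and the final logits combine
    # the GCN branch (feature @ gcn_out.T) with a 1x1-conv branch on the pooled map.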
def forward(self, feature):
adj = gen_adj(self.adj).detach()
word = self.word.detach()
feature = self.conv1(feature)
feature = self.bn1(feature)
feature = self.relu(feature)
feature = self.maxpool(feature)
x_raw = self.gc0(word, adj)
x = self.gc_tanh(x_raw)
feature = merge_gcn_residual(feature, x, self.merge_conv0)
feature = self.layer1(feature)
x = self.gc_relu(x_raw)
x_raw = self.gc1(x, adj)
x = self.gc_tanh(x_raw)
feature = merge_gcn_residual(feature, x, self.merge_conv1)
feature = self.layer2(feature)
x = self.gc_relu(x_raw)
x_raw = self.gc2(x, adj)
x = self.gc_tanh(x_raw)
feature = merge_gcn_residual(feature, x, self.merge_conv2)
feature = self.layer3(feature)
x = self.gc_relu(x_raw)
x_raw = self.gc3(x, adj)
x = self.gc_tanh(x_raw)
feature = merge_gcn_residual(feature, x, self.merge_conv3)
feature = self.layer4(feature)
# feature = self.spatial_attention(feature)
feature_raw = self.global_pool(feature)
if self.drop is not None:
feature_raw = self.drop(feature_raw)
feature = feature_raw.view(feature_raw.size(0), -1)
x = self.gc_relu(x_raw)
x = self.gc4(x, adj)
x = self.gc_tanh(x)
x = x.transpose(0, 1)
x = torch.matmul(feature, x)
y = self.conv1x1(feature_raw)
y = y.view(y.size(0), -1)
x = x + y
return x
def gcn_resnest200(cfg=None, **kwargs):
model = ResNet(Bottleneck, [3, 24, 36, 3], radix=2, groups=1, bottleneck_width=64,
deep_stem=True, stem_width=64, avg_down=True, avd=True, avd_first=False,
use_se=cfg.use_se, extract_fields=cfg.extract_fields, agree_rate=cfg.agree_rate,
csv_path=cfg.csv_path, **kwargs)
# model = ResNet(Bottleneck, [3, 24, 36, 3], radix=2, groups=1, bottleneck_width=64,
# deep_stem=True, stem_width=64, avg_down=True, avd=True, avd_first=False,
# use_se=False, extract_fields='0,1,2,3,4,5', agree_rate=0.5,
# csv_path='D:/Dataset/Vinmec/Noise/train_sss.csv', **kwargs)
if cfg.pretrained:
model.load_state_dict(
torch.hub.load_state_dict_from_url(resnest_model_urls['resnest200'], progress=True),
strict=False)
return model
def gcn_resnest101(cfg=None, **kwargs):
model = ResNet(Bottleneck, [3, 4, 23, 3], radix=2, groups=1, bottleneck_width=64,
deep_stem=True, stem_width=64, avg_down=True, avd=True, avd_first=False,
use_se=cfg.use_se, extract_fields=cfg.extract_fields, agree_rate=cfg.agree_rate,
csv_path=cfg.csv_path, **kwargs)
if cfg.pretrained:
model.load_state_dict(
torch.hub.load_state_dict_from_url(resnest_model_urls['resnest101'], progress=True),
strict=False)
return model
def gcn_resnest50(cfg=None, **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], radix=2, groups=1, bottleneck_width=64, deep_stem=True,
stem_width=32, avg_down=True, avd=True, avd_first=False, use_se=cfg.use_se,
extract_fields=cfg.extract_fields, agree_rate=cfg.agree_rate,
csv_path=cfg.csv_path, **kwargs)
if cfg.pretrained:
model.load_state_dict(
torch.hub.load_state_dict_from_url(resnest_model_urls['resnest50'], progress=True),
strict=False)
return model
class GraphConvolution(nn.Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=False):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
middle_features = max(32, (in_features + out_features) // 16)
self.weight1 = Parameter(torch.Tensor(in_features, middle_features))
self.weight2 = Parameter(torch.Tensor(middle_features, out_features))
if bias:
self.bias = Parameter(torch.Tensor(1, out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight1.size(1))
self.weight1.data.uniform_(-stdv, stdv)
stdv = 1. / math.sqrt(self.weight2.size(1))
self.weight2.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.matmul(input, self.weight1)
support = torch.matmul(support, self.weight2)
output = torch.matmul(adj, support)
if self.bias is not None:
output = output + self.bias
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(
self.out_features) + ')'
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout=0, alpha=0.2, concat=True, bias=False):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
if bias:
self.bias = Parameter(torch.Tensor(1, out_features))
else:
self.register_parameter('bias', None)
self.leakyrelu = nn.LeakyReLU(self.alpha)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.W.size(1))
self.W.data.uniform_(-stdv, stdv)
stdv = 1. / math.sqrt(self.a.size(1))
self.a.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
h = torch.mm(input, self.W)
N = h.size()[0]
a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)], dim=1).view(N, -1,
2 * self.out_features)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))
zero_vec = -9e15 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.bias is not None:
h_prime = h_prime + self.bias
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(
self.out_features) + ')'
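# merge_gcn_residual: feature is a CNN map of shape (B, C, H, W) and x a GCN output
# of shape (num_classes, C); the features are projected onto the label embeddings,
# mapped back to C channels with a 1x1 conv, and added to the original map as a residual.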
def merge_gcn_residual(feature, x, merge_conv):
feature_raw = feature
feature = feature_raw.transpose(1, 2)
feature = feature.transpose(2, 3).contiguous()
feature = feature.view(-1, feature.shape[-1])
reshape_x = x.transpose(0, 1)
feature = torch.matmul(feature, reshape_x)
feature = feature.view(feature_raw.shape[0], feature_raw.shape[2], feature_raw.shape[3], -1)
feature = feature.transpose(2, 3)
feature = feature.transpose(1, 2)
feature = merge_conv(feature)
return feature_raw + feature
if __name__ == "__main__":
import torchsummary
x = torch.randn([2, 3, 224, 224])
model = gcn_resnest200(num_classes=6, word_file='diseases_embeddings.npy')
logits = model(x)
# print(torchsummary.summary(model, input_size=(3, 512, 512), device='cpu'))
print(logits)
    # x = torch.randn([2, 2048, 7, 7])
    # word = torch.randn([6, 300])
    # adj = torch.randn([6, 6])
    # # gcn = GraphConvolution(in_features=300, out_features=256, bias=True)
    # gcn = GraphAttentionLayer(in_features=300, out_features=256, bias=True)
    # output = gcn(word, adj)
    # print(output)
    # feature = torch.randn([2, 128, 56, 56])
    # x = torch.randn([11, 128])
    # merge_conv = nn.Conv2d(11, 128, kernel_size=1, stride=1, bias=False)
    # output = merge_gcn_residual(feature, x, merge_conv)
    # print(output.size())
| 42.383513
| 229
| 0.602199
| 18,929
| 0.800381
| 0
| 0
| 0
| 0
| 0
| 0
| 3,244
| 0.137167
|
b84bfe3e24cf3fa88c7b90891f02c84318e2faae
| 7,473
|
py
|
Python
|
nextai_lib/inference.py
|
jav0927/nextai
|
9de0c338a41a3ce0297b95f625290fa814a83344
|
[
"Apache-2.0"
] | null | null | null |
nextai_lib/inference.py
|
jav0927/nextai
|
9de0c338a41a3ce0297b95f625290fa814a83344
|
[
"Apache-2.0"
] | 1
|
2021-09-28T05:33:17.000Z
|
2021-09-28T05:33:17.000Z
|
nextai_lib/inference.py
|
jav0927/nextai
|
9de0c338a41a3ce0297b95f625290fa814a83344
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: 02_inference.ipynb (unless otherwise specified).
__all__ = ['device', 'pad_output', 'get_activ_offsets_mns']
# Cell
from fastai.vision.all import *  # provides nn, TensorBBox and TensorMultiCategory used below
from fastai import *
from typing import *
from torch import tensor, Tensor
import torch
import torchvision # Needed to invoke the torchvision.ops.nms function
# Cell
# Automatically sets for GPU or CPU environments
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Cell
# Pad tensors so that they have uniform dimensions: (batch size, no of items in a batch, 4) and (batch size, no of items in a batch, 21)
def pad_output(l_bb:List, l_scr:List, l_idx:List, no_classes:int):
    '''Pad tensors so that they have uniform dimensions: (batch size, no of items in a batch, 4) and (batch size, no of items in a batch, 21)
       Inputs:  l_bb - list of tensors containing individual non-uniform sized bounding boxes
                l_scr - list of tensors containing per-class confidence scores
                l_idx - list of tensors containing class index values (i.e. 1 - airplane)
                no_classes - Number of classes, Integer
       Outputs: Uniform-sized tensors: bounding box tensor and score tensor with dims: (batch size, no of items in a batch, 4) and (batch size, no of items in a batch, 21)'''
    if not l_bb:
        print('Image did not pass the scoring threshold')
        return
    mx_len = max([len(img_bb) for img_bb in l_bb]) # Calculate maximum length of the boxes in the batch
l_b, l_c, l_x, l_cat = [], [], [], []
# Create Bounding Box tensors # zeroed tensor accumulators
for i, ntr in enumerate(zip(l_bb, l_scr, l_idx)):
bbox, cls, idx = ntr[0], ntr[1], ntr[2] # Unpack variables
tsr_len = mx_len - bbox.shape[0] # Calculate the number of zero-based rows to add
m = nn.ConstantPad2d((0, 0, 0, tsr_len), 0.) # Prepare to pad the box tensor with zero entries
l_b.append(m(bbox)) # Add appropriate zero-based box rows and add to list
# Create Category tensors
cat_base = torch.zeros(mx_len-bbox.shape[0], dtype=torch.int32)
img_cat = torch.cat((idx, cat_base), dim=0)
l_cat.append(img_cat)
# Create Score tensors
img_cls = [] # List to construct class vectors
for ix in range(idx.shape[0]): # Construct class vectors of dim(no of classes)
cls_base = torch.zeros(no_classes).to(device) # Base zero-based class vector
cls_base[idx[ix]] = cls[ix] # Add the score in the nth position
img_cls.append(cls_base)
img_stack = torch.stack(img_cls) # Create single tensor per image
img_stack_out = m(img_stack)
l_c.append( img_stack_out ) # Add appropriate zero-based class rows and add to list
return (TensorBBox(torch.stack(l_b,0)), TensorMultiCategory(torch.stack(l_c,0)), TensorMultiCategory(torch.stack(l_cat,0)) )
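# Example (hypothetical shapes): with two images holding 3 and 1 boxes and no_classes=21,
# pad_output returns tensors of shape (2, 3, 4), (2, 3, 21) and (2, 3).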
# Cell
def get_activ_offsets_mns(anchrs:Tensor, activs:Tensor, no_classes:int, threshold:float=0.5):
''' Takes in activations and calculates corresponding anchor box offsets.
    It then filters the resulting boxes through NMS
Inputs:
anchrs - Anchors as Tensor
activs - Activations as Tensor
no_classes - Number of classes (categories)
threshold - Coarse filtering. Default = 0.5
Output:
one_batch_boxes, one_batch_scores as Tuple'''
p_bboxes, p_classes = activs # Read p_bboxes: [32, 189,4] Torch.Tensor and p_classes: [32, 189, 21] Torch.Tensor from self.learn.pred
#scores = torch.sigmoid(p_classes) # Calculate the confidence levels, scores, for class predictions [0, 1]
scores = torch.softmax(p_classes, -1) # Calculate the confidence levels, scores, for class predictions [0, 1] - Probabilistic
    offset_boxes = activ_decode(p_bboxes, anchrs) # Return anchors + anchor offsets with format (batch, No Items in Batch, 4)
    # For each item in batch, and for each class in the item, filter the image by passing it through NMS. Keep preds with IOU > threshold
    one_batch_boxes = []; one_batch_scores = []; one_batch_cls_pred = [] # Aggregators at the batch level
for i in range(p_classes.shape[0]): # For each image in batch ...
batch_p_boxes = offset_boxes[i] # box preds for the current batch
batch_scores = scores[i] # Keep scores for the current batch
max_scores, cls_idx = torch.max(batch_scores, 1 ) # Keep batch class indexes
bch_th_mask = max_scores > threshold # Threshold mask for batch
bch_keep_boxes = batch_p_boxes[bch_th_mask] # "
bch_keep_scores = batch_scores[bch_th_mask] # "
bch_keep_cls_idx = cls_idx[bch_th_mask]
        # Aggregators per image in a batch
        img_boxes = []                                        # Bounding boxes per image
        img_scores = []                                       # Scores per image
        img_cls_pred = []                                     # Class predictions per image
for c in range (1,no_classes): # Loop through each class
cls_mask = bch_keep_cls_idx==c # Keep masks for the current class
if cls_mask.sum() == 0: continue # Weed out images with no positive class masks
cls_boxes = bch_keep_boxes[cls_mask] # Keep boxes per image
cls_scores = bch_keep_scores[cls_mask].max(dim=1)[0] # Keep class scores for the current image
nms_keep_idx = torchvision.ops.nms(cls_boxes, cls_scores, iou_threshold=0.5) # Filter images by passing them through NMS
            img_boxes += [*cls_boxes[nms_keep_idx]]           # Aggregate cls_boxes into tensors for all classes
            box_stack = torch.stack(img_boxes,0)              # Transform individual tensors into a single box tensor
            img_scores += [*cls_scores[nms_keep_idx]]         # Aggregate cls_scores into tensors for all classes
            score_stack = torch.stack(img_scores, 0)          # Transform individual tensors into a single score tensor
            img_cls_pred += [*tensor([c]*len(nms_keep_idx))]
            cls_pred_stack = torch.stack(img_cls_pred, 0)
        if not img_boxes: continue                            # Failed to find any valid classes for this image
        batch_mask = score_stack > threshold                  # Filter final lists to be greater than threshold
        box_stack = box_stack[batch_mask]                     # "
        score_stack = score_stack[batch_mask]                 # "
        cls_pred_stack = cls_pred_stack[batch_mask]           # "
one_batch_boxes.append(box_stack) # Agregate bounding boxes for the batch
one_batch_scores.append(score_stack) # Agregate scores for the batch
one_batch_cls_pred.append(cls_pred_stack)
    # Pad individual box and score tensors into uniform-sized box and score tensors of shapes: (batch, no of items in batch, 4) and (batch, no of items in batch, 21)
one_batch_boxes, one_batch_scores, one_batch_cats = pad_output(one_batch_boxes, one_batch_scores, one_batch_cls_pred, no_classes)
return (one_batch_boxes, one_batch_cats)
| 59.784
| 174
| 0.640707
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,690
| 0.493778
|
b84c1c6e378f4059bee57b13f1d84bcf63b4ae74
| 2,141
|
py
|
Python
|
code.py
|
ashweta81/data-wrangling-pandas-code-along-practice
|
af49250a45c616f46d763990f2321f470d439916
|
[
"MIT"
] | null | null | null |
code.py
|
ashweta81/data-wrangling-pandas-code-along-practice
|
af49250a45c616f46d763990f2321f470d439916
|
[
"MIT"
] | null | null | null |
code.py
|
ashweta81/data-wrangling-pandas-code-along-practice
|
af49250a45c616f46d763990f2321f470d439916
|
[
"MIT"
] | null | null | null |
# --------------
import pandas as pd
import numpy as np
# Read the data using pandas module.
data=pd.read_csv(path)
# Find the list of unique cities where matches were played
print("The unique cities where matches were played are ", data.city.unique())
print('*'*80)
# Find the columns which contains null values if any ?
print("The columns which contain null values are ", data.columns[data.isnull().any()])
print('*'*80)
# List down top 5 most played venues
print("The top 5 most played venues are", data.venue.value_counts().head(5))
print('*'*80)
# Make a runs count frequency table
print("The frequency table for runs is", data.runs.value_counts())
print('*'*80)
# How many seasons were played and in which year they were played
data['year']=data.date.apply(lambda x : x[:4])
seasons=data.year.unique()
print('The total seasons and years are', seasons)
print('*'*80)
# No. of matches played per season
ss1=data.groupby(['year'])['match_code'].nunique()
print('The total matches played per season are', ss1)
print("*"*80)
# Total runs across the seasons
ss2=data.groupby(['year']).agg({'total':'sum'})
print("Total runs are",ss2)
print("*"*80)
# Teams who have scored more than 200+ runs. Show the top 10 results
w1=data.groupby(['match_code','batting_team']).agg({'total':'sum'}).sort_values(by='total', ascending=False)
w1[w1.total>200].reset_index().head(10)
print("The top 10 results are",w1[w1.total>200].reset_index().head(10))
print("*"*80)
# What are the chances of chasing 200+ target
dt1=data.groupby(['match_code','batting_team','inning'])['total'].sum().reset_index()
dt1.head()
dt1.loc[((dt1.total>200) & (dt1.inning==2)),:].reset_index()
data.match_code.unique().shape[0]
probability=(dt1.loc[((dt1.total>200) & (dt1.inning==2)),:].shape[0])/(data.match_code.unique().shape[0])*100
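# i.e. (number of matches with a second-innings total above 200) / (total number of matches) * 100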
print("Chances are", probability)
print("*"*80)
# Which team has the highest win count in their respective seasons ?
dt2=data.groupby(['year','winner'])['match_code'].nunique()
dt3=dt2.groupby(level=0,group_keys=False)
dt4=dt3.apply(lambda x: x.sort_values(ascending=False).head(1))
print("The team with the highes win count is", dt4)
| 40.396226
| 109
| 0.712751
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,055
| 0.49276
|
b84e3b8a7a2a09cb215aab0d692cf00fa2446655
| 794
|
py
|
Python
|
Trakttv.bundle/Contents/Libraries/Shared/plugin/scrobbler/handlers/playing.py
|
disrupted/Trakttv.bundle
|
24712216c71f3b22fd58cb5dd89dad5bb798ed60
|
[
"RSA-MD"
] | 1,346
|
2015-01-01T14:52:24.000Z
|
2022-03-28T12:50:48.000Z
|
Trakttv.bundle/Contents/Libraries/Shared/plugin/scrobbler/handlers/playing.py
|
alcroito/Plex-Trakt-Scrobbler
|
4f83fb0860dcb91f860d7c11bc7df568913c82a6
|
[
"RSA-MD"
] | 474
|
2015-01-01T10:27:46.000Z
|
2022-03-21T12:26:16.000Z
|
Trakttv.bundle/Contents/Libraries/Shared/plugin/scrobbler/handlers/playing.py
|
alcroito/Plex-Trakt-Scrobbler
|
4f83fb0860dcb91f860d7c11bc7df568913c82a6
|
[
"RSA-MD"
] | 191
|
2015-01-02T18:27:22.000Z
|
2022-03-29T10:49:48.000Z
|
from plugin.scrobbler.core import SessionEngine, SessionHandler
@SessionEngine.register
class PlayingHandler(SessionHandler):
__event__ = 'playing'
__src__ = ['create', 'pause', 'stop', 'start']
__dst__ = ['start', 'stop']
@classmethod
def process(cls, session, payload):
# Handle media change
if cls.has_media_changed(session, payload) and session.state in ['start', 'pause']:
yield 'stop', session.payload
# Handle current media
if cls.has_finished(session, payload):
if session.state in ['start', 'pause']:
yield 'stop', payload
elif session.state in ['create', 'pause', 'stop']:
yield 'start', payload
elif session.state == 'start':
yield None, payload
| 31.76
| 91
| 0.61461
| 703
| 0.88539
| 533
| 0.671285
| 727
| 0.915617
| 0
| 0
| 168
| 0.211587
|
b84e7cc9d16e3f0b3e8a9ecacf33341e96af47cb
| 102
|
py
|
Python
|
Desafio 46.py
|
MisaelGuilherme/100_Exercicios_Em_Python
|
8c4cdad7e60201abcdd2c4a5646f52aed4e7041e
|
[
"MIT"
] | null | null | null |
Desafio 46.py
|
MisaelGuilherme/100_Exercicios_Em_Python
|
8c4cdad7e60201abcdd2c4a5646f52aed4e7041e
|
[
"MIT"
] | null | null | null |
Desafio 46.py
|
MisaelGuilherme/100_Exercicios_Em_Python
|
8c4cdad7e60201abcdd2c4a5646f52aed4e7041e
|
[
"MIT"
] | null | null | null |
print('====== DESAFIO 46 ======')
import time
for c in range(10,-1,-1):
time.sleep(1)
print(c)
| 20.4
| 33
| 0.539216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 26
| 0.254902
|
b850754dddf9940614a7ecc4de4bab7929800a85
| 4,329
|
py
|
Python
|
samples/features/sql-big-data-cluster/security/encryption-at-rest-external-key-provider/kms_plugin_app/custom_akv.py
|
aguzev/sql-server-samples
|
498c47f2ac8e45d052ed61878a2ce11eb32394bf
|
[
"MIT"
] | 4,474
|
2019-05-06T23:05:37.000Z
|
2022-03-31T23:30:31.000Z
|
samples/features/sql-big-data-cluster/security/encryption-at-rest-external-key-provider/kms_plugin_app/custom_akv.py
|
aguzev/sql-server-samples
|
498c47f2ac8e45d052ed61878a2ce11eb32394bf
|
[
"MIT"
] | 256
|
2019-05-07T07:07:19.000Z
|
2022-03-29T17:11:41.000Z
|
samples/features/sql-big-data-cluster/security/encryption-at-rest-external-key-provider/kms_plugin_app/custom_akv.py
|
aguzev/sql-server-samples
|
498c47f2ac8e45d052ed61878a2ce11eb32394bf
|
[
"MIT"
] | 5,075
|
2019-05-07T00:07:21.000Z
|
2022-03-31T23:31:15.000Z
|
# Placeholder for adding logic specific to application
# and backend key store.
#
import os
import json
import sys
from azure.identity import DefaultAzureCredential
from azure.keyvault.keys import KeyClient
from azure.keyvault.keys.crypto import CryptographyClient, EncryptionAlgorithm
# Append the current application path to sys path to be able to resolve local modules.
#
sys.path.append('.')
sys.path.append('./model')
from constants import ConfigurationConstants, Operations, CryptoConstants
import utils
from json_objects import EncryptDecryptRequest, JsonWebKeyResponse, EncryptDecryptResponse
def decrypt(request, json_key_attributes_dict, pin, version):
"""
This method will be called by the application entry point
for decrypting the payload.
    request.value has the ciphertext payload
    request.alg contains the padding algorithm used for decryption.
"""
set_env(json_key_attributes_dict, pin)
credential = DefaultAzureCredential()
key_vault_key = get_akv_key(json_key_attributes_dict, credential)
crypto_client = CryptographyClient(key_vault_key, credential=credential)
decrypted_payload = crypto_client.decrypt(EncryptionAlgorithm.rsa_oaep, request.value)
response = EncryptDecryptResponse(decrypted_payload.plaintext)
return response
def encrypt(request, json_key_attributes_dict, pin, version):
"""
This method will be called by the application entry point
for encrypting the payload.
request.value has the plaintext payload
request.alg contains the padding algorithm for encryption.
"""
set_env(json_key_attributes_dict, pin)
credential = DefaultAzureCredential()
key_vault_key = get_akv_key(json_key_attributes_dict, credential)
crypto_client = CryptographyClient(key_vault_key, credential=credential)
encrypted_payload = crypto_client.encrypt(EncryptionAlgorithm.rsa_oaep, request.value)
response = EncryptDecryptResponse(encrypted_payload.ciphertext)
return response
def get_key(json_key_attributes_dict, pin, version):
set_env(json_key_attributes_dict, pin)
credential = DefaultAzureCredential()
key_vault_key = get_akv_key(json_key_attributes_dict, credential)
# JsonWebKeyResponse expects integer inputs and converts them to byte array
# However AKV SDK already provides byte arrays for Exponent and Modulus.
# We will instantiate the object with a dummy value and then overwrite the
# exponent and module value.
#
    dummy_val = 1
    key_response = JsonWebKeyResponse(dummy_val, dummy_val)
key_response.e = utils.urlsafe_b64encode_as_str(key_vault_key.key.e)
key_response.n = utils.urlsafe_b64encode_as_str(key_vault_key.key.n)
return key_response
def get_akv_key(json_key_attributes_dict, credential):
"""
Gets the AKV key object.
"""
if "vault_url" in json_key_attributes_dict:
vault_url = json_key_attributes_dict["vault_url"]
else:
raise KeyError('vault_url was expected in the parameters but not found')
if "keyname" in json_key_attributes_dict:
key_name = json_key_attributes_dict["keyname"]
else:
raise KeyError('keyname was expected in the parameters but not found')
if "keyversion" in json_key_attributes_dict:
key_version = json_key_attributes_dict["keyversion"]
else:
raise KeyError('keyversion was expected in the parameters but not found')
key_client = KeyClient(vault_url=vault_url, credential=credential)
key_vault_key = key_client.get_key(key_name, key_version)
return key_vault_key
def set_env(json_key_attributes_dict, pin):
"""
Sets the environment variables for the MS identity credential lookup to work.
"""
if "azure_client_id" in json_key_attributes_dict:
key_version = json_key_attributes_dict["azure_client_id"]
else:
raise KeyError('azure_client_id was expected in the parameters but not found')
if "azure_tenant_id" in json_key_attributes_dict:
key_version = json_key_attributes_dict["azure_tenant_id"]
else:
raise KeyError('azure_tenant_id was expected in the parameters but not found')
os.environ["AZURE_CLIENT_ID"]=json_key_attributes_dict["azure_client_id"]
os.environ["AZURE_TENANT_ID"]=json_key_attributes_dict["azure_tenant_id"]
os.environ["AZURE_CLIENT_SECRET"]=pin
| 40.839623
| 90
| 0.769924
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,498
| 0.346038
|
b85115da00994686b76087d8e81c839619f86fa0
| 338
|
py
|
Python
|
scss/setup.py
|
Jawbone/pyScss
|
b1f483c253ec4aaceb3b8d4d630ca5528590e9b8
|
[
"MIT"
] | null | null | null |
scss/setup.py
|
Jawbone/pyScss
|
b1f483c253ec4aaceb3b8d4d630ca5528590e9b8
|
[
"MIT"
] | null | null | null |
scss/setup.py
|
Jawbone/pyScss
|
b1f483c253ec4aaceb3b8d4d630ca5528590e9b8
|
[
"MIT"
] | null | null | null |
from distutils.core import setup, Extension
setup(name='jawbonePyScss',
version='1.1.8',
description='jawbonePyScss',
ext_modules=[
Extension(
'_scss',
sources=['src/_scss.c', 'src/block_locator.c', 'src/scanner.c'],
libraries=['pcre'],
optional=True
)
]
)
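# Typical local build (assumes the pcre development headers are installed):
#   python setup.py build_ext --inplace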
| 22.533333
| 76
| 0.553254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 99
| 0.292899
|
b851d6b0de112cf236b222d90e4d36785001355b
| 18,301
|
py
|
Python
|
spanner_orm/tests/query_test.py
|
MetaOfX/python-spanner-orm
|
59063eb6989b845d1658118a7a0282eede19d8bf
|
[
"Apache-2.0"
] | 37
|
2018-11-01T18:29:03.000Z
|
2022-03-30T17:24:39.000Z
|
spanner_orm/tests/query_test.py
|
MetaOfX/python-spanner-orm
|
59063eb6989b845d1658118a7a0282eede19d8bf
|
[
"Apache-2.0"
] | 48
|
2018-11-05T18:51:23.000Z
|
2021-12-17T20:28:11.000Z
|
spanner_orm/tests/query_test.py
|
MetaOfX/python-spanner-orm
|
59063eb6989b845d1658118a7a0282eede19d8bf
|
[
"Apache-2.0"
] | 19
|
2019-05-04T06:05:31.000Z
|
2021-12-17T20:52:53.000Z
|
# python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import unittest
from unittest import mock
from absl.testing import parameterized
from spanner_orm import condition
from spanner_orm import error
from spanner_orm import field
from spanner_orm import query
from spanner_orm.tests import models
from google.cloud.spanner_v1.proto import type_pb2
def now():
return datetime.datetime.now(tz=datetime.timezone.utc)
class QueryTest(parameterized.TestCase):
@mock.patch('spanner_orm.table_apis.sql_query')
def test_where(self, sql_query):
sql_query.return_value = []
models.UnittestModel.where_equal(int_=3, transaction=True)
(_, sql, parameters, types), _ = sql_query.call_args
expected_sql = 'SELECT .* FROM table WHERE table.int_ = @int_0'
self.assertRegex(sql, expected_sql)
self.assertEqual(parameters, {'int_0': 3})
self.assertEqual(types, {'int_0': field.Integer.grpc_type()})
@mock.patch('spanner_orm.table_apis.sql_query')
def test_count(self, sql_query):
sql_query.return_value = [[0]]
column, value = 'int_', 3
models.UnittestModel.count_equal(int_=3, transaction=True)
(_, sql, parameters, types), _ = sql_query.call_args
column_key = '{}0'.format(column)
expected_sql = r'SELECT COUNT\(\*\) FROM table WHERE table.{} = @{}'.format(
column, column_key)
self.assertRegex(sql, expected_sql)
self.assertEqual({column_key: value}, parameters)
self.assertEqual(types, {column_key: field.Integer.grpc_type()})
def test_count_allows_force_index(self):
force_index = condition.force_index('test_index')
count_query = query.CountQuery(models.UnittestModel, [force_index])
sql = count_query.sql()
expected_sql = 'SELECT COUNT(*) FROM table@{FORCE_INDEX=test_index}'
self.assertEqual(expected_sql, sql)
@parameterized.parameters(
condition.limit(1), condition.order_by(
('int_', condition.OrderType.DESC)))
def test_count_only_allows_where_and_from_segment_conditions(self, condition):
with self.assertRaises(error.SpannerError):
query.CountQuery(models.UnittestModel, [condition])
def select(self, *conditions):
return query.SelectQuery(models.UnittestModel, list(conditions))
def test_query_limit(self):
key, value = 'limit0', 2
select_query = self.select(condition.limit(value))
self.assertEndsWith(select_query.sql(), ' LIMIT @{}'.format(key))
self.assertEqual(select_query.parameters(), {key: value})
self.assertEqual(select_query.types(), {key: field.Integer.grpc_type()})
select_query = self.select()
self.assertNotRegex(select_query.sql(), 'LIMIT')
def test_query_limit_offset(self):
limit_key, limit = 'limit0', 2
offset_key, offset = 'offset0', 5
select_query = self.select(condition.limit(limit, offset=offset))
self.assertEndsWith(select_query.sql(),
' LIMIT @{} OFFSET @{}'.format(limit_key, offset_key))
self.assertEqual(select_query.parameters(), {
limit_key: limit,
offset_key: offset
})
self.assertEqual(select_query.types(), {
limit_key: field.Integer.grpc_type(),
offset_key: field.Integer.grpc_type()
})
def test_query_order_by(self):
order = ('int_', condition.OrderType.DESC)
select_query = self.select(condition.order_by(order))
self.assertEndsWith(select_query.sql(), ' ORDER BY table.int_ DESC')
self.assertEmpty(select_query.parameters())
self.assertEmpty(select_query.types())
select_query = self.select()
self.assertNotRegex(select_query.sql(), 'ORDER BY')
def test_query_order_by_with_object(self):
order = (models.UnittestModel.int_, condition.OrderType.DESC)
select_query = self.select(condition.order_by(order))
self.assertEndsWith(select_query.sql(), ' ORDER BY table.int_ DESC')
self.assertEmpty(select_query.parameters())
self.assertEmpty(select_query.types())
select_query = self.select()
self.assertNotRegex(select_query.sql(), 'ORDER BY')
@parameterized.parameters(('int_', 5, field.Integer.grpc_type()),
('string', 'foo', field.String.grpc_type()),
('timestamp', now(), field.Timestamp.grpc_type()))
def test_query_where_comparison(self, column, value, grpc_type):
condition_generators = [
condition.greater_than, condition.not_less_than, condition.less_than,
condition.not_greater_than, condition.equal_to, condition.not_equal_to
]
for condition_generator in condition_generators:
current_condition = condition_generator(column, value)
select_query = self.select(current_condition)
column_key = '{}0'.format(column)
expected_where = ' WHERE table.{} {} @{}'.format(
column, current_condition.operator, column_key)
self.assertEndsWith(select_query.sql(), expected_where)
self.assertEqual(select_query.parameters(), {column_key: value})
self.assertEqual(select_query.types(), {column_key: grpc_type})
@parameterized.parameters(
(models.UnittestModel.int_, 5, field.Integer.grpc_type()),
(models.UnittestModel.string, 'foo', field.String.grpc_type()),
(models.UnittestModel.timestamp, now(), field.Timestamp.grpc_type()))
def test_query_where_comparison_with_object(self, column, value, grpc_type):
condition_generators = [
condition.greater_than, condition.not_less_than, condition.less_than,
condition.not_greater_than, condition.equal_to, condition.not_equal_to
]
for condition_generator in condition_generators:
current_condition = condition_generator(column, value)
select_query = self.select(current_condition)
column_key = '{}0'.format(column.name)
expected_where = ' WHERE table.{} {} @{}'.format(
column.name, current_condition.operator, column_key)
self.assertEndsWith(select_query.sql(), expected_where)
self.assertEqual(select_query.parameters(), {column_key: value})
self.assertEqual(select_query.types(), {column_key: grpc_type})
@parameterized.parameters(
('int_', [1, 2, 3], field.Integer.grpc_type()),
('int_', (4, 5, 6), field.Integer.grpc_type()),
('string', ['a', 'b', 'c'], field.String.grpc_type()),
('timestamp', [now()], field.Timestamp.grpc_type()))
def test_query_where_list_comparison(self, column, values, grpc_type):
condition_generators = [condition.in_list, condition.not_in_list]
for condition_generator in condition_generators:
current_condition = condition_generator(column, values)
select_query = self.select(current_condition)
column_key = '{}0'.format(column)
expected_sql = ' WHERE table.{} {} UNNEST(@{})'.format(
column, current_condition.operator, column_key)
list_type = type_pb2.Type(
code=type_pb2.ARRAY, array_element_type=grpc_type)
self.assertEndsWith(select_query.sql(), expected_sql)
self.assertEqual(select_query.parameters(), {column_key: values})
self.assertEqual(select_query.types(), {column_key: list_type})
def test_query_combines_properly(self):
select_query = self.select(
condition.equal_to('int_', 5),
condition.not_equal_to('string_array', ['foo', 'bar']),
condition.limit(2),
condition.order_by(('string', condition.OrderType.DESC)))
expected_sql = ('WHERE table.int_ = @int_0 AND table.string_array != '
'@string_array1 ORDER BY table.string DESC LIMIT @limit2')
self.assertEndsWith(select_query.sql(), expected_sql)
def test_only_one_limit_allowed(self):
with self.assertRaises(error.SpannerError):
self.select(condition.limit(2), condition.limit(2))
def test_force_index(self):
select_query = self.select(condition.force_index('test_index'))
expected_sql = 'FROM table@{FORCE_INDEX=test_index}'
self.assertEndsWith(select_query.sql(), expected_sql)
def test_force_index_with_object(self):
select_query = self.select(
condition.force_index(models.UnittestModel.test_index))
expected_sql = 'FROM table@{FORCE_INDEX=test_index}'
self.assertEndsWith(select_query.sql(), expected_sql)
def includes(self, relation, *conditions, foreign_key_relation=False):
include_condition = condition.includes(relation, list(conditions),
foreign_key_relation)
return query.SelectQuery(
models.ForeignKeyTestModel
if foreign_key_relation else models.RelationshipTestModel,
[include_condition],
)
@parameterized.parameters((models.RelationshipTestModel.parent, True),
(models.ForeignKeyTestModel.foreign_key_1, False))
def test_bad_includes_args(self, relation_key, foreign_key_relation):
with self.assertRaisesRegex(ValueError, 'Must pass'):
self.includes(
relation_key,
foreign_key_relation=foreign_key_relation,
)
@parameterized.named_parameters(
(
'legacy_relationship',
{
'relation': 'parent'
},
r'SELECT RelationshipTestModel\S* RelationshipTestModel\S* '
r'ARRAY\(SELECT AS STRUCT SmallTestModel\S* SmallTestModel\S* '
r'SmallTestModel\S* FROM SmallTestModel WHERE SmallTestModel.key = '
r'RelationshipTestModel.parent_key\)',
),
(
'legacy_relationship_with_object_arg',
{
'relation': models.RelationshipTestModel.parent
},
r'SELECT RelationshipTestModel\S* RelationshipTestModel\S* '
r'ARRAY\(SELECT AS STRUCT SmallTestModel\S* SmallTestModel\S* '
r'SmallTestModel\S* FROM SmallTestModel WHERE SmallTestModel.key = '
r'RelationshipTestModel.parent_key\)',
),
(
'foreign_key_relationship',
{
'relation': 'foreign_key_1',
'foreign_key_relation': True
},
r'SELECT ForeignKeyTestModel\S* ForeignKeyTestModel\S* ForeignKeyTestModel\S* ForeignKeyTestModel\S* '
r'ARRAY\(SELECT AS STRUCT SmallTestModel\S* SmallTestModel\S* '
r'SmallTestModel\S* FROM SmallTestModel WHERE SmallTestModel.key = '
r'ForeignKeyTestModel.referencing_key_1\)',
),
(
'foreign_key_relationship_with_object_arg',
{
'relation': models.ForeignKeyTestModel.foreign_key_1,
'foreign_key_relation': True
},
r'SELECT ForeignKeyTestModel\S* ForeignKeyTestModel\S* ForeignKeyTestModel\S* ForeignKeyTestModel\S* '
r'ARRAY\(SELECT AS STRUCT SmallTestModel\S* SmallTestModel\S* '
r'SmallTestModel\S* FROM SmallTestModel WHERE SmallTestModel.key = '
r'ForeignKeyTestModel.referencing_key_1\)',
),
)
def test_includes(self, includes_kwargs, expected_sql):
select_query = self.includes(**includes_kwargs)
# The column order varies between test runs
self.assertRegex(select_query.sql(), expected_sql)
self.assertEmpty(select_query.parameters())
self.assertEmpty(select_query.types())
@parameterized.parameters(({
'relation': models.RelationshipTestModel.parent,
'foreign_key_relation': True
},), ({
'relation': models.ForeignKeyTestModel.foreign_key_1,
'foreign_key_relation': False
},))
def test_error_mismatched_params(self, includes_kwargs):
with self.assertRaisesRegex(ValueError, 'Must pass'):
self.includes(**includes_kwargs)
def test_includes_subconditions_query(self):
select_query = self.includes('parents', condition.equal_to('key', 'value'))
expected_sql = (
'WHERE SmallTestModel.key = RelationshipTestModel.parent_key '
'AND SmallTestModel.key = @key0')
self.assertRegex(select_query.sql(), expected_sql)
def includes_result(self, related=1):
child = {'parent_key': 'parent_key', 'child_key': 'child'}
result = [child[name] for name in models.RelationshipTestModel.columns]
parent = {'key': 'key', 'value_1': 'value_1', 'value_2': None}
parents = []
for _ in range(related):
parents.append([parent[name] for name in models.SmallTestModel.columns])
result.append(parents)
return child, parent, [result]
def fk_includes_result(self, related=1):
child = {
'referencing_key_1': 'parent_key',
'referencing_key_2': 'child',
'referencing_key_3': 'child',
'self_referencing_key': 'child'
}
result = [child[name] for name in models.ForeignKeyTestModel.columns]
parent = {'key': 'key', 'value_1': 'value_1', 'value_2': None}
parents = []
for _ in range(related):
parents.append([parent[name] for name in models.SmallTestModel.columns])
result.append(parents)
return child, parent, [result]
@parameterized.named_parameters(
(
'legacy_relationship',
{
'relation': 'parent'
},
lambda x: x.parent,
lambda x: x.includes_result(related=1),
),
(
'foreign_key_relationship',
{
'relation': 'foreign_key_1',
'foreign_key_relation': True
},
lambda x: x.foreign_key_1,
lambda x: x.fk_includes_result(related=1),
),
)
def test_includes_single_related_object_result(
self,
includes_kwargs,
referenced_table_fn,
includes_result_fn,
):
select_query = self.includes(**includes_kwargs)
child_values, parent_values, rows = includes_result_fn(self)
result = select_query.process_results(rows)[0]
self.assertIsInstance(
referenced_table_fn(result),
models.SmallTestModel,
)
for name, value in child_values.items():
self.assertEqual(getattr(result, name), value)
for name, value in parent_values.items():
self.assertEqual(getattr(referenced_table_fn(result), name), value)
@parameterized.named_parameters(
(
'legacy_relationship',
{
'relation': 'parent'
},
lambda x: x.parent,
lambda x: x.includes_result(related=0),
),
(
'foreign_key_relationship',
{
'relation': 'foreign_key_1',
'foreign_key_relation': True
},
lambda x: x.foreign_key_1,
lambda x: x.fk_includes_result(related=0),
),
)
def test_includes_single_no_related_object_result(self, includes_kwargs,
referenced_table_fn,
includes_result_fn):
select_query = self.includes(**includes_kwargs)
child_values, _, rows = includes_result_fn(self)
result = select_query.process_results(rows)[0]
self.assertIsNone(referenced_table_fn(result))
for name, value in child_values.items():
self.assertEqual(getattr(result, name), value)
def test_includes_subcondition_result(self):
select_query = self.includes('parents', condition.equal_to('key', 'value'))
child_values, parent_values, rows = self.includes_result(related=2)
result = select_query.process_results(rows)[0]
self.assertLen(result.parents, 2)
for name, value in child_values.items():
self.assertEqual(getattr(result, name), value)
for name, value in parent_values.items():
self.assertEqual(getattr(result.parents[0], name), value)
@parameterized.named_parameters(
(
'legacy_relationship',
{
'relation': 'parent'
},
lambda x: x.includes_result(related=2),
),
(
'foreign_key_relationship',
{
'relation': 'foreign_key_1',
'foreign_key_relation': True
},
lambda x: x.fk_includes_result(related=2),
),
)
def test_includes_error_on_multiple_results_for_single(
self, includes_kwargs, includes_result_fn):
select_query = self.includes(**includes_kwargs)
_, _, rows = includes_result_fn(self)
with self.assertRaises(error.SpannerError):
_ = select_query.process_results(rows)
@parameterized.parameters(True, False)
def test_includes_error_on_invalid_relation(self, foreign_key_relation):
with self.assertRaises(error.ValidationError):
self.includes('bad_relation', foreign_key_relation=foreign_key_relation)
@parameterized.parameters(
('bad_column', 0, 'parent', False),
('bad_column', 0, 'foreign_key_1', True),
('child_key', 'good value', 'parent', False),
('child_key', 'good value', 'foreign_key_1', False),
('key', ['bad value'], 'parent', False),
('key', ['bad value'], 'foreign_key_1', False),
)
def test_includes_error_on_invalid_subconditions(self, column, value,
relation,
foreign_key_relation):
with self.assertRaises(error.ValidationError):
self.includes(
relation,
condition.equal_to(column, value),
          foreign_key_relation=foreign_key_relation,
)
def test_or(self):
condition_1 = condition.equal_to('int_', 1)
condition_2 = condition.equal_to('int_', 2)
select_query = self.select(condition.or_([condition_1], [condition_2]))
expected_sql = '((table.int_ = @int_0) OR (table.int_ = @int_1))'
self.assertEndsWith(select_query.sql(), expected_sql)
self.assertEqual(select_query.parameters(), {'int_0': 1, 'int_1': 2})
self.assertEqual(select_query.types(), {
'int_0': field.Integer.grpc_type(),
'int_1': field.Integer.grpc_type()
})
if __name__ == '__main__':
logging.basicConfig()
unittest.main()
| 38.855626
| 112
| 0.674335
| 17,246
| 0.942353
| 0
| 0
| 11,131
| 0.608218
| 0
| 0
| 3,741
| 0.204415
|
b8521a1784e7669f76ae670720d2483ecddff419
| 518
|
py
|
Python
|
leetcode/1859_sorting_the_sentence.py
|
jacquerie/leetcode
|
a05e6b832eb0e0740aaff7b2eb3109038ad404bf
|
[
"MIT"
] | 3
|
2018-05-10T09:56:49.000Z
|
2020-11-07T18:09:42.000Z
|
leetcode/1859_sorting_the_sentence.py
|
jacquerie/leetcode
|
a05e6b832eb0e0740aaff7b2eb3109038ad404bf
|
[
"MIT"
] | null | null | null |
leetcode/1859_sorting_the_sentence.py
|
jacquerie/leetcode
|
a05e6b832eb0e0740aaff7b2eb3109038ad404bf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
class Solution:
def sortSentence(self, s: str) -> str:
tokens = s.split()
result = [None] * len(tokens)
for token in tokens:
word, index = token[:-1], int(token[-1])
result[index - 1] = word
return ' '.join(result)
if __name__ == '__main__':
solution = Solution()
assert 'This is a sentence' == solution.sortSentence('is2 sentence4 This1 a3')
assert 'Me Myself and I' == solution.sortSentence('Myself2 Me1 I4 and3')
| 25.9
| 82
| 0.579151
| 276
| 0.532819
| 0
| 0
| 0
| 0
| 0
| 0
| 118
| 0.227799
|
b8522b1cda4c464e1d7c573371d89f13b40ae37b
| 89
|
py
|
Python
|
todoapi/apps.py
|
Faysa1/Gestion-Tickets-Taches
|
eeba92df59c3217d15b02a5bb1ed3c6e673537a4
|
[
"Apache-2.0"
] | 51
|
2018-12-12T20:18:31.000Z
|
2022-03-11T20:23:35.000Z
|
todoapi/apps.py
|
Faysa1/Gestion-Tickets-Taches
|
eeba92df59c3217d15b02a5bb1ed3c6e673537a4
|
[
"Apache-2.0"
] | 11
|
2018-12-17T08:48:07.000Z
|
2022-03-02T02:54:38.000Z
|
todoapi/apps.py
|
Faysa1/Gestion-Tickets-Taches
|
eeba92df59c3217d15b02a5bb1ed3c6e673537a4
|
[
"Apache-2.0"
] | 29
|
2018-12-12T20:19:00.000Z
|
2022-01-18T12:33:21.000Z
|
from django.apps import AppConfig
class TodoapiConfig(AppConfig):
name = 'todoapi'
| 14.833333
| 33
| 0.752809
| 52
| 0.58427
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.101124
|
b85283b049e0e58e8a7c62f87369d905b8440e5f
| 3,101
|
py
|
Python
|
src/flagon/backends/redis_backend.py
|
ashcrow/flagon
|
50e6aa96854468a89399ef08573e4f814a002d26
|
[
"MIT"
] | 18
|
2015-08-27T03:49:42.000Z
|
2021-05-12T21:48:17.000Z
|
src/flagon/backends/redis_backend.py
|
ashcrow/flagon
|
50e6aa96854468a89399ef08573e4f814a002d26
|
[
"MIT"
] | 2
|
2016-07-18T13:48:46.000Z
|
2017-05-20T15:56:03.000Z
|
src/flagon/backends/redis_backend.py
|
ashcrow/flagon
|
50e6aa96854468a89399ef08573e4f814a002d26
|
[
"MIT"
] | 5
|
2015-09-20T08:46:01.000Z
|
2021-06-10T03:41:04.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2014 Steve Milner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Redis backend.
"""
import redis
from flagon import errors
from flagon.backends import Backend
class RedisBackend(Backend):
def __init__(self, host, port, db):
"""
Creates an instance of the RedisBackend.
:rtype: RedisBackend
"""
# https://pypi.python.org/pypi/redis/2.10.1
pool = redis.ConnectionPool(host=host, port=port, db=db)
self._server = redis.Redis(
connection_pool=pool,
charset='utf-8',
errors='strict',
decode_responses=False)
def set(self, name, key, value):
"""
Sets a value for a feature. This is a proposed name only!!!
:param name: name of the feature.
:rtype: bool
"""
self._server.hset(name, key, value)
def exists(self, name, key):
"""
Checks if a feature exists.
:param name: name of the feature.
:rtype: bool
"""
return self._server.hexists(name, key)
def is_active(self, name, key):
"""
Checks if a feature is on.
:param name: name of the feature.
:rtype: bool
:raises: UnknownFeatureError
"""
if not self._server.hexists(name, key):
raise errors.UnknownFeatureError('Unknown feature: %s' % name)
if self._server.hget(name, key) == 'True':
return True
return False
def _turn(self, name, key, value):
"""
        Sets the given key of a feature to a value (used to turn a feature on or off).
        :param name: name of the feature.
        :param key: key within the feature hash.
        :param value: value to set the key to.
:raises: UnknownFeatureError
"""
# TODO: Copy paste --- :-(
if not self._server.hexists(name, key):
raise errors.UnknownFeatureError('Unknown feature: %s %s' % (
name, key))
self._server.hset(name, key, value)
    def turn_on(self, name):
        """Turn the 'active' flag of a feature on."""
        return self._turn(name, 'active', True)

    def turn_off(self, name):
        """Turn the 'active' flag of a feature off."""
        return self._turn(name, 'active', False)
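# Minimal usage sketch (assumptions: a Redis server reachable on localhost:6379,
# db 0; the feature name 'my_feature' below is only an example).
if __name__ == '__main__':
    backend = RedisBackend('localhost', 6379, 0)
    backend.set('my_feature', 'active', True)
    print(backend.exists('my_feature', 'active'))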
| 32.642105
| 78
| 0.639471
| 1,882
| 0.606901
| 0
| 0
| 0
| 0
| 0
| 0
| 1,945
| 0.627217
|
b8578eebc5523ec0f810b0739d30b92505082a9a
| 2,365
|
py
|
Python
|
6. Ordinary Differential Equations/4. a. Higher Order Ordinary Differential Equation using RK4.py
|
dmNadim/Numerical-Methods
|
2c74312ea4efddd7db65483fef02fea710963dcf
|
[
"MIT"
] | null | null | null |
6. Ordinary Differential Equations/4. a. Higher Order Ordinary Differential Equation using RK4.py
|
dmNadim/Numerical-Methods
|
2c74312ea4efddd7db65483fef02fea710963dcf
|
[
"MIT"
] | null | null | null |
6. Ordinary Differential Equations/4. a. Higher Order Ordinary Differential Equation using RK4.py
|
dmNadim/Numerical-Methods
|
2c74312ea4efddd7db65483fef02fea710963dcf
|
[
"MIT"
] | null | null | null |
from math import sin, cos, pi
f = lambda x: 9*pi*cos(x) + 7*sin(x) + 4*x - 5*x*cos(x) # Analytical Solution
df = lambda x: -9*pi*sin(x) + 7*cos(x) + 4 - 5*(cos(x)-x*sin(x))
dy = lambda x,y,u: u # 1st Derivative, y' = u
du = lambda x,y,u: 4*x + 10*sin(x) - y # 2nd Derivative, u' = 4x+10sin(x)-y
x = pi # Lower limit, [π
xn = 2*pi # Upper limit, 2π]
y = 0 # Initial condition, y(π) = 0
u = 2 # Initial condition, u(π) = 2
h = 0.5 # Width of each division, step size
# h = 0.1 # Smaller step size gives less error
n = int((xn-x)/h) # Number of divisions of the domain
print('x \t\ty(RK4) \t\ty\'(RK4) \ty(Exact) \ty\'(Exact)') # Header of Output
print('%f \t%f \t%f \t%f \t%f' % (x, y, u, f(x), df(x))) # Initial x and y
for i in range(n):
L1 = h * du(x,y,u)
K1 = h * dy(x,y,u)
L2 = h * du(x + h/2, y + K1/2, u + L1/2)
K2 = h * dy(x + h/2, y + K1/2, u + L1/2)
L3 = h * du(x + h/2, y + K2/2, u + L2/2)
K3 = h * dy(x + h/2, y + K2/2, u + L2/2)
L4 = h * du(x + h, y + K3, u + L3)
K4 = h * dy(x + h, y + K3, u + L3)
u += 1/6*(L1 + 2*L2 + 2*L3 + L4) # u(x+h) = u(x) + 1/6(L1+2L2+2L3+L4)
y += 1/6*(K1 + 2*K2 + 2*K3 + K4) # y(x+h) = y(x) + 1/6(K1+2K2+2K3+K4)
x += h # x for next step, x = x + h
print('%f \t%f \t%f \t%f \t%f' % (x, y, u, f(x), df(x)))
"""
2nd order ODE y'' = f(x,y,y') should be divided into two first order ODE's
y' = u and u' = f(x,y,u)
The two equations are solved simultaneously using RK4
L1 = h u'(x,y,u)
K1 = h y'(x,y,u)
L2 = h u'(x + h/2, y + K1/2, u + L1/2)
K2 = h y'(x + h/2, y + K1/2, u + L1/2)
L3 = h u'(x + h/2, y + K2/2, u + L2/2)
K3 = h y'(x + h/2, y + K2/2, u + L2/2)
L4 = h u'(x + h, y + K3, u + L3)
K4 = h y'(x + h, y + K3, u + L3)
u(x+h) = u(x) + 1/6 (L1 + 2 L2 + 2 L3 + L4)
y(x+h) = y(x) + 1/6 (K1 + 2 K2 + 2 K3 + K4)
The initial condition is the value of y(x) at initial domain x
Find the numerical solution of the following differential equation
over the domain [π,2π]: y''+y = 4x+10sin(x), y(π) = 0, y'(π) = 2
y' = u, y(π) = 0
u' = 4x+10sin(x)-y, u(π) = 2
Analytical Solution: y = 9π cos(x) + 7sin(x) + 4x - 5x cos(x)
"""
| 36.384615
| 78
| 0.4537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,384
| 0.582492
|
b85874411e43ac8ab8f40e52f253f84cc102e824
| 1,029
|
py
|
Python
|
scripts/image_navigation.py
|
habibmuhammadthariq/iq_gnc
|
06752997c103b48db48efb2814923fdc3a0f74b8
|
[
"MIT"
] | null | null | null |
scripts/image_navigation.py
|
habibmuhammadthariq/iq_gnc
|
06752997c103b48db48efb2814923fdc3a0f74b8
|
[
"MIT"
] | null | null | null |
scripts/image_navigation.py
|
habibmuhammadthariq/iq_gnc
|
06752997c103b48db48efb2814923fdc3a0f74b8
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
#ros library
#import rospy
#import the API
#from iq_gnc.py_gnc_functions import *
#print the colours
#from iq_gnc.PrintColours import *
# Importing Point message from package geometry_msgs.
#from geometry_msgs.msg import Point
#import opencv library
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while True:
_, img = cap.read()
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#red color
low_red = np.array([161, 155, 84])
    high_red = np.array([179, 255, 255])
#blue color
#low_blue = np.array([94, 80, 2])
#high_blue = np.array([126, 255, 255])
#green color
#low_green = np.array([25, 52, 72])
#high_green = np.array([102, 255, 255])
#every color except white
#low = np.array([0, 42, 0])
#high = np.array([179, 255, 255])
red_mask = cv2.inRange(hsv, low_red, high_red)
    red = cv2.bitwise_and(img, img, mask=red_mask)
    cv2.imshow("Original Image", img)
cv2.imshow("Red Filter", red)
key = cv2.waitKey(1)
    if key == 27:
        break

cap.release()
cv2.destroyAllWindows()
| 25.725
| 54
| 0.651118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 555
| 0.539359
|
b8588a227beffd14bd3ab5788c323affed1dda08
| 1,083
|
py
|
Python
|
switchmng/wsgi.py
|
AnsgarKlein/switchmng
|
d86ae2dc40ef70f43fec8e3adf49ae5fd796a01d
|
[
"MIT"
] | null | null | null |
switchmng/wsgi.py
|
AnsgarKlein/switchmng
|
d86ae2dc40ef70f43fec8e3adf49ae5fd796a01d
|
[
"MIT"
] | null | null | null |
switchmng/wsgi.py
|
AnsgarKlein/switchmng
|
d86ae2dc40ef70f43fec8e3adf49ae5fd796a01d
|
[
"MIT"
] | null | null | null |
from switchmng import config
from switchmng.schema.base import Base
from switchmng.database import DatabaseConnection
from switchmng.routes import create_app
def app(*args, **kwargs):
"""
Entry point for wsgi server like `gunicorn` serving this
application.
    Parse the given keyword parameters, initialize the application and
    return the WSGI app to be served.
"""
help_str = 'Possible parameters:\n'\
+ ' config=FILE Use FILE for configuration file'
# Parse gunicorn parameters, convert them to normal sys.argv style
# parameters and pass them to parsing function.
params = []
for k in kwargs:
if k == 'config':
params.append('--config')
params.append(kwargs[k])
else:
print('Unknown parameter "{}"\n\n{}\n'.format(k, help_str))
return None
# Parse given arguments
config.parse_arguments(params)
# Initialize the database
db = DatabaseConnection(config.DB_TYPE, config.DB_PATH, config.DB_VERBOSE, Base)
# Return wsgi app
return create_app(db)
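# Illustrative launch command (the config path is a placeholder; the exact syntax for
# passing factory parameters depends on the gunicorn version in use):
#   gunicorn 'switchmng.wsgi:app(config=/path/to/config.json)'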
| 29.27027
| 84
| 0.665743
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 486
| 0.448753
|
b8591745507d3ac646b22cef27786c56c597a729
| 1,598
|
py
|
Python
|
records_mover/db/postgres/copy_options/date_output_style.py
|
ellyteitsworth/records-mover
|
21cd56efc2d23cfff04ec1fdf582e5229546c418
|
[
"Apache-2.0"
] | null | null | null |
records_mover/db/postgres/copy_options/date_output_style.py
|
ellyteitsworth/records-mover
|
21cd56efc2d23cfff04ec1fdf582e5229546c418
|
[
"Apache-2.0"
] | null | null | null |
records_mover/db/postgres/copy_options/date_output_style.py
|
ellyteitsworth/records-mover
|
21cd56efc2d23cfff04ec1fdf582e5229546c418
|
[
"Apache-2.0"
] | null | null | null |
from records_mover.utils import quiet_remove
from records_mover.records.delimited import cant_handle_hint, ValidatedRecordsHints
from typing import Set, Tuple, Optional
from .types import DateOrderStyle, DateOutputStyle
def determine_date_output_style(unhandled_hints: Set[str],
hints: ValidatedRecordsHints,
fail_if_cant_handle_hint: bool) -> \
Tuple[DateOutputStyle, Optional[DateOrderStyle]]:
# see docs in the types module
dateformat = hints.dateformat
timeonlyformat = hints.timeonlyformat
datetimeformattz = hints.datetimeformattz
datetimeformat = hints.datetimeformat
date_order_style: Optional[DateOrderStyle] = None
if (dateformat == 'YYYY-MM-DD' and
timeonlyformat == 'HH24:MI:SS' and
datetimeformattz in ['YYYY-MM-DD HH:MI:SSOF',
'YYYY-MM-DD HH24:MI:SSOF'] and
datetimeformat == 'YYYY-MM-DD HH24:MI:SS'):
date_output_style: DateOutputStyle = 'ISO'
# date_order_style doesn't really matter, as ISO is not ambiguous
else:
# 'SQL', 'Postgres' and 'German' all support only alphabetic
# timezone indicators, which aren't yet supported in the
# records spec
cant_handle_hint(fail_if_cant_handle_hint, 'datetimeformattz', hints)
quiet_remove(unhandled_hints, 'dateformat')
quiet_remove(unhandled_hints, 'timeonlyformat')
quiet_remove(unhandled_hints, 'datetimeformattz')
quiet_remove(unhandled_hints, 'datetimeformat')
return (date_output_style, date_order_style)
| 39.95
| 83
| 0.70025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 405
| 0.253442
|
b8596ffa290b85166791b3474bb6337caf557e75
| 6,239
|
py
|
Python
|
tacker/tests/unit/vnfm/infra_drivers/openstack/test_vdu.py
|
takahashi-tsc/tacker
|
a0ae01a13dcc51bb374060adcbb4fd484ab37156
|
[
"Apache-2.0"
] | null | null | null |
tacker/tests/unit/vnfm/infra_drivers/openstack/test_vdu.py
|
takahashi-tsc/tacker
|
a0ae01a13dcc51bb374060adcbb4fd484ab37156
|
[
"Apache-2.0"
] | null | null | null |
tacker/tests/unit/vnfm/infra_drivers/openstack/test_vdu.py
|
takahashi-tsc/tacker
|
a0ae01a13dcc51bb374060adcbb4fd484ab37156
|
[
"Apache-2.0"
] | 1
|
2020-11-16T02:14:35.000Z
|
2020-11-16T02:14:35.000Z
|
# Copyright 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from tacker import context
from tacker.db.common_services import common_services_db_plugin
from tacker.objects import heal_vnf_request
from tacker.plugins.common import constants
from tacker.tests.unit import base
from tacker.vnfm.infra_drivers.openstack import vdu
vnf_dict = {
'attributes': {
'heat_template': {
'outputs': {
'mgmt_ip-VDU1': {
'value': {
'get_attr': [
'CP1', 'fixed_ips', 0, 'ip_address']
}
}
},
'description': 'Demo example\n',
'parameters': {},
'resources': {
'VDU1': {
'type': 'OS::Nova::Server',
'properties': {
'user_data_format': 'SOFTWARE_CONFIG',
'availability_zone': 'nova',
'image': 'cirros-0.4.0-x86_64-disk',
'config_drive': False,
'flavor': {'get_resource': 'VDU1_flavor'},
'networks': [{'port': {'get_resource': 'CP1'}}]
}
},
'CP1': {
'type': 'OS::Neutron::Port',
'properties': {
'port_security_enabled': False,
'network': 'net_mgmt'
}
},
'VDU1_flavor': {
'type': 'OS::Nova::Flavor',
'properties': {'vcpus': 1, 'disk': 1, 'ram': 512}
}
}
}
},
'status': 'ACTIVE',
'vnfd_id': '576acf48-b9df-491d-a57c-342de660ec78',
'tenant_id': '13d2ca8de70d48b2a2e0dbac2c327c0b',
'vim_id': '3f41faa7-5630-47d2-9d4a-1216953c8887',
'instance_id': 'd1121d3c-368b-4ac2-b39d-835aa3e4ccd8',
'placement_attr': {'vim_name': 'openstack-vim'},
'id': 'a27fc58e-66ae-4031-bba4-efede318c60b',
'name': 'vnf_create_1'
}
class FakeHeatClient(mock.Mock):
class Stack(mock.Mock):
stack_status = 'CREATE_COMPLETE'
outputs = [{u'output_value': u'192.168.120.31', u'description':
u'management ip address', u'output_key': u'mgmt_ip-vdu1'}]
def create(self, *args, **kwargs):
return {'stack': {'id': '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}}
def get(self, id):
return self.Stack()
def update(self, stack_id, **kwargs):
return self.Stack()
def resource_mark_unhealthy(self, stack_id, resource_name,
mark_unhealthy, resource_status_reason):
return self.Stack()
class TestVDU(base.TestCase):
def setUp(self):
super(TestVDU, self).setUp()
self.context = context.get_admin_context()
self._mock_heat_client()
mock.patch('tacker.vnfm.vim_client.VimClient.get_vim').start()
self.additional_paramas_obj = heal_vnf_request.HealVnfAdditionalParams(
parameter='VDU1',
cause=["Unable to reach while monitoring resource: 'VDU1'"])
self.heal_request_data_obj = heal_vnf_request.HealVnfRequest(
cause='VNF monitoring fails.',
stack_id=vnf_dict['instance_id'],
additional_params=[self.additional_paramas_obj])
self.heal_vdu = vdu.Vdu(self.context, vnf_dict,
self.heal_request_data_obj)
mock.patch('tacker.db.common_services.common_services_db_plugin.'
'CommonServicesPluginDb.create_event'
).start()
self._cos_db_plugin = \
common_services_db_plugin.CommonServicesPluginDb()
self.addCleanup(mock.patch.stopall)
def _mock_heat_client(self):
self.heat_client = mock.Mock(wraps=FakeHeatClient())
fake_heat_client = mock.Mock()
fake_heat_client.return_value = self.heat_client
self._mock(
'tacker.vnfm.infra_drivers.openstack.heat_client.HeatClient',
fake_heat_client)
@mock.patch('tacker.vnfm.vim_client.VimClient.get_vim')
def test_heal_vdu(self, mock_get_vim):
mock_get_vim.return_value = mock.MagicMock()
self.heal_vdu.heal_vdu()
self.heat_client.update.assert_called_once_with(
stack_id=vnf_dict['instance_id'], existing=True)
self._cos_db_plugin.create_event.assert_called_with(
self.context, res_id=vnf_dict['id'],
res_type=constants.RES_TYPE_VNF, res_state=vnf_dict['status'],
evt_type=constants.RES_EVT_HEAL, tstamp=mock.ANY,
details=("HealVnfRequest invoked to update the stack '%s'" %
vnf_dict['instance_id']))
@mock.patch('tacker.vnfm.vim_client.VimClient.get_vim')
def test_resource_mark_unhealthy(self, mock_get_vim):
mock_get_vim.return_value = mock.MagicMock()
self.heal_vdu._resource_mark_unhealthy()
self.heat_client.resource_mark_unhealthy.assert_called_once_with(
stack_id=vnf_dict['instance_id'],
resource_name=self.additional_paramas_obj.parameter,
mark_unhealthy=True,
resource_status_reason=self.additional_paramas_obj.cause)
self._cos_db_plugin.create_event.assert_called_with(
self.context, res_id=vnf_dict['id'],
res_type=constants.RES_TYPE_VNF, res_state=vnf_dict['status'],
evt_type=constants.RES_EVT_HEAL, tstamp=mock.ANY,
details="HealVnfRequest invoked to mark resource 'VDU1' "
"to unhealthy.")
| 38.042683
| 79
| 0.599295
| 3,563
| 0.571085
| 0
| 0
| 1,532
| 0.245552
| 0
| 0
| 2,141
| 0.343164
|
b85adde254fd21cc8c4987b399dbf5487b008f43
| 445
|
py
|
Python
|
tests/test_example.py
|
jlane9/mockerena
|
a3fd1bd39af6269dc96846967b4bba47759bab41
|
[
"MIT"
] | 1
|
2019-09-10T05:12:38.000Z
|
2019-09-10T05:12:38.000Z
|
tests/test_example.py
|
jlane9/mockerena
|
a3fd1bd39af6269dc96846967b4bba47759bab41
|
[
"MIT"
] | 10
|
2019-09-10T16:14:35.000Z
|
2019-12-19T17:13:51.000Z
|
tests/test_example.py
|
jlane9/mockerena
|
a3fd1bd39af6269dc96846967b4bba47759bab41
|
[
"MIT"
] | 2
|
2019-09-10T05:11:58.000Z
|
2020-04-29T17:59:47.000Z
|
"""test_example
.. codeauthor:: John Lane <john.lane93@gmail.com>
"""
from flask import url_for
from eve import Eve
import pytest
@pytest.mark.example
def test_example(client: Eve):
"""Example test for reference
:param Eve client: Mockerena app instance
:raises: AssertionError
"""
res = client.get(url_for('generate', schema_id='mock_example'))
assert res.status_code == 200
assert res.mimetype == 'text/csv'
| 19.347826
| 67
| 0.698876
| 0
| 0
| 0
| 0
| 309
| 0.694382
| 0
| 0
| 217
| 0.48764
|
b85d1ecfbfe5440d3438acef2b9c37a3da7e6e97
| 1,243
|
py
|
Python
|
tests/TestPoissonSpikeGeneration.py
|
VadimLopatkin/AtlasSnnController
|
25c87bd7c80cbb5a1163311b2fd87fad5344f978
|
[
"Apache-2.0"
] | 2
|
2016-05-22T12:30:41.000Z
|
2016-06-03T06:05:21.000Z
|
tests/TestPoissonSpikeGeneration.py
|
VadimLopatkin/AtlasSnnController
|
25c87bd7c80cbb5a1163311b2fd87fad5344f978
|
[
"Apache-2.0"
] | null | null | null |
tests/TestPoissonSpikeGeneration.py
|
VadimLopatkin/AtlasSnnController
|
25c87bd7c80cbb5a1163311b2fd87fad5344f978
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from brian2 import *
class TestPoissonSpikeGeneration(unittest.TestCase):
def test_simple_poisson_generator(self):
P = PoissonGroup(10, 5*Hz)
M = SpikeMonitor(P)
run(10*ms)
n = M.t
# plot(M.t/ms, M.v[0], '-b', lw=2, label='N0: membrane potential')
# xlabel('Time (ms)')
# ylabel('t')
# legend(loc='best')
# show()
if __name__ == '__main__':
unittest.main()
| 33.594595
| 74
| 0.697506
| 369
| 0.296862
| 0
| 0
| 0
| 0
| 0
| 0
| 907
| 0.729686
|
b85d56ed4c33e772f43301cfd59b3662ccd3560a
| 6,100
|
py
|
Python
|
stream.py
|
Abhishek-Aditya-bs/Streaming-Spark-For-Machine-Learning
|
76f9c97e66d6171bc83d1183fadc30bd492422a7
|
[
"MIT"
] | 1
|
2021-12-10T13:14:53.000Z
|
2021-12-10T13:14:53.000Z
|
stream.py
|
iVishalr/SSML-spark-streaming-for-machine-learning
|
ba95a7d2d6bb15bacfbbf5b3c95317310b36d54f
|
[
"MIT"
] | null | null | null |
stream.py
|
iVishalr/SSML-spark-streaming-for-machine-learning
|
ba95a7d2d6bb15bacfbbf5b3c95317310b36d54f
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python3
import time
import json
import pickle
import socket
import argparse
import numpy as np
from tqdm import tqdm
parser = argparse.ArgumentParser(
description='Streams a file to a Spark Streaming Context')
parser.add_argument('--file', '-f', help='File to stream', required=False,type=str, default="cifar")
parser.add_argument('--batch-size', '-b', help='Batch size',required=False, type=int, default=100)
parser.add_argument('--endless', '-e', help='Enable endless stream',required=False, type=bool, default=False)
parser.add_argument('--split','-s', help="training or test split", required=False, type=str, default='train')
parser.add_argument('--sleep','-t', help="streaming interval", required=False, type=int, default=3)
TCP_IP = "localhost"
TCP_PORT = 6100
class Dataset:
def __init__(self) -> None:
self.data = []
self.labels = []
self.epoch = 0
def data_generator(self,file,batch_size):
batch = []
with open(f"cifar/{file}","rb") as batch_file:
batch_data = pickle.load(batch_file, encoding='bytes')
self.data.append(batch_data[b'data'])
self.labels.extend(batch_data[b'labels'])
data = np.vstack(self.data)
self.data = list(map(np.ndarray.tolist, data))
for ix in range(0,(len(self.data)//batch_size)*batch_size,batch_size):
image = self.data[ix:ix+batch_size]
label = self.labels[ix:ix+batch_size]
batch.append([image,label])
self.data = self.data[ix+batch_size:]
self.labels = self.labels[ix+batch_size:]
return batch
def sendCIFARBatchFileToSpark(self,tcp_connection, input_batch_file):
pbar = tqdm(total=int((5e4//batch_size)+1)) if train_test_split=='train' else tqdm(total=int((1e4//batch_size)+1))
data_received = 0
for file in input_batch_file:
batches = self.data_generator(file,batch_size)
for ix,batch in enumerate(batches):
image,labels = batch
image = np.array(image)
received_shape = image.shape
image = list(map(np.ndarray.tolist, image))
feature_size = len(image[0])
payload = dict()
for mini_batch_index in range(len(image)):
payload[mini_batch_index] = dict()
for feature_index in range(feature_size):
payload[mini_batch_index][f'feature{feature_index}'] = image[mini_batch_index][feature_index]
payload[mini_batch_index]['label'] = labels[mini_batch_index]
send_batch = (json.dumps(payload) + '\n').encode()
try:
tcp_connection.send(send_batch)
except BrokenPipeError:
print("Either batch size is too big for the dataset or the connection was closed")
except Exception as error_message:
print(f"Exception thrown but was handled: {error_message}")
data_received+=1
pbar.update(1)
pbar.set_description(f"epoch: {self.epoch} it: {data_received} | received : {received_shape} images")
time.sleep(sleep_time)
for batch in [[self.data,self.labels]]:
image,labels = batch
image = np.array(image)
received_shape = image.shape
image = list(map(np.ndarray.tolist, image))
feature_size = len(image[0])
payload = dict()
for mini_batch_index in range(len(image)):
payload[mini_batch_index] = dict()
for feature_index in range(feature_size):
payload[mini_batch_index][f'feature{feature_index}'] = image[mini_batch_index][feature_index]
payload[mini_batch_index]['label'] = labels[mini_batch_index]
send_batch = (json.dumps(payload) + '\n').encode()
try:
tcp_connection.send(send_batch)
except BrokenPipeError:
print("Either batch size is too big for the dataset or the connection was closed")
except Exception as error_message:
print(f"Exception thrown but was handled: {error_message}")
data_received+=1
pbar.update(1)
pbar.set_description(f"epoch: {self.epoch} it: {data_received} | received : {received_shape} images")
self.data = []
self.labels = []
time.sleep(sleep_time)
pbar.pos=0
self.epoch+=1
def connectTCP(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
print(f"Waiting for connection on port {TCP_PORT}...")
connection, address = s.accept()
print(f"Connected to {address}")
return connection, address
def streamCIFARDataset(self,tcp_connection, dataset_type='cifar'):
CIFAR_BATCHES = [
'data_batch_1',
'data_batch_2',
'data_batch_3',
'data_batch_4',
'data_batch_5',
'test_batch'
]
CIFAR_BATCHES = CIFAR_BATCHES[:-1] if train_test_split=='train' else [CIFAR_BATCHES[-1]]
self.sendCIFARBatchFileToSpark(tcp_connection,CIFAR_BATCHES)
if __name__ == '__main__':
args = parser.parse_args()
print(args)
input_file = args.file
batch_size = args.batch_size
endless = args.endless
sleep_time = args.sleep
train_test_split = args.split
dataset = Dataset()
tcp_connection, _ = dataset.connectTCP()
if input_file == "cifar":
_function = dataset.streamCIFARDataset
if endless:
while True:
_function(tcp_connection, input_file)
else:
_function(tcp_connection, input_file)
tcp_connection.close()
| 40.131579
| 122
| 0.59623
| 4,770
| 0.781967
| 0
| 0
| 0
| 0
| 0
| 0
| 973
| 0.159508
|
b85e1207d6e09dc9d3b5821470f14d0eed8e2190
| 394
|
py
|
Python
|
subcontent/backup/python3_closure_nonlocal.py
|
fingerkc/fingerkc.github.io
|
0bfe5163ea28be3747756c8b6be64ad4f09b2fbf
|
[
"MIT"
] | 2
|
2019-06-13T07:22:22.000Z
|
2019-11-23T03:55:21.000Z
|
subcontent/backup/python3_closure_nonlocal.py
|
fingerkc/fingerkc.github.io
|
0bfe5163ea28be3747756c8b6be64ad4f09b2fbf
|
[
"MIT"
] | 1
|
2019-12-15T04:10:59.000Z
|
2019-12-15T04:10:59.000Z
|
subcontent/backup/python3_closure_nonlocal.py
|
fingerkc/fingerkc.github.io
|
0bfe5163ea28be3747756c8b6be64ad4f09b2fbf
|
[
"MIT"
] | 1
|
2019-06-24T08:17:13.000Z
|
2019-06-24T08:17:13.000Z
|
#!/usr/bin/python3
## Python 3: closures and the nonlocal keyword
# If an inner function references a variable from an enclosing (non-global)
# scope, that inner function is called a closure.
def A_():
    var = 0
    def clo_B():
        var_b = 1    # local variable of the closure
        var = 100    # rebinds var locally; the enclosing var stays 0
        print(var)   # prints the local 100; the enclosing var is not changed
    return clo_B
# clo_B is a closure
# The nonlocal keyword
def A_():
    var = 0
    def clo_B():
        nonlocal var       # declares that var refers to the enclosing function's variable
        var = var + 1      # without nonlocal, this line would raise UnboundLocalError
    return clo_B
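# Demonstration (illustrative): with nonlocal, the closure keeps its own state
# between calls; each call below increments the enclosing var (0 -> 1 -> 2).
if __name__ == '__main__':
    counter = A_()   # A_ here is the second, nonlocal-based definition above
    counter()
    counter()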
| 15.153846
| 50
| 0.670051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 445
| 0.720065
|
b85e66013be32836e47b0a35176f156e7e93f2e2
| 352
|
py
|
Python
|
utils/tracker.py
|
emarche/Fashion-MNIST
|
f8183e33ab7c3df673a60de3b16f2c4c979b89bb
|
[
"MIT"
] | null | null | null |
utils/tracker.py
|
emarche/Fashion-MNIST
|
f8183e33ab7c3df673a60de3b16f2c4c979b89bb
|
[
"MIT"
] | null | null | null |
utils/tracker.py
|
emarche/Fashion-MNIST
|
f8183e33ab7c3df673a60de3b16f2c4c979b89bb
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
class Tracker:
def __init__(self, seed, model_name):
self.save_tag = model_name + '_seed_' + str(seed)
self.model_save = "models/"
if not os.path.exists(self.model_save): os.makedirs(self.model_save)
def save_model(self, model):
model.save(self.model_save + self.save_tag + '.h5')
| 29.333333
| 76
| 0.661932
| 322
| 0.914773
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.0625
|
b86107cdd1d04d347d396fb2227d46e8eb33bf64
| 2,663
|
py
|
Python
|
kittycad/models/cluster.py
|
KittyCAD/kittycad.py
|
7f7460d366dbd55fce50e5faa4a032b62e4baae4
|
[
"MIT"
] | 1
|
2022-02-06T05:07:25.000Z
|
2022-02-06T05:07:25.000Z
|
kittycad/models/cluster.py
|
KittyCAD/kittycad.py
|
7f7460d366dbd55fce50e5faa4a032b62e4baae4
|
[
"MIT"
] | 7
|
2022-02-04T11:29:25.000Z
|
2022-03-07T01:37:26.000Z
|
kittycad/models/cluster.py
|
KittyCAD/kittycad.py
|
7f7460d366dbd55fce50e5faa4a032b62e4baae4
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict, List, Type, TypeVar, Union, cast
import attr
from ..types import UNSET, Unset
T = TypeVar("T", bound="Cluster")
@attr.s(auto_attribs=True)
class Cluster:
""" """
addr: Union[Unset, str] = UNSET
auth_timeout: Union[Unset, int] = UNSET
cluster_port: Union[Unset, int] = UNSET
name: Union[Unset, str] = UNSET
tls_timeout: Union[Unset, int] = UNSET
urls: Union[Unset, List[str]] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
addr = self.addr
auth_timeout = self.auth_timeout
cluster_port = self.cluster_port
name = self.name
tls_timeout = self.tls_timeout
urls: Union[Unset, List[str]] = UNSET
if not isinstance(self.urls, Unset):
urls = self.urls
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if addr is not UNSET:
field_dict['addr'] = addr
if auth_timeout is not UNSET:
field_dict['auth_timeout'] = auth_timeout
if cluster_port is not UNSET:
field_dict['cluster_port'] = cluster_port
if name is not UNSET:
field_dict['name'] = name
if tls_timeout is not UNSET:
field_dict['tls_timeout'] = tls_timeout
if urls is not UNSET:
field_dict['urls'] = urls
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
addr = d.pop("addr", UNSET)
auth_timeout = d.pop("auth_timeout", UNSET)
cluster_port = d.pop("cluster_port", UNSET)
name = d.pop("name", UNSET)
tls_timeout = d.pop("tls_timeout", UNSET)
urls = cast(List[str], d.pop("urls", UNSET))
cluster = cls(
addr=addr,
auth_timeout=auth_timeout,
cluster_port=cluster_port,
name=name,
tls_timeout=tls_timeout,
urls=urls,
)
cluster.additional_properties = d
return cluster
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
| 28.945652
| 77
| 0.605332
| 2,488
| 0.934285
| 0
| 0
| 2,515
| 0.944424
| 0
| 0
| 137
| 0.051446
|
b8630346a106bda3978051caf8a5f0528b3d18fe
| 4,287
|
py
|
Python
|
Crypto.py
|
akshatsri89/Cryptogram
|
51ea1e658ecf82ea922a967299814e812da73c4e
|
[
"Apache-2.0"
] | 1
|
2021-08-10T14:09:34.000Z
|
2021-08-10T14:09:34.000Z
|
Crypto.py
|
akshatsri89/Cryptogram
|
51ea1e658ecf82ea922a967299814e812da73c4e
|
[
"Apache-2.0"
] | null | null | null |
Crypto.py
|
akshatsri89/Cryptogram
|
51ea1e658ecf82ea922a967299814e812da73c4e
|
[
"Apache-2.0"
] | null | null | null |
# import tkinter module
from tkinter import *
# import other necessery modules
import random
# Vigenère cipher for encryption and decryption
import base64
# creating root object
root = Tk()
# defining size of window
root.geometry("1200x4000")
# setting up the title of window
root.title("Message Encrypter and Decrypter")
Tops = Frame(root, width=1600, relief=SUNKEN)
Tops.pack(side=TOP)
f1 = Frame(root, width=800, relief=SUNKEN)
f1.pack(side=LEFT)
# ==============================================
lblInfo = Label(Tops, font=('helvetica', 40, 'bold', 'underline'),
text="SECRET MESSAGING",
fg="Black", bd=10, anchor='w')
lblInfo.grid(row=0, column=0)
# Initializing variables
Msg = StringVar()
key = StringVar()
mode = StringVar()
Result = StringVar()
# labels for the message
lblMsg = Label(f1, font=('arial', 16, 'bold'),
text="MESSAGE", bd=16, anchor="w")
lblMsg.grid(row=1, column=0)
# Entry box for the message
txtMsg = Entry(f1, font=('arial', 16, 'bold'),
textvariable=Msg, bd=10, insertwidth=4,
bg="powder blue", justify='right')
txtMsg.grid(row=1, column=1)
# labels for the key
lblkey = Label(f1, font=('arial', 16, 'bold'),
text="KEY (Only Integer)", bd=16, anchor="w")
lblkey.grid(row=2, column=0)
# Entry box for the key
txtkey = Entry(f1, font=('arial', 16, 'bold'),
textvariable=key, bd=10, insertwidth=4,
bg="powder blue", justify='right')
txtkey.grid(row=2, column=1)
# labels for the mode
lblmode = Label(f1, font=('arial', 16, 'bold'),
text="MODE(e for encrypt, d for decrypt)",
bd=16, anchor="w")
lblmode.grid(row=3, column=0)
# Entry box for the mode
txtmode = Entry(f1, font=('arial', 16, 'bold'),
textvariable=mode, bd=10, insertwidth=4,
bg="powder blue", justify='right')
txtmode.grid(row=3, column=1)
# labels for the result
lblResult = Label(f1, font=('arial', 16, 'bold'),
text="The Result-", bd=16, anchor="w")
lblResult.grid(row=2, column=2)
# Entry box for the result
txtResult = Entry(f1, font=('arial', 16, 'bold'),
textvariable=Result, bd=10, insertwidth=4,
bg="powder blue", justify='right')
txtResult.grid(row=2, column=3)
# Vigenère cipher
# Function to encode
def encode(key, msg):
enc = []
for i in range(len(msg)):
key_c = key[i % len(key)]
enc_c = chr((ord(msg[i]) +
ord(key_c)) % 256)
enc.append(enc_c)
print("enc:", enc)
return base64.urlsafe_b64encode("".join(enc).encode()).decode()
# Function to decode
def decode(key, enc):
dec = []
enc = base64.urlsafe_b64decode(enc).decode()
for i in range(len(enc)):
key_c = key[i % len(key)]
dec_c = chr((256 + ord(enc[i]) -
ord(key_c)) % 256)
dec.append(dec_c)
print("dec:", dec)
return "".join(dec)
def Results():
# print("Message= ", (Msg.get()))
msg = Msg.get()
k = key.get()
m = mode.get()
if (m == 'e'):
Result.set(encode(k, msg))
else:
Result.set(decode(k, msg))
# exit function
def qExit():
root.destroy()
# Function to reset the window
def Reset():
Msg.set("")
key.set("")
mode.set("")
Result.set("")
# Show message button
btnTotal = Button(f1, padx=16, pady=8, bd=16, fg="black",
font=('arial', 16, 'bold'), width=10,
text="Show Message", bg="yellow",
command=Results).grid(row=7, column=1)
# Reset button
btnReset = Button(f1, padx=16, pady=8, bd=16,
fg="black", font=('arial', 16, 'bold'),
width=10, text="Reset", bg="green",
command=Reset).grid(row=7, column=2)
# Exit button
btnExit = Button(f1, padx=16, pady=8, bd=16,
fg="black", font=('arial', 16, 'bold'),
width=10, text="Exit", bg="red",
command=qExit).grid(row=7, column=3)
# keeps window alive
root.mainloop()
| 24.497143
| 68
| 0.54397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,180
| 0.275122
|
b865512ce604a1054ccd890643255a1593208d7a
| 224
|
py
|
Python
|
bootstrap_rmsf/__init__.py
|
jeeberhardt/bootstrap_rmsf
|
1487251ffde91d34b7609aec147c0ff99fc7cded
|
[
"MIT"
] | 1
|
2021-08-06T02:31:32.000Z
|
2021-08-06T02:31:32.000Z
|
bootstrap_rmsf/__init__.py
|
jeeberhardt/bootstrap_rmsf
|
1487251ffde91d34b7609aec147c0ff99fc7cded
|
[
"MIT"
] | null | null | null |
bootstrap_rmsf/__init__.py
|
jeeberhardt/bootstrap_rmsf
|
1487251ffde91d34b7609aec147c0ff99fc7cded
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Jérôme Eberhardt 2018
# Bootstrap RMSF
# Author: Jérôme Eberhardt <qksonoe@gmail.com>
#
# License: MIT
from bootstrap_rmsf import Bootstrap_RMSF
from utils import plot_rmsf
| 18.666667
| 46
| 0.736607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 149
| 0.653509
|
b8658b22f0fd4c1d9dcc67d9f35f9aa1c9580dfe
| 1,221
|
py
|
Python
|
fixture/application.py
|
OSavchik/python_training
|
8e532c9f0da99e5f342467dd7bcc3a43e667daf6
|
[
"Apache-2.0"
] | null | null | null |
fixture/application.py
|
OSavchik/python_training
|
8e532c9f0da99e5f342467dd7bcc3a43e667daf6
|
[
"Apache-2.0"
] | null | null | null |
fixture/application.py
|
OSavchik/python_training
|
8e532c9f0da99e5f342467dd7bcc3a43e667daf6
|
[
"Apache-2.0"
] | null | null | null |
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "Chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecoznized browse %s" % browser)
self.wd.implicitly_wait(5)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.contact = ContactHelper(self)
self.base_url = base_url
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False
def open_home_page(self):
wd = self.wd
wd.get(self.base_url)
def destroy(self):
self.wd.quit()
def get_serial_number_element_by_id(self, list_elements, id):
i = 0
for index_element in list_elements:
if index_element.id == id:
break
else:
i = i + 1
return i
| 24.918367
| 65
| 0.580672
| 1,062
| 0.869779
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.036855
|
b868a9af47b1de35f84902480574280915282a7c
| 7,601
|
py
|
Python
|
data_managers/data_manager_gatk_picard_index_builder/data_manager/data_manager_gatk_picard_index_builder.py
|
supernord/tools-iuc
|
9a0c41967765d120a8fc519c0c7f09cbe3a6efbe
|
[
"MIT"
] | 142
|
2015-03-13T18:08:34.000Z
|
2022-03-30T23:52:34.000Z
|
data_managers/data_manager_gatk_picard_index_builder/data_manager/data_manager_gatk_picard_index_builder.py
|
mtekman/tools-iuc
|
95f1ae4ed1cdd56114df76d215f9e1ed549aa4c5
|
[
"MIT"
] | 3,402
|
2015-01-05T18:04:20.000Z
|
2022-03-30T22:09:36.000Z
|
data_managers/data_manager_gatk_picard_index_builder/data_manager/data_manager_gatk_picard_index_builder.py
|
willemdek11/tools-iuc
|
dc0a0cf275168c2a88ee3dc47652dd7ca1137871
|
[
"MIT"
] | 438
|
2015-01-07T20:33:59.000Z
|
2022-03-30T04:39:18.000Z
|
#!/usr/bin/env python
# Dave B.
# Uses fasta sorting functions written by Dan Blankenberg.
import json
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
CHUNK_SIZE = 2**20
DEFAULT_DATA_TABLE_NAME = "fasta_indexes"
def get_id_name(params, dbkey, fasta_description=None):
# TODO: ensure sequence_id is unique and does not already appear in location file
sequence_id = params['param_dict']['sequence_id']
if not sequence_id:
sequence_id = dbkey
sequence_name = params['param_dict']['sequence_name']
if not sequence_name:
sequence_name = fasta_description
if not sequence_name:
sequence_name = dbkey
return sequence_id, sequence_name
def build_picard_index(data_manager_dict, fasta_filename, target_directory, dbkey, sequence_id, sequence_name, jar, data_table_name=DEFAULT_DATA_TABLE_NAME):
fasta_base_name = os.path.split(fasta_filename)[-1]
gatk_sorted_fasta_filename = os.path.join(target_directory, fasta_base_name)
shutil.copy(fasta_filename, gatk_sorted_fasta_filename)
_sort_fasta_gatk(gatk_sorted_fasta_filename)
sam_index_filename = '%s.fai' % gatk_sorted_fasta_filename
if not os.path.exists(sam_index_filename):
sam_command = ['samtools', 'faidx', gatk_sorted_fasta_filename]
_run_command(sam_command, target_directory)
args = ['java', '-jar', jar, 'R=%s' % gatk_sorted_fasta_filename, 'O=%s.dict' % sequence_id]
_run_command(args, target_directory)
data_table_entry = dict(value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_name)
_add_data_table_entry(data_manager_dict, data_table_name, data_table_entry)
def _run_command(command, target_directory):
tmp_stderr = tempfile.NamedTemporaryFile(prefix="tmp-data-manager-gatk_picard_index_builder-stderr")
proc = subprocess.Popen(args=command, shell=False, cwd=target_directory, stderr=tmp_stderr.fileno())
return_code = proc.wait()
if return_code:
tmp_stderr.flush()
tmp_stderr.seek(0)
sys.stderr.write("Error building index:\n")
while True:
chunk = tmp_stderr.read(CHUNK_SIZE)
if not chunk:
break
sys.stderr.write(chunk)
sys.exit(return_code)
tmp_stderr.close()
def _add_data_table_entry(data_manager_dict, data_table_name, data_table_entry):
data_manager_dict['data_tables'] = data_manager_dict.get('data_tables', {})
data_manager_dict['data_tables'][data_table_name] = data_manager_dict['data_tables'].get(data_table_name, [])
data_manager_dict['data_tables'][data_table_name].append(data_table_entry)
return data_manager_dict
def _move_and_index_fasta_for_sorting(fasta_filename):
unsorted_filename = tempfile.NamedTemporaryFile().name
shutil.move(fasta_filename, unsorted_filename)
fasta_offsets = {}
with open(unsorted_filename) as unsorted_fh:
while True:
offset = unsorted_fh.tell()
line = unsorted_fh.readline()
if not line:
break
if line.startswith(">"):
line = line.split(None, 1)[0][1:]
fasta_offsets[line] = offset
current_order = [x[1] for x in sorted((x[1], x[0]) for x in fasta_offsets.items())]
return (unsorted_filename, fasta_offsets, current_order)
def _write_sorted_fasta(sorted_names, fasta_offsets, sorted_fasta_filename, unsorted_fasta_filename):
with open(unsorted_fasta_filename, 'rb') as unsorted_fh, open(sorted_fasta_filename, 'wb+') as sorted_fh:
for name in sorted_names:
offset = fasta_offsets[name]
unsorted_fh.seek(offset)
sorted_fh.write(unsorted_fh.readline())
while True:
line = unsorted_fh.readline()
if not line or line.startswith(b">"):
break
sorted_fh.write(line)
def _int_to_roman(integer):
if not isinstance(integer, int):
raise TypeError("expected integer, got %s" % type(integer))
if not 0 < integer < 4000:
raise ValueError("Argument must be between 1 and 3999, got %s" % str(integer))
ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
result = ""
for i in range(len(ints)):
count = int(integer / ints[i])
result += nums[i] * count
integer -= ints[i] * count
return result
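# For example, _int_to_roman(4) returns 'IV' and _int_to_roman(1990) returns 'MCMXC'.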
def _sort_fasta_gatk(fasta_filename):
(unsorted_filename, fasta_offsets, current_order) = _move_and_index_fasta_for_sorting(fasta_filename)
sorted_names = list(map(str, range(1, 100))) + list(map(_int_to_roman, range(1, 100))) + ['X', 'Y', 'M']
# detect if we have chrN, or just N
has_chr = False
for chrom in sorted_names:
if "chr%s" % chrom in current_order:
has_chr = True
break
if has_chr:
sorted_names = ["chr%s" % x for x in sorted_names]
else:
sorted_names.insert(0, "MT")
sorted_names.extend(["%s_random" % x for x in sorted_names])
existing_sorted_names = []
for name in sorted_names:
# Append each chromosome only once.
if name in current_order and name not in existing_sorted_names:
existing_sorted_names.append(name)
for name in current_order:
# TODO: confirm that non-canonical names do not need to be sorted specially
if name not in existing_sorted_names:
existing_sorted_names.append(name)
if existing_sorted_names == current_order:
shutil.move(unsorted_filename, fasta_filename)
else:
_write_sorted_fasta(existing_sorted_names, fasta_offsets, fasta_filename, unsorted_filename)
def main():
parser = optparse.OptionParser()
parser.add_option('-f', '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename')
parser.add_option('-d', '--fasta_dbkey', dest='fasta_dbkey', action='store', type="string", default=None, help='fasta_dbkey')
parser.add_option('-t', '--fasta_description', dest='fasta_description', action='store', type="string", default=None, help='fasta_description')
parser.add_option('-n', '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name')
parser.add_option('-j', '--jar', dest='jar', action='store', type="string", default=None, help='GATK .jar file')
(options, args) = parser.parse_args()
filename = args[0]
with open(filename) as fh:
params = json.load(fh)
target_directory = params['output_data'][0]['extra_files_path']
os.mkdir(target_directory)
data_manager_dict = {}
if options.fasta_dbkey in [None, '', '?']:
raise Exception('"%s" is not a valid dbkey. You must specify a valid dbkey.' % (options.fasta_dbkey))
sequence_id, sequence_name = get_id_name(params, dbkey=options.fasta_dbkey, fasta_description=options.fasta_description)
# build the index
build_picard_index(data_manager_dict,
options.fasta_filename,
target_directory,
options.fasta_dbkey,
sequence_id,
sequence_name,
options.jar,
data_table_name=options.data_table_name or DEFAULT_DATA_TABLE_NAME)
# save info to json file
with open(filename, 'w') as fh:
json.dump(data_manager_dict, fh, sort_keys=True)
if __name__ == "__main__":
main()
| 40.865591
| 157
| 0.675043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,224
| 0.161031
|
b86adccb9d42d87933b32bb27aaf25b01696f8a9
| 818
|
py
|
Python
|
django_for_startups/django_customizations/drf_customizations.py
|
Alex3917/django_for_startups
|
9dda54f5777247f7367a963d668f25e797c9adf1
|
[
"MIT"
] | 102
|
2021-02-28T00:58:36.000Z
|
2022-03-30T09:29:34.000Z
|
django_for_startups/django_customizations/drf_customizations.py
|
Alex3917/django_for_startups
|
9dda54f5777247f7367a963d668f25e797c9adf1
|
[
"MIT"
] | 1
|
2021-07-11T18:45:29.000Z
|
2021-07-11T18:45:29.000Z
|
django_for_startups/django_customizations/drf_customizations.py
|
Alex3917/django_for_startups
|
9dda54f5777247f7367a963d668f25e797c9adf1
|
[
"MIT"
] | 16
|
2021-06-23T18:34:46.000Z
|
2022-03-30T09:27:34.000Z
|
# Standard Library imports
# Core Django imports
# Third-party imports
from rest_framework import permissions
from rest_framework.throttling import UserRateThrottle, AnonRateThrottle
# App imports
class BurstRateThrottle(UserRateThrottle):
scope = 'burst'
class SustainedRateThrottle(UserRateThrottle):
scope = 'sustained'
class HighAnonThrottle(AnonRateThrottle):
rate = '5000000/day'
class AccountCreation(permissions.BasePermission):
""" A user should be able to create an account without being authenticated, but only the
owner of an account should be able to access that account's data in a GET method.
"""
def has_permission(self, request, view):
if (request.method == "POST") or request.user.is_authenticated:
return True
return False
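# The "burst" and "sustained" scopes above only resolve to a rate if matching entries exist
# under REST_FRAMEWORK["DEFAULT_THROTTLE_RATES"] in settings.py; a minimal sketch
# (rate values are illustrative, not taken from this project):
#
#   REST_FRAMEWORK = {
#       "DEFAULT_THROTTLE_RATES": {"burst": "60/min", "sustained": "1000/day"},
#   }
#
# The throttle classes themselves are then attached per-view (throttle_classes = [...])
# or globally via DEFAULT_THROTTLE_CLASSES.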
| 24.787879
| 94
| 0.734719
| 606
| 0.740831
| 0
| 0
| 0
| 0
| 0
| 0
| 308
| 0.376528
|
b86ccfc144647099cbf5ac1e80b91ec536893766
| 171,517
|
py
|
Python
|
python/mapCells.py
|
claraya/meTRN
|
a4e4911b26a295e22d7309d5feda026db3325885
|
[
"MIT"
] | 2
|
2019-11-18T22:54:13.000Z
|
2019-11-18T22:55:18.000Z
|
python/mapCells.py
|
claraya/meTRN
|
a4e4911b26a295e22d7309d5feda026db3325885
|
[
"MIT"
] | null | null | null |
python/mapCells.py
|
claraya/meTRN
|
a4e4911b26a295e22d7309d5feda026db3325885
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# perform cellular-resolution expression analyses!
import sys
import time
import optparse
import general
import hyper
import numpy
import math
import pickle
import pdb
import metrn
import modencode
import itertools
import os
import re
import datetime
import calendar
#import simplejson as json
from scipy.stats.stats import pearsonr
from runner import *
from scipy import stats
from network import Network
from network import export
print "Command:", " ".join(sys.argv)
print "Timestamp:", time.asctime(time.localtime())
""" define functions of internal use """
""" define a function to recover cells in a time range """
def getTargetCells(inobject="", inpath="", mode="collection", timeRange=list()):
# grab cells from collection:
if mode == "collection":
# load collection cells:
cells = list()
for gene in os.listdir(inpath):
cells.extend(open(inpath + gene).read().split("\n"))
cells = general.clean(sorted(list(set(cells))))
print "Loading collection cells:", len(cells)
# grab cells from time-points:
elif mode == "time":
# load time-point cells:
cells = list()
for timePoint in os.listdir(inpath):
if int(timePoint) in timeRange:
cells += general.clean(open(inpath + timePoint).read().split("\n"))
cells = sorted(list(set(cells)))
print "Loading time-point/range cells:", len(cells)
# return collected cells:
return cells
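# A minimal usage sketch (paths are illustrative; the directory layouts match what the code
# above expects: one newline-separated cell list per gene file, or per numeric time-point file):
#   cells = getTargetCells(inpath=cellsetpath + "myCollection/", mode="collection")
#   cells = getTargetCells(inpath=timepath + "cells/", mode="time", timeRange=range(50, 201))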
""" define a function to construct a cell-parent relationships, and pedigree cell list """
def expressionBuilder(expressionfile, path, cutoff, minimum, metric="fraction.expression"):
# build header dict:
hd = general.build_header_dict(path + expressionfile)
# process input expression data:
quantitation_matrix, expression_matrix, tracking_matrix, trackedCells = dict(), dict(), dict(), list()
inlines = open(path + expressionfile).readlines()
inlines.pop(0)
for inline in inlines:
initems = inline.strip().split("\t")
cell, gene, rawSignal, metricSignal = initems[hd["cell.name"]], initems[hd["gene"]], initems[hd["cell.expression"]], initems[hd[metric]]
trackedCells.append(cell)
# store expression value
if not gene in quantitation_matrix:
quantitation_matrix[gene] = dict()
quantitation_matrix[gene][cell] = float(metricSignal)
# store tracked and expressing cells:
if not gene in tracking_matrix:
expression_matrix[gene] = list()
tracking_matrix[gene] = list()
tracking_matrix[gene].append(cell)
if float(metricSignal) >= float(cutoff) and float(rawSignal) >= minimum:
expression_matrix[gene].append(cell)
trackedCells = list(set(trackedCells))
return quantitation_matrix, expression_matrix, tracking_matrix, trackedCells
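# expressionBuilder returns, per gene: the chosen metric per cell (quantitation_matrix), the
# cells passing both the fractional cutoff and the raw minimum (expression_matrix), every cell
# assayed for that gene (tracking_matrix), and the union of all assayed cells. A hypothetical
# call against an "assayed" table written by the 'import' mode below:
#   quant, expr, track, cells = expressionBuilder("mapcells_avgExp_myrun_expression_assayed",
#                                                 expressionpath, cutoff=0.1, minimum=2000)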
""" define a function to construct a cell-parent relationships, and pedigree cell list """
def relationshipBuilder(pedigreefile, path, trackedCells=list(), lineages="complete", mechanism="simple"):
cell_dict, parent_dict = dict(), dict()
inlines = open(path + pedigreefile).readlines()
header = inlines.pop(0)
for inline in inlines:
cell, binCell, parent, binParent = inline.strip().split(",")[:4]
tissues = inline.strip().split(",")[5]
if not parent == "" and not cell == "":
if mechanism == "simple" or lineages == "complete" or (lineages == "tracked" and parent in trackedCells and cell in trackedCells):
if not parent in parent_dict:
parent_dict[parent] = list()
parent_dict[parent].append(cell)
cell_dict[cell] = parent
pedigreeCells = sorted(list(set(cell_dict.keys()).union(set(parent_dict.keys()))))
return cell_dict, parent_dict, pedigreeCells
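# relationshipBuilder parses the pedigree CSV (child cell in column 1, parent in column 3)
# and returns cell_dict (child -> parent), parent_dict (parent -> list of children) and the
# sorted union of both as pedigreeCells. For two toy rows describing AB dividing into ABa
# and ABp it would yield:
#   cell_dict   == {"ABa": "AB", "ABp": "AB"}
#   parent_dict == {"AB": ["ABa", "ABp"]}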
""" define a function to generate the underlying tree of a given parent """
def treeBuilder(parent_dict, cell_dict, highlights=list(), nodeColor="#FFFFFF", lineColor="#336699", textColor="#000000", highlightColor="#CC0000"):
# set color rules:
groups = { "unknown" : textColor, "highlight" : highlightColor }
nodeColors = { "unknown" : nodeColor, "highlight" : highlightColor }
lineColors = { "unknown" : lineColor, "highlight" : highlightColor }
textColors = { "unknown" : textColor, "highlight" : highlightColor }
# initialize tree:
tree = {}
for child in cell_dict:
parent = cell_dict[child]
  # determine whether to highlight the parent and/or the child:
pkey, ckey = "unknown", "unknown"
if parent in highlights:
pkey = "highlight"
if child in highlights:
ckey = "highlight"
  # make an instance of a class for the parent if necessary:
if not tree.has_key(parent):
tree[parent] = {'name':parent,'group':groups[pkey],'nodeColor':nodeColors[pkey],'lineColor':lineColors[pkey],'textColor':textColors[pkey],'children':[]}
  # make an instance of a class for the child if necessary:
if not tree.has_key(child):
tree[child] = {'name':child,'group':groups[ckey],'nodeColor':nodeColors[ckey],'lineColor':lineColors[ckey],'textColor':textColors[ckey],'children':[]}
  # add child object to parent if necessary:
if not tree[child] in tree[parent]['children']:
tree[parent]['children'].append(tree[child])
return tree
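# The nested dict built here is plain JSON-serializable data keyed by cell name; a
# hypothetical export of the subtree rooted at a cell called "P0" (name illustrative):
#   import json
#   open("lineage_tree.json", "w").write(json.dumps(tree["P0"]))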
""" define a function to generate the list of cells that are parents to a given cell """
def ascendantsCollector(cell, parent_dict, cell_dict, ascendants=list(), sort=True):
if not cell in ascendants:
ascendants.append(cell)
if cell in cell_dict:
parent = cell_dict[cell]
ascendants.append(parent)
ascendants = ascendantsCollector(parent, parent_dict, cell_dict, ascendants, sort=sort)
if sort:
return sorted(list(set(ascendants)))
else:
return ascendants
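# A minimal sketch of the collectors below, using a hypothetical two-division pedigree
# (cell names are illustrative, not taken from the input files):
#   cell_dict   = {"ABa": "AB", "ABp": "AB", "ABal": "ABa", "ABar": "ABa"}
#   parent_dict = {"AB": ["ABa", "ABp"], "ABa": ["ABal", "ABar"]}
#   ascendantsCollector("ABal", parent_dict, cell_dict, ascendants=list())
#   -> ["AB", "ABa", "ABal"]   (the query cell itself is included)
# Note: 'ascendants' is a mutable default argument, so callers should always pass a fresh
# list (ascendants=list()), as is done throughout this script.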
""" define a function to generate the list of cells that are progeny to a given parent """
def descendantsCollector(parent, parent_dict, cell_dict, descendants=list(), sort=True):
if not parent in descendants:
descendants.append(parent)
if parent in parent_dict:
for cell in parent_dict[parent]:
descendants.append(cell)
descendants = descendantsCollector(cell, parent_dict, cell_dict, descendants, sort=sort)
if sort:
return sorted(list(set(descendants)))
else:
return descendants
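# With the toy pedigree sketched above:
#   descendantsCollector("ABa", parent_dict, cell_dict, descendants=list())
#   -> ["ABa", "ABal", "ABar"]   (the query parent itself is included)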
""" define a function to generate the list of cells that are progeny to a given parent (using combinations function) """
def lineageGenerator(parent, parent_dict, cell_dict):
descendants = descendantsCollector(parent, parent_dict, cell_dict, descendants=list())
gList = list()
for r in range(1, len(descendants)+1):
for gCells in itertools.combinations(descendants, r):
process = True
gCells = list(gCells)
for gCell in gCells:
if gCell != parent:
if not cell_dict[gCell] in gCells:
process = False
if process:
gList.append(",".join(sorted(gCells)))
return gList
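# lineageGenerator filters all 2^len(descendants) - 1 non-empty combinations, so it is only
# practical for small subtrees. With the toy pedigree sketched above:
#   lineageGenerator("ABa", parent_dict, cell_dict)
#   -> ["ABa", "ABa,ABal", "ABa,ABar", "ABa,ABal,ABar"]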
""" define a function to generate the list of cells that are progeny to a given parent (using lineage growth) """
def lineageBuilder(parent, parent_dict, cell_dict, limit="OFF", descendants="ON"):
mList = [parent]
for mCells in mList:
aCells, bCells, xCells, exit = str(mCells), str(mCells), str(mCells), False
for mCell in mCells.split(","):
if mCell in parent_dict and len(parent_dict[mCell]) == 2:
aCell, bCell = parent_dict[mCell]
if not aCell in aCells.split(","):
aCells = ",".join(sorted(mCells.split(",") + [aCell]))
if not bCell in bCells.split(","):
bCells = ",".join(sorted(mCells.split(",") + [bCell]))
if not aCell in xCells.split(",") and not bCell in xCells.split(","):
xCells = ",".join(sorted(mCells.split(",") + [aCell, bCell]))
if not aCells in mList:
mList.append(aCells)
if not bCells in mList:
mList.append(bCells)
if not xCells in mList:
mList.append(xCells)
if limit != "OFF" and len(mList) >= limit:
if descendants == "ON":
aCellx = sorted(list(set(mCells.split(",") + descendantsCollector(aCell, parent_dict, cell_dict, descendants=list()))))
bCellx = sorted(list(set(mCells.split(",") + descendantsCollector(bCell, parent_dict, cell_dict, descendants=list()))))
xCellx = sorted(list(set(aCellx).union(set(bCellx))))
aCellx = ",".join(aCellx)
bCellx = ",".join(bCellx)
xCellx = ",".join(xCellx)
if not aCellx in mList:
mList.append(aCellx)
if not bCellx in mList:
mList.append(bCellx)
if not xCellx in mList:
mList.append(xCellx)
exit = True
if exit:
break
return sorted(mList)
""" define a function to generate lists of related-cells from a given set of of cells """
def lineageCollector(cells, parent_dict, cell_dict, siblings="ON"):
collections, parent_tree, cell_tree = list(), dict(), dict()
#ascendants = ascendantsCollector(descendant, parent_tree, cell_tree, ascendants=list())
#descendants = descendantsCollector(parent, parent_dict, cell_dict, descendants=list())
print len(cells), cells
for cell in sorted(cells):
found, relatives = False, [cell]
if cell in cell_dict:
relatives.append(cell_dict[cell])
if cell in parent_dict:
relatives.extend(parent_dict[cell])
if siblings == "ON" and cell in cell_dict:
relatives.extend(parent_dict[cell_dict[cell]])
r, relatives = 0, list(set(relatives).intersection(set(cells)))
print cell, relatives, "<-- relatives"
updated = list()
for collection in collections:
if set(relatives).intersection(set(collection)):
print collection, "<-- collection"
collection.extend(relatives)
collection = list(set(collection))
print collection, "<-- updated"
r += 1
    #pdb.set_trace()
updated.append(collection)
if r == 0:
updated.append(relatives)
collections = updated
return collections
""" define a function to calculate the number of possible subsets """
def combinationCalculator(n, R):
combinations = 0
for r in range(1,R):
combinations += math.factorial(n)/(math.factorial(r)*math.factorial(n-r))
return combinations
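# Worked example: combinationCalculator(5, 3) sums C(5,1) + C(5,2) = 5 + 10 = 15;
# note that range(1, R) means subsets of size R itself are not counted.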
""" define a function to calculate the number of divisions between two cells """
def divisionCalculator(aCell, aParent, parent_dict, cell_dict):
divisions = 0
while aCell in cell_dict and aCell != aParent:
if cell_dict[aCell] == aParent:
divisions += 1
break
else:
aCell = cell_dict[aCell]
divisions += 1
return divisions
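# With the toy pedigree sketched above:
#   divisionCalculator("ABal", "AB", parent_dict, cell_dict) -> 2
# i.e. two divisions separate ABal from its ancestor AB.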
""" define a function that calculates the lineage distance between two cells """
def lineageDistance(aCell, bCell, parent_dict, cell_dict):
 # pass fresh lists so results do not accumulate in the mutable default argument:
 aParents = ascendantsCollector(aCell, parent_dict, cell_dict, ascendants=list())
 bParents = ascendantsCollector(bCell, parent_dict, cell_dict, ascendants=list())
xParents = set(aParents).intersection(set(bParents))
xDistances = dict()
#print len(xParents), aCell, bCell, ":", ", ".join(xParents)
for xParent in xParents:
aDistance = divisionCalculator(aCell, xParent, parent_dict, cell_dict)
bDistance = divisionCalculator(bCell, xParent, parent_dict, cell_dict)
xDistances[xParent] = aDistance + bDistance
xParents = general.valuesort(xDistances)
distance, ancestor = xDistances[xParents[0]], xParents[0]
return distance, ancestor
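# With the toy pedigree sketched above, ABal and ABp share AB as their closest common
# ancestor, two and one divisions away respectively:
#   lineageDistance("ABal", "ABp", parent_dict, cell_dict) -> (3, "AB")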
def main():
parser = optparse.OptionParser()
parser.add_option("--path", action="store", type="string", dest="path", help="Path from script to files")
parser.add_option("--organism", action = "store", type = "string", dest = "organism", help = "Target organism for operations...", default="OFF")
parser.add_option("--mode", action="store", type="string", dest="mode", help="Operation modes: import, map, or other...")
parser.add_option("--peaks", action="store", type="string", dest="peaks", help="Peaks set to be used.", default="OFF")
parser.add_option("--infile", action="store", type="string", dest="infile", help="Input file for abundance representation")
parser.add_option("--nuclear", action = "store", type = "string", dest = "nuclear", help = "Peaks are only nuclear?", default="ON")
parser.add_option("--expression", action="store", type="string", dest="expression", help="Input expression file for abundance representation", default="OFF")
parser.add_option("--pedigree", action="store", type="string", dest="pedigree", help="Input pedigree file", default="OFF")
parser.add_option("--mapping", action="store", type="string", dest="mapping", help="Input mapping file; associates tissue labels to more generic terms!", default="OFF")
parser.add_option("--tissues", action="store", type="string", dest="tissues", help="Input tissues file", default="OFF")
parser.add_option("--times", action="store", type="string", dest="times", help="Input cell times file", default="OFF")
parser.add_option("--name", action="store", type="string", dest="name", help="Output file name", default="")
parser.add_option("--nametag", action="store", type="string", dest="nametag", help="Output file name addition tag", default="")
parser.add_option("--collection", action="store", type="string", dest="collection", help="Cell collection subset name", default="OFF")
parser.add_option("--technique", action = "store", type = "string", dest = "technique", help = "What kind of matrix should I build? binary, fraction, or normal", default="binary")
parser.add_option("--neurons", action="store", type="string", dest="neurons", help="Neurons to be used for 'collection' analysis...", default="OFF")
parser.add_option("--factors", action="store", type="string", dest="factors", help="Infer factors (OFF) or load from file?", default="OFF")
parser.add_option("--measure", action="store", type="string", dest="measure", help="Maximum (cells) or mean", default="avg.expression")
parser.add_option("--fraction", action="store", type="float", dest="fraction", help="Fractional expression cutoff", default=0.1)
parser.add_option("--minimum", action="store", type="float", dest="minimum", help="Minimum raw expression cutoff", default=2000)
parser.add_option("--inherit", action="store", type="string", dest="inherit", help="Signal inheritance policy: 'max' or 'last' of ancestor expression signals...", default="last")
parser.add_option("--overlap", action="store", type="float", dest="overlap", help="Cellular overlap cutoff", default=0.75)
parser.add_option("--pvalue", action="store", type="float", dest="pvalue", help="Significance cutoff", default=0.01)
parser.add_option("--header", action="store", type="string", dest="header", help="Is there a header?", default="OFF")
parser.add_option("--format", action="store", type="string", dest="format", help="How should formatting be done?", default="bed")
parser.add_option("--reference", action="store", type="string", dest="reference", help="Gene-coordinate reference file", default="in2shape_ce_wormbased_COM_gx.bed")
parser.add_option("--up", action = "store", type = "int", dest = "up", help = "Upstream space", default=0)
parser.add_option("--dn", action = "store", type = "int", dest = "dn", help = "Downstream space", default=0)
parser.add_option("--method", action="store", type="string", dest="method", help="Should descendant cells or descendant lineages be examined?", default="lineages")
parser.add_option("--cells", action="store", type="string", dest="cells", help="Reduce lineage cells to tracked cells (tracked) or use complete lineage cells (complete)?", default="tracked")
parser.add_option("--lineages", action="store", type="string", dest="lineages", help="Reduce lineage tree to tracked cells (tracked) or use complete lineage tree (complete)?", default="tracked")
parser.add_option("--descendants", action="store", type="string", dest="descendants", help="Apply descendants cutoff?", default="OFF")
parser.add_option("--ascendants", action="store", type="string", dest="ascendants", help="Apply ascendants cutoff?", default="OFF")
parser.add_option("--extend", action="store", type="string", dest="extend", help="Extend to include 0 signal expression values for cells not measured?", default="OFF")
parser.add_option("--overwrite", action="store", type="string", dest="overwrite", help="Overwrite outputs?", default="OFF")
parser.add_option("--parameters", action="store", type="string", dest="parameters", help="Optional parameters...", default="OFF")
parser.add_option("--limit", action="store", type="string", dest="limit", help="Limit on lineage expansion? Numeric integer.", default="OFF")
parser.add_option("--query", action="store", type="string", dest="query", help="Query collections of cells whose enrichment will be searched in target cells", default="OFF")
parser.add_option("--source", action="store", type="string", dest="source", help="File source for inputs...", default="OFF")
parser.add_option("--target", action="store", type="string", dest="target", help="Target collections of cells in which enrichment is searched for", default="OFF")
parser.add_option("--domain", action="store", type="string", dest="domain", help="Domain of co-associations for hybrid-type analyses", default="OFF")
parser.add_option("--A", action = "store", type = "string", dest = "a", help = "Paths to files of interest", default="OFF")
parser.add_option("--B", action = "store", type = "string", dest = "b", help = "Files to be hybridized", default="OFF")
parser.add_option("--indexes", action = "store", type = "string", dest = "indexes", help = "Indexes for matrix construction...", default="OFF")
parser.add_option("--values", action = "store", type = "string", dest = "values", help = "Values for matrix construction...", default="OFF")
parser.add_option("--contexts", action = "store", type = "string", dest = "contexts", help = "What contexts of development should I track?", default="OFF")
parser.add_option("--exclude", action="store", type="string", dest="exclude", help="Are there items that should be excluded?", default="")
parser.add_option("--start", action = "store", type = "int", dest = "start", help = "Start development time for cell search", default=1)
parser.add_option("--stop", action = "store", type = "int", dest = "stop", help = "End development time for cell search", default=250)
parser.add_option("--step", action = "store", type = "int", dest = "step", help = "Step size", default=1)
parser.add_option("--total", action = "store", type = "int", dest = "total", help = "Total simulations (indexes) for 'master' operations ", default=1000)
parser.add_option("--threads", action = "store", type = "int", dest = "threads", help = "Parallel processing threads", default=1)
parser.add_option("--chunks", action = "store", type = "int", dest = "chunks", help = "", default=100)
parser.add_option("--module", action = "store", type = "string", dest = "module", help = "", default="md1")
parser.add_option("--qsub", action = "store", type = "string", dest = "qsub", help = "Qsub configuration header", default="OFF")
parser.add_option("--server", action = "store", type = "string", dest = "server", help = "Are we on the server?", default="OFF")
parser.add_option("--job", action = "store", type = "string", dest = "job", help = "Job name for cluster", default="OFF")
parser.add_option("--copy", action = "store", type = "string", dest = "copy", help = "Copy simulated peaks to analysis folder?", default="OFF")
parser.add_option("--tag", action = "store", type = "string", dest = "tag", help = "Add tag to TFBS?", default="")
(option, args) = parser.parse_args()
# import paths:
if option.server == "OFF":
path_dict = modencode.configBuild(option.path + "/input/" + "configure_path.txt")
elif option.server == "ON":
path_dict = modencode.configBuild(option.path + "/input/" + "configure_server.txt")
# specify input and output paths:
inpath = path_dict["input"]
extraspath = path_dict["extras"]
pythonpath = path_dict["python"]
scriptspath = path_dict["scripts"]
downloadpath = path_dict["download"]
fastqpath = path_dict["fastq"]
bowtiepath = path_dict["bowtie"]
bwapath = path_dict["bwa"]
macspath = path_dict["macs"]
memepath = path_dict["meme"]
idrpath = path_dict["idr"]
igvpath = path_dict["igv"]
testpath = path_dict["test"]
processingpath = path_dict["processing"]
annotationspath = path_dict["annotations"]
peakspath = path_dict["peaks"]
gopath = path_dict["go"]
hotpath = path_dict["hot"]
qsubpath = path_dict["qsub"]
coassociationspath = path_dict["coassociations"]
bindingpath = path_dict["binding"]
neuronspath = path_dict["neurons"]
cellspath = path_dict["cells"]
# standardize paths for analysis:
alignerpath = bwapath
indexpath = alignerpath + "index/"
alignmentpath = alignerpath + "alignment/"
qcfilterpath = alignerpath + "qcfilter/"
qcmergepath = alignerpath + "qcmerge/"
# import configuration dictionaries:
source_dict = modencode.configBuild(inpath + "configure_source.txt")
method_dict = modencode.configBuild(inpath + "configure_method.txt")
context_dict = modencode.configBuild(inpath + "configure_context.txt")
# define organism parameters:
if option.organism == "hs" or option.organism == "h.sapiens":
organismTag = "hs"
#organismIGV = "ce6"
elif option.organism == "mm" or option.organism == "m.musculus":
organismTag = "mm"
#organismIGV = "ce6"
elif option.organism == "ce" or option.organism == "c.elegans":
organismTag = "ce"
#organismIGV = "ce6"
elif option.organism == "dm" or option.organism == "d.melanogaster":
organismTag = "dm"
#organismIGV = "dm5"
# specify genome size file:
if option.nuclear == "ON":
chromosomes = metrn.chromosomes[organismTag]["nuclear"]
genome_size_file = option.path + "/input/" + metrn.reference[organismTag]["nuclear_sizes"]
genome_size_dict = general.build_config(genome_size_file, mode="single", separator="\t", spaceReplace=True)
else:
chromosomes = metrn.chromosomes[organismTag]["complete"]
genome_size_file = option.path + "/input/" + metrn.reference[organismTag]["complete_sizes"]
genome_size_dict = general.build_config(genome_size_file, mode="single", separator="\t", spaceReplace=True)
# load gene ID dictionaries:
id2name_dict, name2id_dict = modencode.idBuild(inpath + metrn.reference[organismTag]["gene_ids"], "Sequence Name (Gene)", "Gene Public Name", mode="label", header=True, idUpper=True, nameUpper=True)
# update peaks path:
peakspath = peakspath + option.peaks + "/"
# define input/output folders:
expressionpath = cellspath + "expression/"
correctionpath = cellspath + "correction/"
lineagepath = cellspath + "lineage/"
bindingpath = cellspath + "peaks/"
overlappath = cellspath + "overlap/"
cellsetpath = cellspath + "cellset/"
genesetpath = cellspath + "geneset/"
reportspath = cellspath + "reports/"
comparepath = cellspath + "compare/"
matrixpath = cellspath + "matrix/"
tissuespath = cellspath + "tissues/"
distancepath = cellspath + "distance/"
hybridpath = cellspath + "hybrid/"
dynamicspath = cellspath + "dynamics/"
cubismpath = cellspath + "cubism/"
timepath = cellspath + "time/"
cellnotationspath = cellspath + "annotations/"
general.pathGenerator(expressionpath)
general.pathGenerator(correctionpath)
general.pathGenerator(lineagepath)
general.pathGenerator(bindingpath)
general.pathGenerator(overlappath)
general.pathGenerator(cellsetpath)
general.pathGenerator(genesetpath)
general.pathGenerator(reportspath)
general.pathGenerator(comparepath)
general.pathGenerator(matrixpath)
general.pathGenerator(tissuespath)
general.pathGenerator(distancepath)
general.pathGenerator(timepath)
general.pathGenerator(hybridpath)
general.pathGenerator(dynamicspath)
general.pathGenerator(cubismpath)
general.pathGenerator(cellnotationspath)
# generate expression flag:
if option.measure == "max.expression":
expression_flag = "maxCel_"
elif option.measure == "avg.expression":
expression_flag = "avgExp_"
# check that the index range is coherent:
if option.stop > option.total:
print
print "Error: Range exceeded! Stop index is larger than total."
print
return
# master mode:
if "master" in option.mode:
# capture master mode:
master, mode = option.mode.split(":")
# prepare for qsub:
bash_path = str(option.path + "/data/cells/runs/").replace("//","/")
bash_base = "_".join([mode, option.peaks, option.name]) + "-M"
qsub_base = "_".join([mode, option.peaks, option.name])
general.pathGenerator(bash_path)
if option.qsub != "OFF":
qsub_header = open(qsubpath + option.qsub).read()
qsub = True
else:
qsub_header = ""
qsub = False
if option.job == "QSUB":
qsub_header = qsub_header.replace("qsubRunner", "qsub-" + qsub_base)
elif option.job != "OFF":
qsub_header = qsub_header.replace("qsubRunner", "qsub-" + option.job)
bash_base = option.job + "-M"
# update server path:
if option.qsub != "OFF":
option.path = serverPath(option.path)
# prepare slave modules:
m, steps, modules, commands, sequences, chunks, start, complete = 1, 0, list(), list(), list(), option.chunks, option.start, False
for index in range(option.start, option.stop+1, option.step):
run = "rn" + general.indexTag(index, option.total)
steps += 1
# cellular peak generation mode:
if mode == "cell.peaks":
command = "python <<CODEPATH>>mapCells.py --path <<PATH>> --organism <<ORGANISM>> --mode <<MODE>> --peaks <<PEAKS>> --start <<START>> --stop <<STOP>> --total <<TOTAL>> --expression <<EXPRESSION>> --collection <<COLLECTION>> --times <<TIMES>> --fraction <<FRACTION>> --minimum <<MINIMUM>> --name <<NAME>> --qsub <<QSUB>> --server <<SERVER>> --module <<MODULE>>"
command = command.replace("<<CODEPATH>>", option.path + "/python/")
command = command.replace("<<PATH>>", option.path)
command = command.replace("<<ORGANISM>>", option.organism)
command = command.replace("<<MODE>>", mode)
command = command.replace("<<PEAKS>>", option.peaks)
command = command.replace("<<START>>", str(index))
command = command.replace("<<STOP>>", str(index))
command = command.replace("<<TOTAL>>", str(option.total))
command = command.replace("<<EXPRESSION>>", option.expression)
command = command.replace("<<COLLECTION>>", option.collection)
command = command.replace("<<TIMES>>", option.times)
command = command.replace("<<FRACTION>>", str(option.fraction))
command = command.replace("<<MINIMUM>>", str(option.minimum))
command = command.replace("<<NAME>>", option.name + general.indexTag(index, option.total))
command = command.replace("<<QSUB>>", option.qsub)
command = command.replace("<<SERVER>>", option.server)
command = command.replace("<<MODULE>>", "md" + str(m))
   # cellular annotation mode:
if mode == "cell.annotation":
command = "python <<CODEPATH>>mapCells.py --path <<PATH>> --organism <<ORGANISM>> --mode <<MODE>> --peaks <<PEAKS>> --start <<START>> --stop <<STOP>> --total <<TOTAL>> --infile <<INFILE>> --collection <<COLLECTION>> --times <<TIMES>> --name <<NAME>> --qsub <<QSUB>> --server <<SERVER>> --module <<MODULE>>"
command = command.replace("<<CODEPATH>>", option.path + "/python/")
command = command.replace("<<PATH>>", option.path)
command = command.replace("<<ORGANISM>>", option.organism)
command = command.replace("<<MODE>>", mode)
command = command.replace("<<PEAKS>>", option.peaks)
command = command.replace("<<START>>", str(index))
command = command.replace("<<STOP>>", str(index))
command = command.replace("<<TOTAL>>", str(option.total))
command = command.replace("<<INFILE>>", option.infile)
command = command.replace("<<COLLECTION>>", option.collection)
command = command.replace("<<TIMES>>", option.times)
command = command.replace("<<NAME>>", option.name + general.indexTag(index, option.total) + option.nametag)
command = command.replace("<<QSUB>>", option.qsub)
command = command.replace("<<SERVER>>", option.server)
command = command.replace("<<MODULE>>", "md" + str(m))
# cellular overlap mode:
if mode == "cell.overlap":
command = "python <<CODEPATH>>mapCells.py --path <<PATH>> --organism <<ORGANISM>> --mode <<MODE>> --peaks <<PEAKS>> --start <<START>> --stop <<STOP>> --total <<TOTAL>> --expression <<EXPRESSION>> --collection <<COLLECTION>> --times <<TIMES>> --fraction <<FRACTION>> --minimum <<MINIMUM>> --extend <<EXTEND>> --name <<NAME>> --qsub <<QSUB>> --server <<SERVER>> --module <<MODULE>>"
command = command.replace("<<CODEPATH>>", option.path + "/python/")
command = command.replace("<<PATH>>", option.path)
command = command.replace("<<ORGANISM>>", option.organism)
command = command.replace("<<MODE>>", mode)
command = command.replace("<<PEAKS>>", option.peaks)
command = command.replace("<<START>>", str(index))
command = command.replace("<<STOP>>", str(index))
command = command.replace("<<TOTAL>>", str(option.total))
command = command.replace("<<EXPRESSION>>", option.expression)
command = command.replace("<<COLLECTION>>", option.collection + general.indexTag(index, option.total) + option.nametag)
command = command.replace("<<TIMES>>", option.times)
command = command.replace("<<NAME>>", option.name)
command = command.replace("<<FRACTION>>", str(option.fraction))
command = command.replace("<<MINIMUM>>", str(option.minimum))
command = command.replace("<<EXTEND>>", str(option.extend))
command = command.replace("<<QSUB>>", option.qsub)
command = command.replace("<<SERVER>>", option.server)
command = command.replace("<<MODULE>>", "md" + str(m))
# coassociations hybrid mode:
if mode == "cell.hybrid":
collection = option.collection + general.indexTag(index, option.total) + option.nametag
command = "python <<CODEPATH>>mapCells.py --path <<PATH>> --organism <<ORGANISM>> --mode <<MODE>> --A <<A>> --B <<B>> --indexes <<INDEXES>> --values <<VALUES>> --contexts <<CONTEXTS>>"
command = command.replace("<<CODEPATH>>", option.path + "/python/")
command = command.replace("<<PATH>>", option.path)
command = command.replace("<<ORGANISM>>", option.organism)
command = command.replace("<<MODE>>", mode)
command = command.replace("<<A>>", option.a)
command = command.replace("<<B>>", collection + "/mapcells_" + collection + "_matrix_overlap")
command = command.replace("<<INDEXES>>", option.indexes)
command = command.replace("<<VALUES>>", option.values)
command = command.replace("<<CONTEXTS>>", option.contexts)
# is it time to export a chunk?
if index-start+option.step == chunks:
# update start, modules, commands, and module count (m):
start = index + option.step
commands.append(command)
modules.append(commands)
commands = list()
complete = True
m += 1
# store whether the most recent index/command has been stored:
else:
complete = False
# update if there are additional commands:
if not complete:
commands.append(command)
modules.append(commands)
m += 1
# launch commands:
print
print "Launching comparisons:", len(modules)
#for module in modules:
# for command in module:
# print command
runCommands(modules, threads=option.threads, mode="module.run", run_mode="verbose", run_path=bash_path, run_base=bash_base, record=True, qsub_header=qsub_header, qsub=qsub)
print "Analyses performed:", len(modules)
print
# filter cells :
elif option.mode == "filter":
# load cells to filter:
filterCells = open(path_dict[option.source] + option.target).read().strip().split("\n")
# generate output file:
f_output = open(path_dict[option.source] + option.name, "w")
# process input lines:
f, k = 0, 0
inlines = open(path_dict[option.source] + option.infile).readlines()
for inline in inlines:
process = True
items = inline.strip().split(",")
for item in items:
if item in filterCells:
process = False
f += 1
if process:
print >>f_output, inline.strip()
k += 1
print
print "Input lines:", len(inlines)
print "Output lines:", k, "(" + str(f) + " filtered)"
print
# close output:
f_output.close()
# simplify cell annotations :
elif option.mode == "simply":
# generate output file:
f_output = open(path_dict[option.source] + option.name, "w")
# process input lines:
f, k = 0, 0
inlines = open(path_dict[option.source] + option.infile).read().strip().replace("\r","\n").split("\n")
for inline in inlines:
process = True
if "cell_mapping" in option.infile:
regExp, original, updated = inline.strip().split(",")
if updated == "":
annotation = str(original)
else:
annotation = str(updated)
print >>f_output, ",".join([regExp,annotation])
k += 1
print
print "Input lines:", len(inlines)
print "Output lines:", k, "(" + str(f) + " simplified)"
print
# close output:
f_output.close()
# robustness analysis mode:
elif option.mode == "robust":
import itertools
print
print "Loading input series data..."
signalDict, replicateDict = dict(), dict()
inlines = open(extraspath + option.infile).read().replace("\r","\n").split("\n")
columnDict = dict()
inline, index = inlines.pop(0), 0
for column in inline.strip().split(","):
columnDict[column] = index
index += 1
for inline in inlines:
valueDict, initems = dict(), inline.strip().split(",")
if initems != [""]:
for column in columnDict:
valueDict[column] = initems[columnDict[column]]
gene, series, cell, value = valueDict["Gene"], valueDict["Series"], valueDict["Cell"], valueDict["Express"]
if not gene in signalDict:
signalDict[gene] = dict()
if not cell in signalDict[gene]:
signalDict[gene][cell] = dict()
signalDict[gene][cell][series] = value
if not gene in replicateDict:
replicateDict[gene] = list()
replicateDict[gene].append(series)
replicateDict[gene] = sorted(list(set(replicateDict[gene])))
# define output file:
f_output = open(expressionpath + "mapcells_" + option.mode + "_" + option.infile.replace(".csv",".txt"), "w")
s_output = open(expressionpath + "mapcells_" + option.mode + "_" + option.infile.replace(".csv",".sum"), "w")
print >>f_output, "\t".join(["gene","series.count","i","j","cells","pearson.correlation","pearson.pvalue"])
print >>s_output, "\t".join(["series.count", "gene.count"])
print "Scoring replicate correlations .."
countDict = dict()
for gene in signalDict:
if not len(replicateDict[gene]) in countDict:
countDict[len(replicateDict[gene])] = list()
countDict[len(replicateDict[gene])].append(gene)
if len(replicateDict[gene]) > 1:
#print gene, len(replicateDict[gene])
for (i, j) in itertools.combinations(replicateDict[gene], 2):
iValues, jValues = list(), list()
for cell in signalDict[gene]:
if i in signalDict[gene][cell] and j in signalDict[gene][cell]:
iValues.append(float(signalDict[gene][cell][i]))
jValues.append(float(signalDict[gene][cell][j]))
correlation, corPvalue = pearsonr(iValues, jValues)
output = [gene, len(replicateDict[gene]), i, j, len(iValues), correlation, corPvalue]
print >>f_output, "\t".join(map(str, output))
#pdb.set_trace()
for count in sorted(countDict.keys()):
print >>s_output, "\t".join(map(str, [count, len(countDict[count])]))
# close output file:
f_output.close()
s_output.close()
print
# fillin mode:
elif option.mode == "fillin":
print
print "Loading annotation information..."
annotationDict = general.build2(extraspath + option.infile, id_column="lineage", split=",")
print "Checking parental annotation..."
missingCells = list()
for cell in annotationDict:
parent = cell[:len(cell)-1]
if not parent in annotationDict:
if not parent in missingCells:
missingCells.append(parent)
print parent, cell
print
# import mode:
elif option.mode == "import":
# Cell annotations are cell-type and tissue-type (in the new Murray version):
# specificDict: cell > cell-type
# generalDict: cell > tissue-type
# construct tissue dictionary (if necessary):
if option.tissues != "OFF":
print
print "Loading general and specific tissue information..."
specificDict = general.build2(extraspath + option.tissues, i="lineage", x="cell", mode="values", split=",")
specificTotal = specificDict.values()
generalDict = general.build2(extraspath + option.tissues, i="lineage", x="tissue", mode="values", split=",")
generalTotal = generalDict.values()
print "Generating tissue classes..."
classification = {
"rectal" : "excretory",
"na" : "other"
}
classDict, classTotal, classMissing = dict(), list(), 0
for cell in generalDict:
generalTissue = generalDict[cell]
generalHits, classHits = list(), list()
if generalTissue == "g":
classTissue = "neuron/glial"
generalHits.append(generalTissue)
classHits.append(classTissue)
else:
for classTag in classification:
if classTag in generalTissue:
classTissue = classification[classTag]
generalHits.append(generalTissue)
classHits.append(classTissue)
generalHits, classHits = list(set(generalHits)), list(set(classHits))
#print generalTissue, ":", ", ".join(classHits)
if len(classHits) > 1:
classTissue = "mixed"
elif len(classHits) == 1:
classTissue = classHits[0]
elif len(classHits) == 0:
classTissue = generalTissue
classMissing += 1
classDict[cell] = classTissue
classTotal.append(classTissue)
classTotal = sorted(list(set(classTotal)))
print
print "Specific tissue terms:", len(set(specificDict.values()))
print "General tissue terms:", len(set(generalDict.values()))
generalCounts = dict()
for cell in generalDict:
generalTissue = generalDict[cell]
if not generalTissue in generalCounts:
generalCounts[generalTissue] = 0
generalCounts[generalTissue] += 1
generalTissues = general.valuesort(generalCounts)
generalTissues.reverse()
for generalTissue in generalTissues:
print "\t" + generalTissue, ":", generalCounts[generalTissue]
print
print "Class tissue terms:", len(set(classDict.values()))
classCounts = dict()
for cell in classDict:
classTissue = classDict[cell]
if not classTissue in classCounts:
classCounts[classTissue] = 0
classCounts[classTissue] += 1
classTissues = general.valuesort(classCounts)
classTissues.reverse()
for classTissue in classTissues:
print "\t" + classTissue, ":", classCounts[classTissue]
#pdb.set_trace()
# prepare expression matrixes:
series2cell_dict, gene2cell_dict, cell2gene_dict, gene2cell_list, allCells = dict(), dict(), dict(), dict(), list()
# load expression data per series:
print
print "Loading cellular-expression data..."
inlines = open(extraspath + option.infile).read().replace("\r","\n").split("\n")
inheader = inlines.pop(0)
for inline in inlines:
if not inline == "":
series, cell, gene, expression = inline.strip().split(",")
gene = gene.upper()
if not gene in option.exclude.split(","):
if not cell in cell2gene_dict:
cell2gene_dict[cell] = dict()
if not gene in cell2gene_dict[cell]:
cell2gene_dict[cell][gene] = dict()
if not gene in gene2cell_dict:
gene2cell_dict[gene] = dict()
gene2cell_list[gene] = list()
if not cell in gene2cell_dict[gene]:
gene2cell_dict[gene][cell] = dict()
if not series in series2cell_dict:
series2cell_dict[series] = dict()
gene2cell_dict[gene][cell][series] = float(expression)
cell2gene_dict[cell][gene][series] = float(expression)
series2cell_dict[series][cell] = float(expression)
if not cell in gene2cell_list[gene]:
gene2cell_list[gene].append(cell)
if not cell in allCells:
allCells.append(cell)
# store cell-parent relationships:
print "Loading cell-parent relationships..."
cell_dict, parent_dict, pedigreeCells = relationshipBuilder(pedigreefile=option.pedigree, path=extraspath, mechanism="simple")
# construct tissue dictionary (if necessary):
if option.tissues != "OFF":
print
print "Expanding cell tissue information..."
matchDict = { "specific":dict(), "general":dict(), "class":dict() }
matchExpansion, matchTotal, matchMissing = list(), 0, 0
for cell in pedigreeCells:
if cell in generalDict and generalDict[cell] != "na":
matchDict["specific"][cell] = specificDict[cell]
matchDict["general"][cell] = generalDict[cell]
matchDict["class"][cell] = classDict[cell]
else:
# find most closely-related, annotated cell (and use its associated tissue annotation):
distanceDict = dict()
queryDict, matchTissues = dict(), list(),
ancestorCells, descendantCells, matchCells, queryCells = list(), list(), list(), list()
for queryCell in generalDict:
relative = False
if cell == queryCell[:len(cell)]:
descendantCells.append(queryCell)
relative = True
if queryCell == cell[:len(queryCell)]:
ancestorCells.append(queryCell)
relative = True
if relative:
distance = abs(len(cell)-len(queryCell))
if not distance in distanceDict:
distanceDict[distance] = list()
distanceDict[distance].append(queryCell)
# determine which cells to obtain the annotations from:
if descendantCells != list():
queryCells = descendantCells
else:
queryCells = descendantCells + ancestorCells
     # find and weigh the most-related tissues:
specificMatch, generalMatch, classMatch = dict(), dict(), dict()
for distance in sorted(distanceDict.keys()):
if distance != 0:
for distanceCell in distanceDict[distance]:
if distanceCell in queryCells:
specificTissue = specificDict[distanceCell]
generalTissue = generalDict[distanceCell]
classTissue = classDict[distanceCell]
if not specificTissue in specificMatch:
specificMatch[specificTissue] = 0
if not generalTissue in generalMatch:
generalMatch[generalTissue] = 0
if not classTissue in classMatch:
classMatch[classTissue] = 0
specificMatch[specificTissue] += float(1)/distance
generalMatch[generalTissue] += float(1)/distance
classMatch[classTissue] += float(1)/distance
# Note: This section controls whether tissue annotations are obtained from
# all related cells (parents and ancestors) or just subsets of these...
""" define a function that returns the highest-likelihood tissue """
def matchFunction(cell, matchDict, queryCells, verbose="OFF"):
matchTissues = general.valuesort(matchDict)
matchTissues.reverse()
printFlag = False
if len(matchTissues) > 1 and verbose == "ON":
printFlag = True
print cell, len(matchTissues), matchTissues, queryCells
for matchTissue in matchTissues:
print matchTissue, ":", matchDict[matchTissue]
       # Drop the "death" and then "other" labels from the candidates when alternatives remain:
if len(matchTissues) > 1:
matchTissues = general.clean(matchTissues, "death")
if len(matchTissues) > 1:
matchTissues = general.clean(matchTissues, "other")
       # Select the top-ranked remaining tissue label for the cell (default to "other" if none remain):
if len(matchTissues) == 0:
matchTissue = "other"
else:
matchTissue = matchTissues[0]
if printFlag and verbose == "ON":
print ">", matchTissue
print
# return highest likelihood tissue match and ranked tissues:
return matchTissue, matchTissues
# assign highest-scoring tissue types:
#specificDict[cell], specificTissues = matchFunction(cell, specificMatch, queryCells, verbose="OFF")
#generalDict[cell], generalTissues = matchFunction(cell, generalMatch, queryCells, verbose="OFF")
#classDict[cell], classTissues = matchFunction(cell, classMatch, queryCells, verbose="OFF")
matchDict["specific"][cell], specificMatches = matchFunction(cell, specificMatch, queryCells, verbose="OFF")
matchDict["general"][cell], generalMatches = matchFunction(cell, generalMatch, queryCells, verbose="OFF")
matchDict["class"][cell], classMatches = matchFunction(cell, classMatch, queryCells, verbose="OFF")
# update tissue counts:
matchTotal += 1
if matchDict["class"][cell] == "na":
matchMissing += 1
# Update/expand cell-tissue dictionary:
matchTissue = matchDict["specific"][cell]
if not matchTissue in matchExpansion:
matchExpansion.append(matchTissue)
# record counts for each type of tissue:
specificCounts, generalCounts, classCounts = dict(), dict(), dict()
for cell in specificDict:
specificTissue = specificDict[cell]
generalTissue = generalDict[cell]
classTissue = classDict[cell]
if not specificTissue in specificCounts:
specificCounts[specificTissue] = 0
specificCounts[specificTissue] += 1
if not generalTissue in generalCounts:
generalCounts[generalTissue] = 0
generalCounts[generalTissue] += 1
if not classTissue in classCounts:
classCounts[classTissue] = 0
classCounts[classTissue] += 1
#print
#print "Specific tissue terms:", len(set(specificDict.values()))
#specificTissues = general.valuesort(specificCounts)
#specificTissues.reverse()
#for specificTissue in specificTissues:
# print "\t" + specificTissue, ":", specificCounts[specificTissue]
print
print "General tissue terms:", len(set(generalDict.values()))
generalTissues = general.valuesort(generalCounts)
generalTissues.reverse()
for generalTissue in generalTissues:
print "\t" + generalTissue, ":", generalCounts[generalTissue]
print
print "Class tissue terms:", len(set(classDict.values()))
classTissues = general.valuesort(classCounts)
classTissues.reverse()
for classTissue in classTissues:
print "\t" + classTissue, ":", classCounts[classTissue]
print
print "Tissue information expanded by:", len(matchExpansion)
print "Tissue information expansion terms:", ", ".join(list(sorted(matchExpansion)))
#pdb.set_trace()
# calculate unique expression values for each gene/cell combination:
print
print "Generating per gene/cell expression values..."
matrix, expression, expressing = dict(), dict(), dict()
for gene in gene2cell_dict:
for cell in gene2cell_list[gene]:
values, maxSeries, maxValue = list(), "NA", 0
for series in gene2cell_dict[gene][cell]:
values.append(gene2cell_dict[gene][cell][series])
if gene2cell_dict[gene][cell][series] >= maxValue:
maxSeries, maxValue = series, gene2cell_dict[gene][cell][series]
if not gene in matrix:
matrix[gene] = dict()
expression[gene] = dict()
matrix[gene][cell] = [max(values), numpy.mean(values), numpy.median(values), numpy.std(values), len(gene2cell_dict[gene][cell]), ",".join(sorted(gene2cell_dict[gene][cell].keys())), maxSeries]
if option.measure == "max.expression":
expression[gene][cell] = max(values)
elif option.measure == "avg.expression":
expression[gene][cell] = numpy.mean(values)
# calculate expression peaks...
print "Generating per gene/cell expression statistics..."
for gene in matrix:
# find peak expression:
peakCell, peakValue = "", 0
for cell in matrix[gene]:
maxValue, meanValue, medianValue, stdValue, seriesCount, seriesIDs, maxSeries = matrix[gene][cell]
cellValue = expression[gene][cell]
if cellValue > peakValue:
peakCell, peakValue = cell, cellValue
# calculate fractional expression, cell ranks, and add cells expressing the protein (above cutoff):
cellRanks = general.valuesort(expression[gene])
cellRanks.reverse()
for cell in matrix[gene]:
maxValue, meanValue, medianValue, stdValue, seriesCount, seriesIDs, maxSeries = matrix[gene][cell]
cellValue = expression[gene][cell]
fracValue = float(cellValue)/peakValue
cellRank = cellRanks.index(cell) + 1
if not gene in expressing:
expressing[gene] = list()
if fracValue >= option.fraction and cellValue >= option.minimum:
expressing[gene].append(cell)
matrix[gene][cell] = [cellValue, peakValue, fracValue, cellRank, maxValue, meanValue, medianValue, stdValue, seriesCount, seriesIDs, maxSeries]
# define the ascendants cutoff:
print
print "Defining minimum ascendants across experiments..."
cutAscendants = 0
for gene in matrix:
minAscendants, maxAscendants = 1000, 0
for cell in matrix[gene]:
ascendants = ascendantsCollector(cell, parent_dict, cell_dict, ascendants=list())
if len(ascendants) < minAscendants:
minAscendants = len(ascendants)
minCell = cell
if len(ascendants) > maxAscendants:
maxAscendants = len(ascendants)
maxCell = cell
if minAscendants > cutAscendants:
cutAscendants = minAscendants
# define the set of cells tracked in target experiments:
print "Defining cells focused: strict list of cells assayed in target experiments..."
focusedCells = list()
for gene in option.target.split(","):
if focusedCells == list():
focusedCells = gene2cell_list[gene]
else:
focusedCells = set(focusedCells).intersection(set(gene2cell_list[gene]))
# define the set of cells tracked in all experiments:
print "Defining cells tracked: strict list of cells assayed in all experiments..."
trackedCells = list()
for gene in gene2cell_dict:
if trackedCells == list():
trackedCells = gene2cell_list[gene]
else:
trackedCells = set(trackedCells).intersection(set(gene2cell_list[gene]))
# define the set of ancestor or tracked cells:
print "Defining cells started: parent-inclusive list of cells tracked in all experiments..."
startedCells = list()
for cell in pedigreeCells:
ascendants = ascendantsCollector(cell, parent_dict, cell_dict, ascendants=list())
if cell in trackedCells or len(ascendants) < int(option.ascendants):
startedCells.append(cell)
#if cell == "ABalaaaal":
# print cell, specificDict[cell], generalDict[cell], classDict[cell]
# pdb.set_trace()
print "Ascendants cutoff:", cutAscendants
# define output files:
assayedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_assayed"
startedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_started"
trackedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_tracked"
focusedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_focused"
summaryfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_summary"
tissuesfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_tissues"
# define cellular expression header:
expressionHeader = ["cell", "cell.name", "gene", "cell.expression", "peak.expression", "fraction.expression", "normal.expression", "rank", "max.expression", "avg.expression", "med.expression", "std.expression", "cells.expressing", "cells.count", "series.count", "time.series", "max.series", "specific.tissue", "general.tissue", "class.tissue", "match.tissue"]
reportHeader = ["gene", "cells.expressing", "cells.assayed", "cells.tracked", "cell.expression", "peak.expression", "fraction.expression", "series.count", "time.series", "max.series"]
tissueHeader = ["cell", "specific.tissue", "general.tissue", "class.tissue", "match.tissue"]
# create output files:
a_output = open(assayedfile, "w")
s_output = open(startedfile, "w")
t_output = open(trackedfile, "w")
f_output = open(focusedfile, "w")
r_output = open(summaryfile, "w")
x_output = open(tissuesfile, "w")
print >>a_output, "\t".join(expressionHeader)
print >>s_output, "\t".join(expressionHeader)
print >>t_output, "\t".join(expressionHeader)
print >>f_output, "\t".join(expressionHeader)
print >>r_output, "\t".join(reportHeader)
print >>x_output, "\t".join(tissueHeader)
# generate set-normalization values:
maxAssayed, maxStarted, maxTracked, maxFocused = dict(), dict(), dict(), dict()
for gene in sorted(matrix.keys()):
cellsStarted = startedCells
cellsAssayed = matrix[gene].keys()
cellsTracked = trackedCells
cellsFocused = focusedCells
peakAssayed, peakStarted, peakTracked, peakFocused = 0, 0, 0, 0
for cell in sorted(matrix[gene].keys()):
cellValue, peakValue, fracValue, cellRank, maxValue, meanValue, medianValue, stdValue, seriesCount, seriesIDs, maxSeries = matrix[gene][cell]
if cell in cellsAssayed and cellValue > peakAssayed:
peakAssayed = cellValue
if cell in cellsStarted and cellValue > peakStarted:
peakStarted = cellValue
if cell in cellsTracked and cellValue > peakTracked:
peakTracked = cellValue
if cell in cellsFocused and cellValue > peakFocused:
peakFocused = cellValue
maxAssayed[gene] = peakAssayed
maxStarted[gene] = peakStarted
maxTracked[gene] = peakTracked
maxFocused[gene] = peakFocused
# export expression data:
print "Exporting expression data..."
for gene in sorted(matrix.keys()):
cellsStarted = len(startedCells)
cellsAssayed = len(matrix[gene].keys())
cellsTracked = len(trackedCells)
cellsFocused = len(focusedCells)
cellsExpressingAssayed = len(set(expressing[gene]).intersection(set(matrix[gene].keys())))
cellsExpressingTracked = len(set(expressing[gene]).intersection(set(trackedCells)))
cellsExpressingFocused = len(set(expressing[gene]).intersection(set(focusedCells)))
cellValues, fracValues = list(), list()
for cell in sorted(matrix[gene].keys()):
if option.tissues == "OFF" or not cell in specificDict:
specificTissue = "*"
generalTissue = "*"
classTissue = "*"
matchTissue = "*"
else:
specificTissue = specificDict[cell]
generalTissue = generalDict[cell]
classTissue = classDict[cell]
matchTissue = matchDict["class"][cell]
cellValue, peakValue, fracValue, cellRank, maxValue, meanValue, medianValue, stdValue, seriesCount, seriesIDs, maxSeries = matrix[gene][cell]
print >>a_output, "\t".join(map(str, [cell, cell, gene, cellValue, peakValue, fracValue, float(cellValue)/maxAssayed[gene], cellRank, maxValue, meanValue, medianValue, stdValue, cellsExpressingAssayed, cellsAssayed, seriesCount, seriesIDs, maxSeries, specificTissue, generalTissue, classTissue, matchTissue]))
if cell in startedCells:
print >>s_output, "\t".join(map(str, [cell, cell, gene, cellValue, peakValue, fracValue, float(cellValue)/maxStarted[gene], cellRank, maxValue, meanValue, medianValue, stdValue, cellsExpressingTracked, cellsStarted, seriesCount, seriesIDs, maxSeries, specificTissue, generalTissue, classTissue, matchTissue]))
if cell in trackedCells:
print >>t_output, "\t".join(map(str, [cell, cell, gene, cellValue, peakValue, fracValue, float(cellValue)/maxTracked[gene], cellRank, maxValue, meanValue, medianValue, stdValue, cellsExpressingTracked, cellsTracked, seriesCount, seriesIDs, maxSeries, specificTissue, generalTissue, classTissue, matchTissue]))
if cell in focusedCells:
print >>f_output, "\t".join(map(str, [cell, cell, gene, cellValue, peakValue, fracValue, float(cellValue)/maxFocused[gene], cellRank, maxValue, meanValue, medianValue, stdValue, cellsExpressingFocused, cellsFocused, seriesCount, seriesIDs, maxSeries, specificTissue, generalTissue, classTissue, matchTissue]))
if fracValue >= option.fraction and cellValue >= option.minimum:
cellValues.append(cellValue)
fracValues.append(fracValue)
print >>r_output, "\t".join(map(str, [gene, cellsExpressingTracked, cellsAssayed, cellsTracked, numpy.mean(cellValues), peakValue, numpy.mean(fracValues), seriesCount, seriesIDs, maxSeries]))
# export tissue annotations:
print "Exporting tissue annotation data..."
print "Annotated cells:", len(specificDict)
for cell in sorted(specificDict.keys()):
specificTissue = specificDict[cell]
generalTissue = generalDict[cell]
classTissue = classDict[cell]
if cell in matchDict["class"]:
matchTissue = matchDict["class"][cell]
else:
matchTissue = str(classTissue)
print >>x_output, "\t".join([cell, specificTissue, generalTissue, classTissue, matchTissue])
# close output:
a_output.close()
s_output.close()
t_output.close()
f_output.close()
r_output.close()
x_output.close()
print
print "Focused cells:", len(focusedCells)
print "Tracked cells:", len(trackedCells)
print "Started cells:", len(startedCells)
print
# inherit expression mode:
elif option.mode == "inherit":
# define input files:
assayedinput = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_assayed"
startedinput = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_started"
trackedinput = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_tracked"
focusedinput = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_focused"
summaryinput = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_summary"
tissuesinput = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_tissues"
# define output files:
assayedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_inassay"
startedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_instart"
trackedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_intrack"
focusedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_infocus"
inheritfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_inherit"
inleafsfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_inleafs"
maximalfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_maximal"
mxleafsfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_mxleafs"
# define cellular expression header:
expressionHeader = ["cell", "cell.name", "gene", "cell.expression", "peak.expression", "fraction.expression", "normal.expression", "rank", "max.expression", "avg.expression", "med.expression", "std.expression", "cells.expressing", "cells.count", "series.count", "time.series", "max.series", "specific.tissue", "general.tissue", "class.tissue", "match.tissue"]
reportHeader = ["gene", "cells.expressing", "cells.assayed", "cells.tracked", "cell.expression", "peak.expression", "fraction.expression", "series.count", "time.series", "max.series"]
tissueHeader = ["cell", "specific.tissue", "general.tissue", "class.tissue", "match.tissue"]
# create output files:
a_output = open(assayedfile, "w")
s_output = open(startedfile, "w")
t_output = open(trackedfile, "w")
f_output = open(focusedfile, "w")
i_output = open(inheritfile, "w")
l_output = open(inleafsfile, "w")
m_output = open(maximalfile, "w")
p_output = open(mxleafsfile, "w")
print >>a_output, "\t".join(expressionHeader + ["inherited"])
print >>s_output, "\t".join(expressionHeader + ["inherited"])
print >>t_output, "\t".join(expressionHeader + ["inherited"])
print >>f_output, "\t".join(expressionHeader + ["inherited"])
print >>i_output, "\t".join(expressionHeader + ["inherited"])
print >>l_output, "\t".join(expressionHeader + ["inherited"])
print >>m_output, "\t".join(expressionHeader + ["inherited"])
print >>p_output, "\t".join(expressionHeader + ["inherited"])
# load terminal leaf cells:
print
print "Loading terminal cells..."
inleafsCells = general.build2(extraspath + option.mapping, i="cell", x="cell.name", mode="values", skip=True)
# store cell-parent relationships:
print "Loading cell-parent relationships..."
cell_dict, parent_dict, pedigreeCells = relationshipBuilder(pedigreefile=option.pedigree, path=extraspath, mechanism="simple")
# loading tissue annotation data...
print "Loading tissue annotation data..."
tissuesAnnotation = general.build2(tissuesinput, id_column="cell", mode="table")
# load expression data:
print "Loading expression data..."
assayedExpression = general.build2(assayedinput, id_complex=["gene","cell"], mode="table", separator=":")
assayedMatrix = general.build2(assayedinput, i="gene", j="cell", x="cell.expression", mode="matrix")
assayedCells = general.build2(assayedinput, i="cell", x="cell.name", mode="values", skip=True)
startedCells = general.build2(startedinput, i="cell", x="cell.name", mode="values", skip=True)
trackedCells = general.build2(trackedinput, i="cell", x="cell.name", mode="values", skip=True)
focusedCells = general.build2(focusedinput, i="cell", x="cell.name", mode="values", skip=True)
# define cellular space:
print "Defining inheritance cells..."
inheritCells = list()
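		# NOTE: the inheritance cell space is built as the union of the ascendants collected for each terminal (leaf) cell,
		# so that expression can later be propagated down any branch of the pedigree.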
for inleafsCell in inleafsCells:
inheritCells += ascendantsCollector(inleafsCell, parent_dict, cell_dict, ascendants=list())
inheritCells = sorted(list(set(inheritCells)))
# load header dictionary:
hd = general.build_header_dict(assayedinput)
header = general.valuesort(hd)
# inherit peak expression from ancestors:
print "Inheriting expression from ancestors..."
inheritExpression, maximalExpression = dict(), dict()
for gene in sorted(assayedMatrix.keys()):
inheritExpression[gene] = dict()
maximalExpression[gene] = dict()
for inheritCell in inheritCells:
ascendantCells, ascendantExpression = list(), dict()
ascendants = ascendantsCollector(inheritCell, parent_dict, cell_dict, ascendants=list(), sort=False)
#print inheritCell, ascendants
if len(set(ascendants)) != len(ascendants):
print "oh, oh: not a set!"
pdb.set_trace()
for ascendantCell in ascendants + [inheritCell]:
if ascendantCell in assayedMatrix[gene]:
ascendantExpression[ascendantCell] = float(assayedMatrix[gene][ascendantCell])
ascendantCells.append(ascendantCell)
if ascendantExpression != dict():
# get inheritance cells for maximal expression and for last ancestor expression:
maximalCells = general.valuesort(ascendantExpression)
maximalCells.reverse()
maximalCell = maximalCells[0]
ascendantCell = ascendantCells[0]
# store values for last ancestor expression:
inheritExpression[gene][inheritCell] = dict(assayedExpression[gene + ":" + ascendantCell])
inheritExpression[gene][inheritCell]["cell"] = str(inheritCell)
inheritExpression[gene][inheritCell]["cell.name"] = str(inheritCell)
inheritExpression[gene][inheritCell]["specific.tissue"] = tissuesAnnotation[inheritCell]["specific.tissue"]
inheritExpression[gene][inheritCell]["general.tissue"] = tissuesAnnotation[inheritCell]["general.tissue"]
inheritExpression[gene][inheritCell]["class.tissue"] = tissuesAnnotation[inheritCell]["class.tissue"]
inheritExpression[gene][inheritCell]["match.tissue"] = tissuesAnnotation[inheritCell]["match.tissue"]
inheritExpression[gene][inheritCell]["inherited"] = ascendantCell
#if inheritCell != inheritExpression[gene][inheritCell]["cell"]:
# print cell, inheritExpression[gene][inheritCell]["cell"], 1
# pdb.set_trace()
# store values for maximal ancestor expression:
maximalExpression[gene][inheritCell] = dict(assayedExpression[gene + ":" + maximalCell])
maximalExpression[gene][inheritCell]["cell"] = str(inheritCell)
maximalExpression[gene][inheritCell]["cell.name"] = str(inheritCell)
maximalExpression[gene][inheritCell]["specific.tissue"] = tissuesAnnotation[inheritCell]["specific.tissue"]
maximalExpression[gene][inheritCell]["general.tissue"] = tissuesAnnotation[inheritCell]["general.tissue"]
maximalExpression[gene][inheritCell]["class.tissue"] = tissuesAnnotation[inheritCell]["class.tissue"]
maximalExpression[gene][inheritCell]["match.tissue"] = tissuesAnnotation[inheritCell]["match.tissue"]
maximalExpression[gene][inheritCell]["inherited"] = ascendantCell
# export inherited signals:
print "Exporting inherited expression values..."
for gene in sorted(inheritExpression):
for cell in sorted(inheritExpression[gene].keys()):
#if cell != inheritExpression[gene][cell]["cell"]:
# print cell, inheritExpression[gene][cell]["cell"], 2
# pdb.set_trace()
output = list()
for column in header + ["inherited"]:
output.append(inheritExpression[gene][cell][column])
if cell in assayedCells:
print >>a_output, "\t".join(map(str, output))
if cell in startedCells:
print >>s_output, "\t".join(map(str, output))
if cell in trackedCells:
print >>t_output, "\t".join(map(str, output))
if cell in focusedCells:
print >>f_output, "\t".join(map(str, output))
if cell in inheritCells:
print >>i_output, "\t".join(map(str, output))
if cell in inleafsCells:
print >>l_output, "\t".join(map(str, output))
#print "\t".join(map(str, output))
#pdb.set_trace()
		# export maximal expression signals:
print "Exporting maximal expression values..."
for gene in sorted(maximalExpression):
for cell in sorted(maximalExpression[gene].keys()):
output = list()
for column in header + ["inherited"]:
output.append(maximalExpression[gene][cell][column])
if cell in inheritCells:
print >>m_output, "\t".join(map(str, output))
if cell in inleafsCells:
print >>p_output, "\t".join(map(str, output))
#print "\t".join(map(str, output))
#pdb.set_trace()
print
print "Total inherited cells:", len(inheritCells)
print "Terminal (leaf) cells:", len(inleafsCells)
# close output files:
a_output.close()
s_output.close()
t_output.close()
f_output.close()
i_output.close()
l_output.close()
m_output.close()
p_output.close()
print
#k = inheritExpression.keys()[0]
#print k
#print inheritExpression[k][inleafsCell]
#pdb.set_trace()
# correct expression mode (detect outliers):
elif option.mode == "correct":
# load quantile functions
from quantile import Quantile
# define input files:
startedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_started"
trackedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_tracked"
assayedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_assayed"
summaryfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_summary"
# load assayed expression data:
print
print "Loading expression data..."
expressionDict = general.build2(assayedfile, i="gene", j="cell", x="cell.expression", mode="matrix")
# prepare to sort genes by quantile expression:
print "Sorting genes by expression..."
medianDict, quantDict = dict(), dict()
for gene in expressionDict:
values = map(float, expressionDict[gene].values())
medianDict[gene] = numpy.median(values)
quantDict[gene] = Quantile(values, 0.99)
quantRanks = general.valuesort(quantDict)
quantRanks.reverse()
# store median rankings:
rankDict = dict()
medianRanks = general.valuesort(medianDict)
medianRanks.reverse()
k = 1
for gene in medianRanks:
rankDict[gene] = k
k += 1
# generate testing path:
testingpath = correctionpath + "testing/"
general.pathGenerator(testingpath)
# Perform Gaussian Mixture Modeling (GMM):
print "Performing GMM modeling..."
gmmDict = dict()
k = 1
for gene in expressionDict:
signals = map(int, map(float, expressionDict[gene].values()))
signals = [1 if (x == 0) else x for x in signals]
testingfile = testingpath + "mapCells-gmm_" + expression_flag + option.name + "_" + "temp"
resultsfile = testingpath + "mapCells-gmm_" + expression_flag + option.name + "_" + gene
#f_output = open(testingfile, "w")
#print >>f_output, "\n".join(["signal"] + map(str, signals))
#f_output.close()
#command = " ".join(["Rscript", "~/meTRN/scripts/mapCells-gmm.r", testingfile, resultsfile, option.limit, option.parameters])
#os.system(command)
#Rscript ~/meTRN/scripts/mapCells-pilot.r ~/Desktop/data.test ~/Desktop/data.output 1000
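			# NOTE: the GMM fit itself (mapCells-gmm.r) would be launched by the commented-out block above (or run externally);
			# here, any per-gene GMM cutoff files already present in the testing path are simply read back in.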
if "mapCells-gmm_" + expression_flag + option.name + "_" + gene in os.listdir(testingpath):
gmmDict[gene] = open(resultsfile).readlines()[1].strip().split(" ")[2]
#os.system("rm -rf " + testingfile)
# export expression signals:
rankingfile = correctionpath + "mapcells_" + expression_flag + option.name + "_correction_ranking" # rank information file
percentfile = correctionpath + "mapcells_" + expression_flag + option.name + "_correction_percent" # gene-cell data, percentile-ranked genes
mediansfile = correctionpath + "mapcells_" + expression_flag + option.name + "_correction_medians" # gene-cell data, median-ranked genes
# define output headers:
correctHeader = "\t".join(["index", "gene", "cell", "signal", "zscore", "nscore", "lscore", "rank", "median", "mean", "stdev", "alpha", "delta", "sigma", "gamma"])
rankingHeader = "\t".join(["gene", "quantile.rank", "median.rank", "median", "mean", "stdev", "alpha", "delta", "sigma", "gamma"])
# gather outputs:
print "Generating expression thresholds..."
r_output = open(rankingfile, "w")
print >>r_output, rankingHeader
outputDict = dict()
k = 1
for gene in quantRanks:
signals = map(float, expressionDict[gene].values())
maximal = max(signals)
# calculate expression cutoffs:
alpha = float(maximal)/10
delta = float(quantDict[gene])/10
sigma = float(quantDict[gene])/10
# detect GMM expression cutoff:
if gene in gmmDict:
gamma = int(gmmDict[gene])
else:
gamma = int(option.limit)
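			# Cutoff summary: alpha = 10% of the maximum signal, delta and sigma = 10% of the 99th-percentile signal, and
			# gamma = the GMM-derived cutoff (or option.limit when no GMM result exists); alpha, delta and gamma are floored
			# at option.limit below.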
# threshold expression cutoffs:
if alpha < int(option.limit):
alpha = int(option.limit)
if delta < int(option.limit):
delta = int(option.limit)
if gamma < int(option.limit):
gamma = int(option.limit)
# calculate general stats:
median = numpy.median(signals)
mean = numpy.mean(signals)
stdev = numpy.std(signals)
logMean = numpy.log10(mean)
logStDev = numpy.log10(stdev)
# store/export data:
print >>r_output, "\t".join(map(str, [gene, k, rankDict[gene], median, mean, stdev, alpha, delta, sigma, gamma]))
if not gene in outputDict:
outputDict[gene] = dict()
for cell in sorted(expressionDict[gene].keys()):
signal = float(expressionDict[gene][cell])
if signal < 1:
signal = 1
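				# zscore: standard score of the raw signal; nscore: signal as a fraction of the gene maximum;
				# lscore: (log10(signal) - log10(mean)) / log10(stdev).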
zscore = float(signal-mean)/stdev
nscore = float(signal)/maximal
lscore = float(numpy.log10(signal) - logMean)/logStDev
outputDict[gene][cell] = "\t".join(map(str, [k, gene, cell, signal, zscore, nscore, lscore, rankDict[gene], median, mean, stdev, alpha, delta, sigma, gamma]))
k += 1
r_output.close()
# export expression signals, percentile-ranked genes:
print "Exporting percentile-ranked expression signals..."
f_output = open(percentfile, "w")
print >>f_output, correctHeader
for gene in quantRanks:
for cell in sorted(outputDict[gene]):
print >>f_output, outputDict[gene][cell]
f_output.close()
# export expression signals, median-ranked genes:
print "Exporting median-ranked expression signals..."
f_output = open(mediansfile, "w")
print >>f_output, correctHeader
for gene in medianRanks:
for cell in sorted(outputDict[gene]):
print >>f_output, outputDict[gene][cell]
f_output.close()
print
# check status mode:
elif option.mode == "check.status":
# define input files:
startedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_started"
trackedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_tracked"
assayedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_assayed"
summaryfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_summary"
# scan peak files:
print
print "Scanning peak files:"
hd = general.build_header_dict(summaryfile)
k, peaks, peak_files = 0, 0, os.listdir(peakspath)
for inline in open(summaryfile).readlines()[1:]:
gene, found = inline.strip().split("\t")[hd["gene"]], list()
for peak_file in peak_files:
dataset = peak_file.split("_peaks.bed")[0].replace("POL2", "AMA-1")
if gene + "_" in dataset:
found.append(dataset)
peaks += general.countLines(peakspath + peak_file, header="OFF")
if found != list():
print gene, ":", ", ".join(sorted(found))
k += 1
print
print "Found factors:", k
print "Peaks called:", peaks
print
# scan expression files:
print
print "Scanning expression data:"
caught = list()
hd = general.build_header_dict(assayedfile)
for inline in open(assayedfile).readlines()[1:]:
initems = inline.strip().split("\t")
gene, timeSeries = initems[hd["gene"]], initems[hd["time.series"]]
for timeSerie in timeSeries.split(","):
if not gene.lower() in timeSerie:
if not gene in caught:
print gene, timeSeries
caught.append(gene)
print
print "Mismatched genes:", len(caught)
print
# lineage distance mode:
elif option.mode == "cell.distance":
# build cell-expression matrix:
print
print "Loading cellular expression..."
quantitation_matrix, expression_matrix, tracking_matrix, trackedCells = expressionBuilder(expressionfile=option.expression, path=expressionpath, cutoff=0, minimum=0, metric="fraction.expression")
# store cell-parent relationships:
print "Loading cell-parent relationships..."
cell_dict, parent_dict, pedigreeCells = relationshipBuilder(pedigreefile=option.pedigree, path=extraspath, mechanism="simple")
print "Pedigree cells:", len(pedigreeCells)
print "Tracked cells:", len(trackedCells)
print
# define output files:
signalsmatrixfile = str(option.expression + "_distance_signals")
lineagematrixfile = str(option.expression + "_distance_lineage")
combinematrixfile = str(option.expression + "_distance_combine")
# build cell-cell expression correlation matrix
if not signalsmatrixfile in os.listdir(distancepath) or option.overwrite == "ON":
print "Calculating expression correlation matrix..."
correlation_matrix, index = dict(), 1
f_output = open(distancepath + signalsmatrixfile, "w")
print >>f_output, "\t".join(["i", "j", "correlation", "correlation.pvalue", "correlation.adjusted.pvalue"])
for aCell in sorted(trackedCells):
print index, aCell
for bCell in sorted(trackedCells):
aValues, bValues = list(), list()
for gene in sorted(quantitation_matrix.keys()):
aValues.append(quantitation_matrix[gene][aCell])
bValues.append(quantitation_matrix[gene][bCell])
correlation, corPvalue = pearsonr(aValues, bValues)
adjCorPvalue = corPvalue*len(trackedCells)*len(trackedCells)
if adjCorPvalue > 1:
adjCorPvalue = 1
if not aCell in correlation_matrix:
correlation_matrix[aCell] = dict()
correlation_matrix[aCell][bCell] = [correlation, corPvalue, adjCorPvalue]
print >>f_output, "\t".join(map(str, [aCell, bCell] + correlation_matrix[aCell][bCell]))
index += 1
f_output.close()
print
else:
print "Loading expression correlation matrix..."
correlation_matrix = general.build2(distancepath + signalsmatrixfile, i="i", j="j", x=["correlation","correlation.pvalue","correlation.adjusted.pvalue"], datatype="float", mode="matrix", header_dict="auto")
# build lineage distance matrix:
if not lineagematrixfile in os.listdir(distancepath) or option.overwrite == "ON":
print "Calculating lineage distance matrix..."
lineage_matrix, index = dict(), 1
f_output = open(distancepath + lineagematrixfile, "w")
print >>f_output, "\t".join(["i", "j", "distance", "parent"])
for aCell in sorted(trackedCells):
print index, aCell
for bCell in sorted(trackedCells):
distance, ancestor = lineageDistance(aCell, bCell, parent_dict, cell_dict)
if not aCell in lineage_matrix:
lineage_matrix[aCell] = dict()
lineage_matrix[aCell][bCell] = [distance, ancestor]
print >>f_output, "\t".join(map(str, [aCell, bCell] + lineage_matrix[aCell][bCell]))
index += 1
f_output.close()
print
else:
print "Loading lineage distance matrix..."
lineage_matrix = general.build2(distancepath + lineagematrixfile, i="i", j="j", x=["distance","parent"], datatype="list", mode="matrix", header_dict="auto", listtypes=["int", "str"])
#print correlation_matrix["ABal"]["ABal"]
#print lineage_matrix["ABal"]["ABal"]
#pdb.set_trace()
# build expression distance matrix (as a function of fraction expression):
print "Generating combined distance matrix (at fraction range):"
f_output = open(distancepath + combinematrixfile, "w")
print >>f_output, "\t".join(["i", "j", "minimal", "fraction", "distance", "parent", "expression.correlation", "expression.correlation.pvalue", "expression.correlation.adjusted.pvalue", "i.genes", "j.genes", "overlap", "total", "overlap.max", "overlap.sum", "pvalue", "adjusted.pvalue", "flag"])
fraction_matrix, genes = dict(), sorted(tracking_matrix.keys())
for minimal in [1500, 1750, 2000]:
for fraction in general.drange(0.10, 0.50, 0.10):
print "...", minimal, fraction
fraction_matrix[fraction] = dict()
# find genes expressed per cell (using fraction cutoff):
cellular_matrix = dict()
fraction_quantitation_matrix, fraction_expression_matrix, fraction_tracking_matrix, fraction_trackedCells = expressionBuilder(expressionfile=option.expression, path=expressionpath, cutoff=fraction, minimum=minimal, metric="fraction.expression")
for gene in fraction_expression_matrix:
for cell in fraction_expression_matrix[gene]:
if not cell in cellular_matrix:
cellular_matrix[cell] = list()
cellular_matrix[cell].append(gene)
# find multiple hypothesis adjustment factor:
adjust = 0
for aCell in sorted(fraction_trackedCells):
for bCell in sorted(fraction_trackedCells):
if aCell in cellular_matrix and bCell in cellular_matrix:
adjust += 1
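				# "adjust" counts the number of cell pairs actually tested at this cutoff and is used below as a Bonferroni
				# factor on the hypergeometric p-values.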
# find gene expression overlap between cells:
overlap_matrix = dict()
universe = len(quantitation_matrix.keys())
for aCell in sorted(trackedCells):
for bCell in sorted(trackedCells):
if aCell in cellular_matrix and bCell in cellular_matrix:
aGenes = cellular_matrix[aCell]
bGenes = cellular_matrix[bCell]
union = set(aGenes).union(set(bGenes))
overlap = set(aGenes).intersection(set(bGenes))
maxOverlap = float(len(overlap))/min(len(aGenes), len(bGenes))
sumOverlap = float(len(overlap))/len(union)
							# Hypergeometric parameters:
m = len(aGenes) # number of white balls in urn
n = universe - len(bGenes) # number of black balls in urn
N = len(bGenes) # number of balls drawn from urn
							x = len(overlap) # number of white balls in the draw
							# If I pull out all balls with elephant tattoos (N), is the draw enriched in white balls?
pvalue = hyper.fishers(x, m+n, m, N, method="right")
adjPvalue = hyper.limit(pvalue*adjust)
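							# For reference only (not used here): assuming scipy were available, an equivalent right-tail test would be
							# scipy.stats.hypergeom.sf(x - 1, m + n, m, N); hyper.fishers with method="right" is taken to return that
							# same right-tail probability.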
# Store overlap and significance:
if not aCell in overlap_matrix:
overlap_matrix[aCell] = dict()
overlap_matrix[aCell][bCell] = [len(aGenes), len(bGenes), len(overlap), universe, maxOverlap, sumOverlap, pvalue, adjPvalue]
# generate combined distance output line:
for aCell in sorted(trackedCells):
for bCell in sorted(trackedCells):
# load lineage distances:
distance, ancestor = lineage_matrix[aCell][bCell]
# load correlation distances:
correlation, corPvalue, adjCorPvalue = correlation_matrix[aCell][bCell]
						# load expression distances:
if aCell in cellular_matrix and bCell in cellular_matrix:
aGenes, bGenes, overlap, universe, maxOverlap, sumOverlap, pvalue, adjPvalue = overlap_matrix[aCell][bCell]
madeFlag = "both.observed"
elif aCell in cellular_matrix:
aGenes, bGenes, overlap, universe, maxOverlap, sumOverlap, pvalue, adjPvalue = len(cellular_matrix[aCell]), 0, 0, len(trackedCells), 0, 0, 1, 1
madeFlag = "only.observed"
elif bCell in cellular_matrix:
aGenes, bGenes, overlap, universe, maxOverlap, sumOverlap, pvalue, adjPvalue = 0, len(cellular_matrix[bCell]), 0, len(trackedCells), 0, 0, 1, 1
madeFlag = "only.observed"
else:
aGenes, bGenes, overlap, universe, maxOverlap, sumOverlap, pvalue, adjPvalue = 0, 0, 0, len(trackedCells), 0, 0, 1, 1
madeFlag = "none.observed"
# export data:
print >>f_output, "\t".join(map(str, [aCell, bCell, minimal, fraction, distance, ancestor, correlation, corPvalue, adjCorPvalue, aGenes, bGenes, overlap, universe, maxOverlap, sumOverlap, pvalue, adjPvalue, madeFlag]))
# close output file:
f_output.close()
print
# cell time mode:
elif option.mode == "cell.times":
# define input expression files:
assayedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_assayed"
startedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_started"
trackedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_tracked"
focusedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_focused"
inheritfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_inherit"
maximalfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_maximal"
# load cell times:
print
print "Loading cellular times..."
time_matrix = dict()
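		# time_matrix maps each timepoint to the cells present at that timepoint; every cell contributes to all timepoints
		# in its start-stop interval, inclusive.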
inlines = open(extraspath + option.times).readlines()
for inline in inlines:
cell, start, stop = inline.strip().split(",")
for time in range(int(start), int(stop)+1):
if not time in time_matrix:
time_matrix[time] = list()
time_matrix[time].append(cell)
# export cell times:
populationDict = {
"assayed" : assayedfile,
"started" : startedfile,
"tracked" : trackedfile,
"focused" : focusedfile,
"inherit" : inheritfile,
"maximal" : maximalfile
}
print "Exporting cells per time point..."
for population in populationDict:
populationCells = general.build2(populationDict[population], id_column="cell", skip=True, mute=True).keys()
for time in sorted(time_matrix.keys()):
general.pathGenerator(timepath + population + "/cells/")
f_output = open(timepath + population + "/cells/" + str(time), "w")
timedCells = sorted(set(time_matrix[time]).intersection(set(populationCells)))
if len(timedCells) > 0:
print >>f_output, "\n".join(timedCells)
f_output.close()
# generate reports:
print "Generating reports..."
for population in populationDict:
general.pathGenerator(timepath + population + "/report/")
f_output = open(timepath + population + "/report/mapcells_" + population + "_time_report.txt", "w")
print >>f_output, "\t".join(["time", "cell.count", "cell.percent", "cell.ids"])
for time in sorted(time_matrix.keys()):
general.pathGenerator(timepath + population + "/report/")
timedCount = general.countLines(timepath + population + "/cells/" + str(time))
timedPercent = round(100*float(timedCount)/len(time_matrix[time]), 2)
timedCells = open(timepath + population + "/cells/" + str(time)).read().split("\n")
print >>f_output, "\t".join([str(time), str(timedCount), str(timedPercent), ",".join(timedCells).rstrip(",")])
f_output.close()
print
# cubism graph mode:
elif option.mode == "cell.cubism":
# define input expression files:
assayedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_assayed"
startedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_started"
trackedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_tracked"
focusedfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_focused"
inheritfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_inherit"
maximalfile = expressionpath + "mapcells_" + expression_flag + option.name + "_expression_maximal"
# define cell populations:
populationDict = {
"assayed" : assayedfile,
"started" : startedfile,
"tracked" : trackedfile,
"focused" : focusedfile,
"inherit" : inheritfile,
"maximal" : maximalfile
}
# parse reports:
print
print "Exporting per gene, per timepoint expression cells:"
for population in populationDict:
print "Processing:", population
# define output paths:
factorpath = cubismpath + population + "/factor/"
matrixpath = cubismpath + population + "/matrix/"
general.pathGenerator(factorpath)
general.pathGenerator(matrixpath)
# build cell-expression matrix:
quantitation_matrix, expression_matrix, tracking_matrix, trackedCells = expressionBuilder(expressionfile=populationDict[population], path="", cutoff=option.fraction, minimum=option.minimum, metric="fraction.expression")
# load timepoint data:
timeDict = general.build2(timepath + population + "/report/mapcells_" + population + "_time_report.txt", id_column="time")
# load calendar months:
monthDict = dict((k,v) for k,v in enumerate(calendar.month_abbr))
# process genes x timepoints:
m_output = open(matrixpath + "mapcells_cubism_matrix.txt", "w")
print >>m_output, "\t".join(["gene", "time", "cells", "gene.cells", "time.cells"])
for gene in sorted(expression_matrix.keys()):
timeStamp = 1001856000000
timeAdded = 100000000
factorLines = list()
for time in sorted(map(int, timeDict.keys())):
geneCells = expression_matrix[gene]
timeCells = timeDict[str(time)]["cell.ids"].split(",")
dateCells = len(set(geneCells).intersection(set(timeCells)))
date = datetime.datetime.fromtimestamp(timeStamp / 1e3)
day, month, year = date.day, date.month, str(date.year)[2:]
date = "-".join(map(str, [day, monthDict[month], year]))
factorLines.append(",".join(map(str, [date, dateCells, dateCells, dateCells, len(trackedCells)])))
print >>m_output, "\t".join(map(str, [gene, time, dateCells, len(geneCells), len(timeCells)]))
timeStamp += timeAdded
factorLines.reverse()
f_output = open(factorpath + gene + ".csv", "w")
print >>f_output, "Date,Open,High,Low,Close,Volume"
for factorLine in factorLines:
print >>f_output, factorLine.strip()
f_output.close()
# close output matrix:
m_output.close()
print
# cell annotation mode:
elif option.mode == "cell.annotation":
# load target cells from time-points:
print
if option.times != "OFF":
# define output file:
f_output = open(cellnotationspath + "mapcells_" + option.name + "_" + option.infile, "w")
# load time-point cells:
cells = getTargetCells(inpath=timepath + option.times + "/cells/", mode="time", timeRange=range(option.start, option.stop + 1, option.step))
# load target cells from collection:
elif option.collection != "OFF":
# define output file:
f_output = open(cellnotationspath + "mapcells_" + option.collection + "_" + option.infile, "w")
# load collection cells:
cells = getTargetCells(inpath=cellsetpath + option.collection + "/", mode="collection")
# export features per cell:
print "Exporting features per cell..."
k = 0
inlines = open(annotationspath + option.infile).readlines()
if option.header == "ON":
inlines.pop(0)
for cell in cells:
for inline in inlines:
if option.format == "bed":
print >>f_output, cell + ":" + inline.strip()
k += 1
f_output.close()
print "Features scaled from", len(inlines), "to", k, ": " + str(round(float(k)/len(inlines), 0)) + "x"
print
# build matrix mode:
elif option.mode == "cell.matrix":
# update overlappath:
matrixpath = matrixpath + option.collection + "/"
general.pathGenerator(matrixpath)
# define input files:
infile = expressionpath + option.expression
# define output files:
matrixfile = matrixpath + str(option.expression + "_" + option.name + "_matrix")
# load header dictionary:
hd = general.build_header_dict(infile)
# build cellular expression matrix:
matrix, cells, genes, tissueDict = dict(), list(), list(), dict()
inlines = open(infile).readlines()
inlines.pop(0)
for inline in inlines:
initems = inline.strip().split("\t")
cell, gene, cellExpression, fractionExpression, normalExpression, specificTissue, generalTissue, classTissue = initems[hd["cell"]], initems[hd["gene"]], initems[hd["cell.expression"]], initems[hd["fraction.expression"]], initems[hd["normal.expression"]], initems[hd["specific.tissue"]], initems[hd["general.tissue"]], initems[hd["class.tissue"]]
# extract expression value (using specified technique):
if option.technique == "binary":
if float(fractionExpression) >= option.fraction and float(cellExpression) >= option.minimum:
value = 1
else:
value = 0
elif option.technique == "signal":
value = float(cellExpression)
elif option.technique == "fraction":
value = float(fractionExpression)
elif option.technique == "normal":
value = float(normalExpression)
# store cells, genes, and values:
if not cell in cells:
cells.append(cell)
if not gene in genes:
genes.append(gene)
if not cell in tissueDict:
tissueDict[cell] = [classTissue, generalTissue, specificTissue]
if not cell in matrix:
matrix[cell] = dict()
matrix[cell][gene] = value
# export the cellular expression matrix!
f_output = open(matrixfile, "w")
cells, genes = sorted(cells), sorted(genes)
print >>f_output, "\t".join([""] + genes)
for cell in cells:
values = list()
for gene in genes:
if gene in matrix[cell]:
values.append(matrix[cell][gene])
else:
values.append(0)
valueCount = len(values) - values.count(0)
classTissue, generalTissue, specificTissue = tissueDict[cell]
specificTissue = specificTissue.replace(" ", "_")
label = ":".join([classTissue, generalTissue, specificTissue, cell])
print >>f_output, "\t".join([label] + map(str, values))
f_output.close()
# build in silico binding peaks mode:
elif option.mode == "cell.peaks":
# define the target contexts:
if option.contexts != "OFF":
shandle, target_context_dict = metrn.options_dict["contexts.condensed"][option.contexts]
target_contexts = list()
for target in target_context_dict:
target_contexts.append(target_context_dict[target])
target_contexts = sorted(list(set(target_contexts)))
# generate output paths:
insilicopath = bindingpath + option.name + "/"
general.pathGenerator(insilicopath)
# load header dictionary:
hd = general.build_header_dict(expressionpath + option.expression)
# load expression matrix:
print
print "Loading expression matrix..."
matrix = dict()
inlines = open(expressionpath + option.expression).readlines()
inlines.pop(0)
for inline in inlines:
initems = inline.strip().split("\t")
gene, cell, cellExpression, fractionExpression = initems[hd["gene"]], initems[hd["cell"]], float(initems[hd["cell.expression"]]), float(initems[hd["fraction.expression"]])
if not gene in matrix:
matrix[gene] = dict()
if fractionExpression >= option.fraction and float(cellExpression) >= option.minimum:
matrix[gene][cell] = fractionExpression
# load target cells:
if option.times != "OFF":
# load time-point cells:
timedCells = getTargetCells(inpath=timepath + option.times + "/cells/", mode="time", timeRange=range(option.start, option.stop + 1, option.step))
# scan peak files:
print
print "Generating cell-resolution peaks..."
k, peak_files, insilico_files = 0, os.listdir(peakspath), list()
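		# For every factor with cellular expression data (and a matching context, if requested), each peak line is replicated
		# once per expressing cell (optionally restricted to timed cells) and prefixed with "cell:"; this produces the
		# cell-resolution (in silico) peak files.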
for peak_file in peak_files:
dataset = peak_file.split("_peaks.bed")[0].replace("POL2", "AMA-1")
organism, strain, factor, context, institute, method = metrn.labelComponents(dataset, target="components")
if factor in matrix:
if option.contexts == "OFF" or context in target_contexts:
print "Processing:", dataset
insilico_file = peak_file.replace("POL2", "AMA-1")
f_output = open(insilicopath + insilico_file, "w")
for cell in sorted(matrix[factor].keys()):
if option.times == "OFF" or cell in timedCells:
for inline in open(peakspath + peak_file).readlines():
print >>f_output, cell + ":" + inline.strip()
f_output.close()
insilico_files.append(insilico_file)
# define output peak files:
unsortedfile = bindingpath + "mapcells_silico_" + option.name + "_unsorted.bed"
completefile = bindingpath + "mapcells_silico_" + option.name + "_complete.bed"
compiledfile = bindingpath + "mapcells_silico_" + option.name + "_compiled.bed"
# generate compilation files:
if not "mapcells_silico_" + option.peaks + "_complete.bed" in os.listdir(bindingpath) or option.overwrite == "ON":
# gather peak files and compiled into a single file:
print
print "Gathering peaks into single file..."
joint = " " + insilicopath
command = "cat " + insilicopath + joint.join(insilico_files) + " > " + unsortedfile
os.system(command)
print "Sorting peaks in single file..."
command = "sortBed -i " + unsortedfile + " > " + completefile
os.system(command)
# merge peaks into single file:
print "Collapsing peaks in sorted file..."
command = "mergeBed -nms -i " + completefile + " > " + compiledfile
os.system(command)
# remove unsorted file:
command = "rm -rf " + unsortedfile
os.system(command)
print
	# gene and cell collection reporting mode (cells per gene and genes per cell):
elif option.mode == "reports":
taskDict = {
"gene" : [cellsetpath, "cells"],
"cell" : [genesetpath, "genes"]
}
print
for task in taskDict:
inputpath, column = taskDict[task]
for collection in os.listdir(inputpath):
if collection in option.collection.split(",") or option.collection == "OFF":
print "Processing:", task, collection
f_output = open(reportspath + "mapcell_report_" + task + "_" + collection, "w")
print >>f_output, "\t".join([task, "count", column])
for item in sorted(os.listdir(inputpath + collection)):
contents = open(inputpath + collection + "/" + item).read().strip().split("\n")
contents = general.clean(contents)
print >>f_output, "\t".join(map(str, [item, len(contents), ",".join(sorted(contents))]))
f_output.close()
print
print
	# cell collection mode (cells expressing each gene):
elif option.mode == "cell.collection":
# establish descendants cutoff:
if option.descendants == "OFF":
descendants_cutoff = 1000000
descendants_handle = "XX"
else:
descendants_cutoff = int(option.descendants)
descendants_handle = option.descendants
# establish ascendants cutoff:
if option.ascendants == "OFF":
ascendants_cutoff = 0
ascendants_handle = "XX"
else:
ascendants_cutoff = int(option.ascendants)
ascendants_handle = option.ascendants
# establish limit cutoff:
if option.limit == "OFF":
limit_cutoff = "OFF"
limit_handle = "XX"
else:
limit_cutoff = int(option.limit)
limit_handle = option.limit
# define output folder:
cellsetpath = cellsetpath + option.collection + "/"
general.pathGenerator(cellsetpath)
# export expressing-cells for each gene:
if option.expression != "OFF":
# build cell-expression matrix:
quantitation_matrix, expression_matrix, tracking_matrix, trackedCells = expressionBuilder(expressionfile=option.expression, path=expressionpath, cutoff=option.fraction, minimum=option.minimum, metric="fraction.expression")
# export cells per gene:
for gene in sorted(expression_matrix.keys()):
f_output = open(cellsetpath + gene, "w")
for cell in sorted(expression_matrix[gene]):
print >>f_output, cell
f_output.close()
# export cells for SOM neurons:
if option.neurons != "OFF":
# update path to neurons:
neuronspath = neuronspath + option.peaks + "/"
# define input path:
sumpath = neuronspath + option.technique + "/results/" + option.neurons + "/summary/"
sumfile = "mapneurons_summary.txt"
# build header dict:
hd = general.build_header_dict(sumpath + sumfile)
# build SOM-cell matrix:
collection_matrix, trackedCells = dict(), list()
inlines = open(sumpath + sumfile).readlines()
inlines.pop(0)
for inline in inlines:
initems = inline.rstrip("\n").split("\t")
neuron, cells = initems[hd["neuron"]], initems[hd["class.ids"]]
collection_matrix[neuron] = general.clean(cells.split(","), "")
trackedCells.extend(cells.split(","))
trackedCells = general.clean(list(set(trackedCells)), "")
			# export cells per neuron:
for neuron in sorted(collection_matrix.keys()):
f_output = open(cellsetpath + neuron, "w")
for cell in sorted(collection_matrix[neuron]):
print >>f_output, cell
f_output.close()
	# gene collection mode (genes expressed per cell):
elif option.mode == "gene.collection":
# establish descendants cutoff:
if option.descendants == "OFF":
descendants_cutoff = 1000000
descendants_handle = "XX"
else:
descendants_cutoff = int(option.descendants)
descendants_handle = option.descendants
# establish ascendants cutoff:
if option.ascendants == "OFF":
ascendants_cutoff = 0
ascendants_handle = "XX"
else:
ascendants_cutoff = int(option.ascendants)
ascendants_handle = option.ascendants
# establish limit cutoff:
if option.limit == "OFF":
limit_cutoff = "OFF"
limit_handle = "XX"
else:
limit_cutoff = int(option.limit)
limit_handle = option.limit
# define output folder:
genesetpath = genesetpath + option.collection + "/"
general.pathGenerator(genesetpath)
		# export expressed genes for each cell:
if option.expression != "OFF":
# build cell-expression matrix:
quantitation_matrix, expression_matrix, tracking_matrix, trackedCells = expressionBuilder(expressionfile=option.expression, path=expressionpath, cutoff=option.fraction, minimum=option.minimum, metric="fraction.expression")
# gather cells:
cells = list()
for gene in sorted(quantitation_matrix.keys()):
for cell in sorted(quantitation_matrix[gene]):
cells.append(cell)
cells = sorted(list(set(cells)))
# invert matrix:
inverted_matrix = dict()
for gene in sorted(expression_matrix.keys()):
for cell in sorted(expression_matrix[gene]):
if not cell in inverted_matrix:
inverted_matrix[cell] = list()
inverted_matrix[cell].append(gene)
			# export genes per cell:
for cell in cells:
f_output = open(genesetpath + cell, "w")
if cell in inverted_matrix:
for gene in sorted(inverted_matrix[cell]):
print >>f_output, gene
f_output.close()
# cell transfer mode:
elif option.mode == "cell.transfer":
# define time-range to examine:
timeRange=range(option.start, option.stop + 1, option.step)
# generate new collections:
for timePoint in timeRange:
# define output path:
outpath = cellsetpath + option.name + general.indexTag(timePoint, option.total) + option.nametag + "/"
general.pathGenerator(outpath)
# load timePoint cells:
timedCells = getTargetCells(inpath=timepath + option.times + "/cells/", mode="time", timeRange=[timePoint])
# parse per gene signatures in collection:
for gene in os.listdir(cellsetpath + option.collection):
# load expression cells:
expressionCells = open(cellsetpath + option.collection + "/" + gene).read().split("\n")
# export timed, expressionCells:
f_output = open(outpath + gene, "w")
print >>f_output, "\n".join(sorted(list(set(timedCells).intersection(set(expressionCells)))))
f_output.close()
# mapping overlap mode:
elif option.mode == "cell.overlap":
# update overlappath:
overlappath = overlappath + option.collection + "/"
general.pathGenerator(overlappath)
# build cell-expression matrix:
print
print "Loading cellular expression..."
signal_matrix, expression_matrix, tracking_matrix, trackedCells = expressionBuilder(expressionfile=option.expression, path=expressionpath, cutoff=option.fraction, minimum=option.minimum, metric="cell.expression")
fraction_matrix, expression_matrix, tracking_matrix, trackedCells = expressionBuilder(expressionfile=option.expression, path=expressionpath, cutoff=option.fraction, minimum=option.minimum, metric="fraction.expression")
		normal_matrix, expression_matrix, tracking_matrix, trackedCells = expressionBuilder(expressionfile=option.expression, path=expressionpath, cutoff=option.fraction, minimum=option.minimum, metric="normal.expression")
rank_matrix, expression_matrix, tracking_matrix, trackedCells = expressionBuilder(expressionfile=option.expression, path=expressionpath, cutoff=option.fraction, minimum=option.minimum, metric="rank")
# load collection cells:
#print "Loading target cells..."
targetCells = getTargetCells(inpath=cellsetpath + option.collection + "/", mode="collection")
# create output file:
o_output = open(overlappath + "mapcells_" + option.collection + "_matrix_overlap", "w")
o_header = ["i", "j", "i.cells", "j.cells", "overlap.cells", "total.cells", "overlap.max", "overlap.sum", "overlap.avg", "expression.cor", "fraction.cor", "normal.cor", "rank.cor", "pvalue", "pvalue.adj", "score", "i.only.ids", "j.only.ids", "overlap.ids"]
print >>o_output, "\t".join(o_header)
# find maximum rank, if necessary:
if option.extend == "ON":
maxRank = 0
for gene in rank_matrix:
for targetCell in rank_matrix[gene]:
if int(rank_matrix[gene][targetCell]) > maxRank:
maxRank = int(rank_matrix[gene][targetCell])
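		# When option.extend is ON, genes with no assayed value for a target cell are padded below with zero signal, fraction
		# and normal values and with the worst (maximum) rank, so the per-gene vectors keep the same length for the correlations.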
# load gene-expressing cells data:
print
print "Build expression matrix per gene..."
genes = os.listdir(cellsetpath + option.collection)
matrix = dict()
for gene in genes:
matrix[gene] = dict()
matrix[gene]["cells"], matrix[gene]["signals"], matrix[gene]["fractions"], matrix[gene]["normals"], matrix[gene]["ranks"] = list(), list(), list(), list(), list()
expressionCells = open(cellsetpath + option.collection + "/" + gene).read().split("\n")
for targetCell in targetCells:
if targetCell in expressionCells:
matrix[gene]["cells"].append(targetCell)
if targetCell in signal_matrix[gene]:
matrix[gene]["signals"].append(signal_matrix[gene][targetCell])
matrix[gene]["fractions"].append(fraction_matrix[gene][targetCell])
matrix[gene]["normals"].append(normal_matrix[gene][targetCell])
matrix[gene]["ranks"].append(rank_matrix[gene][targetCell])
elif option.extend == "ON":
matrix[gene]["signals"].append(0)
matrix[gene]["fractions"].append(0)
matrix[gene]["normals"].append(0)
matrix[gene]["ranks"].append(maxRank)
# print a matrix of cell expression overlap between genes:
print "Exporting cellular overlap matrix..."
adjust = len(matrix)*len(matrix)
universe = len(targetCells)
for geneX in sorted(matrix.keys()):
cellsX = matrix[geneX]["cells"]
signalsX, fractionsX, normalsX, ranksX = numpy.array(matrix[geneX]["signals"]), numpy.array(matrix[geneX]["fractions"]), numpy.array(matrix[geneX]["normals"]), numpy.array(matrix[geneX]["ranks"])
for geneY in sorted(matrix.keys()):
cellsY = matrix[geneY]["cells"]
signalsY, fractionsY, normalsY, ranksY = numpy.array(matrix[geneY]["signals"]), numpy.array(matrix[geneY]["fractions"]), numpy.array(matrix[geneY]["normals"]), numpy.array(matrix[geneY]["ranks"])
signalCor = numpy.corrcoef(signalsX, signalsY)[0][1]
fractionCor = numpy.corrcoef(fractionsX, fractionsY)[0][1]
normalCor = numpy.corrcoef(normalsX, normalsY)[0][1]
rankCor = numpy.corrcoef(ranksX, ranksY)[0][1]
cellsXo = sorted(set(cellsX).difference(set(cellsY))) # X-only cells
cellsYo = sorted(set(cellsY).difference(set(cellsX))) # Y-only cells
cellsO = sorted(set(cellsX).intersection(set(cellsY))) # overlap
cellsU = sorted(set(cellsX).union(set(cellsY))) # union
cellsT = targetCells
				# Hypergeometric parameters:
m = len(cellsX) # number of white balls in urn
n = universe - len(cellsX) # number of black balls in urn
N = len(cellsY) # number of balls drawn from urn
				x = len(cellsO) # number of white balls in the draw
				# If I pull out all balls with elephant tattoos (N), is the draw enriched in white balls?
pvalue = hyper.fishers(x, m+n, m, N, method="right")
adjPvalue = hyper.limit(pvalue*adjust)
score = hyper.directional(x, m+n, m, N, adjust=adjust)
output = [geneX, geneY]
output.append(len(cellsX))
output.append(len(cellsY))
output.append(len(cellsO))
output.append(len(cellsT))
if len(cellsO) > 0:
output.append(float(len(cellsO))/min(len(cellsX), len(cellsY)))
output.append(float(len(cellsO))/len(cellsU))
output.append(float(len(cellsO))/numpy.mean([len(cellsX), len(cellsY)]))
else:
output.append(0)
output.append(0)
output.append(0)
output.append(signalCor)
output.append(fractionCor)
output.append(normalCor)
output.append(rankCor)
output.append(pvalue)
output.append(adjPvalue)
output.append(score)
output.append(";".join(cellsXo))
output.append(";".join(cellsYo))
output.append(";".join(cellsO))
if len(output) != len(o_header):
print len(o_header), len(output)
print output
print
pdb.set_trace()
if " " in "\t".join(map(str, output)):
print output
pdb.set_trace()
print >>o_output, "\t".join(map(str, output))
# close output:
o_output.close()
print
# hybrid (datatypes) matrix mode:
elif option.mode == "cell.hybrid":
# get comparison properties:
peaks, domain = option.a.split("/")[:2]
collection = option.b.split("/")[0]
# load target contexts:
codeContexts, targetContexts = metrn.options_dict["contexts.extended"][option.contexts]
# make comparison output folders:
hybridpath = hybridpath + collection + "/" + peaks + "/" + domain + "/" + codeContexts + "/"
general.pathGenerator(hybridpath)
# define input files:
ainfile = str(coassociationspath + option.a).replace("//","/")
binfile = str(cellspath + "overlap/" + option.b).replace("//","/")
# load input headers:
aheader = general.build_header_dict(ainfile)
bheader = general.build_header_dict(binfile)
# load co-association results:
print
print "Loading co-associations..."
cobindingFrames = general.build2(ainfile, id_complex=["i", "j"], separator=":")
# load cellular expression overlap:
print "Loading co-expression..."
coexpressionFrames = general.build2(binfile, id_complex=["i", "j"], separator=":")
coexpressionMatrix = general.build2(binfile, i="i", j="j", x="overlap.sum", mode="matrix")
# characterize input file basenames:
abasename = option.a.split("/")[len(option.a.split("/"))-1].replace(".txt","").replace(".bed","")
bbasename = option.b.split("/")[len(option.b.split("/"))-1].replace(".txt","").replace(".bed","")
# define output file:
f_outfile = hybridpath + "mapcells_hybrid_" + collection + "-" + peaks + "-" + domain + "_combined.txt"
f_output = open(f_outfile, "w")
# generate output header:
header = ["i", "j", "label"]
acolumns = list()
for acolumn in general.valuesort(aheader):
if not acolumn in header:
acolumns.append(acolumn)
bcolumns = list()
for bcolumn in general.valuesort(bheader):
if not bcolumn in header and not bcolumn in ["i.only.ids", "j.only.ids", "overlap.ids"]:
bcolumns.append(bcolumn)
print >>f_output, "\t".join(header + acolumns + bcolumns)
# filter-match results:
print "Merging co-binding and co-expression..."
ifactor, jfactor = option.indexes.split(",")
icontext, jcontext = option.values.split(",")
comparisons = list()
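		# Co-binding rows are matched to co-expression rows by factor pair; only comparisons whose contexts fall in the target
		# contexts are kept, and each factor pair (i, j) is exported once (the first co-binding row encountered wins).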
for cobindingComparison in sorted(cobindingFrames.keys()):
iFactor, jFactor = cobindingFrames[cobindingComparison][ifactor], cobindingFrames[cobindingComparison][jfactor]
iContext, jContext = cobindingFrames[cobindingComparison][icontext], cobindingFrames[cobindingComparison][jcontext]
if iContext in targetContexts and jContext in targetContexts:
if iFactor in coexpressionMatrix and jFactor in coexpressionMatrix:
coexpressionComparison = iFactor + ":" + jFactor
label = ":".join(sorted([iFactor, jFactor]))
if not coexpressionComparison in comparisons:
output = [iFactor, jFactor, label]
for acolumn in acolumns:
output.append(cobindingFrames[cobindingComparison][acolumn])
for bcolumn in bcolumns:
output.append(coexpressionFrames[coexpressionComparison][bcolumn])
print >>f_output, "\t".join(map(str, output))
comparisons.append(coexpressionComparison)
# NOTE: this filtering for unique comparisons ensures that only one
# of the RNA PolII datasets gets used.
# close output file:
f_output.close()
print "Merged comparisons:", len(comparisons)
print
# dynamics (hybrid) matrix mode:
elif option.mode == "cell.dynamics":
		# are we working with hybrid co-binding and co-expression data?
if option.peaks != "OFF" and option.domain != "OFF":
hybridMode = "ON"
else:
hybridMode = "OFF"
# make comparison output folders:
if hybridMode == "ON":
dynamicspath = dynamicspath + option.name + "/" + option.peaks + "/" + option.domain + "/"
general.pathGenerator(dynamicspath)
f_outfile = dynamicspath + "mapcells_hybrid_" + option.name + "-" + option.peaks + "-" + option.domain + "_dynamics.txt"
f_output = open(f_outfile, "w")
u_outfile = dynamicspath + "mapcells_hybrid_" + option.name + "-" + option.peaks + "-" + option.domain + "_uniqueID.txt"
u_output = open(u_outfile, "w")
else:
dynamicspath = dynamicspath + option.name + "/overlap/"
general.pathGenerator(dynamicspath)
f_outfile = dynamicspath + "mapcells_direct_" + option.name + "_dynamics.txt"
f_output = open(f_outfile, "w")
u_outfile = dynamicspath + "mapcells_direct_" + option.name + "_uniqueID.txt"
u_output = open(u_outfile, "w")
# define output file:
k = 0
# load target contexts:
codeContexts, targetContexts = metrn.options_dict["contexts.extended"][option.contexts]
# load overlap data from collections:
print
print "Transfer dynamic co-binding and co-expression analysis..."
for index in range(option.start, option.stop+1, option.step):
collection = option.collection + general.indexTag(index, option.total) + option.nametag
labels = list()
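			# "labels" tracks the factor-pair labels already written for this timepoint, so that u_output keeps a single row
			# per label per timepoint while f_output keeps them all.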
# process hybrid co-binding and co-expression data:
if hybridMode == "ON" and collection in os.listdir(hybridpath):
if option.peaks in os.listdir(hybridpath + collection):
if option.domain in os.listdir(hybridpath + collection + "/" + option.peaks):
infile = hybridpath + collection + "/" + option.peaks + "/" + option.domain + "/" + codeContexts + "/mapcells_hybrid_" + collection + "-" + option.peaks + "-" + option.domain + "_combined.txt"
inlines = open(infile).readlines()
header = inlines.pop(0)
if k == 0:
print >>f_output, "timepoint" + "\t" + header.strip()
print >>u_output, "timepoint" + "\t" + header.strip()
k += 1
for inline in inlines:
label = inline.strip().split("\t")[2]
print >>f_output, str(index) + "\t" + inline.strip()
if not label in labels:
print >>u_output, str(index) + "\t" + inline.strip()
labels.append(label)
# process direct co-expression data:
if hybridMode == "OFF" and collection in os.listdir(overlappath):
infile = overlappath + collection + "/mapcells_" + collection + "_matrix_overlap"
inlines = open(infile).readlines()
header = inlines.pop(0)
if k == 0:
headerItems = header.strip().split("\t")[:15]
print >>f_output, "\t".join(["timepoint", "label"] + headerItems)
print >>u_output, "\t".join(["timepoint", "label"] + headerItems)
k += 1
for inline in inlines:
initems = inline.strip().split("\t")[:15]
label = ":".join(sorted([initems[0], initems[1]]))
print >>f_output, "\t".join([str(index), label] + initems)
if not label in labels:
print >>u_output, "\t".join([str(index), label] + initems)
labels.append(label)
f_output.close()
u_output.close()
print
# hypergeometric tissue-testing mode:
elif option.mode == "test.tissues":
# load tissue annotation matrixes:
print
print "Loading tissue annotations..."
specificTissues = general.build2(expressionpath + option.infile, i="specific.tissue", j="cell", mode="matrix", counter=True)
generalTissues = general.build2(expressionpath + option.infile, i="general.tissue", j="cell", mode="matrix", counter=True)
classTissues = general.build2(expressionpath + option.infile, i="class.tissue", j="cell", mode="matrix", counter=True)
totalCells = general.build2(expressionpath + option.expression, i="cell", x="specific.tissue", mode="values", skip=True)
totalCells = sorted(totalCells.keys())
# define a function for testing:
def tissueTesting(queryCells, tissueMatrix, totalCells, adjust=1, match=True):
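			# tissueTesting: for each tissue, computes the overlap between the query cells and the tissue cells (optionally
			# restricted to totalCells), its right-tail hypergeometric p-value (Bonferroni-adjusted by "adjust") and an
			# enrichment/depletion score, and returns a tissue-keyed dictionary of these statistics.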
if match:
queryCells = set(queryCells).intersection(set(totalCells))
tissueOverlap = dict()
for tissue in sorted(tissueMatrix.keys()):
tissueCells = sorted(tissueMatrix[tissue].keys())
if match:
tissueCells = set(tissueCells).intersection(set(totalCells))
overlapCells = set(queryCells).intersection(set(tissueCells))
m = len(queryCells)
n = len(totalCells) - len(queryCells)
U = len(totalCells)
N = len(tissueCells)
x = len(overlapCells)
unionized = len(set(queryCells).union(set(tissueCells)))
maximized = min(len(queryCells), len(tissueCells))
# determine overlap fractions:
if maximized > 0:
maxOverlap = float(x)/maximized
else:
maxOverlap = 0
if unionized > 0:
sumOverlap = float(x)/unionized
else:
sumOverlap = 0
# calculate probability mass function (PMF):
pvalue = hyper.fishers(x, U, m, N, adjust=1, method="right")
adjPvalue = hyper.limit(pvalue*adjust)
# calculate enrichment/depletion score:
score = hyper.directional(x, U, m, N, adjust=adjust)
# store overlap scores:
tissueOverlap[tissue] = [len(queryCells), len(tissueCells), len(overlapCells), len(totalCells), maxOverlap, sumOverlap, pvalue, adjPvalue, score]
# return overlap scores:
return tissueOverlap
# load genes:
genes = sorted(os.listdir(cellsetpath + option.collection))
# determine Bonferroni correction factors
adjustSpecific = len(genes)*len(specificTissues)
adjustGeneral = len(genes)*len(generalTissues)
adjustClass = len(genes)*len(classTissues)
#print adjustSpecific
#print adjustGeneral
#print adjustClass
#pdb.set_trace()
# load cellular expression patterns per gene:
print "Loading per gene expression matrix..."
specificMatrix, generalMatrix, classMatrix = dict(), dict(), dict()
for gene in genes:
cells = open(cellsetpath + option.collection + "/" + gene).read().split("\n")
specificMatrix[gene] = tissueTesting(cells, specificTissues, totalCells, adjust=adjustSpecific)
generalMatrix[gene] = tissueTesting(cells, generalTissues, totalCells, adjust=adjustGeneral)
classMatrix[gene] = tissueTesting(cells, classTissues, totalCells, adjust=adjustClass)
# load cellular expression patterns per gene:
print "Exporting overlap scores..."
s_output = open(tissuespath + "mapcells_" + option.collection + "_matrix_specific.txt", "w")
g_output = open(tissuespath + "mapcells_" + option.collection + "_matrix_general.txt" , "w")
c_output = open(tissuespath + "mapcells_" + option.collection + "_matrix_class.txt" , "w")
print >>s_output, "\t".join(["gene", "tissue", "gene.cells", "tissue.cells", "overlap.cells", "total.cells", "overlap.max", "overlap.sum", "pvalue", "pvalue.adj", "score"])
print >>g_output, "\t".join(["gene", "tissue", "gene.cells", "tissue.cells", "overlap.cells", "total.cells", "overlap.max", "overlap.sum", "pvalue", "pvalue.adj", "score"])
print >>c_output, "\t".join(["gene", "tissue", "gene.cells", "tissue.cells", "overlap.cells", "total.cells", "overlap.max", "overlap.sum", "pvalue", "pvalue.adj", "score"])
for gene in sorted(specificMatrix.keys()):
for tissue in sorted(specificMatrix[gene].keys()):
print >>s_output, "\t".join(map(str, [gene, tissue] + specificMatrix[gene][tissue]))
for tissue in sorted(generalMatrix[gene].keys()):
print >>g_output, "\t".join(map(str, [gene, tissue] + generalMatrix[gene][tissue]))
for tissue in sorted(classMatrix[gene].keys()):
print >>c_output, "\t".join(map(str, [gene, tissue] + classMatrix[gene][tissue]))
# close outputs:
s_output.close()
g_output.close()
c_output.close()
print
# lineage construction/generation mode:
elif option.mode == "build.lineages":
import time
# establish descendants cutoff:
if option.descendants == "OFF":
descendants_cutoff = 1000000
descendants_handle = "XX"
else:
descendants_cutoff = int(option.descendants)
descendants_handle = option.descendants
# establish ascendants cutoff:
if option.ascendants == "OFF":
ascendants_cutoff = 0
ascendants_handle = "XX"
else:
ascendants_cutoff = int(option.ascendants)
ascendants_handle = option.ascendants
# establish limit cutoff:
if option.limit == "OFF":
limit_cutoff = "OFF"
limit_handle = "XX"
else:
limit_cutoff = int(option.limit)
limit_handle = option.limit
# define output paths:
logpath = lineagepath + option.name + "/" + option.method + "/lineage." + option.lineages + "/ascendants." + ascendants_handle + "/descendants." + descendants_handle + "/limit." + limit_handle + "/log/"
buildpath = lineagepath + option.name + "/" + option.method + "/lineage." + option.lineages + "/ascendants." + ascendants_handle + "/descendants." + descendants_handle + "/limit." + limit_handle + "/build/"
general.pathGenerator(logpath)
general.pathGenerator(buildpath)
# prepare log file:
l_output = open(logpath + "mapcells_build_" + option.cells + ".log", "w")
# clear output folder contents:
command = "rm -rf " + buildpath + "*"
os.system(command)
# build cell-expression matrix:
print
print "Loading cellular expression..."
print >>l_output, "Loading cellular expression..."
quantitation_matrix, expression_matrix, tracking_matrix, trackedCells = expressionBuilder(expressionfile=option.expression, path=expressionpath, cutoff=option.fraction, minimum=option.minimum, metric="fraction.expression")
# store cell-parent relationships:
print "Loading cell-parent relationships..."
print >>l_output, "Loading cell-parent relationships..."
cell_dict, parent_dict, pedigreeCells = relationshipBuilder(pedigreefile=option.pedigree, path=extraspath, trackedCells=trackedCells, lineages=option.lineages)
print "Pedigree cells:", len(pedigreeCells)
print "Tracked cells:", len(trackedCells)
print >>l_output, "Pedigree cells:", len(pedigreeCells)
print >>l_output, "Tracked cells:", len(trackedCells)
# generate lineages for enrichment:
print
print "Generating lineages..."
print >>l_output, ""
print >>l_output, "Generating lineages..."
i, j, maxDN, minUP = 0, 0, 0, 10000
for parent in pedigreeCells:
i += 1
# define descendant cells:
descendants = descendantsCollector(parent, parent_dict, cell_dict, descendants=list())
# define ascendants cells:
ascendants = ascendantsCollector(parent, parent_dict, cell_dict, ascendants=list())
# calculate combinations possible:
combinations = combinationCalculator(len(descendants), len(descendants))
# apply descendants cutoff:
if len(descendants) <= descendants_cutoff and len(ascendants) >= ascendants_cutoff:
j += 1
print parent, len(ascendants), len(descendants), time.asctime(time.localtime())
print >>l_output, parent, len(ascendants), len(descendants), time.asctime(time.localtime())
# record max and min cutoffs:
if len(ascendants) < minUP:
minUP = len(ascendants)
if len(descendants) > maxDN:
maxDN = len(descendants)
# define lineage cells:
if option.method == "descender":
subtrees = [",".join(descendants)]
elif option.method == "generator":
subtrees = lineageGenerator(parent, parent_dict, cell_dict)
elif option.method == "builder":
subtrees = lineageBuilder(parent, parent_dict, cell_dict, limit=limit_cutoff)
elif option.method == "collector":
subtrees = lineageCollector(expression_matrix[gene], parent_dict, cell_dict)
print subtrees
pdb.set_trace() # not implemented yet
# export lineage cells:
f_output = open(buildpath + parent, "w")
index = 1
for subtree in subtrees:
print >>f_output, "\t".join([parent, parent + "." + str(index), subtree])
index += 1
f_output.close()
print
print "Pedigree nodes lineaged:", i
print "Pedigree nodes examined:", j, "(" + str(round(100*float(j)/i, 2)) + "%)"
print "Maximum descendants:", maxDN
print "Minimum ascendants:", minUP
print
print >>l_output, ""
print >>l_output, "Pedigree nodes lineaged:", i
print >>l_output, "Pedigree nodes examined:", j, "(" + str(round(100*float(j)/i, 2)) + "%)"
print >>l_output, "Maximum descendants:", maxDN
print >>l_output, "Minimum ascendants:", minUP
# close output files:
l_output.close()
#pdb.set_trace()
# hypergeometric lineage-testing mode:
elif option.mode == "test.lineages":
# establish descendants cutoff:
if option.descendants == "OFF":
descendants_cutoff = 1000000
descendants_handle = "XX"
else:
descendants_cutoff = int(option.descendants)
descendants_handle = option.descendants
# establish ascendants cutoff:
if option.ascendants == "OFF":
ascendants_cutoff = 0
ascendants_handle = "XX"
else:
ascendants_cutoff = int(option.ascendants)
ascendants_handle = option.ascendants
# establish limit cutoff:
if option.limit == "OFF":
limit_cutoff = "OFF"
limit_handle = "XX"
else:
limit_cutoff = int(option.limit)
limit_handle = option.limit
# define output paths:
logpath = lineagepath + option.name + "/" + option.method + "/lineage." + option.lineages + "/ascendants." + ascendants_handle + "/descendants." + descendants_handle + "/limit." + limit_handle + "/log/"
buildpath = lineagepath + option.name + "/" + option.method + "/lineage." + option.lineages + "/ascendants." + ascendants_handle + "/descendants." + descendants_handle + "/limit." + limit_handle + "/build/"
hyperpath = lineagepath + option.name + "/" + option.method + "/lineage." + option.lineages + "/ascendants." + ascendants_handle + "/descendants." + descendants_handle + "/limit." + limit_handle + "/hyper/"
cellsetpath = cellsetpath + option.collection + "/"
general.pathGenerator(logpath)
general.pathGenerator(buildpath)
general.pathGenerator(hyperpath)
#general.pathGenerator(cellsetpath)
# prepare log file:
l_output = open(logpath + "mapcells_hyper_" + option.collection + "_" + option.cells + ".log", "w")
# build cell-expression matrix:
print
print "Loading cellular expression..."
print >>l_output, "Loading cellular expression..."
quantitation_matrix, expression_matrix, tracking_matrix, trackedCells = expressionBuilder(expressionfile=option.expression, path=expressionpath, cutoff=option.fraction, minimum=option.minimum, metric="fraction.expression")
# load cell-parent relationships:
print "Loading cell-parent relationships..."
print >>l_output, "Loading cell-parent relationships..."
cell_dict, parent_dict, pedigreeCells = relationshipBuilder(pedigreefile=option.pedigree, path=extraspath, trackedCells=trackedCells, lineages=option.lineages)
print "Pedigree cells:", len(pedigreeCells)
print "Tracked cells:", len(trackedCells)
print >>l_output, "Pedigree cells:", len(pedigreeCells)
print >>l_output, "Tracked cells:", len(trackedCells)
# prepare for scanning...
i, j, k = 0, 0, 0
nodes = general.clean(os.listdir(buildpath), '.DS_Store')
overlap_dict, pvalue_dict, score_dict = dict(), dict(), dict()
# prepare output file:
f_output = open(hyperpath + "mapcells_hyper_" + option.collection + "_" + option.cells + ".txt", "w")
header = ["gene", "node", "lineage", "experiment.cells", "lineage.cells", "overlap.sum", "overlap.max", "overlap.count", "total.count", "lineage.count", "expressed.count", "pvalue", "pvalue.adj", "score", "cells"]
print >>f_output, "\t".join(map(str, header))
# load target cells:
print
print "Loading target cells..."
print >>l_output, ""
print >>l_output, "Loading target cells..."
collection_matrix = dict()
for collection in os.listdir(cellsetpath):
collectionCells = general.clean(open(cellsetpath + collection).read().split("\n"), "")
collection_matrix[collection] = collectionCells
#print collection, collectionCells
# define multiple-hypothesis correction factor:
lineageTotal = 0
for node in nodes:
lineageTotal += general.countLines(buildpath + node)
adjust = len(collection_matrix)*lineageTotal
# check background cell population:
if option.cells == "tracked":
pedigreeCells = list(trackedCells)
# scan cells for enrichment:
print "Scanning cells for lineage enrichments..."
print >>l_output, ""
print >>l_output, "Scanning cells for lineage enrichments..."
collectionsEnriched = list()
for collection in collection_matrix:
collectionCells = collection_matrix[collection]
# filter (complete) pedigree cells to reduce to tracked cells?
if option.cells == "tracked" and collection in tracking_matrix:
completeCells = set(tracking_matrix[collection]).intersection(set(pedigreeCells))
else:
completeCells = pedigreeCells
# Note: These are cells for which we have expression measurements for gene ('collection')...
# Note: It is not necessary to filter expression cells because these are by definition a subset of the tracked cells.
# scan lineages for enrichment:
nodesEnriched, linesEnriched = list(), list()
for node in nodes:
# load node-specific lineages:
lineageLines = open(buildpath + node).readlines()
for lineageLine in lineageLines:
lineageNode, lineageName, lineageCells = lineageLine.strip().split("\t")
lineageCells = lineageCells.split(",")
lineageCount = len(lineageCells)
# filter lineage cells to reduce to tracked cells?
if option.cells == "tracked" and collection in tracking_matrix:
lineageCells = set(tracking_matrix[collection]).intersection(set(lineageCells))
#print collection, node, len(tracking_matrix[collection]), len(lineageCells), ",".join(lineageCells)
#pdb.set_trace()
# test enrichment in lineage:
i += 1
completed = len(completeCells)
descended = len(lineageCells)
collected = len(collectionCells)
overlaped = len(set(collectionCells).intersection(set(lineageCells)))
unionized = len(set(collectionCells).union(set(lineageCells)))
maximized = min(descended, collected)
# determine overlaps:
if maximized > 0:
maxOverlap = float(overlaped)/maximized
else:
maxOverlap = 0
if unionized > 0:
sumOverlap = float(overlaped)/unionized
else:
sumOverlap = 0
# check overlap:
if maxOverlap >= float(option.overlap):
j += 1
# calculate probability mass function (PMF):
pvalue = hyper.fishers(overlaped, completed, descended, collected, adjust=1, method="right")
adjPvalue = hyper.limit(pvalue*adjust)
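# Note (added for clarity; 'hyper' is a project-local module, so the argument roles are
# inferred from the variable names): this is a right-tailed Fisher/hypergeometric test of
# whether the expression set ('collected' cells) overlaps this lineage ('descended' cells)
# more than expected by chance within the 'completed' background; the raw p-value is then
# scaled by the Bonferroni-style factor 'adjust' (collections x total lineages, computed
# above) and presumably capped at 1 by hyper.limit().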
# calculate enrichment/depletion score:
score = hyper.directional(overlaped, completed, descended, collected, adjust=adjust)
# should we store this result?
if adjPvalue < float(option.pvalue):
k += 1
if not collection in overlap_dict:
overlap_dict[collection], pvalue_dict[collection], score_dict[collection] = dict(), dict(), dict()
overlap_dict[collection][node] = maxOverlap
pvalue_dict[collection][node] = adjPvalue
score_dict[collection][node] = score
output = [collection, node, lineageName, len(collectionCells), lineageCount, sumOverlap, maxOverlap, overlaped, completed, descended, collected, pvalue, adjPvalue, score, ','.join(lineageCells)]
print >>f_output, "\t".join(map(str, output))
if not collection in collectionsEnriched:
collectionsEnriched.append(collection)
if not node in nodesEnriched:
nodesEnriched.append(node)
linesEnriched.append(lineageName)
print collection, i, j, k, len(collectionsEnriched), len(nodesEnriched), len(linesEnriched)
print >>l_output, collection, i, j, k, len(collectionsEnriched), len(nodesEnriched), len(linesEnriched)
print
print "Lineages examined:", i
print "Lineages overlapped:", j
print "Lineages significant:", k, "(" + str(round(100*float(k)/i, 2)) + "%)"
print
print >>l_output, ""
print >>l_output, "Lineages examined:", i
print >>l_output, "Lineages overlapped:", j
print >>l_output, "Lineages significant:", k, "(" + str(round(100*float(k)/i, 2)) + "%)"
# close output file
f_output.close()
l_output.close()
#pdb.set_trace()
# hypergeometric testing between sets of cells mode:
elif option.mode == "test.comparison":
# define output paths:
querypath = cellsetpath + option.query + "/"
targetpath = cellsetpath + option.target + "/"
hyperpath = comparepath + option.name + "/hyper/"
logpath = comparepath + option.name + "/log/"
general.pathGenerator(hyperpath)
general.pathGenerator(logpath)
# prepare log file:
l_output = open(logpath + "mapcells_comparison_" + option.name + "_" + option.cells + ".log", "w")
# build cell-expression matrix:
print
print "Loading cellular expression..."
print >>l_output, "Loading cellular expression..."
quantitation_matrix, expression_matrix, tracking_matrix, trackedCells = expressionBuilder(expressionfile=option.expression, path=expressionpath, cutoff=option.fraction, minimum=option.minimum, metric="fraction.expression")
# store cell-parent relationships:
print "Loading cell-parent relationships..."
print >>l_output, "Loading cell-parent relationships..."
cell_dict, parent_dict, pedigreeCells = relationshipBuilder(pedigreefile=option.pedigree, path=extraspath, trackedCells=trackedCells, lineages=option.cells)
# Note that here the lineage-filtering uses the indicated cells option!
print "Pedigree cells:", len(pedigreeCells)
print "Tracked cells:", len(trackedCells)
print >>l_output, "Pedigree cells:", len(pedigreeCells)
print >>l_output, "Tracked cells:", len(trackedCells)
# prepare for scanning...
overlap_dict, pvalue_dict = dict(), dict()
i, j, k = 0, 0, 0
# prepare output file:
f_output = open(hyperpath + "mapcells_test_" + option.name + "_" + option.cells + "_comparison.txt", "w")
header = ["query", "target", "lineage", "query.cells", "target.cells", "overlap.sum", "overlap.max", "overlap.count", "total.count", "query.count", "target.count", "pvalue", "pvalue.adj", "score", "cells"]
print >>f_output, "\t".join(map(str, header))
# load query cells:
print
print "Loading query cells..."
print >>l_output, ""
print >>l_output, "Loading query cells..."
query_matrix = dict()
for query in os.listdir(querypath):
queryCells = general.clean(open(querypath + query).read().split("\n"), "")
query_matrix[query] = queryCells
#print query, queryCells
# load target cells:
print
print "Loading target cells..."
print >>l_output, ""
print >>l_output, "Loading target cells..."
target_matrix = dict()
for target in os.listdir(targetpath):
targetCells = general.clean(open(targetpath + target).read().split("\n"), "")
target_matrix[target] = targetCells
#print target, targetCells
# define multiple-hypothesis correction factor:
adjust = len(query_matrix)*len(target_matrix)
# check background cell population:
if option.cells == "tracked":
pedigreeCells = list(trackedCells)
# scan query cells for enrichment:
print "Scanning target cells for query cells enrichment..."
print >>l_output, ""
print >>l_output, "Scanning target cells for query cells enrichment..."
queriesEnriched = list()
for query in sorted(query_matrix.keys()):
queryCells = list(set(query_matrix[query]))
# filter query cells to reduce to tracked cells?
if option.cells == "tracked":
queryCells = set(queryCells).intersection(set(pedigreeCells))
# scan target cells for enrichment:
targetsEnriched, linesEnriched = list(), list()
for target in sorted(target_matrix.keys()):
targetCells = list(set(target_matrix[target]))
# filter target cells to reduce to tracked cells?
if option.cells == "tracked":
targetCells = set(targetCells).intersection(set(pedigreeCells))
#print query, target, len(queryCells), len(targetCells), ",".join(targetCells)
#pdb.set_trace()
# test enrichment in lineage:
i += 1
completed = len(pedigreeCells)
descended = len(targetCells)
collected = len(queryCells)
overlaped = len(set(queryCells).intersection(set(targetCells)))
unionized = len(set(queryCells).union(set(targetCells)))
maximized = min(descended, collected)
# determine overlaps:
if maximized > 0:
maxOverlap = float(overlaped)/maximized
else:
maxOverlap = 0
if unionized > 0:
sumOverlap = float(overlaped)/unionized
else:
sumOverlap = 0
# check overlap:
if maxOverlap >= float(option.overlap):
j += 1
# calculate probability mass function (PMF):
pvalue = hyper.fishers(overlaped, completed, descended, collected, adjust=1, method="right")
adjPvalue = hyper.limit(pvalue*adjust)
# calculate enrichment/depletion score:
score = hyper.directional(overlaped, completed, descended, collected, adjust=adjust)
if adjPvalue < float(option.pvalue):
k += 1
if not query in overlap_dict:
overlap_dict[query], pvalue_dict[query] = dict(), dict()
overlap_dict[query][target] = maxOverlap
pvalue_dict[query][target] = pvalue
output = [query, target, option.target, len(queryCells), len(targetCells), sumOverlap, maxOverlap, overlaped, completed, descended, collected, pvalue, adjPvalue, score, ','.join(targetCells)]
print >>f_output, "\t".join(map(str, output))
if not query in queriesEnriched:
queriesEnriched.append(query)
if not target in targetsEnriched:
targetsEnriched.append(target)
print query, i, j, k, len(queriesEnriched), len(targetsEnriched), len(linesEnriched)
print >>l_output, query, i, j, k, len(queriesEnriched), len(targetsEnriched), len(linesEnriched)
print
print "Lineages examined:", i
print "Lineages overlapped:", j
print "Lineages significant:", k, "(" + str(round(100*float(k)/i, 2)) + "%)"
print
print >>l_output, ""
print >>l_output, "Lineages examined:", i
print >>l_output, "Lineages overlapped:", j
print >>l_output, "Lineages significant:", k, "(" + str(round(100*float(k)/i, 2)) + "%)"
# close output file
f_output.close()
l_output.close()
# filter testing results to neurons where the region is contained mode:
elif option.mode == "test.regions":
# update path to neurons:
neuronspath = neuronspath + option.peaks + "/"
# define input/output paths:
bedpath = neuronspath + option.technique + "/results/" + option.neurons + "/regions/bed/"
querypath = cellsetpath + option.query + "/"
targetpath = cellsetpath + option.target + "/"
hyperpath = comparepath + option.name + "/hyper/"
logpath = comparepath + option.name + "/log/"
general.pathGenerator(hyperpath)
general.pathGenerator(logpath)
# load region coordinates per neuron:
print
print "Loading regions per neuron matrix..."
neuron_matrix = dict()
for bedfile in general.clean(os.listdir(bedpath), ".DS_Store"):
neuron = bedfile.replace(".bed", "")
neuron_matrix[neuron] = dict()
for bedline in open(bedpath + bedfile).readlines():
chrm, start, stop, region = bedline.strip("\n").split("\t")[:4]
neuron_matrix[neuron][region] = [chrm, int(start), int(stop)]
# load gene coordinates:
print "Loading gene/feature coordinates..."
coord_dict = dict()
ad = general.build_header_dict(annotationspath + option.reference)
inlines = open(annotationspath + option.reference).readlines()
inlines.pop(0)
for inline in inlines:
initems = inline.strip("\n").split("\t")
chrm, start, stop, feature, strand, name = initems[ad["chrm"]], initems[ad["start"]], initems[ad["end"]], initems[ad["feature"]], initems[ad["strand"]], initems[ad["name"]]
if strand == "+":
coord_dict[name] = [chrm, int(start)-option.up, int(start)+option.dn]
coord_dict[feature] = [chrm, int(start)-option.up, int(start)+option.dn]
elif strand == "-":
coord_dict[name] = [chrm, int(stop)-option.dn, int(stop)+option.up]
coord_dict[feature] = [chrm, int(stop)-option.dn, int(stop)+option.up]
# prepare output file:
f_output = open(hyperpath + "mapcells_test_" + option.name + "_" + option.cells + "_regions.txt", "w")
# define hypergeometric results file:
hyperfile = hyperpath + "mapcells_test_" + option.name + "_" + option.cells + "_comparison.txt"
# build header dict:
hd = general.build_header_dict(hyperfile)
# scan hypergeometric results file for cases of overlap:
print "Scanning hypergeometric results..."
i, j, k = 0, 0, 0
inlines = open(hyperfile).readlines()
print >>f_output, inlines.pop(0).strip("\n")
queriesMissed, queriesFound, targetsFound = list(), list(), list()
for inline in inlines:
initems = inline.strip("\n").split("\t")
query, target, pvalue = initems[hd["query"]], initems[hd["target"]], initems[hd["pvalue.adj"]]
if query in coord_dict:
i += 1
qchrm, qstart, qstop = coord_dict[query]
hits = False
for region in neuron_matrix[target]:
j += 1
rchrm, rstart, rstop = neuron_matrix[target][region]
if qchrm == rchrm:
if qstart <= rstart and qstop >= rstop:
k += 1
hits = True
if hits:
print >>f_output, inline.strip("\n")
queriesFound.append(query)
targetsFound.append(target)
else:
queriesMissed.append(query)
queriesMissed = sorted(list(set(queriesMissed)))
# close output file
f_output.close()
queriesFound = sorted(list(set(queriesFound)))
targetsFound = sorted(list(set(targetsFound)))
#pdb.set_trace()
print
print "Queries found in neurons:", len(queriesFound)
print "Neurons found in queries:", len(targetsFound)
print "Searches performed:", i
print "Searches performed (x Regions):", j
print "Searches with hits (x Regions):", k
print "Queries with coordinates and found:", ", ".join(queriesFound)
print "Queries missed (no coordinates):", len(queriesMissed)
print "\n".join(queriesMissed)
print
# false discovery rate mode:
elif option.mode == "test.fdr":
# update path to neurons:
neuronspath = neuronspath + option.peaks + "/"
# define input/output paths:
bedpath = neuronspath + option.technique + "/results/" + option.neurons + "/regions/bed/"
querypath = cellsetpath + option.query + "/"
targetpath = cellsetpath + option.target + "/"
hyperpath = comparepath + option.name + "/hyper/"
logpath = comparepath + option.name + "/log/"
general.pathGenerator(hyperpath)
general.pathGenerator(logpath)
# load region coordinates per neuron:
print
print "Loading regions per neuron matrix..."
neuron_matrix = dict()
for bedfile in general.clean(os.listdir(bedpath), ".DS_Store"):
neuron = bedfile.replace(".bed", "")
neuron_matrix[neuron] = dict()
for bedline in open(bedpath + bedfile).readlines():
chrm, start, stop, region = bedline.strip("\n").split("\t")[:4]
neuron_matrix[neuron][region] = [chrm, int(start), int(stop)]
# load gene coordinates:
print "Loading gene/feature coordinates..."
coord_dict = dict()
ad = general.build_header_dict(annotationspath + option.reference)
inlines = open(annotationspath + option.reference).readlines()
inlines.pop(0)
for inline in inlines:
initems = inline.strip("\n").split("\t")
chrm, start, stop, feature, strand, name = initems[ad["chrm"]], initems[ad["start"]], initems[ad["end"]], initems[ad["feature"]], initems[ad["strand"]], initems[ad["name"]]
if strand == "+":
coord_dict[name] = [chrm, int(start)-option.up, int(start)+option.dn]
coord_dict[feature] = [chrm, int(start)-option.up, int(start)+option.dn]
elif strand == "-":
coord_dict[name] = [chrm, int(stop)-option.dn, int(stop)+option.up]
coord_dict[feature] = [chrm, int(stop)-option.dn, int(stop)+option.up]
# prepare output file:
f_output = open(hyperpath + "mapcells_test_" + option.name + "_" + option.cells + "_fdr.txt", "w")
# define hypergeometric results file:
hyperfile = hyperpath + "mapcells_test_" + option.name + "_" + option.cells + "_comparison.txt"
# build header dict:
hd = general.build_header_dict(hyperfile)
# load positive hypergeometric results:
print "Loading hypergeometric results (hits)..."
inlines = open(hyperfile).readlines()
inlines.pop(0)
hyper_matrix, hyperTargets = dict(), list()
for inline in inlines:
initems = inline.strip("\n").split("\t")
query, target, pvalue = initems[hd["query"]], initems[hd["target"]], initems[hd["pvalue.adj"]]
if not query in hyper_matrix:
hyper_matrix[query] = dict()
hyper_matrix[query][target] = float(pvalue)
if not target in hyperTargets:
hyperTargets.append(target)
# select the best matching neuron for each query:
match_matrix = dict()
print "Scanning hypergeometric results per query..."
i, j, k = 0, 0, 0
positiveRate, negativeRate, matchTargets = list(), list(), list()
for query in hyper_matrix:
if query in coord_dict:
i += 1
qchrm, qstart, qstop = coord_dict[query]
for target in neuron_matrix:
j += 1
hits = 0
for region in neuron_matrix[target]:
rchrm, rstart, rstop = neuron_matrix[target][region]
if qchrm == rchrm:
if qstart <= rstart and qstop >= rstop:
hits += 1
if hits != 0:
if not query in match_matrix:
match_matrix[query] = dict()
match_matrix[query][target] = float(hits)/len(neuron_matrix[target])
if not target in matchTargets:
matchTargets.append(target)
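# Note (added for clarity): match_matrix[query][target] is the fraction of the target
# neuron's regions that lie entirely within the query gene's promoter window
# (same chromosome, qstart <= rstart and qstop >= rstop), as counted above.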
#print hyper_matrix.keys()
#print match_matrix.keys()
#print query
#print target
#print match_matrix[query][target]
#pdb.set_trace()
# Test A
"""
print
print "Testing positive and negative hits..."
positiveRate, negativeRate, unknownRate = list(), list(), list()
for query in match_matrix:
hits = general.valuesort(match_matrix[query])
hits.reverse()
target = hits[0]
if query in hyper_matrix and target in hyper_matrix[query]:
positiveRate.append(query + ":" + target)
print "+", query, target, match_matrix[query][target]
else:
print "-", query, target, match_matrix[query][target]
negativeRate.append(query + ":" + target)
if query in hyper_matrix:
unknownRate.append(query + ":" + target)
print "True Positive Rate:", len(positiveRate), 100*float(len(positiveRate))/(len(positiveRate)+len(negativeRate))
print "False Positive Rate:", len(negativeRate), 100*float(len(negativeRate))/(len(positiveRate)+len(negativeRate))
print "False Unknown Rate:", len(unknownRate), 100*float(len(unknownRate))/(len(positiveRate)+len(unknownRate))
print
"""
# Test B
"""
print
print "Testing positive and negative hits..."
positiveRate, negativeRate, unknownRate = list(), list(), list()
for query in hyper_matrix:
hits = 0
for target in general.valuesort(hyper_matrix[query]):
if query in match_matrix and target in match_matrix[query]:
hits += 1
if hits != 0:
positiveRate.append(query + ":" + target)
else:
negativeRate.append(query + ":" + target)
if query in match_matrix:
unknownRate.append(query + ":" + target)
print "True Positive Rate:", len(positiveRate), 100*float(len(positiveRate))/(len(positiveRate)+len(negativeRate))
print "False Positive Rate:", len(negativeRate), 100*float(len(negativeRate))/(len(positiveRate)+len(negativeRate))
print "False Unknown Rate:", len(unknownRate), 100*float(len(unknownRate))/(len(positiveRate)+len(unknownRate))
print
"""
# Test C
print
print "Testing positive and negative hits..."
positiveRate, negativeRate, unknownRate = list(), list(), list()
for query in match_matrix:
hits = 0
for target in general.valuesort(match_matrix[query]):
if query in hyper_matrix and target in hyper_matrix[query]:
hits += 1
if hits != 0:
positiveRate.append(query + ":" + target)
else:
negativeRate.append(query + ":" + target)
if query in hyper_matrix:
unknownRate.append(query + ":" + target)
print "Genes enriched in SOM neurons:", len(hyper_matrix)
print "Genes with promoter in SOM neurons:", len(match_matrix)
print "Neurons enriched in gene expression:", len(hyperTargets)
print "Neurons with gene promoter matches:", len(matchTargets)
print "True Positive Rate:", len(positiveRate), 100*float(len(positiveRate))/(len(positiveRate)+len(negativeRate))
print "False Positive Rate:", len(negativeRate), 100*float(len(negativeRate))/(len(positiveRate)+len(negativeRate))
print "False Unknown Rate (not enriched in any neuron):", len(unknownRate), 100*float(len(unknownRate))/(len(positiveRate)+len(unknownRate))
print
# scan each gene for cellular overlap in neurons where the promoter is found:
"""
print "Scanning positive and negative hits..."
i, j, k = 0, 0, 0
positiveRate, negativeRate = list(), list()
for query in hyper_matrix:
if query in coord_dict:
i += 1
qchrm, qstart, qstop = coord_dict[query]
for target in neuron_matrix:
j += 1
hits = 0
for region in neuron_matrix[target]:
rchrm, rstart, rstop = neuron_matrix[target][region]
if qchrm == rchrm:
if qstart <= rstart and qstop >= rstop:
hits += 1
if hits != 0:
if target in hyper_matrix[query]:
positiveRate.append(query + ":" + target)
else:
negativeRate.append(query + ":" + target)
"""
#print >>f_output, inlines.pop(0).strip("\n")
# close output file
f_output.close()
#print
#print "Queries found in neurons:", len(queriesFound)
#print "Neurons found in queries:", len(targetsFound)
#print "Searches performed:", i
#print "Searches performed (x Regions):", j
#print "Searches with hits (x Regions):", k
#print "Queries with coordinates and found:", ", ".join(queriesFound)
#print "Queries missed (no coordinates):", len(queriesMissed)
#print
# annotate tissue composition in neurons:
elif option.mode == "test.composition":
# update path to neurons:
neuronspath = neuronspath + option.peaks + "/"
# define input/output paths:
bedpath = neuronspath + option.technique + "/results/" + option.neurons + "/regions/bed/"
codespath = neuronspath + option.technique + "/results/" + option.neurons + "/codes/"
summarypath = neuronspath + option.technique + "/results/" + option.neurons + "/summary/"
querypath = cellsetpath + option.query + "/"
targetpath = cellsetpath + option.target + "/"
compositionpath = comparepath + option.name + "/composition/"
hyperpath = comparepath + option.name + "/hyper/"
logpath = comparepath + option.name + "/log/"
general.pathGenerator(compositionpath)
general.pathGenerator(hyperpath)
general.pathGenerator(logpath)
# load codes:
inlines = open(codespath + option.neurons + ".codes").readlines()
codes = inlines.pop(0).strip().split("\t")
codeDict = dict()
for inline in inlines:
initems = inline.strip().split("\t")
neuron = initems.pop(0)
codeDict["neuron" + neuron] = initems
# load cellular expression data:
print
print "Loading cellular annotation..."
annotationDict = general.build2(expressionpath + option.expression, id_column="cell", value_columns=["specific.tissue", "general.tissue", "class.tissue", "match.tissue"], skip=True, verbose=False)
# load tissue annotation matrixes:
print "Loading tissue annotations..."
#specificCounts = general.build2(expressionpath + option.infile, i="specific.tissue" , mode="values", skip=True, counter=True)
#generalCounts = general.build2(expressionpath + option.infile, i="general.tissue", mode="values", skip=True, counter=True)
#classCounts = general.build2(expressionpath + option.infile, i="class.tissue", mode="values", skip=True, counter=True)
totalCells = general.build2(expressionpath + option.expression, i="cell", x="specific.tissue", mode="values", skip=True)
totalCells = sorted(totalCells.keys())
# gather tissue labels
specificTissues, generalTissues, classTissues, matchTissues = list(), list(), list(), list()
for cell in annotationDict:
if not annotationDict[cell]["specific.tissue"] in specificTissues:
specificTissues.append(annotationDict[cell]["specific.tissue"])
if not annotationDict[cell]["general.tissue"] in generalTissues:
generalTissues.append(annotationDict[cell]["general.tissue"])
if not annotationDict[cell]["class.tissue"] in classTissues:
classTissues.append(annotationDict[cell]["class.tissue"])
if not annotationDict[cell]["match.tissue"] in matchTissues:
matchTissues.append(annotationDict[cell]["match.tissue"])
# load cells identified in each neuron:
print
print "Loading cell identities per neuron..."
neuronDict = general.build2(summarypath + "mapneurons_summary.txt", id_column="neuron", value_columns=["class.ids"])
# load cells counted in each neuron:
print "Loading cell counts per neuron..."
countMatrix, binaryMatrix = dict(), dict()
for neuron in os.listdir(bedpath):
inlines = open(bedpath + neuron).readlines()
neuron = neuron.replace(".bed", "")
countMatrix[neuron] = dict()
binaryMatrix[neuron] = dict()
for inline in inlines:
chrm, start, end, feature, score, strand, cell, regions = inline.strip().split("\t")
if not cell in countMatrix[neuron]:
countMatrix[neuron][cell] = 0
binaryMatrix[neuron][cell] = 1
countMatrix[neuron][cell] += 1
# generate tissue class scores:
cellList = list()
cellMatrix, specificMatrix, generalMatrix, classMatrix, matchMatrix = dict(), dict(), dict(), dict(), dict()
for neuron in neuronDict:
if not neuron in cellMatrix:
cellMatrix[neuron] = dict()
specificMatrix[neuron] = dict()
generalMatrix[neuron] = dict()
classMatrix[neuron] = dict()
matchMatrix[neuron] = dict()
for cell in neuronDict[neuron]["class.ids"].split(","):
specificTissue, generalTissue, classTissue, matchTissue = annotationDict[cell]["specific.tissue"], annotationDict[cell]["general.tissue"], annotationDict[cell]["class.tissue"], annotationDict[cell]["match.tissue"]
if not cell in cellMatrix[neuron]:
cellMatrix[neuron][cell] = 0
if not specificTissue in specificMatrix[neuron]:
specificMatrix[neuron][specificTissue] = 0
if not generalTissue in generalMatrix[neuron]:
generalMatrix[neuron][generalTissue] = 0
if not classTissue in classMatrix[neuron]:
classMatrix[neuron][classTissue] = 0
if not matchTissue in matchMatrix[neuron]:
matchMatrix[neuron][matchTissue] = 0
cellList.append(cell)
cellMatrix[neuron][cell] += binaryMatrix[neuron][cell]
specificMatrix[neuron][specificTissue] += binaryMatrix[neuron][cell]
generalMatrix[neuron][generalTissue] += binaryMatrix[neuron][cell]
classMatrix[neuron][classTissue] += binaryMatrix[neuron][cell]
matchMatrix[neuron][matchTissue] += binaryMatrix[neuron][cell]
cellList = sorted(list(set(cellList)))
# Note: The above dictionaries record how many of the cell IDs
# in a given neuron correspond to a given tissue.
# prepare class tallies for normalization:
specificTallies, generalTallies, classTallies, matchTallies = dict(), dict(), dict(), dict()
for cell in cellList:
if not annotationDict[cell]["specific.tissue"] in specificTallies:
specificTallies[annotationDict[cell]["specific.tissue"]] = 0
if not annotationDict[cell]["general.tissue"] in generalTallies:
generalTallies[annotationDict[cell]["general.tissue"]] = 0
if not annotationDict[cell]["class.tissue"] in classTallies:
classTallies[annotationDict[cell]["class.tissue"]] = 0
if not annotationDict[cell]["match.tissue"] in matchTallies:
matchTallies[annotationDict[cell]["match.tissue"]] = 0
specificTallies[annotationDict[cell]["specific.tissue"]] += 1
generalTallies[annotationDict[cell]["general.tissue"]] += 1
classTallies[annotationDict[cell]["class.tissue"]] += 1
matchTallies[annotationDict[cell]["match.tissue"]] += 1
# Note: The above tallies record the number of cells (observed,
# in neurons) that correspond to each tissue.***
# prepare output files:
f_output = open(compositionpath + "mapcells_composition_codes.txt", "w")
c_output = open(compositionpath + "mapcells_composition_cellular.txt", "w")
s_output = open(compositionpath + "mapcells_composition_specific.txt", "w")
g_output = open(compositionpath + "mapcells_composition_general.txt", "w")
l_output = open(compositionpath + "mapcells_composition_class.txt", "w")
m_output = open(compositionpath + "mapcells_composition_match.txt", "w")
# print out headers:
print >>f_output, "\t".join(["neuron", "id", "fraction.ids"])
print >>c_output, "\t".join(["neuron", "id", "id.found", "id.cells", "fraction.ids", "fraction.sum", "fraction.max", "fraction.nrm", "pvalue", "pvalue.adj", "score"])
print >>s_output, "\t".join(["neuron", "id", "id.found", "id.cells", "fraction.ids", "fraction.sum", "fraction.max", "fraction.nrm", "pvalue", "pvalue.adj", "score"])
print >>g_output, "\t".join(["neuron", "id", "id.found", "id.cells", "fraction.ids", "fraction.sum", "fraction.max", "fraction.nrm", "pvalue", "pvalue.adj", "score"])
print >>l_output, "\t".join(["neuron", "id", "id.found", "id.cells", "fraction.ids", "fraction.sum", "fraction.max", "fraction.nrm", "pvalue", "pvalue.adj", "score"])
print >>m_output, "\t".join(["neuron", "id", "id.found", "id.cells", "fraction.ids", "fraction.sum", "fraction.max", "fraction.nrm", "pvalue", "pvalue.adj", "score"])
# Note: We will now output the following information:
# id.found : is ID found in neuron?
# id.cells : number of cells (diversity) that match ID.
# fraction.ids: fraction of ID diversity in neuron.
# fraction.sum: fraction of cellular diversity in neuron that matches ID.
# fraction.rat: fraction of cellular diversity in neuron that matches ID, normalized by the representation of the ID (described here but not among the exported columns above).
# fraction.max: fraction of cellular diversity in neuron as normalized by the ID with the highest cellular diversity in neuron.
# fraction.nrm: fraction of cellular diversity in neuron as normalized by the total number of cells with said ID.
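# Illustrative example (added; hypothetical numbers): if a neuron contains 10 distinct
# annotated cells, 4 of which belong to tissue T, the most represented tissue in that
# neuron has 6 cells, and 40 of all cells seen in neurons belong to tissue T, then for
# id = T: id.found = 1, fraction.sum = 4/10, fraction.max = 4/6, fraction.nrm = 4/40.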
# determine missed tissues:
print
specificMissed, generalMissed, classMissed, matchMissed = set(specificTissues).difference(set(specificTallies.keys())), set(generalTissues).difference(set(generalTallies.keys())), set(classTissues).difference(set(classTallies.keys())), set(matchTissues).difference(set(matchTallies.keys()))
print "Specific tissues not found:", str(len(specificMissed)) + " (" + str(len(specificTissues)) + ") ; " + ",".join(sorted(specificMissed))
print "General tissues not found:", str(len(generalMissed)) + " (" + str(len(generalTissues)) + ") ; " + ",".join(sorted(generalMissed))
print "Class tissues not found:", str(len(classMissed)) + " (" + str(len(classTissues)) + ") ; " + ",".join(sorted(classMissed))
print "Match tissues not found:", str(len(matchMissed)) + " (" + str(len(matchTissues)) + ") ; " + ",".join(sorted(matchMissed))
print
# export the fractions:
print "Exporting representation per neuron..."
for neuron in sorted(neuronDict.keys()):
if neuron in codeDict:
# export factor signals:
index = 0
for code in codes:
print >>f_output, "\t".join(map(str, [neuron, code, codeDict[neuron][index]]))
index += 1
# export cell counts:
for cell in cellList:
adjust = len(neuronDict.keys())*len(cellList)
types = len(cellMatrix[neuron].keys())
total = sum(cellMatrix[neuron].values())
maxxx = max(cellMatrix[neuron].values())
if cell in cellMatrix[neuron]:
count = float(cellMatrix[neuron][cell])
index = 1
else:
count = 0
index = 0
print >>c_output, "\t".join(map(str, [neuron, cell, index, count, float(index)/types, float(count)/total, float(count)/maxxx, 1, 1, 1, 0]))
# export specific tissue enrichment:
for specificTissue in sorted(specificTallies.keys()):
types = len(specificMatrix[neuron].keys())
total = sum(specificMatrix[neuron].values())
maxxx = max(specificMatrix[neuron].values())
tally = specificTallies[specificTissue]
if specificTissue in specificMatrix[neuron]:
count = float(specificMatrix[neuron][specificTissue])
index = 1
else:
count = 0
index = 0
adjust = len(neuronDict.keys())*len(specificTallies.keys())
universe = sum(specificTallies.values())
pvalue = hyper.fishers(count, universe, total, tally, adjust=1, method="right")
adjPvalue = hyper.limit(pvalue*adjust)
score = hyper.directional(count, universe, total, tally, adjust=adjust)
print >>s_output, "\t".join(map(str, [neuron, specificTissue, index, count, float(index)/types, float(count)/total, float(count)/maxxx, float(count)/tally, pvalue, adjPvalue, score]))
# export general tissue enrichment:
for generalTissue in sorted(generalTallies.keys()):
types = len(generalMatrix[neuron].keys())
total = sum(generalMatrix[neuron].values())
maxxx = max(generalMatrix[neuron].values())
tally = generalTallies[generalTissue]
if generalTissue in generalMatrix[neuron]:
count = float(generalMatrix[neuron][generalTissue])
index = 1
else:
count = 0
index = 0
adjust = len(neuronDict.keys())*len(generalTallies.keys())
universe = sum(generalTallies.values())
pvalue = hyper.fishers(count, universe, total, tally, adjust=1, method="right")
adjPvalue = hyper.limit(pvalue*adjust)
score = hyper.directional(count, universe, total, tally, adjust=adjust)
print >>g_output, "\t".join(map(str, [neuron, generalTissue, index, count, float(index)/types, float(count)/total, float(count)/maxxx, float(count)/tally, pvalue, adjPvalue, score]))
# export class tissue enrichment:
for classTissue in sorted(classTallies.keys()):
types = len(classMatrix[neuron].keys())
total = sum(classMatrix[neuron].values())
maxxx = max(classMatrix[neuron].values())
tally = classTallies[classTissue]
if classTissue in classMatrix[neuron]:
count = float(classMatrix[neuron][classTissue])
index = 1
else:
count = 0
index = 0
adjust = len(neuronDict.keys())*len(classTallies.keys())
universe = sum(classTallies.values())
pvalue = hyper.fishers(count, universe, total, tally, adjust=1, method="right")
adjPvalue = hyper.limit(pvalue*adjust)
score = hyper.directional(count, universe, total, tally, adjust=adjust)
print >>l_output, "\t".join(map(str, [neuron, classTissue, index, count, float(index)/types, float(count)/total, float(count)/maxxx, float(count)/tally, pvalue, adjPvalue, score]))
# export match tissue enrichment:
for matchTissue in sorted(matchTallies.keys()):
types = len(matchMatrix[neuron].keys())
total = sum(matchMatrix[neuron].values())
maxxx = max(matchMatrix[neuron].values())
tally = matchTallies[matchTissue]
if matchTissue in matchMatrix[neuron]:
count = float(matchMatrix[neuron][matchTissue])
index = 1
else:
count = 0
index = 0
adjust = len(neuronDict.keys())*len(matchTallies.keys())
universe = sum(matchTallies.values())
pvalue = hyper.fishers(count, universe, total, tally, adjust=1, method="right")
adjPvalue = hyper.limit(pvalue*adjust)
score = hyper.directional(count, universe, total, tally, adjust=adjust)
print >>m_output, "\t".join(map(str, [neuron, matchTissue, index, count, float(index)/types, float(count)/total, float(count)/maxxx, float(count)/tally, pvalue, adjPvalue, score]))
# close outputs:
f_output.close()
c_output.close()
s_output.close()
g_output.close()
l_output.close()
m_output.close()
print
print "Combining cell and factor (mix) information.."
# load input factor information:
factorDict = general.build2(compositionpath + "mapcells_composition_codes.txt", i="neuron", j="id", x="fraction.ids", mode="matrix")
# define input cell/tissue files:
infiles = ["mapcells_composition_cellular.txt", "mapcells_composition_specific.txt", "mapcells_composition_general.txt", "mapcells_composition_class.txt", "mapcells_composition_match.txt"]
for infile in infiles:
print "Processing:", infile
# initiate neuron data extraction:
f_output = open(compositionpath + infile.replace(".txt", ".mix"), "w")
inheader = open(compositionpath + infile).readline().strip().split("\t")
inlines = open(compositionpath + infile).readlines()
print >>f_output, inlines.pop(0).strip()
# append factor information to neuron data:
processed = list()
for inline in inlines:
neuron, label = inline.strip().split("\t")[:2]
if not neuron in processed:
processed.append(neuron)
for factor in factorDict[neuron]:
output = list()
for column in inheader:
if column == "neuron":
output.append(neuron)
elif column == "id":
output.append(factor)
elif column in ["pvalue", "pvalue.adj"]:
output.append("1")
else:
output.append(factorDict[neuron][factor])
print >>f_output, "\t".join(output)
print >>f_output, inline.strip()
# close outputs:
f_output.close()
print
# examine co-association correspondence between genes:
elif option.mode == "test.similarity":
# update path to neurons:
neuronspath = neuronspath + option.peaks + "/"
# define input/output paths:
bedpath = neuronspath + option.technique + "/results/" + option.neurons + "/regions/bed/"
querypath = cellsetpath + option.query + "/"
targetpath = cellsetpath + option.target + "/"
hyperpath = comparepath + option.name + "/hyper/"
logpath = comparepath + option.name + "/log/"
general.pathGenerator(hyperpath)
general.pathGenerator(logpath)
# load query cells:
print
print "Loading query cells..."
query_matrix = dict()
for query in os.listdir(querypath):
queryCells = general.clean(open(querypath + query).read().split("\n"), "")
query_matrix[query] = queryCells
#print query, queryCells
print "Generating merged region file..."
queryfile = hyperpath + "query.bed"
regionsfile = hyperpath + "regions.bed"
overlapfile = hyperpath + "overlap.bed"
joint = " " + bedpath
command = "cat " + bedpath + joint.join(os.listdir(bedpath)) + " > " + regionsfile
os.system(command)
# load gene coordinates:
print "Loading gene/feature coordinates..."
coord_dict = dict()
ad = general.build_header_dict(annotationspath + option.reference)
inlines = open(annotationspath + option.reference).readlines()
inlines.pop(0)
for inline in inlines:
initems = inline.strip("\n").split("\t")
chrm, start, stop, feature, strand, name = initems[ad["#chrm"]], initems[ad["start"]], initems[ad["end"]], initems[ad["feature"]], initems[ad["strand"]], initems[ad["name"]]
if strand == "+":
start, end = int(start)-option.up, int(start)+option.dn
elif strand == "-":
start, end = int(stop)-option.dn, int(stop)+option.up
for query in query_matrix:
if query == feature or query == name:
f_output = open(queryfile, "w")
print >>f_output, "\t".join(map(str, [chrm, start, end, feature, 0, strand]))
f_output.close()
overlaps = list()
command = "intersectBed -u -a " + regionsfile + " -b " + queryfile + " > " + overlapfile
os.system(command)
for inline in open(overlapfile).readlines():
overlaps.append(inline.strip())
print query, len(overlaps)
if len(overlaps) > 0:
pdb.set_trace()
break
# tree building mode:
elif option.mode == "tree.build":
# build cell-expression matrix:
print
print "Loading cellular expression..."
quantitation_matrix, expression_matrix, tracking_matrix, trackedCells = expressionBuilder(expressionfile=option.expression, path=expressionpath, cutoff=option.fraction, minimum=option.minimum, metric="fraction.expression")
# store cell-parent relationships:
print "Loading cell-parent relationships..."
cell_dict, parent_dict, pedigreeCells = relationshipBuilder(pedigreefile=option.pedigree, path=extraspath, mechanism="simple")
print "Pedigree cells:", len(pedigreeCells)
print "Tracked cells:", len(trackedCells)
# trim tree:
cell_tree, parent_tree = dict(), dict()
for parent in parent_dict:
for cell in parent_dict[parent]:
ascendants = ascendantsCollector(cell, parent_dict, cell_dict, ascendants=list())
process = False
if option.lineages == "complete":
process = True
elif parent in trackedCells and cell in trackedCells:
process = True
elif option.ascendants != "OFF" and len(ascendants) < int(option.ascendants):
process = True
if process:
if not parent in parent_tree:
parent_tree[parent] = list()
parent_tree[parent].append(cell)
cell_tree[cell] = parent
tree = treeBuilder(parent_tree, cell_tree)
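# Note (added): treeBuilder presumably returns a nested dictionary keyed by cell name;
# the subtree rooted at "P0" (the C. elegans zygote) is what gets serialized to JSON below.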
#print sorted(tree.keys())
#print tree["P0"]
#pdb.set_trace()
f_output = open(cellspath + "mapcells_tree_" + option.name + ".json", "w")
json.dump(tree["P0"], f_output)
f_output.close()
# tree coloring mode:
elif option.mode == "tree.color":
# build cell-expression matrix:
print
print "Loading cellular expression..."
quantitation_matrix, expression_matrix, tracking_matrix, trackedCells = expressionBuilder(expressionfile=option.expression, path=expressionpath, cutoff=option.fraction, minimum=option.minimum, metric="fraction.expression")
# store cell-parent relationships:
print "Loading cell-parent relationships..."
cell_dict, parent_dict, pedigreeCells = relationshipBuilder(pedigreefile=option.pedigree, path=extraspath, mechanism="simple")
print "Pedigree cells:", len(pedigreeCells)
print "Tracked cells:", len(trackedCells)
# trim tree:
cell_tree, parent_tree = dict(), dict()
for parent in parent_dict:
for cell in parent_dict[parent]:
ascendants = ascendantsCollector(cell, parent_dict, cell_dict, ascendants=list())
process = False
if option.lineages == "complete":
process = True
elif parent in trackedCells and cell in trackedCells:
process = True
elif option.ascendants != "OFF" and len(ascendants) < int(option.ascendants):
process = True
if process:
if not parent in parent_tree:
parent_tree[parent] = list()
parent_tree[parent].append(cell)
cell_tree[cell] = parent
# build header dict:
hd = general.build_header_dict(option.infile)
# load input lines:
pvalue_matrix, cells_matrix = dict(), dict()
inlines = open(option.infile).readlines()
inlines.pop(0)
for inline in inlines:
initems = inline.strip("\n").split("\t")
query, target, pvalue, cells = initems[hd["query"]], initems[hd["target"]], initems[hd["pvalue"]], initems[hd["cells"]]
if not query in pvalue_matrix:
pvalue_matrix[query] = dict()
cells_matrix[query] = dict()
pvalue_matrix[query][target] = float(pvalue)
cells_matrix[query][target] = cells.split(",")
# scan inputs, selecting the targets of highest enrichment and generating color tree for each:
k = 0
print
print "Scanning queries..."
for query in cells_matrix:
target = general.valuesort(pvalue_matrix[query])[0]
cells = cells_matrix[query][target]
print query, target, pvalue_matrix[query][target], len(cells)
tree = treeBuilder(parent_tree, cell_tree, highlights=cells)
#print sorted(tree.keys())
#print tree["P0"]
#pdb.set_trace()
f_output = open(cellspath + "mapcells_tree_" + option.name + "_" + query + "-" + target + ".json", "w")
json.dump(tree["P0"], f_output)
f_output.close()
k += 1
print
print "Queries processed:", k
print
if __name__ == "__main__":
main()
print "Completed:", time.asctime(time.localtime())
#python mapCells.py --path ~/meTRN --mode import --infile murray_2012_supplemental_dataset_1_per_gene.txt --name murray # Retired!
#python mapCells.py --path ~/meTRN --mode import --infile waterston_avgExpression.csv --name waterston --measure max.expression
#python mapCells.py --path ~/meTRN --mode import --infile waterston_avgExpression.csv --name waterston --measure avg.expression
#python mapCells.py --path ~/meTRN --mode check.status --peaks optimal_standard_factor_sx_rawraw --name waterston --measure avg.expression
#python mapCells.py --path ~/meTRN --mode check.status --peaks optimal_standard_factor_ex_rawraw --name waterston --measure avg.expression
#python mapCells.py --path ~/meTRN/ --mode build.lineages --pedigree waterston_cell_pedigree.csv --expression mapcells_avgExp_waterston_expression_tracked --name waterston.tracked --method builder --lineages tracked --descendants OFF --ascendants OFF --limit 10000
#python mapCells.py --path ~/meTRN/ --mode build.lineages --pedigree waterston_cell_pedigree.csv --expression mapcells_avgExp_waterston_expression_tracked --name waterston.tracked --method builder --lineages complete --descendants OFF --ascendants OFF --limit 10000
#python mapCells.py --path ~/meTRN/ --mode test.lineages --pedigree waterston_cell_pedigree.csv --expression mapcells_avgExp_waterston_expression_tracked --name waterston.tracked --method builder --lineages tracked --descendants OFF --ascendants OFF --limit 10000
#python mapCells.py --path ~/meTRN/ --mode test.lineages --pedigree waterston_cell_pedigree.csv --expression mapcells_avgExp_waterston_expression_assayed --name waterston.assayed --method builder --lineages tracked --descendants OFF --ascendants OFF --limit 10000
#python mapCells.py --path ~/meTRN --organism ce --mode robust --infile waterston_avgExpression.csv
| 42.815027 | 384 | 0.692007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50,185 | 0.292595 |
b86edb269cd9e7e592b4cc82203020de3b8e84a3 | 1,838 | py | Python | gravity/bak/gravity3.py | baijianhua/pymath | a96ebbd8c8ac646c436d8bf33cb01764a948255d | ["MIT"] | null | null | null | gravity/bak/gravity3.py | baijianhua/pymath | a96ebbd8c8ac646c436d8bf33cb01764a948255d | ["MIT"] | null | null | null | gravity/bak/gravity3.py | baijianhua/pymath | a96ebbd8c8ac646c436d8bf33cb01764a948255d | ["MIT"] | null | null | null |
# https://stackoverflow.com/questions/47295473/how-to-plot-using-matplotlib-python-colahs-deformed-grid
"""
The shape is still not right. The grid changes too much near the coordinate axes, on both
the horizontal and the vertical axis. The deformation should be centered on the origin and
shared evenly by all grid cells, regardless of whether a cell is close to an axis.
The goal of the deformation: place a sphere or cube somewhere, and let the grid cells near
that object be deformed; the farther a cell is from the object, the smaller the effect,
until it becomes negligible.
One requirement is that the cells near the object are affected evenly, not some more and some less.
Would polar coordinates be a better choice? Probably not either: how would polar coordinates
represent the original coordinate system? Polar coordinates have no flat region; they are
non-uniform everywhere.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
EDGE = 5
STEP = 2 * EDGE + 1
def plot_grid(x, y, ax=None, **kwargs):
ax = ax or plt.gca()
segs1 = np.stack((x, y), axis=2)
segs2 = segs1.transpose(1, 0, 2)
ax.add_collection(LineCollection(segs1, **kwargs))
ax.add_collection(LineCollection(segs2, **kwargs))
ax.autoscale()
def sig(i):
# return 1
return -1 if (i < 0) else 1
def f1(x: np.array, y: np.array):
u = []
v = []
for i in range(0, len(x)):
ui = []
vi = []
for j in range(0, len(x[i])):
# This picks up the coordinates of each grid point, row by row, from left to right.
xx = x[i][j]
yy = y[i][j]
print("x=", xx, "y=", yy)
expn = - 0.2 * (xx ** 2 + yy ** 2)
# The farther the point is from the center, the smaller delta becomes. When x = +-1 or y = +-1,
delta = np.exp(expn)
print(expn)
uu = xx if xx == 0 else xx + sig(xx) * delta
vv = yy if yy == 0 else yy + sig(yy) * delta
print("uu=", uu, "vv=", vv)
ui.append(uu)
vi.append(vv)
# vi.append(yy)
# ui.append(xx)
u.append(ui)
v.append(vi)
return u, v
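# Added sketch (not part of the original script): a radial variant of f1 that addresses the
# concern in the docstring above -- it displaces every point along the direction from the
# origin, so the deformation is rotationally symmetric instead of being applied to x and y
# independently (which exaggerates the change near the axes).
def f_radial(x: np.array, y: np.array):
    u, v = [], []
    for i in range(0, len(x)):
        ui, vi = [], []
        for j in range(0, len(x[i])):
            xx, yy = x[i][j], y[i][j]
            r = np.sqrt(xx ** 2 + yy ** 2)
            delta = np.exp(-0.2 * r ** 2)
            if r == 0:
                # the origin itself is left in place
                ui.append(xx)
                vi.append(yy)
            else:
                # push the point outward along the radial unit vector (xx/r, yy/r)
                ui.append(xx + delta * xx / r)
                vi.append(yy + delta * yy / r)
        u.append(ui)
        v.append(vi)
    return u, v
# To compare with f1 once the grids are built below:
# distx2, disty2 = f_radial(grid_x, grid_y); plot_grid(distx2, disty2, ax=ax, color="C1")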
fig, ax = plt.subplots()
ax.set_aspect('equal')
grid_x, grid_y = np.meshgrid(np.linspace(-EDGE, EDGE, STEP), np.linspace(-EDGE, EDGE, STEP))
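# With EDGE = 5 and STEP = 2*EDGE + 1 = 11, this builds an 11x11 grid of integer
# coordinates spanning -5..5 on both axes.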
plot_grid(grid_x, grid_y, ax=ax, color="lightgrey")
distx, disty = f1(grid_x, grid_y)
plot_grid(distx, disty, ax=ax, color="C0")
plt.show()
| 24.506667 | 103 | 0.581066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 968 | 0.414384 |
b86fc82da8dc94ff37ad24a384c231a1a48f881c | 7,780 | py | Python | IR_Extraction.py | Kazuhito00/yolo2_onnx | 95c5e2063071d610ec8e98963f3639e0b25efb59 | ["MIT"] | 15 | 2018-07-02T19:11:09.000Z | 2022-03-31T07:12:53.000Z | IR_Extraction.py | Kazuhito00/yolo2_onnx | 95c5e2063071d610ec8e98963f3639e0b25efb59 | ["MIT"] | null | null | null | IR_Extraction.py | Kazuhito00/yolo2_onnx | 95c5e2063071d610ec8e98963f3639e0b25efb59 | ["MIT"] | 9 | 2018-05-08T01:58:53.000Z | 2022-01-28T06:36:02.000Z |
from Onnx import make_dir, OnnxImportExport
import subprocess
import pickle
import os
import numpy as np
import time
def generate_svg(modelName, marked_nodes=[]):
"""
generate SVG figure from existed ONNX file
"""
if marked_nodes ==[]:
addfilenamestr = ""
add_command_str = ""
else:
addfilenamestr = "_marked"
marked_str = '_'.join([str(e) for e in marked_nodes])
add_command_str = " --marked 1 --marked_list {}".format(marked_str)
onnxfilepath = "onnx/{}.onnx".format(modelName)
dotfilepath = "dot/{}{}.dot".format(modelName,addfilenamestr)
svgfilepath = "svg/{}{}.svg".format(modelName,addfilenamestr)
# check if onnx file exist
if not os.path.isfile(os.getcwd()+"/"+onnxfilepath):
print('generate_svg Error! Onnx file not exist!')
return
else:
make_dir("dot")
make_dir("svg")
subprocess.call("python net_drawer.py --input {} --output {} --embed_docstring {}".format(onnxfilepath,dotfilepath,add_command_str), shell=True) # onnx -> dot
subprocess.call("dot -Tsvg {} -o {}".format(dotfilepath,svgfilepath), shell=True)# dot -> svg
print('generate_svg ..end')
return svgfilepath
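# Example usage (added; illustrative only -- the model name and node indices are
# hypothetical, and Graphviz's `dot` must be on the PATH):
# generate_svg("yolov2", marked_nodes=[0, 3, 7])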
def get_init_shape_dict(rep):
"""
Extract Shape of Initial Input Object
e.g.
if
%2[FLOAT, 64x3x3x3]
%3[FLOAT, 64]
then
return {u'2':(64,3,3,3),u'3':(64,)}
"""
d = {}
if hasattr(rep, 'input_dict'):
for key in rep.input_dict:
tensor = rep.input_dict[key]
shape = np.array(tensor.shape, dtype=int)
d.update({key:shape})
return d
elif hasattr(rep, 'predict_net'):
for k in rep.predict_net.tensor_dict.keys():
tensor = rep.predict_net.tensor_dict[k]
shape = np.array(tensor.shape.as_list(),dtype=float).astype(int)
d.update({k: shape})
return d
else:
print ("rep Error! check your onnx version, it might not support IR_Extraction operation!")
return d
def get_output_shape_of_node(node, shape_dict, backend, device = "CPU"):# or "CUDA:0"
"""
generate output_shape of a NODE
"""
out_idx = node.output[0]
input_list = node.input # e.g. ['1', '2']
inps = []
for inp_idx in input_list:
inp_shape = shape_dict[inp_idx]
rand_inp = np.random.random(size=inp_shape).astype('float16')
inps.append(rand_inp)
try:
out = backend.run_node(node=node, inputs=inps, device=device)
out_shape = out[0].shape
except:
out_shape = shape_dict[input_list[0]]
print("Op: [{}] run_node error! return inp_shape as out_shape".format(node.op_type))
return out_shape, out_idx
def get_overall_shape_dict(model, init_shape_dict, backend):
"""
generate output_shape of a MODEL GRAPH
"""
shape_dict = init_shape_dict.copy()
for i, node in enumerate(model.graph.node):
st=time.time()
out_shape, out_idx = get_output_shape_of_node(node, shape_dict, backend)
shape_dict.update({out_idx:out_shape})
print("out_shape: {} for Obj[{}], node [{}][{}]...{:.2f} sec".format(out_shape, out_idx, i, node.op_type,time.time()-st))
return shape_dict
def get_graph_order(model):
"""
Find Edges (each link) in MODEL GRAPH
"""
Node2nextEntity = {}
Entity2nextNode = {}
for Node_idx, node in enumerate(model.graph.node):
# node input
for Entity_idx in node.input:
if not Entity_idx in Entity2nextNode.keys():
Entity2nextNode.update({Entity_idx:Node_idx})
# node output
for Entity_idx in node.output:
if not Node_idx in Node2nextEntity.keys():
Node2nextEntity.update({Node_idx:Entity_idx})
return Node2nextEntity, Entity2nextNode
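# Illustrative (added; hypothetical indices): if node 0 writes entity '9' and node 1 reads it,
# then Node2nextEntity = {0: '9', ...} and Entity2nextNode = {'9': 1, ...}; i.e. each node maps
# to its first output entity, and each entity maps to the first node that consumes it.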
def get_kernel_shape_dict(model, overall_shape_dict):
"""
Get Input/Output/Kernel Shape for Conv in MODEL GRAPH
"""
conv_d = {}
for i, node in enumerate(model.graph.node):
if node.op_type == 'Conv':
for attr in node.attribute:
if attr.name == "kernel_shape":
kernel_shape = np.array(attr.ints, dtype=int)
break
inp_idx = node.input[0]
out_idx = node.output[0]
inp_shape = overall_shape_dict[inp_idx]
out_shape = overall_shape_dict[out_idx]
conv_d.update({i:(inp_idx, out_idx, inp_shape, out_shape, kernel_shape)})
print("for node [{}][{}]:\ninp_shape: {} from obj[{}], \nout_shape: {} from obj[{}], \nkernel_shape: {} \n"
.format(i, node.op_type, inp_shape, inp_idx, out_shape, out_idx, kernel_shape ))
return conv_d
def calculate_num_param_n_num_flops(conv_d):
"""
calculate num_param and num_flops from conv_d
"""
n_param = 0
n_flops = 0
for k in conv_d:
#i:(inp_idx, out_idx, inp_shape, out_shape, kernel_shape)
inp_shape, out_shape, kernel_shape = conv_d[k][2],conv_d[k][3],conv_d[k][4]
h,w,c,n,H,W = kernel_shape[0], kernel_shape[1], inp_shape[1], out_shape[1], out_shape[2], out_shape[3]
n_param += n*(h*w*c+1)
n_flops += H*W*n*(h*w*c+1)
return n_param, n_flops
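# Illustrative check (added; values are hypothetical): for a 3x3 conv with 3 input channels,
# 64 output channels and a 112x112 output map, the formulas above give
#   n_param = 64 * (3*3*3 + 1)             = 1,792
#   n_flops = 112 * 112 * 64 * (3*3*3 + 1) = 22,478,848
# e.g.:
# conv_d_example = {0: ('1', '2', np.array([1, 3, 224, 224]), np.array([1, 64, 112, 112]), np.array([3, 3]))}
# calculate_num_param_n_num_flops(conv_d_example)  # -> (1792, 22478848)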
def find_sequencial_nodes(model, Node2nextEntity, Entity2nextNode, search_target=['Conv', 'Add', 'Relu', 'MaxPool'], if_print = False):
"""
Search Where is Subgroup
"""
found_nodes = []
for i, node in enumerate(model.graph.node):
if if_print: print("\nnode[{}] ...".format(i))
n_idx = i #init
is_fit = True
for tar in search_target:
try:
assert model.graph.node[n_idx].op_type == tar #check this node
if if_print: print("node[{}] fit op_type [{}]".format(n_idx, tar))
e_idx = Node2nextEntity[n_idx] #find next Entity
n_idx = Entity2nextNode[e_idx] #find next Node
#if if_print: print(e_idx,n_idx)
except:
is_fit = False
if if_print: print("node[{}] doesn't fit op_type [{}]".format(n_idx, tar))
break
if is_fit:
if if_print: print("node[{}] ...fit!".format(i))
found_nodes.append(i)
else:
if if_print: print("node[{}] ...NOT fit!".format(i))
if if_print: print("\nNode{} fit the matching pattern".format(found_nodes))
return found_nodes
def get_permutations(a):
"""
get all permutations of list a
"""
import itertools
p = []
for r in range(len(a)+1):
c = list(itertools.combinations(a,r))
for cc in c:
p += list(itertools.permutations(cc))
return p
def get_list_of_sequencial_nodes(search_head = ['Conv'], followings = ['Add', 'Relu', 'MaxPool']):
"""
if
search_head = ['Conv']
followings = ['Add', 'Relu', 'MaxPool']
return
[['Conv'],
['Conv', 'Add'],
['Conv', 'Relu'],
['Conv', 'MaxPool'],
['Conv', 'Add', 'Relu'],
['Conv', 'Relu', 'Add'],
['Conv', 'Add', 'MaxPool'],
['Conv', 'MaxPool', 'Add'],
['Conv', 'Relu', 'MaxPool'],
['Conv', 'MaxPool', 'Relu'],
['Conv', 'Add', 'Relu', 'MaxPool'],
['Conv', 'Add', 'MaxPool', 'Relu'],
['Conv', 'Relu', 'Add', 'MaxPool'],
['Conv', 'Relu', 'MaxPool', 'Add'],
['Conv', 'MaxPool', 'Add', 'Relu'],
['Conv', 'MaxPool', 'Relu', 'Add']]
"""
search_targets = [ search_head+list(foll) for foll in get_permutations(followings)]
return search_targets
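# Minimal end-to-end sketch (added; commented out because it assumes a model file and an
# ONNX backend that are not part of this module -- i.e. onnx plus a backend exposing the
# prepare() and run_node() interfaces this file was written against):
# import onnx
# from caffe2.python.onnx import backend
# model = onnx.load("onnx/yolov2.onnx")            # hypothetical path
# rep = backend.prepare(model)
# shape_dict = get_overall_shape_dict(model, get_init_shape_dict(rep), backend)
# conv_d = get_kernel_shape_dict(model, shape_dict)
# print(calculate_num_param_n_num_flops(conv_d))   # -> (num_params, num_flops)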
| 35.525114 | 166 | 0.579434 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,350 | 0.302057 |
b87022120f02d56a10e8caeb021ec987a4c00e77 | 9,961 | py | Python | app.py | rshane7/Sqlalchemy-Challenge | b0cd11388727e1f43453b0e7b0019e304d45eb39 | ["ADSL"] | null | null | null | app.py | rshane7/Sqlalchemy-Challenge | b0cd11388727e1f43453b0e7b0019e304d45eb39 | ["ADSL"] | null | null | null | app.py | rshane7/Sqlalchemy-Challenge | b0cd11388727e1f43453b0e7b0019e304d45eb39 | ["ADSL"] | null | null | null |
# Python script uses flask and SQL alchemy to create API requests for weather data from Hawaii.
# Import dependencies.
import numpy as np
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#----------------------------------------------------------------------------------------------------------------------
# Rubric - API SQLite Connection & Landing Page
# The Flask Application does all of the following:
# ✓ Correctly generates the engine to the correct sqlite file
# ✓ Uses automap_base() and reflects the database schema
# ✓ Correctly saves references to the tables in the sqlite file (measurement and station)
# ✓ Correctly creates and binds the session between the python app and database
#----------------------------------------------------------------------------------------------------------------------
# Database Setup
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# Reflect an existing database into a new model
Base = automap_base()
# Reflect the tables
Base.prepare(engine, reflect=True)
# Save references to tables
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
# Flask Setup
app = Flask(__name__)
# Flask Routes
@app.route("/")
def home():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/mostactivetobs<br/>"
f"/api/v1.0/start/<start><br/>"
f"/api/v1.0/start_date/end_date/<start_date>/<end_date>"
)
#----------------------------------------------------------------------------------------------------------------------
# Rubric - API Static Routes
# The static routes do all of the following:
# Precipitation route
# ✓ Returns the jsonified precipitation data for the last year in the database.
# ✓ Returns json with the date as the key and the value as the precipitation.
# Stations route
# ✓ Returns jsonified data of all of the stations in the database.
# Tobs route
# ✓ Returns jsonified data for the most active station (USC00519281) for the last year of data.
#----------------------------------------------------------------------------------------------------------------------
@app.route("/api/v1.0/precipitation")
def precipitation():
"""Return JSON where (Key: date / Value: precipitation)"""
print("Precipitation API request received.")
# Create our session (link) from Python to the DB
session = Session(engine)
# Query the most recent date in dataset.
# Convert to datetime object for calculation below.
max_date = session.query(func.max(func.strftime("%Y-%m-%d", Measurement.date))).limit(1).all()
max_date = max_date[0][0]
max_date = dt.datetime.strptime(max_date, "%Y-%m-%d")
# Calculate the date 1 year ago from the last data point in the database
year_ago = max_date - dt.timedelta(days=366)
# Perform a query to retrieve the last 12 months of precipitation data.
precipitations = session.query(func.strftime("%Y-%m-%d", Measurement.date), Measurement.prcp).\
filter(func.strftime("%Y-%m-%d", Measurement.date) >= year_ago).all()
# Iterate through precipitations to append all key/values to precipitation dictionary.
# Append precipitation dictionary to list, then return jsonify.
all_precipitations = []
for date, prcp in precipitations:
precipitation_dict = {}
precipitation_dict["date"] = date
precipitation_dict["prcp"] = prcp
all_precipitations.append(precipitation_dict)
return jsonify(all_precipitations)
@app.route("/api/v1.0/stations")
def stations():
"""Return JSON API for all stations in dataset."""
print("Stations API request received.")
# Create our session (link) from Python to the DB
session = Session(engine)
# Query all stations in the dataset.
stations = session.query(Station.id, Station.station, Station.name, Station.latitude, Station.longitude, Station.elevation).all()
# Iterate through stations to append all key/values to station dictionary.
# Append station dictionary to list, then return jsonify.
all_stations = []
for id, station, name, latitude, longitude, elevation in stations:
station_dict = {}
station_dict["id"] = id
station_dict["station"] = station
station_dict["name"] = name
station_dict["latitude"] = latitude
station_dict["longitude"] = longitude
station_dict["elevation"] = elevation
all_stations.append(station_dict)
return jsonify(all_stations)
# most active station last year of data
@app.route("/api/v1.0/mostactivetobs")
def last_year_tobs_most_active():
print("Most Active Station API request received.")
# Create our session (link) from Python to the DB
session = Session(engine)
# last date in the dataset and year from last date calculations
last_date = session.query(Measurement.date,Measurement.prcp).order_by(Measurement.date.desc()).first()[0]
last_year = str(dt.datetime.strptime(last_date,"%Y-%m-%d")-dt.timedelta(days=365))
last_year_tobs_most_active = session.query(Measurement.station,Measurement.date,Measurement.tobs).\
filter(Measurement.date >=last_year, Measurement.date <=last_date, Measurement.station == 'USC00519281').\
order_by(Measurement.date).all()
# Iterate through temperatures to append all key/values to temperature dictionary.
# Append temperature dictionary to list, then return jsonify.
all_mostactivetobs = []
for station, date, tobs in last_year_tobs_most_active:
last_year_tobs_most_active_dict = {}
last_year_tobs_most_active_dict["station"] = station
last_year_tobs_most_active_dict["date"] = date
last_year_tobs_most_active_dict["tobs"] = tobs
all_mostactivetobs.append(last_year_tobs_most_active_dict)
return jsonify(all_mostactivetobs)
#----------------------------------------------------------------------------------------------------------------------
# Rubric - API Dynamic Route
# The dynamic route does all of the following:
# Start route
# ✓ Route accepts the start date as a parameter from the URL
# Start/end route
# ✓ Route accepts the start and end dates as parameters from the URL
# ✓ Returns the min, max, and average temperatures calculated from the given start date to the given end date
# ✓ Returns the min, max, and average temperatures calculated from the given start date to the end of the dataset
#----------------------------------------------------------------------------------------------------------------------
@app.route("/api/v1.0/start/<start>/")
def calc_start_temps(start):
"""Return a JSON API of the minimum temperature, the average temperature, and the max temperature...
    for all dates greater than or equal to the start date."""
print("Calculate Start Temps. API request received.")
# Create our session (link) from Python to the DB
session = Session(engine)
# Query will accept start date in the format '%Y-%m-%d' and return the minimum, average, and maximum temperatures
# for all dates from that date.
start_temps = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start).all()
# Iterate through start temps to append all key/values to Start (Date) Calc Temp dictionary.
# Append Start (Date) Calc Temp dictionary to list, then return jsonify.
all_start_calc_temps = []
for result in start_temps:
start_calc_temp_dict = {}
start_calc_temp_dict["min_temp."] = result[0]
start_calc_temp_dict["avg_temp."] = result[1]
start_calc_temp_dict["max_temp."] = result[2]
all_start_calc_temps.append(start_calc_temp_dict)
return jsonify(all_start_calc_temps)
@app.route("/api/v1.0/start_date/end_date/<start_date>/<end_date>")
def calc_start_end_temps(start_date, end_date):
# Return a JSON API of the minimum temperature, the average temperature, and the max temperature
# between the start and end date inclusive.
print("Calculate Start/End Temps. API request received.")
# Create our session (link) from Python to the DB
session = Session(engine)
# Query will accept start and end dates in the format '%Y-%m-%d' and return the minimum, average, and
# maximum temperatures for all dates in that range.
start_end_temps = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
# Iterate through start temps to append all key/values to Start (Date) Calc Temp dictionary.
# Append Start (Date) Calc Temp dictionary to list, then return jsonify.
all_calc_start_end_temps = []
for result in start_end_temps:
calc_start_end_temp_dict = {}
calc_start_end_temp_dict["min_temp."] = result[0]
calc_start_end_temp_dict["avg_temp."] = result[1]
calc_start_end_temp_dict["max_temp."] = result[2]
all_calc_start_end_temps.append(calc_start_end_temp_dict)
return jsonify(all_calc_start_end_temps)
if __name__ == '__main__':
app.run(debug=True)
session.close()
#----------------------------------------------------------------------------------------------------------------------
# THE END
#----------------------------------------------------------------------------------------------------------------------
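# Sketch (illustrative only, not part of app.py): a tiny client for the precipitation route
# above, meant to be run separately while the server is up. It assumes Flask's default local
# address/port (127.0.0.1:5000) and that the third-party `requests` package is installed;
# neither is used by app.py itself.
def _demo_precipitation_client():
    import requests
    response = requests.get("http://127.0.0.1:5000/api/v1.0/precipitation")
    for entry in response.json()[:3]:
        print(entry["date"], entry["prcp"])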
| 45.484018
| 229
| 0.635679
| 0
| 0
| 0
| 0
| 6,605
| 0.661227
| 0
| 0
| 5,699
| 0.570528
|
b870872caf1a9e4c3f638cbe128e60ddb9f7db8d
| 5,194
|
py
|
Python
|
optimalTAD/__main__.py
|
cosmoskaluga/optimalTAD
|
eae806878df36a96b3ef8c5dc7eb8ecc9fa622b2
|
[
"MIT"
] | null | null | null |
optimalTAD/__main__.py
|
cosmoskaluga/optimalTAD
|
eae806878df36a96b3ef8c5dc7eb8ecc9fa622b2
|
[
"MIT"
] | null | null | null |
optimalTAD/__main__.py
|
cosmoskaluga/optimalTAD
|
eae806878df36a96b3ef8c5dc7eb8ecc9fa622b2
|
[
"MIT"
] | null | null | null |
import argparse
import logging
import sys
import time
import glob
import os
from . import logger
from . import config
from . visualization import plot
from . optimization import run
from . optimization import utils
class optimalTAD:
def __init__(self):
self.log = logger.initialize_logger()
self.cfg = config.get_configuration()
parser = argparse.ArgumentParser(description = 'optimalTAD: Topologically Associating Domain optimal set prediction', usage = ''' optimalTAD <command> [<args>]
The basic optimalTAD commands are:
run Run optimization process
visualize Visualize results ''')
parser.add_argument('command', help='Subcommand to run')
args = parser.parse_args([self.cfg.get('basic', 'mode')])
arg = sys.argv[1:2]
if arg:
if arg[0] in ['run', 'visualize']:
args.command = arg[0]
if args.command not in ['run', 'visualize']:
self.log.info('Unrecognized command!')
parser.print_help()
sys.exit(1)
chippath = self.cfg.get('run','chipseq')
self.chippath = glob.glob(os.path.expanduser(chippath))
getattr(self, args.command)()
def run(self):
start_time = time.time()
hicpath = self.cfg.get('run','hic')
hicpath = glob.glob(os.path.expanduser(hicpath))
parser = argparse.ArgumentParser(description='Run optimization process')
parser.add_argument('--hic', type = str, nargs='+', default = sorted(hicpath), help = 'Path to iteratively corrected Hi-C data')
        parser.add_argument('--chipseq', type = str, nargs = '+', default = sorted(self.chippath), help = 'Path to ChIP-seq data')
parser.add_argument('--np', type = int, default = int(self.cfg['run']['np']), help = 'Number of processors')
parser.add_argument('--resolution', type = int, default = int(self.cfg['run']['resolution']), help = 'Resolution')
parser.add_argument('--stepsize', type = float, default = float(self.cfg['run']['stepsize']), help = 'Step size to increment gamma parameter')
parser.add_argument('--gamma_max', type = float, default = float(self.cfg['run']['gamma_max']), help = 'Max gamma parameter')
parser.add_argument('--hic_format', type = str, default = self.cfg['run']['hic_format'], help = 'Hi-C matrices input format for armatus')
parser.add_argument('--empty_row_imputation', action = 'store_true', help = 'Missing rows (and columns) imputation')
parser.add_argument('--truncation', action = 'store_true', help = 'Value truncation of input Hi-C-matrix')
parser.add_argument('--log2_hic', action = 'store_true', help = 'log2 transformation of input Hi-C matrix')
parser.add_argument('--log2_chip', action = 'store_true', help = 'log2 transformation of input ChIP-Seq track')
parser.add_argument('--zscore_chip', action = 'store_true', help = 'Z-score transformation of ChIP-Seq track')
parser.set_defaults(empty_row_imputation = eval(self.cfg['run']['empty_row_imputation']))
parser.set_defaults(truncation = eval(self.cfg['run']['truncation']))
parser.set_defaults(log2_hic = eval(self.cfg['run']['log2_hic']))
parser.set_defaults(log2_chip = eval(self.cfg['run']['log2_chip']))
parser.set_defaults(zscore_chip = eval(self.cfg['run']['zscore_chip']))
args = parser.parse_args(sys.argv[2:])
run.main(args, self.cfg, self.log)
cpu_time = round(time.time()-start_time, 2)
self.log.info('Execution time: {} sec'.format(cpu_time))
def visualize(self):
start_time = time.time()
chipname = utils.get_chipname(self.chippath, self.cfg['visualization']['samplename'])
parser = argparse.ArgumentParser(description='Visualize results')
parser.add_argument('--samplename', type = str, default = self.cfg['visualization']['samplename'], help = 'Samplename of Hi-C data')
parser.add_argument('--region', type = str, default = self.cfg['visualization']['region'], help = 'Chromosomal coordinates')
parser.add_argument('--resolution', type = int, default = int(self.cfg['run']['resolution']), help = 'Resolution')
parser.add_argument('--chipseq', type = str, default = chipname, help = 'Path to ChIP-seq data')
parser.add_argument('--log2_chip', action = 'store_true', help = 'log2 transformation of an input ChIP-Seq track')
parser.add_argument('--zscore_chip', action = 'store_true', help = 'Z-score transformation of an input ChIP-Seq track')
parser.add_argument('--rnaseq', type = str, default = str(self.cfg['visualization']['rnaseq']), help = 'RNA-seq data')
parser.set_defaults(log2_chip = eval(self.cfg['run']['log2_chip']))
parser.set_defaults(zscore_chip = eval(self.cfg['run']['zscore_chip']))
args = parser.parse_args(sys.argv[2:])
plot.main(args, self.cfg['visualization'], self.log)
cpu_time = round(time.time()-start_time, 2)
self.log.info('Execution time: {} sec'.format(cpu_time))
if __name__ == '__main__':
optimalTAD()
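# Sketch (illustrative only, not part of optimalTAD): the dispatch idiom used in
# optimalTAD.__init__ above, reduced to its core -- the first CLI token names a method
# of the class and getattr() invokes it. Names here are made up for illustration.
class _DispatchSketch:
    def __init__(self, argv):
        command = argv[0] if argv else 'run'
        if command not in ('run', 'visualize'):
            raise SystemExit('Unrecognized command!')
        getattr(self, command)()
    def run(self):
        print('run mode selected')
    def visualize(self):
        print('visualize mode selected')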
| 55.849462
| 167
| 0.649788
| 4,926
| 0.948402
| 0
| 0
| 0
| 0
| 0
| 0
| 1,716
| 0.330381
|
b870e2ce26d78dfa9746e5e88adb9ed1463fb9fc
| 944
|
py
|
Python
|
communications/migrations/0002_auto_20190902_1759.py
|
shriekdj/django-social-network
|
3654051e334996ee1b0b60f83c4f809a162ddf4a
|
[
"MIT"
] | 368
|
2019-10-10T18:02:09.000Z
|
2022-03-31T14:31:39.000Z
|
communications/migrations/0002_auto_20190902_1759.py
|
shriekdj/django-social-network
|
3654051e334996ee1b0b60f83c4f809a162ddf4a
|
[
"MIT"
] | 19
|
2020-05-09T19:10:29.000Z
|
2022-03-04T18:22:51.000Z
|
communications/migrations/0002_auto_20190902_1759.py
|
shriekdj/django-social-network
|
3654051e334996ee1b0b60f83c4f809a162ddf4a
|
[
"MIT"
] | 140
|
2019-10-10T18:01:59.000Z
|
2022-03-14T09:37:39.000Z
|
# Generated by Django 2.2.4 on 2019-09-02 11:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('communications', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='message',
name='author',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='author_messages', to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='message',
name='friend',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='friend_messages', to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
| 32.551724
| 153
| 0.665254
| 785
| 0.831568
| 0
| 0
| 0
| 0
| 0
| 0
| 145
| 0.153602
|
b871aaee0feb9ef1cdc6b28c76ed73a977fed9b3
| 1,126
|
py
|
Python
|
examples/sht2x.py
|
kungpfui/python-i2cmod
|
57d9cc8de372aa38526c3503ceec0d8924665c04
|
[
"MIT"
] | null | null | null |
examples/sht2x.py
|
kungpfui/python-i2cmod
|
57d9cc8de372aa38526c3503ceec0d8924665c04
|
[
"MIT"
] | null | null | null |
examples/sht2x.py
|
kungpfui/python-i2cmod
|
57d9cc8de372aa38526c3503ceec0d8924665c04
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Sensirion SHT2x humidity sensor.
Drives SHT20, SHT21 and SHT25 humidity and temperature sensors.
Sensirion `SHT2x Datasheets <https://www.sensirion.com/en/environmental-sensors/humidity-sensors/humidity-temperature-sensor-sht2x-digital-i2c-accurate/>`
"""
from i2cmod import SHT2X
def example():
with SHT2X() as sensor:
print("Identification: 0x{:016X}".format(sensor.serial_number))
for adc_res, reg_value in (
('12/14', 0x02),
(' 8/10', 0x03),
('10/13', 0x82),
('11/11', 0x83)):
sensor.user_register = reg_value
print("-" * 79)
print("Resolution: {}-bit (rh/T)".format(adc_res))
print("Temperature: {:.2f} °C".format(sensor.centigrade))
print("Temperature: {:.2f} °F".format(sensor.fahrenheit))
print("Relative Humidity: {:.2f} % ".format(sensor.humidity))
print("User Register: 0x{:02X}".format(sensor.user_register))
if __name__ == '__main__':
example()
| 34.121212
| 154
| 0.579041
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 543
| 0.481383
|
b872257816f92142d8b69ab7304685ffe49c0d35
| 52
|
py
|
Python
|
__init__.py
|
HarisNaveed17/aws-boxdetector
|
e71daebbebe9dc847bdad70d2ea2fe859fede587
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
HarisNaveed17/aws-boxdetector
|
e71daebbebe9dc847bdad70d2ea2fe859fede587
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
HarisNaveed17/aws-boxdetector
|
e71daebbebe9dc847bdad70d2ea2fe859fede587
|
[
"Apache-2.0"
] | null | null | null |
from pipeline import *
box_detection = BoxDetector()
| 26
| 29
| 0.807692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b8746c0bb1705159c5c6690183e9699670c24d04
| 217
|
bzl
|
Python
|
bazel_versions.bzl
|
pennig/rules_xcodeproj
|
109ab85a82954ea38f0529eafc291f5ce6f63483
|
[
"MIT"
] | 1
|
2022-03-31T09:13:24.000Z
|
2022-03-31T09:13:24.000Z
|
bazel_versions.bzl
|
pennig/rules_xcodeproj
|
109ab85a82954ea38f0529eafc291f5ce6f63483
|
[
"MIT"
] | null | null | null |
bazel_versions.bzl
|
pennig/rules_xcodeproj
|
109ab85a82954ea38f0529eafc291f5ce6f63483
|
[
"MIT"
] | null | null | null |
"""Specifies the supported Bazel versions."""
CURRENT_BAZEL_VERSION = "5.0.0"
OTHER_BAZEL_VERSIONS = [
"6.0.0-pre.20220223.1",
]
SUPPORTED_BAZEL_VERSIONS = [
CURRENT_BAZEL_VERSION,
] + OTHER_BAZEL_VERSIONS
| 18.083333
| 45
| 0.728111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 74
| 0.341014
|
b874c67d7c0255eb46088b631d745bcaf2f71c70
| 1,372
|
py
|
Python
|
Pymug/server/game/parse.py
|
Aitocir/UnfoldingWorld
|
70606eec694f006ccd6687912bce7b75d623287e
|
[
"MIT"
] | 2
|
2019-08-30T08:26:44.000Z
|
2021-04-09T14:22:09.000Z
|
Pymug/server/game/parse.py
|
Aitocir/UnfoldingWorld
|
70606eec694f006ccd6687912bce7b75d623287e
|
[
"MIT"
] | null | null | null |
Pymug/server/game/parse.py
|
Aitocir/UnfoldingWorld
|
70606eec694f006ccd6687912bce7b75d623287e
|
[
"MIT"
] | null | null | null |
def _compile(words):
if not len(words):
return None, ''
num = None
if words[0].isdigit():
num = int(words[0])
words = words[1:]
return num, ' '.join(words)
def _split_out_colons(terms):
newterms = []
for term in terms:
if ':' in term:
subterms = term.split(':')
for sub in subterms:
newterms.append(sub)
newterms.append(':')
newterms = newterms[:-1]
else:
newterms.append(term)
return [term for term in newterms if len(term)]
# parse user command text
def user_command(text):
terms = text.strip().split()
terms = _split_out_colons(terms)
cmd = {}
if len(terms) == 0:
return cmd
cmd['verb'] = terms[0]
mode = 'directobject'
flags = ['with', 'by', 'from', 'for', 'to', ':']
words = []
for term in terms[1:]:
if mode == ':':
words.append(term)
elif term in flags:
num, cmd[mode] = _compile(words)
if not len(cmd[mode]):
cmd.pop(mode)
if num:
cmd[mode+'_num'] = num
words = []
mode = term
else:
words.append(term)
if len(words):
num, cmd[mode] = _compile(words)
if num:
cmd[mode+'_num'] = num
return cmd
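# Sketch (illustrative only, not part of the original module): how a typical command is
# parsed. Flag words ('to', 'with', ...) split the sentence into slots, and a leading
# digit in a slot becomes a separate *_num entry.
def _demo_user_command():
    cmd = user_command('give 2 red apples to alice')
    # expected: {'verb': 'give', 'directobject': 'red apples',
    #            'directobject_num': 2, 'to': 'alice'}
    return cmd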
| 26.384615
| 52
| 0.487609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 104
| 0.075802
|
b876fc588cb708294748bda2b97c2a9bb2b7cc83
| 539
|
py
|
Python
|
files/OOP/Encapsulation/Encapsulation 3.py
|
grzegorzpikus/grzegorzpikus.github.io
|
652233e0b98f48a3396583bab2559f5981bac8ad
|
[
"CC-BY-3.0"
] | null | null | null |
files/OOP/Encapsulation/Encapsulation 3.py
|
grzegorzpikus/grzegorzpikus.github.io
|
652233e0b98f48a3396583bab2559f5981bac8ad
|
[
"CC-BY-3.0"
] | null | null | null |
files/OOP/Encapsulation/Encapsulation 3.py
|
grzegorzpikus/grzegorzpikus.github.io
|
652233e0b98f48a3396583bab2559f5981bac8ad
|
[
"CC-BY-3.0"
] | null | null | null |
class BankAccount:
def __init__(self, checking = None, savings = None):
self._checking = checking
self._savings = savings
def get_checking(self):
return self._checking
def set_checking(self, new_checking):
self._checking = new_checking
def get_savings(self):
return self._savings
def set_savings(self, new_savings):
self._savings = new_savings
my_account = BankAccount()
my_account.set_checking(523.48)
print(my_account.get_checking())
my_account.set_savings(386.15)
print(my_account.get_savings())
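# Sketch (illustrative only, not part of the lesson file): the same encapsulation expressed
# with Python properties, which keeps attribute-style access while still routing reads and
# writes through methods.
class BankAccountWithProperties:
    def __init__(self, checking=None, savings=None):
        self._checking = checking
        self._savings = savings
    @property
    def checking(self):
        return self._checking
    @checking.setter
    def checking(self, new_checking):
        self._checking = new_checking
    @property
    def savings(self):
        return self._savings
    @savings.setter
    def savings(self, new_savings):
        self._savings = new_savings
# usage: acc = BankAccountWithProperties(); acc.checking = 523.48; print(acc.checking)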
| 22.458333
| 54
| 0.747681
| 382
| 0.70872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b877894244ea866fac268797b7d04d857a48c881
| 800
|
py
|
Python
|
utils/folder_to_list.py
|
abhatta1234/face_analysis_pytorch
|
2abe930c0ca02a1fd819d4710fd9bff392f32f58
|
[
"MIT"
] | 27
|
2020-05-19T16:51:42.000Z
|
2022-02-28T05:00:16.000Z
|
utils/folder_to_list.py
|
abhatta1234/face_analysis_pytorch
|
2abe930c0ca02a1fd819d4710fd9bff392f32f58
|
[
"MIT"
] | 3
|
2020-04-09T04:46:24.000Z
|
2020-10-21T18:57:05.000Z
|
utils/folder_to_list.py
|
abhatta1234/face_analysis_pytorch
|
2abe930c0ca02a1fd819d4710fd9bff392f32f58
|
[
"MIT"
] | 10
|
2020-05-11T19:50:30.000Z
|
2022-03-16T11:49:52.000Z
|
import argparse
from os import listdir, path
import numpy as np
def convert(main_folder, output):
ret = []
    for label, class_folder in enumerate(listdir(main_folder)):  # enumerate supplies an integer label per class folder
class_folder_path = path.join(main_folder, class_folder)
for img_name in listdir(class_folder_path):
image_path = path.join(class_folder, img_name)
ret.append([image_path, str(label)])
    np.savetxt(output, ret, delimiter=" ", fmt="%s %s")  # both columns are strings, so use %s for each
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Folder with classes subfolders to a file to train."
)
parser.add_argument("--folder", "-f", help="Folder to convert.")
parser.add_argument("--output", "-o", help="Output file.")
args = parser.parse_args()
convert(args.folder, args.output)
| 26.666667
| 72
| 0.665
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 134
| 0.1675
|
b8783b5b039e343b576ed8c99dd5d5e0e166571d
| 509
|
py
|
Python
|
Leetcode/Python Solutions/Strings/ReverseString.py
|
Mostofa-Najmus-Sakib/Applied-Algorithm
|
bc656fd655617407856e0ce45b68585fa81c5035
|
[
"MIT"
] | 1
|
2020-01-06T02:21:56.000Z
|
2020-01-06T02:21:56.000Z
|
Leetcode/Python Solutions/Strings/ReverseString.py
|
Mostofa-Najmus-Sakib/Applied-Algorithm
|
bc656fd655617407856e0ce45b68585fa81c5035
|
[
"MIT"
] | null | null | null |
Leetcode/Python Solutions/Strings/ReverseString.py
|
Mostofa-Najmus-Sakib/Applied-Algorithm
|
bc656fd655617407856e0ce45b68585fa81c5035
|
[
"MIT"
] | 3
|
2021-02-22T17:41:01.000Z
|
2022-01-13T05:03:19.000Z
|
"""
LeetCode Problem: 344. Reverse String
Link: https://leetcode.com/problems/reverse-string/
Language: Python
Written by: Mostofa Adib Shakib
Time Complexity: O(n)
Space Complexity: O(1)
"""
from typing import List
class Solution:
def reverseString(self, s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
low = 0
high = len(s) - 1
while low < high:
s[low], s[high] = s[high], s[low]
low += 1
high -= 1
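# Sketch (illustrative only, not part of the solution file): the method mutates the list it
# receives and returns None, so usage looks like this.
def _demo_reverse_string():
    s = list("hello")
    Solution().reverseString(s)
    return s  # expected ['o', 'l', 'l', 'e', 'h']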
| 23.136364
| 58
| 0.554028
| 315
| 0.618861
| 0
| 0
| 0
| 0
| 0
| 0
| 266
| 0.522593
|
b878732e91bebe5ae9b4cd691ecca80c673cb34c
| 7,242
|
py
|
Python
|
packages/verify_layer.py
|
OpenTrustGroup/scripts
|
31ca2ca5bae055113c6f92a2eb75b0c7528902b3
|
[
"BSD-3-Clause"
] | null | null | null |
packages/verify_layer.py
|
OpenTrustGroup/scripts
|
31ca2ca5bae055113c6f92a2eb75b0c7528902b3
|
[
"BSD-3-Clause"
] | null | null | null |
packages/verify_layer.py
|
OpenTrustGroup/scripts
|
31ca2ca5bae055113c6f92a2eb75b0c7528902b3
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
from common import FUCHSIA_ROOT, get_package_imports, get_product_imports
import json
import os
import subprocess
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Standard names for root packages in a layer.
ROOT_CANONICAL_PACKAGES = [
'buildbot',
'default',
'kitchen_sink',
]
REQUIRED_PRODUCTS = [
'default'
]
# Standard names for packages in a layer.
CANONICAL_PACKAGES = [
'all',
]
# Directories which do not require aggregation.
NO_AGGREGATION_DIRECTORIES = [
'config',
'disabled',
'products',
]
# Non-package files allowed in package directories.
NON_PACKAGE_FILES = [
'README.md',
]
def check_json(packages):
'''Verifies that all files in the list are JSON files.'''
all_json = True
for package in packages:
with open(package, 'r') as file:
try:
json.load(file)
except ValueError:
all_json = False
print('Non-JSON file: %s' % package)
return all_json
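# Sketch (illustrative only, not part of the original script): exercising check_json on two
# throwaway files -- one valid, one not. File names here are made up.
def _demo_check_json():
    import tempfile
    tmp = tempfile.mkdtemp()
    good = os.path.join(tmp, 'all')
    with open(good, 'w') as f:
        json.dump({'imports': []}, f)
    bad = os.path.join(tmp, 'broken')
    with open(bad, 'w') as f:
        f.write('not json')
    return check_json([good]), check_json([good, bad])  # expected (True, False)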
def check_schema(packages, validator, schema):
'''Verifies that all files adhere to the schema.'''
all_valid = True
for package in packages:
if subprocess.call([validator, schema, package]) != 0:
all_valid = False
return all_valid
def check_deps_exist(dep_map):
'''Verifies that all dependencies exist.'''
all_exist = True
    for (package, deps) in dep_map.items():
for dep in deps:
if not os.path.isfile(dep):
all_exist = False
print('Dependency of %s does not exist: %s' % (package, dep))
return all_exist
def check_all(directory, dep_map, layer, is_root=True):
    '''Verifies that directories contain an "all" package and that this package
lists all the files in the directory.
'''
for dirpath, dirnames, filenames in os.walk(directory):
dirnames = [d for d in dirnames if d not in NO_AGGREGATION_DIRECTORIES]
is_clean = True
for dir in dirnames:
subdir = os.path.join(dirpath, dir)
if not check_all(subdir, dep_map, layer, is_root=False):
is_clean = False
if not is_clean:
return False
all_package = os.path.join(dirpath, 'all')
if not os.path.isfile(all_package):
print('Directory does not contain an "all" package: %s' % dirpath)
return False
known_deps = dep_map[all_package]
has_all_files = True
def verify(package):
if package not in known_deps:
print('Missing dependency in %s: %s' % (all_package, package))
return False
return True
for file in filenames:
if is_root and (file in ROOT_CANONICAL_PACKAGES or file == layer):
continue
if file in CANONICAL_PACKAGES or file in NON_PACKAGE_FILES:
continue
package = os.path.join(dirpath, file)
if not verify(package):
has_all_files = False
for dir in dirnames:
package = os.path.join(dirpath, dir, 'all')
if not verify(package):
has_all_files = False
return has_all_files
def check_no_fuchsia_packages_in_all(packages):
allowed_keys = {'imports'}
all_clear = True
for package in [p for p in packages if os.path.basename(p) == 'all']:
with open(package, 'r') as file:
data = json.load(file)
keys = set(data.keys())
if not keys.issubset(allowed_keys):
all_clear = False
print('"all" should only contain imports: %s' % package)
return all_clear
def check_root(base, layer):
'''Verifies that all canonical packages are present at the root.'''
all_there = True
for file in ROOT_CANONICAL_PACKAGES + [layer]:
if not os.path.isfile(os.path.join(base, file)):
all_there = False
print('Missing root package: %s' % file)
return all_there
def check_product_root(base, layer):
    '''Verifies that the default product is present.'''
missing = []
for product in REQUIRED_PRODUCTS:
path = os.path.join(base, product)
if not os.path.isfile(path):
missing.append(path)
if not missing:
return True
print('Missing products: %s' % missing)
return False
def main():
parser = argparse.ArgumentParser(
description=('Checks that packages in a given layer are properly '
'formatted and organized'))
layer_group = parser.add_mutually_exclusive_group(required=True)
layer_group.add_argument('--layer',
help='Name of the layer to analyze',
choices=['garnet', 'peridot', 'topaz'])
layer_group.add_argument('--vendor-layer',
help='Name of the vendor layer to analyze')
parser.add_argument('--json-validator',
help='Path to the JSON validation tool',
required=True)
args = parser.parse_args()
os.chdir(FUCHSIA_ROOT)
if args.layer:
layer = args.layer
packages_base = os.path.join(layer, 'packages')
products_base = os.path.join(layer, 'products')
else:
layer = args.vendor_layer
packages_base = os.path.join('vendor', layer, 'packages')
products_base = os.path.join('vendor', layer, 'products')
# List all packages files.
packages = []
for dirpath, dirnames, filenames in os.walk(packages_base):
packages.extend([os.path.join(dirpath, f) for f in filenames
if f not in NON_PACKAGE_FILES])
products = []
for dirpath, dirnames, filenames in os.walk(products_base):
products.extend([os.path.join(dirpath, f) for f in filenames
if f not in NON_PACKAGE_FILES])
if not check_json(packages):
return False
if not check_json(products):
return False
schema = os.path.join(SCRIPT_DIR, 'package_schema.json')
if not check_schema(packages, args.json_validator, schema):
return False
schema = os.path.join(SCRIPT_DIR, 'product_schema.json')
if not check_schema(products, args.json_validator, schema):
return False
deps = dict([(p, get_package_imports(p)) for p in packages])
if not check_deps_exist(deps):
return False
if not check_all(packages_base, deps, layer):
return False
if not check_no_fuchsia_packages_in_all(packages):
return False
if not check_root(packages_base, layer):
return False
deps = dict([(p, get_product_imports(p)) for p in products])
if not check_deps_exist(deps):
return False
if not check_product_root(products_base, layer):
return False
return True
if __name__ == '__main__':
return_code = 0
if not main():
print('Errors!')
return_code = 1
sys.exit(return_code)
| 31.081545
| 80
| 0.619994
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,492
| 0.20602
|
b8796958be709a16a0f0fcd864b552aa54f7203a
| 8,064
|
py
|
Python
|
server/main.py
|
MrCheka/langidnn
|
77788ef1b7c03d3de92cb1f28e6eb7f3a20d262a
|
[
"MIT"
] | null | null | null |
server/main.py
|
MrCheka/langidnn
|
77788ef1b7c03d3de92cb1f28e6eb7f3a20d262a
|
[
"MIT"
] | 7
|
2020-07-17T01:22:21.000Z
|
2022-02-26T10:48:01.000Z
|
server/main.py
|
MrCheka/langidnn
|
77788ef1b7c03d3de92cb1f28e6eb7f3a20d262a
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import argparse
import logging
from src.helpers.NNHelper import NNHelper
from src.controller.Controller import Controller
from src.params.Parameters import Parameters
def createParser():
parser = argparse.ArgumentParser(prog='langid_server',
description='''Это серверная часть
идентификатора языка с использованием нейронных сетей''',
epilog='(c) Дипломный проект Байко Станислава Леоновича, ИИ-13, БрГТУ',
add_help=False)
parent_group = parser.add_argument_group(title='Параметры')
parent_group.add_argument('--help', '-h', action='help', help='Справка')
subparsers = parser.add_subparsers(dest='mode',
title='Возможные комманды',
description='Команды, которые должны быть в качестве первого параметра %(prog)s.')
server_parser = subparsers.add_parser('server',
add_help=False,
help='Запуск программы в режиме сервера',
description='''Запуск в режиме сервера.
В этом режиме программа предоставляет API для идентификации языка.''')
server_group = server_parser.add_argument_group(title='Параметры')
server_group.add_argument('--model', '-m', required=True,
help='Модель нейронной сети, используемая для идентификации.', metavar='МОДЕЛЬ')
server_group.add_argument('--address', '-a', default='127.0.0.1',
help='Адрес для прослушивания. По умолчанию 127.0.0.1 (localhost)', metavar='АДРЕС')
server_group.add_argument('--port', '-p', default='8888', help='Порт для прослушивания. По умолчанию 8888.',
metavar='ПОРТ')
server_group.add_argument('--help', '-h', action='help', help='Справка')
train_parser = subparsers.add_parser('train',
add_help=False,
help='Запуск программы в режиме обучения',
description='''Запуск программы в режиме обучения.
В этом режиме предоставляются возможности для обучения модели нейронной сети.''')
train_group = train_parser.add_argument_group(title='Параметры')
train_group.add_argument('--model', '-m', help='Модель нейронной сети для продолжения обучения', metavar='МОДЕЛЬ')
train_group.add_argument('--dataset', '-d', help='Название датасета, используемого для обучения', metavar='ДАТАСЕТ')
train_group.add_argument('--unicode', '-u', help='Использовать unicode нормализацию', action='store_true', default=False)
train_group.add_argument('--size', '-s', help='Размер каждого слоя модели', type=int, default=500)
train_group.add_argument('--embedding_size', '-e', help='Размер embedding слоя', type=int, default=200)
train_group.add_argument('--layers', '-l', help='Количество слоёв', type=int, default=1)
train_group.add_argument('--dropout', help='Вероятность dropout', type=float, default=0.5)
train_group.add_argument('--learning_rate', help='Минимальная ошибка на валидационной выборке', type=float, default=0.0001)
train_group.add_argument('--max_iters', help='Максимальное количество итераций', type=int)
train_group.add_argument('--checkpoint', '-c', help='Количество итераций для сохранения модели', type=int, default=5000)
train_group.add_argument('--batch_size', '-b', help='Размер порции данных, подаваемых на нейронную сеть', type=int, default=64)
train_group.add_argument('--time_stop', '-t', help='Количество часов на обучение', type=int)
train_group.add_argument('--input', '-i', help='Размер входа', type=int, default=200)
train_group.add_argument('--cell_type', help='Тип ячеек', choices=['lstm', 'gru'], default='gru')
train_group.add_argument('--help', '-h', action='help', help='Справка')
test_parser = subparsers.add_parser('test',
add_help=False,
help='Запуск программы в режиме тестирования',
description='''Запуск программы в режиме тестирования
В этом режиме предоставляются возможности для тестирования модели нейронной сети.''')
test_group = test_parser.add_argument_group(title='Параметры')
test_group.add_argument('--model', '-m', required=True, help='Модель нейронной сети для тестирования.',
metavar='МОДЕЛЬ')
test_group.add_argument('--dataset', '-d', help='Название датасета, используемого для тестирования', metavar='ДАТАСЕТ')
test_group.add_argument('--text', '-t', help='Текст для идентификации языка.', metavar='ТЕКСТ')
test_group.add_argument('--file', '-f', help='Текстовый файл для идентификации языка.', metavar='ФАЙЛ')
test_group.add_argument('--help', '-h', action='help', help='Справка')
return parser
def run_server(namespace):
model = namespace.model
address = namespace.address
port = namespace.port
with tf.Session() as sess:
contr = Controller(sess, model)
contr.run(address, port)
def run_train(namespace):
params = Parameters('PARAMS')
if namespace.model:
model = namespace.model
elif namespace.dataset:
dataset = namespace.dataset
params.add_integer('min_count', 0)
params.add_integer('trained_lines', 0)
params.add_integer('step', 0)
if namespace.unicode:
params.add_bool('unicode_normalization', True)
if namespace.size:
params.add_integer('size', namespace.size)
if namespace.embedding_size:
params.add_integer('embedding_size', namespace.embedding_size)
if namespace.layers:
params.add_integer('num_layers', namespace.layers)
if namespace.dropout:
params.add_float('dropout', namespace.dropout)
if namespace.learning_rate:
params.add_float('learning_rate', namespace.learning_rate)
if namespace.max_iters:
params.add_integer('max_iters', namespace.max_iters)
if namespace.checkpoint:
params.add_integer('steps_per_checkpoint', namespace.checkpoint)
if namespace.batch_size:
params.add_integer('batch_size', namespace.batch_size)
if namespace.time_stop:
params.add_string('time_stop', namespace.time_stop)
if namespace.input:
params.add_integer('max_length', namespace.input)
if namespace.cell_type:
params.add_string('cell_type', namespace.cell_type)
with tf.Session() as sess:
helper = NNHelper(sess, model, params)
helper.train()
def run_test(namespace):
    dataset = None  # ensure 'dataset' is defined when only --text or --file is supplied
    if namespace.text:
text = namespace.text
elif namespace.file:
with open(namespace.file, 'r', encoding='utf-8') as f:
text = f.read()
elif namespace.dataset:
dataset = namespace.dataset
else:
raise Exception('Не указан текст или файл для идентификации')
with tf.Session() as sess:
helper = NNHelper(sess, namespace.model)
if dataset:
acc = helper.test(dataset)
print('Точность определения - {0}% {1}'.format(100*acc[0]/acc[1], acc))
else:
lang, acc = helper.detect_lang(text)
print('Результат:\n')
print('Язык - {0}'.format(lang))
print('Точность определения - {0}%'.format(acc*100))
if __name__ == '__main__':
parser = createParser()
namespace = parser.parse_args()
if namespace.mode == 'server':
run_server(namespace)
elif namespace.mode == 'train':
run_train(namespace)
elif namespace.mode == 'test':
run_test(namespace)
else:
logging.error('Ошибка во время выбора режима работы приложения. Попробуйте ещё раз.')
| 48
| 131
| 0.631696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,093
| 0.43207
|
b879c0d4ddbe81e9894622e0f700feebdf2b2709
| 1,086
|
py
|
Python
|
Python/Code/Python3-Base/13_Network/Server/SingleProgressServer.py
|
hiloWang/notes
|
64a637a86f734e4e80975f4aa93ab47e8d7e8b64
|
[
"Apache-2.0"
] | 2
|
2020-10-08T13:22:08.000Z
|
2021-07-28T14:45:41.000Z
|
Python/Python3-Base/13_Network/Server/SingleProgressServer.py
|
flyfire/Programming-Notes-Code
|
4b1bdd74c1ba0c007c504834e4508ec39f01cd94
|
[
"Apache-2.0"
] | null | null | null |
Python/Python3-Base/13_Network/Server/SingleProgressServer.py
|
flyfire/Programming-Notes-Code
|
4b1bdd74c1ba0c007c504834e4508ec39f01cd94
|
[
"Apache-2.0"
] | 6
|
2020-08-20T07:19:17.000Z
|
2022-03-02T08:16:21.000Z
|
#########################
# Single-process server
#########################
"""
The server can serve only one client at a time; it cannot serve several clients at once. Much like
queueing for a "celebrity" autograph, each client has to wait patiently before it gets served.
While the server is busy with one client, another client may still issue a connect: as long as the
listen queue has a free slot the new connection is accepted and that client can already send data,
and once the server gets around to it, all of that data may be received in one go. When recv()
returns an empty value (no data), the client has already called close(); the server therefore uses
an empty recv() result to decide that a client has gone offline.
"""
from socket import *
serSocket = socket(AF_INET, SOCK_STREAM)
# Reuse the bound address, avoiding the 2MSL TIME_WAIT delay on the socket.
serSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
localAddress = ('', 7788)
serSocket.bind(localAddress)
serSocket.listen(5)
while True:
    print('----- main process: waiting for a new client -----')
newSocket, destinationAddress = serSocket.accept()
    print('----- main process: now handling data for [%s] -----' % str(destinationAddress))
try:
while True:
receiveData = newSocket.recv(1024)
if len(receiveData) > 0:
print('receive[%s]:%s' % (str(destinationAddress), receiveData))
else:
                print('[%s] client has closed the connection' % str(destinationAddress))
break
finally:
newSocket.close()
# serSocket.close()
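# Sketch (illustrative only, not part of the original file): a minimal client for the server
# above, intended to be run as a separate script while the server listens on port 7788.
def _demo_client(message='hello from client'):
    from socket import socket, AF_INET, SOCK_STREAM
    clientSocket = socket(AF_INET, SOCK_STREAM)
    clientSocket.connect(('127.0.0.1', 7788))
    clientSocket.send(message.encode('utf-8'))
    clientSocket.close()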
| 24.133333
| 80
| 0.64825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,024
| 0.619855
|
b87a63d631b48c56ae9ad1ccd48c2c053e4047a5
| 3,098
|
py
|
Python
|
webapp/kortkatalogen/base/models.py
|
snickaren/CIPAC
|
58455d59734a571e0134d6368d27ee3e65001c9a
|
[
"Apache-2.0"
] | null | null | null |
webapp/kortkatalogen/base/models.py
|
snickaren/CIPAC
|
58455d59734a571e0134d6368d27ee3e65001c9a
|
[
"Apache-2.0"
] | 2
|
2021-06-01T22:47:10.000Z
|
2021-06-10T20:52:49.000Z
|
webapp/kortkatalogen/base/models.py
|
snickaren/CIPAC
|
58455d59734a571e0134d6368d27ee3e65001c9a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.db import models
class BaseCatalog(models.Model):
name = models.CharField(max_length=255)
description = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __unicode__(self):
return u'%s' % self.name
class Meta:
verbose_name = u"Katalog"
verbose_name_plural = u"Kataloger"
abstract = True
class BaseBox(models.Model):
folder_name = models.CharField(max_length=255, unique=True, verbose_name="Katalognamn", help_text="Filkatalog på disk där denna lådas filer ligger")
sequence_number = models.IntegerField(db_index=True)
label = models.CharField(max_length=255,db_index=True, verbose_name="Etikett")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __unicode__(self):
return u'%s %s' % (self.sequence_number, self.label)
class Meta:
verbose_name = u"Låda"
verbose_name_plural = u"Lådor"
ordering = ['sequence_number']
abstract = True
class BaseCard(models.Model):
name = models.CharField(max_length=255, db_index=True, verbose_name="Kortnamn", help_text="Rubriken som visas överst på en kortsida")
filename = models.CharField(max_length=255, db_index=True, verbose_name="Filnamn", help_text="Filnamnet för bildfilen")
filename_back = models.CharField(max_length=255, db_index=True, verbose_name="Filnamn baksida", help_text="Filnamnet för bildfilen av baksidan")
ocr_text = models.TextField(blank=True, help_text="Automatiskt OCR-tolkad text från kortet.")
ocr_text_back = models.TextField(blank=True, help_text="Automatiskt OCR-tolkad text från kortets baksida.")
letter = models.CharField(max_length=1, null=True, blank=True, db_index=True, verbose_name="Indexbokstav" , help_text="Anges för första kortet för att dela upp katalogen alfabetiskt.")
sequence_number = models.IntegerField(db_index=True, verbose_name="Sekvensnummer i låda")
catalog_sequence_number = models.IntegerField(null=True, blank=True, verbose_name="Kortnummer", help_text="Globalt katalognummer som anger kortets plats i katalogen. Används även som identifierare.")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
comment = models.TextField(blank=True, null=True, verbose_name="Intern kommentar", help_text="Visas ej för besökare.")
# readonly field to show preview pic in django admin interface
def image_tag(self):
return u'<img alt="Kort %s" src="/static/alfa/%s" />' % (self.catalog_sequence_number, self.box.folder_name + "/" + self.filename.replace(".jpg", "_view500.jpg"))
image_tag.short_description = 'Bild'
image_tag.allow_tags = True
def __unicode__(self):
return u'%s %s' % (self.catalog_sequence_number, self.name)
class Meta:
verbose_name = u"Kort"
verbose_name_plural = u"Kort"
ordering = ['catalog_sequence_number']
abstract = True
| 45.558824
| 203
| 0.726598
| 3,052
| 0.979147
| 0
| 0
| 0
| 0
| 0
| 0
| 844
| 0.270773
|
b87adad624fdbc747cbd3966ca19edcc62c0db08
| 2,607
|
py
|
Python
|
script/raw-word-cloud.py
|
ranyxr/infoVis
|
307c2ffc4c7d6cf87ed000310a1f2b6233bd7a3b
|
[
"MIT"
] | 2
|
2020-05-27T11:12:41.000Z
|
2020-12-17T19:33:41.000Z
|
script/raw-word-cloud.py
|
ranyxr/infoVis
|
307c2ffc4c7d6cf87ed000310a1f2b6233bd7a3b
|
[
"MIT"
] | null | null | null |
script/raw-word-cloud.py
|
ranyxr/infoVis
|
307c2ffc4c7d6cf87ed000310a1f2b6233bd7a3b
|
[
"MIT"
] | 3
|
2020-03-18T19:20:24.000Z
|
2020-12-17T17:37:24.000Z
|
import os
import nltk
import spacy
from datetime import datetime
from pyspark.sql import SparkSession
from pyspark.sql.types import StringType, ArrayType
from pyspark.sql.functions import udf, col, explode, collect_list, count
from SYS import COL, MODE, DIR, FILE
nltk.download('stopwords')
os.system("python -m spacy download en_core_web_sm")
nlp = spacy.load("en_core_web_sm")
def get_token(tweet):
doc = nlp(tweet)
tokens = []
for t in doc:
if len(t) > 2:
if t.is_stop is False:
if t.pos_ in ["VERB", "NOUN"]:
tokens.append(str(t.lemma_))
elif t.pos_ in ["NUM", "SYM", "ADP"]:
continue
elif t.is_stop:
continue
else:
tokens.append(str(t))
return tokens
def process_token(in_df):
print("{} [System]: Start processing token!".format(datetime.now()))
udf_token = udf(get_token, ArrayType(StringType()))
in_df = in_df.withColumn(COL.token, udf_token(col(COL.descri)))
in_df = in_df.withColumn(COL.token, explode(col(COL.token)))
in_df = in_df.drop(col(COL.descri))
in_df = in_df.groupby(col(COL.year), col(COL.token))\
.agg(collect_list(COL.o_id).alias(COL.o_id), count(COL.token).alias(COL.count))
print("{} [System]: Token processed!".format(datetime.now()))
return in_df
java8_location = '/Library/Java/JavaVirtualMachines/liberica-jdk-1.8.0_202/Contents/Home'
os.environ['JAVA_HOME'] = java8_location
spark = SparkSession\
.builder\
.appName("A1")\
.getOrCreate()
def get_unprocessed_df():
try:
in_df = spark.read.parquet(FILE.cleaned_data2_uri).select(col(COL.o_id), col(COL.descri), col(COL.year))
in_df = in_df.filter(col(COL.descri).isNotNull()).drop_duplicates([COL.o_id, COL.year])
if MODE.limit:
in_df = in_df.limit(20)
print("{} [System]: Cleaned data read in successfully! {} lines read in!".format(datetime.now(), in_df.count()))
return in_df
except Exception:
print("{} [System]: Cleaned data not exist. Script Exit!".format(datetime.now()))
import sys
sys.exit(1)
if __name__ == '__main__':
spark.read.parquet(FILE.word_cloud_data1_uri).groupby(col(COL.token)).count().sort(col(COL.count), ascending=False).show()
# df = get_unprocessed_df()
# df = process_token(df)
# df.write.mode("overwrite").parquet(FILE.word_cloud_data1_uri, compression="gzip")
# if MODE.debug:
# df = df.filter(col(COL.descri).isNotNull())
# df.show()
| 33.857143
| 126
| 0.638282
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 593
| 0.227465
|
b87d4232f38a23242d6a6192e497347e1e6d8428
| 141
|
py
|
Python
|
main/ftpServer.py
|
McUtty/FlowerPlan
|
b0998835356e8e10fe53cad447bc559df2ac7175
|
[
"MIT"
] | null | null | null |
main/ftpServer.py
|
McUtty/FlowerPlan
|
b0998835356e8e10fe53cad447bc559df2ac7175
|
[
"MIT"
] | null | null | null |
main/ftpServer.py
|
McUtty/FlowerPlan
|
b0998835356e8e10fe53cad447bc559df2ac7175
|
[
"MIT"
] | null | null | null |
import uftpd
uftpd.stop()
# uftpd.start([port = 21][, verbose = 1])
uftpd.restart()
# Query the version
# if newer - download the files
| 12.818182
| 41
| 0.687943
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 92
| 0.652482
|
b87d46b05ec2436786cd95db9a677d1f89cf7d59
| 10,672
|
py
|
Python
|
Entradas/views.py
|
ToniIvars/Blog
|
c2d1674c2c1fdf51749f4b014795b507ed93b45e
|
[
"MIT"
] | null | null | null |
Entradas/views.py
|
ToniIvars/Blog
|
c2d1674c2c1fdf51749f4b014795b507ed93b45e
|
[
"MIT"
] | 4
|
2021-03-30T13:26:38.000Z
|
2021-06-10T19:20:56.000Z
|
Entradas/views.py
|
ToniIvars/Blog
|
c2d1674c2c1fdf51749f4b014795b507ed93b45e
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
from django.core.mail import send_mail
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from random import randint
from Entradas.models import entradas_blog, comentarios
from Entradas.forms import FormNuevaEntrada, FormContacto, FormEditarEntrada, SignupForm, LoginForm, FormEditarPerfil
initial_dict_editar={}
initial_dict_crear={}
entrada_a_editar=None
id_entrada=None
username=''
# View 'signup'
codigo_enviado=False
codigo=''
email=''
password=''
def generador_slug(tit):
slug=tit.lower().replace(' ','-').replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u')
return slug
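# Sketch (illustrative only, not part of the original views): generador_slug lower-cases,
# hyphenates and strips the accented vowels handled above.
def _demo_generador_slug():
    return generador_slug('Artículo Número Uno')  # expected 'articulo-numero-uno'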
def generar_codigo():
codigo=''
for i in range(6):
codigo += str(randint(1,9))
return codigo
def set_username(request):
global username
if username=='':
username=request.user.username
# Create your views here.
def error_404(request):
return render(request, '404.html')
def error_500(request):
return render(request, '500.html')
# @login_required
def inicio(request):
set_username(request)
articulos=entradas_blog.objects.order_by('-pk')[:4]
return render(request, 'inicio.html', {'articulos':articulos})
# @login_required
def muestra_entrada(request, slug):
set_username(request)
mostrar=entradas_blog.objects.get(slug=slug)
com=comentarios.objects.filter(entrada=mostrar.titulo)
if request.method=='POST':
autor=request.POST.get('input-autor')
comentario=request.POST.get('input-comentario')
nuevo_com=comentarios(entrada=mostrar.titulo, autor=autor, cuerpo=comentario)
nuevo_com.save()
messages.success(request, 'El comentario ha sido agregado correctamente.')
return redirect('entrada', slug)
return render(request, 'muestra-entrada.html', {'entrada_a_mostrar':mostrar, 'comentarios':com})
# @login_required
def resultados(request):
set_username(request)
if request.GET['input-ent']:
articulos=entradas_blog.objects.filter(titulo__icontains=request.GET['input-ent']).order_by('-pk')
if articulos:
return render(request, 'buscar-entrada.html', {'entradas':articulos})
else:
messages.error(request, 'No se han encontrado entradas.')
return redirect('inicio')
else:
return render(request, 'buscar-entrada.html')
@login_required
def crear_entrada(request):
global initial_dict_crear
set_username(request)
if request.method=='POST':
nueva_entrada=FormNuevaEntrada(request.POST)
if nueva_entrada.is_valid():
info_nueva_entrada=nueva_entrada.cleaned_data
slug_field=generador_slug(info_nueva_entrada['titulo'])
try:
obj=entradas_blog.objects.get(slug=slug_field)
initial_dict_crear = {
'creador':info_nueva_entrada['creador'],
'cuerpo_texto':info_nueva_entrada['cuerpo_texto'],
}
messages.error(request, 'El título de la entrada que intentas crear ya existe.')
return redirect('crear-entrada')
except ObjectDoesNotExist:
ent=entradas_blog(creador=info_nueva_entrada['creador'], titulo=info_nueva_entrada['titulo'], cuerpo=info_nueva_entrada['cuerpo_texto'], slug=slug_field)
ent.save()
initial_dict_crear = {}
return redirect('inicio')
else:
initial_dict_crear={'creador':username}
nueva_entrada=FormNuevaEntrada(initial=initial_dict_crear)
return render(request, 'crear-entrada.html', {'form':nueva_entrada})
@login_required
def editar_entrada(request, entrada):
global initial_dict_editar, entrada_a_editar, id_entrada
set_username(request)
id_entrada = entrada
entrada_a_editar=entradas_blog.objects.get(pk=id_entrada)
initial_dict_editar = {
'creador':entrada_a_editar.creador,
'titulo':entrada_a_editar.titulo,
'cuerpo_texto':entrada_a_editar.cuerpo,
}
if request.method=='POST':
editar_entrada=FormEditarEntrada(request.POST)
if editar_entrada.is_valid():
info_editar_entrada=editar_entrada.cleaned_data
slug_field=generador_slug(info_editar_entrada['titulo'])
try:
obj=entradas_blog.objects.get(slug=slug_field)
if info_editar_entrada['titulo'] == initial_dict_editar['titulo']:
raise ObjectDoesNotExist
else:
messages.error(request, 'El título editado pertenece a otra entrada.')
return redirect('editar-entrada')
except ObjectDoesNotExist:
entradas_blog.objects.filter(pk=id_entrada).update(creador=info_editar_entrada['creador'], titulo=info_editar_entrada['titulo'], cuerpo=info_editar_entrada['cuerpo_texto'], slug=slug_field)
messages.success(request, 'Entrada actualizada correctamente.')
return redirect('perfil', username)
else:
editar_entrada=FormEditarEntrada(initial=initial_dict_editar)
return render(request, 'editar-entrada.html', {'form':editar_entrada})
@login_required
def eliminar_entrada(request, entrada):
set_username(request)
if request.GET.get('input-del'):
entradas_blog.objects.filter(pk=entrada).delete()
messages.success(request, 'Entrada eliminada correctamente.')
return redirect('perfil', username)
elif request.GET.get('nothing'):
return redirect('perfil', username)
return render(request, 'eliminar-entrada.html')
def signup(request):
global username, codigo_enviado, codigo, email, password
if request.method=='POST':
registro=SignupForm(request.POST)
if registro.is_valid():
info_registro=registro.cleaned_data
if ' ' in info_registro['username']:
messages.error(request, 'El nombre de usuario no puede contener espacios.')
return redirect('registro')
else:
username=info_registro['username']
password=info_registro['password']
password2=info_registro['password2']
email=info_registro['email']
try:
user=User.objects.get_by_natural_key(username)
messages.error(request, 'Este usuario ya existe.')
return redirect('registro')
except ObjectDoesNotExist:
if password != password2:
messages.error(request, 'Las contraseñas no coinciden.')
return redirect('registro')
else:
user=User.objects.create_user(username, email, password)
user.save()
login(request, user)
messages.success(request, 'El usuario ha sido creado correctamente.')
return redirect('inicio')
else:
registro=SignupForm()
return render(request, 'signup.html', {'form':registro})
def login_view(request):
global username
if request.method=='POST':
inicio_sesion=LoginForm(request.POST)
if inicio_sesion.is_valid():
info_inicio=inicio_sesion.cleaned_data
username=info_inicio['username']
password=info_inicio['password']
user=authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('inicio')
else:
messages.error(request, 'No ha sido posible iniciar sesión.')
return redirect('iniciar-sesion')
else:
inicio_sesion=LoginForm()
return render(request, 'login.html', {'form':inicio_sesion})
def logout_view(request):
logout(request)
return redirect('iniciar-sesion')
# @login_required
def ver_perfil(request, profile):
articulos=entradas_blog.objects.filter(creador=profile).order_by('-pk')
return render(request, 'ver-perfil.html', {'nombre_usuario':profile, 'articulos':articulos})
@login_required
def perfil(request, profile):
# if profile == username:
articulos=entradas_blog.objects.filter(creador=profile).order_by('-pk')
return render(request, 'perfil.html', {'articulos':articulos})
# else:
# messages.error(request, 'El perfil al que intentas acceder no es el tuyo.')
# return redirect('inicio')
@login_required
def editar_perfil(request, profile):
set_username(request)
if request.method=='POST':
edicion=FormEditarPerfil(request.POST)
if edicion.is_valid():
info_edicion=edicion.cleaned_data
user=User.objects.get_by_natural_key(username)
user.username=info_edicion['username']
user.first_name=info_edicion['first_name']
user.last_name=info_edicion['last_name']
user.email=info_edicion['email']
user.save()
messages.success(request, 'El perfil ha sido actualizado correctamente.')
return redirect('editar-perfil', username)
else:
obj=User.objects.get_by_natural_key(username)
initial_dict_contacto={
'username':obj.username,
'first_name':obj.first_name,
'last_name':obj.last_name,
'email':obj.email,
}
edit_perfil=FormEditarPerfil(initial=initial_dict_contacto)
return render(request, 'editar-perfil.html', {'form':edit_perfil})
@login_required
def actualizar_contra(request, profile):
set_username(request)
if request.method=='POST':
user=User.objects.get_by_natural_key(username)
contra=request.POST.get('input-contra')
contra2=request.POST.get('input-contra2')
if contra == contra2:
user.set_password(contra)
messages.success(request, 'La contraseña ha sido actualizado correctamente.')
return redirect('editar-perfil', username)
else:
messages.error(request, 'Las contraseñas no coinciden.')
return redirect('actualizar-contraseña', username)
return render(request, 'actualizar-contraseña.html')
| 32.048048
| 205
| 0.648801
| 0
| 0
| 0
| 0
| 5,449
| 0.509967
| 0
| 0
| 1,964
| 0.183809
|
b87f562e23be6f95cf850092c0a407380227775e
| 975
|
py
|
Python
|
setup.py
|
remiolsen/anglerfish
|
5caabebf5864180e5552b3e40de3650fc5fcabd6
|
[
"MIT"
] | null | null | null |
setup.py
|
remiolsen/anglerfish
|
5caabebf5864180e5552b3e40de3650fc5fcabd6
|
[
"MIT"
] | 19
|
2019-10-07T11:14:54.000Z
|
2022-03-28T12:36:47.000Z
|
setup.py
|
remiolsen/anglerfish
|
5caabebf5864180e5552b3e40de3650fc5fcabd6
|
[
"MIT"
] | 2
|
2019-05-28T14:15:26.000Z
|
2022-03-28T09:28:44.000Z
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import sys, os
setup(
name='anglerfish',
version='0.4.1',
description='Anglerfish, a tool to demultiplex Illumina libraries from ONT data',
author='Remi-Andre Olsen',
author_email='remi-andre.olsen@scilifelab.se',
url='https://github.com/remiolsen/anglerfish',
license='MIT',
packages = find_packages(),
install_requires=[
'python-levenshtein',
'biopython',
'numpy'
],
scripts=['./anglerfish.py'],
zip_safe=False,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Medical Science Apps."
]
)
| 29.545455
| 85
| 0.645128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 596
| 0.611282
|
b87f7b6b8b386385428ae91baa19206d67341ede
| 600
|
py
|
Python
|
files/urls.py
|
danielchriscarter/part2-django
|
d4adffa3280431151bb8d1c51f0be2dbffff9dd1
|
[
"BSD-2-Clause"
] | null | null | null |
files/urls.py
|
danielchriscarter/part2-django
|
d4adffa3280431151bb8d1c51f0be2dbffff9dd1
|
[
"BSD-2-Clause"
] | null | null | null |
files/urls.py
|
danielchriscarter/part2-django
|
d4adffa3280431151bb8d1c51f0be2dbffff9dd1
|
[
"BSD-2-Clause"
] | null | null | null |
from django.urls import path
from . import views
app_name = 'files'
urlpatterns = [
path('', views.index, name='index'),
path('file/<int:file_id>/', views.fileview, name='file'),
path('file/<int:file_id>/edit', views.fileedit, name='fileedit'),
path('dir/<int:dir_id>/', views.dirview, name='directory'),
path('newfile/<int:dir_id>/', views.newfile, name='newfile'),
path('newdir/<int:dir_id>/', views.newdir, name='newdir'),
path('newdir/root/', views.newdir_root, name='newdir_root'),
path('search', views.search, name='search'),
]
| 37.5
| 73
| 0.62
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 213
| 0.355
|
b8823356abe70dc72971de117d1caaf078936601
| 1,733
|
py
|
Python
|
morpheus/algorithms/kmeans.py
|
amirsh/MorpheusPy
|
8eda959e71a3b377c3f6629802bad2bd4f5a5ee6
|
[
"Apache-2.0"
] | 12
|
2018-10-04T08:27:33.000Z
|
2022-01-11T15:41:29.000Z
|
morpheus/algorithms/kmeans.py
|
amirsh/MorpheusPy
|
8eda959e71a3b377c3f6629802bad2bd4f5a5ee6
|
[
"Apache-2.0"
] | 3
|
2020-09-22T16:18:51.000Z
|
2021-12-28T19:01:00.000Z
|
morpheus/algorithms/kmeans.py
|
amirsh/MorpheusPy
|
8eda959e71a3b377c3f6629802bad2bd4f5a5ee6
|
[
"Apache-2.0"
] | 4
|
2019-12-13T17:52:19.000Z
|
2021-12-17T12:43:44.000Z
|
# Copyright 2018 Side Li and Arun Kumar
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.sparse as sp
from sklearn.base import BaseEstimator, ClusterMixin
class NormalizedKMeans(BaseEstimator, ClusterMixin):
def __init__(self, iterations=20, center_num=5):
self.center_num = center_num
self.iterations = iterations
def fit(self, X, k_center):
self.k_center, self.ya = self.k_means(X, self.iterations, self.center_num, k_center, X.shape[0])
return self
def k_means(self, data, iterations, center_num, k_center, rows):
all_one = np.matrix([1] * rows).T
all_one_k = np.matrix([1] * center_num)
all_one_c = np.matrix([1] * k_center.shape[0]).T
if sp.issparse(data):
t2 = (data.power(2)).sum(axis=1).dot(all_one_k)
else:
t2 = (np.power(data, 2)).sum(axis=1).reshape((-1, 1)) * all_one_k
t22 = data * 2
ya = None
for _ in range(iterations):
dist = t2 - t22 * k_center + all_one * np.power(k_center, 2).sum(axis=0)
ya = (dist == (np.amin(dist, axis=1) * all_one_k))  # row-wise minimum: assign each point to its nearest center
k_center = (data.T * ya) / (all_one_c * ya.sum(axis=0))
return k_center, ya
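# ---------------------------------------------------------------------------
# Usage sketch (not part of MorpheusPy): cluster four 2-D points into two
# groups.  The initial centers are simply the first two data points, passed
# column-wise (n_features x n_centers), which is the layout k_means() expects.
if __name__ == "__main__":
    X = np.matrix([[1.0, 2.0],
                   [1.1, 1.9],
                   [8.0, 8.0],
                   [8.2, 7.9]])
    init_centers = X[:2].T  # shape (n_features, n_centers)
    model = NormalizedKMeans(iterations=10, center_num=2).fit(X, init_centers)
    print(model.k_center)   # fitted centers, one column per cluster
    print(model.ya)         # boolean membership matrix (points x clusters)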
| 40.302326
| 104
| 0.660704
| 1,051
| 0.606463
| 0
| 0
| 0
| 0
| 0
| 0
| 570
| 0.328909
|
b8833e3d9f3a2008bcf62eb119ccbf510334b106
| 796
|
py
|
Python
|
670/main.py
|
pauvrepetit/leetcode
|
6ad093cf543addc4dfa52d72a8e3c0d05a23b771
|
[
"MIT"
] | null | null | null |
670/main.py
|
pauvrepetit/leetcode
|
6ad093cf543addc4dfa52d72a8e3c0d05a23b771
|
[
"MIT"
] | null | null | null |
670/main.py
|
pauvrepetit/leetcode
|
6ad093cf543addc4dfa52d72a8e3c0d05a23b771
|
[
"MIT"
] | null | null | null |
# 670. Maximum Swap
#
# 20200905
# huao
class Solution:
def maximumSwap(self, num: int) -> int:
return int(self.maximumSwapStr(str(num)))
def maximumSwapStr(self, num: str) -> str:
s = list(num)
if len(s) == 1:
return num
maxNum = '0'
maxLoc = 0
for i in range(len(s))[::-1]:
c = s[i]
if maxNum < c:
maxNum = c
maxLoc = i
if s[0] == maxNum:
return maxNum + self.maximumSwapStr(num[1:])
s[maxLoc] = s[0]
s[0] = str(maxNum)
ss = ""
for i in s:
ss += i
return ss
print(Solution().maximumSwap(100))
print(Solution().maximumSwap(2736))
print(Solution().maximumSwap(9973))
print(Solution().maximumSwap(98638))
| 22.742857
| 56
| 0.5
| 616
| 0.766169
| 0
| 0
| 0
| 0
| 0
| 0
| 41
| 0.050995
|
b8860c8f4169552c8561caf03f121aafce628fa6
| 333
|
py
|
Python
|
tests/resources/test_codegen/template.py
|
come2ry/atcoder-tools
|
d7ecf5c19427848e6c8f0aaa3c1a8af04c467f1b
|
[
"MIT"
] | 313
|
2016-12-04T13:25:21.000Z
|
2022-03-31T09:46:15.000Z
|
tests/resources/test_codegen/template.py
|
come2ry/atcoder-tools
|
d7ecf5c19427848e6c8f0aaa3c1a8af04c467f1b
|
[
"MIT"
] | 232
|
2016-12-02T22:55:20.000Z
|
2022-03-27T06:48:02.000Z
|
tests/resources/test_codegen/template.py
|
come2ry/atcoder-tools
|
d7ecf5c19427848e6c8f0aaa3c1a8af04c467f1b
|
[
"MIT"
] | 90
|
2017-09-23T15:09:48.000Z
|
2022-03-17T03:13:40.000Z
|
#!/usr/bin/env python3
import sys
def solve(${formal_arguments}):
return
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
${input_part}
solve(${actual_arguments})
if __name__ == '__main__':
main()
| 16.65
| 37
| 0.588589
| 0
| 0
| 117
| 0.351351
| 0
| 0
| 0
| 0
| 32
| 0.096096
|
b8862a702744111dde08cc354f165d4d573be5a8
| 21,501
|
py
|
Python
|
orghtml.py
|
waynezhang/orgextended
|
853ae89f937d302c2dd9dad3ae98aa5c2485faaa
|
[
"MIT"
] | null | null | null |
orghtml.py
|
waynezhang/orgextended
|
853ae89f937d302c2dd9dad3ae98aa5c2485faaa
|
[
"MIT"
] | null | null | null |
orghtml.py
|
waynezhang/orgextended
|
853ae89f937d302c2dd9dad3ae98aa5c2485faaa
|
[
"MIT"
] | null | null | null |
import sublime
import sublime_plugin
import datetime
import re
import regex
from pathlib import Path
import os
import fnmatch
import OrgExtended.orgparse.node as node
from OrgExtended.orgparse.sublimenode import *
import OrgExtended.orgutil.util as util
import OrgExtended.orgutil.navigation as nav
import OrgExtended.orgutil.template as templateEngine
import logging
import sys
import traceback
import OrgExtended.orgfolding as folding
import OrgExtended.orgdb as db
import OrgExtended.asettings as sets
import OrgExtended.orgcapture as capture
import OrgExtended.orgproperties as props
import OrgExtended.orgutil.temp as tf
import OrgExtended.pymitter as evt
import OrgExtended.orgnotifications as notice
import OrgExtended.orgextension as ext
import OrgExtended.orgsourceblock as src
import OrgExtended.orgexporter as exp
import yaml
import subprocess
import html
log = logging.getLogger(__name__)
# Global properties I AT LEAST want to support.
# Both as a property on the document and in our settings.
#+OPTIONS: num:nil toc:nil
#+REVEAL_TRANS: None/Fade/Slide/Convex/Concave/Zoom
#+REVEAL_THEME: Black/White/League/Sky/Beige/Simple/Serif/Blood/Night/Moon/Solarized
#+Title: Title of Your Talk
#+Author: Your Name
#+Email: Your Email Address or Twitter Handle
def GetCollapsibleCodeOld():
return """
var coll = document.getElementsByClassName("collapsible");
var i;
for (i = 0; i < coll.length; i++) {
coll[i].addEventListener("click", function() {
this.classList.toggle("active");
var content = this.nextElementSibling;
if (content.style.display === "block") {
content.style.display = "none";
} else {
content.style.display = "block";
}
});
}
"""
def GetCollapsibleCode():
return """
var coll = document.getElementsByClassName("collapsible");
var i;
var accume = 0;
for (i = 0; i < coll.length; i++) {
coll[i].addEventListener("click", function() {
this.classList.toggle("active");
var content = this.nextElementSibling;
if (content.style.maxHeight) {
content.style.maxHeight = null;
} else {
content.style.maxHeight = content.scrollHeight + "px";
}
accume += content.scrollHeight + 5;
while(content.parentNode && (content.parentNode.nodeName == 'DIV' || content.parentNode.nodeName == 'SECTION')) {
if(content.parentNode.nodeName == 'DIV') {
//alert(content.parentNode.nodeName);
content.parentNode.style.maxHeight = (accume + content.parentNode.scrollHeight) + "px";
accume += content.parentNode.scrollHeight;
}
content = content.parentNode;
}
});
coll[i].click();
}
"""
def GetCollapsibleCss():
return """
.node-body {
padding: 0 18px;
max-height: 0;
overflow: hidden;
transition: max-height 0.2s ease-out;
}
.active, .collapsible:hover {
background-color: #ccc;
}
.collapsible:after {
content: '\\002b';
font-size: 22px;
float: right;
margin-right: 20px;
}
.active:after {
content: '\\2212';
font-size: 22px;
margin-right: 20px;
}
"""
RE_CAPTION = regex.compile(r"^\s*[#][+]CAPTION[:]\s*(?P<caption>.*)")
RE_ATTR = regex.compile(r"^\s*[#][+]ATTR_HTML[:](?P<params>\s+[:](?P<name>[a-zA-Z0-9._-]+)\s+(?P<value>([^:]|((?<! )[:]))+))+$")
RE_ATTR_ORG = regex.compile(r"^\s*[#][+]ATTR_ORG[:] ")
RE_SCHEDULING_LINE = re.compile(r"^\s*(SCHEDULED|CLOSED|DEADLINE|CLOCK)[:].*")
RE_DRAWER_LINE = re.compile(r"^\s*[:].+[:]\s*$")
RE_END_DRAWER_LINE = re.compile(r"^\s*[:](END|end)[:]\s*$")
RE_LINK = re.compile(r"\[\[(?P<link>[^\]]+)\](\[(?P<desc>[^\]]+)\])?\]")
RE_UL = re.compile(r"^(?P<indent>\s*)(-|[+])\s+(?P<data>.+)")
RE_STARTQUOTE = re.compile(r"#\+(BEGIN_QUOTE|BEGIN_EXAMPLE|BEGIN_VERSE|BEGIN_CENTER|begin_quote|begin_example|begin_verse|begin_center)")
RE_ENDQUOTE = re.compile(r"#\+(END_QUOTE|END_EXAMPLE|END_VERSE|END_CENTER|end_quote|end_example|end_verse|end_center)")
RE_STARTNOTE = re.compile(r"#\+(BEGIN_NOTES|begin_notes)")
RE_ENDNOTE = re.compile(r"#\+(END_NOTES|end_notes)")
RE_FN_MATCH = re.compile(r"\s*[:]([a-zA-Z0-9-_]+)\s+([^: ]+)?\s*")
RE_STARTSRC = re.compile(r"^\s*#\+(BEGIN_SRC|begin_src|BEGIN:|begin:)\s+(?P<lang>[a-zA-Z0-9]+)")
RE_ENDSRC = re.compile(r"^\s*#\+(END_SRC|end_src|end:|END:)")
RE_RESULTS = re.compile(r"^\s*#\+RESULTS.*")
RE_TABLE_ROW = re.compile(r"^\s*[|]")
RE_TABLE_SEPARATOR = re.compile(r"^\s*[|][-]")
RE_CHECKBOX = re.compile(r"^\[ \] ")
RE_CHECKED_CHECKBOX = re.compile(r"^\[[xX]\] ")
RE_PARTIAL_CHECKBOX = re.compile(r"^\[[-]\] ")
RE_EMPTY_LINE = re.compile(r"^\s*$")
# <!-- multiple_stores height="50%" width="50%" -->
RE_COMMENT_TAG = re.compile(r"^\s*[<][!][-][-]\s+(?P<name>[a-zA-Z0-9_-]+)\s+(?P<props>.*)\s+[-][-][>]")
def mapLanguage(lang):
if(lang == 'html'):
return 'language-html'
elif(lang == 'python'):
return 'language-python'
else:
return lang
def GetStyleRelatedData(style, extension):
inHeader = os.path.join(sublime.packages_path(),"User", "htmlstyles", style + extension)
if(os.path.isfile(inHeader)):
with open(inHeader) as f:
contents = f.read()
return contents
resourceName = "Packages/OrgExtended/htmlstyles/" + style + extension
try:
contents = sublime.load_resource(resourceName)
return contents
except:
pass
#inHeader = os.path.join(sublime.packages_path(),"OrgExtended", "htmlstyles", style + extension)
#if(os.path.isfile(inHeader)):
# with open(inHeader) as f:
# contents = f.read()
# return contents
return ""
def GetStyleRelatedPropertyData(file, key, setting):
val = exp.GetGlobalOption(file, key, setting, "")
if("<" in val or "{" in val):
return val
elif(os.path.isfile(val)):
with open(val) as f:
contents = f.read()
return contents
else:
return val
def GetHighlightJsCss(style):
import OrgExtended.orgutil.webpull as wp
wp.download_highlightjs()
data = os.path.join(sublime.packages_path(),"User", "highlightjs", "styles", style + ".css")
if(os.path.isfile(data)):
with open(data) as f:
contents = f.read()
return contents
def GetHighlightJs():
import OrgExtended.orgutil.webpull as wp
wp.download_highlightjs()
data = os.path.join(sublime.packages_path(),"User", "highlightjs", "highlight.pack.js")
if(os.path.isfile(data)):
with open(data) as f:
contents = f.read()
return contents
def GetHeaderData(style, file):
d1 = GetStyleRelatedData(style,"_inheader.html")
d2 = GetStyleRelatedPropertyData(file, "HtmlInHeader", "HTML_INHEADER")
return d1 + d2
def GetHeadingData(style, file):
d1 = GetStyleRelatedData(style,"_heading.html")
d2 = GetStyleRelatedPropertyData(file, "HtmlHeading", "HTML_HEADING")
return d1 + d2
def GetFootingData(style, file):
d1 = GetStyleRelatedData(style,"_footing.html")
d2 = GetStyleRelatedPropertyData(file, "HtmlFooting", "HTML_FOOTING")
return d1 + d2
def GetStyleData(style, file):
d1 = GetStyleRelatedData(style,".css")
d2 = GetStyleRelatedPropertyData(file, "HtmlCss", "HTML_CSS")
return d1 + d2
class HtmlDoc(exp.OrgExporter):
def __init__(self, filename, file,**kwargs):
super(HtmlDoc, self).__init__(filename, file, **kwargs)
self.fs.write("<!DOCTYPE html>\n")
self.fs.write("<!-- exported by orgextended html exporter -->\n")
if(self.language):
self.fs.write("<html xmlns=\"http://www.w3.org/1999/xhtml\" lang=\"{language}\" xml:lang=\"{language}\">".format(language=self.language))
else:
self.fs.write("<html lang=\"en\" class>\n")
self.commentName = None
self.figureIndex = 1
self.tableIndex = 1
def AddJs(self,link):
self.fs.write(" <script type=\"text/javascript\" src=\"" + link + "\"></script>\n")
def AddStyle(self,link):
self.fs.write(" <link rel=\"stylesheet\" href=\""+link+"\"></link>\n")
def AddInlineStyle(self,content):
# <style>
# BLOCK
# </style>
self.fs.write(" <style>\n{0}\n </style>\n".format(content))
def InsertJs(self,content):
# <style>
# BLOCK
# </style>
self.fs.write(" <script>\n{0}\n </script>\n".format(content))
def StartHead(self):
self.fs.write(" <head>\n")
def EndHead(self):
data = GetHeaderData(self.style, self.file)
self.fs.write(data)
self.fs.write(" </head>\n")
def AddExportMetaCustom(self):
if(self.title):
self.fs.write("<title>{title}</title>".format(title=self.title))
if(self.author):
self.fs.write("<meta name=\"author\" content=\"{author}\" />".format(author=self.author))
def StartDocument(self, file):
self.fs.write(" <div class=\"ready\">\n")
def EndDocument(self):
self.fs.write(" </div>\n")
def StartNodes(self):
self.fs.write(" <div class=\"orgmode\">\n")
def EndNodes(self):
self.fs.write(" </div>\n")
# Per slide properties
#:PROPERTIES:
#:css_property: value
#:END:
def StartNode(self,n):
properties = ""
for prop in n.properties:
properties = "{0} {1}=\"{2}\"".format(properties, prop, n.properties[prop])
self.fs.write(" <section {0}>\n".format(properties))
def StartNodeBody(self, n):
level = n.level + 1
self.fs.write(" <div class=\"node-body h{level}\">\n".format(level=level))
def EndNodeBody(self,n):
self.fs.write(" </div>\n")
def EndNode(self,n):
self.fs.write(" </section>\n")
def NodeHeading(self,n):
heading = html.escape(n.heading)
level = n.level + 1
self.fs.write(" <h{level} class=\"collapsible\">{heading}</h{level}>\n".format(level=level,heading=heading))
def ClearAttributes(self):
self.attrs = {}
self.caption = None
def AttributesGather(self, l):
if(self.PreScanExportCommentsGather(l)):
return True
m = RE_CAPTION.match(l)
if(not hasattr(self, 'caption')):
self.caption = None
if(m):
self.caption = m.captures('caption')[0]
return True
m = RE_ATTR.match(l)
# We capture #+ATTR_HTML: lines
if(m):
keys = m.captures('name')
vals = m.captures('value')
if not hasattr(self,'attrs'):
self.attrs = {}
for i in range(len(keys)):
self.attrs[keys[i]] = vals[i]
return True
# We skip #+ATTR_ORG: lines
m = RE_ATTR_ORG.match(l)
if(m):
return True
return False
def EscAndLinks(self, l):
line = html.escape(l)
m = RE_LINK.search(line)
if(m):
link = m.group('link').strip()
desc = m.group('desc')
if(not desc):
desc = link
else:
desc = desc.strip()
if(link.endswith(".png") or link.endswith(".jpg") or link.endswith(".jpeg") or link.endswith(".gif")):
if(link.startswith("file:")):
link = re.sub(r'^file:','',link)
extradata = ""
if(self.commentName and self.commentName in link):
extradata = " " + self.commentData
self.commentName = None
if(hasattr(self,'attrs')):
for key in self.attrs:
extradata += " " + str(key) + "=\"" + str(self.attrs[key]) + "\""
preamble = ""
postamble = ""
if(hasattr(self,'caption') and self.caption):
preamble = "<div class=\"figure\"><p>"
postamble = "</p><p><span class=\"figure-number\">Figure {index}: </span>{caption}</p></div>".format(index=self.figureIndex,caption=self.caption)
self.figureIndex += 1
line = RE_LINK.sub("{preamble}<img src=\"{link}\" alt=\"{desc}\"{extradata}>{postamble}".format(preamble=preamble,link=link,desc=desc,extradata=extradata,postamble=postamble),line)
self.ClearAttributes()
else:
line = RE_LINK.sub("<a href=\"{link}\">{desc}</a>".format(link=link,desc=desc),line)
self.ClearAttributes()
else:
line = exp.RE_BOLD.sub(r"<b>\1</b>",line)
line = exp.RE_ITALICS.sub(r"<i>\1</i>",line)
line = exp.RE_UNDERLINE.sub(r"<u>\1</u>",line)
line = exp.RE_STRIKETHROUGH.sub(r"<strike>\1</strike>",line)
line = exp.RE_VERBATIM.sub(r"<pre>\1</pre>",line)
line = exp.RE_CODE.sub(r"<code>\1</code>",line)
line = RE_STARTQUOTE.sub(r"<blockquote>",line)
line = RE_ENDQUOTE.sub(r"</blockquote>",line)
line = RE_STARTNOTE.sub(r'<aside class="notes">',line)
line = RE_ENDNOTE.sub(r"</aside>",line)
line = RE_CHECKBOX.sub(r'<input type="checkbox">',line)
line = RE_CHECKED_CHECKBOX.sub(r'<input type="checkbox" checked>',line)
if(sets.Get("htmlExportPartialCheckboxChecked",True)):
line = RE_PARTIAL_CHECKBOX.sub(r'<input type="checkbox" checked>',line)
else:
line = RE_PARTIAL_CHECKBOX.sub(r'<input type="checkbox">',line)
line = exp.RE_HR.sub(r'<hr>',line)
return line
def NodeBody(self,slide):
inDrawer = False
inResults= False
inUl = 0
ulIndent = 0
inTable = False
haveTableHeader = False
inSrc = False
skipSrc = False
exp = None
for l in slide._lines[1:]:
if(self.AttributesGather(l)):
continue
if(inResults):
if(l.strip() == ""):
inResults = False
elif(RE_ENDSRC.search(l) or RE_END_DRAWER_LINE.search(l)):
inResults = False
continue
if(inResults):
if(exp == 'code' or exp == 'none'):
continue
else:
line = self.EscAndLinks(l)
self.fs.write(" " + line + "\n")
continue
if(inDrawer):
if(RE_END_DRAWER_LINE.search(l)):
inDrawer = False
continue
if(inTable):
if(RE_TABLE_ROW.search(l)):
if(RE_TABLE_SEPARATOR.search(l)):
continue
else:
tds = l.split('|')
if(len(tds) > 3):
# An actual table row, build a row
self.fs.write(" <tr>\n")
for td in tds[1:-1]:
if(haveTableHeader):
self.fs.write(" <td>{0}</td>\n".format(self.EscAndLinks(td)))
else:
self.fs.write(" <th>{0}</th>\n".format(self.EscAndLinks(td)))
haveTableHeader = True
# Fill in the tds
self.fs.write(" </tr>\n")
continue
else:
self.fs.write(" </table>\n")
inTable = False
haveTableHeader = False
if(inSrc):
if(RE_ENDSRC.search(l)):
inSrc = False
if(skipSrc):
skipSrc = False
continue
self.fs.write(" </code></pre>\n")
continue
else:
if(not skipSrc):
self.fs.write(" " + l + "\n")
continue
# src block
m = RE_STARTSRC.search(l)
if(m):
inSrc = True
language = m.group('lang')
paramstr = l[len(m.group(0)):]
p = type('', (), {})()
src.BuildFullParamList(p,language,paramstr,slide)
exp = p.params.Get("exports",None)
if(isinstance(exp,list) and len(exp) > 0):
exp = exp[0]
if(exp == 'results' or exp == 'none'):
skipSrc = True
continue
# Some languages we skip source by default
skipLangs = sets.Get("htmlDefaultSkipSrc",[])
if(exp == None and language == skipLangs):
skipSrc = True
continue
#params = {}
#for ps in RE_FN_MATCH.finditer(paramstr):
# params[ps.group(1)] = ps.group(2)
attribs = ""
# This is left over from reveal.
if(p.params.Get("data-line-numbers",None)):
attribs += " data-line-numbers=\"{nums}\"".format(nums=p.params.Get("data-line-numbers",""))
self.fs.write(" <pre><code language=\"{lang}\" {attribs}>\n".format(lang=mapLanguage(language),attribs=attribs))
continue
# property drawer
if(RE_DRAWER_LINE.search(l)):
inDrawer = True
continue
# scheduling
if(RE_SCHEDULING_LINE.search(l)):
continue
if(RE_RESULTS.search(l)):
inResults = True
continue
m = RE_COMMENT_TAG.search(l)
if(m):
self.commentData = m.group('props')
self.commentName = m.group('name')
continue
m = RE_TABLE_ROW.search(l)
if(m):
self.fs.write(" <table>\n")
if(hasattr(self,'caption') and self.caption):
self.fs.write(" <caption class=\"t-above\"><span class=\"table-number\">Table {index}:</span>{caption}</caption>".format(index=self.tableIndex,caption=self.caption))
self.tableIndex += 1
self.ClearAttributes()
if(not RE_TABLE_SEPARATOR.search(l)):
tds = l.split('|')
if(len(tds) > 3):
self.fs.write(" <tr>\n")
for td in tds[1:-1]:
self.fs.write(" <th>{0}</th>".format(self.EscAndLinks(td)))
self.fs.write(" </tr>\n")
haveTableHeader = True
inTable = True
continue
m = RE_UL.search(l)
if(m):
thisIndent = len(m.group('indent'))
if(not inUl):
ulIndent = thisIndent
self.fs.write(" <ul>\n")
inUl += 1
elif(thisIndent > ulIndent):
ulIndent = thisIndent
self.fs.write(" <ul>\n")
inUl += 1
elif(thisIndent < ulIndent and inUl > 1):
inUl -= 1
self.fs.write(" </ul>\n")
data = self.EscAndLinks(m.group('data'))
self.fs.write(" <li>{content}</li>\n".format(content=data))
continue
elif(inUl):
while(inUl > 0):
inUl -= 1
self.fs.write(" </ul>\n")
if(RE_EMPTY_LINE.search(l)):
self.fs.write(" <br>\n")
# Normal Write
line = self.EscAndLinks(l)
self.fs.write(" " + line + "\n")
if(inUl):
inUl -= 1
self.fs.write(" </ul>\n")
pass
def StartBody(self):
self.fs.write(" <body>\n")
data = GetHeadingData(self.style, self.file)
if(data):
self.fs.write(data)
def EndBody(self):
data = GetFootingData(self.style, self.file)
if(data):
self.fs.write(data)
self.fs.write(" </body>\n")
def InsertScripts(self,file):
self.InsertJs(GetHighlightJs())
self.fs.write("<script>hljs.initHighlightingOnLoad();</script>\n")
self.InsertJs(GetCollapsibleCode())
def Postamble(self):
self.fs.write("<div id=\"postamble\" class=\"status\">")
if(self.date):
self.fs.write("<p class=\"date\">Date: {date}</p>".format(date=self.date))
if(self.author):
self.fs.write("<p class=\"author\">Author: {author}</p>".format(author=self.author))
self.fs.write("<p class=\"date\">Created: {date}</p>".format(date=str(datetime.datetime.now())))
self.fs.write("</div>")
def FinishDocCustom(self):
self.Postamble()
self.fs.write("</html>\n")
class HtmlExportHelper(exp.OrgExportHelper):
def __init__(self,view,index):
super(HtmlExportHelper,self).__init__(view,index)
def CustomBuildHead(self):
highlight = exp.GetGlobalOption(self.file,"HTML_HIGHLIGHT","HtmlHighlight","zenburn").lower()
self.doc.AddInlineStyle(GetHighlightJsCss(highlight))
self.doc.AddInlineStyle(GetCollapsibleCss())
self.doc.AddInlineStyle(GetStyleData(self.doc.style, self.file))
# Export the entire file using our internal exporter
class OrgExportFileOrgHtmlCommand(sublime_plugin.TextCommand):
def OnDoneSourceBlockExecution(self):
# Reload if necessary
self.file = db.Get().FindInfo(self.view)
doc = None
self.style = exp.GetGlobalOption(self.file,"HTML_STYLE","HtmlStyle","blocky").lower()
log.log(51,"EXPORT STYLE: " + self.style)
try:
outputFilename = exp.ExportFilename(self.view,".html", self.suffix)
doc = HtmlDoc(outputFilename, self.file)
doc.style = self.style
self.helper = HtmlExportHelper(self.view, self.index)
self.helper.Run(outputFilename, doc)
finally:
evt.EmitIf(self.onDone)
def run(self,edit, onDone=None, index=None, suffix=""):
self.file = db.Get().FindInfo(self.view)
self.onDone = onDone
self.suffix = suffix
if(index != None):
self.index = index
else:
self.index = None
if(None == self.file):
log.error("Not an org file? Cannot build reveal document")
evt.EmitIf(onDone)
return
if(sets.Get("htmlExecuteSourceOnExport",True)):
self.view.run_command('org_execute_all_source_blocks',{"onDone":evt.Make(self.OnDoneSourceBlockExecution),"amExporting": True})
else:
self.OnDoneSourceBlockExecution()
def sync_up_on_closed():
notice.Get().BuildToday()
class OrgDownloadHighlighJs(sublime_plugin.TextCommand):
def run(self,edit):
log.info("Trying to download highlightjs")
import OrgExtended.orgutil.webpull as wp
wp.download_highlightjs()
class OrgExportSubtreeAsOrgHtmlCommand(sublime_plugin.TextCommand):
def OnDone(self):
evt.EmitIf(self.onDone)
def run(self,edit,onDone=None):
self.onDone = onDone
n = db.Get().AtInView(self.view)
if(n == None):
log.error(" Failed to find node! Subtree cannot be exported!")
return
index = 0
for i in range(0,len(n.env._nodes)):
if(n == n.env._nodes[i]):
index = i
if(index == 0):
log.error(" Failed to find node in file! Something is wrong. Cannot export subtree!")
return
self.view.run_command('org_export_file_org_html', {"onDone": evt.Make(self.OnDone), "index": index, "suffix":"_subtree"})
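# ---------------------------------------------------------------------------
# Aside (not part of the plugin): what the RE_LINK pattern defined near the
# top of this file extracts from an org-mode link.  Since this module imports
# the Sublime Text API it cannot be run outside the editor, so the example is
# left as a comment; it needs nothing beyond `re` and the pattern itself.
#
#   import re
#   RE_LINK = re.compile(r"\[\[(?P<link>[^\]]+)\](\[(?P<desc>[^\]]+)\])?\]")
#   m = RE_LINK.search("[[https://orgmode.org][Org mode]]")
#   m.group('link')   # 'https://orgmode.org'
#   m.group('desc')   # 'Org mode'
#   RE_LINK.search("[[file:diagram.png]]").group('desc')   # None -> bare link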
| 33.180556
| 188
| 0.612576
| 14,358
| 0.667783
| 0
| 0
| 0
| 0
| 0
| 0
| 6,740
| 0.313474
|
b887416d23a942756c48311820dd05ec2e0e80d6
| 3,519
|
py
|
Python
|
qso_toolbox/LBT_MODS_script.py
|
jtschindler/qso_toolbox
|
d9864e0f87e0da3952b75949a7b17ae84ba7b839
|
[
"MIT"
] | null | null | null |
qso_toolbox/LBT_MODS_script.py
|
jtschindler/qso_toolbox
|
d9864e0f87e0da3952b75949a7b17ae84ba7b839
|
[
"MIT"
] | null | null | null |
qso_toolbox/LBT_MODS_script.py
|
jtschindler/qso_toolbox
|
d9864e0f87e0da3952b75949a7b17ae84ba7b839
|
[
"MIT"
] | null | null | null |
import os
import pandas as pd
from qso_toolbox import utils as ut
from qso_toolbox import catalog_tools as ct
targets = pd.read_csv('/Users/schindler/Observations/LBT/MODS/190607-190615/lukas_efficiency_candidates.csv')
offsets = pd.read_csv('')
# query = 'rMeanPSFMag - rMeanApMag < 0.05 and 10 < zMeanPSFMag < 18'
# offsets = ct.get_offset_stars(targets, 'name', 'ps_ra', 'ps_dec', radius=300,
# quality_query=query)
#
# offsets.to_csv('lukas_offsets.csv', index=False)
os.system('modsProject -p LBTB PS1-QSO-LBTMODS')
os.chdir('./PS1-QSO-LBTMODS')
# Create observation and acquisition scripts
coord_list = ut.coord_to_hmsdms(targets['ps_ra'], targets['ps_dec'])
for idx in targets.index:
target_name = targets.loc[idx,'name']
target_mag = targets.loc[idx, 'zmag_AB']
target_priority = targets.loc[idx, 'priority']
# pos_angle =  (slit position angle: never assigned in this script, but used by make_acq_string below, so it must be set here before running)
if target_mag <= 20:
exp_time = 900
else:
exp_time = 1200
make_obs_string = "mkMODSObs -o {} -m red grating -s LS5x60x1.2 -l 1.2 " \
"-rfilter GG495 -e {} -n 1 {}_pr{}".format(target_name,
exp_time,
target_name,
target_priority)
print(make_obs_string)
os.system(make_obs_string)
target_ra_hms = coord_list[idx][0]
target_dec_dms = coord_list[idx][1]
make_acq_string = "mkMODSAcq -o {} -c '{} {}' -g '{} {}' -p {} -m " \
"longslit -a Red -f z_sdss -s LS5x60x1.2 -l 1.2 {}_pr{}".format(target_name,
target_ra_hms,
target_dec_dms,
target_ra_hms,
target_dec_dms,
pos_angle,
target_name,
target_priority)
print(make_acq_string)
os.system(make_acq_string)
# Create the blind offset acquisition scripts
for idx in targets.index:
target_name = targets.loc[idx,'name']
target_priority = targets.loc[idx, 'priority']
acq_filename = '{}_pr{}.acq'.format(target_name, target_priority)
blind_acq_filename = '{}_pr{}_blind.acq'.format(target_name,
target_priority)
target_offsets = offsets.query('target_name=="{}"'.format(target_name))
if target_offsets.shape[0] > 0 :
# Take first offset
dra = target_offsets.loc[target_offsets.index[0], 'dra_offset']
ddec = target_offsets.loc[target_offsets.index[0], 'ddec_offset']
file = open('./{}'.format(acq_filename), 'r')
file_lines = file.readlines()[:-2]
file.close()
new_acq_file = open('./{}'.format(blind_acq_filename), 'w')
for line in file_lines:
new_acq_file.write(line)
new_acq_file.write(" PAUSE\n")
new_acq_file.write(" syncoffset\n")
new_acq_file.write(" PAUSE\n")
new_acq_file.write(" OFFSET {} {} rel\n".format(dra, ddec))
new_acq_file.write(" UPDATEPOINTING\n")
new_acq_file.write(" SlitGO\n")
new_acq_file.write(" PAUSE\n")
new_acq_file.write("\n")
new_acq_file.write("end\n")
new_acq_file.close()
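# ---------------------------------------------------------------------------
# Aside (illustration only, not qso_toolbox code): the hms/dms strings that
# coord_to_hmsdms supplies above can be cross-checked with astropy if needed.
# The coordinates below are made-up values.
#
#   from astropy.coordinates import SkyCoord
#   import astropy.units as u
#   c = SkyCoord(ra=150.1 * u.deg, dec=2.2 * u.deg)
#   c.to_string('hmsdms', sep=':')   # roughly '10:00:24 +02:12:00'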
| 37.43617
| 109
| 0.545041
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 974
| 0.276783
|
b8876be3ac0b9f4743f3b55d348997ace9a6d95d
| 1,451
|
py
|
Python
|
EASTAR/main/migrations/0008_auto_20191005_0012.py
|
DightMerc/EASTAR
|
04a3578932f8b4b842e0898513ef279c2f750f48
|
[
"Apache-2.0"
] | 1
|
2020-09-21T16:46:19.000Z
|
2020-09-21T16:46:19.000Z
|
EASTAR/main/migrations/0008_auto_20191005_0012.py
|
DightMerc/EASTAR
|
04a3578932f8b4b842e0898513ef279c2f750f48
|
[
"Apache-2.0"
] | null | null | null |
EASTAR/main/migrations/0008_auto_20191005_0012.py
|
DightMerc/EASTAR
|
04a3578932f8b4b842e0898513ef279c2f750f48
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.6 on 2019-10-04 19:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0007_auto_20191005_0011'),
]
operations = [
migrations.AddField(
model_name='technologies',
name='textAddEN',
field=models.TextField(default=None, verbose_name='Дополнительный текст EN'),
preserve_default=False,
),
migrations.AddField(
model_name='technologies',
name='textAddRU',
field=models.TextField(default=None, verbose_name='Дополнительный текст RU'),
preserve_default=False,
),
migrations.AddField(
model_name='technologies',
name='textAddUZ',
field=models.TextField(default=None, verbose_name='Дополнительный текст UZ'),
preserve_default=False,
),
migrations.AlterField(
model_name='technologies',
name='textEN',
field=models.TextField(verbose_name='Описание EN'),
),
migrations.AlterField(
model_name='technologies',
name='textRU',
field=models.TextField(verbose_name='Описание RU'),
),
migrations.AlterField(
model_name='technologies',
name='textUZ',
field=models.TextField(verbose_name='Описание UZ'),
),
]
| 30.87234
| 89
| 0.583046
| 1,439
| 0.939295
| 0
| 0
| 0
| 0
| 0
| 0
| 414
| 0.270235
|
b887c62ca86e34d3408f8bf9208020ecb0064fd5
| 152
|
py
|
Python
|
archived-stock-trading-bot-v1/utils/alerts.py
|
Allcallofduty10/stock-trading-bot
|
54e608b3c0b95b87e7753b065307fc23a045e230
|
[
"MIT"
] | 101
|
2020-05-20T02:17:45.000Z
|
2022-03-31T12:22:09.000Z
|
archived-stock-trading-bot-v1/utils/alerts.py
|
Allcallofduty10/stock-trading-bot
|
54e608b3c0b95b87e7753b065307fc23a045e230
|
[
"MIT"
] | 10
|
2020-09-02T14:55:12.000Z
|
2022-02-21T08:50:48.000Z
|
archived-stock-trading-bot-v1/utils/alerts.py
|
Allcallofduty10/stock-trading-bot
|
54e608b3c0b95b87e7753b065307fc23a045e230
|
[
"MIT"
] | 33
|
2021-02-13T15:38:51.000Z
|
2022-03-21T10:39:15.000Z
|
import os
from sys import platform
def say_beep(n: int):
for i in range(0, n):
if platform == "darwin":
os.system("say beep")
| 16.888889
| 33
| 0.578947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 18
| 0.118421
|
b88943d6ad79d038afe5d44eee2909d9f74948cd
| 1,685
|
py
|
Python
|
tests/test_company_apis.py
|
elaoshi/my_planet_flask_api_backend_with_mongo
|
7795034a14783a15772fae649c4f2c918b4b36f0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_company_apis.py
|
elaoshi/my_planet_flask_api_backend_with_mongo
|
7795034a14783a15772fae649c4f2c918b4b36f0
|
[
"Apache-2.0"
] | 3
|
2020-04-02T23:48:46.000Z
|
2021-06-10T22:43:22.000Z
|
tests/test_company_apis.py
|
elaoshi/my_planet_flask_api_backend_with_mongo
|
7795034a14783a15772fae649c4f2c918b4b36f0
|
[
"Apache-2.0"
] | null | null | null |
from starlette.testclient import TestClient
import pytest,os
# from server.app import app
import json
import requests
from faker import Faker
fake = Faker()
# The root url of the flask app
url = 'http://127.0.0.1:5000/employee'
@pytest.mark.skip(reason="long time running")
def test_company_api_by_name_not_exists():
for i in range(1,40):
fake = Faker()
campany_name = fake.name()
surl = url+"/company_name/" + campany_name
r = requests.get(surl)
assert r.status_code == 400
def test_company_api_by_name_lowercase():
campany_name = "ZENTRY"
surl = url+"/company_name/" + campany_name
r = requests.get(surl)
assert r.status_code == 200
assert len(r.json()['data']) > 0
def test_company_api_by_name_normal():
campany_name = "zentry"
surl = url+"/company_name/" + campany_name
r = requests.get(surl)
assert r.status_code == 200
assert len(r.json()['data']) > 0
def test_company_by_id_successful():
campany_id = "18"
surl = url+"/company_id/" + campany_id
r = requests.get(surl)
assert r.status_code == 200
assert len(r.json()['data']) > 0
def test_company_api_by_name_not_exists2():
campany_name = "-1"
surl = url+"/company_name/" + campany_name
r = requests.get(surl)
assert r.status_code == 400
def test_company_by_id_not_exists1():
# for i in range(1,10):
fake = Faker()
campany_id = "%s"%(fake.random_int(2999,9999))
# campany_id = "18"
surl = url+"/company_id/" + campany_id
r = requests.get(surl)
assert r.status_code == 204
# assert len(r.json()['data']) > 0
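# ---------------------------------------------------------------------------
# Usage sketch (not in the original file): these tests assume the Flask app is
# already serving at http://127.0.0.1:5000.  With that server running, the
# file can also be executed directly, handing itself to pytest:
if __name__ == "__main__":
    pytest.main([__file__, "-v"])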
| 23.402778
| 54
| 0.636202
| 0
| 0
| 0
| 0
| 291
| 0.1727
| 0
| 0
| 324
| 0.192285
|
b889e8215d671ac9152cd6ccc561184f07b5f430
| 9,491
|
py
|
Python
|
doc2json/grobid2json/grobid/grobid_client.py
|
josephcc/s2orc-doc2json
|
8a6a21b7a8a3c6ad11cd42bdd0d46ee32a5a990d
|
[
"Apache-2.0"
] | null | null | null |
doc2json/grobid2json/grobid/grobid_client.py
|
josephcc/s2orc-doc2json
|
8a6a21b7a8a3c6ad11cd42bdd0d46ee32a5a990d
|
[
"Apache-2.0"
] | null | null | null |
doc2json/grobid2json/grobid/grobid_client.py
|
josephcc/s2orc-doc2json
|
8a6a21b7a8a3c6ad11cd42bdd0d46ee32a5a990d
|
[
"Apache-2.0"
] | null | null | null |
import os
import io
import json
import argparse
import time
import glob
from doc2json.grobid2json.grobid.client import ApiClient
import ntpath
from typing import List
'''
This version uses the standard ProcessPoolExecutor to parallelize the concurrent calls to the GROBID services.
Given the limits of ThreadPoolExecutor (input stored in memory, Executor.map blocking until the whole input
is acquired), it works with batches of PDFs of the size indicated in the config.json file (default is 1000 entries).
We only move from one batch to the next once the current batch is entirely processed, which is slightly
sub-optimal but should scale better. However, acquiring a list of millions of files across directories would
also require something scalable, which is not implemented for the moment.
'''
SERVER = 'localhost'
if 'GROBID_URL' in os.environ:
SERVER = os.environ['GROBID_URL']
DEFAULT_GROBID_CONFIG = {
"grobid_server": SERVER,
"grobid_port": "8070",
"batch_size": 1000,
"sleep_time": 5,
"generateIDs": False,
"consolidate_header": False,
"consolidate_citations": False,
"include_raw_citations": True,
"segment_sentences": True,
"include_coordinates": ['s', 'bib', 'biblStruct', 'ref'],
"include_raw_affiliations": False,
"max_workers": 2,
}
class GrobidClient(ApiClient):
def __init__(self, config=None):
self.config = config or DEFAULT_GROBID_CONFIG
self.generate_ids = self.config["generateIDs"]
self.consolidate_header = self.config["consolidate_header"]
self.consolidate_citations = self.config["consolidate_citations"]
self.include_raw_citations = self.config["include_raw_citations"]
self.include_raw_affiliations = self.config["include_raw_affiliations"]
self.include_coordinates = self.config["include_coordinates"]
self.segment_sentences = self.config["segment_sentences"]
self.max_workers = self.config["max_workers"]
self.grobid_server = self.config["grobid_server"]
self.grobid_port = self.config["grobid_port"]
self.sleep_time = self.config["sleep_time"]
def process(self, input: str, output: str, service: str):
batch_size_pdf = self.config['batch_size']
pdf_files = []
for pdf_file in glob.glob(input + "/*.pdf"):
pdf_files.append(pdf_file)
if len(pdf_files) == batch_size_pdf:
self.process_batch(pdf_files, output, service)
pdf_files = []
# last batch
if len(pdf_files) > 0:
self.process_batch(pdf_files, output, service)
def process_batch(self, pdf_files: List[str], output: str, service: str) -> None:
print(len(pdf_files), "PDF files to process")
for pdf_file in pdf_files:
self.process_pdf(pdf_file, output, service)
def process_pdf_stream(self, pdf_file: str, pdf_strm: bytes, output: str, service: str) -> str:
# process the stream
files = {
'input': (
pdf_file,
pdf_strm,
'application/pdf',
{'Expires': '0'}
)
}
the_url = 'http://' + self.grobid_server
the_url += ":" + self.grobid_port
the_url += "/api/" + service
# set the GROBID parameters
the_data = {}
if self.generate_ids:
the_data['generateIDs'] = '1'
else:
the_data['generateIDs'] = '0'
if self.consolidate_header:
the_data['consolidateHeader'] = '1'
else:
the_data['consolidateHeader'] = '0'
if self.consolidate_citations:
the_data['consolidateCitations'] = '1'
else:
the_data['consolidateCitations'] = '0'
if self.include_raw_affiliations:
the_data['includeRawAffiliations'] = '1'
else:
the_data['includeRawAffiliations'] = '0'
if self.include_raw_citations:
the_data['includeRawCitations'] = '1'
else:
the_data['includeRawCitations'] = '0'
if self.segment_sentences:
the_data['segmentSentences'] = '1'
else:
the_data['segmentSentences'] = '0'
if self.include_coordinates:
the_data['teiCoordinates'] = self.include_coordinates
res, status = self.post(
url=the_url,
files=files,
data=the_data,
headers={'Accept': 'text/plain'}
)
if status == 503:
time.sleep(self.sleep_time)
return self.process_pdf_stream(pdf_file, pdf_strm, output, service)
elif status != 200:
with open(os.path.join(output, "failed.log"), "a+") as failed:
failed.write(pdf_file.strip(".pdf") + "\n")
print('Processing failed with error ' + str(status))
return ""
else:
return res.text
def process_pdf(self, pdf_file: str, output: str, service: str) -> None:
# check if TEI file is already produced
# we use ntpath here to be sure it will work on Windows too
pdf_file_name = ntpath.basename(pdf_file)
filename = os.path.join(output, os.path.splitext(pdf_file_name)[0] + '.tei.xml')
if os.path.isfile(filename):
return
print(pdf_file)
pdf_strm = open(pdf_file, 'rb').read()
tei_text = self.process_pdf_stream(pdf_file, pdf_strm, output, service)
# writing TEI file
if tei_text:
with io.open(filename, 'w+', encoding='utf8') as tei_file:
tei_file.write(tei_text)
def process_citation(self, bib_string: str, log_file: str) -> str:
# process citation raw string and return corresponding dict
the_data = {
'citations': bib_string,
'consolidateCitations': '0'
}
the_url = 'http://' + self.grobid_server
the_url += ":" + self.grobid_port
the_url += "/api/processCitation"
for _ in range(5):
try:
res, status = self.post(
url=the_url,
data=the_data,
headers={'Accept': 'text/plain'}
)
if status == 503:
time.sleep(self.sleep_time)
continue
elif status != 200:
with open(log_file, "a+") as failed:
failed.write("-- BIBSTR --\n")
failed.write(bib_string + "\n\n")
break
else:
return res.text
except Exception:
continue
def process_header_names(self, header_string: str, log_file: str) -> str:
# process author names from header string
the_data = {
'names': header_string
}
the_url = 'http://' + self.grobid_server
the_url += ":" + self.grobid_port
the_url += "/api/processHeaderNames"
res, status = self.post(
url=the_url,
data=the_data,
headers={'Accept': 'text/plain'}
)
if status == 503:
time.sleep(self.sleep_time)
return self.process_header_names(header_string, log_file)
elif status != 200:
with open(log_file, "a+") as failed:
failed.write("-- AUTHOR --\n")
failed.write(header_string + "\n\n")
else:
return res.text
def process_affiliations(self, aff_string: str, log_file: str) -> str:
# process affiliation from input string
the_data = {
'affiliations': aff_string
}
the_url = 'http://' + self.grobid_server
the_url += ":" + self.grobid_port
the_url += "/api/processAffiliations"
res, status = self.post(
url=the_url,
data=the_data,
headers={'Accept': 'text/plain'}
)
if status == 503:
time.sleep(self.sleep_time)
return self.process_affiliations(aff_string, log_file)
elif status != 200:
with open(log_file, "a+") as failed:
failed.write("-- AFFILIATION --\n")
failed.write(aff_string + "\n\n")
else:
return res.text
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Client for GROBID services")
parser.add_argument("service", help="one of [processFulltextDocument, processHeaderDocument, processReferences]")
parser.add_argument("--input", default=None, help="path to the directory containing PDF to process")
parser.add_argument("--output", default=None, help="path to the directory where to put the results")
parser.add_argument("--config", default=None, help="path to the config file, default is ./config.json")
args = parser.parse_args()
input_path = args.input
config = json.load(open(args.config)) if args.config else DEFAULT_GROBID_CONFIG
output_path = args.output
service = args.service
client = GrobidClient(config=config)
start_time = time.time()
client.process(input_path, output_path, service)
runtime = round(time.time() - start_time, 3)
print("runtime: %s seconds " % (runtime))
| 35.02214
| 117
| 0.596249
| 7,207
| 0.759351
| 0
| 0
| 0
| 0
| 0
| 0
| 2,587
| 0.272574
|