| body (string) | body_hash (int64) | docstring (string) | path (string) | name (string) | repository_name (string) | lang (string) | body_without_docstring (string) |
|---|---|---|---|---|---|---|---|
def exitClient(self):
'Teardown the client.'
self.sendRtspRequest(self.TEARDOWN)
# Clean up the session's cache file before the process exits;
# any statement placed after sys.exit(0) would never run.
os.remove(((CACHE_FILE_NAME + str(self.sessionId)) + CACHE_FILE_EXT))
sys.exit(0) | 4,262,132,436,711,552,500 | Teardown the client. | Task2/Client_dev.py | exitClient | Aiemu/CourseCN-Proj-RTP | python | def exitClient(self):
self.sendRtspRequest(self.TEARDOWN)
os.remove(((CACHE_FILE_NAME + str(self.sessionId)) + CACHE_FILE_EXT))
sys.exit(0) |
def pauseMovie(self):
'Pause movie.'
if (self.state == self.PLAYING):
self.sendRtspRequest(self.PAUSE) | 3,415,863,770,049,874,000 | Pause movie. | Task2/Client_dev.py | pauseMovie | Aiemu/CourseCN-Proj-RTP | python | def pauseMovie(self):
if (self.state == self.PLAYING):
self.sendRtspRequest(self.PAUSE) |
def playMovie(self):
'Play movie.'
if (self.state == self.READY):
threading.Thread(target=self.listenRtp).start()
self.playEvent = threading.Event()
self.playEvent.clear()
self.sendRtspRequest(self.PLAY) | 4,207,836,993,191,038,000 | Play movie. | Task2/Client_dev.py | playMovie | Aiemu/CourseCN-Proj-RTP | python | def playMovie(self):
if (self.state == self.READY):
threading.Thread(target=self.listenRtp).start()
self.playEvent = threading.Event()
self.playEvent.clear()
self.sendRtspRequest(self.PLAY) |
def listenRtp(self):
'Listen for RTP packets.'
while 1:
try:
cachename = ((CACHE_FILE_NAME + str(self.sessionId)) + CACHE_FILE_EXT)
file = open(cachename, 'wb+')
while 1:
data = self.rtpSocket.recv(RECV_SIZE)
if data:
... | 7,159,088,159,690,293,000 | Listen for RTP packets. | Task2/Client_dev.py | listenRtp | Aiemu/CourseCN-Proj-RTP | python | def listenRtp(self):
while 1:
try:
cachename = ((CACHE_FILE_NAME + str(self.sessionId)) + CACHE_FILE_EXT)
file = open(cachename, 'wb+')
while 1:
data = self.rtpSocket.recv(RECV_SIZE)
if data:
rtpPacket = RtpPacket()... |
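The receive loop above is truncated right after constructing an `RtpPacket`, whose implementation is not shown in this row. As a hedged illustration, here is a minimal standalone parse of the standard fixed 12-byte RTP header using only `struct`; how the repository's `RtpPacket` class actually exposes these fields is an assumption.

```python
import struct

def parse_rtp_header(packet: bytes) -> dict:
    """Parse the fixed 12-byte RTP header (no CSRC list or extensions assumed)."""
    if len(packet) < 12:
        raise ValueError('packet shorter than the fixed RTP header')
    b0, b1, seq, timestamp, ssrc = struct.unpack('!BBHII', packet[:12])
    return {
        'version': b0 >> 6,         # should be 2 for RTP
        'payload_type': b1 & 0x7F,  # e.g. 26 for Motion JPEG
        'seq': seq,                 # lets the client drop late or duplicate packets
        'timestamp': timestamp,
        'ssrc': ssrc,
        'payload': packet[12:],     # the frame bytes that writeFrame() caches
    }
```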
def writeFrame(self):
'Write the received frame to a temp image file. Return the image file.'
cachename = ((CACHE_FILE_NAME + str(self.sessionId)) + CACHE_FILE_EXT)
file = open(cachename, 'wb')
for item in self.cutFrameList:
file.write(item)
file.close()
return cachename | 7,812,888,565,929,767,000 | Write the received frame to a temp image file. Return the image file. | Task2/Client_dev.py | writeFrame | Aiemu/CourseCN-Proj-RTP | python | def writeFrame(self):
cachename = ((CACHE_FILE_NAME + str(self.sessionId)) + CACHE_FILE_EXT)
file = open(cachename, 'wb')
for item in self.cutFrameList:
file.write(item)
file.close()
return cachename |
def updateMovie(self, imageFile):
'Update the image file as video frame in the GUI.'
pixmap = QtGui.QPixmap(imageFile)
self.page_main.label_display.setPixmap(pixmap)
self.page_main.label_display.setScaledContents(True) | -2,300,834,710,126,000,000 | Update the image file as video frame in the GUI. | Task2/Client_dev.py | updateMovie | Aiemu/CourseCN-Proj-RTP | python | def updateMovie(self, imageFile):
pixmap = QtGui.QPixmap(imageFile)
self.page_main.label_display.setPixmap(pixmap)
self.page_main.label_display.setScaledContents(True) |
def connectToServer(self):
'Connect to the Server. Start a new RTSP/TCP session.'
self.rtspSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.rtspSocket.connect((self.serverAddr, self.serverPort))
except:
messagebox.showwarning('Connection Failed', ("Connection to '%s' ... | 4,777,276,545,653,517,000 | Connect to the Server. Start a new RTSP/TCP session. | Task2/Client_dev.py | connectToServer | Aiemu/CourseCN-Proj-RTP | python | def connectToServer(self):
self.rtspSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.rtspSocket.connect((self.serverAddr, self.serverPort))
except:
messagebox.showwarning('Connection Failed', ("Connection to '%s' failed." % self.serverAddr)) |
def sendRtspRequest(self, requestCode):
'Send RTSP request to the server.'
if ((requestCode == self.SETUP) and (self.state == self.INIT)):
threading.Thread(target=self.recvRtspReply).start()
self.rtspSeq += 1
request = ((((('SETUP ' + self.fileName) + ' RTSP/1.0\nCSeq: ') + str(self.rtsp... | 320,747,659,495,692,300 | Send RTSP request to the server. | Task2/Client_dev.py | sendRtspRequest | Aiemu/CourseCN-Proj-RTP | python | def sendRtspRequest(self, requestCode):
if ((requestCode == self.SETUP) and (self.state == self.INIT)):
threading.Thread(target=self.recvRtspReply).start()
self.rtspSeq += 1
request = ((((('SETUP ' + self.fileName) + ' RTSP/1.0\nCSeq: ') + str(self.rtspSeq)) + '\nTransport: RTP/UDP; cli... |
def recvRtspReply(self):
'Receive RTSP reply from the server.'
while True:
reply = self.rtspSocket.recv(1024)
if reply:
self.parseRtspReply(reply.decode('utf-8'))
if (self.requestSent == self.TEARDOWN):
self.rtspSocket.shutdown(socket.SHUT_RDWR)
self.r... | 5,389,725,507,352,486,000 | Receive RTSP reply from the server. | Task2/Client_dev.py | recvRtspReply | Aiemu/CourseCN-Proj-RTP | python | def recvRtspReply(self):
while True:
reply = self.rtspSocket.recv(1024)
if reply:
self.parseRtspReply(reply.decode('utf-8'))
if (self.requestSent == self.TEARDOWN):
self.rtspSocket.shutdown(socket.SHUT_RDWR)
self.rtspSocket.close()
break |
def parseRtspReply(self, data):
'Parse the RTSP reply from the server.'
lines = str(data).split('\n')
seqNum = int(lines[1].split(' ')[1])
if (seqNum == self.rtspSeq):
session = int(lines[2].split(' ')[1])
if (self.sessionId == 0):
self.sessionId = session
if (self.se... | 1,693,217,859,073,406,700 | Parse the RTSP reply from the server. | Task2/Client_dev.py | parseRtspReply | Aiemu/CourseCN-Proj-RTP | python | def parseRtspReply(self, data):
lines = str(data).split('\n')
seqNum = int(lines[1].split(' ')[1])
if (seqNum == self.rtspSeq):
session = int(lines[2].split(' ')[1])
if (self.sessionId == 0):
self.sessionId = session
if (self.sessionId == session):
if (in... |
def openRtpPort(self):
'Open RTP socket bound to a specified port.'
self.rtpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.rtpSocket.settimeout(0.5)
try:
self.rtpSocket.bind(('', self.rtpPort))
except:
messagebox.showwarning('Unable to Bind', ('Unable to bind PORT=%d... | -8,856,207,915,541,562,000 | Open RTP socket bound to a specified port. | Task2/Client_dev.py | openRtpPort | Aiemu/CourseCN-Proj-RTP | python | def openRtpPort(self):
self.rtpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.rtpSocket.settimeout(0.5)
try:
self.rtpSocket.bind(('', self.rtpPort))
except:
messagebox.showwarning('Unable to Bind', ('Unable to bind PORT=%d' % self.rtpPort)) |
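The 0.5-second timeout set above is what lets the blocking `recv` in `listenRtp` wake up periodically instead of hanging forever. A small runnable illustration using only the standard `socket` module:

```python
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(0.5)        # recv() now raises socket.timeout after 0.5 s
sock.bind(('', 0))          # port 0: let the OS pick a free port
try:
    data = sock.recv(4096)  # nothing is sent here, so this times out
except socket.timeout:
    print('no packet within 0.5 s; a real loop would re-check a stop flag')
finally:
    sock.close()
```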
def handler(self):
'Handler on explicitly closing the GUI window.'
self.pauseMovie()
if messagebox.askokcancel('Quit?', 'Are you sure you want to quit?'):
self.exitClient()
else:
self.playMovie() | 704,200,587,336,194,600 | Handler on explicitly closing the GUI window. | Task2/Client_dev.py | handler | Aiemu/CourseCN-Proj-RTP | python | def handler(self):
self.pauseMovie()
if messagebox.askokcancel('Quit?', 'Are you sure you want to quit?'):
self.exitClient()
else:
self.playMovie() |
def test_requirements_from_source_info(tmpdir):
'Test the code path used by the exporter'
common.makeproject(tmpdir, 'test-project', deps=[('mod1', '')], imports=['mod1'])
project_dir = os.path.join(tmpdir, 'test-project')
libs_dir = os.path.join(project_dir, 'libs')
common.makemodule(libs_dir, 'mod... | -679,296,438,620,197,900 | Test the code path used by the exporter | tests/moduletool/test_python_dependencies.py | test_requirements_from_source_info | inmanta/inmanta-core | python | def test_requirements_from_source_info(tmpdir):
common.makeproject(tmpdir, 'test-project', deps=[('mod1', '')], imports=['mod1'])
project_dir = os.path.join(tmpdir, 'test-project')
libs_dir = os.path.join(project_dir, 'libs')
common.makemodule(libs_dir, 'mod1', project=False)
mod1 = os.path.join(... |
@app.task
def execute_command(command):
'Run a shell command on an airflow worker.'
log = LoggingMixin().log
log.info('Executing command in Celery: %s', command)
env = os.environ.copy()
try:
subprocess.check_call(command, shell=True, stderr=subprocess.STDOUT, close_fds=True, env=env)
except subprocess.... | 5,545,134,767,227,514,000 | Run a shell command on an airflow worker. | airflow/executors/celery_executor.py | execute_command | fengzhongzhu1621/XAirflow | python | @app.task
def execute_command(command):
log = LoggingMixin().log
log.info('Executing command in Celery: %s', command)
env = os.environ.copy()
try:
subprocess.check_call(command, shell=True, stderr=subprocess.STDOUT, close_fds=True, env=env)
except subprocess.CalledProcessError as e:
... |
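For context on the truncated `except` branch above: `subprocess.check_call` returns only when the command exits with status 0 and raises `CalledProcessError` otherwise. A minimal demonstration:

```python
import subprocess

try:
    subprocess.check_call('exit 3', shell=True)
except subprocess.CalledProcessError as e:
    print(f'command failed with return code {e.returncode}')  # prints 3
```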
def get_object(self, queryset=None):
"\n If the status of the entry is not PUBLISHED,\n a preview is requested, so we check if the user\n has the 'zinnia.can_view_all' permission or if\n it's an author of the entry.\n "
obj = super(EntryPreviewMixin, self).get_object(queryset)... | -6,498,698,108,583,524,000 | If the status of the entry is not PUBLISHED,
a preview is requested, so we check if the user
has the 'zinnia.can_view_all' permission or if
it's an author of the entry. | zinnia/views/mixins/entry_preview.py | get_object | Admoroux/django-blog-zinnia | python | def get_object(self, queryset=None):
"\n If the status of the entry is not PUBLISHED,\n a preview is requested, so we check if the user\n has the 'zinnia.can_view_all' permission or if\n it's an author of the entry.\n "
obj = super(EntryPreviewMixin, self).get_object(queryset)... |
def find_reachable_vertices(g: Graph, sources: set) -> set:
'\n Returns the set of vertices of a graph which are reachable\n from a set of source vertices.\n Args:\n g: Graph, an instance of `Graph`\n sources: set, a set of integers representing the source vertices\n Returns:\n The ... | 5,854,856,957,845,311,000 | Returns the set of vertices of a graph which are reachable
from a set of source vertices.
Args:
g: Graph, an instance of `Graph`
sources: set, a set of integers representing the source vertices
Returns:
The set of vertices that are reachable from the source vertices | pybgl/prune_incidence_automaton.py | find_reachable_vertices | nokia/PyBGL | python | def find_reachable_vertices(g: Graph, sources: set) -> set:
'\n Returns the set of vertices of a graph which are reachable\n from a set of source vertices.\n Args:\n g: Graph, an instance of `Graph`\n sources: set, a set of integers representing the source vertices\n Returns:\n The ... |
def prune_incidence_automaton(g: IncidenceAutomaton):
'\n    Prunes the vertices of an IncidenceAutomaton that cannot be reached\n    from the initial state, or that cannot reach a final state.\n    Args:\n        g: IncidenceAutomaton, an instance of IncidenceAutomaton\n    '
to_keep = find_reachable_vertices(g... | -5,881,924,977,987,115,000 | Prunes the vertices of an IncidenceAutomaton that cannot be reached
from the initial state, or that cannot reach a final state.
Args:
g: IncidenceAutomaton, an instance of IncidenceAutomaton | pybgl/prune_incidence_automaton.py | prune_incidence_automaton | nokia/PyBGL | python | def prune_incidence_automaton(g: IncidenceAutomaton):
'\n    Prunes the vertices of an IncidenceAutomaton that cannot be reached\n    from the initial state, or that cannot reach a final state.\n    Args:\n        g: IncidenceAutomaton, an instance of IncidenceAutomaton\n    '
to_keep = find_reachable_vertices(g... |
def example_fn_build_report(report, pvarray):
'Example function that builds a report when used in the\n :py:class:`~pvfactors.engine.PVEngine` with full mode simulations.\n Here it will be a dictionary with lists of calculated values.\n\n Parameters\n ----------\n report : dict\n Initially ``N... | 8,642,434,767,632,493,000 | Example function that builds a report when used in the
:py:class:`~pvfactors.engine.PVEngine` with full mode simulations.
Here it will be a dictionary with lists of calculated values.
Parameters
----------
report : dict
Initially ``None``, this will be passed and updated by the function
pvarray : PV array object
... | pvfactors/report.py | example_fn_build_report | tcapelle/pvfactors | python | def example_fn_build_report(report, pvarray):
'Example function that builds a report when used in the\n :py:class:`~pvfactors.engine.PVEngine` with full mode simulations.\n Here it will be a dictionary with lists of calculated values.\n\n Parameters\n ----------\n report : dict\n Initially ``N... |
@staticmethod
def build(report, pvarray):
"Method that will build the simulation report. Here we're using the\n previously defined\n :py:function:`~pvfactors.report.example_fn_build_report`.\n\n Parameters\n ----------\n report : dict\n Initially ``None``, this will be ... | 4,803,367,268,110,759,000 | Method that will build the simulation report. Here we're using the
previously defined
:py:func:`~pvfactors.report.example_fn_build_report`.
Parameters
----------
report : dict
Initially ``None``, this will be passed and updated by the function
pvarray : PV array object
PV array with updated calculation val... | pvfactors/report.py | build | tcapelle/pvfactors | python | @staticmethod
def build(report, pvarray):
"Method that will build the simulation report. Here we're using the\n previously defined\n :py:function:`~pvfactors.report.example_fn_build_report`.\n\n Parameters\n ----------\n report : dict\n Initially ``None``, this will be ... |
@staticmethod
def merge(reports):
'Method used to merge multiple reports together. Here it simply\n concatenates the lists of values saved in the different reports.\n\n Parameters\n ----------\n reports : list of dict\n List of reports that need to be concatenated together\n\n... | 5,527,112,909,777,533,000 | Method used to merge multiple reports together. Here it simply
concatenates the lists of values saved in the different reports.
Parameters
----------
reports : list of dict
List of reports that need to be concatenated together
Returns
-------
report : dict
Final report with all concatenated values | pvfactors/report.py | merge | tcapelle/pvfactors | python | @staticmethod
def merge(reports):
'Method used to merge multiple reports together. Here it simply\n concatenates the lists of values saved in the different reports.\n\n Parameters\n ----------\n reports : list of dict\n List of reports that need to be concatenated together\n\n... |
def uniform_mix_C(mixing_ratio, num_classes):
'\n returns a linear interpolation of a uniform matrix and an identity matrix\n '
return ((mixing_ratio * np.full((num_classes, num_classes), (1 / num_classes))) + ((1 - mixing_ratio) * np.eye(num_classes))) | 523,720,610,122,376,450 | returns a linear interpolation of a uniform matrix and an identity matrix | dataloader.py | uniform_mix_C | WuYichen-97/Learning-to-Purify-Noisy-Labels-via-Meta-Soft-Label-Corrector | python | def uniform_mix_C(mixing_ratio, num_classes):
'\n \n '
return ((mixing_ratio * np.full((num_classes, num_classes), (1 / num_classes))) + ((1 - mixing_ratio) * np.eye(num_classes))) |
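A quick worked example of the interpolation above: with 4 classes and a mixing ratio of 0.5, each row keeps 0.5 * 0.25 + 0.5 * 1 = 0.625 on the diagonal, spreads 0.125 to every other class, and still sums to 1, as a noise-transition matrix must:

```python
import numpy as np

num_classes, mixing_ratio = 4, 0.5
C = (mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes)
     + (1 - mixing_ratio) * np.eye(num_classes))
print(C[0])           # [0.625 0.125 0.125 0.125]
print(C.sum(axis=1))  # [1. 1. 1. 1.]
```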
def flip_labels_C(corruption_prob, num_classes, seed=1):
'\n returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob\n concentrated in only one other entry for each row\n '
np.random.seed(seed)
C = (np.eye(num_classes) * (1 - corruption_prob))
row_indices = np.arange(n... | 1,236,321,925,871,866,400 | returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row | dataloader.py | flip_labels_C | WuYichen-97/Learning-to-Purify-Noisy-Labels-via-Meta-Soft-Label-Corrector | python | def flip_labels_C(corruption_prob, num_classes, seed=1):
'\n returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob\n concentrated in only one other entry for each row\n '
np.random.seed(seed)
C = (np.eye(num_classes) * (1 - corruption_prob))
row_indices = np.arange(n... |
def flip_labels_C_two(corruption_prob, num_classes, seed=1):
'\n returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob\n concentrated in only one other entry for each row\n '
np.random.seed(seed)
C = (np.eye(num_classes) * (1 - corruption_prob))
row_indices = np.aran... | 3,365,218,847,267,169,300 | returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row | dataloader.py | flip_labels_C_two | WuYichen-97/Learning-to-Purify-Noisy-Labels-via-Meta-Soft-Label-Corrector | python | def flip_labels_C_two(corruption_prob, num_classes, seed=1):
'\n returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob\n concentrated in only one other entry for each row\n '
np.random.seed(seed)
C = (np.eye(num_classes) * (1 - corruption_prob))
row_indices = np.aran... |
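Both `flip_labels_C` variants above build a row-stochastic corruption matrix but are truncated before showing how it is applied. A hedged sketch of the typical use, sampling each noisy label from the row of `C` indexed by the clean label; the dataloader's actual sampling code is not shown in these rows:

```python
import numpy as np

def corrupt_labels(labels, C, seed=1):
    """Sample a noisy label for each clean label from the rows of C."""
    rng = np.random.RandomState(seed)
    num_classes = C.shape[0]
    return np.array([rng.choice(num_classes, p=C[y]) for y in labels])

# With corruption_prob=0.3, roughly 30% of labels move to another class.
```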
def loadbasis(cmd: str, dtype: torch.dtype=_dtype, device: torch.device=_device, requires_grad: bool=False) -> List[CGTOBasis]:
'\n Load basis from a file and return the list of CGTOBasis.\n\n Arguments\n ---------\n cmd: str\n This can be a file path where the basis is stored or a\n strin... | 2,860,282,354,001,892,400 | Load basis from a file and return the list of CGTOBasis.
Arguments
---------
cmd: str
This can be a file path where the basis is stored or a
string in format ``"atomz:basis"``, e.g. ``"1:6-311++G**"``.
dtype: torch.dtype
Tensor data type for ``alphas`` and ``coeffs`` of the GTO basis
device: torch.device
... | dqc/api/loadbasis.py | loadbasis | Jaikinator/dqc | python | def loadbasis(cmd: str, dtype: torch.dtype=_dtype, device: torch.device=_device, requires_grad: bool=False) -> List[CGTOBasis]:
'\n Load basis from a file and return the list of CGTOBasis.\n\n Arguments\n ---------\n cmd: str\n This can be a file path where the basis is stored or a\n strin... |
@contextlib.contextmanager
def stored(self, key):
'\n\t\tThis is a convenience tool to make plugin storage easier.\n\t\t'
value = self[key]
try:
(yield value)
finally:
self[key] = value | 2,452,644,208,444,902,400 | This is a convenience tool to make plugin storage easier. | plugins/otp/otp.py | stored | hosom/jarvis | python | @contextlib.contextmanager
def stored(self, key):
'\n\t\t\n\t\t'
value = self[key]
try:
(yield value)
finally:
self[key] = value |
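The `stored` helper above reads a value from the plugin's persistent storage, yields it for mutation, and writes it back on exit; the `otp_add_command` row below uses exactly this pattern. A minimal standalone sketch of the same idea over a plain dict:

```python
import contextlib

class Store(dict):
    @contextlib.contextmanager
    def stored(self, key):
        value = self[key]
        try:
            yield value        # the caller mutates the value in place
        finally:
            self[key] = value  # write-back persists the mutation

s = Store(commands=set())
with s.stored('commands') as commands:
    commands.add('otp_auth')
print(s['commands'])           # {'otp_auth'}
```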
def build_qrcode(self, user, url):
'Internal method used to build the QRCode image for token provisioning.'
prefix = self.DATA_DIR
qrcode.make(url).save('{0}{1}-qrcode.png'.format(prefix, user), format='png') | -683,330,556,530,843,500 | Internal method used to build the QRCode image for token provisioning. | plugins/otp/otp.py | build_qrcode | hosom/jarvis | python | def build_qrcode(self, user, url):
prefix = self.DATA_DIR
qrcode.make(url).save('{0}{1}-qrcode.png'.format(prefix, user), format='png') |
def get_identity(self, message):
'Wrapper to make sure the correct identity object is used.'
try:
return message.frm.aclattr
except AttributeError:
return message.frm.person | 3,442,578,752,912,167,000 | Wrapper to make sure the correct identity object is used. | plugins/otp/otp.py | get_identity | hosom/jarvis | python | def get_identity(self, message):
try:
return message.frm.aclattr
except AttributeError:
return message.frm.person |
@botcmd(admin_only=True)
def otp_delete_all(self, message, args):
'\n\t\tWARNING: This command removes ALL OTP entries.\n\t\t'
self['commands'] = set()
self['secrets'] = dict()
return 'Removed **all** OTP tokens and command filters.' | 3,795,642,453,982,059,500 | WARNING: This command removes ALL OTP entries. | plugins/otp/otp.py | otp_delete_all | hosom/jarvis | python | @botcmd(admin_only=True)
def otp_delete_all(self, message, args):
'\n\t\t\n\t\t'
self['commands'] = set()
self['secrets'] = dict()
return 'Removed **all** OTP tokens and command filters.' |
@arg_botcmd('cmd', type=str, admin_only=True, template='otp_add_command')
def otp_add_command(self, message, cmd=None):
'\n\t\tAdd a command to OTP command filtering.\n\t\t'
with self.lock:
with self.stored('commands') as commands:
commands.add(cmd)
return dict(command=cmd) | -6,308,578,379,057,276,000 | Add a command to OTP command filtering. | plugins/otp/otp.py | otp_add_command | hosom/jarvis | python | @arg_botcmd('cmd', type=str, admin_only=True, template='otp_add_command')
def otp_add_command(self, message, cmd=None):
'\n\t\t\n\t\t'
with self.lock:
with self.stored('commands') as commands:
commands.add(cmd)
return dict(command=cmd) |
@arg_botcmd('cmd', type=str, admin_only=True, template='otp_remove_command')
def otp_remove_command(self, message, cmd=None):
'\n\t\tRemove a command from OTP command filtering.\n\t\t'
with self.lock:
with self.stored('commands') as commands:
if (cmd not in commands):
return ... | 6,625,291,410,303,574,000 | Remove a command from OTP command filtering. | plugins/otp/otp.py | otp_remove_command | hosom/jarvis | python | @arg_botcmd('cmd', type=str, admin_only=True, template='otp_remove_command')
def otp_remove_command(self, message, cmd=None):
'\n\t\t\n\t\t'
with self.lock:
with self.stored('commands') as commands:
if (cmd not in commands):
return dict(err=True, command=cmd)
comm... |
@botcmd(admin_only=True, template='otp_commands')
def otp_commands(self, message, args):
'\n\t\tList the commands that are filtered by OTP.\n\t\t'
return dict(commands=self['commands']) | -200,299,054,489,281,120 | List the commands that are filtered by OTP. | plugins/otp/otp.py | otp_commands | hosom/jarvis | python | @botcmd(admin_only=True, template='otp_commands')
def otp_commands(self, message, args):
'\n\t\t\n\t\t'
return dict(commands=self['commands']) |
@arg_botcmd('user', type=str, admin_only=True, template='otp_secret_create')
def otp_secret_create(self, message, user=None):
'\n\t\tSend a new secret for a user.\n\t\t'
secret = pyotp.random_base32()
with self.lock:
with self.stored('secrets') as secrets:
secrets[user] = (secret, 0, _BA... | -9,044,136,800,075,633,000 | Send a new secret for a user. | plugins/otp/otp.py | otp_secret_create | hosom/jarvis | python | @arg_botcmd('user', type=str, admin_only=True, template='otp_secret_create')
def otp_secret_create(self, message, user=None):
'\n\t\t\n\t\t'
secret = pyotp.random_base32()
with self.lock:
with self.stored('secrets') as secrets:
secrets[user] = (secret, 0, _BASE_TIME)
totp = pyotp.TOT... |
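This enrollment row is truncated after the TOTP object is created. A hedged sketch of the pyotp flow it presumably follows (random secret, provisioning URI for the QR code, verification); the account and issuer names here are illustrative, not the plugin's:

```python
import pyotp

secret = pyotp.random_base32()
totp = pyotp.TOTP(secret)
# The provisioning URI is what build_qrcode() would render as a PNG
# for an authenticator app.
uri = totp.provisioning_uri(name='user@example.com', issuer_name='jarvis')
code = totp.now()          # current 6-digit one-time password
assert totp.verify(code)   # the kind of check otp_auth presumably performs
```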
@arg_botcmd('otp', type=int, template='otp_auth')
def otp_auth(self, message, otp=None):
'\n\t\tAuthenticate with OTP to the bot to pass OTP filtering.\n\t\t'
if message.is_group:
return dict(group_chat=True)
identity = self.get_identity(message)
if (identity not in self['secrets']):
ret... | 9,054,655,397,197,392,000 | Authenticate with OTP to the bot to pass OTP filtering. | plugins/otp/otp.py | otp_auth | hosom/jarvis | python | @arg_botcmd('otp', type=int, template='otp_auth')
def otp_auth(self, message, otp=None):
'\n\t\t\n\t\t'
if message.is_group:
return dict(group_chat=True)
identity = self.get_identity(message)
if (identity not in self['secrets']):
return dict(not_enrolled=True)
(secret, attempts, _) =... |
@cmdfilter
def otp_filter(self, message, command, args, dry_run):
'\n\t\tFilter commands to determine if user has recently validated with OTP.\n\t\t'
with self.lock:
if (command in self['commands']):
self.log.info('{0} is protected by OTP. Processing.'.format(command))
identity =... | -7,518,729,145,550,202,000 | Filter commands to determine if user has recently validated with OTP. | plugins/otp/otp.py | otp_filter | hosom/jarvis | python | @cmdfilter
def otp_filter(self, message, command, args, dry_run):
'\n\t\t\n\t\t'
with self.lock:
if (command in self['commands']):
self.log.info('{0} is protected by OTP. Processing.'.format(command))
identity = self.get_identity(message)
secrets = self['secrets']
... |
def call(self, input_ids: tf.Tensor=None, position_ids: tf.Tensor=None, token_type_ids: tf.Tensor=None, inputs_embeds: tf.Tensor=None, past_key_values_length=0, training: bool=False) -> tf.Tensor:
'\n Applies embedding based on inputs tensor.\n\n Returns:\n final_embeddings (`tf.Tensor`): o... | -4,769,499,439,953,555,000 | Applies embedding based on inputs tensor.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor. | src/transformers/models/convbert/modeling_tf_convbert.py | call | AK391/transformers | python | def call(self, input_ids: tf.Tensor=None, position_ids: tf.Tensor=None, token_type_ids: tf.Tensor=None, inputs_embeds: tf.Tensor=None, past_key_values_length=0, training: bool=False) -> tf.Tensor:
'\n Applies embedding based on inputs tensor.\n\n Returns:\n final_embeddings (`tf.Tensor`): o... |
def _prune_heads(self, heads_to_prune):
'\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n '
raise NotImplementedError | -6,215,471,936,727,332,000 | Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel | src/transformers/models/convbert/modeling_tf_convbert.py | _prune_heads | AK391/transformers | python | def _prune_heads(self, heads_to_prune):
'\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n '
raise NotImplementedError |
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=None, token_ty... | -8,487,996,097,200,511,000 | labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the to... | src/transformers/models/convbert/modeling_tf_convbert.py | call | AK391/transformers | python | @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=None, token_ty... |
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=None... | -9,053,624,724,062,643,000 | labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss ... | src/transformers/models/convbert/modeling_tf_convbert.py | call | AK391/transformers | python | @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=None... |
@property
def dummy_inputs(self):
'\n Dummy inputs to build the network.\n\n Returns:\n tf.Tensor with dummy inputs\n '
return {'input_ids': tf.convert_to_tensor(MULTIPLE_CHOICE_DUMMY_INPUTS)} | 4,374,154,624,472,328,700 | Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs | src/transformers/models/convbert/modeling_tf_convbert.py | dummy_inputs | AK391/transformers | python | @property
def dummy_inputs(self):
'\n Dummy inputs to build the network.\n\n Returns:\n tf.Tensor with dummy inputs\n '
return {'input_ids': tf.convert_to_tensor(MULTIPLE_CHOICE_DUMMY_INPUTS)} |
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, atten... | -3,477,998,566,155,918,000 | labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) | src/transformers/models/convbert/modeling_tf_convbert.py | call | AK391/transformers | python | @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, atten... |
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=None, t... | 6,359,083,734,458,689,000 | labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. | src/transformers/models/convbert/modeling_tf_convbert.py | call | AK391/transformers | python | @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=None, t... |
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=... | -8,155,518,814,180,526,000 | start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account ... | src/transformers/models/convbert/modeling_tf_convbert.py | call | AK391/transformers | python | @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=... |
def pLK0(self):
'\n Default LK Params.\n '
return dict(winSize=(12, 6), maxLevel=4, crit=((cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT), 100, 0.03), flags=0, minEigThreshold=0.001) | 2,156,167,278,745,075,700 | Default LK Params. | core/track.py | pLK0 | yycho0108/MoRoL | python | def pLK0(self):
'\n \n '
return dict(winSize=(12, 6), maxLevel=4, crit=((cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT), 100, 0.03), flags=0, minEigThreshold=0.001) |
def __call__(self, img1, img2, pt1, pt2=None, thresh=2.0, return_msk=False):
'\n Arguments:\n img1(np.ndarray) : previous image. (color/mono) (HxWx?)\n img2(np.ndarray) : current image (color/mono) (HxWx?)\n pt1(np.ndarray) : previous points. (Mx2)\n pt2(np.ndarra... | 8,712,494,715,450,819,000 | Arguments:
img1(np.ndarray) : previous image. (color/mono) (HxWx?)
img2(np.ndarray) : current image (color/mono) (HxWx?)
pt1(np.ndarray) : previous points. (Mx2)
pt2(np.ndarray) : [Optional] current points estimate (Mx2)
thresh(float) : Flow Back-projection Error threshold
Returns:
pt2(np.... | core/track.py | __call__ | yycho0108/MoRoL | python | def __call__(self, img1, img2, pt1, pt2=None, thresh=2.0, return_msk=False):
'\n Arguments:\n img1(np.ndarray) : previous image. (color/mono) (HxWx?)\n img2(np.ndarray) : current image (color/mono) (HxWx?)\n pt1(np.ndarray) : previous points. (Mx2)\n pt2(np.ndarra... |
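The `thresh` parameter above is described as a flow back-projection error threshold, i.e. forward-backward consistency: track `pt1` to `pt2` forward, track `pt2` back to the first image, and keep only points that land within `thresh` pixels of where they started. A sketch of that check, assuming the truncated body does the equivalent and that `pLK0()`'s `crit` entry is renamed to OpenCV's `criteria` keyword:

```python
import numpy as np
import cv2

lk_params = dict(winSize=(12, 6), maxLevel=4, flags=0, minEigThreshold=0.001,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 100, 0.03))

def fb_track(img1, img2, pt1, thresh=2.0):
    """Forward-backward pyramidal LK with a back-projection error mask.

    pt1: float32 array of shape (M, 1, 2), as OpenCV expects.
    """
    pt2, st_f, _ = cv2.calcOpticalFlowPyrLK(img1, img2, pt1, None, **lk_params)
    pt1r, st_b, _ = cv2.calcOpticalFlowPyrLK(img2, img1, pt2, None, **lk_params)
    err = np.linalg.norm((pt1 - pt1r).reshape(-1, 2), axis=1)  # round-trip error
    msk = (st_f.ravel() == 1) & (st_b.ravel() == 1) & (err < thresh)
    return pt2, msk
```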
def read_fasta(filename):
'Returns a list of tuples of each header and sequence in a fasta (or multifasta) file.\n The first element in each tuple is the header and the second the sequence.\n \n Key Arguments:\n filename -- fasta file.\n '
tmp_seq = None
seqs_list = []
with open(filename, 'r') as fast... | -592,529,539,078,873,700 | Returns a list of tuples of each header and sequence in a fasta (or multifasta) file.
The first element in each tuple is the header and the second the sequence.
Key Arguments:
filename -- fasta file. | pridcon/utils.py | read_fasta | Mirindi95/PrIDcon | python | def read_fasta(filename):
'Returns a list of tuples of each header and sequence in a fasta (or multifasta) file.\n The first element in each tuple is the header and the second the sequence.\n \n Key Arguments:\n filename -- fasta file.\n '
tmp_seq = None
seqs_list = []
with open(filename, 'r') as fast... |
def write_fasta(outfile, seq_dict):
'Writes a fasta file from a dictionary where keys are headers and values are sequences.\n \n Key Arguments:\n outfile.\n '
step = 70
with open(outfile, 'w') as file:
for (header, sequence) in seq_dict.items():
sequence_list = [sequence[(i - step):i]... | -5,630,814,507,906,850,000 | Writes a fasta file from a dictionary where keys are headers and values are sequences.
Key Arguments:
outfile. | pridcon/utils.py | write_fasta | Mirindi95/PrIDcon | python | def write_fasta(outfile, seq_dict):
'Writes a fasta file from a dictionary where keys are headers and values are sequences.\n \n Key Arguments:\n outfile.\n '
step = 70
with open(outfile, 'w') as file:
for (header, sequence) in seq_dict.items():
sequence_list = [sequence[(i - step):i]... |
def reads_generator(fasta_file, read_length, k):
'This function simulates read generation from a fasta file with a coverage of at least 50.\n It will return a list of tuples. The first element in each tuple is the read ID and the second the sequence.\n \n Key Arguments:\n fasta_file -- fasta file.\n re... | 1,295,099,355,066,453,500 | This function simulates read generation from a fasta file with a coverage of at least 50.
It will return a list of tuples. The first element in each tuple is the read ID and the second the sequence.
Key Arguments:
fasta_file -- fasta file.
read_length -- size of reads. | pridcon/utils.py | reads_generator | Mirindi95/PrIDcon | python | def reads_generator(fasta_file, read_length, k):
'This function simulates read generation from a fasta file with a coverage of at least 50.\n It will return a list of tuples. The first element in each tuple is the read ID and the second the sequence.\n \n Key Arguments:\n fasta_file -- fasta file.\n re...
def write_fastq(reads_list, filename):
'This function creates a FASTQ file from a list of reads generated by the reads_generator function.\n Key Arguments:\n reads_list -- list of reads generated with reads_generator.\n filename -- name of output file WITH EXTENSION.\n '
with open(filename, '... | -2,214,286,735,490,284,500 | This function creates a FASTQ file from a list of reads generated by the reads_generator function.
Key Arguments:
reads_list -- list of reads generated with reads_generator.
filename -- name of output file WITH EXTENSION. | pridcon/utils.py | write_fastq | Mirindi95/PrIDcon | python | def write_fastq(reads_list, filename):
'This function creates a FASTQ file from a list of reads generated by the reads_generator function.\n Key Arguments:\n reads_list -- list of reads generated with reads_generator.\n filename -- name of output file WITH EXTENSION.\n '
with open(filename, '... |
def read_fastq(filename):
'This function reads a FASTQ file, storing each read and its ID in a dictionary where keys are IDs and values are reads.\n This function does not consider + and score lines.\n \n Key Arguments:\n filename -- name of FASTQ input file.\n '
reads_dict = dict()
with open(fil... | 1,161,349,585,305,516,800 | This function reads a FASTQ file, storing each read and its ID in a dictionary where keys are IDs and values are reads.
This function does not consider + and score lines.
Key Arguments:
filename -- name of FASTQ input file. | pridcon/utils.py | read_fastq | Mirindi95/PrIDcon | python | def read_fastq(filename):
'This function reads a FASTQ file, storing each read and its ID in a dictionary where keys are IDs and values are reads.\n This function does not consider + and score lines.\n \n Key Arguments:\n filename -- name of FASTQ input file.\n '
reads_dict = dict()
with open(fil... |
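Since the body above is truncated, here is a minimal sketch of the 4-lines-per-record FASTQ convention the docstring describes (ID line, sequence, `+` separator, quality string), keeping only IDs and sequences:

```python
def read_fastq_sketch(filename):
    """Parse FASTQ: every record spans 4 lines; keep only header and sequence."""
    reads = {}
    with open(filename) as fq:
        for i, line in enumerate(fq):
            if i % 4 == 0:
                read_id = line.rstrip()[1:]    # drop the leading '@'
            elif i % 4 == 1:
                reads[read_id] = line.rstrip()
            # lines 2 and 3 of each record ('+' and quality) are skipped
    return reads
```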
def create_user(self, email, password=None, **extra_fields):
'Creates and saves a new user'
if (not email):
raise ValueError('User must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
retur... | -4,414,797,265,921,968,600 | Creates and saves a new user | app/core/models.py | create_user | StoikovOleh/recipe-app-api | python | def create_user(self, email, password=None, **extra_fields):
if (not email):
raise ValueError('User must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user |
def create_superuser(self, email, password):
'Creates and saves a new super user'
user = self.create_user(email, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user | 2,904,805,345,650,944,500 | Creates and saves a new super user | app/core/models.py | create_superuser | StoikovOleh/recipe-app-api | python | def create_superuser(self, email, password):
user = self.create_user(email, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user |
def toDF(self, schema=None, sampleRatio=None):
"\n Converts current :class:`RDD` into a :class:`DataFrame`\n\n This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``\n\n :param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns\n :param sampl... | 1,476,514,188,411,419,600 | Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
:param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().... | python/pyspark/sql/session.py | toDF | DislabNJU/Spark | python | def toDF(self, schema=None, sampleRatio=None):
"\n Converts current :class:`RDD` into a :class:`DataFrame`\n\n This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``\n\n :param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns\n :param sampl... |
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
'Creates a new SparkSession.\n\n >>> from datetime import datetime\n >>> spark = SparkSession(sc)\n >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,\n ... b=True, list=[1, 2, 3], dict={"s": 0}... | -449,384,611,703,641,860 | Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView(... | python/pyspark/sql/session.py | __init__ | DislabNJU/Spark | python | @ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
'Creates a new SparkSession.\n\n >>> from datetime import datetime\n >>> spark = SparkSession(sc)\n >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,\n ... b=True, list=[1, 2, 3], dict={"s": 0}... |
@since(2.0)
def newSession(self):
'\n Returns a new SparkSession as new session, that has separate SQLConf,\n registered temporary views and UDFs, but shared SparkContext and\n table cache.\n '
return self.__class__(self._sc, self._jsparkSession.newSession()) | 4,860,885,721,390,664,000 | Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache. | python/pyspark/sql/session.py | newSession | DislabNJU/Spark | python | @since(2.0)
def newSession(self):
'\n Returns a new SparkSession as new session, that has separate SQLConf,\n registered temporary views and UDFs, but shared SparkContext and\n table cache.\n '
return self.__class__(self._sc, self._jsparkSession.newSession()) |
@property
@since(2.0)
def sparkContext(self):
'Returns the underlying :class:`SparkContext`.'
return self._sc | 3,306,938,129,485,477,000 | Returns the underlying :class:`SparkContext`. | python/pyspark/sql/session.py | sparkContext | DislabNJU/Spark | python | @property
@since(2.0)
def sparkContext(self):
return self._sc |
@property
@since(2.0)
def version(self):
'The version of Spark on which this application is running.'
return self._jsparkSession.version() | 838,769,964,761,334,300 | The version of Spark on which this application is running. | python/pyspark/sql/session.py | version | DislabNJU/Spark | python | @property
@since(2.0)
def version(self):
return self._jsparkSession.version() |
@property
@since(2.0)
def conf(self):
'Runtime configuration interface for Spark.\n\n This is the interface through which the user can get and set all Spark and Hadoop\n configurations that are relevant to Spark SQL. When getting the value of a config,\n this defaults to the value set in the un... | -7,057,829,792,864,211,000 | Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any. | python/pyspark/sql/session.py | conf | DislabNJU/Spark | python | @property
@since(2.0)
def conf(self):
'Runtime configuration interface for Spark.\n\n This is the interface through which the user can get and set all Spark and Hadoop\n configurations that are relevant to Spark SQL. When getting the value of a config,\n this defaults to the value set in the un... |
@property
@since(2.0)
def catalog(self):
'Interface through which the user may create, drop, alter or query underlying\n databases, tables, functions etc.\n '
if (not hasattr(self, '_catalog')):
self._catalog = Catalog(self)
return self._catalog | 711,370,211,427,092,000 | Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc. | python/pyspark/sql/session.py | catalog | DislabNJU/Spark | python | @property
@since(2.0)
def catalog(self):
'Interface through which the user may create, drop, alter or query underlying\n databases, tables, functions etc.\n '
if (not hasattr(self, '_catalog')):
self._catalog = Catalog(self)
return self._catalog |
@property
@since(2.0)
def udf(self):
'Returns a :class:`UDFRegistration` for UDF registration.\n\n :return: :class:`UDFRegistration`\n '
from pyspark.sql.context import UDFRegistration
return UDFRegistration(self._wrapped) | 3,305,879,536,619,469,300 | Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration` | python/pyspark/sql/session.py | udf | DislabNJU/Spark | python | @property
@since(2.0)
def udf(self):
'Returns a :class:`UDFRegistration` for UDF registration.\n\n :return: :class:`UDFRegistration`\n '
from pyspark.sql.context import UDFRegistration
return UDFRegistration(self._wrapped) |
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
'\n Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named\n ``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with\n step value ``step``.\n\n :param sta... | 3,370,571,786,270,893,600 | Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPa... | python/pyspark/sql/session.py | range | DislabNJU/Spark | python | @since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
'\n Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named\n ``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with\n step value ``step``.\n\n :param sta... |
def _inferSchemaFromList(self, data):
'\n Infer schema from list of Row or tuple.\n\n :param data: list of Row or tuple\n :return: :class:`pyspark.sql.types.StructType`\n '
if (not data):
raise ValueError('can not infer schema from empty dataset')
first = data[0]
if (... | 800,975,137,581,594,200 | Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: :class:`pyspark.sql.types.StructType` | python/pyspark/sql/session.py | _inferSchemaFromList | DislabNJU/Spark | python | def _inferSchemaFromList(self, data):
'\n Infer schema from list of Row or tuple.\n\n :param data: list of Row or tuple\n :return: :class:`pyspark.sql.types.StructType`\n '
if (not data):
raise ValueError('can not infer schema from empty dataset')
first = data[0]
if (... |
def _inferSchema(self, rdd, samplingRatio=None):
'\n Infer schema from an RDD of Row or tuple.\n\n :param rdd: an RDD of Row or tuple\n :param samplingRatio: sampling ratio, or no sampling (default)\n :return: :class:`pyspark.sql.types.StructType`\n '
first = rdd.first()
i... | -2,158,780,547,185,956,600 | Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType` | python/pyspark/sql/session.py | _inferSchema | DislabNJU/Spark | python | def _inferSchema(self, rdd, samplingRatio=None):
'\n Infer schema from an RDD of Row or tuple.\n\n :param rdd: an RDD of Row or tuple\n :param samplingRatio: sampling ratio, or no sampling (default)\n :return: :class:`pyspark.sql.types.StructType`\n '
first = rdd.first()
i... |
def _createFromRDD(self, rdd, schema, samplingRatio):
'\n Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.\n '
if ((schema is None) or isinstance(schema, (list, tuple))):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struc... | -1,601,657,814,421,343,200 | Create an RDD for DataFrame from an existing RDD, returns the RDD and schema. | python/pyspark/sql/session.py | _createFromRDD | DislabNJU/Spark | python | def _createFromRDD(self, rdd, schema, samplingRatio):
'\n \n '
if ((schema is None) or isinstance(schema, (list, tuple))):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tupl... |
def _createFromLocal(self, data, schema):
'\n Create an RDD for DataFrame from a list or pandas.DataFrame, returns\n the RDD and schema.\n '
if (not isinstance(data, list)):
data = list(data)
if ((schema is None) or isinstance(schema, (list, tuple))):
struct = self._infe... | -773,945,324,360,580,500 | Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema. | python/pyspark/sql/session.py | _createFromLocal | DislabNJU/Spark | python | def _createFromLocal(self, data, schema):
'\n Create an RDD for DataFrame from a list or pandas.DataFrame, returns\n the RDD and schema.\n '
if (not isinstance(data, list)):
data = list(data)
if ((schema is None) or isinstance(schema, (list, tuple))):
struct = self._infe... |
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
'\n Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.\n\n When ``schema`` is a list of column names, the type of each column\n will be ... | -4,672,668,649,996,320,000 | Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :c... | python/pyspark/sql/session.py | createDataFrame | DislabNJU/Spark | python | @since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
'\n Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.\n\n When ``schema`` is a list of column names, the type of each column\n will be ... |
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
'Returns a :class:`DataFrame` representing the result of the given query.\n\n :return: :class:`DataFrame`\n\n >>> df.createOrReplaceTempView("table1")\n >>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")\n >>> ... | 6,139,502,417,837,409,000 | Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')] | python/pyspark/sql/session.py | sql | DislabNJU/Spark | python | @ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
'Returns a :class:`DataFrame` representing the result of the given query.\n\n :return: :class:`DataFrame`\n\n >>> df.createOrReplaceTempView("table1")\n >>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")\n >>> ... |
@since(2.0)
def table(self, tableName):
'Returns the specified table as a :class:`DataFrame`.\n\n :return: :class:`DataFrame`\n\n >>> df.createOrReplaceTempView("table1")\n >>> df2 = spark.table("table1")\n >>> sorted(df.collect()) == sorted(df2.collect())\n True\n '
re... | -1,685,094,057,636,237,000 | Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True | python/pyspark/sql/session.py | table | DislabNJU/Spark | python | @since(2.0)
def table(self, tableName):
'Returns the specified table as a :class:`DataFrame`.\n\n :return: :class:`DataFrame`\n\n >>> df.createOrReplaceTempView("table1")\n >>> df2 = spark.table("table1")\n >>> sorted(df.collect()) == sorted(df2.collect())\n True\n '
re... |
@property
@since(2.0)
def read(self):
'\n Returns a :class:`DataFrameReader` that can be used to read data\n in as a :class:`DataFrame`.\n\n :return: :class:`DataFrameReader`\n '
return DataFrameReader(self._wrapped) | 8,627,199,859,067,963,000 | Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader` | python/pyspark/sql/session.py | read | DislabNJU/Spark | python | @property
@since(2.0)
def read(self):
'\n Returns a :class:`DataFrameReader` that can be used to read data\n in as a :class:`DataFrame`.\n\n :return: :class:`DataFrameReader`\n '
return DataFrameReader(self._wrapped) |
@property
@since(2.0)
def readStream(self):
'\n Returns a :class:`DataStreamReader` that can be used to read data streams\n as a streaming :class:`DataFrame`.\n\n .. note:: Experimental.\n\n :return: :class:`DataStreamReader`\n '
return DataStreamReader(self._wrapped) | 7,571,571,327,525,079,000 | Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader` | python/pyspark/sql/session.py | readStream | DislabNJU/Spark | python | @property
@since(2.0)
def readStream(self):
'\n Returns a :class:`DataStreamReader` that can be used to read data streams\n as a streaming :class:`DataFrame`.\n\n .. note:: Experimental.\n\n :return: :class:`DataStreamReader`\n '
return DataStreamReader(self._wrapped) |
@property
@since(2.0)
def streams(self):
'Returns a :class:`StreamingQueryManager` that allows managing all the\n :class:`StreamingQuery` StreamingQueries active on `this` context.\n\n .. note:: Experimental.\n\n :return: :class:`StreamingQueryManager`\n '
from pyspark.sql.streaming ... | -486,802,360,053,188,400 | Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
:return: :class:`StreamingQueryManager` | python/pyspark/sql/session.py | streams | DislabNJU/Spark | python | @property
@since(2.0)
def streams(self):
'Returns a :class:`StreamingQueryManager` that allows managing all the\n :class:`StreamingQuery` StreamingQueries active on `this` context.\n\n .. note:: Experimental.\n\n :return: :class:`StreamingQueryManager`\n '
from pyspark.sql.streaming ... |
@since(2.0)
def stop(self):
'Stop the underlying :class:`SparkContext`.\n '
self._sc.stop()
SparkSession._instantiatedSession = None | -528,874,284,068,885,300 | Stop the underlying :class:`SparkContext`. | python/pyspark/sql/session.py | stop | DislabNJU/Spark | python | @since(2.0)
def stop(self):
'\n '
self._sc.stop()
SparkSession._instantiatedSession = None |
@since(2.0)
def __enter__(self):
"\n Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.\n "
return self | 6,458,408,526,494,362,000 | Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax. | python/pyspark/sql/session.py | __enter__ | DislabNJU/Spark | python | @since(2.0)
def __enter__(self):
"\n \n "
return self |
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"\n Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.\n\n Specifically stop the SparkSession on exit of the with block.\n "
self.stop() | -7,856,880,423,868,570,000 | Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block. | python/pyspark/sql/session.py | __exit__ | DislabNJU/Spark | python | @since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"\n Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.\n\n Specifically stop the SparkSession on exit of the with block.\n "
self.stop() |
@since(2.0)
def config(self, key=None, value=None, conf=None):
'Sets a config option. Options set using this method are automatically propagated to\n both :class:`SparkConf` and :class:`SparkSession`\'s own configuration.\n\n For an existing SparkConf, use `conf` parameter.\n\n >>> ... | 6,164,548,905,806,063,000 | Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
... | python/pyspark/sql/session.py | config | DislabNJU/Spark | python | @since(2.0)
def config(self, key=None, value=None, conf=None):
'Sets a config option. Options set using this method are automatically propagated to\n both :class:`SparkConf` and :class:`SparkSession`\'s own configuration.\n\n For an existing SparkConf, use `conf` parameter.\n\n >>> ... |
@since(2.0)
def master(self, master):
'Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"\n to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone\n cluster.\n\n :param master: a url for spark master\n '
ret... | 7,944,548,636,390,787,000 | Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master | python/pyspark/sql/session.py | master | DislabNJU/Spark | python | @since(2.0)
def master(self, master):
'Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"\n to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone\n cluster.\n\n :param master: a url for spark master\n '
ret... |
@since(2.0)
def appName(self, name):
'Sets a name for the application, which will be shown in the Spark web UI.\n\n If no application name is set, a randomly generated name will be used.\n\n :param name: an application name\n '
return self.config('spark.app.name', name) | -3,828,958,710,499,067,400 | Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name | python/pyspark/sql/session.py | appName | DislabNJU/Spark | python | @since(2.0)
def appName(self, name):
'Sets a name for the application, which will be shown in the Spark web UI.\n\n If no application name is set, a randomly generated name will be used.\n\n :param name: an application name\n '
return self.config('spark.app.name', name) |
@since(2.0)
def enableHiveSupport(self):
'Enables Hive support, including connectivity to a persistent Hive metastore, support\n for Hive serdes, and Hive user-defined functions.\n '
return self.config('spark.sql.catalogImplementation', 'hive') | -6,293,888,213,969,502,000 | Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions. | python/pyspark/sql/session.py | enableHiveSupport | DislabNJU/Spark | python | @since(2.0)
def enableHiveSupport(self):
'Enables Hive support, including connectivity to a persistent Hive metastore, support\n for Hive serdes, and Hive user-defined functions.\n '
return self.config('spark.sql.catalogImplementation', 'hive') |
@since(2.0)
def getOrCreate(self):
'Gets an existing :class:`SparkSession` or, if there is no existing one, creates a\n new one based on the options set in this builder.\n\n This method first checks whether there is a valid global default SparkSession, and if\n yes, return that one.... | 4,076,772,902,933,181,400 | Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkS... | python/pyspark/sql/session.py | getOrCreate | DislabNJU/Spark | python | @since(2.0)
def getOrCreate(self):
'Gets an existing :class:`SparkSession` or, if there is no existing one, creates a\n new one based on the options set in this builder.\n\n This method first checks whether there is a valid global default SparkSession, and if\n yes, return that one.... |
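The builder methods in the records above (config, master, appName, enableHiveSupport, getOrCreate) each return the builder, so they chain. A minimal sketch assuming a local run; the master URL and the config key/value pair are illustrative:

    from pyspark.sql import SparkSession

    spark = (SparkSession.builder
             .master('local[4]')                           # run locally with 4 cores
             .appName('word-count')                        # shown in the Spark web UI
             .config('spark.sql.shuffle.partitions', '8')  # any key/value pair
             .enableHiveSupport()                          # persistent Hive metastore support
             .getOrCreate())                               # reuses a valid global session if one exists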
def add_author_to_blog(apps, schema_editor):
'Author is the claimant'
Blog = apps.get_model('lowfat', 'Blog')
for blog in Blog.objects.all():
blog.author = blog.fund.claimant
blog.save() | -5,402,210,210,942,466,000 | Author is the claimant | lowfat/migrations/0090_auto_20170307_1518.py | add_author_to_blog | elena-kolomeets/lowfat | python | def add_author_to_blog(apps, schema_editor):
Blog = apps.get_model('lowfat', 'Blog')
for blog in Blog.objects.all():
blog.author = blog.fund.claimant
blog.save() |
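add_author_to_blog follows Django's data-migration pattern: it receives the historical apps registry and copies fund.claimant into blog.author. A hedged sketch of how such a function is typically wired into a migration; the dependency name and the noop reverse are assumptions, not part of the record:

    from django.db import migrations

    class Migration(migrations.Migration):
        dependencies = [('lowfat', '0089_previous')]  # hypothetical predecessor migration
        operations = [
            # forward: populate blog.author; backward: leave data untouched
            migrations.RunPython(add_author_to_blog, migrations.RunPython.noop),
        ]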
def partition(predicate: Callable[[Any], bool], iterator: Sequence[Any]) -> Tuple[List[Any], List[Any]]:
'A stable, out-of-place partition.'
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
return (results[1], results[0]) | 8,080,160,133,644,472,000 | A stable, out-of-place partition. | bin/fixup_oslogin_v1_keywords.py | partition | fahmi-aa/tmdb | python | def partition(predicate: Callable[[Any], bool], iterator: Sequence[Any]) -> Tuple[List[Any], List[Any]]:
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
return (results[1], results[0]) |
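A quick check of partition's contract: elements satisfying the predicate come first, and input order is preserved within each half (that is the stability the docstring promises):

    evens, odds = partition(lambda n: n % 2 == 0, [3, 4, 1, 8, 5, 6])
    assert evens == [4, 8, 6]  # predicate-true items, in input order
    assert odds == [3, 1, 5]   # predicate-false items, in input order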
def fix_files(in_dir: pathlib.Path, out_dir: pathlib.Path, *, transformer=osloginCallTransformer()):
'Duplicate the input dir to the output dir, fixing file method calls.\n\n Preconditions:\n * in_dir is a real directory\n * out_dir is a real, empty directory\n '
pyfile_gen = (pathlib.Path(os.path.j... | 3,131,379,621,797,957,600 | Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory | bin/fixup_oslogin_v1_keywords.py | fix_files | fahmi-aa/tmdb | python | def fix_files(in_dir: pathlib.Path, out_dir: pathlib.Path, *, transformer=osloginCallTransformer()):
'Duplicate the input dir to the output dir, fixing file method calls.\n\n Preconditions:\n * in_dir is a real directory\n * out_dir is a real, empty directory\n '
pyfile_gen = (pathlib.Path(os.path.j... |
def recalc_path(self, model_inst):
'Compute the upload path; a function is allowed.'
try:
uSettings = self.upload_settings
if ('filePathFormat' in self._upload_settings):
uSettings['filePathFormat'] = calc_path(self._upload_settings['filePathFormat'], model_inst)
if ('imagePathFormat' in self._upload_set... | 215,576,986,300,150,700 | Compute the upload path; a function is allowed. | DjangoUeditor/widgets.py | recalc_path | Jeyrce/ishare | python | def recalc_path(self, model_inst):
try:
uSettings = self.upload_settings
if ('filePathFormat' in self._upload_settings):
uSettings['filePathFormat'] = calc_path(self._upload_settings['filePathFormat'], model_inst)
if ('imagePathFormat' in self._upload_settings):
... |
def get_file_terms(file):
'Returns a list of text blocks.'
file_terms = file.split('\n\n')
file_terms = [term_text.split('\n') for term_text in file_terms if term_text.startswith('[Term]')]
return file_terms | 1,029,190,457,889,887,900 | Returns a list of text blocks. | scripts/proteinInteractionEBI/parse_ebi_test.py | get_file_terms | pradh/data | python | def get_file_terms(file):
file_terms = file.split('\n\n')
file_terms = [term_text.split('\n') for term_text in file_terms if term_text.startswith('[Term]')]
return file_terms |
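get_file_terms splits an OBO-style file on blank lines and keeps only the [Term] stanzas. A small illustrative input (the stanza contents are made up):

    sample = '[Term]\nid: MI:0001\nname: interaction detection method\n\n[Typedef]\nid: part_of'
    print(get_file_terms(sample))
    # [['[Term]', 'id: MI:0001', 'name: interaction detection method']]
    # the [Typedef] stanza is dropped because it does not start with '[Term]'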
def test_get_id_maps(self):
"Test function get_id_maps. Note that id_to_node here doesn't have parent_child\n relation, so only map keys are tested."
(id_to_class_name, id_to_node) = parse_ebi.get_id_maps(CONST_FILE_TERMS)
self.assertEqual(id_to_class_name, CONST_ID_TO_CLASS_NAME)
self.assertEqua... | 7,544,115,261,596,032,000 | Test function get_id_maps. Note that id_to_node here doesn't have parent_child
relation, so only map keys are tested. | scripts/proteinInteractionEBI/parse_ebi_test.py | test_get_id_maps | pradh/data | python | def test_get_id_maps(self):
"Test function get_id_maps. Note that id_to_node here doesn't have parent_child\n relation, so only map keys are tested."
(id_to_class_name, id_to_node) = parse_ebi.get_id_maps(CONST_FILE_TERMS)
self.assertEqual(id_to_class_name, CONST_ID_TO_CLASS_NAME)
self.assertEqua... |
def test_build_child_parent_link(self):
'Test function build_child_parent_link by checking the values of\n child_list and parent_list.'
id_to_node = copy.deepcopy(CONST_ID_TO_NODE_NO_RELATION)
id_to_node = parse_ebi.build_child_parent_link(CONST_FILE_TERMS, id_to_node)
def get_node_value_set(nod... | -4,787,189,117,368,866,000 | Test function build_child_parent_link by checking the values of
child_list and parent_list. | scripts/proteinInteractionEBI/parse_ebi_test.py | test_build_child_parent_link | pradh/data | python | def test_build_child_parent_link(self):
'Test function build_child_parent_link by checking the values of\n child_list and parent_list.'
id_to_node = copy.deepcopy(CONST_ID_TO_NODE_NO_RELATION)
id_to_node = parse_ebi.build_child_parent_link(CONST_FILE_TERMS, id_to_node)
def get_node_value_set(nod... |
def test_TreeBuilder(self):
'Test TreeBuilder class.'
dfs_caller = parse_ebi.TreeBuilder(CONST_ID_TO_NODE)
INTERACTION_TYPE_ROOT = 'MI:0001'
interaction_type_id_set = dfs_caller.get_subset_id(INTERACTION_TYPE_ROOT)
self.assertEqual(interaction_type_id_set, CONST_INTERACTION_TYPE_ID_SET) | -7,194,737,580,420,136,000 | Test TreeBuilder class. | scripts/proteinInteractionEBI/parse_ebi_test.py | test_TreeBuilder | pradh/data | python | def test_TreeBuilder(self):
dfs_caller = parse_ebi.TreeBuilder(CONST_ID_TO_NODE)
INTERACTION_TYPE_ROOT = 'MI:0001'
interaction_type_id_set = dfs_caller.get_subset_id(INTERACTION_TYPE_ROOT)
self.assertEqual(interaction_type_id_set, CONST_INTERACTION_TYPE_ID_SET) |
def test_get_schema_from_text(self):
'Test function get_schema_from_text by comparing the final schema.'
new_source_map = {'references': {}}
term = CONST_FILE_TERMS[1]
schema_res = parse_ebi.get_schema_from_text(term, CONST_ID_TO_NODE, new_source_map, CONST_ID_TO_CLASS_NAME, CONST_INTERACTION_TYPE_ID_SE... | -3,037,779,471,115,059,700 | Test function get_schema_from_text by comparing the final schema. | scripts/proteinInteractionEBI/parse_ebi_test.py | test_get_schema_from_text | pradh/data | python | def test_get_schema_from_text(self):
new_source_map = {'references': {}}
term = CONST_FILE_TERMS[1]
schema_res = parse_ebi.get_schema_from_text(term, CONST_ID_TO_NODE, new_source_map, CONST_ID_TO_CLASS_NAME, CONST_INTERACTION_TYPE_ID_SET, set(), set())
self.assertEqual(schema_res[0], CONST_SCHEMA1)... |
def isValidBST(self, root):
'\n :type root: TreeNode\n :rtype: bool\n '
MAX = sys.maxint
MIN = ((- sys.maxint) - 1)
return self.isValidBSTHelper(root, MIN, MAX) | -8,699,976,859,105,511,000 | :type root: TreeNode
:rtype: bool | leetcode.com/python/98_Validate_Binary_Search_Tree.py | isValidBST | Ajaykumar98/Algorithms | python | def isValidBST(self, root):
'\n :type root: TreeNode\n :rtype: bool\n '
MAX = sys.maxint
MIN = ((- sys.maxint) - 1)
return self.isValidBSTHelper(root, MIN, MAX) |
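The record calls self.isValidBSTHelper, which is not shown. A plausible reconstruction of the standard min/max-bounds recursion it implies — an assumption, not the repository's actual helper:

    def isValidBSTHelper(self, node, low, high):
        # an empty subtree is trivially a valid BST
        if node is None:
            return True
        # every value must fall strictly inside the (low, high) window
        if not (low < node.val < high):
            return False
        # tighten the window on the way down
        return (self.isValidBSTHelper(node.left, low, node.val) and
                self.isValidBSTHelper(node.right, node.val, high))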
def simulate_linear_system(sys, u, t=None, x0=None, per_channel=False):
'\n Compute the linear model response to an input array sampled at given time\n instances.\n\n Parameters\n ----------\n sys : {State, Transfer}\n The system model to be simulated\n u : array_like\n The real-valu... | 1,990,838,259,651,487,500 | Compute the linear model response to an input array sampled at given time
instances.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
u : array_like
The real-valued input sequence to force the model. 1D arrays for single
input models and 2D arrays that has as many columns as t... | harold/_time_domain.py | simulate_linear_system | TavaresFilipe/harold | python | def simulate_linear_system(sys, u, t=None, x0=None, per_channel=False):
'\n Compute the linear model response to an input array sampled at given time\n instances.\n\n Parameters\n ----------\n sys : {State, Transfer}\n The system model to be simulated\n u : array_like\n The real-valu... |
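A hedged usage sketch for simulate_linear_system. The docstring above confirms the sys/u/t parameters and that a 1D u is accepted for single-input models; the package-level imports and the (response, time) return order are assumptions, since the record is truncated:

    import numpy as np
    from harold import Transfer, simulate_linear_system

    G = Transfer([1.], [1., 3., 2.])   # 1 / ((s+1)(s+2)), an illustrative SISO model
    t = np.linspace(0., 5., 501)       # evenly spaced grid, required for continuous-time models
    u = np.sin(2 * np.pi * t)          # one input channel, so a 1D array suffices
    y, tout = simulate_linear_system(G, u, t)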
def simulate_step_response(sys, t=None):
'\n Compute the linear model response to a Heaviside function (or all-ones\n array) sampled at given time instances.\n\n If the time array is omitted then a time sequence is generated based on\n the poles of the model.\n\n Parameters\n ----------\n sys ... | -5,924,860,459,075,212,000 | Compute the linear model response to a Heaviside function (or all-ones
array) sampled at given time instances.
If the time array is omitted then a time sequence is generated based on
the poles of the model.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
t : array_like
The real... | harold/_time_domain.py | simulate_step_response | TavaresFilipe/harold | python | def simulate_step_response(sys, t=None):
'\n Compute the linear model response to a Heaviside function (or all-ones\n array) sampled at given time instances.\n\n If the time array is omitted then a time sequence is generated based on\n the poles of the model.\n\n Parameters\n ----------\n sys ...
def simulate_impulse_response(sys, t=None):
'\n Compute the linear model response to a Dirac delta pulse (or all-zeros\n array except the first sample being 1/dt at each channel) sampled at given\n time instances.\n\n If the time array is omitted then a time sequence is generated based on\n the pole... | 4,456,269,683,030,728,000 | Compute the linear model response to a Dirac delta pulse (or all-zeros
array except the first sample being 1/dt at each channel) sampled at given
time instances.
If the time array is omitted then a time sequence is generated based on
the poles of the model.
Parameters
----------
sys : {State, Transfer}
The syste... | harold/_time_domain.py | simulate_impulse_response | TavaresFilipe/harold | python | def simulate_impulse_response(sys, t=None):
'\n Compute the linear model response to a Dirac delta pulse (or all-zeros\n array except the first sample being 1/dt at each channel) sampled at given\n time instances.\n\n If the time array is omitted then a time sequence is generated based on\n the pole...
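The step and impulse helpers differ only in the forcing array they synthesize, and both can pick the time grid from the model's poles when t is omitted. A hedged sketch under the same assumptions as above (imports and return order inferred):

    from harold import Transfer, simulate_step_response, simulate_impulse_response

    G = Transfer([1.], [1., 2., 1.])             # 1 / (s + 1)^2
    y_step, t_step = simulate_step_response(G)   # t omitted: grid chosen automatically
    y_imp, t_imp = simulate_impulse_response(G)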
def _compute_tfinal_and_dt(sys, is_step=True):
'\n Helper function to estimate a final time and a sampling period for\n time domain simulations. It is essentially geared towards impulse response\n but is also used for step responses.\n\n For discrete-time models, obviously dt is inherent and only tfinal... | 3,334,180,236,767,385,600 | Helper function to estimate a final time and a sampling period for
time domain simulations. It is essentially geared towards impulse response
but is also used for step responses.
For discrete-time models, obviously dt is inherent and only tfinal is
computed.
Parameters
----------
sys : {State, Transfer}
The syste... | harold/_time_domain.py | _compute_tfinal_and_dt | TavaresFilipe/harold | python | def _compute_tfinal_and_dt(sys, is_step=True):
'\n Helper function to estimate a final time and a sampling period for\n time domain simulations. It is essentially geared towards impulse response\n but is also used for step responses.\n\n For discrete-time models, obviously dt is inherent and only tfinal... |
def _check_u_and_t_for_simulation(m, dt, u, t, isdiscrete):
'\n Helper function to validate the input arguments for simulate_linear_system\n '
if (t is None):
if (not isdiscrete):
raise ValueError('Continuous time models need an evenly spaced time sequence from which the sampling perio... | 6,866,641,658,556,078,000 | Helper function to validate the input arguments for simulate_linear_system | harold/_time_domain.py | _check_u_and_t_for_simulation | TavaresFilipe/harold | python | def _check_u_and_t_for_simulation(m, dt, u, t, isdiscrete):
'\n \n '
if (t is None):
if (not isdiscrete):
raise ValueError('Continuous time models need an evenly spaced time sequence from which the sampling period will be obtained.')
else:
u_samples = len(u)
... |
def _check_custom_time_input(t):
'\n Helper function for simple and rather expensive checks for sanity\n '
t = atleast_1d(t)
if (t.ndim > 1):
t = squeeze(t)
if (t.ndim > 1):
raise ValueError('Time array should be a 1D array but has {} nontrivial dimensions'.format(t.ndim))
... | -6,231,156,308,016,383,000 | Helper function for simple and rather expensive checks for sanity | harold/_time_domain.py | _check_custom_time_input | TavaresFilipe/harold | python | def _check_custom_time_input(t):
'\n \n '
t = atleast_1d(t)
if (t.ndim > 1):
t = squeeze(t)
if (t.ndim > 1):
raise ValueError('Time array should be a 1D array but has {} nontrivial dimensions'.format(t.ndim))
if (t.size < 2):
raise ValueError('Time array should ... |
def unet(inputI, output_channel):
'3D U-net'
phase_flag = 1
concat_dim = 4
conv1_1 = conv3d(input=inputI, output_chn=64, kernel_size=3, stride=1, use_bias=False, name='conv1')
conv1_bn = tf.contrib.layers.batch_norm(conv1_1, decay=0.9, updates_collections=None, epsilon=1e-05, scale=True, is_training... | 6,890,914,431,589,737,000 | 3D U-net | src/models.py | unet | JohnleeHIT/Brats2019 | python | def unet(inputI, output_channel):
phase_flag = 1
concat_dim = 4
conv1_1 = conv3d(input=inputI, output_chn=64, kernel_size=3, stride=1, use_bias=False, name='conv1')
conv1_bn = tf.contrib.layers.batch_norm(conv1_1, decay=0.9, updates_collections=None, epsilon=1e-05, scale=True, is_training=phase_fla... |
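The truncated unet body repeats a conv3d -> batch_norm -> ReLU pattern per stage. A minimal sketch of one encoder stage in plain TensorFlow 1.x ops; tf.layers.conv3d stands in for the repository's custom conv3d wrapper, so the wrapper's exact defaults are assumptions:

    import tensorflow as tf  # TensorFlow 1.x, matching the tf.contrib usage above

    def encoder_stage(x, channels, is_training, name):
        # 3x3x3 convolution without bias, as in the record's conv3d call
        conv = tf.layers.conv3d(x, filters=channels, kernel_size=3, strides=1,
                                padding='same', use_bias=False, name=name)
        # batch-norm hyperparameters copied from the record
        bn = tf.contrib.layers.batch_norm(conv, decay=0.9, updates_collections=None,
                                          epsilon=1e-05, scale=True,
                                          is_training=is_training)
        return tf.nn.relu(bn)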
def extractReMonsterWiki(item):
"\n\tParser for 'Re:Monster Wiki'\n\t"
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
if ('WATTT' in item['tags']):
return buildReleaseMessageWithType... | 2,093,858,153,736,959,000 | Parser for 'Re:Monster Wiki' | WebMirror/management/rss_parser_funcs/feed_parse_extractReMonsterWiki.py | extractReMonsterWiki | fake-name/ReadableWebProxy | python | def extractReMonsterWiki(item):
"\n\t\n\t"
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
if ('WATTT' in item['tags']):
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, fr... |
def update_fields(module, p):
'This updates the module field names\n to match the field names tower-cli expects to make\n calling of the modify/delete methods easier.\n '
params = p.copy()
field_map = {'fact_caching_enabled': 'use_fact_cache', 'ask_diff_mode': 'ask_diff_mode_on_launch', 'ask_extra_... | -303,015,818,293,956,200 | This updates the module field names
to match the field names tower-cli expects to make
calling of the modify/delete methods easier. | awx_collection/plugins/modules/tower_job_template.py | update_fields | activelan/awx | python | def update_fields(module, p):
'This updates the module field names\n to match the field names tower-cli expects to make\n calling of the modify/delete methods easier.\n '
params = p.copy()
field_map = {'fact_caching_enabled': 'use_fact_cache', 'ask_diff_mode': 'ask_diff_mode_on_launch', 'ask_extra_... |
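The field_map above drives a plain rename of Ansible module parameters to the names tower-cli expects. The record is truncated, so the loop below is an assumed illustration of the pattern on a toy dict:

    params = {'fact_caching_enabled': True, 'playbook': 'site.yml'}
    field_map = {'fact_caching_enabled': 'use_fact_cache'}
    for module_field, towercli_field in field_map.items():
        if module_field in params:
            params[towercli_field] = params.pop(module_field)
    # params == {'playbook': 'site.yml', 'use_fact_cache': True}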
@command
def echo(bot, mask, target, args):
'Echo command\n\n %%echo <words>...\n '
(yield ' '.join(args['<words>'])) | 1,500,019,934,740,953,900 | Echo command
%%echo <words>... | examples/mycommands.py | echo | gawel/irc3 | python | @command
def echo(bot, mask, target, args):
'Echo command\n\n %%echo <words>...\n '
(yield ' '.join(args['<words>'])) |
@command(permission='admin', public=False)
def adduser(bot, mask, target, args):
'Add a user\n\n %%adduser <name> <password>\n '
bot.privmsg(mask.nick, 'User added') | 3,192,555,016,350,542,000 | Add a user
%%adduser <name> <password> | examples/mycommands.py | adduser | gawel/irc3 | python | @command(permission='admin', public=False)
def adduser(bot, mask, target, args):
'Add a user\n\n %%adduser <name> <password>\n '
bot.privmsg(mask.nick, 'User added') |
@command(show_in_help_list=False)
def my_secret_operation(bot, mask, target, args):
"Do something you don't want in !help all the time\n\n %%my_secret_operation\n "
(yield 'I like turtles') | -5,074,065,158,678,942,000 | Do something you don't want in !help all the time
%%my_secret_operation | examples/mycommands.py | my_secret_operation | gawel/irc3 | python | @command(show_in_help_list=False)
def my_secret_operation(bot, mask, target, args):
"Do something you don't want in !help all the time\n\n %%my_secret_operation\n "
(yield 'I like turtles') |
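The three irc3 records above share the command plugin's convention: the %%usage line inside each docstring is a docopt pattern that defines the arguments the handler receives, so in channel !echo hello world replies 'hello world', while adduser is admin-only and private (permission='admin', public=False). A hedged sketch of loading such a module programmatically; the config keys mirror irc3's documented options, and the nick/host values are illustrative:

    import irc3

    bot = irc3.IrcBot(
        nick='mybot', host='irc.libera.chat', autojoins=['#demo'],
        includes=['irc3.plugins.command',  # provides @command and !help
                  'mycommands'])           # the module holding echo / adduser above
    bot.run(forever=True)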