blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2fd1b907e6eff215b937433a3f361834b3dd96ec | a355b16b9b4cebdd39beb69a6c5aa4e175ae52f6 | /phytosanitary/urls/links.py | 8d16c92f08f546895ad6e4779cd0a8695434b8ee | [] | no_license | hypertexthero/Phytosanitary | e2ba31116b432a8623b332e53a390ff31c24fc10 | 4f001436c90de7a64649e82089e577af6981b793 | refs/heads/master | 2016-09-05T09:47:01.448846 | 2012-11-28T16:34:03 | 2012-11-28T16:34:03 | 3,460,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | from django.conf.urls.defaults import *
from phytosanitary.models import Link
# Options shared by every date-based generic view below: archive the full
# Link queryset by its publication date.
link_info_dict = {
    'queryset': Link.objects.all(),
    'date_field': 'pub_date',
}

# NOTE(review): patterns() and django.views.generic.date_based are the old
# function-based generic views (removed after Django 1.4) -- presumably this
# project targets a legacy Django version; confirm before upgrading.
urlpatterns = patterns('django.views.generic.date_based',
    (r'^$', 'archive_index', link_info_dict, 'phytosanitary_link_archive_index'),
    (r'^(?P<year>\d{4})/$', 'archive_year', link_info_dict, 'phytosanitary_link_archive_year'),
    (r'^(?P<year>\d{4})/(?P<month>\w{3})/$', 'archive_month', link_info_dict, 'phytosanitary_link_archive_month'),
    (r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/$', 'archive_day', link_info_dict, 'phytosanitary_link_archive_day'),
    (r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/(?P<slug>[-\w]+)/$', 'object_detail', link_info_dict, 'phytosanitary_link_detail'),
) | [
"simon@hypertexthero.com"
] | simon@hypertexthero.com |
48be6df478a910b9d3c13cffb98277f3082c0fe2 | d41aa512f8ad7a28121121cf96f2286abc5391c3 | /scrape_argos/settings.py | e1d3d896b509fcae2e0caab6b9ec23a69f38a0a7 | [
"MIT"
] | permissive | andyregan/scrape_argos | 8b1757819b013bbdb0d0c67ee6b205455aff5ea7 | a3cb44f29173cb4b64e8d73204aecfb40b9edfd9 | refs/heads/master | 2021-01-01T06:50:54.760280 | 2013-05-11T10:08:43 | 2013-05-11T10:08:43 | 9,894,606 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # Scrapy settings for scrape_argos project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/topics/settings.html
#
BOT_NAME = 'scrape_argos'
# Where Scrapy discovers spider classes, and where `genspider` creates new ones.
SPIDER_MODULES = ['scrape_argos.spiders']
NEWSPIDER_MODULE = 'scrape_argos.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scrape_argos (+http://www.yourdomain.com)'
| [
"andrewjregan@gmail.com"
] | andrewjregan@gmail.com |
3160ede5e603262448964d8dc9e3a89b58592466 | 60d5ea4f007d49768d250ef394003f554003e4d0 | /python/Depth-first Search/111.Minimum Depth of Binary Tree.py | 28976c05b41b56e4880a2b5192eea9b5868c08e4 | [] | no_license | EvanJamesMG/Leetcode | dd7771beb119ea1250dbb3b147a09053298cd63b | fa638c7fda3802e9f4e0751a2c4c084edf09a441 | refs/heads/master | 2021-01-10T17:11:10.896393 | 2017-12-01T16:04:44 | 2017-12-01T16:04:44 | 46,968,756 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py | # coding=utf-8
# Node types used by the solution below.
class TreeNode(object):
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        """Create a leaf node carrying value ``x`` (no children yet)."""
        self.val = x
        self.left = self.right = None  # children are attached by the caller
class ListNode(object):
    """A singly-linked-list node: a value plus a pointer to its successor."""

    def __init__(self, x):
        """Create a detached node carrying value ``x``."""
        self.val = x
        self.next = None  # nodes are chained together by the caller
'''
Approach
--------
The minimum depth is the number of nodes on the shortest path from the root
down to the nearest leaf (a node with no children).  A breadth-first search
visits nodes level by level, so the first leaf encountered is guaranteed to
be the shallowest one and the search can stop immediately -- no deeper
subtree is ever explored, and (unlike the naive recursion) a very skewed
tree cannot blow the recursion limit.
'''
class Solution(object):
    def minDepth(self, root):
        """Return the minimum depth of the binary tree rooted at ``root``.

        :type root: TreeNode
        :rtype: int
        """
        if root is None:  # empty tree -> depth 0 by convention
            return 0
        from collections import deque  # local import: module header is outside this block
        pending = deque([(root, 1)])  # queue of (node, 1-based depth of that node)
        while pending:
            node, depth = pending.popleft()
            if node.left is None and node.right is None:
                return depth  # first leaf seen in BFS order is the nearest leaf
            if node.left is not None:
                pending.append((node.left, depth + 1))
            if node.right is not None:
                pending.append((node.right, depth + 1))
# Smoke test.  The original snippet was copied from a linked-list problem and
# called Solution().rotateRight(), a method this file never defines -- it
# would raise AttributeError at runtime.  Exercise the actual API instead.
if __name__ == "__main__":
    # Build:      3        nearest leaf is 6 -> minimum depth 2
    #            / \
    #           5   6
    #          /
    #         7
    root = TreeNode(3)
    root.left = TreeNode(5)
    root.right = TreeNode(6)
    root.left.left = TreeNode(7)
    print(Solution().minDepth(root))  # expect 2
| [
"Evan123mg@gmail.com"
] | Evan123mg@gmail.com |
64cbbf12cccecdd79098ee784933598a826d5869 | b9f7c7a87292c1a9c231ce89933ae9d4bc51f487 | /src/sst/elements/simpleElementExample/tests/basicStatistics0.py | 3ea5c138cfc1b3558768044804877e0a4e49d5e9 | [
"BSD-3-Clause"
] | permissive | sstsimulator/sst-elements | 3a8db475a7a6cbd4c2a5d737c32718752da9797a | 68cdb3ac843750705805653b3fdcd4b015e84089 | refs/heads/master | 2023-08-17T03:30:24.145168 | 2023-08-16T13:58:07 | 2023-08-16T13:58:07 | 43,475,440 | 85 | 145 | NOASSERTION | 2023-09-12T13:59:11 | 2015-10-01T02:57:18 | C++ | UTF-8 | Python | false | false | 3,054 | py | # Import the SST module
import sst
# The basicStatisticsX.py scripts demonstrate user-side configuration of statistics.
# Each one focuses on a different aspect of user-side configuration
#
# This example demonstrates:
# 1. Default output behavior (reporting statistics at the end of simulation)
# 2. Various output formats for statistics
#
# This component has no links and SST will produce a warning because that is an unusual configuration
# that often points to a mis-configuration. For this simulation, the warning can be ignored.
#
# Relevant code:
# simpleElementExample/basicStatistics.h
# simpleElementExample/basicStatistics.cc
# simpleElementExample/basicEvent.h
#
# Output:
# simpleElementExample/tests/refFiles/basicStatistics0.out
# simpleElementExample/tests/refFiles/basicStatistics0.csv
#
### Create two components (to compare different components' output in the CSV file)
component0 = sst.Component("StatisticComponent0", "simpleElementExample.basicStatistics")
component1 = sst.Component("StatisticComponent1", "simpleElementExample.basicStatistics")

### Parameterize the components.
# Run 'sst-info simpleElementExample.basicStatistics' at the command line
# to see parameter documentation
params0 = {
    "marsagliaZ" : 438,       # Seed for Marsaglia RNG
    "marsagliaW" : 9375794,   # Seed for Marsaglia RNG
    "mersenne" : 102485,      # Seed for Mersenne RNG
    "run_cycles" : 1000,      # Number of cycles to run for
    "subids" : 3              # Number of SUBID_statistic instances
}
component0.addParams(params0)

# Different seeds/lengths for the second component so the two CSV outputs differ.
params1 = {
    "marsagliaZ" : 957537,    # Seed for Marsaglia RNG
    "marsagliaW" : 5857,      # Seed for Marsaglia RNG
    "mersenne" : 860,         # Seed for Mersenne RNG
    "run_cycles" : 1200,      # Number of cycles to run for
    "subids" : 6              # Number of SUBID_statistic instances
}
component1.addParams(params1)

### Enable statistics
## Limit the verbosity of statistics to any with a load level from 0-4
# This component's statistics range from 1-4 (see sst-info)
# (statistics declared with a load level greater than 4 are not collected)
sst.setStatisticLoadLevel(4)

## Determine where statistics should be sent. By default this script uses CSV, other options are
# commented out below. Output locations are case-insensitive (e.g., statOutputCSV = statoutputcsv).

# Default: Output to CSV. Filename and separator can be specified
sst.setStatisticOutput("sst.statOutputCSV", { "filepath" : "./basicStatistics0.csv", "separator" : "," } )

# Option: Output to the terminal
#sst.setStatisticOutput("sst.statoutputconsole")

# Option: Output to a text file
#sst.setStatisticOutput("sst.statOutputTXT", { "filepath" : "./basicStatistics0.txt" } )

# Option: Output to HDF5. Requires sst-core to be configured with HDF5 library.
#sst.setStatisticOutput("sst.statoutputhd5f")

# Option: Output to JSON
#sst.setStatisticOutput("sst.statOutputJSON", { "filepath" : "./basicStatistics0.json" } )

## Enable statistics on the components
sst.enableAllStatisticsForComponentType("simpleElementExample.basicStatistics")
| [
"grvosku@sandia.gov"
] | grvosku@sandia.gov |
fc4e2f70fdb42770a7c8e6dd0beb93b61e367911 | 1c7ac6a675fa16e7c7d85b90a02eaddbadf80738 | /skulpt/python/binaire_alarme2.py | 6250021f1bdda748c16d8f42c4f4e05dd8be832c | [] | no_license | mistert14/mistert-skulpt | e19c432264fd532e90fdfcc06e6b5d1d9cac7936 | 7a5990e03466a1889922ad3c1e4b0e736cca569f | refs/heads/master | 2020-09-24T13:26:44.632719 | 2014-05-19T19:37:42 | 2014-05-19T19:37:42 | 40,827,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,995 | py | import canvas, math, math2
canvas.clear_timers()  # stop any timer left over from a previous run
# Global state: the three input bits and the computed output bit ('0'/'1').
bits = {'A': 0, 'B': 0, 'C': 0 }
out = '0'
# Compute the output bit from the current equation and input bits.
def process(val):
    """Evaluate the boolean expression ``val`` against the global ``bits``
    and store the result in the global ``out`` as '0' or '1'."""
    global bits, out
    out = math2.RpnMathParser(val, bits).get_result()
    # The parser yields a numeric result; treat exactly 0.0 as logic low
    # and anything else as logic high.
    out = '0' if str(out) == '0.0' else '1'
# Checkbox callback shared by the three input boxes.
def chk(id, value):
    """Record the new state of input bit ``id`` and re-evaluate the equation.

    NOTE: the parameter name shadows the builtin ``id``; kept as-is to match
    the canvas checkbox callback signature.
    """
    global bits
    bits[id] = 1 if value else 0
    process(inp.get_text())
# Equation text-field callback: re-evaluate whenever the text changes.
def check(val):
    process(val)
# Build the interface: one checkbox per input bit, plus the equation field.
canvas.add_checkbox("chkA","A",chk,20)
canvas.add_checkbox("chkB","B",chk,20)
canvas.add_checkbox("chkC","C",chk,20)
inp = canvas.add_input("equ:",check,200)
inp.set_text("A & (not(B) | not(C))")
# Evaluate the default equation once so `out` is valid before the first draw.
process(inp.get_text())
def color(value):
    """Map a logic level ('1' = high) to the lamp fill colour."""
    return 'Yellow' if value == '1' else 'White'
# Redraw callback, invoked every 17 ms by the timer below (~60 fps).
def draw():
    """Repaint the scene: input lamps, output lamp and the current equation."""
    global bits, out
    canvas.fill_rect(0,0,500,500)  # clear the previous frame
    left = 80   # left margin of the lamp column
    top = 30    # vertical offset applied to everything below the header
    canvas.draw_line((0,30+top),(500,30+top),4,'Blue')  # header separator
    cl2 = 'Yellow'  # text colour
    # Column headers (French UI text: "ENTREES" = inputs, "SORTIES" = outputs).
    canvas.draw_text("ENTREES",(left-30,25+top),24,cl2)
    canvas.draw_text("SORTIES",(left+250,25+top),24,cl2)
    # One lamp per input bit, filled Yellow when the bit is 1 (see color()).
    canvas.draw_circle((left, 50+top), 10, 2, 'Blue', color(str(bits['A'])))
    canvas.draw_circle((left, 73+top), 10, 2, 'Blue', color(str(bits['B'])))
    canvas.draw_circle((left, 96+top), 10, 2, 'Blue', color(str(bits['C'])))
    # Output lamp reflects the evaluated equation result.
    canvas.draw_circle((left+170, 50+top), 10, 2, 'Blue', color(out))
    canvas.draw_text("A: "+str(bits['A']),(left+15,58+top),24,cl2)
    canvas.draw_text("B: "+str(bits['B']),(left+15,80+top),24,cl2)
    canvas.draw_text("C: "+str(bits['C']),(left+15,102+top),24,cl2)
    canvas.draw_text(inp.get_text(),(left+185,58+top),24,cl2)  # show the equation
"""
A FAIRE:
Dessiner avec draw_circle et draw_line les interrupteurs
et les faire basculer et eteindre quand les entrees changent
"""
#appel de la temporisation de dessin de l'ecran
t = canvas.create_timer(17,draw)
t.start()
| [
"mrtseb@gmail.com"
] | mrtseb@gmail.com |
41240f0a6dd4ad176be9cc6e8b9eb2e11cf60e08 | e7b0547134291e4707f9760d4c4ce7bf678b2e7a | /block.py | 6017f3d3de2325fadaecaf921e1a4ba28c148138 | [] | no_license | ThisLiftIsGoingDown/Garden-Railway-Control | 154c51c321360c5289077ed10ff3a80b55b2210e | f66ff0f7750400151e688cab23d1316a4c2909f0 | refs/heads/main | 2023-07-15T07:41:59.640067 | 2021-08-24T07:52:42 | 2021-08-24T07:52:42 | 373,764,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | from enum import Enum
class State(Enum):
    """Occupancy status of a single track block."""
    vacant = 0
    occupied = 1
    outOfService = 2


class Block:
    """A track block tied to a control node, tracking its occupancy state."""

    def __init__(self, node, startState=State.vacant):
        # Remember the controlling node and the initial occupancy.
        self.node = node
        self.state = startState

    def checkState(self):
        """Report the block's current occupancy state."""
        return self.state

    def updateState(self, newState):
        """Record a change of the block's occupancy state."""
        self.state = newState
| [
"david.bartsch@bluewin.ch"
] | david.bartsch@bluewin.ch |
1ae71121fe67533c75e20874fc8ff41f033c1d67 | a9243f735f6bb113b18aa939898a97725c358a6d | /0.16/_downloads/plot_artifacts_detection.py | 86f915a1f8213e207c582dae54ccbc31f59c58bd | [] | permissive | massich/mne-tools.github.io | 9eaf5edccb4c35831400b03278bb8c2321774ef2 | 95650593ba0eca4ff8257ebcbdf05731038d8d4e | refs/heads/master | 2020-04-07T08:55:46.850530 | 2019-09-24T12:26:02 | 2019-09-24T12:26:02 | 158,233,630 | 0 | 0 | BSD-3-Clause | 2018-11-19T14:06:16 | 2018-11-19T14:06:16 | null | UTF-8 | Python | false | false | 5,773 | py | """
Introduction to artifacts and artifact detection
================================================
Since MNE supports the data of many different acquisition systems, the
particular artifacts in your data might behave very differently from the
artifacts you can observe in our tutorials and examples.
Therefore you should be aware of the different approaches and of
the variability of artifact rejection (automatic/manual) procedures described
onwards. At the end consider always to visually inspect your data
after artifact rejection or correction.
Background: what is an artifact?
--------------------------------
Artifacts are signal interference that can be
endogenous (biological) and exogenous (environmental).
Typical biological artifacts are head movements, eye blinks
or eye movements, heart beats. The most common environmental
artifact is due to the power line, the so-called *line noise*.
How to handle artifacts?
------------------------
MNE deals with artifacts by first identifying them, and subsequently removing
them. Detection of artifacts can be done visually, or using automatic routines
(or a combination of both). After you know what the artifacts are, you need
remove them. This can be done by:
- *ignoring* the piece of corrupted data
- *fixing* the corrupted data
For the artifact detection the functions MNE provides depend on whether
your data is continuous (Raw) or epoch-based (Epochs) and depending on
whether your data is stored on disk or already in memory.
Detecting the artifacts without reading the complete data into memory allows
you to work with datasets that are too large to fit in memory all at once.
Detecting the artifacts in continuous data allows you to apply filters
(e.g. a band-pass filter to zoom in on the muscle artifacts on the temporal
channels) without having to worry about edge effects due to the filter
(i.e. filter ringing). Having the data in memory after segmenting/epoching is
however a very efficient way of browsing through the data which helps
in visualizing. So to conclude, there is not a single most optimal manner
to detect the artifacts: it just depends on the data properties and your
own preferences.
In this tutorial we show how to detect artifacts visually and automatically.
For how to correct artifacts by rejection see
:ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_rejection.py`.
To discover how to correct certain artifacts by filtering see
:ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_filtering.py`
and to learn how to correct artifacts
with subspace methods like SSP and ICA see
:ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_ssp.py`
and :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_ica.py`.
Artifacts Detection
-------------------
This tutorial discusses a couple of major artifacts that most analyses
have to deal with and demonstrates how to detect them.
"""
import numpy as np
import mne
from mne.datasets import sample
from mne.preprocessing import create_ecg_epochs, create_eog_epochs
# getting some data ready
data_path = sample.data_path()  # path to the MNE sample dataset (fetched on first use)
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)  # preload=True: read data into memory

###############################################################################
# Low frequency drifts and line noise
# (magnetometers only; the first SSP projector is removed so the raw drifts
# stay visible, and 60 s of data are shown per page)

(raw.copy().pick_types(meg='mag')
 .del_proj(0)
 .plot(duration=60, n_channels=100, remove_dc=False))

###############################################################################
# we see high amplitude undulations in low frequencies, spanning across tens of
# seconds

raw.plot_psd(tmax=np.inf, fmax=250)

###############################################################################
# On MEG sensors we see narrow frequency peaks at 60, 120, 180, 240 Hz,
# related to line noise.
# But also some high amplitude signals between 25 and 32 Hz, hinting at other
# biological artifacts such as ECG. These can be most easily detected in the
# time domain using MNE helper functions.
#
# See :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_filtering.py`.

###############################################################################
# ECG
# ---
#
# finds ECG events, creates epochs, averages and plots

average_ecg = create_ecg_epochs(raw).average()
print('We found %i ECG events' % average_ecg.nave)
# label both time axes in seconds in the joint plots below
joint_kwargs = dict(ts_args=dict(time_unit='s'),
                    topomap_args=dict(time_unit='s'))
average_ecg.plot_joint(**joint_kwargs)

###############################################################################
# we can see typical time courses and non dipolar topographies;
# note the order of magnitude of the average artifact related signal and
# compare this to what you observe for brain signals

###############################################################################
# EOG
# ---

average_eog = create_eog_epochs(raw).average()
print('We found %i EOG events' % average_eog.nave)
average_eog.plot_joint(**joint_kwargs)

###############################################################################
# Knowing these artifact patterns is of paramount importance when
# judging about the quality of artifact removal techniques such as SSP or ICA.
# As a rule of thumb you need artifact amplitudes orders of magnitude higher
# than your signal of interest and you need a few of such events in order
# to find decompositions that allow you to estimate and remove patterns related
# to artifacts.
#
# Consider the following tutorials for correcting this class of artifacts:
# - :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_filtering.py`
# - :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_ica.py`
# - :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_ssp.py`
| [
"larson.eric.d@gmail.com"
] | larson.eric.d@gmail.com |
ca295de07a553fcbc33476193a590c0edf04cefc | 64310ffff77de9878f4a51e8e1c74ae6e796a79c | /external/gnuradio/gfsk_rx.py | af6cdc083decc97bba9b1aee10931101a178d2e3 | [] | no_license | learning-lte/gnuradio-modem-gmsk | f9849f35dadc95f145d92a67a28d42fd6939093d | dab60f749f39466ca8708a693b41fdbee4603d7b | refs/heads/master | 2022-01-19T21:02:59.480389 | 2019-04-29T18:08:20 | 2019-04-29T18:08:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54,621 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: GFSK Receiver
# GNU Radio version: 3.7.13.5
##################################################
if __name__ == '__main__':
    import ctypes
    import sys
    if sys.platform.startswith('linux'):
        try:
            # Tell Xlib this process uses threads before Qt spins any up;
            # best-effort only (GRC-generated boilerplate), so failure is
            # reported but not fatal.
            x11 = ctypes.cdll.LoadLibrary('libX11.so')
            x11.XInitThreads()
        except:
            print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import analog
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import iio
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.qtgui import Range, RangeWidget
from grc_gnuradio import blks2 as grc_blks2
from optparse import OptionParser
import correctiq
import math
import satellites
import sip
import sys
from gnuradio import qtgui
class gfsk_rx(gr.top_block, Qt.QWidget):
def __init__(self, default_bandwidth=20e3, default_baud=9600, default_bin_file_sink="/tmp/rx_data.bin", default_dev=4950/2, default_freq=436750000, default_gain=16, default_ip='127.0.0.1', default_port=7000, default_samp=1920000, sdr_dev="rtl=0"):
gr.top_block.__init__(self, "GFSK Receiver")
Qt.QWidget.__init__(self)
self.setWindowTitle("GFSK Receiver")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "gfsk_rx")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Parameters
##################################################
self.default_bandwidth = default_bandwidth
self.default_baud = default_baud
self.default_bin_file_sink = default_bin_file_sink
self.default_dev = default_dev
self.default_freq = default_freq
self.default_gain = default_gain
self.default_ip = default_ip
self.default_port = default_port
self.default_samp = default_samp
self.sdr_dev = sdr_dev
##################################################
# Variables
##################################################
self.samp_rate_dec = samp_rate_dec = default_baud*8
self.interp_tx = interp_tx = default_samp/default_baud
self.dec_rx = dec_rx = default_samp/samp_rate_dec
self.sps_rx = sps_rx = interp_tx/dec_rx
self.t_points = t_points = 5000
self.rx_gain = rx_gain = 64
self.rrc_taps = rrc_taps = firdes.root_raised_cosine(1, samp_rate_dec, sps_rx, 0.3, 88)
self.low_pass_taps_2 = low_pass_taps_2 = firdes.low_pass(1.0, samp_rate_dec, 9600, 1200, firdes.WIN_HAMMING, 6.76)
self.low_pass_taps = low_pass_taps = firdes.low_pass(1.0, default_samp, 150000, 20000, firdes.WIN_HAMMING, 6.76)
self.freq_xlating = freq_xlating = 000000
self.freq_offset = freq_offset = 2200
self.filter_offset = filter_offset = 0
self.demod_gain = demod_gain = (samp_rate_dec)/(2*math.pi*default_dev)
self.cc_omega_lim = cc_omega_lim = 0.002
self.cc_mu_gain = cc_mu_gain = 0.175
self.cc_mu = cc_mu = 0.5
self.cc_gain = cc_gain = 0.25*0.175*0.175
##################################################
# Blocks
##################################################
self.controls = Qt.QTabWidget()
self.controls_widget_0 = Qt.QWidget()
self.controls_layout_0 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.controls_widget_0)
self.controls_grid_layout_0 = Qt.QGridLayout()
self.controls_layout_0.addLayout(self.controls_grid_layout_0)
self.controls.addTab(self.controls_widget_0, 'RF')
self.controls_widget_1 = Qt.QWidget()
self.controls_layout_1 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.controls_widget_1)
self.controls_grid_layout_1 = Qt.QGridLayout()
self.controls_layout_1.addLayout(self.controls_grid_layout_1)
self.controls.addTab(self.controls_widget_1, 'Filter/Demod')
self.controls_widget_2 = Qt.QWidget()
self.controls_layout_2 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.controls_widget_2)
self.controls_grid_layout_2 = Qt.QGridLayout()
self.controls_layout_2.addLayout(self.controls_grid_layout_2)
self.controls.addTab(self.controls_widget_2, 'Receiver DSP')
self.top_grid_layout.addWidget(self.controls, 0, 0, 1, 4)
for r in range(0, 1):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self._demod_gain_range = Range(1, 100, 1, (samp_rate_dec)/(2*math.pi*default_dev), 200)
self._demod_gain_win = RangeWidget(self._demod_gain_range, self.set_demod_gain, 'Demodulator Gain', "counter_slider", float)
self.controls_grid_layout_1.addWidget(self._demod_gain_win, 0, 0, 1, 1)
for r in range(0, 1):
self.controls_grid_layout_1.setRowStretch(r, 1)
for c in range(0, 1):
self.controls_grid_layout_1.setColumnStretch(c, 1)
self.signals = Qt.QTabWidget()
self.signals_widget_0 = Qt.QWidget()
self.signals_layout_0 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.signals_widget_0)
self.signals_grid_layout_0 = Qt.QGridLayout()
self.signals_layout_0.addLayout(self.signals_grid_layout_0)
self.signals.addTab(self.signals_widget_0, 'Receiver')
self.signals_widget_1 = Qt.QWidget()
self.signals_layout_1 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.signals_widget_1)
self.signals_grid_layout_1 = Qt.QGridLayout()
self.signals_layout_1.addLayout(self.signals_grid_layout_1)
self.signals.addTab(self.signals_widget_1, 'Filter RX')
self.signals_widget_2 = Qt.QWidget()
self.signals_layout_2 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.signals_widget_2)
self.signals_grid_layout_2 = Qt.QGridLayout()
self.signals_layout_2.addLayout(self.signals_grid_layout_2)
self.signals.addTab(self.signals_widget_2, 'Modulator')
self.signals_widget_3 = Qt.QWidget()
self.signals_layout_3 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.signals_widget_3)
self.signals_grid_layout_3 = Qt.QGridLayout()
self.signals_layout_3.addLayout(self.signals_grid_layout_3)
self.signals.addTab(self.signals_widget_3, 'Dec Filter')
self.signals_widget_4 = Qt.QWidget()
self.signals_layout_4 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.signals_widget_4)
self.signals_grid_layout_4 = Qt.QGridLayout()
self.signals_layout_4.addLayout(self.signals_grid_layout_4)
self.signals.addTab(self.signals_widget_4, 'Clock Recovery/Bitstream')
self.top_grid_layout.addWidget(self.signals, 1, 0, 2, 4)
for r in range(1, 3):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self._rx_gain_range = Range(0, 100, 1, 64, 200)
self._rx_gain_win = RangeWidget(self._rx_gain_range, self.set_rx_gain, 'RX Power Gain', "counter_slider", float)
self.controls_grid_layout_0.addWidget(self._rx_gain_win, 0, 0, 1, 1)
for r in range(0, 1):
self.controls_grid_layout_0.setRowStretch(r, 1)
for c in range(0, 1):
self.controls_grid_layout_0.setColumnStretch(c, 1)
self._freq_offset_range = Range(-20000, 20000, 100, 2200, 200)
self._freq_offset_win = RangeWidget(self._freq_offset_range, self.set_freq_offset, 'Signal Frequency Offset', "counter_slider", int)
self.controls_grid_layout_0.addWidget(self._freq_offset_win, 0, 1, 1, 1)
for r in range(0, 1):
self.controls_grid_layout_0.setRowStretch(r, 1)
for c in range(1, 2):
self.controls_grid_layout_0.setColumnStretch(c, 1)
self._filter_offset_range = Range(-1*demod_gain, 1*demod_gain, 0.01, 0, 200)
self._filter_offset_win = RangeWidget(self._filter_offset_range, self.set_filter_offset, 'Signal Offset', "counter_slider", float)
self.controls_grid_layout_1.addWidget(self._filter_offset_win, 0, 1, 1, 1)
for r in range(0, 1):
self.controls_grid_layout_1.setRowStretch(r, 1)
for c in range(1, 2):
self.controls_grid_layout_1.setColumnStretch(c, 1)
self._cc_omega_lim_range = Range(0.0005, 0.02, 0.0001, 0.002, 200)
self._cc_omega_lim_win = RangeWidget(self._cc_omega_lim_range, self.set_cc_omega_lim, 'CC Omega Lim', "counter_slider", float)
self.controls_grid_layout_2.addWidget(self._cc_omega_lim_win, 0, 3, 1, 1)
for r in range(0, 1):
self.controls_grid_layout_2.setRowStretch(r, 1)
for c in range(3, 4):
self.controls_grid_layout_2.setColumnStretch(c, 1)
self._cc_mu_gain_range = Range(0.01, 0.5, 0.05, 0.175, 200)
self._cc_mu_gain_win = RangeWidget(self._cc_mu_gain_range, self.set_cc_mu_gain, 'CC MU gain', "counter_slider", float)
self.controls_grid_layout_2.addWidget(self._cc_mu_gain_win, 0, 2, 1, 1)
for r in range(0, 1):
self.controls_grid_layout_2.setRowStretch(r, 1)
for c in range(2, 3):
self.controls_grid_layout_2.setColumnStretch(c, 1)
self._cc_mu_range = Range(0.1, 2, 0.1, 0.5, 200)
self._cc_mu_win = RangeWidget(self._cc_mu_range, self.set_cc_mu, 'CC MU', "counter_slider", float)
self.controls_grid_layout_2.addWidget(self._cc_mu_win, 0, 1, 1, 1)
for r in range(0, 1):
self.controls_grid_layout_2.setRowStretch(r, 1)
for c in range(1, 2):
self.controls_grid_layout_2.setColumnStretch(c, 1)
self.satellites_nrzi_decode_0_0 = satellites.nrzi_decode()
self.satellites_nrzi_decode_0 = satellites.nrzi_decode()
self.qtgui_waterfall_sink_x_0_0_0_0_0 = qtgui.waterfall_sink_f(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate_dec, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0_0_0_0_0.set_update_time(0.10)
self.qtgui_waterfall_sink_x_0_0_0_0_0.enable_grid(False)
self.qtgui_waterfall_sink_x_0_0_0_0_0.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0_0_0_0_0.disable_legend()
if "float" == "float" or "float" == "msg_float":
self.qtgui_waterfall_sink_x_0_0_0_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0_0_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0_0_0_0_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0_0_0_0_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0_0_0_0_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0_0_0_0_0.set_intensity_range(-140, 10)
self._qtgui_waterfall_sink_x_0_0_0_0_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_0_0_0_0.pyqwidget(), Qt.QWidget)
self.signals_grid_layout_3.addWidget(self._qtgui_waterfall_sink_x_0_0_0_0_0_win, 1, 3, 1, 3)
for r in range(1, 2):
self.signals_grid_layout_3.setRowStretch(r, 1)
for c in range(3, 6):
self.signals_grid_layout_3.setColumnStretch(c, 1)
self.qtgui_waterfall_sink_x_0_0_0_0 = qtgui.waterfall_sink_f(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate_dec, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0_0_0_0.set_update_time(0.10)
self.qtgui_waterfall_sink_x_0_0_0_0.enable_grid(False)
self.qtgui_waterfall_sink_x_0_0_0_0.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0_0_0_0.disable_legend()
if "float" == "float" or "float" == "msg_float":
self.qtgui_waterfall_sink_x_0_0_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0_0_0_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0_0_0_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0_0_0_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0_0_0_0.set_intensity_range(-140, 10)
self._qtgui_waterfall_sink_x_0_0_0_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_0_0_0.pyqwidget(), Qt.QWidget)
self.signals_grid_layout_2.addWidget(self._qtgui_waterfall_sink_x_0_0_0_0_win, 1, 3, 1, 3)
for r in range(1, 2):
self.signals_grid_layout_2.setRowStretch(r, 1)
for c in range(3, 6):
self.signals_grid_layout_2.setColumnStretch(c, 1)
self.qtgui_waterfall_sink_x_0_0_0 = qtgui.waterfall_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate_dec, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0_0_0.set_update_time(0.10)
self.qtgui_waterfall_sink_x_0_0_0.enable_grid(False)
self.qtgui_waterfall_sink_x_0_0_0.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0_0_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0_0_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0_0_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0_0_0.set_intensity_range(-140, 10)
self._qtgui_waterfall_sink_x_0_0_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_0_0.pyqwidget(), Qt.QWidget)
self.signals_grid_layout_1.addWidget(self._qtgui_waterfall_sink_x_0_0_0_win, 1, 3, 1, 3)
for r in range(1, 2):
self.signals_grid_layout_1.setRowStretch(r, 1)
for c in range(3, 6):
self.signals_grid_layout_1.setColumnStretch(c, 1)
self.qtgui_waterfall_sink_x_0_0 = qtgui.waterfall_sink_c(
2048, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
default_samp, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0_0.set_update_time(0.0000010)
self.qtgui_waterfall_sink_x_0_0.enable_grid(False)
self.qtgui_waterfall_sink_x_0_0.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0_0.set_intensity_range(-140, 10)
self._qtgui_waterfall_sink_x_0_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.signals_grid_layout_0.addWidget(self._qtgui_waterfall_sink_x_0_0_win, 2, 0, 1, 6)
for r in range(2, 3):
self.signals_grid_layout_0.setRowStretch(r, 1)
for c in range(0, 6):
self.signals_grid_layout_0.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_0_0_0_0_1 = qtgui.time_sink_f(
t_points, #size
samp_rate_dec, #samp_rate
'', #name
1 #number of inputs
)
self.qtgui_time_sink_x_0_0_0_0_0_1.set_update_time(0.10)
self.qtgui_time_sink_x_0_0_0_0_0_1.set_y_axis(-2, 2)
self.qtgui_time_sink_x_0_0_0_0_0_1.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_0_0_0_0_1.enable_tags(-1, True)
self.qtgui_time_sink_x_0_0_0_0_0_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_0_0_0_0_0_1.enable_autoscale(False)
self.qtgui_time_sink_x_0_0_0_0_0_1.enable_grid(False)
self.qtgui_time_sink_x_0_0_0_0_0_1.enable_axis_labels(True)
self.qtgui_time_sink_x_0_0_0_0_0_1.enable_control_panel(False)
self.qtgui_time_sink_x_0_0_0_0_0_1.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0_0_0_0_0_1.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_0_0_0_0_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_0_0_0_0_1.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_0_0_0_0_1.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_0_0_0_0_1.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_0_0_0_0_1.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_0_0_0_0_1.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_0_0_0_0_1.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_0_0_0_0_1_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0_0_0_0_1.pyqwidget(), Qt.QWidget)
self.signals_grid_layout_3.addWidget(self._qtgui_time_sink_x_0_0_0_0_0_1_win, 0, 0, 1, 6)
for r in range(0, 1):
self.signals_grid_layout_3.setRowStretch(r, 1)
for c in range(0, 6):
self.signals_grid_layout_3.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_0_0_0_0_0_0 = qtgui.time_sink_f(
t_points, #size
samp_rate_dec/8, #samp_rate
'Time RX In', #name
2 #number of inputs
)
self.qtgui_time_sink_x_0_0_0_0_0_0_0.set_update_time(0.10)
self.qtgui_time_sink_x_0_0_0_0_0_0_0.set_y_axis(-2, 2)
self.qtgui_time_sink_x_0_0_0_0_0_0_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_0_0_0_0_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_0_0_0_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_0_0_0_0_0_0_0.enable_autoscale(False)
self.qtgui_time_sink_x_0_0_0_0_0_0_0.enable_grid(False)
self.qtgui_time_sink_x_0_0_0_0_0_0_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_0_0_0_0_0_0.enable_control_panel(False)
self.qtgui_time_sink_x_0_0_0_0_0_0_0.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0_0_0_0_0_0_0.disable_legend()
labels = ['Clock Recovery', 'Bitstream', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, 0, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_0_0_0_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_0_0_0_0_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_0_0_0_0_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_0_0_0_0_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_0_0_0_0_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_0_0_0_0_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_0_0_0_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_0_0_0_0_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0_0_0_0_0_0.pyqwidget(), Qt.QWidget)
self.signals_grid_layout_4.addWidget(self._qtgui_time_sink_x_0_0_0_0_0_0_0_win, 0, 0, 2, 2)
for r in range(0, 2):
self.signals_grid_layout_4.setRowStretch(r, 1)
for c in range(0, 2):
self.signals_grid_layout_4.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_0_0_0_0 = qtgui.time_sink_f(
t_points*2, #size
samp_rate_dec, #samp_rate
'', #name
1 #number of inputs
)
self.qtgui_time_sink_x_0_0_0_0_0.set_update_time(0.10)
self.qtgui_time_sink_x_0_0_0_0_0.set_y_axis(-2, 2)
self.qtgui_time_sink_x_0_0_0_0_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_0_0_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_0_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_0_0_0_0_0.enable_autoscale(True)
self.qtgui_time_sink_x_0_0_0_0_0.enable_grid(False)
self.qtgui_time_sink_x_0_0_0_0_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_0_0_0_0.enable_control_panel(False)
self.qtgui_time_sink_x_0_0_0_0_0.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0_0_0_0_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_0_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_0_0_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_0_0_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_0_0_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_0_0_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_0_0_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_0_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_0_0_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0_0_0_0.pyqwidget(), Qt.QWidget)
self.signals_grid_layout_2.addWidget(self._qtgui_time_sink_x_0_0_0_0_0_win, 0, 0, 1, 6)
for r in range(0, 1):
self.signals_grid_layout_2.setRowStretch(r, 1)
for c in range(0, 6):
self.signals_grid_layout_2.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_0_0_0 = qtgui.time_sink_c(
200, #size
samp_rate_dec, #samp_rate
'Time RX In', #name
1 #number of inputs
)
self.qtgui_time_sink_x_0_0_0_0.set_update_time(0.10)
self.qtgui_time_sink_x_0_0_0_0.set_y_axis(-2, 2)
self.qtgui_time_sink_x_0_0_0_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_0_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_0_0_0_0.enable_autoscale(False)
self.qtgui_time_sink_x_0_0_0_0.enable_grid(False)
self.qtgui_time_sink_x_0_0_0_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_0_0_0.enable_control_panel(False)
self.qtgui_time_sink_x_0_0_0_0.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0_0_0_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
if(i % 2 == 0):
self.qtgui_time_sink_x_0_0_0_0.set_line_label(i, "Re{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0_0_0_0.set_line_label(i, "Im{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0_0_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_0_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_0_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_0_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_0_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_0_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0_0_0.pyqwidget(), Qt.QWidget)
self.signals_grid_layout_1.addWidget(self._qtgui_time_sink_x_0_0_0_0_win, 0, 0, 1, 6)
for r in range(0, 1):
self.signals_grid_layout_1.setRowStretch(r, 1)
for c in range(0, 6):
self.signals_grid_layout_1.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_0_0 = qtgui.time_sink_c(
t_points+1000, #size
default_samp, #samp_rate
'Time RX In', #name
1 #number of inputs
)
self.qtgui_time_sink_x_0_0_0.set_update_time(0.10)
self.qtgui_time_sink_x_0_0_0.set_y_axis(-2, 2)
self.qtgui_time_sink_x_0_0_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_0_0_0.enable_autoscale(False)
self.qtgui_time_sink_x_0_0_0.enable_grid(False)
self.qtgui_time_sink_x_0_0_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_0_0.enable_control_panel(False)
self.qtgui_time_sink_x_0_0_0.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0_0_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
if(i % 2 == 0):
self.qtgui_time_sink_x_0_0_0.set_line_label(i, "Re{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0_0_0.set_line_label(i, "Im{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0_0.pyqwidget(), Qt.QWidget)
self.signals_grid_layout_0.addWidget(self._qtgui_time_sink_x_0_0_0_win, 0, 0, 1, 3)
for r in range(0, 1):
self.signals_grid_layout_0.setRowStretch(r, 1)
for c in range(0, 3):
self.signals_grid_layout_0.setColumnStretch(c, 1)
self.qtgui_freq_sink_x_0_0_1_0_0_0 = qtgui.freq_sink_f(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate_dec, #bw
'FFT RX in', #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0_0_1_0_0_0.set_update_time(0.10)
self.qtgui_freq_sink_x_0_0_1_0_0_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0_0_1_0_0_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0_0_1_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_0_1_0_0_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_0_1_0_0_0.enable_grid(False)
self.qtgui_freq_sink_x_0_0_1_0_0_0.set_fft_average(0.05)
self.qtgui_freq_sink_x_0_0_1_0_0_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0_0_1_0_0_0.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_0_0_1_0_0_0.disable_legend()
if "float" == "float" or "float" == "msg_float":
self.qtgui_freq_sink_x_0_0_1_0_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [2, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_0_1_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_0_1_0_0_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_0_1_0_0_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_0_1_0_0_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_0_1_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_0_1_0_0_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_0_1_0_0_0.pyqwidget(), Qt.QWidget)
self.signals_grid_layout_3.addWidget(self._qtgui_freq_sink_x_0_0_1_0_0_0_win, 1, 0, 1, 3)
for r in range(1, 2):
self.signals_grid_layout_3.setRowStretch(r, 1)
for c in range(0, 3):
self.signals_grid_layout_3.setColumnStretch(c, 1)
self.qtgui_freq_sink_x_0_0_1_0_0 = qtgui.freq_sink_f(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate_dec, #bw
'FFT RX in', #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0_0_1_0_0.set_update_time(0.10)
self.qtgui_freq_sink_x_0_0_1_0_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0_0_1_0_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0_0_1_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_0_1_0_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_0_1_0_0.enable_grid(False)
self.qtgui_freq_sink_x_0_0_1_0_0.set_fft_average(0.05)
self.qtgui_freq_sink_x_0_0_1_0_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0_0_1_0_0.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_0_0_1_0_0.disable_legend()
if "float" == "float" or "float" == "msg_float":
self.qtgui_freq_sink_x_0_0_1_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [2, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_0_1_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_0_1_0_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_0_1_0_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_0_1_0_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_0_1_0_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_0_1_0_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_0_1_0_0.pyqwidget(), Qt.QWidget)
self.signals_grid_layout_2.addWidget(self._qtgui_freq_sink_x_0_0_1_0_0_win, 1, 0, 1, 3)
for r in range(1, 2):
self.signals_grid_layout_2.setRowStretch(r, 1)
for c in range(0, 3):
self.signals_grid_layout_2.setColumnStretch(c, 1)
self.qtgui_freq_sink_x_0_0_1_0 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate_dec, #bw
'FFT RX in', #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0_0_1_0.set_update_time(0.10)
self.qtgui_freq_sink_x_0_0_1_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0_0_1_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0_0_1_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_0_1_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_0_1_0.enable_grid(False)
self.qtgui_freq_sink_x_0_0_1_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0_0_1_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0_0_1_0.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_0_0_1_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0_0_1_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [2, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_0_1_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_0_1_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_0_1_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_0_1_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_0_1_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_0_1_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_0_1_0.pyqwidget(), Qt.QWidget)
self.signals_grid_layout_1.addWidget(self._qtgui_freq_sink_x_0_0_1_0_win, 1, 0, 1, 3)
for r in range(1, 2):
self.signals_grid_layout_1.setRowStretch(r, 1)
for c in range(0, 3):
self.signals_grid_layout_1.setColumnStretch(c, 1)
self.qtgui_freq_sink_x_0_0_1 = qtgui.freq_sink_c(
2048, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
default_samp, #bw
'FFT RX in', #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0_0_1.set_update_time(0.0000010)
self.qtgui_freq_sink_x_0_0_1.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0_0_1.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0_0_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_0_1.enable_autoscale(False)
self.qtgui_freq_sink_x_0_0_1.enable_grid(False)
self.qtgui_freq_sink_x_0_0_1.set_fft_average(1.0)
self.qtgui_freq_sink_x_0_0_1.enable_axis_labels(True)
self.qtgui_freq_sink_x_0_0_1.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_0_0_1.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0_0_1.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [2, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_0_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_0_1.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_0_1.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_0_1.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_0_1.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_0_1_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_0_1.pyqwidget(), Qt.QWidget)
self.signals_grid_layout_0.addWidget(self._qtgui_freq_sink_x_0_0_1_win, 0, 3, 1, 3)
for r in range(0, 1):
self.signals_grid_layout_0.setRowStretch(r, 1)
for c in range(3, 6):
self.signals_grid_layout_0.setColumnStretch(c, 1)
self.iio_fmcomms2_source_0 = iio.fmcomms2_source_f32c('ip:pluto.local', default_freq-freq_xlating+freq_offset, default_samp, 20000000, True, False, 0x8000, True, True, True, "fast_attack", rx_gain, "fast_attack", 64.0, "A_BALANCED", '', True)
self.freq_xlating_fir_filter_xxx_0 = filter.freq_xlating_fir_filter_ccc(dec_rx, (low_pass_taps), freq_xlating, default_samp)
self.fir_filter_xxx_0_0 = filter.fir_filter_fff(1, (low_pass_taps_2))
self.fir_filter_xxx_0_0.declare_sample_delay(0)
self.digital_clock_recovery_mm_xx_0 = digital.clock_recovery_mm_ff(sps_rx, 0.25*0.175*0.175, cc_mu, cc_mu_gain, cc_omega_lim)
self.digital_binary_slicer_fb_0 = digital.binary_slicer_fb()
self.correctiq_correctiq_0 = correctiq.correctiq()
self._cc_gain_range = Range(1e-3, 50e-3, 1e-3, 0.25*0.175*0.175, 200)
self._cc_gain_win = RangeWidget(self._cc_gain_range, self.set_cc_gain, 'CC Omega Gain', "counter_slider", float)
self.controls_grid_layout_2.addWidget(self._cc_gain_win, 0, 0, 1, 1)
for r in range(0, 1):
self.controls_grid_layout_2.setRowStretch(r, 1)
for c in range(0, 1):
self.controls_grid_layout_2.setColumnStretch(c, 1)
self.blocks_pack_k_bits_bb_0_0 = blocks.pack_k_bits_bb(8)
self.blocks_char_to_float_0 = blocks.char_to_float(1, 1)
self.blocks_add_const_vxx_0 = blocks.add_const_vff((filter_offset*demod_gain, ))
self.blks2_tcp_sink_0 = grc_blks2.tcp_sink(
itemsize=gr.sizeof_char*1,
addr=default_ip,
port=default_port,
server=True,
)
self.analog_quadrature_demod_cf_0 = analog.quadrature_demod_cf(demod_gain)
##################################################
# Connections
##################################################
self.connect((self.analog_quadrature_demod_cf_0, 0), (self.fir_filter_xxx_0_0, 0))
self.connect((self.analog_quadrature_demod_cf_0, 0), (self.qtgui_freq_sink_x_0_0_1_0_0, 0))
self.connect((self.analog_quadrature_demod_cf_0, 0), (self.qtgui_time_sink_x_0_0_0_0_0, 0))
self.connect((self.analog_quadrature_demod_cf_0, 0), (self.qtgui_waterfall_sink_x_0_0_0_0, 0))
self.connect((self.blocks_add_const_vxx_0, 0), (self.digital_clock_recovery_mm_xx_0, 0))
self.connect((self.blocks_add_const_vxx_0, 0), (self.qtgui_freq_sink_x_0_0_1_0_0_0, 0))
self.connect((self.blocks_add_const_vxx_0, 0), (self.qtgui_time_sink_x_0_0_0_0_0_1, 0))
self.connect((self.blocks_add_const_vxx_0, 0), (self.qtgui_waterfall_sink_x_0_0_0_0_0, 0))
self.connect((self.blocks_char_to_float_0, 0), (self.qtgui_time_sink_x_0_0_0_0_0_0_0, 1))
self.connect((self.blocks_pack_k_bits_bb_0_0, 0), (self.blks2_tcp_sink_0, 0))
self.connect((self.correctiq_correctiq_0, 0), (self.freq_xlating_fir_filter_xxx_0, 0))
self.connect((self.digital_binary_slicer_fb_0, 0), (self.satellites_nrzi_decode_0, 0))
self.connect((self.digital_binary_slicer_fb_0, 0), (self.satellites_nrzi_decode_0_0, 0))
self.connect((self.digital_clock_recovery_mm_xx_0, 0), (self.digital_binary_slicer_fb_0, 0))
self.connect((self.digital_clock_recovery_mm_xx_0, 0), (self.qtgui_time_sink_x_0_0_0_0_0_0_0, 0))
self.connect((self.fir_filter_xxx_0_0, 0), (self.blocks_add_const_vxx_0, 0))
self.connect((self.freq_xlating_fir_filter_xxx_0, 0), (self.analog_quadrature_demod_cf_0, 0))
self.connect((self.freq_xlating_fir_filter_xxx_0, 0), (self.qtgui_freq_sink_x_0_0_1_0, 0))
self.connect((self.freq_xlating_fir_filter_xxx_0, 0), (self.qtgui_time_sink_x_0_0_0_0, 0))
self.connect((self.freq_xlating_fir_filter_xxx_0, 0), (self.qtgui_waterfall_sink_x_0_0_0, 0))
self.connect((self.iio_fmcomms2_source_0, 0), (self.correctiq_correctiq_0, 0))
self.connect((self.iio_fmcomms2_source_0, 0), (self.qtgui_freq_sink_x_0_0_1, 0))
self.connect((self.iio_fmcomms2_source_0, 0), (self.qtgui_time_sink_x_0_0_0, 0))
self.connect((self.iio_fmcomms2_source_0, 0), (self.qtgui_waterfall_sink_x_0_0, 0))
self.connect((self.satellites_nrzi_decode_0, 0), (self.blocks_char_to_float_0, 0))
self.connect((self.satellites_nrzi_decode_0_0, 0), (self.blocks_pack_k_bits_bb_0_0, 0))
# Qt close hook: persist the current window geometry under the
# "GNU Radio"/"gfsk_rx" QSettings keys so the next run restores the same
# window size/position, then let the close proceed.
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "gfsk_rx")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
# ---- Auto-generated GRC variable accessors ----
# One get_/set_ pair per flowgraph variable.  Each setter stores the new
# value and then re-applies it to every dependent variable, DSP block and
# Qt sink, mirroring the variable dependency graph of the .grc file.
def get_default_bandwidth(self):
return self.default_bandwidth
# NOTE(review): no dependents are updated here — changing the bandwidth at
# runtime appears to have no effect on the running flowgraph; confirm intended.
def set_default_bandwidth(self, default_bandwidth):
self.default_bandwidth = default_bandwidth
def get_default_baud(self):
return self.default_baud
# Baud rate drives the decimated sample rate (8 samples per symbol) and
# the TX interpolation factor.
def set_default_baud(self, default_baud):
self.default_baud = default_baud
self.set_samp_rate_dec(self.default_baud*8)
self.set_interp_tx(self.default_samp/self.default_baud)
def get_default_bin_file_sink(self):
return self.default_bin_file_sink
def set_default_bin_file_sink(self, default_bin_file_sink):
self.default_bin_file_sink = default_bin_file_sink
def get_default_dev(self):
return self.default_dev
# FM deviation: feeds the quadrature-demod gain, samp_rate/(2*pi*deviation).
def set_default_dev(self, default_dev):
self.default_dev = default_dev
self.set_demod_gain((self.samp_rate_dec)/(2*math.pi*self.default_dev))
def get_default_freq(self):
return self.default_freq
# Retunes the PlutoSDR source; the hardware LO is offset by the xlating
# filter frequency and the static frequency offset.
# NOTE(review): this set_params() call passes fewer arguments than the
# fmcomms2_source_f32c constructor — verify it matches the installed
# gr-iio set_params() signature.
def set_default_freq(self, default_freq):
self.default_freq = default_freq
self.iio_fmcomms2_source_0.set_params(self.default_freq-self.freq_xlating+self.freq_offset, self.default_samp, 20000000, True, True, True, "fast_attack", self.rx_gain, "fast_attack", 64.0, "A_BALANCED", '', True)
def get_default_gain(self):
return self.default_gain
def set_default_gain(self, default_gain):
self.default_gain = default_gain
def get_default_ip(self):
return self.default_ip
# NOTE(review): the TCP sink address/port are fixed at construction time;
# these two setters only record the new value.
def set_default_ip(self, default_ip):
self.default_ip = default_ip
def get_default_port(self):
return self.default_port
def set_default_port(self, default_port):
self.default_port = default_port
def get_default_samp(self):
return self.default_samp
# Hardware sample rate: re-derives RX decimation / TX interpolation,
# rescales the full-rate Qt sinks and reprograms the Pluto source.
def set_default_samp(self, default_samp):
self.default_samp = default_samp
self.set_dec_rx(self.default_samp/self.samp_rate_dec)
self.qtgui_waterfall_sink_x_0_0.set_frequency_range(0, self.default_samp)
self.qtgui_time_sink_x_0_0_0.set_samp_rate(self.default_samp)
self.qtgui_freq_sink_x_0_0_1.set_frequency_range(0, self.default_samp)
self.set_interp_tx(self.default_samp/self.default_baud)
self.iio_fmcomms2_source_0.set_params(self.default_freq-self.freq_xlating+self.freq_offset, self.default_samp, 20000000, True, True, True, "fast_attack", self.rx_gain, "fast_attack", 64.0, "A_BALANCED", '', True)
def get_sdr_dev(self):
return self.sdr_dev
def set_sdr_dev(self, sdr_dev):
self.sdr_dev = sdr_dev
def get_samp_rate_dec(self):
return self.samp_rate_dec
# Decimated (post-xlating-filter) rate: rescales the demod gain, the RX
# decimation factor and every decimated-rate Qt sink.
def set_samp_rate_dec(self, samp_rate_dec):
self.samp_rate_dec = samp_rate_dec
self.set_demod_gain((self.samp_rate_dec)/(2*math.pi*self.default_dev))
self.set_dec_rx(self.default_samp/self.samp_rate_dec)
self.qtgui_waterfall_sink_x_0_0_0_0_0.set_frequency_range(0, self.samp_rate_dec)
self.qtgui_waterfall_sink_x_0_0_0_0.set_frequency_range(0, self.samp_rate_dec)
self.qtgui_waterfall_sink_x_0_0_0.set_frequency_range(0, self.samp_rate_dec)
self.qtgui_time_sink_x_0_0_0_0_0_1.set_samp_rate(self.samp_rate_dec)
self.qtgui_time_sink_x_0_0_0_0_0_0_0.set_samp_rate(self.samp_rate_dec/8)
self.qtgui_time_sink_x_0_0_0_0_0.set_samp_rate(self.samp_rate_dec)
self.qtgui_time_sink_x_0_0_0_0.set_samp_rate(self.samp_rate_dec)
self.qtgui_freq_sink_x_0_0_1_0_0_0.set_frequency_range(0, self.samp_rate_dec)
self.qtgui_freq_sink_x_0_0_1_0_0.set_frequency_range(0, self.samp_rate_dec)
self.qtgui_freq_sink_x_0_0_1_0.set_frequency_range(0, self.samp_rate_dec)
def get_interp_tx(self):
return self.interp_tx
def set_interp_tx(self, interp_tx):
self.interp_tx = interp_tx
self.set_sps_rx(self.interp_tx/self.dec_rx)
def get_dec_rx(self):
return self.dec_rx
def set_dec_rx(self, dec_rx):
self.dec_rx = dec_rx
self.set_sps_rx(self.interp_tx/self.dec_rx)
def get_sps_rx(self):
return self.sps_rx
# Samples per symbol at the clock-recovery input: becomes the expected
# omega of the M&M loop.
def set_sps_rx(self, sps_rx):
self.sps_rx = sps_rx
self.digital_clock_recovery_mm_xx_0.set_omega(self.sps_rx)
def get_t_points(self):
return self.t_points
def set_t_points(self, t_points):
self.t_points = t_points
def get_rx_gain(self):
return self.rx_gain
# Reprograms the Pluto source with the new manual RX gain.
def set_rx_gain(self, rx_gain):
self.rx_gain = rx_gain
self.iio_fmcomms2_source_0.set_params(self.default_freq-self.freq_xlating+self.freq_offset, self.default_samp, 20000000, True, True, True, "fast_attack", self.rx_gain, "fast_attack", 64.0, "A_BALANCED", '', True)
def get_rrc_taps(self):
return self.rrc_taps
def set_rrc_taps(self, rrc_taps):
self.rrc_taps = rrc_taps
def get_low_pass_taps_2(self):
return self.low_pass_taps_2
# Post-demod low-pass taps for the audio/bitstream FIR filter.
def set_low_pass_taps_2(self, low_pass_taps_2):
self.low_pass_taps_2 = low_pass_taps_2
self.fir_filter_xxx_0_0.set_taps((self.low_pass_taps_2))
def get_low_pass_taps(self):
return self.low_pass_taps
# Channel-selection taps for the frequency-translating FIR filter.
def set_low_pass_taps(self, low_pass_taps):
self.low_pass_taps = low_pass_taps
self.freq_xlating_fir_filter_xxx_0.set_taps((self.low_pass_taps))
def get_freq_xlating(self):
return self.freq_xlating
# Moving the xlating frequency also retunes the hardware so the signal
# stays centered in the decimated channel.
def set_freq_xlating(self, freq_xlating):
self.freq_xlating = freq_xlating
self.iio_fmcomms2_source_0.set_params(self.default_freq-self.freq_xlating+self.freq_offset, self.default_samp, 20000000, True, True, True, "fast_attack", self.rx_gain, "fast_attack", 64.0, "A_BALANCED", '', True)
self.freq_xlating_fir_filter_xxx_0.set_center_freq(self.freq_xlating)
def get_freq_offset(self):
return self.freq_offset
def set_freq_offset(self, freq_offset):
self.freq_offset = freq_offset
self.iio_fmcomms2_source_0.set_params(self.default_freq-self.freq_xlating+self.freq_offset, self.default_samp, 20000000, True, True, True, "fast_attack", self.rx_gain, "fast_attack", 64.0, "A_BALANCED", '', True)
def get_filter_offset(self):
return self.filter_offset
# DC offset added after the demod FIR, scaled by the demod gain.
def set_filter_offset(self, filter_offset):
self.filter_offset = filter_offset
self.blocks_add_const_vxx_0.set_k((self.filter_offset*self.demod_gain, ))
def get_demod_gain(self):
return self.demod_gain
def set_demod_gain(self, demod_gain):
self.demod_gain = demod_gain
self.blocks_add_const_vxx_0.set_k((self.filter_offset*self.demod_gain, ))
self.analog_quadrature_demod_cf_0.set_gain(self.demod_gain)
def get_cc_omega_lim(self):
return self.cc_omega_lim
def set_cc_omega_lim(self, cc_omega_lim):
self.cc_omega_lim = cc_omega_lim
def get_cc_mu_gain(self):
return self.cc_mu_gain
def set_cc_mu_gain(self, cc_mu_gain):
self.cc_mu_gain = cc_mu_gain
self.digital_clock_recovery_mm_xx_0.set_gain_mu(self.cc_mu_gain)
def get_cc_mu(self):
return self.cc_mu
def set_cc_mu(self, cc_mu):
self.cc_mu = cc_mu
self.digital_clock_recovery_mm_xx_0.set_mu(self.cc_mu)
def get_cc_gain(self):
    """Return the M&M clock-recovery omega (frequency) loop gain."""
    return self.cc_gain

def set_cc_gain(self, cc_gain):
    """Store the new omega gain and apply it to the clock-recovery block.

    Fix: the 'CC Omega Gain' RangeWidget invokes this callback, but the
    generated setter never forwarded the value to the running block — the
    clock_recovery_mm_ff constructor hard-codes gain_omega=0.25*0.175*0.175
    (which is also this variable's initial value), so moving the slider had
    no effect.  Forwarding via set_gain_omega() makes the control live.
    """
    self.cc_gain = cc_gain
    self.digital_clock_recovery_mm_xx_0.set_gain_omega(self.cc_gain)
def argument_parser():
    """Build the OptionParser exposing this flowgraph's parameters.

    Uses gnuradio's eng_option extension so "eng_float" values accept
    engineering notation (e.g. 20k) on the command line.
    """
    parser = OptionParser(usage="%prog: [options]", option_class=eng_option)
    # (short flag, long flag, dest, type, default, help) — one row per
    # flowgraph parameter; values are identical to the .grc defaults.
    option_table = (
        ("-w", "--default-bandwidth", "default_bandwidth", "eng_float",
         eng_notation.num_to_str(20e3), "Set default_bandwidth [default=%default]"),
        ("-b", "--default-baud", "default_baud", "intx",
         9600, "Set default_baud [default=%default]"),
        ("-o", "--default-bin-file-sink", "default_bin_file_sink", "string",
         "/tmp/rx_data.bin", "Set default_bin_file_sink [default=%default]"),
        ("-j", "--default-dev", "default_dev", "eng_float",
         eng_notation.num_to_str(4950/2), "Set Input [default=%default]"),
        ("-f", "--default-freq", "default_freq", "intx",
         436750000, "Set default_freq [default=%default]"),
        ("-g", "--default-gain", "default_gain", "eng_float",
         eng_notation.num_to_str(16), "Set default_gain [default=%default]"),
        ("-i", "--default-ip", "default_ip", "string",
         '127.0.0.1', "Set default_ip [default=%default]"),
        ("-p", "--default-port", "default_port", "intx",
         7000, "Set default_port [default=%default]"),
        ("-s", "--default-samp", "default_samp", "intx",
         1920000, "Set default_samp [default=%default]"),
        ("-d", "--sdr-dev", "sdr_dev", "string",
         "rtl=0", "Set sdr_dev [default=%default]"),
    )
    for short_flag, long_flag, dest, opt_type, default, help_text in option_table:
        parser.add_option(short_flag, long_flag, dest=dest, type=opt_type,
                          default=default, help=help_text)
    return parser
# Script entry point: parse the command line, construct the flowgraph,
# start it, and run the Qt event loop until the window closes.
def main(top_block_cls=gfsk_rx, options=None):
if options is None:
options, _ = argument_parser().parse_args()
# Best effort — real-time scheduling usually requires elevated privileges.
# (Python 2 print statement: this file targets GNU Radio 3.7 / Python 2.)
if gr.enable_realtime_scheduling() != gr.RT_OK:
print "Error: failed to enable real-time scheduling."
from distutils.version import StrictVersion
# The raster graphics system must be selected BEFORE QApplication is
# created (Qt4-only API); order here is deliberate.
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls(default_bandwidth=options.default_bandwidth, default_baud=options.default_baud, default_bin_file_sink=options.default_bin_file_sink, default_dev=options.default_dev, default_freq=options.default_freq, default_gain=options.default_gain, default_ip=options.default_ip, default_port=options.default_port, default_samp=options.default_samp, sdr_dev=options.sdr_dev)
tb.start()
tb.show()
# Stop and join the flowgraph when the Qt application is about to quit,
# so worker threads shut down before process exit.
def quitting():
tb.stop()
tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
if __name__ == '__main__':
main()
| [
"mario.baldini@gmail.com"
] | mario.baldini@gmail.com |
0e694119ae71816af6ee234acde50cfb3b07d971 | 259637f225cf17885f972edfc8b8221dfce8f81f | /week-03/day-01/factorio.py | a35246f21bf9bd600e6f75950da6d1a2798938be | [] | no_license | green-fox-academy/IBS_guthixx23 | c9570d0c3a3db4ebe4f6affa4f8980f66178d5fe | f3629e88cf4d9a74adfca2c94b8c928ec669beb3 | refs/heads/main | 2023-01-31T01:12:49.060303 | 2020-12-08T17:58:04 | 2020-12-08T17:58:04 | 304,072,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | def factorio(num):
ans = 1
for i in range(1, num+1):
ans *= i
return ans
print(factorio(4)) | [
"guthixx23@gmail.com"
] | guthixx23@gmail.com |
0a1322cfc3522be936cc35d976692015418c04b7 | 78f69f8c44cdb6387bd5fc82919c3e3a7b19abee | /__init__.py | dd2ade445cf488090daac9395705d5804ce632f7 | [] | no_license | evanwike/grocery-list-skill | 63abe9adfe7f81aa7902e2d969a22fbf9e1b3d1b | 672109a0693f6a92738584a0b59a2110d71b4ce5 | refs/heads/master | 2020-05-05T09:42:18.283319 | 2019-04-08T04:26:42 | 2019-04-08T04:26:42 | 179,913,164 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,448 | py | from mycroft import MycroftSkill, intent_file_handler
from mycroft.util.log import getLogger
import pymongo
LOGGER = getLogger(__name__)
URI = 'mongodb://root:password1@ds049446.mlab.com:49446/hackathon'
CLIENT = pymongo.MongoClient(URI)
DB = CLIENT.get_database()
USER = 'user'
lists = DB['lists']
class GroceryList(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
self.grocery_list = lists.find_one({'name': USER})['items']
# Add item to grocery list
@intent_file_handler('add_item.intent')
def handle_add_item_intent(self, message):
item = message.data.get("item")
if item not in self.grocery_list:
self.grocery_list.append(item)
update_db(self.grocery_list)
message = item + (' have' if item[len(item) - 1] == 's' else ' has')
self.speak_dialog('add_success', data={'message': message})
else:
self.speak_dialog('add_error', data={'item': item})
# Remove item from grocery list
@intent_file_handler('remove_item.intent')
def handle_remove_item_intent(self, message):
item = message.data.get('item')
if item not in self.grocery_list:
self.speak_dialog('remove_error', data={'item': item})
else:
self.grocery_list.remove(item)
update_db(self.grocery_list)
message = item + (' have' if item[len(item) - 1] == 's' else ' has')
self.speak_dialog('remove_success', data={'message': message})
# Detect if item is plural for has/have
# How many items are on my grocery list?
@intent_file_handler('count_items.intent')
def handle_count_items(self, message):
plural = len(self.grocery_list) > 1
verb = 'are' if plural else 'is'
s = 's' if plural else ''
self.speak_dialog('count_items', data={'n': len(self.grocery_list), 'verb': verb, 's': s})
@intent_file_handler('list_items.intent')
def handle_list_grocery(self, message):
if len(self.grocery_list) > 0:
self.speak_dialog("list_items")
for item in self.grocery_list:
self.speak(item)
else:
self.speak_dialog("empty_list")
# self.speak_dialog('list.grocery')
def create_skill():
return GroceryList()
def update_db(grocery_list: list):
lists.update_one({'name': USER}, {'$set': {'items': grocery_list}}, upsert=True)
| [
"sportsdude716@gmail.com"
] | sportsdude716@gmail.com |
507f6b0403a78b43766a63432e623686cc5a0493 | 466ba928ab060cc6e9b84cf4f64f742cc6153eb2 | /checkForUpdates.py | f7c00929fceb0b51fc493ea4a83745b57f8259c2 | [] | no_license | Joshua1337/FreifunkNodeChecker | 16de6a35a4343357d357d193ecc843ba89482571 | 53b9a3ef6890ed58f26a35a6accea03597596124 | refs/heads/master | 2021-11-10T01:25:19.307961 | 2021-10-24T11:59:13 | 2021-10-24T11:59:13 | 80,371,054 | 0 | 0 | null | 2020-09-24T21:39:52 | 2017-01-29T20:38:06 | Python | UTF-8 | Python | false | false | 2,612 | py | # coding: utf-8
import argparse
import logging
import os
import json
import requests
from telegram.ext import Updater
from time import sleep
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class Check():
def __init__(self, authToken, url, chatId):
self.lastContent = ""
self.filePath = os.path.dirname(os.path.realpath(__file__)) + "\cache"
self.authToken = authToken
self.url = url
self.chatId = chatId
def cacheContainsId(self, id, cache):
for j in cache['nodes']:
if j['id'] == id:
return True
return False
def run(self):
while True:
if not os.path.isfile(self.filePath) or os.path.getsize(self.filePath) == 0:
with open(self.filePath, "w") as file:
self.lastContent = json.loads(requests.get(self.url).text)
json.dump(self.lastContent, file)
else:
with open(self.filePath, "r") as file:
self.lastContent = json.load(file)
r = requests.get(self.url)
js = json.loads(r.text)
if self.lastContent['nodes'] != js['nodes']:
updater = Updater(self.authToken)
for i in js['nodes']:
isNew = self.cacheContainsId(i['id'], self.lastContent)
if not isNew:
updater.bot.sendMessage(chat_id=self.chatId,
text="Neuer Knoten <a href=\"https://map.freifunk-hennef.de/#!v:m;n:{}\"\
>{}</a>".format(i['id'], i['name']), parse_mode="html")
self.lastContent = js
with open(self.filePath, "w") as file:
json.dump(self.lastContent, file)
logging.info("Sleeping 60s")
sleep(60)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Überprüft eine JSON-Datei nach Änderungen")
parser.add_argument("-token", type=str, required=True, help="Authtoken für den Telegram Bot")
parser.add_argument("-url", type=str, required=True, help="Netzwerkpfad zur JSON-Datei")
parser.add_argument("-chat", type=int, required=True,
help="Telegram Chat-ID an die die Benachrichtigung gesendet werden soll")
parsed_args = parser.parse_args()
if not parsed_args.token:
parser.print_help()
exit()
Check(parsed_args.token, parsed_args.url, parsed_args.chat).run()
| [
"thecop@thecop.us"
] | thecop@thecop.us |
acdeccf893f67cfeddb6d3a93bcc2068e4d109c4 | 25d02ff4a1c5375321943b6830d9f9386010a76b | /relayer.py | 9eea4b27caadf47cfaa57449da44b676d06db116 | [
"MIT"
] | permissive | minddrive/image-relayer | a50219ef147238281799e6dc7e0bb62a35eaa09f | ff1d0cc39e93e44eae92b8fc1a1c73139b56d202 | refs/heads/main | 2023-04-29T13:48:25.969809 | 2021-05-21T05:42:51 | 2021-05-21T05:42:51 | 369,377,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,420 | py | #!/usr/bin/env python
import json
import logging
import re
import cloudscraper
import discord
import yaml
logger = logging.getLogger("relayer")
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename="relayer.log", encoding="utf-8", mode="w")
handler.setFormatter(logging.Formatter("%(asctime)s:%(levelname)s:%(name)s: %(message)s"))
logger.addHandler(handler)
site_regexes = {
"fur_affinity": r"(furaffinity\.net/view/(\d+))",
"weasyl": r"(weasyl\.com/~\w+/submissions/(\d+))"
}
def read_config():
with open('relayer.yml') as cfg:
return yaml.full_load(cfg)
class SiteBase:
def __init__(self):
self.url = None
self.headers = None
self.author = True
self.author_name = None
self.author_icon = None
self.image = None
self.image_url = None
class FurAffinity(SiteBase):
def __init__(self, site_id):
super().__init__()
self.url = f"https://bawk.space/fapi/submission/{site_id}"
def gather_info(self, data):
self.author_name = data["author"]
self.author_icon = data["avatar"]
self.image = data["title"]
self.image_url = data["image_url"]
class Weasyl(SiteBase):
def __init__(self, site_id):
super().__init__()
self.url = f"https://www.weasyl.com/api/submissions/{site_id}/view"
self.headers = {'X-Weasyl-API-Key': relayer_config["weasyl_api_key"]}
def gather_info(self, data):
self.author_name = data["owner"]
self.author_icon = data["owner_media"]["avatar"][0]["url"]
self.image = data["title"]
self.image_url = data["media"]["submission"][0]["links"]["cover"][0]["url"]
class RelayerClient(discord.Client):
def __init__(self, **options):
super().__init__(**options)
self.scraper = cloudscraper.create_scraper()
@staticmethod
def log_details(message, content):
logger.info(
f"{message.author.name}#{message.author.discriminator}@"
f"{message.guild.name}:{message.channel.name}: {content}"
)
async def on_ready(self):
logger.info(f"Logged in as {self.user}")
async def on_message(self, message):
# Bot should not reply to itself
if message.author == client.user:
return
for name, regex in site_regexes.items():
site_class = ''.join(w.capitalize() for w in name.split('_'))
comp_regex = re.compile(regex)
links = comp_regex.findall(message.content)
logger.debug(links)
for link, site_id in links:
site = globals()[site_class](site_id)
# If no response, just skip
if not (resp := self.scraper.get(site.url, headers=site.headers)):
continue
data = json.loads(resp.text)
site.gather_info(data)
self.log_details(message, site.image_url)
embed = discord.Embed(title=site.image)
embed.set_image(url=site.image_url)
if site.author:
embed.set_author(
name=site.author_name,
icon_url=site.author_icon
)
await message.channel.send(embed=embed)
relayer_config = read_config()
client = RelayerClient()
client.run(relayer_config["discord_token"])
| [
"elessar@numenor.org"
] | elessar@numenor.org |
72a48d03a7274c7f1ba3ddca5b8865827f62836e | 144df6ebbae1caf145c868579b335f579bf81357 | /test.py | 3986f037d12b93923af6b2accd4074cccbba40a2 | [] | no_license | xsnk/GreyHatPythonRead | f78645c00bb60e812bdefb5091971a2f94ccfa61 | 83cd36be8d5145be3b8f327f97619cb25110b774 | refs/heads/master | 2020-03-17T17:53:57.062965 | 2018-05-17T12:01:19 | 2018-05-17T12:01:19 | 133,806,664 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | import zdebug
debugger = zdebug.Debugger()
debugger.load("c:/Windows/System32/calc.exe")
| [
"noreply@github.com"
] | noreply@github.com |
994488c0995c4cb3859a16fbd3481c780bdb7c61 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/virtual-wan/azext_vwan/vendored_sdks/v2018_08_01/v2018_08_01/aio/operations/_load_balancer_load_balancing_rules_operations.py | 80034f67d188e49b5f19806c7376dfe4dd5c6385 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 8,796 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerLoadBalancingRulesOperations:
"""LoadBalancerLoadBalancingRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> AsyncIterable["_models.LoadBalancerLoadBalancingRuleListResult"]:
"""Gets all the load balancing rules in a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerLoadBalancingRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_08_01.models.LoadBalancerLoadBalancingRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerLoadBalancingRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerLoadBalancingRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
load_balancing_rule_name: str,
**kwargs
) -> "_models.LoadBalancingRule":
"""Gets the specified load balancer load balancing rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param load_balancing_rule_name: The name of the load balancing rule.
:type load_balancing_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LoadBalancingRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_08_01.models.LoadBalancingRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancingRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'loadBalancingRuleName': self._serialize.url("load_balancing_rule_name", load_balancing_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LoadBalancingRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules/{loadBalancingRuleName}'} # type: ignore
| [
"noreply@github.com"
] | noreply@github.com |
801a2a01933e03fb0f56781ece4a79654cc8788c | b72d0900bec98fcee6c725cef035c02ca29bbf1b | /Python/100Excersises/1 to 25/25/25.py | 38dc3ba7dc12908e54d10b12f5a442b5a1ccd3cd | [
"MIT"
] | permissive | sugamkarki/NAMI-Year-II-TERM-I-Group_Project | 68b8808c8607858a313e8b4d601d8d12c6edda2b | f0a9a5f219ccbec024eb5316361db3fca46e171c | refs/heads/master | 2023-06-28T19:07:19.330236 | 2021-07-24T03:05:42 | 2021-07-24T03:05:42 | 312,819,148 | 0 | 0 | MIT | 2021-07-24T12:45:06 | 2020-11-14T13:08:08 | Python | UTF-8 | Python | false | false | 163 | py | alphabet=[]
for letters in range(97,123):
alphabet.append(chr(letters))
d=dict(a=alphabet)
for item in d.values():
for alpha in item:
print(alpha)
| [
"sugamkarki7058@gmail.com"
] | sugamkarki7058@gmail.com |
12519564ac2077f1120fb5cbb0e9bfaf0c9762c4 | 0bb991864bb1c68eb41c40229b2a78adcbbf69c9 | /python/model_features/statistics.py | 5f73b2e6b61173784966955ab4a9f0dc70ecff90 | [] | no_license | kristianeschenburg/Parcellating-connectivity | ab78a62a11e549f027a177f57c15924ef6eafb9e | 19edaba4d923b1d283b182f21dca4f46a0fbd2f6 | refs/heads/master | 2020-03-22T13:37:16.801653 | 2018-07-29T18:33:47 | 2018-07-29T18:33:47 | 140,120,191 | 0 | 0 | null | 2018-07-07T22:16:40 | 2018-07-07T22:16:39 | null | UTF-8 | Python | false | false | 1,568 | py | import numpy as np
import time
def UpdateStats(stats, t0, curr_lp, max_lp, K, z, c, steps, gt_z, map_z, verbose):
"""
Update diagnostic statistics.
Parameters:
- - - - -
t0 : initial start time
curr_lp : current log-probability of map
max_lp : max log-probability
K : number of clusters
z : current map
c : current parent links
steps : total number of steps taken
gt_z : ground truth map
map_z : maximum a-posterior map
verbose : flag to print status updates
"""
stats['lp'].append(curr_lp)
stats['max_lp'].append(max_lp)
stats['K'].append(K)
stats['z'] = np.row_stack([stats['z'],z])
stats['c'] = np.row_stack([stats['c'],c])
curr_time = time.clock() - t0
stats['times'].append(curr_time)
if verbose:
print('Step: ' + str(steps) + ' Time: ' + str(curr_time) +
' LP: ' + str(curr_lp) + ' K: ' + str(K) + ' MaxLP: ' + str(max_lp))
if np.any(gt_z):
stats['NMI'].append(NMI(gt_z, map_z))
return stats
def NMI(z1, z2):
"""
Compute normalized mutual information between two maps.two
Parameters:
- - - - -
z1, z2 : maps to compare
"""
N = len(z1)
assert N == len(z2)
p1 = np.bincount(z1)/N
p1[p1 == 0] = 1
H1 = (-p1*np.log(p1)).sum()
p2 = np.bincount(z2)/N
p2[p2 == 0] = 1
H2 = (-p2*np.log(p2)).sum()
joint = np.histogram2d(z1,z2,[range(0,z1.max()+2), range(0,z2.max()+2)],
normed=True)
joint_p = joint[0]
pdiv = joint_p/np.outer(p1,p2)
pdiv[joint_p == 0] = 1
MI = (joint_p*np.log(pdiv)).sum()
if MI == 0:
NMI = 0
else:
NMI = MI/np.sqrt(H1*H2)
return NMI | [
"keschenb@uw.edu"
] | keschenb@uw.edu |
d25e7326a9da02a7ac488bd3ef17368a45448185 | d6d4a1e4a4c33b7410fc63852a17ab2de089ef78 | /test2.py | 495b395916689f9f3f29750bfcfea5a50d9c2ee2 | [] | no_license | ankittiwari101/learning_git | 054ffcbf52f785a506a37d4aa49d3eb25951f8ee | 43a3166c98e46fbac9dd2c8dff7371d2aa2b392e | refs/heads/master | 2021-03-29T07:01:56.915364 | 2020-03-17T10:20:08 | 2020-03-17T10:20:08 | 247,928,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | print("Tiwari Again!!This is my second commit at a github repository.") | [
"noreply@github.com"
] | noreply@github.com |
216cfa0a771df09a4201a43f6c87376c2b2194ba | ac31ab210a9d4688e0ba90872fe48a6c97886b9b | /ReLink.py | f13036d27d75a1e963928ea54802a1835895c68b | [] | no_license | springltd/link2_pi_demo01 | 647120ab2d5cd15189fd8e76f3a699829f4342f2 | 94865945fb0b54a66da3df08f1e604a050f70107 | refs/heads/master | 2022-10-22T12:21:31.301083 | 2020-06-16T09:43:02 | 2020-06-16T09:43:02 | 272,659,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,617 | py | #!/usr/bin/env python3
#Library imports
from tkinter import *
from tkinter import StringVar
import time
from functools import partial
class App:
# Class to manage ReLink PiHat
# ---------------------------------------
def __init__(self, master):
# Init function for class
# -----------------------
frame = Frame(master)
frame.pack()
#arrays of IO states and GPIO pins (we always use J pin number convention in this program)
self.IOState=[0,0,0,0]
self.jPin =[15,22,29,36,]
self.AllState = 0
# Create and position each of the buttons
self.ChannelButton15 = Button(frame, text="15",bg = "red",height=1, width=1)
self.ChannelButton15.grid(row=7,column=2);
self.ChannelButton22 = Button(frame, text="22",bg = "red",height=1, width=1)
self.ChannelButton22.grid(row=10,column=3);
self.ChannelButton29 = Button(frame, text="29",bg = "red",height=1, width=1)
self.ChannelButton29.grid(row=14,column=2);
self.ChannelButton36 = Button(frame, text="36",bg = "red",height=1, width=1)
self.ChannelButton36.grid(row=17,column=3);
# create on and off actions for each button
action_toggle15= partial(self.ToggleOnOff, 0, self.ChannelButton15)
action_toggle22= partial(self.ToggleOnOff, 1, self.ChannelButton22)
action_toggle29= partial(self.ToggleOnOff, 2, self.ChannelButton29)
action_toggle36= partial(self.ToggleOnOff, 3, self.ChannelButton36)
#associate the actions with the button
self.ChannelButton15.config(command=action_toggle15)
self.ChannelButton22.config(command=action_toggle22)
self.ChannelButton29.config(command=action_toggle29)
self.ChannelButton36.config(command=action_toggle36)
# Create the GPIO labels alongside the buttons
l15 = Label(frame, text = "GPIO22", height=1, width=6);
l15.grid (row=7, column=0)
l22 = Label(frame, text = "GPIO25", height=1, width=6);
l22.grid (row=10, column=4)
l29 = Label(frame, text = "GPIO05", height=1, width=6);
l29.grid (row=14, column=0)
l36 = Label(frame, text = "GPIO16", height=1, width=6);
l36.grid (row=17, column=4)
# Create the Toggle All button
ToggleAllButton = Button(frame, text="Toggle All", height=1, width=25, command =self.ToggleAll)
ToggleAllButton.grid(row=20, column=0,columnspan=5)
def ToggleAll(self):
# toggle all i/os on or off
# -------------------------
if self.AllState==1:
self.AllState = 0
bgclr="red"
fgclr="black"
else:
self.AllState = 1
bgclr="green"
fgclr="white"
# update the button colours according to the IO state
self.ChannelButton15.config(fg = fgclr , bg = bgclr)
self.ChannelButton22.config(fg = fgclr , bg = bgclr)
self.ChannelButton29.config(fg = fgclr , bg = bgclr)
self.ChannelButton36.config(fg = fgclr , bg = bgclr)
# put the new i/o states in the array of i/o states
for idx in range(4):
GPIO.output(self.jPin[idx] ,self.AllState)
self.IOState[idx] = self.AllState
def ToggleOnOff(self, idx, button):
# Toggle an i/o on or off
# -----------------------
if (self.IOState[idx] == 0):
self.IOState[idx] = 1
button.config(bg="green", fg="white")
else:
self.IOState[idx] = 0
button.config(bg="red", fg="black")
GPIO.output(self.jPin[idx] ,self.IOState[idx])
def SetAllOff(self):
# Drive all outputs to the 'off' state
# ------------------------------------
for idx in range(4):
GPIO.output(self.jPin[idx] ,0)
self.IOState[idx] = 0
# Main program
# ------------
import RPi.GPIO as GPIO
#Turn off GPIO warnings
GPIO.setwarnings(False)
#Set the GPIO numbering convention to be header pin numbers
GPIO.setmode(GPIO.BOARD)
#Configure each GPIO pin as an output
GPIO.setup(15,GPIO.OUT)
GPIO.setup(22,GPIO.OUT)
GPIO.setup(29,GPIO.OUT)
GPIO.setup(36,GPIO.OUT)
#Create our window using Tkinter
root = Tk()
root.title('ReLink PiHat')
root.resizable(width=FALSE, height=FALSE)
app = App(root)
#Turn all the GPIO off to start with
app.SetAllOff()
#Main loop - responds to dialog events
root.mainloop()
#we exit the main loop if user has closed the window
#reset the GPIO and end the program
GPIO.cleanup()
| [
"andrew.gatt@springltd.co"
] | andrew.gatt@springltd.co |
67143a8e1eb81c79e74cb83a07a1483096d620ba | 4ffb9e383f7c2759bd39a7e1772ecb437e7b4082 | /cursosweb/bin/pip3.7 | 4fefeae8196f856e476355f5b36066cfd9842145 | [] | no_license | meridiaz/x-serv-15.8-cms-users-put | 62fe3a9cd418ced2c67b9b66b1d525107831579e | 67ce09cc2f04b76a5d99149e71f833636c94b6d4 | refs/heads/master | 2022-12-11T18:49:04.635968 | 2020-04-18T17:32:49 | 2020-04-18T17:32:49 | 295,018,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | 7 | #!/home/meri/Escritorio/sat/gitlab/try2git/x-serv-15.6-django-cms-put/cursosweb/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"mdiaz"
] | mdiaz |
9014134d43bb036fbffc60790f20a299ae4710ab | 5605d4637f78f6d41958029e692b3b33818c2081 | /src/database.py | 7d3c0212509dfb34325441a6a957736c69568b5e | [] | no_license | ssynn/C-S_chat_program | 0dcc9f922f6416339b45d3fc5e66fc6a03fad306 | f847e5fe192a96ad3337cf64be34e760409069bd | refs/heads/master | 2020-05-24T21:58:20.778252 | 2019-06-16T09:20:09 | 2019-06-16T09:20:09 | 187,487,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,279 | py | import sqlite3
# 登录
def login(user_message: dict) -> bool:
'''
传入以下格式的字典
user_message{
'ID': str,
'PASSWORD': str
}
'''
ans = None
try:
conn = sqlite3.connect('./data/data.db')
cursor = conn.cursor()
cursor.execute('''
SELECT ID
FROM users
WHERE ID=? AND PASSWORD=?
''', (
user_message['ID'],
user_message['PASSWORD']
))
temp = cursor.fetchall()
if len(temp) == 1:
ans = {'answer': 'success'}
else:
ans = {'answer': 'fail'}
except Exception as e:
print('Login error!')
print(e)
finally:
conn.close()
return ans
# 注册
def signup(user_message: dict) -> dict:
'''
传入以下格式的字典
user_message{
'ID': str,
'PASSWORD': str
}
'''
message = dict()
try:
conn = sqlite3.connect('./data/data.db')
cursor = conn.cursor()
# print(user_message)
cursor.execute('''
SELECT *
FROM users
WHERE ID = ?
''',[user_message['ID']]
)
if len(cursor.fetchall()) != 0:
message['reason'] = '用户已存在!'
message['answer'] = 'fail'
raise Exception('用户已存在!')
cursor.execute('''
INSERT
INTO users
VALUES(?, ?)
''', [
user_message['ID'],
user_message['PASSWORD']
])
conn.commit()
message['answer'] = 'success'
except Exception as e:
print('Signup error!')
print(e)
finally:
conn.close()
return message
def makeFriend(user1: str, user2: str) -> dict:
'''
先检查两个人是否已经成为朋友,然后建立朋友行, 传入的两个用户不能为同一个人
返回{'answer': 'fail/seccuss', 'reason':str(e)}
'''
newFriends = [user1, user2]
newFriends.sort()
ans = None
try:
# 检查用户是否重复
if user1 == user2:
raise Exception('用户重复')
conn = sqlite3.connect('./data/data.db')
cursor = conn.cursor()
# 先查找用户是否存在
cursor.execute('''
SELECT *
FROM users
WHERE ID=? OR ID=?
''', newFriends)
num = cursor.fetchall()
if len(num) != 2:
raise Exception('无效用户!')
# 建立新朋友行
cursor.execute('''
INSERT
INTO friends
values(?,?)
''', newFriends)
conn.commit()
conn.close()
ans = {'answer': 'success'}
except Exception as e:
print('Make friends error!')
print(e)
ans = {'answer': 'fail', 'reason':str(e)}
finally:
return ans
def get_my_friends(userID) -> list:
ans = []
try:
conn = sqlite3.connect('./data/data.db')
cursor = conn.cursor()
# 建立新朋友行
cursor.execute('''
SELECT *
FROM friends
WHERE ID1 = ? or ID2 = ?
''', [userID, userID])
ans = cursor.fetchall()
conn.close()
ans = list(map(lambda x: x[0] if x[0] != userID else x[1], ans))
except Exception as e:
print('Search friends error!')
print(e)
finally:
return ans
def get_all_users() -> list:
users = []
try:
conn = sqlite3.connect('./data/data.db')
cursor = conn.cursor()
cursor.execute('''
SELECT ID
FROM users
''')
users = cursor.fetchall()
users = list(map(lambda x: x[0], users))
except Exception as e:
print(e)
finally:
conn.close()
return users
def delete_friend(user1, user2) -> dict():
'''
删除两个用户之间的好友关系
'''
newFriends = [user1, user2]
newFriends.sort()
ans = None
try:
conn = sqlite3.connect('./data/data.db')
cursor = conn.cursor()
# 先查找关系是否存在
cursor.execute('''
SELECT *
FROM friends
WHERE ID1 = ? and ID2 = ?
''', newFriends)
num = cursor.fetchall()
if len(num) != 1:
raise Exception('用户间并非好友!')
# 建立新朋友行
cursor.execute('''
DELETE
FROM friends
WHERE ID1=? and ID2=?
''', newFriends)
conn.commit()
conn.close()
ans = {'answer': 'success'}
except Exception as e:
print('Delete friends error!')
print(e)
ans = {'answer': 'fail', 'reason':str(e)}
finally:
return ans
if __name__ == "__main__":
# print(get_all_users())
# signup({
# 'ID':'5',
# 'PASSWORD':'1'
# })
# print(get_all_users())
# 交朋友测试
# print(makeFriend('1', '1'))
# print(makeFriend('1', '2'))
# print(makeFriend('1', '3'))
# print(makeFriend('1', '4'))
# 删除朋友测试
print(get_my_friends('1'))
print(delete_friend('1', '2'))
print(get_my_friends('1'))
pass
| [
"824063458@qq.com"
] | 824063458@qq.com |
e913df03c7803afd5541a37caa8edf1ec5ee78b8 | 621b856c4f181128e7f7039a6f5508f3ffddc983 | /Peoggramms/salman_h_changed_loop.py | 0ecf8caeee7434c0260eddd5b3e3497d710b096b | [] | no_license | olesyaogorodnikova/Robot_kinematics | 31853f64266f15cce05b6cfc70427819975e7442 | cbc1284540fd7bf1c01f2ef36f319d1ac898590c | refs/heads/master | 2021-01-07T14:22:15.171320 | 2020-02-19T21:50:15 | 2020-02-19T21:50:15 | 241,723,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,959 | py | #!/usr/bin/env python
import rospy
import tf
from kuka_arm.srv import *
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from geometry_msgs.msg import Pose
from numpy import array, matrix, cos, sin, pi, arccos, arctan2, sqrt
from numpy.linalg import inv
import time
def get_Table():
# Define variables for joint angles
theta1, theta2, theta3, theta4, theta5, theta6 = 0., 0., 0., 0., 0., 0.
# Construct DH Table with measurements from 'kr210.urdf.xacro' file
s = {'alpha0': 0, 'a0': 0, 'd1': 0.75, 'theta1': theta1,
'alpha1': -pi/2, 'a1': 0.35, 'd2': 0, 'theta2': theta2,
'alpha2': 0, 'a2': 1.25, 'd3': 0, 'theta3': theta3,
'alpha3': -pi/2, 'a3': -0.054, 'd4': 1.50, 'theta4': theta4,
'alpha4': pi/2, 'a4': 0, 'd5': 0, 'theta5': theta5,
'alpha5': -pi/2, 'a5': 0, 'd6': 0, 'theta6': theta6,
'alpha6': 0, 'a6': 0, 'd7': 0.303, 'theta7': 0}
return s
def H_Transformation(alpha, a, d, theta):
A = matrix([[ cos(theta), -sin(theta), 0, a],
[ sin(theta)*cos(alpha), cos(theta)*cos(alpha), -sin(alpha), -sin(alpha)*d],
[ sin(theta)*sin(alpha), cos(theta)*sin(alpha), cos(alpha), cos(alpha)*d],
[ 0, 0, 0, 1]])
return A
def Rotation_Rx(theta):
"""Define matrix for rotation (roll) about x axis."""
Rx = matrix([[1, 0, 0],
[0, cos(theta), -sin(theta)],
[0, sin(theta), cos(theta)]])
return Rx
def Rotation_Ry(theta):
"""Define matrix for rotation (pitch) about y axis."""
Ry = matrix([[cos(theta), 0, sin(theta)],
[ 0, 1, 0],
[-sin(theta), 0, cos(theta)]])
return Ry
def Rotation_Rz(theta):
"""Define matrix for rotation (yaw) about z axis."""
Rz = matrix([[cos(theta), -sin(theta), 0],
[sin(theta), cos(theta), 0],
[ 0, 0, 1]])
return Rz
def get_Gripper_pose(geometry_msg):
"""
Extract EE pose from received trajectory pose in an IK request message.
NOTE: Pose is position (cartesian coords) and orientation (euler angles)
Docs: https://github.com/ros/geometry/blob/indigo-devel/
tf/src/tf/transformations.py#L1089
"""
px = geometry_msg.position.x
py = geometry_msg.position.y
pz = geometry_msg.position.z
(roll, pitch, yaw) = tf.transformations.euler_from_quaternion(
[geometry_msg.orientation.x, geometry_msg.orientation.y,
geometry_msg.orientation.z, geometry_msg.orientation.w])
gripper_pose = (px, py, pz)
orient = (roll, pitch, yaw)
return gripper_pose, orient
def get_R0_EE(gripper_pose):
"""
Compute EE Rotation matrix w.r.t base frame.
Computed from EE orientation (roll, pitch, yaw) and describes the
orientation of each axis of EE w.r.t the base frame
"""
roll, pitch, yaw = gripper_pose[1]
# Perform extrinsic (fixed-axis) sequence of rotations of EE about
# x, y, and z axes by roll, pitch, and yaw radians respectively
Rzyx = Rotation_Rz(yaw) * Rotation_Ry(pitch) * Rotation_Rx(roll)
# Align EE frames in URDF vs DH params through a sequence of
# intrinsic (body-fixed) rotations: 180 deg yaw and -90 deg pitch
R_corr = Rotation_Rz(pi) * Rotation_Ry(-pi/2)
# Account for this frame alignment error in EE pose
R0_EE = Rzyx * R_corr
return R0_EE
def get_Wc(s, R0_EE, gripper_pose):
"""
Compute Wrist Center position (cartesian coords) w.r.t base frame.
Keyword arguments:
R_ee -- EE Rotation matrix w.r.t base frame
ee_pose -- tuple of cartesian coords and euler angles describing EE
Return values:
Wc -- vector of cartesian coords of WC
"""
px, py, pz = gripper_pose[0]
# Define EE position as a vector
Pxyz = matrix([[px],
[py],
[pz]])
# Get Col3 vector from Rxyz that describes z-axis orientation of EE
# nx = R0_EE[0, 2]
# ny = R0_EE[1, 2]
# nz = R0_EE[2, 2]
# N_ee = matrix ([[nx],
# [ny],
# [nz]])
N_EE = R0_EE[:, 2]
# WC is a displacement from EE equal to a translation along
# the EE z-axis of magnitude dG w.r.t base frame (Refer to DH Table)
# wcx = px - s['d7']*nx
# wcy = py - s['d7']*ny
# wcz = pz - s['d7']*nz
Wc = Pxyz - s['d7']*N_EE
return Wc
def get_joints1_2_3(s, Wc):
"""
Calculate joint angles 1,2,3 using geometric IK method.
NOTE: Joints 1,2,3 control position of WC (joint 5)
"""
wcx, wcy, wcz = Wc[0], Wc[1], Wc[2]
# theta1 is calculated by viewing joint 1 and arm from top-down
theta1 = arctan2(wcy, wcx)
# theta2,3 are calculated using Cosine Law on a triangle with edges
# at joints 1,2 and WC viewed from side and
# forming angles A, B and C repectively
m = sqrt(wcx**2 + wcy**2)-s['a1']
w = arctan2(wcz - s['d1'], m)
b = sqrt((wcz - s['d1'])**2 + m**2)
c = sqrt(s['d4']**2 + s['a3']**2)
A = arccos((b**2 + s['a2']**2 - c**2) / (2*b*s['a2']))
B = arccos((c**2 + s['a2']**2 - b**2) / (2*c*s['a2']))
theta2 = pi/2 - A - w
theta3 = pi/2 - B - arctan2(s['a3'], s['d4'])
return theta1, theta2, theta3
def get_joints4_5_6(s, R0_EE, theta1, theta2, theta3):
"""
Calculate joint Euler angles 4,5,6 using analytical IK method.
NOTE: Joints 4,5,6 constitute the wrist and control WC orientation
"""
# Compute individual transforms between adjacent links
# T(i-1)_i = Rx(alpha(i-1)) * Dx(alpha(i-1)) * Rz(theta(i)) * Dz(d(i))
T0_1 = H_Transformation(s['alpha0'], s['a0'], s['d1'], s['theta1'])
T1_2 = H_Transformation(s['alpha1'], s['a1'], s['d2'], s['theta2'])
T2_3 = H_Transformation(s['alpha2'], s['a2'], s['d3'], s['theta3'])
T0_3 = T0_1 * T1_2 * T2_3
# Extract rotation components of joints 1,2,3 from their
# respective individual link Transforms
R0_1 = T0_1[:3, :3]
R1_2 = T1_2[:3, :3]
R2_3 = T2_3[:3, :3]
# Evaluate the composite rotation matrix fromed by composing
# these individual rotation matrices
R0_3 = R0_1 * R1_2 * R2_3
# R3_6 is the composite rotation matrix formed from an extrinsic
# x-y-z (roll-pitch-yaw) rotation sequence that orients WC
#R3_6 = T0_3[:3, :3].transpose() * R0_EE # b/c R0_6 == R_ee = R0_3*R3_6
R3_6 = inv(array(R0_3, dtype='float')) * R0_EE
r21 = R3_6[1, 0] # sin(theta5)*cos(theta6)
r22 = R3_6[1, 1] # -sin(theta5)*sin(theta6)
r13 = R3_6[0, 2] # -sin(theta5)*cos(theta4)
r23 = R3_6[1, 2] # cos(theta5)
r33 = R3_6[2, 2] # sin(theta4)*sin(theta5)
# Compute Euler angles theta 4,5,6 from R3_6 by individually
# isolating and explicitly solving each angle
angles_pre = (0,0,0,0,0,0)
if np.abs(r23) is not 1:
theta5 = arctan2(sqrt(r13**2 + r33**2), r23)
if sin(theta5) < 0:
theta4 = arctan2(-r33, r13)
theta6 = arctan2(r22, -r21)
else:
theta4 = arctan2(r33, -r13)
theta6 = arctan2(-r22, r21)
else:
theta6 = angles_pre[5]
if r23 == 1:
theta5 = 0
theta4 = -theta6 + arctan2(-r12, -r32)
else:
theta5 = 0
theta4 = q6 - arctan2(r12, -r32)
return theta4, theta5, theta6
def handle_calculate_IK(req):
"""Handle request from a CalculateIK type service."""
rospy.loginfo("Received %s eef-poses from the plan" % len(req.poses))
if len(req.poses) < 1:
print "No valid poses received"
return -1
else:
s = get_Table()
# Initialize service response consisting of a list of
# joint trajectory positions (joint angles) corresponding
# to a given gripper pose
joint_trajectory_list = []
# To store coordinates for plotting (in plot_ee() function)
#received_ee_points = []
#fk_EE_points = []
#EE_errors = []
# For each gripper pose a response of six joint angles is computed
loop_start_time = time.time()
len_poses = len(req.poses)
for x in xrange(0, len_poses):
loop_current_time = time.time()
joint_trajectory_point = JointTrajectoryPoint()
# INVERSE KINEMATICS
gripper_pose = get_Gripper_pose(req.poses[x])
#received_ee_points.append(ee_pose[0])
R0_EE = get_R0_EE(gripper_pose)
Wc = get_Wc(s, R0_EE, gripper_pose)
# Calculate angles for joints 1,2,3 and update dh table
theta1, theta2, theta3 = get_joints1_2_3(s, Wc)
s['theta1'] = theta1
s['theta2'] = theta2-pi/2 # account for 90 deg constant offset
s['theta3'] = theta3
# Calculate angles for joints 4,5,6 and update dh table
theta4, theta5, theta6 = get_joints4_5_6(s, R0_EE, theta1, theta2, theta3)
s['theta4'] = theta4
s['theta5'] = theta5
s['theta6'] = theta6
# Populate response for the IK request
joint_trajectory_point.positions = [theta1, theta2, theta3,
theta4, theta5, theta6]
joint_trajectory_list.append(joint_trajectory_point)
def calculate_FK():
"""Calculate Forward Kinematics for verifying joint angles."""
# Compute individual transforms between adjacent links
# T(i-1)_i = Rx(alpha(i-1)) * Dx(alpha(i-1)) * Rz(theta(i)) * Dz(d(i))
T0_1 = H_Transformation(s['alpha0'], s['a0'], s['d1'], s['theta1'])
T1_2 = H_Transformation(s['alpha1'], s['a1'], s['d2'], s['theta2'])
T2_3 = H_Transformation(s['alpha2'], s['a2'], s['d3'], s['theta3'])
T3_4 = H_Transformation(s['alpha3'], s['a3'], s['d4'], s['theta4'])
T4_5 = H_Transformation(s['alpha4'], s['a4'], s['d5'], s['theta5'])
T5_6 = H_Transformation(s['alpha5'], s['a5'], s['d6'], s['theta6'])
T6_EE = H_Transformation(s['alpha6'], s['a6'], s['d7'], s['theta7'])
# Create overall transform between base frame and EE by
# composing the individual link transforms
T0_EE = T0_1 * T1_2 * T2_3 * T3_4 * T4_5 * T5_6 * T6_EE
fk_EE = [T0_EE[0, 3], T0_EE[1, 3], T0_EE[2, 3]]
fk_EE_points.append([(fk_EE[0].item(0)),
(fk_EE[1].item(0)),
(fk_EE[2].item(0))])
error_x = abs(fk_EE[0] - EE_pose[0][0])
error_y = abs(fk_EE[1] - EE_pose[0][1])
error_z = abs(fk_EE[2] - EE_pose[0][2])
EE_errors.append([(error_x.item(0)),
(error_y.item(0)),
(error_z.item(0))])
# NOTE: Uncomment following line to compute FK for plotting EE
#calculate_FK()
print "Total time:", round(time.time() - loop_start_time, 4)
rospy.loginfo("Number of joint trajectory points:" +
" %s" % len(joint_trajectory_list))
return CalculateIKResponse(joint_trajectory_list)
def IK_server():
"""Initialize IK_server ROS node and declare calculate_ik service."""
rospy.init_node('IK_server')
s = rospy.Service('calculate_ik', CalculateIK, handle_calculate_IK)
print "Ready to receive an IK request"
rospy.spin()
if __name__ == "__main__":
IK_server()
| [
"ooleszja@gmail.com"
] | ooleszja@gmail.com |
f6bc950e15c4b64504ccaad6a8a45115c40cb4de | 447101726b535b2a12fb3c0d8336a8dd06f7dea3 | /modex/core.py | 8de5347336123cdb58e3910e439e818b1c38e5af | [] | no_license | weihaigang/CrackDict | 15503911cd73521151d5fc5a7aa2af075dbd3b3f | d752bd54cbc230e8c610a2f95beaff9247f2898a | refs/heads/master | 2023-07-31T19:01:46.000370 | 2021-09-19T00:17:13 | 2021-09-19T00:17:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,814 | py | # @Author: JogFeelingVi
# @Date: 2021-08-30 23:14:22
# @Last Modified by: By JogFeelingVi
# @Last Modified time: 2021-08-30 23:14:22
from . import rplan
from itertools import zip_longest, repeat, product
from functools import reduce
import re
class curls:
    """Build password-candidate wordlists from a "plan" pattern string.

    A plan such as ``TMD`` or ``[12]M`` is expanded letter by letter (or
    via ``[...]`` character groups) into one candidate list per password
    position; the cartesian product of those lists is the wordlist that
    gets written to disk.  CLI arguments are dispatched to the
    ``__act_*__`` handlers in the fixed order defined in :meth:`Action`.
    """

    # Default settings; CLI arguments are merged over these in __init__.
    Archives = {
        'var': 1.02,
    }
    # User-supplied token list, selected by plan letter 'c'.
    Custom = []
    # Extra token list consumed by the writer's "fmu" plugin.
    fmu = []
    # Per-position candidate lists once a plan has been parsed.
    Synthesis = None
    # Sanitised file-name stem derived from the plan's upper bound.
    nName = ''
    # Number of combinations the parsed plan will generate.
    Count = 0
    # Plan letter -> candidate list (filled in by InitializationPlan).
    Dataforplan = {'plan': 'TMDsSpPhHf'}
    # Argument name -> handler; the lambdas run after the class exists,
    # so the forward references to `curls` resolve at call time.
    __act_dict = {
        'cust': lambda s, l: curls.__act_cust__(s, l),
        'plan': lambda s, p: curls.__act_plan__(s, p),
        'out': lambda s, o: curls.__act_out__(s, o),
        'list': lambda s, b: curls.__act_list__(s, b),
        'dual_md': lambda s, m: curls.__act_dual_md__(s, m),
        'dual_m': lambda s, m: curls.__act_dual_m__(s, m),
        'dual_d': lambda s, m: curls.__act_dual_d__(s, m),
        'minpw': lambda s, d: curls.__act_minpw__(s, d),
        'fmu': lambda s, l: curls.__act_fmu__(s, l),
    }

    def __act_fmu__(self, l: list):
        """Store the --fmu token list for the writer plugin."""
        if l is None:
            return
        self.fmu = l
        print(f'Save fmu {self.fmu} --fmu 442000 027')

    def __act_dual_m__(self, m: bool):
        """Zero-pad month candidates to two digits ('3' -> '03')."""
        if m == False:
            return
        tmp = [f'{int(v):02}' for v in self.Dataforplan['M']]
        self.Dataforplan['M'] = tmp

    def __act_dual_d__(self, m: bool):
        """Zero-pad day candidates to two digits ('3' -> '03')."""
        if m == False:
            return
        tmp = [f'{int(v):02}' for v in self.Dataforplan['D']]
        self.Dataforplan['D'] = tmp

    def __act_dual_md__(self, m: bool):
        """Zero-pad both month and day candidates."""
        if m == False:
            return
        self.__act_dual_m__(True)
        self.__act_dual_d__(True)

    def __act_minpw__(self, d: int):
        """Record the minimum acceptable password length."""
        self.Archives['minpw'] = d

    def __act_cust__(self, l: list):
        """Store the custom token list (plan letter 'c')."""
        if l is None:
            return
        self.Custom = l
        print(f'Save Custom list {self.Custom}, Use -p c!')

    def __act_plan__(self, S: str):
        """Parse plan string *S* into per-position candidate lists.

        Accepts either single plan letters (looked up in Dataforplan, with
        'c' meaning the custom list) or bracketed character groups such as
        ``[3456789]``.  On success stores the lists in ``Synthesis`` and
        derives ``Count`` and the output-file stem ``nName``.
        """
        if S is None:
            return
        GPS = []
        # Each match is either a "[...]" group or one single plan letter.
        plan_m = re.finditer(r'(\[([^\[\]]*)\])|([TMDdSspPfhcH])', S)
        for m in plan_m:
            x, y = m.span()
            if y - x > 1:
                # "[...]" group: strip the brackets, one candidate per char.
                kh = m.string[x:y][1:-1]
                GPS.append([f'{ch}' for ch in kh])
            elif y - x == 1:
                # Single plan letter; 'c' selects the custom list.
                cl = m.string[x:y]
                GPS.append([self.rPlan(cl), self.Custom][cl == 'c'])
            else:
                return
        self.Count = reduce(lambda x, y: x * y, [len(x) for x in GPS])
        # Lower/upper bounds of the generated range, e.g. '000' and '999'.
        start = rplan.jionStr(*[x[0] for x in GPS])
        ends = rplan.jionStr(*[x[-1] for x in GPS])
        lse = {len(start), len(ends)}
        minpw = self.Archives['minpw']
        if any(x >= minpw for x in lse):
            self.Synthesis = GPS
            print(f'Number of password digits {lse}')
            print(f'Scope: {start}-{ends}')
            self.nName = self.fname_invalid(f'{ends}')
            print(f'Count: {self.Count:,} Done!')
        else:
            print(
                f'minpw seting {minpw}, [ -p {S} ] Not eligible. Refer {lse}')

    def __act_out__(self, o: str):
        """Write the generated wordlist to *o* (default: ./<stem>.lst)."""
        if self.Synthesis is None:
            return
        path = f'./{self.nName}.lst' if o is None else o
        outf = rplan.pathx(path)
        print(f'OutFile: {outf}')
        wplan = rplan.wfileplus(outf, self.Synthesis, self.Count)
        minpw = self.Archives['minpw']
        if self.fmu is not None and 'plus_fmu' in self.Archives:
            wplan.fmus(self.fmu, self.Archives['plus_fmu'])
        else:
            print('Plus_fmu Start Error!')
        print(f'Minimum password length {wplan.minpw(minpw)}')
        wplan.writeLc()

    def __act_list__(self, b: bool):
        """Print the available plan letters and their candidate lists."""
        if b == False:
            return
        plankeys = 'M,D,d,s,S,f,p,P,T'.split(',')
        for key in plankeys:
            value = ','.join(self.rPlan(key))
            print(f'- {key} {value}')
        print('- h ba,pa,ma,fa,da,tu...')
        print('- H Ba,Zhang,Zhao,Yun...')
        print('- c Custom list, -c xxx yyy zzz')

    @staticmethod
    def fname_invalid(fname: str) -> str:
        """Sanitise *fname* for use as an output-file stem.

        Truncates over-long names, drops a leading '+', '-' or '.', and
        removes shell/regex-hostile characters.  Assumes *fname* is
        non-empty (callers pass the plan's upper bound).
        """
        if len(fname) > 255:
            fname = fname[0:10]
        if fname[0] in ['+', '-', '.']:
            # BUGFIX: was fname[1:-1], which also chopped the last character.
            fname = fname[1:]
        blacklist = ['/', '\t', '\b', '@', '#', '$', '%', '^', '&', '*',
                     '(', ')', '[', ']', '?']
        bad = set(blacklist) & set(fname)
        if bad:
            # Filter directly instead of interpolating the characters into a
            # regex character class, which could come out invalid (e.g. '[^]').
            fname = ''.join(ch for ch in fname if ch not in bad)
        return fname

    def rPlan(self, key: str):
        """Return the candidate list for plan letter *key*, or None."""
        return self.Dataforplan.get(key)

    def InitializationPlan(self):
        """Populate Dataforplan from the rplan.plan enum members."""
        for k, v in rplan.plan.__members__.items():
            # Months/days/names are comma-separated lists; everything else
            # contributes one candidate per character.
            if k in 'MDhH':
                tmp = v.value.split(',')
            else:
                tmp = list(v.value)
            self.Dataforplan[k] = tmp

    def __init__(self, args: dict) -> None:
        # CLI arguments override the class-level defaults.
        self.Archives = {**self.Archives, **args}
        self.InitializationPlan()
        print(f'Archives: {self.Archives}')

    def Action(self):
        """Run every argument handler in its required order."""
        Sequence = 'minpw,cust,fmu,dual_m,dual_d,dual_md,plan,out,list'.split(
            ',')
        for Seq in Sequence:
            vals = self.Archives[Seq]
            self.__act_dict[Seq](self, vals)
"lifelse@outlook.com"
] | lifelse@outlook.com |
99a4ce09a2f6f8b5ae00d0f27e9e5310227a043c | 5290b41db07900b9ec0e818976480918031766eb | /kb_site/manage.py | 99088931b2f798a59a85ee09abb9039ff33deba0 | [] | no_license | aleluk/KnowledgeBase | 8db061bf6b0531f2414e9d8dde8c2482d20e799c | e38c82dfa8269443a24d12f31096b82052c9c026 | refs/heads/master | 2023-04-11T07:48:00.266479 | 2021-04-18T12:35:59 | 2021-04-18T12:35:59 | 352,182,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: configure settings and hand off to Django's CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kb_site_main.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    # Standard Django entry point: run only when executed as a script.
    main()
| [
"64218199+aleluk@users.noreply.github.com"
] | 64218199+aleluk@users.noreply.github.com |
49d98b69895f2db5dd9fa22267d1e67e92e73d52 | 669196cb7444c699b9c477bd36d76082d534e08a | /tests/unit/test_user_email.py | c475eef807feb4dd45015fb7490c85ba2be6c329 | [
"MIT"
] | permissive | tilgovi/pyramid_fullauth | d51ad9fabca0ef380f6981c0f62e5c36d8484cba | 3de2f784e89c2e82104dbe36acbb85597e4fff31 | refs/heads/master | 2021-01-24T15:15:28.691347 | 2014-11-02T18:45:05 | 2014-11-02T18:45:05 | 26,466,736 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | """Test email related User methods."""
from pyramid.compat import text_type
from pyramid_fullauth.models import User
NEW_EMAIL = text_type('new@example.com')
def test_set_new_email():
    """User.set_new_email should stage the address without applying it.

    Requesting a change must populate ``new_email`` and generate an
    ``email_change_key`` that is later used to confirm the change.
    """
    user = User()
    # A fresh user has no pending change.
    assert user.new_email is None
    assert user.email_change_key is None
    user.set_new_email(NEW_EMAIL)
    # The change is staged: new address recorded, activation key created.
    assert user.email_change_key
    assert user.new_email == NEW_EMAIL
def test_change_email():
    """User.change_email should promote the staged address to ``email``.

    After set_new_email() + change_email(), the regular email field holds
    the new address and the one-time change key is cleared.
    """
    user = User()
    assert not user.email
    user.set_new_email(NEW_EMAIL)
    user.change_email()
    assert user.email == NEW_EMAIL
    assert not user.email_change_key
"fizyk@fizyk.net.pl"
] | fizyk@fizyk.net.pl |
00404615272f8b216b6112c31abf170d3dbb3ac4 | af200bff16d3b176c0cab30d0d71666f9511c3cd | /__init__.py | 2b7440ba08e59b803ecb9580382b5e9f80ef1c0e | [] | no_license | NKLASS/Russian-Grammar-Analyser | af222c307730bed8c97b55dd4672a6443bdead47 | e1d4ae8ceb427df412d93ca09a5e63651e4a72bc | refs/heads/master | 2023-03-17T21:54:38.729881 | 2017-03-29T14:20:59 | 2017-03-29T14:20:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | # -*- coding: UTF-8 -*-
import sys
import json
# Python 2 idiom: reload() restores sys.setdefaultencoding, which
# site.py deletes at interpreter startup.
reload(sys)
from flask import Flask, jsonify,request,render_template
from translator import analyseGrammar
# Default all implicit str<->unicode coercions to UTF-8 (Python 2 only).
sys.setdefaultencoding('utf-8')
# NOTE(review): `json` appears unused in this module -- confirm before removing.
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the single-page UI template."""
    return render_template('computer.html')
@app.route('/_add_numbers')
def add_numbers():
    """Analyse the phrase given in query parameter 'a'; reply as JSON.

    The response body has the shape {"result": [<analysis dict>]}.
    """
    phrase = request.args.get('a')
    analysis = analyseGrammar(phrase.lstrip(' '))
    return jsonify(result=[analysis])
if __name__ == '__main__':
    # Development server only -- debug=True must not be used in production.
    app.run(debug=True)
"bjamurray@gmail.com"
] | bjamurray@gmail.com |
476aa2a98daed909cdc94030738f19b05ded46bf | 454fc28a4db23ff10e0642bc9c67e01d95230d42 | /functions.py | 849d5ae0f362aa9dfddb6a2964e55bfc10b6502a | [] | no_license | SarsenovZ2z/fontrec_dataset | 9d739418109b98a348771c9a63dd5343f3ba1493 | 94f948bc3f942de478c1244cd1f963fa1cea66b1 | refs/heads/master | 2020-05-18T20:42:32.492312 | 2019-05-03T00:42:54 | 2019-05-03T00:42:54 | 184,628,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | import re
import random
import os
def getFonts(path):
    """Recursively collect TrueType font files under *path*.

    Returns a list of dicts, one per .ttf file found:
      'path' -- full path to the font file
      'name' -- file name without the .ttf extension
    """
    fonts = []
    # r=root, d=directories, f = files
    for r, d, f in os.walk(path):
        for file in f:
            # endswith() instead of substring test: avoids false positives
            # such as "backup.ttf.old", and splitext() strips only the
            # final extension rather than every '.ttf' occurrence.
            if file.endswith('.ttf'):
                fonts.append({'path': os.path.join(r, file),
                              'name': os.path.splitext(file)[0]})
    return fonts
def getRandomText(file):
    """Read *file*, collapse runs of blank lines, and return its lines.

    Uses a context manager so the file handle is closed deterministically
    (the previous version leaked it via open(...).read()).
    """
    with open(file, "r") as fh:
        text = fh.read()
    return re.sub(r'(\n\s*)+\n+', '\n', text).splitlines()
def rand():
    """Return a uniformly distributed float in the interval [0, 1]."""
    return random.uniform(0, 1)
def randInt(maxNum):
    """Return a random integer in [0, maxNum], both endpoints inclusive."""
    return random.randint(0, maxNum)
| [
"nurik9293709@gmail.com"
] | nurik9293709@gmail.com |
cd6a459ece5a08bd23ac75e022e08a981b4e98c4 | 5d09e3b32b0f7dee1147139e5e57822f33dc0f32 | /lib/authorship_simulate_citations.py | dfe00d94e2db5ca746145205494cf1700d1da662 | [] | no_license | scone-snu/pyflib2 | cb797f625100d280f6bd3b757795040ca892b1ed | bb2ad7d9974903ac8c3b01ac48b4d6ab72d2ac80 | refs/heads/master | 2020-03-31T17:37:54.216805 | 2011-05-06T04:43:31 | 2011-05-06T04:43:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,186 | py | import os
import glob
import re
import networkx as nx
import itertools
import matplotlib.pyplot as plt
import pickle
from collections import defaultdict
from PlotFunctions import *
import random
from scipy.stats import gamma
import math
# Variables that can be modified
START_YEAR = 1997 # Year to start simulation from (i.e. start simulation from START_YEAR+1)
NEW_EDGES_PER_YEAR = 1370 # Number of new edges per year
T = 6 # Years to simulate
P = 0.4 # Probability of choosing a neighbor
Q = 0.4 # Probability of choosing at random or closing a triangle, etc.
PREFIX = "ca"
# # Simulate from the single-edge graph
# G = nx.Graph()
# G.add_edge("1","2", weight=1, years=[START_YEAR])
# Simulate from START_YEAR: seed the simulation with the real
# co-authorship graph observed in that year.
G = nx.read_edgelist("../data/parsed/authorship_%d.edgelist" % START_YEAR, create_using=nx.Graph(), comments='#', delimiter='|', data=True, encoding='utf-8')
# Load year of first publication for each author
with open("../data/parsed/authorship.year", "r") as f:
    first_paper = pickle.load(f)
# Load # of papers each author produces in his/her lifetime
with open("../data/parsed/authorship.count", "r") as f:
    num_papers = pickle.load(f)
# Peak of a gamma(shape=3, scale=2) pdf sampled over 1..11.
# NOTE(review): max_gam is not referenced below -- confirm it is still needed.
max_gam = max(gamma.pdf(range(1,12),3,scale=2))
def num_new_nodes(year, author):
    """How many new collaboration edges *author* initiates in *year*.

    Constant activity level: exactly one new edge with probability 0.648,
    otherwise none.  *year* and *author* are accepted for interface
    compatibility but do not influence the draw.
    """
    return 1 if random.random() < 0.648 else 0
def num_papers_dist():
    """Lifetime paper count assigned to each newly created author.

    Currently a constant stub; a fitted distribution could replace it.
    """
    return 4
def num_citations_dist():
    """Citation count assigned to each newly created paper (constant stub)."""
    return 71
# NOTE: Python 2 / networkx 1.x code (print statements, nodes_iter/degree_iter).
# Citations assigned to papers created during the simulation; merged into
# each year's observed citation counts before edge formation.
new_num_citations = {}
for t in range(START_YEAR+1,START_YEAR+1+T):
    print "Simulating year %d..." % t
    # Load # of citations observed up to year t for real papers.
    with open("../data/parsed/citations_%d.count" % t) as f:
        num_citations = pickle.load(f)
    num_citations.update(new_num_citations)
    # Create new edges for existing nodes
    print "\t for existing nodes"
    for node in G.nodes_iter():
        for i in range(0, num_new_nodes(t,node)):
            # See if we want to form an edge and set target if we want to
            rand = random.random()
            target = None
            if rand < P:
                # Pick a neighbor, weighted by the best citation count among
                # the papers shared with that neighbor.
                bins = []
                for nbr in G.neighbors(node):
                    #print node,nbr,G[node][nbr]
                    mult = max([num_citations[p] for p in G[node][nbr]['papers']])
                    #clist = [num_citations[p] for p in G[node][nbr]['papers']]
                    #mult = int(round(float(sum(clist)) / len(clist)))
                    bins += [nbr] * mult
                # Fall back to uniform choice if every shared paper has 0 citations.
                if len(bins) == 0:
                    bins = G.neighbors(node)
                target = random.choice(bins)
            elif rand < P + Q:
                # Degree-random: pick a neighbor-of-neighbor (triangle closing),
                # weighted by how many 2-step paths reach it.
                bins = []
                for nbr in G.neighbors(node):
                    for nbr2 in G.neighbors(nbr):
                        bins += [nbr2]
                target = random.choice(bins)
            # Form an edge if target is set, don't form self-loops
            if target:
                #print "Adding edge from %s to %s" % (node,target)
                new_paper = "N"+str(t)+"_"+node+"_"+target
                num_citations[new_paper] = num_citations_dist()
                if G.has_edge(node,target):
                    # Repeat collaboration: bump weight and record the year/paper.
                    G[node][target]['weight'] += 1
                    G[node][target]['years'].append(t)
                    G[node][target]['papers'].append(new_paper)
                elif node != target:
                    G.add_edge(node, target, weight=1, years=[t], papers=[new_paper])
    # New node additions
    print "\t for new nodes"
    if len(G.nodes()) > 0:
        # Generate bins for preferential attachment
        bins = []
        for node,degree in G.degree_iter():
            bins += [node] * degree
        # Add new nodes and connect them to existing nodes using preferential attachment
        for i in range(0,NEW_EDGES_PER_YEAR):
            new_node = "N"+str(t)+"_"+str(i)
            new_paper = "N"+str(t)+"_"+new_node
            new_num_citations[new_paper] = num_citations_dist()
            first_paper[new_node] = t
            num_papers[new_node] = num_papers_dist()
            # Pick & connect to a random node
            G.add_edge(random.choice(bins), new_node, weight=1, years=[t], papers=[new_paper])
    # Snapshot the graph after each simulated year.
    nx.write_edgelist(G, "../data/simulations/%ssim_%d_%d_%f_%f.edgelist" % (PREFIX, START_YEAR, t, P, Q), comments='#', delimiter='|', data=True, encoding='utf-8')
#print G.edges()
# # Uncomment the below to visualize the graph. Might take extremely long to render!
# nx.draw_graphviz(G)
# plt.show()
"jccccf@gmail.com"
] | jccccf@gmail.com |
790830077d8069ae93383389d8841eccf07aeda2 | bc0f99dba2233f02e1f1b59711164bc2eb47e072 | /LOBDeepPP/LOBDeepPP_model/__LOB_models_output2D.py | 8679322c5bbb76988d2073f925c8c23fa9be2086 | [
"MIT"
] | permissive | mariussterling/LOBDeepPP_code | 29e483b70ee81f4302ea977c47a25d8ec743b2b9 | 010782f8db9a745940753f49d953361c32ee1190 | refs/heads/master | 2022-10-10T15:45:40.770829 | 2020-06-09T22:48:27 | 2020-06-09T22:48:27 | 255,342,824 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,570 | py | from keras import layers, models
from .__activations import PReLU2
def output_model_askbid(inp, params, output_shape, interpretable, **kwargs):
    """Build the ask/bid output head of the LOB model.

    Crops the input to its last time step, then feeds it to two parallel
    :func:`output_model_b` heads ('ask' and 'bid') and concatenates their
    outputs.  When *interpretable* is True the layers are attached
    directly to *inp*; otherwise they are wrapped in a nested
    ``models.Model`` named 'out' and applied to *inp*.

    NOTE(review): assumes *inp* is a 4-D tensor (batch, time, features,
    channels) -- the Cropping2D keeps only the final time step; confirm
    against the encoder that produces *inp*.
    """
    if interpretable:
        out = inp
    else:
        # Stand-alone input layer so the head can be a nested sub-model.
        out_inp = layers.InputLayer(
            input_shape=inp.get_shape().as_list()[1:],
            name='out_inp')
        out = out_inp.output
    # Keep only the last time step (crop everything before it).
    out = layers.Cropping2D(
        cropping=((out.shape[1].value - 1, 0), (0, 0)),
        name=f'out_cropping')(out)
    # Drop the now-singleton time axis.
    out = layers.Reshape(
        target_shape=[i.value for i in out.shape[2:]],
        name='out_reshape')(out)
    out_ask = output_model_b(
        out, params, output_shape[0],
        interpretable=kwargs.get('interpretable_nested', True),
        name='ask')
    out_bid = output_model_b(
        out, params, output_shape[0],
        interpretable=kwargs.get('interpretable_nested', True),
        name='bid')
    out = layers.concatenate([out_ask, out_bid], name='out_concatenate')
    if interpretable:
        return out
    else:
        return models.Model(inputs=out_inp.input, outputs=out, name='out')(inp)
def output_model_b(inp, params, output_shape, interpretable, name=''):
    """Build one side ('ask' or 'bid') of the output head.

    Applies the dense stack configured in ``params['output']['filters']``
    (Dense -> PReLU2 -> BatchNorm per filter size), flattens, and produces
    the prediction as the difference of a "positive" and a "negative"
    dense branch, each passed through PReLU2.  When *interpretable* is
    False the whole head is wrapped in a nested ``models.Model``.
    """
    # h = params.get('output').get('h', output_shape)
    if interpretable:
        out = inp
    else:
        out_inp = layers.InputLayer(
            input_shape=inp.get_shape().as_list()[1:],
            name=f'out_{name}_inp')
        out = out_inp.output
    filters = params['output'].get('filters', None)
    for i, f in enumerate(filters):
        out = layers.Dense(f, name=f'out_{name}_dense{i}')(out)
        out = PReLU2(name=f'out_{name}_dense{i}_relu')(out)
        out = layers.BatchNormalization(name=f'out_{name}_dense{i}_bn')(out)
    out = layers.Flatten(name=f'out_{name}_flatten')(out)
    # Positive branch: rectified dense projection of the features.
    out_p = layers.Dense(
        output_shape,
        name=f'out_{name}_out_pos')(out)
    out_p = PReLU2(name=f'out_{name}_out_pos_relu')(out_p)
    # Negative branch: same, on the sign-flipped features.
    out_n = layers.Lambda(
        lambda x: x * -1,
        name=f'out_{name}_out_neg0')(out)
    out_n = layers.Dense(
        output_shape,
        # activation='relu',
        name=f'out_{name}_out_neg')(out_n)
    out_n = PReLU2(name=f'out_{name}_out_neg_relu')(out_n)
    # Final prediction can be any sign: positive minus negative branch.
    out = layers.Subtract(name=f'out_{name}_out')([out_p, out_n])
    # Trailing channel axis so ask/bid heads can be concatenated.
    out = layers.Reshape(
        target_shape=out.get_shape().as_list()[1:] + [1],
        name=f'out_{name}_reshape')(out)
    if interpretable:
        return out
    else:
        return models.Model(
            inputs=out_inp.input,
            outputs=out,
            name=f'out_{name}'
        )(inp)
| [
"marius.sterling@hu-berlin.de"
] | marius.sterling@hu-berlin.de |
775bc8ad2440dec3fa0750bcca10332e6a975a4f | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-4/16a4c177de3f63055c5f0252c3f8ba202175fb41-<start_merge>-bug.py | 488cafe673b3ea8201fc11c222ab29d021e87ebf | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | def start_merge(self, project_id, previous_group_ids, new_group_id):
if (not previous_group_ids):
return
state = {
'transaction_id': uuid4().hex,
'project_id': project_id,
'previous_group_ids': previous_group_ids,
'new_group_id': new_group_id,
'datetime': datetime.now(tz=pytz.utc),
}
self._send(project_id, 'merge', extra_data=(state,), asynchronous=False) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
75ed8c814760c96bc4cb333a81523c02f6fce8d5 | 52a4d282f6ecaf3e68d798798099d2286a9daa4f | /test_sa.py | 81104dd1d3c6c5b477f238e92d7d1b4e9c05347a | [
"MIT"
] | permissive | bkovitz/FARGish | f0d1c05f5caf9901f520c8665d35780502b67dcc | 3dbf99d44a6e43ae4d9bba32272e0d618ee4aa21 | refs/heads/master | 2023-07-10T15:20:57.479172 | 2023-06-25T19:06:33 | 2023-06-25T19:06:33 | 124,162,924 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,332 | py | # test_sa.py -- Test of spreading activation
import unittest
from pprint import pprint as pp
import inspect
from time import process_time
from dataclasses import dataclass
import operator
from operator import itemgetter
from heapq import nlargest
from typing import Union, List, Tuple, Dict, Set, FrozenSet, Iterable, Any, \
NewType, Type, ClassVar, Sequence, Callable, Hashable
from itertools import chain
import networkx as nx
from Propagator import Propagator, Delta
NodeId = NewType('NodeId', int)
@dataclass
class MyProp(Propagator):
    """Minimal Propagator: each node sends weight * activation to neighbors."""
    noise: float = 0.0

    def make_deltas(self, g, old_d):
        """Produce the deltas emitted by every node with an activation."""
        return chain.from_iterable(
            self.deltas_from(g, old_d, node) for node in old_d
        )

    def deltas_from(self, g, old_d, nodeid) -> List[Delta]:
        '''Deltas from nodeid to its neighbors.'''
        activation = old_d.get(nodeid, 0.0)
        return [
            Delta(nbr, edge_d.get('weight', 1.0) * activation, nodeid)
            for nbr, edge_d in g.adj[nodeid].items()
        ]

    def min_value(self, g, nodeid):
        """Activations are clamped at zero from below."""
        return 0.0
class Node:
    # Identifier of this node within the graph.
    nodeid: NodeId
@dataclass(frozen=True)
class Operator:
    """A named arithmetic operation, e.g. ``Operator(operator.add, '+')``."""
    func: Callable
    name: str

    def __str__(self):
        return self.name

    def call(self, *operands: int) -> int:
        """Apply the wrapped function to *operands*."""
        return self.func(*operands)
# The three arithmetic operators available to Equation slipnodes.
plus = Operator(operator.add, '+')
times = Operator(operator.mul, 'x')
minus = Operator(operator.sub, '-')
@dataclass(frozen=True)
class Before:
    """Feature: ``obj`` was present before the slipnode's action occurred."""
    obj: Hashable

    def __str__(self):
        return f'Before({self.obj})'
@dataclass(frozen=True)
class After:
    """Feature: ``obj`` was present after the slipnode's action occurred."""
    obj: Hashable

    def __str__(self):
        return f'After({self.obj})'
@dataclass(frozen=True)
class Equation(Node):
    """An arithmetic fact such as ``5 + 4 = 9``, decomposable into features."""
    operands: Tuple[int]
    operator: Operator
    result: int

    def features(self) -> Iterable[Hashable]:
        """Yield the slipnet features this equation connects to."""
        for operand in self.operands:
            yield operand
            yield Before(operand)
        yield self.operator
        yield self.result
        yield After(self.result)

    def __str__(self):
        lhs = f' {self.operator} '.join(str(n) for n in self.operands)
        return f'{lhs} = {self.result}'
class TestSA(unittest.TestCase):
    """Spreading-activation smoke tests for MyProp."""

    def test_sa(self):
        prop = MyProp(positive_feedback_rate=0.0)
        self.assertEqual(prop.noise, 0.0)
        graph = nx.Graph()  # undirected graph
        graph.add_edge(1, 2, weight=1.0)
        graph.add_edge(1, 3, weight=1.3)
        graph.add_node(4)
        # Start every node at activation 1.0.
        initial = {nodeid: 1.0 for nodeid in graph.nodes}
        # One propagation step; expected values were observed empirically.
        result = prop.propagate(graph, initial)
        self.assertEqual(result, {1: 1.026, 2: 1.0, 3: 1.006, 4: 0.98})

    def test_eqns(self):
        prop = MyProp(positive_feedback_rate=0.0, sigmoid_p=1.5)

        def query(g, features, k=10):
            activations_in = {f: 1.0 for f in features}
            activations_out = prop.propagate(g, activations_in,
                                             num_iterations=10)
            tups = [
                (node, a)
                for (node, a) in activations_out.items()
                if isinstance(node, Equation)
            ]
            return nlargest(k, tups, itemgetter(1))

        def see(activations_d):
            for node, a in sorted(activations_d.items(), key=itemgetter(1)):
                print(f'{node!s:20s} {a:0.3f}')

        g = nx.Graph()
        # Slipnet: a bipartite graph of Equations and their features,
        # covering a op b for all 1 <= b < a <= 10.
        for a in range(1, 11):
            for b in range(1, a):
                for op in [plus, minus, times]:
                    eqn = Equation((a, b), op, op.call(a, b))
                    g.add_node(eqn)
                    for feat in eqn.features():
                        g.add_edge(feat, eqn, weight=1.0)
        tups = query(g, [4, 5, Before(4), Before(5)], k=3)
        self.assertCountEqual(
            ['5 + 4 = 9', '5 x 4 = 20', '5 - 4 = 1'],
            [str(eqn) for (eqn, a) in tups]
        )
if __name__ == '__main__':
    # Interactive exploration: build the slipnet and query it by hand.
    import matplotlib.pyplot as plt
    plt.ion()
    p = MyProp(positive_feedback_rate=0.0, sigmoid_p=1.5)
    def query(g, features, k=4):
        # Seed the given features with activation 1.0, propagate, and
        # return the k most active Equation nodes.
        activations_in = dict((f, 1.0) for f in features)
        activations_out = p.propagate(g, activations_in, num_iterations=10)
        tups = [
            (node, a)
            for (node, a) in activations_out.items()
            if isinstance(node, Equation)
        ]
        return nlargest(k, tups, itemgetter(1))
    def see(activations_d):
        # Debug helper: print nodes sorted by activation.
        for node, a in sorted(activations_d.items(), key=itemgetter(1)):
            print(f'{node!s:20s} {a:0.3f}')
    g = nx.Graph()
    # Slipnet: one Equation node per (a, b, operator) with b < a,
    # connected to all of its features.
    for a in range(1, 11):
        for b in range(1, 11):
            if b >= a:
                continue
            for operator in [plus, minus, times]:
                e = Equation((a, b), operator, operator.call(a, b))
                g.add_node(e)
                for f in e.features():
                    g.add_edge(f, e, weight=1.0)
    #e1 = Equation((2, 3), plus, plus.call(2, 3))
    #print(e1)
    # g.add_node(e1)
    # for f in e1.features():
    #     g.add_edge(f, e1, weight=1.0)
    # a0 = dict((f, 1.0) for f in [4, 5, Before(4), Before(5)])
    # #a0 = dict((f, 1.0) for f in [7, 6, Before(7), Before(6)])
    # see(a0)
    # print()
    #
    # start = process_time()
    # a1 = p.propagate(g, a0, num_iterations=10)
    # end = process_time()
    # print(end - start)
    # #see(a1)
    # print(sum(a1.values()))
    es = query(g, [4, 5, Before(4), Before(5)])
    pp(es)
    #nx.draw(g, with_labels=True, pos=nx.bipartite_layout(g, [n for n in g.nodes if isinstance(n, Equation)]))
    #plt.show()
| [
"bkovitz@indiana.edu"
] | bkovitz@indiana.edu |
0d6f563bf487e50143491c9294e56c9e298e24ec | a7596165a29e5186bc6c4718e3b6e835939b105d | /apps/pig/src/pig/views.py | 47823c4bb576f890292573687f7d79887416ac0b | [
"Apache-2.0"
] | permissive | lockhart39/HueQualityAndIngestionApp | f0c778665f0fbe699ec30e0df5e9f3ed8a9c3384 | c75e55a43a8bdeb7aa0f5bf2101ec72b01dcac1c | refs/heads/master | 2021-08-20T00:31:29.481333 | 2017-11-27T19:22:16 | 2017-11-27T19:22:16 | 112,237,923 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,542 | py | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from desktop.lib.django_util import JsonResponse, render
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.rest.http_client import RestException
from desktop.models import Document
from oozie.views.dashboard import show_oozie_error, check_job_access_permission,\
check_job_edition_permission
from pig import api
from pig.management.commands import pig_setup
from pig.models import get_workflow_output, hdfs_link, PigScript,\
create_or_update_script, get_scripts
LOG = logging.getLogger(__name__)
@ensure_csrf_cookie
def app(request):
    """Render the Pig editor single-page application shell."""
    # The beeswax app may not be installed/configured; in that case the
    # autocomplete base URL is simply left empty.
    base_url = ''
    try:
        base_url = reverse('beeswax:api_autocomplete_databases', kwargs={}) + '/'
    except:
        LOG.exception('failed to find autocomplete base url')
    context = {'autocomplete_base_url': base_url}
    return render('app.mako', request, context)
def scripts(request):
    """Return the current user's saved Pig script designs as JSON."""
    designs = get_scripts(request.user, is_design=True)
    return JsonResponse(designs, safe=False)
@show_oozie_error
def dashboard(request):
    """Return the user's Pig jobs (running and historical) as JSON."""
    pig_api = api.get(request.fs, request.jt, request.user)
    oozie_jobs = pig_api.get_jobs()
    # Hue-side documents are needed to map Oozie jobs back to saved scripts.
    hue_jobs = Document.objects.available(PigScript, request.user, with_history=True)
    massaged = pig_api.massaged_jobs_for_json(request, oozie_jobs, hue_jobs)
    return JsonResponse(massaged, safe=False)
def save(request):
    """Create or update a Pig script design from POSTed form fields.

    Returns JSON with the script id and its backing Document id.
    """
    if request.method != 'POST':
        raise PopupException(_('POST request required.'))

    post = request.POST
    attrs = {
        'id': post.get('id'),
        'name': post.get('name'),
        'script': post.get('script'),
        'user': request.user,
        'parameters': json.loads(post.get('parameters')),
        'resources': json.loads(post.get('resources')),
        'hadoopProperties': json.loads(post.get('hadoopProperties')),
    }

    pig_script = create_or_update_script(**attrs)
    pig_script.is_design = True
    pig_script.save()

    return JsonResponse({
        'id': pig_script.id,
        'docId': pig_script.doc.get().id
    }, content_type="text/plain")
@show_oozie_error
def stop(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
pig_script = PigScript.objects.get(id=request.POST.get('id'))
job_id = pig_script.dict['job_id']
job = check_job_access_permission(request, job_id)
check_job_edition_permission(job, request.user)
try:
api.get(request.fs, request.jt, request.user).stop(job_id)
except RestException, e:
raise PopupException(_("Error stopping Pig script.") % e.message)
return watch(request, job_id)
@show_oozie_error
def run(request):
    """Submit a Pig script to Oozie; return its id and a watch URL as JSON."""
    if request.method != 'POST':
        raise PopupException(_('POST request required.'))

    post = request.POST
    attrs = {
        'id': post.get('id'),
        'name': post.get('name'),
        'script': post.get('script'),
        'user': request.user,
        'parameters': json.loads(post.get('parameters')),
        'resources': json.loads(post.get('resources')),
        'hadoopProperties': json.loads(post.get('hadoopProperties')),
        'is_design': False
    }

    # Submissions are stored as non-design (history) copies of the script.
    pig_script = create_or_update_script(**attrs)

    params = post.get('submissionVariables')
    oozie_id = api.get(request.fs, request.jt, request.user).submit(pig_script, params)

    pig_script.update_from_dict({'job_id': oozie_id})
    pig_script.save()

    watch_url = reverse('pig:watch', kwargs={'job_id': oozie_id}) + '?format=python'
    return JsonResponse({
        'id': pig_script.id,
        'watchUrl': watch_url
    }, content_type="text/plain")
def copy(request):
    """Duplicate a Pig script design (and its backing Document) for the user.

    Expects a POST with 'id' (PigScript primary key). The caller must have
    read access to the source document. Returns JSON describing the copy.
    """
    if request.method != 'POST':
        raise PopupException(_('POST request required.'))

    pig_script = PigScript.objects.get(id=request.POST.get('id'))
    doc = pig_script.doc.get()

    try:
        doc.can_read_or_exception(request.user)
    except Exception, e:
        raise PopupException(e)

    # Snapshot the fields of the source script before creating the copy.
    existing_script_data = pig_script.dict

    owner = request.user
    name = existing_script_data["name"] + _(' (Copy)')
    script = existing_script_data["script"]
    parameters = existing_script_data["parameters"]
    resources = existing_script_data["resources"]
    hadoopProperties = existing_script_data["hadoopProperties"]

    script_copy = PigScript.objects.create(owner=owner)
    script_copy.update_from_dict({
        'name': name,
        'script': script,
        'parameters': parameters,
        'resources': resources,
        'hadoopProperties': hadoopProperties
    })
    script_copy.save()

    # Copy the Document wrapper too, so sharing/metadata follow the script.
    copy_doc = doc.copy(content_object=script_copy, name=name, owner=owner)

    response = {
        'id': script_copy.id,
        'docId': copy_doc.id,
        'name': name,
        'script': script,
        'parameters': parameters,
        'resources': resources,
        'hadoopProperties': hadoopProperties
    }
    return JsonResponse(response, content_type="text/plain")
def delete(request):
    """Delete the Pig scripts whose ids are POSTed as a comma-separated list.

    Deletion is best-effort: scripts that no longer exist or that the user
    may not edit are logged and skipped rather than aborting the batch.
    Returns JSON echoing the requested ids.
    """
    if request.method != 'POST':
        raise PopupException(_('POST request required.'))

    ids = request.POST.get('ids').split(",")

    for script_id in ids:
        try:
            pig_script = PigScript.objects.get(id=script_id)
            pig_script.can_edit_or_exception(request.user)
            pig_script.doc.all().delete()
            pig_script.delete()
        except Exception:
            # Fixed: was a bare "except:" (which also swallowed SystemExit /
            # KeyboardInterrupt) followed by a stray no-op "None" expression.
            LOG.exception('failed to delete pig script')

    response = {
        'ids': ids,
    }
    return JsonResponse(response, content_type="text/plain")
@show_oozie_error
def watch(request, job_id):
    """Return JSON status for the Oozie workflow backing a Pig script run.

    Includes workflow progress/actions, the aggregated logs, whether the
    job has really finished, and a link to its HDFS output directory.
    """
    oozie_workflow = check_job_access_permission(request, job_id)
    logs, workflow_actions, is_really_done = api.get(request.fs, request.jt, request.user).get_log(request, oozie_workflow)
    output = get_workflow_output(oozie_workflow, request.fs)

    workflow = {
        'job_id': oozie_workflow.id,
        'status': oozie_workflow.status,
        'progress': oozie_workflow.get_progress(),
        'isRunning': oozie_workflow.is_running(),
        # URLs for the client-side kill/rerun buttons.
        'killUrl': reverse('oozie:manage_oozie_jobs', kwargs={'job_id': oozie_workflow.id, 'action': 'kill'}),
        'rerunUrl': reverse('oozie:rerun_oozie_job', kwargs={'job_id': oozie_workflow.id, 'app_path': oozie_workflow.appPath}),
        'actions': workflow_actions
    }

    response = {
        'workflow': workflow,
        'logs': logs,
        'isReallyDone': is_really_done,
        'output': hdfs_link(output)
    }
    return JsonResponse(response, content_type="text/plain")
def install_examples(request):
result = {'status': -1, 'message': ''}
if request.method != 'POST':
result['message'] = _('A POST request is required.')
else:
try:
pig_setup.Command().handle_noargs()
result['status'] = 0
except Exception, e:
LOG.exception(e)
result['message'] = str(e)
return JsonResponse(result)
| [
"cloudera@quickstart.cloudera"
] | cloudera@quickstart.cloudera |
05743f19fd6a54dc73ab2663b6a43d967e3953e5 | caf39133030e9e9d9240769fbfe72287009c6b51 | /supervised_learning/0x00-binary_classification/11-neural_network.py | 21048d014bf2a955602c5ea89d0fdecd77c0e585 | [] | no_license | sazad44/holbertonschool-machine_learning | d08facbc24582ebcedf9a8607c82b18909fe7867 | b92e89b980a8f1360a24f4ed5654a2ab0dfac679 | refs/heads/master | 2022-11-30T22:32:21.264942 | 2020-08-12T05:25:06 | 2020-08-12T05:25:06 | 280,286,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,014 | py | #!/usr/bin/env python3
"""Class definitions for neural network with 1 hidden layer"""
import numpy as np
class NeuralNetwork():
    """Neural network with one hidden layer for binary classification.

    Weights are initialized from a standard normal distribution, biases to
    zero. Activations use the logistic sigmoid.
    """

    def __init__(self, nx, nodes):
        """Initialize the network's weights, biases and activations.

        Args:
            nx (int): number of input features (must be >= 1).
            nodes (int): number of nodes in the hidden layer (must be >= 1).

        Raises:
            TypeError: if nx or nodes is not an integer.
            ValueError: if nx or nodes is less than 1.
        """
        if not isinstance(nx, int):
            raise TypeError("nx must be an integer")
        elif nx < 1:
            raise ValueError("nx must be a positive integer")
        if not isinstance(nodes, int):
            raise TypeError("nodes must be an integer")
        elif nodes < 1:
            raise ValueError("nodes must be a positive integer")
        self.__W1 = np.random.randn(nodes, nx)
        # np.zeros produces the same float64 (nodes, 1) column as the old
        # np.array([float(0)] * nodes).reshape(nodes, 1), more idiomatically.
        self.__b1 = np.zeros((nodes, 1))
        self.__A1 = 0
        self.__W2 = np.random.randn(1, nodes)
        self.__b2 = 0
        self.__A2 = 0

    @property
    def W1(self):
        """Hidden-layer weight matrix, shape (nodes, nx)."""
        return self.__W1

    @property
    def b1(self):
        """Hidden-layer bias column vector, shape (nodes, 1)."""
        return self.__b1

    @property
    def A1(self):
        """Hidden-layer activations (0 until forward_prop is called)."""
        return self.__A1

    @property
    def W2(self):
        """Output-layer weight matrix, shape (1, nodes)."""
        return self.__W2

    @property
    def b2(self):
        """Output-layer bias scalar."""
        return self.__b2

    @property
    def A2(self):
        """Output activation (0 until forward_prop is called)."""
        return self.__A2

    def forward_prop(self, X):
        """Calculate forward propagation of the neural network.

        Args:
            X: input data of shape (nx, m), m being the number of examples.

        Returns:
            tuple: (A1, A2) — hidden and output sigmoid activations.
        """
        matmul = np.matmul(self.__W1, X)
        actVals = 1 / (1 + np.exp(-(matmul + self.__b1)))
        self.__A1 = actVals
        matmul = np.matmul(self.__W2, self.__A1)
        actVals = 1 / (1 + np.exp(-(matmul + self.__b2)))
        self.__A2 = actVals
        return self.__A1, self.__A2

    def cost(self, Y, A):
        """Calculate the cost of the model using logistic regression.

        Args:
            Y: correct labels, shape (1, m).
            A: activated output predictions, shape (1, m).

        Returns:
            float: mean cross-entropy loss (1.0000001 guards against log(0)).
        """
        costMat = -(Y * (np.log(A)) + (1 - Y) * np.log(1.0000001 - A))
        costVal = np.sum(costMat) / len(costMat[0])
        return costVal
| [
"36613205+sazad44@users.noreply.github.com"
] | 36613205+sazad44@users.noreply.github.com |
946ffb36b8439369b9bb56e4d75d22cf7dc120d2 | 317f0a8f92043a04a1ec1986603c77c5844d7314 | /Default/install_package_control.py | 2d10cd1b517c4435d3e33e2343bb96ede23c25af | [
"LicenseRef-scancode-boost-original"
] | permissive | sharunkumar/Packages | f658c0a1bbe505a697fc62cbd580950ef388a6bc | bae297c3f03921c8aa2e0adb0ce2a40ee8d33330 | refs/heads/master | 2020-04-06T09:51:16.493005 | 2019-05-09T09:39:20 | 2019-05-09T09:39:20 | 157,359,405 | 0 | 0 | NOASSERTION | 2018-11-13T10:06:30 | 2018-11-13T10:06:30 | null | UTF-8 | Python | false | false | 5,524 | py | import base64
import binascii
import os
import threading
from urllib.error import URLError
from urllib.request import build_opener, install_opener, ProxyHandler, urlopen
import sublime
import sublime_api
import sublime_plugin
class InstallPackageControlCommand(sublime_plugin.ApplicationCommand):
    """Download, verify and install the Package Control package.

    The download itself is not authenticated (see _download), so the
    package's authenticity is established by checking a detached
    public-key signature of the downloaded bytes before installing.
    """

    error_prefix = 'Error installing Package Control: '
    filename = 'Package Control.sublime-package'
    # Base64-encoded DER public key of the package publisher; passed to
    # sublime_api.verify_pc_signature() in _verify().
    public_key = (
        'MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEkiE2JtDn/IQDfVLso4HRg0BNMHNj'
        '5rpuEIVaX6txyFS0HoBmCgd+9AXKcgKAsBKbEBD6a9nVzLLmJrDVFafepQ==')

    def run(self):
        # Network work happens off the UI thread; results are marshalled
        # back via sublime.set_timeout (see _install).
        threading.Thread(target=self._install).start()

    def is_visible(self):
        # Hide the command when Package Control is already present, either
        # packed (Installed Packages/*.sublime-package) or unpacked
        # (Packages/<name>/).
        ipp_path = os.path.join(sublime.installed_packages_path(), self.filename)
        p_path = os.path.join(sublime.packages_path(), self.filename.replace('.sublime-package', ''))
        return not os.path.exists(ipp_path) and not os.path.exists(p_path)

    def _install(self):
        """
        RUNS IN A THREAD

        Downloads and then installs Package Control, alerting the user to
        the result
        """

        try:
            package_data = self._download()
            if package_data is None:
                # Download/verification already printed the reason.
                sublime.set_timeout(self._show_error, 10)
                return

            dest = os.path.join(sublime.installed_packages_path(), self.filename)
            with open(dest, 'wb') as f:
                f.write(package_data)

            sublime.set_timeout(self._show_success, 10)

        except (Exception) as e:
            print(self.error_prefix + str(e))
            sublime.set_timeout(self._show_error, 10)

    def _show_success(self):
        """
        RUNS IN THE MAIN THREAD
        """

        sublime.message_dialog(
            "Package Control was successfully installed\n\n"
            "Use the Command Palette and type \"Install Package\" to get started")

    def _show_error(self):
        """
        RUNS IN THE MAIN THREAD
        """

        sublime.error_message(
            "An error occurred installing Package Control\n\n"
            "Please check the Console for details\n\n"
            "Visit https://packagecontrol.io/installation for manual instructions")

    def _download(self):
        """
        RUNS IN A THREAD

        Attempts to download over TLS first, falling back to HTTP in case a
        user's proxy configuration doesn't work with TLS by default.

        Although a secure connection is made, Python 3.3 does not check the
        connection hostname against the certificate, so a TLS connection
        really only provides privacy. To ensure that the package has not been
        modified, we check a public-key signature of the file.

        :return:
            None or a byte string of the verified package file
        """

        host_path = 'packagecontrol.io/' + self.filename.replace(' ', '%20')

        # Don't be fooled by the TLS URL, Python 3.3 does not verify hostnames
        secure_url = 'https://' + host_path
        insecure_url = 'http://' + host_path
        secure_sig_url = secure_url + '.sig'
        insecure_sig_url = insecure_url + '.sig'

        # Respect the user's system proxy settings for all urlopen calls.
        install_opener(build_opener(ProxyHandler()))

        try:
            package_data = urlopen(secure_url).read()
            sig_data = urlopen(secure_sig_url).read()

        except (URLError) as e:
            print('%sHTTPS error encountered, falling back to HTTP - %s' % (self.error_prefix, str(e)))
            try:
                package_data = urlopen(insecure_url).read()
                sig_data = urlopen(insecure_sig_url).read()

            except (URLError) as e2:
                print('%sHTTP error encountered, giving up - %s' % (self.error_prefix, str(e2)))
                return None

        return self._verify(package_data, sig_data)

    def _verify(self, package_data, sig_data):
        """
        RUNS IN A THREAD

        Verifies the package is authentic

        :param package_data:
            A byte string of the .sublime-package data

        :param sig_data:
            A byte string of the .sig data

        :return:
            None if invalid, byte string of package file otherwise
        """

        try:
            armored_sig = sig_data.decode('ascii').strip()
        except (UnicodeDecodeError):
            print(self.error_prefix + 'invalid signature ASCII encoding')
            return None

        begin = '-----BEGIN PACKAGE CONTROL SIGNATURE-----'
        end = '-----END PACKAGE CONTROL SIGNATURE-----'

        pem_error = self.error_prefix + 'invalid signature PEM armor'

        # Hand-rolled PEM de-armoring: collect the base64 body between the
        # BEGIN and END markers, rejecting anything malformed.
        b64_sig = ''
        in_body = None
        for line in armored_sig.splitlines():
            if not in_body:
                if line != begin:
                    print(pem_error)
                    return None
                in_body = True
            else:
                if line.startswith('-----'):
                    if line != end:
                        print(pem_error)
                        return None
                    break
                b64_sig += line

        try:
            sig = base64.b64decode(b64_sig)
        except (binascii.Error):
            print(self.error_prefix + 'invalid signature base64 decoding')
            return None

        public_key = base64.b64decode(self.public_key)

        if not sublime_api.verify_pc_signature(package_data, sig, public_key):
            print(self.error_prefix + 'signature could not be verified')
            return None

        return package_data
| [
"sharunkumar.ks@gofrugal.com"
] | sharunkumar.ks@gofrugal.com |
c1e9f92e53090868a41830a7785c711adfab01bc | d9f63d87a9f7b19d5ee60c5f38e9007687df4078 | /面向对象-类和对象4.py | 6b8af3e544ed5021e3843f440b94064de10669be | [] | no_license | zhouf1234/untitled3 | 4b156046f0fea2c773785cba0486621625004786 | 238c5aaef121f3d716c96290e7e417a9a4a03b4e | refs/heads/master | 2020-05-05T02:36:07.396459 | 2019-04-05T08:27:31 | 2019-04-05T08:27:31 | 179,643,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | class Person:
school='阳光幼儿园'
def __init__(self):
self.name='丹丹'
p1=Person()
# 使用对象的方法(属性)时,先看有无此属性,如果没有再看类有无此属性
print(p1.school) #阳光幼儿园
# 给对象改school属性后
Person.school='夏天小学'
print(p1.school) #夏天小学
print()
p2=Person()
print(p2.school) #夏天小学 | [
"="
] | = |
bf0840495fc063b35d948fe9b69befd937bd7de7 | d60acaac9e460c5693efe61449667b3c399c53c8 | /algebra/linear/fishercriterion.py | 1c1c14ab2e5666bf05a05221df9b5c7bd15195f6 | [] | no_license | HussainAther/mathematics | 53ea7fb2470c88d674faa924405786ba3b860705 | 6849cc891bbb9ac69cb20dfb13fe6bb5bd77d8c5 | refs/heads/master | 2021-07-22T00:07:53.940786 | 2020-05-07T03:11:17 | 2020-05-07T03:11:17 | 157,749,226 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | import numpy as np
"""
We can use dimensionality reduction for linear classification models.
One-dimensional input vector x projected down to one dimension using
y = w^T x
We consider a two-class problem with N1 points of class C1 and N2 points of class C2
so the mean vectors of the two classes aare given by:
m1 = (1/N1) * summation of x_n over class C1 and m2 = (1/N2) times summation of x_n over class C2
Separation of the projected class means lets us choose w (the plane onto which we project)
m2 - m1 = w^T (m2-m1)
such that mk = w^T mk .
Fisher criterion is defined as the ratio of the between-class variance to the
within-class variance given by:
J(w) = (m2-m1)^2 / (s1^2 + s2^2)
in which sk^2 for some k is given by the summation of (yn - mk)^2
for one-dimensional space y
"""
def fisher_criterion(v1, v2):
    """Fisher-style separation score: |mean gap| over summed variances.

    Args:
        v1, v2: array-like sequences of (projected) sample values.

    Returns:
        float: non-negative score; larger means better-separated classes.
    """
    mean_gap = np.mean(v1) - np.mean(v2)
    total_var = np.var(v1) + np.var(v2)
    return abs(mean_gap) / total_var
| [
"shussainather@gmail.com"
] | shussainather@gmail.com |
940976f32b9a4bc97574ca4635af3b4154fe20cd | 2e8f6b40cdd1c8d89b5345ab00ea467310eeb90b | /generate/select_tables.py | 12f4600132a6e33c9b1bd0cc843c417db2b24a78 | [
"MIT"
] | permissive | samirgadkari/companies | d028deb88ee6ab46391d5c6d52c455a2846e87cd | f683a3d077ec3d9b7241e9c91e6393b290f80b2e | refs/heads/master | 2021-06-25T01:59:22.878337 | 2021-03-10T02:32:32 | 2021-03-10T02:32:32 | 212,151,854 | 0 | 0 | MIT | 2021-01-14T20:47:04 | 2019-10-01T17:00:49 | Jupyter Notebook | UTF-8 | Python | false | false | 16,986 | py | import os
from utils.file import copy_file
from utils.environ import html_samples_dir
selected_tables = ['file:///Volumes/datadrive/tables-extracted_split-tables/0000036146_TRUSTMARK_CORP/10-k/2018-01-01_2018-12-31_10-K/107.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000036146_TRUSTMARK_CORP/10-k/2018-01-01_2018-12-31_10-K/137.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000036146_TRUSTMARK_CORP/10-k/2013-01-01_2013-12-31_10-K/111.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000036146_TRUSTMARK_CORP/10-k/2013-01-01_2013-12-31_10-K/14.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000064782_MELLON_FINANCIAL_CORP/10-k/2006-01-01_2006-12-31_10-K/23.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000064782_MELLON_FINANCIAL_CORP/10-k/2006-01-01_2006-12-31_10-K/83.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000064782_MELLON_FINANCIAL_CORP/10-k/2006-01-01_2006-12-31_10-K/24.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000090498_SIMMONS_FIRST_NATIONAL_CORP/10-k/2011-01-01_2011-12-31_10-K/109.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000090498_SIMMONS_FIRST_NATIONAL_CORP/10-k/2011-01-01_2011-12-31_10-K/33.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000090498_SIMMONS_FIRST_NATIONAL_CORP/10-k/2016-01-01_2016-12-31_10-K/12.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000090498_SIMMONS_FIRST_NATIONAL_CORP/10-k/2016-01-01_2016-12-31_10-K/152.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000039263_CULLEN_FROST_BANKERS,_INC./10-k/2016-01-01_2016-12-31_10-K/152.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000039263_CULLEN_FROST_BANKERS,_INC./10-k/2016-01-01_2016-12-31_10-K/147.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001475841_National_Bank_Holdings_Corp/10-k/2015-01-01_2015-12-31_10-K/157.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001475841_National_Bank_Holdings_Corp/10-k/2015-01-01_2015-12-31_10-K/15.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001606363_Green_Bancorp,_Inc./10-k/2016-01-01_2016-12-31_10-K/256.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001606363_Green_Bancorp,_Inc./10-k/2016-01-01_2016-12-31_10-K/247.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001606363_Green_Bancorp,_Inc./10-k/2016-01-01_2016-12-31_10-K/99.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001473844_CBTX,_Inc./10-k/2018-01-01_2018-12-31_10-K/111.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001473844_CBTX,_Inc./10-k/2018-01-01_2018-12-31_10-K/110.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001319327_Encore_Bancshares_Inc/10-k/2010-01-01_2010-12-31_10-K/48.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001319327_Encore_Bancshares_Inc/10-k/2010-01-01_2010-12-31_10-K/39.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001393534_Belvedere_SoCal/10-k/2008-01-01_2008-12-31_10-K/50.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001393534_Belvedere_SoCal/10-k/2008-01-01_2008-12-31_10-K/44.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001172102_FIRST_RELIANCE_BANCSHARES_INC/10-k/2012-01-01_2012-12-31_10-K/22.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001172102_FIRST_RELIANCE_BANCSHARES_INC/10-k/2012-01-01_2012-12-31_10-K/100.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001172102_FIRST_RELIANCE_BANCSHARES_INC/10-k/2012-01-01_2012-12-31_10-K/74.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001172102_FIRST_RELIANCE_BANCSHARES_INC/10-k/2012-01-01_2012-12-31_10-K/99.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001279756_SOUTHCREST_FINANCIAL_GROUP_INC/10-k/2007-01-01_2007-12-31_10-K/54.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001279756_SOUTHCREST_FINANCIAL_GROUP_INC/10-k/2007-01-01_2007-12-31_10-K/46.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001279756_SOUTHCREST_FINANCIAL_GROUP_INC/10-k/2007-01-01_2007-12-31_10-K/67.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001145547_GREER_BANCSHARES_INC/10-k/2009-01-01_2009-12-31_10-K/20.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001145547_GREER_BANCSHARES_INC/10-k/2009-01-01_2009-12-31_10-K/14.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001145547_GREER_BANCSHARES_INC/10-k/2009-01-01_2009-12-31_10-K/8.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001145547_GREER_BANCSHARES_INC/10-k/2003-01-01_2003-12-31_10-K/18.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001118237_MBT_FINANCIAL_CORP/10-k/2008-01-01_2008-12-31_10-K/4.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001118237_MBT_FINANCIAL_CORP/10-k/2013-01-01_2013-12-31_10-K/107.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001118237_MBT_FINANCIAL_CORP/10-k/2018-01-01_2018-12-31_10-K/110.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001118237_MBT_FINANCIAL_CORP/10-k/2018-01-01_2018-12-31_10-K-A/5.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001098146_PATRIOT_NATIONAL_BANCORP_INC/10-k/2010-01-01_2010-12-31_10-K-A/2.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001098146_PATRIOT_NATIONAL_BANCORP_INC/10-k/2012-01-01_2012-12-31_10-K/114.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001098146_PATRIOT_NATIONAL_BANCORP_INC/10-k/2016-01-01_2016-12-31_10-K/128.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001102266_CenterState_Bank_Corp/10-k/2009-01-01_2009-12-31_10-K/10.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001102266_CenterState_Bank_Corp/10-k/2009-01-01_2009-12-31_10-K/23.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001102266_CenterState_Bank_Corp/10-k/2017-01-01_2017-12-31_10-K/176.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001102266_CenterState_Bank_Corp/10-k/2017-01-01_2017-12-31_10-K/186.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001099932_CENTRA_FINANCIAL_HOLDINGS_INC/10-k/2008-01-01_2008-12-31_10-K/19.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001099932_CENTRA_FINANCIAL_HOLDINGS_INC/10-k/2008-01-01_2008-12-31_10-K/26.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001094742_MAINSTREET_BANKSHARES_INC/10-k/2013-01-01_2013-12-31_10-K/109.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001094742_MAINSTREET_BANKSHARES_INC/10-k/2013-01-01_2013-12-31_10-K/136.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001098151_FIDELITY_D_&_D_BANCORP_INC/10-k/2018-01-01_2018-12-31_10-K/104.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001098151_FIDELITY_D_&_D_BANCORP_INC/10-k/2018-01-01_2018-12-31_10-K/159.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001098151_FIDELITY_D_&_D_BANCORP_INC/10-k/2018-01-01_2018-12-31_10-K/180.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001084717_PACIFIC_CONTINENTAL_CORP/10-k/2008-01-01_2008-12-31_10-K/20.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001084717_PACIFIC_CONTINENTAL_CORP/10-k/2008-01-01_2008-12-31_10-K/51.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001084717_PACIFIC_CONTINENTAL_CORP/10-k/2008-01-01_2008-12-31_10-K/38.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001060455_KILLBUCK_BANCSHARES_INC/10-k/2007-01-01_2007-12-31_10-K/20.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001060455_KILLBUCK_BANCSHARES_INC/10-k/2011-01-01_2011-12-31_10-K/52.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001058867_GUARANTY_BANCSHARES_INC__TX_/10-k/2003-01-01_2003-12-31_10-K/21.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001058867_GUARANTY_BANCSHARES_INC__TX_/10-k/2003-01-01_2003-12-31_10-K/42.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001058867_GUARANTY_BANCSHARES_INC__TX_/10-k/2003-01-01_2003-12-31_10-K/9.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001038773_SMARTFINANCIAL_INC./10-k/2010-01-01_2010-12-31_10-K/16.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001038773_SMARTFINANCIAL_INC./10-k/2018-01-01_2018-12-31_10-K/101.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001037652_PELICAN_FINANCIAL_INC/10-k/2004-01-01_2004-12-31_10-K/45.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001035713_PROVIDIAN_FINANCIAL_CORP/10-k/2004-01-01_2004-12-31_10-K/24.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0001035713_PROVIDIAN_FINANCIAL_CORP/10-k/2004-01-01_2004-12-31_10-K/10.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000947559_FIRST_BANCSHARES_INC__MS_/10-k/2018-01-01_2018-12-31_10-K/10.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000947559_FIRST_BANCSHARES_INC__MS_/10-k/2018-01-01_2018-12-31_10-K/219.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000947559_FIRST_BANCSHARES_INC__MS_/10-k/2018-01-01_2018-12-31_10-K/215.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000947559_FIRST_BANCSHARES_INC__MS_/10-k/2013-01-01_2013-12-31_10-K/108.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000893467_NORTHWEST_BANCORPORATION_INC/10-k/2012-01-01_2012-12-31_10-K/100.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000875357_BOK_FINANCIAL_CORP_ET_AL/10-k/2014-01-01_2014-12-31_10-K/107.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000875357_BOK_FINANCIAL_CORP_ET_AL/10-k/2014-01-01_2014-12-31_10-K/125.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000875357_BOK_FINANCIAL_CORP_ET_AL/10-k/2018-01-01_2018-12-31_10-K/109.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000914138_MIDDLEBURG_FINANCIAL_CORP/10-k/2010-01-01_2010-12-31_10-K/20.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000914138_MIDDLEBURG_FINANCIAL_CORP/10-k/2016-01-01_2016-12-31_10-K-A/9.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000914138_MIDDLEBURG_FINANCIAL_CORP/10-k/2003-01-01_2003-12-31_10-K/20.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000914138_MIDDLEBURG_FINANCIAL_CORP/10-k/2003-01-01_2003-12-31_10-K/26.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000914138_MIDDLEBURG_FINANCIAL_CORP/10-k/2006-01-01_2006-12-31_10-K/20.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000914138_MIDDLEBURG_FINANCIAL_CORP/10-k/2006-01-01_2006-12-31_10-K/47.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000856223_SUMMIT_FINANCIAL_CORP/10-k/2004-01-01_2004-12-31_10-K/17.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000856223_SUMMIT_FINANCIAL_CORP/10-k/2004-01-01_2004-12-31_10-K/51.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000856223_SUMMIT_FINANCIAL_CORP/10-k/2004-01-01_2004-12-31_10-K/41.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000835012_COMMONWEALTH_BANKSHARES_INC/10-k/2010-01-01_2010-12-31_10-K/19.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000835012_COMMONWEALTH_BANKSHARES_INC/10-k/2010-01-01_2010-12-31_10-K/24.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000835012_COMMONWEALTH_BANKSHARES_INC/10-k/2005-01-01_2005-12-31_10-K/28.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000846617_BRIDGE_BANCORP,_INC./10-k/2018-01-01_2018-12-31_10-K/109.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000846617_BRIDGE_BANCORP,_INC./10-k/2018-01-01_2018-12-31_10-K/107.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000846617_BRIDGE_BANCORP,_INC./10-k/2004-01-01_2004-12-31_10-K/17.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000840256_VINEYARD_NATIONAL_BANCORP/10-k/2005-01-01_2005-12-31_10-K/61.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000803112_SECOND_BANCORP_INC/10-k/2002-01-01_2002-12-31_10-K/23.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000797838_NEFFS_BANCORP_INC/10-k/2010-01-01_2010-12-31_10-K-A/4.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000797838_NEFFS_BANCORP_INC/10-k/2008-01-01_2008-12-31_10-K/13.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000750686_CAMDEN_NATIONAL_CORP/10-k/2010-01-01_2010-12-31_10-K/17.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000750686_CAMDEN_NATIONAL_CORP/10-k/2010-01-01_2010-12-31_10-K/33.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000750686_CAMDEN_NATIONAL_CORP/10-k/2017-01-01_2017-12-31_10-K/106.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000759458_CANANDAIGUA_NATIONAL_CORP/10-k/2003-01-01_2003-12-31_10-K/13.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000759458_CANANDAIGUA_NATIONAL_CORP/10-k/2012-01-01_2012-12-31_10-K/59.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000759458_CANANDAIGUA_NATIONAL_CORP/10-k/2012-01-01_2012-12-31_10-K/71.table-extracted',
'file:///Volumes/datadrive/tables-extracted_split-tables/0000740971_OLD_POINT_FINANCIAL_CORP/10-k/2010-01-01_2010-12-31_10-K/10.table-extracted']
def select_tables():
    """Copy every file in the global ``selected_tables`` list into the
    ``html_input`` sample directory, flattening the path into the filename.

    Side effects: rebinds the global ``selected_tables`` to the scheme-less
    paths and copies each file via ``copy_file``.
    """
    global selected_tables

    def remove_prefix(filename):
        # Drop the leading 'file://' URI scheme, leaving a filesystem path.
        return filename[len('file://'):]

    # BUG FIX: the original used map(), which in Python 3 returns a one-shot
    # iterator; after the loop below consumed it, the global was left
    # exhausted and unusable by any later caller.  Materialize a list so
    # selected_tables stays a reusable sequence.
    selected_tables = [remove_prefix(f) for f in selected_tables]
    for filename in selected_tables:
        parts = filename.split(os.sep)
        # parts[4:] skips the fixed '/Volumes/datadrive/...' prefix; the
        # remaining components are joined with '__' to build a flat name.
        dest_filename = os.path.join(html_samples_dir(),
                                     'html_input',
                                     '__'.join(parts[4:]))
        copy_file(filename, dest_filename)
| [
"samir.gadkari@gmail.com"
] | samir.gadkari@gmail.com |
8af064ef0d7490610f6c59dfd4002054ce1eda91 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_27094.py | ca9e4b98345e5ed3db4156dcb2812fcc628ce499 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | # Save full text of a tweet with tweepy
retweeted_status
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
d86ee1b4566a0e1a14a1ae48450497bf4829a0b9 | bc06161fc2a7ac28931042a9e503a276c003870c | /mysite/vacancies/management/commands/_utils.py | f64f9e7411333ccda3e65785f282f3ba08111b11 | [] | no_license | ilineserg/django_indeed | ac324ee886509b10b119f528ab0c1c0ed809ac25 | 911380593b0068bbe6fd7ac33d8086f180557f4d | refs/heads/master | 2021-02-10T03:31:56.939497 | 2020-04-02T08:34:46 | 2020-04-02T08:34:46 | 244,348,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | import hashlib
import urllib.parse as url_parser
from enum import Enum
class Colors(Enum):
    """ANSI escape sequences used to color terminal output."""

    RED = '\x1b[91m'    # bright red foreground
    GREEN = '\x1b[32m'  # green foreground
    END = '\x1b[0m'     # reset all attributes
def colorize(text: str, color: Colors) -> str:
    """Wrap *text* in the given color's escape code and reset afterwards."""
    return color.value + text + Colors.END.value
def normalize_url(base, url):
    """Resolve a possibly-relative *url* against *base* (standard URL join)."""
    resolved = url_parser.urljoin(base, url)
    return resolved
def url_to_md5(url):
    """Return the hex MD5 digest of *url*, usable as a stable identifier."""
    digest = hashlib.md5(url.encode())
    return digest.hexdigest()
def debug_log(message):
    """Emit a debug message to stdout.

    File-based logging (debug.log) was disabled in the original source;
    only console output remains.
    """
    print(message)
def error_log(message):
    """Emit an error message to stdout.

    File-based logging (error.log) was disabled in the original source;
    only console output remains.
    """
    print(message)
| [
"ilineserg@gmail.com"
] | ilineserg@gmail.com |
47ecc56f687300df5c2ad1bba94ec239449193de | d81c04e592aec9d9bb8ad48a7fe3c8d446852f17 | /StateMachine.py | 84fb8efa974ac75cbd20244b2befa59a77073f14 | [] | no_license | Swia/movingrepo | e5e141d49dbde243572d0fe0211c131d18812e6d | 18aeee5b3adc64ad151c70bb40512c96e2f1316f | refs/heads/main | 2023-05-06T12:59:06.456699 | 2021-05-25T14:30:13 | 2021-05-25T14:30:13 | 370,630,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | # -*- coding: utf-8 -*-
from enum import Enum
class States(Enum):
    """
    A simple state machine used to drive navigation between the
    @decorator handlers in alarms_bot.py.
    """

    STATE_START = 0                          # initial conversation state
    STATE_NEWALARM = 1                       # user is creating a new alarm
    STATE_SETTING_TIMEZONE_SEPARATE = 2      # timezone set on its own
    STATE_SETTING_TIMEZONE_FOR_ALARM = 3     # timezone set for a specific alarm
    STATE_SETTING_TIME = 4                   # waiting for the alarm time
    STATE_SETTING_TEXT = 5                   # waiting for the alarm text
"noreply@github.com"
] | noreply@github.com |
b7b344929e53f398c8535a2cacd7a38cd91a01ce | 4df1b2f1b5085e73223800ada334c8603b8be87b | /getdata.py | c0bc31df5a7f8447c7897fb0169999f1d245441a | [] | no_license | srejun/Project_Roboduct | 1c34355951cf0670ce513b332a70c8785f6ed154 | b656797d86773e84c0791a57751f8db973ffa5d8 | refs/heads/master | 2022-06-28T10:36:12.636777 | 2020-05-06T11:49:38 | 2020-05-06T11:49:38 | 261,741,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,183 | py | #Libraries
from __future__ import print_function
import RPi.GPIO as GPIO
import time
import xbox
# --- Hardware setup: log file, ultrasonic sensors, drive/servo PWM ---
# NOTE(review): 'file' shadows the Python builtin; kept for compatibility.
file = open("cleaningdata.txt","w")
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
#set GPIO Pins
GPIO_TRIGGERleft = 12
GPIO_ECHOleft = 16
GPIO_TRIGGERrigth = 24
GPIO_ECHOrigth = 26
#set GPIO direction (IN / OUT)
GPIO.setup(GPIO_TRIGGERleft, GPIO.OUT)
GPIO.setup(GPIO_ECHOleft, GPIO.IN)
GPIO.setup(GPIO_TRIGGERrigth, GPIO.OUT)
GPIO.setup(GPIO_ECHOrigth, GPIO.IN)
# Drive-motor control pins (board numbering).
w1 = 31
w2 = 33
w3 = 35
w4 = 37
w5 = 5
w6 = 7
servoPINleft = 38
servoPINright = 40
turn = 27 #power of left-right (0-100)
run = 34 #power of forward-backward (0-100)
# Duty cycle for the auxiliary/cleaning motor channel (pwm6).
works = 100
#t=time.sleep(0.2)
GPIO.setup(w1,GPIO.OUT)
GPIO.setup(w2,GPIO.OUT)
GPIO.setup(w3,GPIO.OUT)
GPIO.setup(w4,GPIO.OUT)
GPIO.setup(w5,GPIO.OUT)
GPIO.setup(w6,GPIO.OUT)
GPIO.setup(servoPINleft, GPIO.OUT)
GPIO.setup(servoPINright, GPIO.OUT)
pX = GPIO.PWM(servoPINleft, 50) # GPIO 17 for PWM with 50Hz
pY = GPIO.PWM(servoPINright, 50)
pX.start(8) # Initialization
pY.start(4)
# 120 Hz PWM per wheel channel; comments mark wheel positions
# (FL=front-left, BL=back-left, BR=back-right, FR=front-right).
pwm1 = GPIO.PWM(w1,120) # FL
pwm2 = GPIO.PWM(w2,120) # BL
pwm3 = GPIO.PWM(w3,120) # BR
pwm4 = GPIO.PWM(w4,120) # FR
pwm5 = GPIO.PWM(w5,120)
pwm6 = GPIO.PWM(w6,120)
# All motors idle (0% duty) at startup.
pwm1.start(0)
pwm2.start(0)
pwm3.start(0)
pwm4.start(0)
pwm5.start(0)
pwm6.start(0)
checkword = 'none'
action='none'
def work():
    """Turn the auxiliary motor pair on: channel 5 off, channel 6 at 'works' duty."""
    pwm5.ChangeDutyCycle(0)
    pwm6.ChangeDutyCycle(works)
def forward():
    """Drive forward at 'run' power (channels 2 and 3 energized)."""
    pwm1.ChangeDutyCycle(0)
    # run+2 on one side — presumably a trim offset so the robot tracks
    # straight; confirm against the actual hardware.
    pwm2.ChangeDutyCycle(run+2)
    pwm3.ChangeDutyCycle(run)
    pwm4.ChangeDutyCycle(0)
def backward():
    """Drive in reverse at 'run' power (channels 1 and 4 energized)."""
    pwm1.ChangeDutyCycle(run)
    pwm2.ChangeDutyCycle(0)
    pwm3.ChangeDutyCycle(0)
    pwm4.ChangeDutyCycle(run)
def left():
    """Turn left at 'turn' power (channels 2 and 4 energized — pivot turn, presumably)."""
    pwm1.ChangeDutyCycle(0)
    pwm2.ChangeDutyCycle(turn)
    pwm3.ChangeDutyCycle(0)
    pwm4.ChangeDutyCycle(turn)
def right():
    """Turn right at 'turn' power (channels 1 and 3 energized — mirror of left())."""
    pwm1.ChangeDutyCycle(turn)
    pwm2.ChangeDutyCycle(0)
    pwm3.ChangeDutyCycle(turn)
    pwm4.ChangeDutyCycle(0)
def lefthight():
    """Turn left at 1.5x 'turn' power (faster variant of left())."""
    pwm1.ChangeDutyCycle(0)
    pwm2.ChangeDutyCycle(turn*1.5)
    pwm3.ChangeDutyCycle(0)
    pwm4.ChangeDutyCycle(turn*1.5)
def righthight():
    """Turn right at 1.5x 'turn' power (faster variant of right())."""
    pwm1.ChangeDutyCycle(turn*1.5)
    pwm2.ChangeDutyCycle(0)
    pwm3.ChangeDutyCycle(turn*1.5)
    pwm4.ChangeDutyCycle(0)
def stop():
    """Stop all four drive channels (0% duty everywhere)."""
    pwm1.ChangeDutyCycle(0)
    pwm2.ChangeDutyCycle(0)
    pwm3.ChangeDutyCycle(0)
    pwm4.ChangeDutyCycle(0)
def fmtFloat(n):
    """Format *n* fixed-width: 6 characters, 3 decimal places (e.g. ' 1.500')."""
    return format(n, '6.3f')
def show(*args):
    """Print each argument in order with no separator and no line feed."""
    for value in args:
        print(value, end="")
def showIf(boolean, ifTrue, ifFalse=" "):
    """Print *ifTrue* when *boolean* is truthy, otherwise *ifFalse* (default: one space)."""
    show(ifTrue if boolean else ifFalse)
# Instantiate the controller
# NOTE(review): xbox.Joystick() blocks until the pad driver responds —
# presumably; confirm against the xbox module used on this device.
joy = xbox.Joystick()
# Show various axis and button states until Back button is pressed
print("Xbox controller sample: Press Back button to exit")
def distanceleft():
    """Measure distance (cm) on the left ultrasonic sensor.

    Pulses the trigger pin, then times the echo pin's high pulse and
    converts the elapsed time to centimeters via the speed of sound.
    NOTE(review): both while-loops busy-wait with no timeout — if the
    sensor is disconnected or never echoes, this hangs forever.
    """
    # set Trigger to HIGH
    GPIO.output(GPIO_TRIGGERleft, True)
    # set Trigger after 0.01ms to LOW
    time.sleep(0.00001)
    GPIO.output(GPIO_TRIGGERleft, False)
    StartTime = time.time()
    StopTime = time.time()
    # save StartTime
    while GPIO.input(GPIO_ECHOleft) == 0:
        StartTime = time.time()
    # save time of arrival
    while GPIO.input(GPIO_ECHOleft) == 1:
        StopTime = time.time()
    # time difference between start and arrival
    TimeElapsed = StopTime - StartTime
    # multiply with the sonic speed (34300 cm/s)
    # and divide by 2, because there and back
    distance = (TimeElapsed * 34300) / 2
    return distance
def distancerigth():
    """Measure distance (cm) on the right ultrasonic sensor.

    Mirror of distanceleft() using the right trigger/echo pins.
    NOTE(review): same caveat — the echo wait loops have no timeout.
    """
    # set Trigger to HIGH
    GPIO.output(GPIO_TRIGGERrigth, True)
    # set Trigger after 0.01ms to LOW
    time.sleep(0.00001)
    GPIO.output(GPIO_TRIGGERrigth, False)
    StartTime = time.time()
    StopTime = time.time()
    # save StartTime
    while GPIO.input(GPIO_ECHOrigth) == 0:
        StartTime = time.time()
    # save time of arrival
    while GPIO.input(GPIO_ECHOrigth) == 1:
        StopTime = time.time()
    # time difference between start and arrival
    TimeElapsed = StopTime - StartTime
    # multiply with the sonic speed (34300 cm/s)
    # and divide by 2, because there and back
    distance = (TimeElapsed * 34300) / 2
    return distance
if __name__ == '__main__':
    # Main control loop: poll the Xbox pad until Back is pressed, drive the
    # motors from A/B/X/Y, steer the servos from the D-pad, and record the
    # ultrasonic readings plus the last action to cleaningdata.txt.
    #print("Action")
    try:
        work()
        while not joy.Back():
            #work()
            action='none'
            # Show connection status
            # show("Connected:")
            # showIf(joy.connected(), "Y", "N")
            # Left analog stick
            # show(" Left X/Y:", fmtFloat(joy.leftX()), "/", fmtFloat(joy.leftY()))
            # Right trigger
            # show(" RightTrg:", fmtFloat(joy.rightTrigger()))
            # A/B/X/Y buttons
            show(" Buttons:")
            showIf(joy.A(), "A")
            # if(joy.A()==1):
            showIf(joy.B(), "B")
            showIf(joy.X(), "X")
            showIf(joy.Y(), "Y")
            # Face buttons: each drives for ~1 second, then stops, and tags
            # the 'action' that gets written to the log below.
            if(joy.A()==1):
                # pX.ChangeDutyCycle(7.2)
                # pY.ChangeDutyCycle(4.8)
                # show("down")
                backward()
                time.sleep(1)
                stop()
                action='backward'
                # time.sleep(1)
            elif(joy.Y()==1):
                # pX.ChangeDutyCycle(9)
                # pY.ChangeDutyCycle(3)
                # show("up")
                forward()
                time.sleep(1)
                stop()
                action='forward'
                # time.sleep(1)
            elif(joy.X()==1):
                left()
                time.sleep(1)
                stop()
                action='left'
            elif(joy.B()==1):
                right()
                time.sleep(1)
                stop()
                action='right'
            # Dpad U/D/L/R
            show(" Dpad:")
            showIf(joy.dpadUp(), "U")
            showIf(joy.dpadDown(), "D")
            showIf(joy.dpadLeft(), "L")
            showIf(joy.dpadRight(), "R")
            # D-pad up/down moves the servo pair; left/right are disabled.
            if(joy.dpadUp()==1):
                # forward()
                # time.sleep(1)
                # stop()
                # action='forward'
                pX.ChangeDutyCycle(9)
                pY.ChangeDutyCycle(3)
                show("up")
            elif(joy.dpadDown()==1):
                pX.ChangeDutyCycle(8.0)
                pY.ChangeDutyCycle(4.0)
                show("down")
                # backward()
                # time.sleep(1)
                # stop()
                # action='backward'
            elif(joy.dpadLeft()==1):
                show("")
                # left()
                # time.sleep(1)
                # stop()
                # action='left'
            elif(joy.dpadRight()==1):
                show("")
                # right()
                # time.sleep(1)
                # stop()
                # action='right'
            # Move cursor back to start of line
            show(chr(13))
            # Sample both sensors and append one record per loop iteration.
            distleft = distanceleft()
            print ("Distanceleft = %.1f cm" % distleft)
            distrigth = distancerigth()
            print ("Distancerigth = %.1f cm" % distrigth)
            print ("action = %s" %action)
            time.sleep(2)
            file.write("distLeft %.2f\r\n" %distleft)
            file.write("distRigth %.2f\r\n" %distrigth)
            file.write("action %s\r\n" %action)
        # Close out when done
        joy.close()
        file.close()
    # while True:
    #
    # distleft = distanceleft()
    # print ("Measured Distanceleft = %.1f cm" % distleft)
    # distrigth = distancerigth()
    # print ("Measured Distancerigth = %.1f cm" % distrigth)
    # time.sleep(2)
    # file.write("distLeft %.2f\r\n" %distleft)
    # file.write("distRigth %.2f\r\n" %distrigth)
    ## time.sleep(2)
    # print("Action")
    # file.close()
    # Reset by pressing CTRL + C
    except KeyboardInterrupt:
        print("Measurement stopped by User")
        GPIO.cleanup()
| [
"noreply@github.com"
] | noreply@github.com |
1b29e17738ce8017bc364637474cffdf18602a34 | 2ad64e7398057a09c2a5b8543199f3781c515e45 | /Pilha.py | 1e39a66b1b69ab75517b19e2d8a114afc785b3a4 | [] | no_license | edinhograno/provadepython | e56281657d0b27d0ecf327ab5befde12323a9075 | 55ab15c451a760dbfef0afa561b1bdea5c66186d | refs/heads/master | 2023-06-25T20:08:19.564672 | 2021-07-07T13:43:11 | 2021-07-07T13:43:11 | 383,813,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | from Livros import Livro
class Pilha:
def __init__(self):
self.top = None
self._size = 0
def adiciona(self, id, nome, autor):
node = Livro(id, nome, autor)
node.next = self.top
self.top = node
self._size = self._size + 1
def remove(self):
if self._size > 0:
node = self.top
self.top = self.top.next
self._size = self._size - 1
return node.titulo
raise IndexError("A pilha está vazia")
def imprimir(self):
linha = "========="
hook = ""
pointer = self.top
while(pointer):
hook = hook + str(f" \n Id: {pointer.id} \n Titulo: {pointer.titulo} \n Autor: {pointer.autor.nome}\n {linha}") + "\n"
pointer = pointer.next
return hook | [
"granomotorista@gmail.com"
] | granomotorista@gmail.com |
562d6b667658cc8ca7127a940db800debd92f225 | eccc9f30b406903761c85fa8edf239b809805cf0 | /listings/migrations/0001_initial.py | 5c2e992fd3779b68d2c904bb4d8a262cd3107f4f | [] | no_license | InnaAndreeva/real_estate_django | 40f9510155476f7e4ea135f520112539f2845f89 | 90f9414d76c901c73c412335ebca39610040466a | refs/heads/main | 2023-01-14T05:51:47.931878 | 2020-11-24T10:32:05 | 2020-11-24T10:32:05 | 315,594,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,161 | py | # Generated by Django 2.1.7 on 2019-03-25 18:38
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration creating the Listing model.
    # NOTE(review): the field name 'badrooms' (sic, 'bedrooms') is part of
    # the recorded schema history — renaming it requires a new migration,
    # so it is intentionally left as-is here.

    initial = True

    dependencies = [
        ('realtors', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Listing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('address', models.CharField(max_length=200)),
                ('city', models.CharField(max_length=100)),
                ('state', models.CharField(max_length=100)),
                ('zipcode', models.CharField(max_length=20)),
                ('description', models.TextField(blank=True)),
                ('price', models.IntegerField()),
                ('badrooms', models.IntegerField()),
                ('bathrooms', models.DecimalField(decimal_places=1, max_digits=2)),
                ('garage', models.IntegerField(default=0)),
                ('sqft', models.IntegerField()),
                ('lot_size', models.DecimalField(decimal_places=1, max_digits=5)),
                ('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d')),
                ('photo_1', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
                ('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
                ('photo_3', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
                ('photo_4', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
                ('photo_5', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
                ('photo_6', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
                ('is_published', models.BooleanField(default=True)),
                ('list_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('realtor', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='realtors.Realtor')),
            ],
        ),
    ]
"innaandreeva17yo@gmail.com"
] | innaandreeva17yo@gmail.com |
c1b71ce4bf116be38058532866d68049bfa605b1 | 88ea6ae5a8f97e3771490583d8acecdbe2877fd8 | /zips/plugin.video.vistatv-ini-maker/main.py | 773a4185cc39459dd2f2a721e93b53361a46dfec | [] | no_license | staycanuca/PersonalDataVistaTV | 26497a29e6f8b86592609e7e950d6156aadf881c | 4844edbfd4ecfc1d48e31432c39b9ab1b3b1a222 | refs/heads/master | 2021-01-25T14:46:25.763952 | 2018-03-03T10:48:06 | 2018-03-03T10:48:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,546 | py | from xbmcswift2 import Plugin
from xbmcswift2 import actions
import xbmc,xbmcaddon,xbmcvfs,xbmcgui
import re
from rpc import RPC
import requests
import random
import sqlite3
from datetime import datetime,timedelta
import time
#import urllib
import HTMLParser
import xbmcplugin
#import xml.etree.ElementTree as ET
#import sqlite3
import os
#import shutil
#from rpc import RPC
from types import *
# Global xbmcswift2 plugin instance; all @plugin.route handlers hang off it.
plugin = Plugin()
# When True, the configured skin view mode is applied after plugin.run().
big_list_view = False
def log2(v):
    """Write the raw repr of *v* to the Kodi log."""
    text = repr(v)
    xbmc.log(text)
def log(v):
    """Write repr(v) to the Kodi log, breaking the line after each comma."""
    pretty = repr(v).replace(',', ',\n')
    xbmc.log(pretty)
def get_icon_path(icon_name):
    """Return the absolute path of a bundled resources/img/<icon_name>.png icon."""
    base = xbmcaddon.Addon().getAddonInfo("path")
    return os.path.join(base, 'resources', 'img', icon_name + ".png")
def remove_formatting(label):
    """Strip Kodi [B]/[I] and [COLOR ...] formatting tags from *label*."""
    for pattern in (r"\[/?[BI]\]", r"\[/?COLOR.*?\]"):
        label = re.sub(pattern, '', label)
    return label
@plugin.route('/addon/<id>')
def addon(id):
    """List every stored stream of one addon section as a playable item."""
    stored = plugin.get_storage(id)
    return [
        {
            'label': name,
            'path': stored[name],
            'thumbnail': get_icon_path('tv'),
            'is_playable': True,
        }
        for name in sorted(stored)
    ]
@plugin.route('/player')
def player():
    """Re-parse the generated INI file into per-addon storages and list the
    addon sections found in it.

    Side effects: clears and repopulates the 'addons' storage and one
    storage per [section] in the INI file.
    """
    # Prompt for settings if the output folder was never configured.
    if not plugin.get_setting('addons.folder'):
        dialog = xbmcgui.Dialog()
        dialog.notification("Echo INI Creator", "Set Folder",xbmcgui.NOTIFICATION_ERROR )
        xbmcaddon.Addon ('plugin.video.vistatv-ini-maker').openSettings()
    addons = plugin.get_storage("addons")
    for a in addons.keys():
        add = plugin.get_storage(a)
        add.clear()
    addons.clear()
    folder = plugin.get_setting("addons.folder")
    file = plugin.get_setting("addons.file")
    filename = os.path.join(folder,file)
    f = xbmcvfs.File(filename,"rb")
    lines = f.read().splitlines()
    addon = None
    # INI parsing: '[id]' starts a section, 'name=url' adds a stream to it.
    for line in lines:
        if line.startswith('['):
            a = line.strip('[]')
            addons[a] = a
            addon = plugin.get_storage(a)
            addon.clear()
        elif "=" in line:
            (name,url) = line.split('=',1)
            if url and addon is not None:
                addon[name] = url
    items = []
    for id in sorted(addons):
        items.append(
        {
            'label': id,
            'path': plugin.url_for('addon',id=id),
            'thumbnail':get_icon_path('tv'),
        })
    return items
@plugin.route('/play/<url>')
def play(url):
    """Hand *url* to Kodi's built-in media player."""
    command = 'PlayMedia(%s)' % url
    xbmc.executebuiltin(command)
@plugin.route('/pvr_subscribe')
def pvr_subscribe():
    """Enable PVR channel export in settings and refresh the listing."""
    plugin.set_setting("pvr.subscribe","true")
    xbmc.executebuiltin('Container.Refresh')
@plugin.route('/pvr_unsubscribe')
def pvr_unsubscribe():
    """Disable PVR channel export in settings and refresh the listing."""
    plugin.set_setting("pvr.subscribe","false")
    xbmc.executebuiltin('Container.Refresh')
@plugin.route('/add_folder/<id>/<path>')
def add_folder(id, path):
    """Subscribe *path* (belonging to addon *id*) and refresh the listing."""
    subscriptions = plugin.get_storage('folders')
    subscriptions[path] = id
    xbmc.executebuiltin('Container.Refresh')
@plugin.route('/remove_folder/<id>/<path>')
def remove_folder(id,path):
    """Unsubscribe *path* from the folders storage and refresh the listing.

    Uses pop() rather than del: a stale context-menu entry can reference a
    path that was already unsubscribed, and del would raise KeyError.
    """
    folders = plugin.get_storage('folders')
    folders.pop(path, None)
    xbmc.executebuiltin('Container.Refresh')
@plugin.route('/clear')
def clear():
    """Drop every folder subscription."""
    plugin.get_storage('folders').clear()
@plugin.route('/folder/<id>/<path>')
def folder(id,path):
    """Browse one directory of addon *id*: subdirectories first (with
    subscribe/unsubscribe context menus), then playable files.

    Subscribed directories are highlighted in red bold.
    """
    folders = plugin.get_storage('folders')
    response = RPC.files.get_directory(media="files", directory=path, properties=["thumbnail"])
    files = response["files"]
    dirs = dict([[remove_formatting(f["label"]), f["file"]] for f in files if f["filetype"] == "directory"])
    links = {}
    thumbnails = {}
    for f in files:
        if f["filetype"] == "file":
            label = remove_formatting(f["label"])
            file = f["file"]
            # Disambiguate duplicate labels by appending dots.
            while (label in links):
                label = "%s." % label
            links[label] = file
            thumbnails[label] = f["thumbnail"]
    items = []
    # NOTE(review): the loop variable below shadows the 'path' parameter.
    for label in sorted(dirs):
        path = dirs[label]
        context_items = []
        if path in folders:
            fancy_label = "[COLOR red][B]%s[/B][/COLOR] " % label
            context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Unsubscribe', 'XBMC.RunPlugin(%s)' % (plugin.url_for(remove_folder, id=id, path=path))))
        else:
            fancy_label = "[B]%s[/B]" % label
            context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Subscribe', 'XBMC.RunPlugin(%s)' % (plugin.url_for(add_folder, id=id, path=path))))
        items.append(
        {
            'label': fancy_label,
            'path': plugin.url_for('folder',id=id, path=path),
            'thumbnail': get_icon_path('tv'),
            'context_menu': context_items,
        })
    for label in sorted(links):
        items.append(
        {
            'label': label,
            'path': plugin.url_for('play',url=links[label]),
            'thumbnail': thumbnails[label],
        })
    return items
@plugin.route('/pvr')
def pvr():
    """List every PVR channel (radio + tv) as a playable pvr:// path.

    NOTE(review): channel names come from JSON-RPC while stream filenames
    come from the virtual filesystem; the code assumes both enumerate
    channels in the same order (urls[index]) — verify on the target box.
    """
    index = 0
    urls = []
    channels = {}
    for group in ["radio","tv"]:
        urls = urls + xbmcvfs.listdir("pvr://channels/%s/All channels/" % group)[1]
    for group in ["radio","tv"]:
        groupid = "all%s" % group
        json_query = RPC.PVR.get_channels(channelgroupid=groupid, properties=[ "thumbnail", "channeltype", "hidden", "locked", "channel", "lastplayed", "broadcastnow" ] )
        if "channels" in json_query:
            for channel in json_query["channels"]:
                channelname = channel["label"]
                # channelid / channellogo are computed but currently unused.
                channelid = channel["channelid"]-1
                channellogo = channel['thumbnail']
                streamUrl = urls[index]
                index = index + 1
                url = "pvr://channels/%s/All channels/%s" % (group,streamUrl)
                channels[url] = channelname
    items = []
    # Sort by channel name for display.
    for url in sorted(channels, key=lambda x: channels[x]):
        name = channels[url]
        items.append(
        {
            'label': name,
            'path': url,
            'is_playable': True,
        })
    return items
@plugin.route('/subscribe')
def subscribe():
    """List a PVR pseudo-entry plus every installed video/audio addon, each
    with a Subscribe/Unsubscribe context menu; subscribed entries are shown
    in red bold.
    """
    folders = plugin.get_storage('folders')
    # Collect the set of addon ids that have at least one subscribed path.
    ids = {}
    for folder in folders:
        id = folders[folder]
        ids[id] = id
    all_addons = []
    for type in ["xbmc.addon.video", "xbmc.addon.audio"]:
        response = RPC.addons.get_addons(type=type,properties=["name", "thumbnail"])
        if "addons" in response:
            found_addons = response["addons"]
            all_addons = all_addons + found_addons
    # De-duplicate addons that appear in both the video and audio lists.
    seen = set()
    addons = []
    for addon in all_addons:
        if addon['addonid'] not in seen:
            addons.append(addon)
            seen.add(addon['addonid'])
    items = []
    pvr = plugin.get_setting('pvr.subscribe')
    context_items = []
    label = "PVR"
    if pvr == "true":
        fancy_label = "[COLOR red][B]%s[/B][/COLOR] " % label
        context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Unsubscribe', 'XBMC.RunPlugin(%s)' % (plugin.url_for(pvr_unsubscribe))))
    else:
        fancy_label = "[B]%s[/B]" % label
        context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Subscribe', 'XBMC.RunPlugin(%s)' % (plugin.url_for(pvr_subscribe))))
    items.append(
        {
            'label': fancy_label,
            'path': plugin.url_for('pvr'),
            'thumbnail':get_icon_path('tv'),
            'context_menu': context_items,
        })
    addons = sorted(addons, key=lambda addon: remove_formatting(addon['name']).lower())
    for addon in addons:
        label = remove_formatting(addon['name'])
        id = addon['addonid']
        path = "plugin://%s" % id
        context_items = []
        if id in ids:
            fancy_label = "[COLOR red][B]%s[/B][/COLOR] " % label
            context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Unsubscribe', 'XBMC.RunPlugin(%s)' % (plugin.url_for(remove_folder, id=id, path=path))))
        else:
            fancy_label = "[B]%s[/B]" % label
            context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Subscribe', 'XBMC.RunPlugin(%s)' % (plugin.url_for(add_folder, id=id, path=path))))
        items.append(
        {
            'label': fancy_label,
            'path': plugin.url_for('folder',id=id, path=path),
            'thumbnail': get_icon_path('tv'),
            'context_menu': context_items,
        })
    return items
@plugin.route('/update')
def update():
    """Walk every subscribed folder (and optionally the PVR channels) and
    write the collected streams to the configured INI file, one [addon-id]
    section per source with sanitized ``channel=url`` lines.
    """
    # Prompt for settings if the output folder was never configured.
    if not plugin.get_setting('addons.folder'):
        dialog = xbmcgui.Dialog()
        dialog.notification("Echo INI Creator", "Set Folder",xbmcgui.NOTIFICATION_ERROR )
        xbmcaddon.Addon ('plugin.video.vistatv-ini-maker').openSettings()
    folders = plugin.get_storage('folders')
    streams = {}
    for folder in folders:
        log("[plugin.video.vistatv-ini-maker] " + folder)
        path = folder
        id = folders[folder]
        if not id in streams:
            streams[id] = {}
        response = RPC.files.get_directory(media="files", directory=path, properties=["thumbnail"])
        if not 'error' in response:
            files = response["files"]
            links = {}
            thumbnails = {}
            for f in files:
                if f["filetype"] == "file":
                    label = remove_formatting(f["label"])
                    file = f["file"]
                    # Disambiguate duplicate labels by appending dots.
                    while (label in links):
                        label = "%s." % label
                    links[label] = file
                    thumbnails[label] = f["thumbnail"]
                    streams[id][label] = file
    # Optionally export the PVR channel list under this addon's own id.
    if plugin.get_setting("pvr.subscribe") == "true":
        streams["plugin.video.vistatv-ini-maker"] = {}
        items = pvr()
        for item in items:
            name = item["label"]
            url = item["path"]
            streams["plugin.video.vistatv-ini-maker"][name] = url
    folder = plugin.get_setting("addons.folder")
    file = plugin.get_setting("addons.file")
    filename = os.path.join(folder,file)
    f = xbmcvfs.File(filename,"wb")
    # Characters that would break the INI syntax or channel matching; each
    # occurrence is replaced with a space before writing.
    naughty = [":","!",'"',"$","%","^","&","*","(",")","-","_","=","+","[","]","{","}","#","~","@",";",":","/","?",".",">",",","<","|",","]
    # streams maps addon-id -> {channel-name: url}.
    for id in sorted(streams):
        # Section header, e.g. [plugin.video.sportie].
        line = "[%s]\n" % id
        f.write(line.encode("utf8"))
        channels = streams[id]
        for channel in sorted(channels):
            url = channels[channel]
            for item in naughty:
                channel = channel.replace(item,' ')
            # BUG FIX: the original collapsed runs of spaces with a
            # `while "double-space" in channel: channel.replace(...)` loop,
            # which spins forever if the replacement ever fails to shrink
            # the string.  split()/join() collapses all whitespace runs and
            # trims both ends in one deterministic pass.
            channel = " ".join(channel.split())
            # Skip names that were reduced to nothing by sanitization.
            if len(channel) >= 1:
                line = "%s=%s\n" % (channel,url)
                f.write(line.encode("utf8"))
    f.close()
    xbmcgui.Dialog().notification("Echo INI Creator", "Finished Update")
@plugin.route('/search/<what>')
def search(what):
    """Search the generated INI for channels matching *what* and list them
    color-coded by match quality: green = exact name, orange = substring,
    red = substring ignoring spaces.
    """
    if not what:
        return
    addons = plugin.get_storage("addons")
    folder = plugin.get_setting("addons.folder")
    file = plugin.get_setting("addons.file")
    filename = os.path.join(folder,file)
    f = xbmcvfs.File(filename,"rb")
    lines = f.read().splitlines()
    addon = None
    # Re-parse the INI into per-addon storages before searching.
    for line in lines:
        if line.startswith('['):
            a = line.strip('[]')
            addons[a] = a
            addon = plugin.get_storage(a)
            addon.clear()
        elif "=" in line:
            (name,url) = line.split('=',1)
            if url and addon is not None:
                addon[name] = url
    items = []
    for a in addons.keys():
        add = plugin.get_storage(a)
        log2(add.keys())
        exact = [x for x in add.keys() if x.lower() == what.lower()]
        log2(exact)
        partial = [x for x in add.keys() if what.lower() in x.lower()]
        ignore_space = [x for x in add.keys() if re.sub(' ','',what).lower() in re.sub(' ','',x).lower()]
        # NOTE(review): 'found' is computed but never used.
        found = exact + partial
        for f in sorted(set(exact)):
            items.append({
                "label": "[COLOR green]%s [%s][/COLOR]" % (f,a),
                "path" : add[f],
                "is_playable" : True,
            })
        for f in sorted(set(partial)-set(exact)):
            items.append({
                "label": "[COLOR orange]%s [%s][/COLOR]" % (f,a),
                "path" : add[f],
                "is_playable" : True,
            })
        for f in sorted(set(ignore_space)-set(partial)-set(exact)):
            items.append({
                "label": "[COLOR red]%s [%s][/COLOR]" % (f,a),
                "path" : add[f],
                "is_playable" : True,
            })
    return items
@plugin.route('/search_dialog')
def search_dialog():
    """Prompt the user for a query and delegate to the search listing."""
    query = xbmcgui.Dialog().input("Search")
    if query:
        return search(query)
@plugin.route('/add_channel')
def add_channel():
    """Ask for a channel name, store it with an empty URL, then refresh."""
    channels = plugin.get_storage('channels')
    name = xbmcgui.Dialog().input("Add Channel")
    if name:
        channels[name] = ""
    # Refresh happens even when the dialog was cancelled (matches original).
    xbmc.executebuiltin('Container.Refresh')
@plugin.route('/remove_channel')
def remove_channel():
    """Show a selection dialog of stored channels and delete the chosen one."""
    channels = plugin.get_storage('channels')
    channel_list = sorted(channels)
    d = xbmcgui.Dialog()
    which = d.select("Remove Channel",channel_list)
    # -1 means the dialog was cancelled.
    if which == -1:
        return
    channel = channel_list[which]
    del channels[channel]
    xbmc.executebuiltin('Container.Refresh')
@plugin.route('/remove_this_channel/<channel>')
def remove_this_channel(channel):
    """Delete *channel* from the stored channel list and refresh.

    Uses pop() rather than del so a stale context-menu entry naming an
    already-removed channel does not raise KeyError.
    """
    channels = plugin.get_storage('channels')
    channels.pop(channel, None)
    xbmc.executebuiltin('Container.Refresh')
@plugin.route('/clear_channels')
def clear_channels():
    """Drop every stored channel and refresh the listing."""
    plugin.get_storage('channels').clear()
    xbmc.executebuiltin('Container.Refresh')
@plugin.route('/import_channels')
def import_channels():
    """Let the user pick an .ini file and import its channel names (URLs
    are discarded — channels are resolved later via stream_search).
    """
    channels = plugin.get_storage('channels')
    d = xbmcgui.Dialog()
    filename = d.browse(1, 'Import Channels', 'files', '', False, False, 'special://home/')
    if not filename:
        return
    if filename.endswith('.ini'):
        lines = xbmcvfs.File(filename,'rb').read().splitlines()
        for line in lines:
            # Skip section headers and comments; keep 'name=url' lines.
            if not line.startswith('[') and not line.startswith('#') and "=" in line:
                channel_url = line.split('=',1)
                if len(channel_url) == 2:
                    name = channel_url[0]
                    channels[name] = ""
    xbmc.executebuiltin('Container.Refresh')
@plugin.route('/stream_search/<channel>')
def stream_search(channel):
    """Find streams in the generated INI whose name loosely matches
    *channel* (case- and space-insensitive substring either way), let the
    user pick one, and resolve it for playback.
    """
    #folders = plugin.get_storage('folders')
    streams = {}
    folder = plugin.get_setting("addons.folder")
    file = plugin.get_setting("addons.file")
    filename = os.path.join(folder,file)
    f = xbmcvfs.File(filename,"rb")
    lines = f.read().splitlines()
    # NOTE(review): 'addon' is only bound once a '[section]' line is seen;
    # a malformed file starting with 'name=url' would hit an unbound name.
    for line in lines:
        if line.startswith('['):
            addon = line.strip('[]')
            if addon not in streams:
                streams[addon] = {}
        elif "=" in line:
            (name,url) = line.split('=',1)
            if url and addon is not None:
                # Keyed by URL so duplicate names from one addon survive.
                streams[addon][url] = name
    channel_search = channel.lower().replace(' ','')
    stream_list = []
    for id in sorted(streams):
        files = streams[id]
        for f in sorted(files, key=lambda k: files[k]):
            label = files[f]
            label_search = label.lower().replace(' ','')
            if label_search in channel_search or channel_search in label_search:
                stream_list.append((id,f,label))
    labels = ["[%s] %s" % (x[0],x[2]) for x in stream_list]
    d = xbmcgui.Dialog()
    which = d.select(channel, labels)
    # -1 means the dialog was cancelled.
    if which == -1:
        return
    stream_name = stream_list[which][2]
    stream_link = stream_list[which][1]
    plugin.set_resolved_url(stream_link)
@plugin.route('/export_channels')
def export_channels():
    """Write every stored channel to a fixed export.ini, mapping each name
    to this plugin's stream_search URL (colons stripped from names).
    """
    channels = plugin.get_storage('channels')
    # NOTE(review): the export path is hard-coded to this addon's profile.
    f = xbmcvfs.File('special://profile/addon_data/plugin.video.vistatv-ini-maker/export.ini','wb')
    for channel in sorted(channels):
        url = plugin.url_for('stream_search',channel=channel)
        channel = channel.replace(':','')
        s = "%s=%s\n" % (channel,url)
        f.write(s)
    f.close()
@plugin.route('/channel_player')
def channel_player():
    """List every stored channel as a playable stream_search item, each with
    the full channel-management context menu.
    """
    channels = plugin.get_storage("channels")
    items = []
    for channel in sorted(channels):
        context_items = []
        context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Add Channel', 'XBMC.RunPlugin(%s)' % (plugin.url_for(add_channel))))
        context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Remove Channel', 'XBMC.RunPlugin(%s)' % (plugin.url_for(remove_this_channel, channel=channel))))
        context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Import Channels', 'XBMC.RunPlugin(%s)' % (plugin.url_for(import_channels))))
        context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Export Channels', 'XBMC.RunPlugin(%s)' % (plugin.url_for(export_channels))))
        context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Clear Channels', 'XBMC.RunPlugin(%s)' % (plugin.url_for(clear_channels))))
        items.append(
        {
            'label': channel,
            'path': plugin.url_for('stream_search',channel=channel),
            'thumbnail':get_icon_path('tv'),
            'is_playable': True,
            'context_menu': context_items,
        })
    return items
@plugin.route('/')
def index():
    """Root menu: Subscribe, Create (write the INI), Play (browse it), and
    Channels (the user-curated channel list).
    """
    items = []
    context_items = []
    context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Clear Subscriptions', 'XBMC.RunPlugin(%s)' % (plugin.url_for(clear))))
    items.append(
    {
        'label': "[COLOR red]Subscribe[/COLOR]",
        'path': plugin.url_for('subscribe'),
        'thumbnail':get_icon_path('tv'),
        'context_menu': context_items,
    })
    items.append(
    {
        'label': "[COLOR green]Create[/COLOR]",
        'path': plugin.url_for('update'),
        'thumbnail':get_icon_path('tv'),
    })
    items.append(
    {
        'label': "Play",
        'path': plugin.url_for('player'),
        'thumbnail':get_icon_path('tv'),
    })
    context_items = []
    context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Add Channel', 'XBMC.RunPlugin(%s)' % (plugin.url_for(add_channel))))
    context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Remove Channel', 'XBMC.RunPlugin(%s)' % (plugin.url_for(remove_channel))))
    context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Import Channels', 'XBMC.RunPlugin(%s)' % (plugin.url_for(import_channels))))
    context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Export Channels', 'XBMC.RunPlugin(%s)' % (plugin.url_for(export_channels))))
    context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Clear Channels', 'XBMC.RunPlugin(%s)' % (plugin.url_for(clear_channels))))
    items.append(
    {
        'label': "Channels",
        'path': plugin.url_for('channel_player'),
        'thumbnail':get_icon_path('tv'),
        'context_menu': context_items,
    })
    return items
if __name__ == '__main__':
    # Run the plugin; optionally apply the user's preferred skin view mode.
    plugin.run()
    if big_list_view:
        plugin.set_view_mode(int(plugin.get_setting('view_mode')))
"biglad@mgawow.co.uk"
] | biglad@mgawow.co.uk |
5df953e7136216e7adfa597079d091686b4fa538 | deb97b21457bc360563e09c7bbba235cdd915548 | /gitkit/commands/del_merged.py | de55050ed183a4ab19f91ae4bcc81325227a18e2 | [
"MIT"
] | permissive | akx/git-kit | e381ae5516a6f36f39d72af00e93aa5d4f0e985f | 8084d99c6a113aad56764b0907d157c6957a3977 | refs/heads/master | 2023-07-19T20:16:27.358018 | 2023-07-18T07:49:41 | 2023-07-18T07:49:41 | 22,340,212 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | import click
from gitkit.conf import sacred_branches
from gitkit.util.refs import get_main_branch
from gitkit.util.shell import get_lines, run
@click.command()
@click.argument("ref", required=False, default=None)
def del_merged(ref):
    """
    Delete merged branches.
    """
    base = ref if ref else get_main_branch()
    merged = set(get_lines(["git", "branch", "-l", "--merged", base]))
    for raw in merged:
        name = raw.strip("* ")
        # Never delete the base ref itself or any protected branch.
        if name == base or name in sacred_branches:
            continue
        run(["git", "branch", "-v", "-d", name])
| [
"akx@iki.fi"
] | akx@iki.fi |
5148b36fb7b804d585edaef072685b6c32aa6ce1 | 63e8a1c42aad04fa471d5dc92ee2308b511bf33c | /hotel/hotel/settings.py | a1964374a2b5d3a9b27a53f7058b5804bdf5645e | [] | no_license | sampathkumar0511/new_proj | 7a5dfcaf0aaa7f27b50a4d1d11012587d2818221 | 67944367d35da3639e35786d3d9842d97af2c3dd | refs/heads/main | 2023-01-14T12:37:15.549711 | 2020-11-21T16:47:10 | 2020-11-21T16:47:10 | 312,472,762 | 0 | 0 | null | 2020-11-21T16:47:11 | 2020-11-13T04:26:55 | Python | UTF-8 | Python | false | false | 3,073 | py | """
Django settings for hotel project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# from the environment before deploying.
SECRET_KEY = 'zlwnxe%ypi2m0z-5=(l4zj^v)-l1o%5h1*5kwa7ogs_+a4t&8v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    # Project-local app (listed first so its templates/static override defaults).
    'recipe',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hotel.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'hotel.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Development default: file-backed SQLite next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"sampath@sampaths-MacBook-Pro.local"
] | sampath@sampaths-MacBook-Pro.local |
e8dd578f213f88343398afdca104aa8d1e0c925b | 7020b4ff30cd1f35146235c1d0e74bb8f9a3c4b9 | /venv/Lib/site-packages/pymysql/connections.py | 7c90ce34e00b2a8abdfea1084d9fcca168f6bf9a | [] | no_license | Brian-Munene/HouseProject | 4f6c500738d733d88dc75b6e2849e80d85024197 | a6a41cfa8d59a9861e0659bce2da191d4eefe1df | refs/heads/master | 2023-02-09T06:47:53.011462 | 2021-09-14T09:48:09 | 2021-09-14T09:48:09 | 169,042,895 | 0 | 1 | null | 2023-02-02T06:14:37 | 2019-02-04T07:38:34 | Python | UTF-8 | Python | false | false | 49,033 | py | # Python implementation of the MySQL client-server protocol
# http://dev.mysql.com/doc/internals/en/client-server-protocol.html
# Error codes:
# http://dev.mysql.com/doc/refman/5.5/en/error-messages-client.html
from __future__ import print_function
from ._compat import PY2, range_type, text_type, str_type, JYTHON, IRONPYTHON
import errno
import io
import os
import socket
import struct
import sys
import traceback
import warnings
from . import _auth
from .charset import charset_by_name, charset_by_id
from .constants import CLIENT, COMMAND, CR, FIELD_TYPE, SERVER_STATUS
from . import converters
from .cursors import Cursor
from .optionfile import Parser
from .protocol import (
dump_packet, MysqlPacket, FieldDescriptorPacket, OKPacketWrapper,
EOFPacketWrapper, LoadLocalPacketWrapper
)
from .util import byte2int, int2byte
from . import err, VERSION_STRING
try:
import ssl
SSL_ENABLED = True
except ImportError:
ssl = None
SSL_ENABLED = False
try:
import getpass
DEFAULT_USER = getpass.getuser()
del getpass
except (ImportError, KeyError):
# KeyError occurs when there's no entry in OS database for a current user.
DEFAULT_USER = None
DEBUG = False
_py_version = sys.version_info[:2]
# Pick a bytes->str decoder that round-trips arbitrary bytes via surrogates.
# Unused on Python 2 (bytes pass through untouched there).
if PY2:
    pass
elif _py_version < (3, 6):
    # See http://bugs.python.org/issue24870
    # Emulate errors='surrogateescape' with a translate table, since the
    # codec path was slow before 3.6.
    _surrogateescape_table = [chr(i) if i < 0x80 else chr(i + 0xdc00) for i in range(256)]
    def _fast_surrogateescape(s):
        # Decode the bytes of *s* to str, mapping >=0x80 into the
        # U+DC00..U+DCFF surrogate range.
        return s.decode('latin1').translate(_surrogateescape_table)
else:
    def _fast_surrogateescape(s):
        # 3.6+: the built-in error handler is fast enough.
        return s.decode('ascii', 'surrogateescape')
# socket.makefile() in Python 2 is not usable because very inefficient and
# bad behavior about timeout.
# XXX: ._socketio doesn't work under IronPython.
if PY2 and not IRONPYTHON:
    # read method of file-like returned by sock.makefile() is very slow.
    # So we copy io-based one from Python 3.
    from ._socketio import SocketIO
    def _makefile(sock, mode):
        # Wrap the raw SocketIO in a buffered reader for efficient reads.
        return io.BufferedReader(SocketIO(sock, mode))
else:
    # socket.makefile in Python 3 is nice.
    def _makefile(sock, mode):
        return sock.makefile(mode)
TEXT_TYPES = {
FIELD_TYPE.BIT,
FIELD_TYPE.BLOB,
FIELD_TYPE.LONG_BLOB,
FIELD_TYPE.MEDIUM_BLOB,
FIELD_TYPE.STRING,
FIELD_TYPE.TINY_BLOB,
FIELD_TYPE.VAR_STRING,
FIELD_TYPE.VARCHAR,
FIELD_TYPE.GEOMETRY,
}
DEFAULT_CHARSET = 'utf8mb4'
MAX_PACKET_LEN = 2**24-1
def pack_int24(n):
    """Serialize *n* as a 3-byte little-endian integer (MySQL packet length field)."""
    packed = struct.pack('<I', n)
    # Low three bytes of the 32-bit little-endian encoding.
    return packed[0:3]
# https://dev.mysql.com/doc/internals/en/integer.html#packet-Protocol::LengthEncodedInteger
def lenenc_int(i):
    """Encode non-negative integer *i* as a MySQL LengthEncodedInteger.

    https://dev.mysql.com/doc/internals/en/integer.html#packet-Protocol::LengthEncodedInteger

    :param i: value to encode; must satisfy 0 <= i < 2**64.
    :return: the 1, 3, 4 or 9 byte wire encoding.
    :raise ValueError: if *i* is negative or does not fit in 8 bytes.
    """
    if (i < 0):
        raise ValueError("Encoding %d is less than 0 - no representation in LengthEncodedInteger" % i)
    elif (i < 0xfb):
        # Single byte; 0xfb-0xff are reserved markers, hence the cutoff.
        # struct.pack('B', ...) replaces the Py2-compat int2byte() shim so the
        # function depends only on struct, like every other branch here.
        return struct.pack('B', i)
    elif (i < (1 << 16)):
        return b'\xfc' + struct.pack('<H', i)
    elif (i < (1 << 24)):
        return b'\xfd' + struct.pack('<I', i)[:3]
    elif (i < (1 << 64)):
        return b'\xfe' + struct.pack('<Q', i)
    else:
        raise ValueError("Encoding %x is larger than %x - no representation in LengthEncodedInteger" % (i, (1 << 64)))
class Connection(object):
"""
Representation of a socket with a mysql server.
The proper way to get an instance of this class is to call
connect().
Establish a connection to the MySQL database. Accepts several
arguments:
:param host: Host where the database server is located
:param user: Username to log in as
:param password: Password to use.
:param database: Database to use, None to not use a particular one.
:param port: MySQL port to use, default is usually OK. (default: 3306)
:param bind_address: When the client has multiple network interfaces, specify
the interface from which to connect to the host. Argument can be
a hostname or an IP address.
:param unix_socket: Optionally, you can use a unix socket rather than TCP/IP.
:param read_timeout: The timeout for reading from the connection in seconds (default: None - no timeout)
:param write_timeout: The timeout for writing to the connection in seconds (default: None - no timeout)
:param charset: Charset you want to use.
:param sql_mode: Default SQL_MODE to use.
:param read_default_file:
Specifies my.cnf file to read these parameters from under the [client] section.
:param conv:
Conversion dictionary to use instead of the default one.
This is used to provide custom marshalling and unmarshaling of types.
See converters.
:param use_unicode:
Whether or not to default to unicode strings.
This option defaults to true for Py3k.
:param client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.
:param cursorclass: Custom cursor class to use.
:param init_command: Initial SQL statement to run when connection is established.
:param connect_timeout: Timeout before throwing an exception when connecting.
(default: 10, min: 1, max: 31536000)
:param ssl:
A dict of arguments similar to mysql_ssl_set()'s parameters.
:param read_default_group: Group to read from in the configuration file.
:param compress: Not supported
:param named_pipe: Not supported
:param autocommit: Autocommit mode. None means use server default. (default: False)
:param local_infile: Boolean to enable the use of LOAD DATA LOCAL command. (default: False)
:param max_allowed_packet: Max size of packet sent to server in bytes. (default: 16MB)
Only used to limit size of "LOAD LOCAL INFILE" data packet smaller than default (16KB).
:param defer_connect: Don't explicitly connect on contruction - wait for connect call.
(default: False)
:param auth_plugin_map: A dict of plugin names to a class that processes that plugin.
The class will take the Connection object as the argument to the constructor.
The class needs an authenticate method taking an authentication packet as
an argument. For the dialog plugin, a prompt(echo, prompt) method can be used
(if no authenticate method) for returning a string from the user. (experimental)
:param server_public_key: SHA256 authenticaiton plugin public key value. (default: None)
:param db: Alias for database. (for compatibility to MySQLdb)
:param passwd: Alias for password. (for compatibility to MySQLdb)
:param binary_prefix: Add _binary prefix on bytes and bytearray. (default: False)
See `Connection <https://www.python.org/dev/peps/pep-0249/#connection-objects>`_ in the
specification.
"""
_sock = None
_auth_plugin_name = ''
_closed = False
_secure = False
    def __init__(self, host=None, user=None, password="",
                 database=None, port=0, unix_socket=None,
                 charset='', sql_mode=None,
                 read_default_file=None, conv=None, use_unicode=None,
                 client_flag=0, cursorclass=Cursor, init_command=None,
                 connect_timeout=10, ssl=None, read_default_group=None,
                 compress=None, named_pipe=None,
                 autocommit=False, db=None, passwd=None, local_infile=False,
                 max_allowed_packet=16*1024*1024, defer_connect=False,
                 auth_plugin_map=None, read_timeout=None, write_timeout=None,
                 bind_address=None, binary_prefix=False, program_name=None,
                 server_public_key=None):
        """Validate and store connection parameters; connect unless defer_connect.

        See the class docstring for the meaning of each argument.
        """
        # Unicode results default to on for Python 3 only.
        if use_unicode is None and sys.version_info[0] > 2:
            use_unicode = True
        # MySQLdb-compatible aliases: db -> database, passwd -> password.
        if db is not None and database is None:
            database = db
        if passwd is not None and not password:
            password = passwd
        if compress or named_pipe:
            raise NotImplementedError("compress and named_pipe arguments are not supported")
        self._local_infile = bool(local_infile)
        if self._local_infile:
            client_flag |= CLIENT.LOCAL_FILES
        if read_default_group and not read_default_file:
            if sys.platform.startswith("win"):
                read_default_file = "c:\\my.ini"
            else:
                read_default_file = "/etc/my.cnf"
        if read_default_file:
            if not read_default_group:
                read_default_group = "client"
            cfg = Parser()
            cfg.read(os.path.expanduser(read_default_file))
            # Explicit keyword arguments take precedence over the option file.
            def _config(key, arg):
                if arg:
                    return arg
                try:
                    return cfg.get(read_default_group, key)
                except Exception:
                    return arg
            user = _config("user", user)
            password = _config("password", password)
            host = _config("host", host)
            database = _config("database", database)
            unix_socket = _config("socket", unix_socket)
            port = int(_config("port", port))
            bind_address = _config("bind-address", bind_address)
            charset = _config("default-character-set", charset)
            if not ssl:
                ssl = {}
            if isinstance(ssl, dict):
                for key in ["ca", "capath", "cert", "key", "cipher"]:
                    value = _config("ssl-" + key, ssl.get(key))
                    if value:
                        ssl[key] = value
        self.ssl = False
        if ssl:
            if not SSL_ENABLED:
                raise NotImplementedError("ssl module not found")
            self.ssl = True
            client_flag |= CLIENT.SSL
            self.ctx = self._create_ssl_ctx(ssl)
        self.host = host or "localhost"
        self.port = port or 3306
        self.user = user or DEFAULT_USER
        self.password = password or b""
        # Password is kept as raw bytes; latin1 preserves each code point.
        if isinstance(self.password, text_type):
            self.password = self.password.encode('latin1')
        self.db = database
        self.unix_socket = unix_socket
        self.bind_address = bind_address
        if not (0 < connect_timeout <= 31536000):
            raise ValueError("connect_timeout should be >0 and <=31536000")
        self.connect_timeout = connect_timeout or None
        if read_timeout is not None and read_timeout <= 0:
            raise ValueError("read_timeout should be >= 0")
        self._read_timeout = read_timeout
        if write_timeout is not None and write_timeout <= 0:
            raise ValueError("write_timeout should be >= 0")
        self._write_timeout = write_timeout
        if charset:
            self.charset = charset
            self.use_unicode = True
        else:
            self.charset = DEFAULT_CHARSET
            self.use_unicode = False
        # An explicit use_unicode argument overrides the charset-derived default.
        if use_unicode is not None:
            self.use_unicode = use_unicode
        self.encoding = charset_by_name(self.charset).encoding
        client_flag |= CLIENT.CAPABILITIES
        if self.db:
            client_flag |= CLIENT.CONNECT_WITH_DB
        self.client_flag = client_flag
        self.cursorclass = cursorclass
        self._result = None
        self._affected_rows = 0
        self.host_info = "Not connected"
        # specified autocommit mode. None means use server default.
        self.autocommit_mode = autocommit
        if conv is None:
            conv = converters.conversions
        # Need for MySQLdb compatibility.
        self.encoders = {k: v for (k, v) in conv.items() if type(k) is not int}
        self.decoders = {k: v for (k, v) in conv.items() if type(k) is int}
        self.sql_mode = sql_mode
        self.init_command = init_command
        self.max_allowed_packet = max_allowed_packet
        self._auth_plugin_map = auth_plugin_map or {}
        self._binary_prefix = binary_prefix
        self.server_public_key = server_public_key
        # Attributes reported to the server during the handshake.
        self._connect_attrs = {
            '_client_name': 'pymysql',
            '_pid': str(os.getpid()),
            '_client_version': VERSION_STRING,
        }
        if program_name:
            self._connect_attrs["program_name"] = program_name
        if defer_connect:
            self._sock = None
        else:
            self.connect()
def _create_ssl_ctx(self, sslp):
if isinstance(sslp, ssl.SSLContext):
return sslp
ca = sslp.get('ca')
capath = sslp.get('capath')
hasnoca = ca is None and capath is None
ctx = ssl.create_default_context(cafile=ca, capath=capath)
ctx.check_hostname = not hasnoca and sslp.get('check_hostname', True)
ctx.verify_mode = ssl.CERT_NONE if hasnoca else ssl.CERT_REQUIRED
if 'cert' in sslp:
ctx.load_cert_chain(sslp['cert'], keyfile=sslp.get('key'))
if 'cipher' in sslp:
ctx.set_ciphers(sslp['cipher'])
ctx.options |= ssl.OP_NO_SSLv2
ctx.options |= ssl.OP_NO_SSLv3
return ctx
    def close(self):
        """
        Send the quit message and close the socket.

        See `Connection.close() <https://www.python.org/dev/peps/pep-0249/#Connection.close>`_
        in the specification.

        :raise Error: If the connection is already closed.
        """
        if self._closed:
            raise err.Error("Already closed")
        self._closed = True
        if self._sock is None:
            return
        # COM_QUIT has no reply; write is best-effort, teardown always happens.
        send_data = struct.pack('<iB', 1, COMMAND.COM_QUIT)
        try:
            self._write_bytes(send_data)
        except Exception:
            pass
        finally:
            self._force_close()
@property
def open(self):
"""Return True if the connection is open"""
return self._sock is not None
def _force_close(self):
"""Close connection without QUIT message"""
if self._sock:
try:
self._sock.close()
except: # noqa
pass
self._sock = None
self._rfile = None
__del__ = _force_close
def autocommit(self, value):
self.autocommit_mode = bool(value)
current = self.get_autocommit()
if value != current:
self._send_autocommit_mode()
def get_autocommit(self):
return bool(self.server_status &
SERVER_STATUS.SERVER_STATUS_AUTOCOMMIT)
def _read_ok_packet(self):
pkt = self._read_packet()
if not pkt.is_ok_packet():
raise err.OperationalError(2014, "Command Out of Sync")
ok = OKPacketWrapper(pkt)
self.server_status = ok.server_status
return ok
def _send_autocommit_mode(self):
"""Set whether or not to commit after every execute()"""
self._execute_command(COMMAND.COM_QUERY, "SET AUTOCOMMIT = %s" %
self.escape(self.autocommit_mode))
self._read_ok_packet()
    def begin(self):
        """Begin transaction."""
        self._execute_command(COMMAND.COM_QUERY, "BEGIN")
        self._read_ok_packet()
    def commit(self):
        """
        Commit changes to stable storage.

        See `Connection.commit() <https://www.python.org/dev/peps/pep-0249/#commit>`_
        in the specification.
        """
        self._execute_command(COMMAND.COM_QUERY, "COMMIT")
        self._read_ok_packet()
    def rollback(self):
        """
        Roll back the current transaction.

        See `Connection.rollback() <https://www.python.org/dev/peps/pep-0249/#rollback>`_
        in the specification.
        """
        self._execute_command(COMMAND.COM_QUERY, "ROLLBACK")
        self._read_ok_packet()
def show_warnings(self):
"""Send the "SHOW WARNINGS" SQL command."""
self._execute_command(COMMAND.COM_QUERY, "SHOW WARNINGS")
result = MySQLResult(self)
result.read()
return result.rows
def select_db(self, db):
"""
Set current db.
:param db: The name of the db.
"""
self._execute_command(COMMAND.COM_INIT_DB, db)
self._read_ok_packet()
    def escape(self, obj, mapping=None):
        """Escape whatever value you pass to it.

        Non-standard, for internal use; do not use this in your applications.
        """
        # NOTE: on Python 2, str_type (basestring) also matches bytes, so the
        # text branch must stay first; reordering would change PY2 behavior.
        if isinstance(obj, str_type):
            return "'" + self.escape_string(obj) + "'"
        if isinstance(obj, (bytes, bytearray)):
            ret = self._quote_bytes(obj)
            # _binary hints the server not to charset-convert the literal.
            if self._binary_prefix:
                ret = "_binary" + ret
            return ret
        return converters.escape_item(obj, self.charset, mapping=mapping)
def literal(self, obj):
"""Alias for escape()
Non-standard, for internal use; do not use this in your applications.
"""
return self.escape(obj, self.encoders)
def escape_string(self, s):
if (self.server_status &
SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES):
return s.replace("'", "''")
return converters.escape_string(s)
def _quote_bytes(self, s):
if (self.server_status &
SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES):
return "'%s'" % (_fast_surrogateescape(s.replace(b"'", b"''")),)
return converters.escape_bytes(s)
def cursor(self, cursor=None):
"""
Create a new cursor to execute queries with.
:param cursor: The type of cursor to create; one of :py:class:`Cursor`,
:py:class:`SSCursor`, :py:class:`DictCursor`, or :py:class:`SSDictCursor`.
None means use Cursor.
"""
if cursor:
return cursor(self)
return self.cursorclass(self)
    def __enter__(self):
        """Context manager that returns a Cursor"""
        # Deprecated API kept for backward compatibility; __exit__ pairs
        # with this by committing or rolling back.
        warnings.warn(
            "Context manager API of Connection object is deprecated; Use conn.begin()",
            DeprecationWarning)
        return self.cursor()
def __exit__(self, exc, value, traceback):
"""On successful exit, commit. On exception, rollback"""
if exc:
self.rollback()
else:
self.commit()
# The following methods are INTERNAL USE ONLY (called from Cursor)
    def query(self, sql, unbuffered=False):
        """Run *sql* and return the affected row count (internal; used by Cursor)."""
        # Encode text SQL to bytes; surrogateescape (Py3) round-trips bytes
        # previously smuggled into str values.
        if isinstance(sql, text_type) and not (JYTHON or IRONPYTHON):
            if PY2:
                sql = sql.encode(self.encoding)
            else:
                sql = sql.encode(self.encoding, 'surrogateescape')
        self._execute_command(COMMAND.COM_QUERY, sql)
        self._affected_rows = self._read_query_result(unbuffered=unbuffered)
        return self._affected_rows
def next_result(self, unbuffered=False):
self._affected_rows = self._read_query_result(unbuffered=unbuffered)
return self._affected_rows
    def affected_rows(self):
        """Return the row count affected by the most recent query."""
        return self._affected_rows
def kill(self, thread_id):
arg = struct.pack('<I', thread_id)
self._execute_command(COMMAND.COM_PROCESS_KILL, arg)
return self._read_ok_packet()
    def ping(self, reconnect=True):
        """
        Check if the server is alive.

        :param reconnect: If the connection is closed, reconnect.
        :raise Error: If the connection is closed and reconnect=False.
        """
        if self._sock is None:
            if reconnect:
                self.connect()
                # Just connected: a failing ping now is a real error, so the
                # recursive retry below must not reconnect again.
                reconnect = False
            else:
                raise err.Error("Already closed")
        try:
            self._execute_command(COMMAND.COM_PING, "")
            self._read_ok_packet()
        except Exception:
            if reconnect:
                self.connect()
                # One retry on the fresh connection; recursion depth is 1.
                self.ping(False)
            else:
                raise
def set_charset(self, charset):
# Make sure charset is supported.
encoding = charset_by_name(charset).encoding
self._execute_command(COMMAND.COM_QUERY, "SET NAMES %s" % self.escape(charset))
self._read_packet()
self.charset = charset
self.encoding = encoding
    def connect(self, sock=None):
        """Open the transport (or adopt *sock*), then handshake and authenticate."""
        self._closed = False
        try:
            if sock is None:
                if self.unix_socket:
                    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                    sock.settimeout(self.connect_timeout)
                    sock.connect(self.unix_socket)
                    self.host_info = "Localhost via UNIX socket"
                    # Local domain socket: no TLS needed for auth plugins
                    # that require a secure channel.
                    self._secure = True
                    if DEBUG: print('connected using unix_socket')
                else:
                    kwargs = {}
                    if self.bind_address is not None:
                        kwargs['source_address'] = (self.bind_address, 0)
                    # Retry only on EINTR; any other error propagates.
                    while True:
                        try:
                            sock = socket.create_connection(
                                (self.host, self.port), self.connect_timeout,
                                **kwargs)
                            break
                        except (OSError, IOError) as e:
                            if e.errno == errno.EINTR:
                                continue
                            raise
                    self.host_info = "socket %s:%d" % (self.host, self.port)
                    if DEBUG: print('connected using socket')
                    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                # Connect timeout no longer applies; reads/writes set their own.
                sock.settimeout(None)
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            self._sock = sock
            self._rfile = _makefile(sock, 'rb')
            self._next_seq_id = 0
            self._get_server_information()
            self._request_authentication()
            if self.sql_mode is not None:
                c = self.cursor()
                c.execute("SET sql_mode=%s", (self.sql_mode,))
            if self.init_command is not None:
                c = self.cursor()
                c.execute(self.init_command)
                c.close()
                self.commit()
            if self.autocommit_mode is not None:
                self.autocommit(self.autocommit_mode)
        except BaseException as e:
            self._rfile = None
            if sock is not None:
                try:
                    sock.close()
                except: # noqa
                    pass
            if isinstance(e, (OSError, IOError, socket.error)):
                exc = err.OperationalError(
                    2003,
                    "Can't connect to MySQL server on %r (%s)" % (
                        self.host, e))
                # Keep original exception and traceback to investigate error.
                exc.original_exception = e
                exc.traceback = traceback.format_exc()
                if DEBUG: print(exc.traceback)
                raise exc
            # If e is neither DatabaseError or IOError, It's a bug.
            # But raising AssertionError hides original error.
            # So just reraise it.
            raise
def write_packet(self, payload):
"""Writes an entire "mysql packet" in its entirety to the network
addings its length and sequence number.
"""
# Internal note: when you build packet manualy and calls _write_bytes()
# directly, you should set self._next_seq_id properly.
data = pack_int24(len(payload)) + int2byte(self._next_seq_id) + payload
if DEBUG: dump_packet(data)
self._write_bytes(data)
self._next_seq_id = (self._next_seq_id + 1) % 256
    def _read_packet(self, packet_type=MysqlPacket):
        """Read an entire "mysql packet" in its entirety from the network
        and return a MysqlPacket type that represents the results.

        :raise OperationalError: If the connection to the MySQL server is lost.
        :raise InternalError: If the packet sequence number is wrong.
        """
        buff = b''
        while True:
            packet_header = self._read_bytes(4)
            #if DEBUG: dump_packet(packet_header)
            # Header: 3-byte little-endian length + 1-byte sequence number.
            btrl, btrh, packet_number = struct.unpack('<HBB', packet_header)
            bytes_to_read = btrl + (btrh << 16)
            if packet_number != self._next_seq_id:
                self._force_close()
                if packet_number == 0:
                    # MariaDB sends error packet with seqno==0 when shutdown
                    raise err.OperationalError(
                        CR.CR_SERVER_LOST,
                        "Lost connection to MySQL server during query")
                raise err.InternalError(
                    "Packet sequence number wrong - got %d expected %d"
                    % (packet_number, self._next_seq_id))
            self._next_seq_id = (self._next_seq_id + 1) % 256
            recv_data = self._read_bytes(bytes_to_read)
            if DEBUG: dump_packet(recv_data)
            buff += recv_data
            # https://dev.mysql.com/doc/internals/en/sending-more-than-16mbyte.html
            # A length of exactly 0xffffff means the payload continues in the
            # next packet; a full-but-shorter packet ends the sequence.
            if bytes_to_read == 0xffffff:
                continue
            if bytes_to_read < MAX_PACKET_LEN:
                break
        packet = packet_type(buff, self.encoding)
        packet.check_error()
        return packet
    def _read_bytes(self, num_bytes):
        """Read exactly *num_bytes* from the wire or raise OperationalError."""
        self._sock.settimeout(self._read_timeout)
        while True:
            try:
                data = self._rfile.read(num_bytes)
                break
            except (IOError, OSError) as e:
                # Retry only interrupted system calls; anything else means
                # the connection is unusable.
                if e.errno == errno.EINTR:
                    continue
                self._force_close()
                raise err.OperationalError(
                    CR.CR_SERVER_LOST,
                    "Lost connection to MySQL server during query (%s)" % (e,))
            except BaseException:
                # Don't convert unknown exception to MySQLError.
                self._force_close()
                raise
        # A short read means the server closed the connection mid-packet.
        if len(data) < num_bytes:
            self._force_close()
            raise err.OperationalError(
                CR.CR_SERVER_LOST, "Lost connection to MySQL server during query")
        return data
def _write_bytes(self, data):
self._sock.settimeout(self._write_timeout)
try:
self._sock.sendall(data)
except IOError as e:
self._force_close()
raise err.OperationalError(
CR.CR_SERVER_GONE_ERROR,
"MySQL server has gone away (%r)" % (e,))
def _read_query_result(self, unbuffered=False):
self._result = None
if unbuffered:
try:
result = MySQLResult(self)
result.init_unbuffered_query()
except:
result.unbuffered_active = False
result.connection = None
raise
else:
result = MySQLResult(self)
result.read()
self._result = result
if result.server_status is not None:
self.server_status = result.server_status
return result.affected_rows
def insert_id(self):
if self._result:
return self._result.insert_id
else:
return 0
    def _execute_command(self, command, sql):
        """Send *command* with payload *sql*, splitting oversized payloads.

        :raise InterfaceError: If the connection is closed.
        :raise ValueError: If no username was specified.
        """
        if not self._sock:
            raise err.InterfaceError("(0, '')")
        # If the last query was unbuffered, make sure it finishes before
        # sending new commands
        if self._result is not None:
            if self._result.unbuffered_active:
                warnings.warn("Previous unbuffered result was left incomplete")
                self._result._finish_unbuffered_query()
            while self._result.has_next:
                self.next_result()
            self._result = None
        if isinstance(sql, text_type):
            sql = sql.encode(self.encoding)
        packet_size = min(MAX_PACKET_LEN, len(sql) + 1)  # +1 is for command
        # tiny optimization: build first packet manually instead of
        # calling self.write_packet()
        prelude = struct.pack('<iB', packet_size, command)
        packet = prelude + sql[:packet_size-1]
        self._write_bytes(packet)
        if DEBUG: dump_packet(packet)
        # The hand-built packet above consumed sequence id 0.
        self._next_seq_id = 1
        if packet_size < MAX_PACKET_LEN:
            return
        # Payload did not fit in one packet; stream the remainder. A final
        # packet shorter than MAX_PACKET_LEN terminates the sequence.
        sql = sql[packet_size-1:]
        while True:
            packet_size = min(MAX_PACKET_LEN, len(sql))
            self.write_packet(sql[:packet_size])
            sql = sql[packet_size:]
            if not sql and packet_size < MAX_PACKET_LEN:
                break
    def _request_authentication(self):
        """Send the HandshakeResponse and complete authentication.

        Handles optional TLS upgrade, the initial auth response for the
        plugin announced by the server, auth-switch requests, and
        extra-data rounds for sha2-based plugins.
        """
        # https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
        if int(self.server_version.split('.', 1)[0]) >= 5:
            self.client_flag |= CLIENT.MULTI_RESULTS
        if self.user is None:
            raise ValueError("Did not specify a username")
        charset_id = charset_by_name(self.charset).id
        if isinstance(self.user, text_type):
            self.user = self.user.encode(self.encoding)
        data_init = struct.pack('<iIB23s', self.client_flag, MAX_PACKET_LEN, charset_id, b'')
        if self.ssl and self.server_capabilities & CLIENT.SSL:
            # SSLRequest: send the abbreviated header, then upgrade the
            # socket to TLS before transmitting credentials.
            self.write_packet(data_init)
            self._sock = self.ctx.wrap_socket(self._sock, server_hostname=self.host)
            self._rfile = _makefile(self._sock, 'rb')
            self._secure = True
        data = data_init + self.user + b'\0'
        authresp = b''
        plugin_name = None
        # Build the initial auth response for the plugin the server announced.
        if self._auth_plugin_name == '':
            plugin_name = b''
            authresp = _auth.scramble_native_password(self.password, self.salt)
        elif self._auth_plugin_name == 'mysql_native_password':
            plugin_name = b'mysql_native_password'
            authresp = _auth.scramble_native_password(self.password, self.salt)
        elif self._auth_plugin_name == 'caching_sha2_password':
            plugin_name = b'caching_sha2_password'
            if self.password:
                if DEBUG:
                    print("caching_sha2: trying fast path")
                authresp = _auth.scramble_caching_sha2(self.password, self.salt)
            else:
                if DEBUG:
                    print("caching_sha2: empty password")
        elif self._auth_plugin_name == 'sha256_password':
            plugin_name = b'sha256_password'
            if self.ssl and self.server_capabilities & CLIENT.SSL:
                authresp = self.password + b'\0'
            elif self.password:
                authresp = b'\1'  # request public key
            else:
                authresp = b'\0'  # empty password
        # Length prefix format for the auth response depends on capabilities.
        if self.server_capabilities & CLIENT.PLUGIN_AUTH_LENENC_CLIENT_DATA:
            data += lenenc_int(len(authresp)) + authresp
        elif self.server_capabilities & CLIENT.SECURE_CONNECTION:
            data += struct.pack('B', len(authresp)) + authresp
        else:  # pragma: no cover - not testing against servers without secure auth (>=5.0)
            data += authresp + b'\0'
        if self.db and self.server_capabilities & CLIENT.CONNECT_WITH_DB:
            if isinstance(self.db, text_type):
                self.db = self.db.encode(self.encoding)
            data += self.db + b'\0'
        if self.server_capabilities & CLIENT.PLUGIN_AUTH:
            data += (plugin_name or b'') + b'\0'
        if self.server_capabilities & CLIENT.CONNECT_ATTRS:
            # Key/value attributes, each length-prefixed with one byte.
            connect_attrs = b''
            for k, v in self._connect_attrs.items():
                k = k.encode('utf-8')
                connect_attrs += struct.pack('B', len(k)) + k
                v = v.encode('utf-8')
                connect_attrs += struct.pack('B', len(v)) + v
            data += struct.pack('B', len(connect_attrs)) + connect_attrs
        self.write_packet(data)
        auth_packet = self._read_packet()
        # if authentication method isn't accepted the first byte
        # will have the octet 254
        if auth_packet.is_auth_switch_request():
            if DEBUG: print("received auth switch")
            # https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest
            auth_packet.read_uint8()  # 0xfe packet identifier
            plugin_name = auth_packet.read_string()
            if self.server_capabilities & CLIENT.PLUGIN_AUTH and plugin_name is not None:
                auth_packet = self._process_auth(plugin_name, auth_packet)
            else:
                # send legacy handshake
                data = _auth.scramble_old_password(self.password, self.salt) + b'\0'
                self.write_packet(data)
                auth_packet = self._read_packet()
        elif auth_packet.is_extra_auth_data():
            if DEBUG:
                print("received extra data")
            # https://dev.mysql.com/doc/internals/en/successful-authentication.html
            if self._auth_plugin_name == "caching_sha2_password":
                auth_packet = _auth.caching_sha2_password_auth(self, auth_packet)
            elif self._auth_plugin_name == "sha256_password":
                auth_packet = _auth.sha256_password_auth(self, auth_packet)
            else:
                raise err.OperationalError("Received extra packet for auth method %r", self._auth_plugin_name)
        if DEBUG: print("Succeed to auth")
    def _process_auth(self, plugin_name, auth_packet):
        """Run the auth-switch dance for *plugin_name* and return the final packet.

        A user-supplied handler (from auth_plugin_map) takes precedence;
        otherwise the built-in plugins are used.
        """
        handler = self._get_auth_plugin_handler(plugin_name)
        if handler:
            try:
                return handler.authenticate(auth_packet)
            except AttributeError:
                # 'dialog' handlers may legitimately lack authenticate();
                # they are driven through prompt() below instead.
                if plugin_name != b'dialog':
                    raise err.OperationalError(2059, "Authentication plugin '%s'"
                                               " not loaded: - %r missing authenticate method" % (plugin_name, type(handler)))
        if plugin_name == b"caching_sha2_password":
            return _auth.caching_sha2_password_auth(self, auth_packet)
        elif plugin_name == b"sha256_password":
            return _auth.sha256_password_auth(self, auth_packet)
        elif plugin_name == b"mysql_native_password":
            data = _auth.scramble_native_password(self.password, auth_packet.read_all())
        elif plugin_name == b"mysql_old_password":
            data = _auth.scramble_old_password(self.password, auth_packet.read_all()) + b'\0'
        elif plugin_name == b"mysql_clear_password":
            # https://dev.mysql.com/doc/internals/en/clear-text-authentication.html
            data = self.password + b'\0'
        elif plugin_name == b"dialog":
            # Multi-round prompt/response loop; the low bit of the flag byte
            # marks the last round, bits 1-2 encode whether input is echoed.
            pkt = auth_packet
            while True:
                flag = pkt.read_uint8()
                echo = (flag & 0x06) == 0x02
                last = (flag & 0x01) == 0x01
                prompt = pkt.read_all()
                if prompt == b"Password: ":
                    self.write_packet(self.password + b'\0')
                elif handler:
                    resp = 'no response - TypeError within plugin.prompt method'
                    try:
                        resp = handler.prompt(echo, prompt)
                        self.write_packet(resp + b'\0')
                    except AttributeError:
                        raise err.OperationalError(2059, "Authentication plugin '%s'" \
                                                   " not loaded: - %r missing prompt method" % (plugin_name, handler))
                    except TypeError:
                        raise err.OperationalError(2061, "Authentication plugin '%s'" \
                                                   " %r didn't respond with string. Returned '%r' to prompt %r" % (plugin_name, handler, resp, prompt))
                else:
                    raise err.OperationalError(2059, "Authentication plugin '%s' (%r) not configured" % (plugin_name, handler))
                pkt = self._read_packet()
                pkt.check_error()
                if pkt.is_ok_packet() or last:
                    break
            return pkt
        else:
            raise err.OperationalError(2059, "Authentication plugin '%s' not configured" % plugin_name)
        # Single-round plugins fall through to here with `data` prepared.
        self.write_packet(data)
        pkt = self._read_packet()
        pkt.check_error()
        return pkt
def _get_auth_plugin_handler(self, plugin_name):
plugin_class = self._auth_plugin_map.get(plugin_name)
if not plugin_class and isinstance(plugin_name, bytes):
plugin_class = self._auth_plugin_map.get(plugin_name.decode('ascii'))
if plugin_class:
try:
handler = plugin_class(self)
except TypeError:
raise err.OperationalError(2059, "Authentication plugin '%s'"
" not loaded: - %r cannot be constructed with connection object" % (plugin_name, plugin_class))
else:
handler = None
return handler
# _mysql support
def thread_id(self):
return self.server_thread_id[0]
    def character_set_name(self):
        """Return the client character set name (MySQLdb compatibility)."""
        return self.charset
    def get_host_info(self):
        """Return a human-readable description of the transport in use."""
        return self.host_info
    def get_proto_info(self):
        """Return the protocol version reported in the server handshake."""
        return self.protocol_version
def _get_server_information(self):
i = 0
packet = self._read_packet()
data = packet.get_all_data()
self.protocol_version = byte2int(data[i:i+1])
i += 1
server_end = data.find(b'\0', i)
self.server_version = data[i:server_end].decode('latin1')
i = server_end + 1
self.server_thread_id = struct.unpack('<I', data[i:i+4])
i += 4
self.salt = data[i:i+8]
i += 9 # 8 + 1(filler)
self.server_capabilities = struct.unpack('<H', data[i:i+2])[0]
i += 2
if len(data) >= i + 6:
lang, stat, cap_h, salt_len = struct.unpack('<BHHB', data[i:i+6])
i += 6
# TODO: deprecate server_language and server_charset.
# mysqlclient-python doesn't provide it.
self.server_language = lang
try:
self.server_charset = charset_by_id(lang).name
except KeyError:
# unknown collation
self.server_charset = None
self.server_status = stat
if DEBUG: print("server_status: %x" % stat)
self.server_capabilities |= cap_h << 16
if DEBUG: print("salt_len:", salt_len)
salt_len = max(12, salt_len - 9)
# reserved
i += 10
if len(data) >= i + salt_len:
# salt_len includes auth_plugin_data_part_1 and filler
self.salt += data[i:i+salt_len]
i += salt_len
i+=1
# AUTH PLUGIN NAME may appear here.
if self.server_capabilities & CLIENT.PLUGIN_AUTH and len(data) >= i:
# Due to Bug#59453 the auth-plugin-name is missing the terminating
# NUL-char in versions prior to 5.5.10 and 5.6.2.
# ref: https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
# didn't use version checks as mariadb is corrected and reports
# earlier than those two.
server_end = data.find(b'\0', i)
if server_end < 0: # pragma: no cover - very specific upstream bug
# not found \0 and last field so take it all
self._auth_plugin_name = data[i:].decode('utf-8')
else:
self._auth_plugin_name = data[i:server_end].decode('utf-8')
def get_server_info(self):
return self.server_version
Warning = err.Warning
Error = err.Error
InterfaceError = err.InterfaceError
DatabaseError = err.DatabaseError
DataError = err.DataError
OperationalError = err.OperationalError
IntegrityError = err.IntegrityError
InternalError = err.InternalError
ProgrammingError = err.ProgrammingError
NotSupportedError = err.NotSupportedError
class MySQLResult(object):
def __init__(self, connection):
"""
:type connection: Connection
"""
self.connection = connection
self.affected_rows = None
self.insert_id = None
self.server_status = None
self.warning_count = 0
self.message = None
self.field_count = 0
self.description = None
self.rows = None
self.has_next = None
self.unbuffered_active = False
def __del__(self):
if self.unbuffered_active:
self._finish_unbuffered_query()
def read(self):
try:
first_packet = self.connection._read_packet()
if first_packet.is_ok_packet():
self._read_ok_packet(first_packet)
elif first_packet.is_load_local_packet():
self._read_load_local_packet(first_packet)
else:
self._read_result_packet(first_packet)
finally:
self.connection = None
def init_unbuffered_query(self):
"""
:raise OperationalError: If the connection to the MySQL server is lost.
:raise InternalError:
"""
self.unbuffered_active = True
first_packet = self.connection._read_packet()
if first_packet.is_ok_packet():
self._read_ok_packet(first_packet)
self.unbuffered_active = False
self.connection = None
elif first_packet.is_load_local_packet():
self._read_load_local_packet(first_packet)
self.unbuffered_active = False
self.connection = None
else:
self.field_count = first_packet.read_length_encoded_integer()
self._get_descriptions()
# Apparently, MySQLdb picks this number because it's the maximum
# value of a 64bit unsigned integer. Since we're emulating MySQLdb,
# we set it to this instead of None, which would be preferred.
self.affected_rows = 18446744073709551615
def _read_ok_packet(self, first_packet):
ok_packet = OKPacketWrapper(first_packet)
self.affected_rows = ok_packet.affected_rows
self.insert_id = ok_packet.insert_id
self.server_status = ok_packet.server_status
self.warning_count = ok_packet.warning_count
self.message = ok_packet.message
self.has_next = ok_packet.has_next
def _read_load_local_packet(self, first_packet):
if not self.connection._local_infile:
raise RuntimeError(
"**WARN**: Received LOAD_LOCAL packet but local_infile option is false.")
load_packet = LoadLocalPacketWrapper(first_packet)
sender = LoadLocalFile(load_packet.filename, self.connection)
try:
sender.send_data()
except:
self.connection._read_packet() # skip ok packet
raise
ok_packet = self.connection._read_packet()
if not ok_packet.is_ok_packet(): # pragma: no cover - upstream induced protocol error
raise err.OperationalError(2014, "Commands Out of Sync")
self._read_ok_packet(ok_packet)
def _check_packet_is_eof(self, packet):
if not packet.is_eof_packet():
return False
#TODO: Support CLIENT.DEPRECATE_EOF
# 1) Add DEPRECATE_EOF to CAPABILITIES
# 2) Mask CAPABILITIES with server_capabilities
# 3) if server_capabilities & CLIENT.DEPRECATE_EOF: use OKPacketWrapper instead of EOFPacketWrapper
wp = EOFPacketWrapper(packet)
self.warning_count = wp.warning_count
self.has_next = wp.has_next
return True
def _read_result_packet(self, first_packet):
self.field_count = first_packet.read_length_encoded_integer()
self._get_descriptions()
self._read_rowdata_packet()
def _read_rowdata_packet_unbuffered(self):
# Check if in an active query
if not self.unbuffered_active:
return
# EOF
packet = self.connection._read_packet()
if self._check_packet_is_eof(packet):
self.unbuffered_active = False
self.connection = None
self.rows = None
return
row = self._read_row_from_packet(packet)
self.affected_rows = 1
self.rows = (row,) # rows should tuple of row for MySQL-python compatibility.
return row
def _finish_unbuffered_query(self):
# After much reading on the MySQL protocol, it appears that there is,
# in fact, no way to stop MySQL from sending all the data after
# executing a query, so we just spin, and wait for an EOF packet.
while self.unbuffered_active:
packet = self.connection._read_packet()
if self._check_packet_is_eof(packet):
self.unbuffered_active = False
self.connection = None # release reference to kill cyclic reference.
def _read_rowdata_packet(self):
"""Read a rowdata packet for each data row in the result set."""
rows = []
while True:
packet = self.connection._read_packet()
if self._check_packet_is_eof(packet):
self.connection = None # release reference to kill cyclic reference.
break
rows.append(self._read_row_from_packet(packet))
self.affected_rows = len(rows)
self.rows = tuple(rows)
def _read_row_from_packet(self, packet):
row = []
for encoding, converter in self.converters:
try:
data = packet.read_length_coded_string()
except IndexError:
# No more columns in this row
# See https://github.com/PyMySQL/PyMySQL/pull/434
break
if data is not None:
if encoding is not None:
data = data.decode(encoding)
if DEBUG: print("DEBUG: DATA = ", data)
if converter is not None:
data = converter(data)
row.append(data)
return tuple(row)
def _get_descriptions(self):
"""Read a column descriptor packet for each column in the result."""
self.fields = []
self.converters = []
use_unicode = self.connection.use_unicode
conn_encoding = self.connection.encoding
description = []
for i in range_type(self.field_count):
field = self.connection._read_packet(FieldDescriptorPacket)
self.fields.append(field)
description.append(field.description())
field_type = field.type_code
if use_unicode:
if field_type == FIELD_TYPE.JSON:
# When SELECT from JSON column: charset = binary
# When SELECT CAST(... AS JSON): charset = connection encoding
# This behavior is different from TEXT / BLOB.
# We should decode result by connection encoding regardless charsetnr.
# See https://github.com/PyMySQL/PyMySQL/issues/488
encoding = conn_encoding # SELECT CAST(... AS JSON)
elif field_type in TEXT_TYPES:
if field.charsetnr == 63: # binary
# TEXTs with charset=binary means BINARY types.
encoding = None
else:
encoding = conn_encoding
else:
# Integers, Dates and Times, and other basic data is encoded in ascii
encoding = 'ascii'
else:
encoding = None
converter = self.connection.decoders.get(field_type)
if converter is converters.through:
converter = None
if DEBUG: print("DEBUG: field={}, converter={}".format(field, converter))
self.converters.append((encoding, converter))
eof_packet = self.connection._read_packet()
assert eof_packet.is_eof_packet(), 'Protocol error, expecting EOF'
self.description = tuple(description)
class LoadLocalFile(object):
def __init__(self, filename, connection):
self.filename = filename
self.connection = connection
def send_data(self):
"""Send data packets from the local file to the server"""
if not self.connection._sock:
raise err.InterfaceError("(0, '')")
conn = self.connection
try:
with open(self.filename, 'rb') as open_file:
packet_size = min(conn.max_allowed_packet, 16*1024) # 16KB is efficient enough
while True:
chunk = open_file.read(packet_size)
if not chunk:
break
conn.write_packet(chunk)
except IOError:
raise err.OperationalError(1017, "Can't find file '{0}'".format(self.filename))
finally:
# send the empty packet to signify we are done sending data
conn.write_packet(b'')
| [
"brianmunene69@gmail.com"
] | brianmunene69@gmail.com |
8440e8250bda5ae92abd0501c1219d37a8251790 | d713770971a0d9e4a77921fa85fd03daf339dd84 | /business_hardcode/build_project/build_project.py | b34832268d919212f956754af2974f20ed2d4dea | [
"Apache-2.0"
] | permissive | laashub/laas-soa | cf9c0403cb25eedc74326752aaa776f501fac9d0 | 63a5e84b646bf1d857e97ddbbc7c1c487a9dc9e4 | refs/heads/master | 2023-01-07T17:44:24.431030 | 2020-11-12T13:35:31 | 2020-11-12T13:35:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,528 | py | """
构建项目
需要依赖一些数据
构建服务器
源码仓库信息
项目配置信息
"""
import datetime
import json
import os
import time
import traceback
from rest.operate.executor import context
local_executor_root_path = os.path.join(os.getcwd(), "business_hardcode/build_project")
remote_executor_root_path = "/data/tristan/1" # 远程执行器根目录
# 准备本地目录
local_executor_data_data_path = os.path.join(local_executor_root_path, "data_data")
context.prepare_local_dirs([local_executor_data_data_path])
# 本地数据版本记录文件
local_update_datetime_record_path = local_executor_root_path + "/" + "local_update_datetime_record"
def build_project(executor_data_id, data_data_data):
"""
构建项目
:param executor_data_id:
:param data_data_data:
:return:
"""
# 记录全局数据
context.global_data.executor_data_id = executor_data_id
startup_timestamp = int(time.time())
context.log("启动时间: " + str(datetime.datetime.now()))
try:
"""
{'id': 11, 'git_server': '1', 'project_name': '仓库系统', 'branches': 'master', 'tags': '',
'program_language': 'java', 'docker_registry_id': '1', 'update_datetime': {'$date': 1605035741000},
'create_datetime': {'$date': 1605035741000}, 'repo_path': 'http://git.wjh.com/wms/wms_service'}
"""
# 查询执行器
host_build = context.select_data_by_data_id__data_data_id(15, 1)[0] # 查询服务器连接信息
# 获取最新版本的数据, 保存数据到本地, 同步最新版本的数据到执行器目录
latest_update_datetime_record = ""
"""
data_data:
git_server.json
docker_registry.json
"""
# 查询 git服务器
data_data_git_server = context.select_data_by_data_id__data_data_id('5', data_data_data['git_server'])[0]
latest_update_datetime_record += str(data_data_git_server["update_datetime"]) + ";"
# 查询 docker镜像仓库
data_data_docker_registry = \
context.select_data_by_data_id__data_data_id('4', data_data_data['docker_registry_id'])[0]
latest_update_datetime_record += str(data_data_docker_registry["update_datetime"]) + ";"
# 查询 仓库地址
local_update_datetime_record = None
if os.path.exists(local_update_datetime_record_path):
with open(local_update_datetime_record_path) as f:
local_update_datetime_record = f.read()
if not local_update_datetime_record or local_update_datetime_record != latest_update_datetime_record:
# ############### 同步数据到文件到远程服务器
# 准备远程目录
context.log(context.declare_remote_dirs(host_build, [remote_executor_root_path]))
context.write_data_data_2_file(data_data_git_server, local_executor_data_data_path + '/git_server.json')
context.write_data_data_2_file(data_data_docker_registry,
local_executor_data_data_path + '/docker_registry.json')
# 获取最新版本的业务, 保存业务到本地, 同步最新版本的业务到执行器
"""
business_hyper_fusion:
java:
do_build_project.sh
build_project.sh
clean_build_project.sh
startup.sh
Dockerfile
do_build_docker.sh
clean_build_docker.sh
"""
# 同步数据、业务脚本目录到服务器
context.sync_dirs_2_remote(host_build, local_executor_root_path, remote_executor_root_path,
["data_data", "business_hyper_fusion"])
# 同步启动文件到服务器
context.sync_files_2_remote(host_build, local_executor_root_path, remote_executor_root_path, ["startup.py"])
with open(local_update_datetime_record_path, 'w')as f:
f.write(latest_update_datetime_record)
# ######每次执行器都需要创建执行目录, 并将启动数据写入执行目录的data_data.json文件中
remote_executor_run_n_path = remote_executor_root_path + "/run/" + str(executor_data_id)
# 创建这次执行器的运行目录
context.declare_remote_dirs(host_build, [remote_executor_run_n_path])
# 写入启动参数
context.execute_remote_command(host_build, """
sudo cat >> %s<<EOF
%s
EOF
""" % (remote_executor_run_n_path + "/data_data.json", json.dumps(data_data_data, ensure_ascii=False)))
# 是否应该考虑将共享文件拷贝到自己的区域???
# 好处是什么? 目录都都可以在自己的目录, 坏处是什么, 需要拷贝文件
command = "cd %s && python startup.py -ei %s" % (remote_executor_root_path, executor_data_id)
context.RemoteShell(host_build["ip"], host_build["port"], host_build["username"],
host_build["password"]).execute(command)
# context.ShellHandler(host_build["ip"], host_build["port"], host_build["username"],host_build["password"]).execute(command)
print("=" * 200)
except Exception as e:
traceback.print_exc()
context.log(str(e))
context.log("结束时间: " + str(datetime.datetime.now()))
context.log("总耗时: %s 秒钟" + str(int((int(time.time()) - startup_timestamp) / 1000)))
| [
"tanshilinmail@gmail.com"
] | tanshilinmail@gmail.com |
187bd2a6ff0bfea7ed5629278eea007adedb4d97 | 54d3a1558a4bd38888d4d51f1ae2d2699965087c | /exa.py | 59f998f63b4e4f8e21e59e08b9035fd514853656 | [] | no_license | A8IK/Python-2 | a86843c6ccfe23d42faebb020307351a108075bd | 538aee64bac73110cd0a8ac74747c9d2fa485149 | refs/heads/main | 2023-01-21T12:42:51.226144 | 2020-12-04T18:14:32 | 2020-12-04T18:14:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | def div(a,b):
print(a/b)
div(4,2)
| [
"noreply@github.com"
] | noreply@github.com |
af54235f1808ded628afe0b1d54a6be553ceaa24 | 91e57f5ef0a4477e12a946dc7c9c66ad2ced0778 | /baekjoon_py/14889.py | c6a840877bf2e2890dc01538a4278279e1abb982 | [] | no_license | popcon9424/algorithm | 85b50de51cf6d61dfa8edfcc508e5c7c333eb82b | 5f9c685f4f684ea398758ab1f71f02e0bf1bac03 | refs/heads/master | 2020-05-28T03:08:24.286215 | 2019-11-28T08:01:26 | 2019-11-28T08:01:26 | 188,863,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | import sys
from itertools import combinations
N = int(sys.stdin.readline())
board = [ list(map(int, sys.stdin.readline().split())) for _ in range(N) ]
minimum = 0
for bd in board:
minimum += sum(bd)
combs = combinations(list(range(N)), N//2)
for com in combs:
firstSum, secondSum = 0, 0
smallcombs = combinations(com, 2)
for smallcomb in smallcombs:
firstSum += board[smallcomb[0]][smallcomb[1]] + board[smallcomb[1]][smallcomb[0]]
notcombs = combinations(list(set(range(N)) - set(com)), 2)
for notcomb in notcombs:
secondSum += board[notcomb[0]][notcomb[1]] + board[notcomb[1]][notcomb[0]]
diff = abs(firstSum - secondSum)
if diff == 0:
minimum = 0
break
if diff < minimum:
minimum = diff
print(minimum) | [
"gusgh9424@naver.com"
] | gusgh9424@naver.com |
0d307cf1b2d2df910db56e7f5bfb1b8f8f5ab2a4 | 288ccb79d6b73572d6d82366031813a3a7976eb5 | /venv/lib/python3.6/site-packages/secretstorage/util.py | 60bafca0f46b04c7a1d59de295c9b98aaa186a15 | [
"MIT"
] | permissive | abualrubbaraa/Baraa-Validator | d9182767b696270dbcc6f071c12574e470ed0f5d | bff356f4e35ea7de66de799e7f063c383e298d1f | refs/heads/master | 2022-11-29T11:19:30.624281 | 2020-08-18T20:37:22 | 2020-08-18T20:37:22 | 288,558,878 | 0 | 1 | MIT | 2022-11-28T19:57:51 | 2020-08-18T20:34:28 | Python | UTF-8 | Python | false | false | 6,213 | py | # SecretStorage module for Python
# Access passwords using the SecretService DBus API
# Author: Dmitry Shachnev, 2013-2018
# License: 3-clause BSD, see LICENSE file
"""This module provides some utility functions, but these shouldn't
normally be used by external applications."""
import os
from typing import Any, List, Tuple
from jeepney import DBusAddress
from jeepney.bus_messages import MatchRule
from jeepney.integrate.blocking import DBusConnection
from jeepney.low_level import Message
from jeepney.wrappers import new_method_call, Properties, DBusErrorResponse
from secretstorage.defines import DBUS_UNKNOWN_METHOD, DBUS_NO_SUCH_OBJECT, \
DBUS_SERVICE_UNKNOWN, DBUS_NO_REPLY, DBUS_NOT_SUPPORTED, DBUS_EXEC_FAILED, \
SS_PATH, SS_PREFIX, ALGORITHM_DH, ALGORITHM_PLAIN
from secretstorage.dhcrypto import Session, int_to_bytes
from secretstorage.exceptions import ItemNotFoundException, \
SecretServiceNotAvailableException
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from cryptography.utils import int_from_bytes
BUS_NAME = 'org.freedesktop.secrets'
SERVICE_IFACE = SS_PREFIX + 'Service'
PROMPT_IFACE = SS_PREFIX + 'Prompt'
class DBusAddressWrapper(DBusAddress): # type: ignore
"""A wrapper class around :class:`jeepney.wrappers.DBusAddress`
that adds some additional methods for calling and working with
properties, and converts error responses to SecretStorage
exceptions.
.. versionadded:: 3.0
"""
def __init__(self, path: str, interface: str,
connection: DBusConnection) -> None:
DBusAddress.__init__(self, path, BUS_NAME, interface)
self._connection = connection
def send_and_get_reply(self, msg: Message) -> Any:
try:
return self._connection.send_and_get_reply(msg)
except DBusErrorResponse as resp:
if resp.name in (DBUS_UNKNOWN_METHOD, DBUS_NO_SUCH_OBJECT):
raise ItemNotFoundException('Item does not exist!') from resp
elif resp.name in (DBUS_SERVICE_UNKNOWN, DBUS_EXEC_FAILED,
DBUS_NO_REPLY):
data = resp.data
if isinstance(data, tuple):
data = data[0]
raise SecretServiceNotAvailableException(data) from resp
raise
def call(self, method: str, signature: str, *body: Any) -> Any:
msg = new_method_call(self, method, signature, body)
return self.send_and_get_reply(msg)
def get_property(self, name: str) -> Any:
msg = Properties(self).get(name)
(signature, value), = self.send_and_get_reply(msg)
return value
def set_property(self, name: str, signature: str, value: Any) -> None:
msg = Properties(self).set(name, signature, value)
self.send_and_get_reply(msg)
def open_session(connection: DBusConnection) -> Session:
"""Returns a new Secret Service session."""
service = DBusAddressWrapper(SS_PATH, SERVICE_IFACE, connection)
session = Session()
try:
output, result = service.call('OpenSession', 'sv',
ALGORITHM_DH,
('ay', int_to_bytes(session.my_public_key)))
except DBusErrorResponse as resp:
if resp.name != DBUS_NOT_SUPPORTED:
raise
output, result = service.call('OpenSession', 'sv',
ALGORITHM_PLAIN,
('s', ''))
session.encrypted = False
else:
signature, value = output
assert signature == 'ay'
key = int_from_bytes(value, 'big')
session.set_server_public_key(key)
session.object_path = result
return session
def format_secret(session: Session, secret: bytes,
content_type: str) -> Tuple[str, bytes, bytes, str]:
"""Formats `secret` to make possible to pass it to the
Secret Service API."""
if isinstance(secret, str):
secret = secret.encode('utf-8')
elif not isinstance(secret, bytes):
raise TypeError('secret must be bytes')
assert session.object_path is not None
if not session.encrypted:
return (session.object_path, b'', secret, content_type)
assert session.aes_key is not None
# PKCS-7 style padding
padding = 0x10 - (len(secret) & 0xf)
secret += bytes((padding,) * padding)
aes_iv = os.urandom(0x10)
aes = algorithms.AES(session.aes_key)
encryptor = Cipher(aes, modes.CBC(aes_iv), default_backend()).encryptor()
encrypted_secret = encryptor.update(secret) + encryptor.finalize()
return (
session.object_path,
aes_iv,
encrypted_secret,
content_type
)
def exec_prompt(connection: DBusConnection,
prompt_path: str) -> Tuple[bool, List[str]]:
"""Executes the prompt in a blocking mode.
:returns: a tuple; the first element is a boolean value showing
whether the operation was dismissed, the second element
is a list of unlocked object paths
"""
prompt = DBusAddressWrapper(prompt_path, PROMPT_IFACE, connection)
dismissed = result = None
def callback(msg_body: Tuple[bool, List[str]]) -> None:
_dismissed, _result = msg_body
nonlocal dismissed, result
dismissed, result = bool(_dismissed), _result
connection.router.subscribe_signal(callback, prompt_path, PROMPT_IFACE, 'Completed')
prompt.call('Prompt', 's', '')
if result is None:
connection.recv_messages()
assert dismissed is not None
assert result is not None
return dismissed, result
def unlock_objects(connection: DBusConnection, paths: List[str]) -> bool:
"""Requests unlocking objects specified in `paths`.
Returns a boolean representing whether the operation was dismissed.
.. versionadded:: 2.1.2"""
service = DBusAddressWrapper(SS_PATH, SERVICE_IFACE, connection)
unlocked_paths, prompt = service.call('Unlock', 'ao', paths)
if len(prompt) > 1:
dismissed, (signature, unlocked) = exec_prompt(connection, prompt)
assert signature == 'ao'
return dismissed
return False
def add_match_rules(connection: DBusConnection) -> None:
"""Adds match rules for the given connection.
Currently it matches all messages from the Prompt interface, as the
mock service (unlike GNOME Keyring) does not specify the signal
destination.
.. versionadded:: 3.1
"""
rule = MatchRule(sender=BUS_NAME, interface=PROMPT_IFACE)
dbus = DBusAddressWrapper(path='/org/freedesktop/DBus',
interface='org.freedesktop.DBus',
connection=connection)
dbus.bus_name = 'org.freedesktop.DBus'
dbus.call('AddMatch', 's', rule.serialise())
| [
"baraaabualrub1998@gmail.com"
] | baraaabualrub1998@gmail.com |
e1e86389d66b93dd4822b7ba5af7fe578432b75a | 1662e063d62bddb3f3e63204f66f8d7685d59d9a | /blog/views.py | 49d6f7ad68dd59e72576de8a887ecc140b0bea0c | [] | no_license | danielmjales/my-first-blog | b2f9ae9bca676d367cc015765e6653ee0a64eabf | 66bd359926b0a38dcc06e4b4c4f1caf815382b3f | refs/heads/master | 2020-04-17T11:40:04.905284 | 2019-01-20T12:37:02 | 2019-01-20T12:37:02 | 166,549,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,632 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from .models import Post
from .forms import PostForm
from rest_framework import viewsets
from .serializers import PostModelSerializer
def post_list(request):
posts = Post.objects.all().order_by('title')
return render(request, 'blog/post_list.html', {'posts': posts})
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request, 'blog/post_detail.html', {'post': post})
def post_new(request):
if request.method == "POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm()
return render(request, 'blog/post_edit.html', {'form': form})
def post_edit(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = PostForm(request.POST, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm(instance=post)
return render(request, 'blog/post_edit.html', {'form': form})
class PostModelViewSet(viewsets.ModelViewSet):
serializer_class = PostModelSerializer
queryset = Post.objects.all().order_by('-title') | [
"danielmjales@bct.ect.ufrn.br"
] | danielmjales@bct.ect.ufrn.br |
c703a262839b247143130d0cf69dd4626cb5d5ff | a63590f247d914b6993f4e72a5c27a439344d12a | /env/lib/python3.7/io.py | 062f32ae1bab0a72f1d55ace8c1184b6d81bdb8e | [] | no_license | wgcv/Social-Media-Analyze-Election-Guayaquil | e6c65e68e6f54a11aadad9d1765568521df9a20e | 784e6e4c94552307fefdf85367bb6a793ae878c3 | refs/heads/master | 2020-05-09T20:33:58.585077 | 2019-05-11T16:46:43 | 2019-05-11T16:46:43 | 181,410,783 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | /Users/wgcv/anaconda3/lib/python3.7/io.py | [
"gstavocevallos@gmail.com"
] | gstavocevallos@gmail.com |
b87d3f64e713ba53fb5b94de3507f74d8a97ea0b | 5c533e2cf1f2fa87e55253cdbfc6cc63fb2d1982 | /python/quantumhall/cyclotron.py | 108c267d7ee00673328a312228abdcb7f535d40f | [] | no_license | philzook58/python | 940c24088968f0d5c655e2344dfa084deaefe7c6 | 6d43db5165c9bcb17e8348a650710c5f603e6a96 | refs/heads/master | 2020-05-25T15:42:55.428149 | 2018-05-14T03:33:29 | 2018-05-14T03:33:29 | 69,040,196 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 993 | py |
#A suggesiton for the classical fractional hall effect
#Is a mean field organiztion of the cycltron phases, such that they synchronize.
#Leading to an effective time and angle dependant
# self consistantly dz/dt2 = i w dz/dt + P
# where E is a vortex configuration by conjecture. P = f(|z|)z^n
# and also has angular time dependance z/|z|
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
omega =1.
g = -.5
def pack(z,zdot):
return np.array([np.real(z),np.imag(z),np.real(zdot),np.imag(zdot)])
def unpack(x):
return x[0]+1.j * x[1], x[2]+1.j * x[3],
def accel(z,zdot):
return 1.j * omega * zdot + g * np.conj(z)**3
def diffeq(x,t):
z, zdot = unpack(x)
return pack(zdot, accel(z,zdot))
from scipy import signal
T = 1000.
N = 1000
initcond = pack(1. + 0.j ,0. + 1.j)
t = np.linspace(0,T, N)
sol = odeint(diffeq, initcond, t)
f , P = signal.periodogram(sol[:,1],N/T)
plt.plot(t,sol[:,1])
plt.figure()
plt.plot(f,P)
plt.show()
| [
"philip@FartMachine7.local"
] | philip@FartMachine7.local |
bb9a53589955ef9aa479dbd294e34706c2932991 | 1ff34305a38b92eb33983ec90f29c67eac731f31 | /next level platform.py | 63a34dbb60b17f2a65176d70adbc3e5e620c2c64 | [] | no_license | jerhieb/pygame | 3e4e78fd3a9f1bfdda52404c4994025249e0c0b0 | 81d737d82d0562389cab87eb80f5913806082080 | refs/heads/master | 2020-08-04T16:56:53.348022 | 2019-10-02T02:06:38 | 2019-10-02T02:06:38 | 212,210,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,741 | py | import pygame
pygame.init()
display_width = 800
display_height = 600
surface = pygame.display.set_mode((display_width, display_height))
running1 = pygame.image.load('Running1.png')
running2 = pygame.image.load('Running2.png')
standing = pygame.image.load('playerstanding.png')
door = pygame.image.load('Door.png')
key = pygame.image.load('key.png')
running_left1 = pygame.transform.flip(running1, True, False)
running_left2 = pygame.transform.flip(running2, True, False)
running_list = [running1, running2]
counter = 0
clock = pygame.time.Clock()
player_x = 100
player_y = 300
player_xvel = 0
player_yvel = 2
run_direction = 'standing'
jumping = False
gravity = 1.2
key_found = False
level =1
trip = 0
while True:
clock.tick(40)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
run_direction = 'right'
player_xvel = 5
if event.key == pygame.K_LEFT:
run_direction = 'left'
player_xvel = -5
if event.key ==pygame.K_SPACE:
if jumping ==False:
jumping = True
player_yvel = -15
if event.type == pygame.KEYUP:
if event.key == pygame.K_RIGHT:
run_direction='standing'
player_xvel = 0
if event.key == pygame.K_LEFT:
run_direction='standing'
player_xvel = 0
if jumping ==True:
if player_yvel < 8:
player_yvel = player_yvel + gravity
player_x = player_x + player_xvel
player_y = player_y + player_yvel
surface.fill((255, 255, 255))
if level ==1:
pygame.draw.rect(surface, (100, 100, 100), (30, 500, 100, 10))
pygame.draw.rect(surface, (100, 100, 100), (200, 450, 100, 10))
pygame.draw.rect(surface, (100, 100, 100), (380, 400, 100, 10))
pygame.draw.rect(surface, (100, 100, 100), (680, 500, 100, 10))
pygame.draw.rect(surface, (100, 100, 100), (550, 450, 100, 10))
pygame.draw.rect(surface, (100, 100, 100), (200, 350, 100, 10))
pygame.draw.rect(surface, (100, 100, 100), (550, 350, 100, 10))
pygame.draw.rect(surface, (100, 100, 100), (680, 300, 100, 10))
pygame.draw.rect(surface, (100, 100, 100), (550, 250, 100, 10))
pygame.draw.rect(surface, (100, 100, 100), (380, 300, 100, 10))
if level ==2:
if trip == 0:
player_x = 720
player_y = 50
trip = trip + 1
pygame.draw.rect(surface, (100, 100, 100), (700, 100, 100, 10))
pygame.draw.rect(surface, (100, 100, 100), (100, 280, 100, 10))
pygame.draw.rect(surface, (100, 100, 100), (0, 200, 100, 10))
pygame.draw.rect(surface, (100, 100, 100), (100, 120, 100, 10))
pygame.draw.rect(surface, (100, 100, 100), (300, 100, 100, 10))
pygame.draw.rect(surface, (100, 100, 100), (200, 350, 100, 10))
pygame.draw.rect(surface, (100, 100, 100), (680, 500, 100, 10))
pygame.draw.rect(surface, (100, 100, 100), (380, 300, 100, 10))
pygame.draw.rect(surface, (255, 0, 0), (0, display_height-40, display_width, display_height))
if level ==1:
door_rect = surface.blit(door, (425, 270))
if key_found ==False:
key_rect = surface.blit(key, (720, 475))
if level ==2:
door_rect = surface.blit(door, (720, 470))
if key_found ==False:
key_rect = surface.blit(key, (320, 75))
if run_direction== 'right':
if counter%2==0:
character = surface.blit(running1, (player_x, player_y))
else:
character =surface.blit(running2, (player_x, player_y))
if run_direction == 'left':
if counter%2==0:
character =surface.blit(running_left1, (player_x, player_y))
else:
character =surface.blit(running_left2, (player_x, player_y))
if run_direction=='standing':
character = surface.blit(standing, (player_x, player_y))
if character.colliderect(key_rect):
key_found = True
if character.colliderect(door_rect):
if key_found == True:
print('you win')
key_found = False
level =2
if surface.get_at((character.left, character.bottom))==(100, 100, 100) or surface.get_at((character.right, character.bottom))==(100, 100, 100):
player_yvel = 0
jumping = False
else:
if jumping == False:
player_yvel = 2
if player_y>display_height-40:
pygame.quit()
counter = counter + 1
pygame.display.update()
| [
"noreply@github.com"
] | noreply@github.com |
e1a8258c295bb435d8d056b45f9adbadb1d5fb35 | 6d2307761303169e6527e10d4ee3b8692c84e9b7 | /Code/Algorithm Testing/NB.py | 82463836cc9517bc6e6411e58f5807ed2a177bf8 | [] | no_license | bpblakely/Email-Spam-Classification-Project | e783bed5a6b5b4bb6ccf8c9be092cdb9e163207f | d657bfbf475edb5731e78a4122fb7aaeee9e6ab5 | refs/heads/master | 2020-10-02T04:39:27.867598 | 2019-12-12T22:42:19 | 2019-12-12T22:42:19 | 227,703,943 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,664 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 31 10:29:48 2019
@author: Brian
"""
import numpy as np
from sklearn.naive_bayes import MultinomialNB,GaussianNB,ComplementNB
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LassoCV
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from kerasClassifier import predictTest as pt
#Main code for testing different algorithms. Commented code are algorithms testing prior. Uncomment in order to test again
def predictTest(trainFeatures, trainLabels, testFeatures, i):
    """Reduce the feature matrices to the `i` best chi-squared features,
    then delegate the actual prediction to the Keras classifier
    (kerasClassifier.predictTest, imported above as `pt`).

    Returns whatever the delegated classifier returns (presumably spam
    probabilities for testFeatures -- confirm against kerasClassifier).
    """
    # Earlier experiments, kept for reference (uncomment in the original
    # layout to retry): MultinomialNB(alpha=.01), MLPClassifier with two
    # 100-unit layers, SVC(kernel="rbf"), LinearSVC(penalty="l2"),
    # RandomForestClassifier(n_estimators=100, max_depth=12), and an
    # L1-LinearSVC feature-selection Pipeline, plus a direct
    # model.fit / model.predict_proba path instead of the Keras delegate.
    selector = SelectKBest(chi2, k=i)
    reduced_train = selector.fit_transform(trainFeatures, trainLabels)
    reduced_test = selector.transform(testFeatures)
    return pt(reduced_train, trainLabels, reduced_test, i)
| [
"noreply@github.com"
] | noreply@github.com |
e19d83d920cbf214a0559c2f0bb610c90b9d69ee | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_116/1914.py | 20d7b72d1b8a35128812032e9655e83a53e17756 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,493 | py | f = open("A-large.in","r")
# Number of test cases: first line of the input file opened above.
t = int (f.readline())
# Accumulates the four board-row strings of the current test case.
ent = []
def check(ent):
    """Classify a 4x4 Tic-Tac-Toe board.

    ent: list of four strings, one per board row (trailing newlines are
    harmless, only character membership is tested).  Returns:
        0 -> X won    1 -> O won    2 -> draw    3 -> game not completed
    A line containing neither '.' nor 'O' belongs entirely to X (which also
    credits the 'T' wildcard to X), and symmetrically for O.
    """
    # Every line that can decide the game, in the same order the original
    # per-line scans used: 4 rows, 4 columns, then the two diagonals.
    lanes = [ent[r] for r in range(4)]
    lanes += [[ent[r][c] for r in range(4)] for c in range(4)]
    lanes.append([ent[d][d] for d in range(4)])
    lanes.append([ent[d][3 - d] for d in range(4)])
    for lane in lanes:
        if '.' not in lane and 'O' not in lane:
            return 0
        if '.' not in lane and 'X' not in lane:
            return 1
    # No winner: it is a draw only when every cell is filled.
    if all('.' not in ent[r] for r in range(4)):
        return 2
    return 3
# Read each 4-line board, classify it with check(), and write one
# "Case #i: <verdict>" line per test case to output.out.
s = open("output.out","w")
for i in range(1,t+1):
    for j in range(0,4):
        ent.append(f.readline())
    x = check(ent)
    if x == 0:
        s.write("Case #%d: X won" % i)
    if x == 1:
        s.write("Case #%d: O won" % i)
    if x == 2:
        s.write("Case #%d: Draw" % i)
    if x == 3:
        s.write("Case #%d: Game has not completed" % i)
    # Consume the blank separator line between consecutive test cases.
    if i<t:
        ent.append(f.readline())
    s.write("\n")
    ent = []
f.close()
s.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
9ef08444444fb5f68dc415a3902027135ded3355 | 7fa478e503293dad2b12ffc5a7648e5ead2cf3df | /outliers/enron_outliers.py | ffd44aaaccb50fdc57f9afed72de4250f8092578 | [] | no_license | bluewaitor/ud120 | e81457fec36b8d1841bbecb91fde4e893d4df37b | b120ca580443d92721f9a46955b0f42a01b15e66 | refs/heads/master | 2021-08-28T23:33:16.267964 | 2017-12-13T08:37:13 | 2017-12-13T08:37:13 | 114,091,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | #!/usr/bin/python
# NOTE(review): Python 2 script (print statements, text-mode pickle read).
import pickle
import sys
import matplotlib.pyplot
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
### read in data dictionary, convert to numpy array
data_dict = pickle.load( open("../final_project/final_project_dataset.pkl", "r") )
features = ["salary", "bonus"]
# Drop the 'TOTAL' key -- presumably a spreadsheet aggregate row / outlier.
data_dict.pop('TOTAL',0)
data = featureFormat(data_dict, features)
### your code below
# Scatter-plot salary vs bonus for every record.
for point in data:
    salary = point[0]
    bonus = point[1]
    matplotlib.pyplot.scatter(salary, bonus)
matplotlib.pyplot.xlabel("salary")
matplotlib.pyplot.ylabel("bonus")
matplotlib.pyplot.show()
# Print whoever's bonus equals the maximum over the feature array.
# NOTE(review): data.max() is the max over BOTH columns (salary and bonus),
# so this only matches when the global maximum is a bonus -- confirm intent.
for key, value in data_dict.items():
    if value['bonus'] == data.max():
        print '==' + key
# Running-maximum scan over the raw dict values, printing each new record.
# NOTE(review): 'bonus' may be the string 'NaN' in this dataset; Python 2
# compares str > int as True, which can skew this scan -- verify.
biggest = 0
for key, value in data_dict.items():
    if value['bonus'] > biggest:
        biggest = value['bonus']
        print key, biggest, value['salary']
"405622394@qq.com"
] | 405622394@qq.com |
360fbd0df75ba142aadd5589508fdb2d95ba7602 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_074/ch117_2020_04_01_19_24_01_200930.py | 446a96f7337eaf516aa30fe9c7ef40edbc6f0571 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | import math
def snell_descartes(n1, n2, c):
    """Apply the Snell-Descartes refraction law: n1*sin(i) == n2*sin(r).

    Parameters:
        n1: refractive index of the medium the ray comes from.
        n2: refractive index of the medium the ray enters.
        c:  angle of incidence, in degrees (0 <= c <= 90).

    Returns the refraction angle in degrees, or None when the ray is
    totally reflected (the refracted sine would exceed 1).

    NOTE(review): the original body referenced undefined names (o1, o2)
    and consisted of bare comparison expressions with no effect, so it
    always raised NameError; this implementation follows the standard
    law and documents the assumed meaning of `c`.
    """
    if not 0 <= c <= 90:
        raise ValueError("incidence angle must be between 0 and 90 degrees")
    sin_refracted = n1 * math.sin(math.radians(c)) / n2
    if sin_refracted > 1:
        # Total internal reflection: no refracted ray exists.
        return None
    return math.degrees(math.asin(sin_refracted))
"you@example.com"
] | you@example.com |
4fd29bedd1166b6c40908bcbd80ecf7dd8f39329 | 8a67943c7dfdf9c89a140d1a4c5aabc63d1b7263 | /train/train_transfer.py | 300ccb9c01c4c5b326954a52bac63326c93ccd29 | [
"MIT"
] | permissive | huynhtuan17ti/UnsupervisedLearning-JigsawPuzzle | c37782050b61a60695681a195dd5f38b36803976 | 1aafade4b6f169cef8815f90c27ec485bf64ca7d | refs/heads/main | 2023-04-30T07:46:43.300200 | 2021-05-16T13:08:52 | 2021-05-16T13:08:52 | 366,249,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,749 | py | import sys
sys.path.insert(1, '../UnsupervisedLearning-JigsawPuzzle')
import torch
import torch.nn as nn
import torchvision
import os
import cv2
from torch.autograd import Variable
from dataset_factory.data_loader import AnimalDataset
from dataset_factory.data_utils import get_all_imgs
from models.AlexNet import AlexNet
from config import Config
import math
from metric import accuracy as acc_metric
from tqdm import tqdm
from train.train_utils import prepare_dataloader
from torchvision import models
import argparse
# Command-line configuration for the transfer-learning training run;
# each option is documented by its own help string below.
parser = argparse.ArgumentParser(description='Train JigsawPuzzle Classifer')
parser.add_argument('--lr', default=0.001, type=float, help='learning rate for SGD optimizer')
parser.add_argument('--gamma', default=0.3, type=float, help='gamma for StepLR')
parser.add_argument('--period', default=30, type=int, help='period range for StepLR')
parser.add_argument('--pretrained', default=None, type=str, help='Path to pretrained model')
parser.add_argument('--checkpoint', default=None, type=str, help='Path to save checkpoint model')
parser.add_argument('--train_csv', default='../UnsupervisedLearning-JigsawPuzzle/dataset/csv/train.csv', type=str, help='Path to train.csv')
parser.add_argument('--valid_csv', default='../UnsupervisedLearning-JigsawPuzzle/dataset/csv/valid.csv', type=str, help='Path to valid.csv')
parser.add_argument('--epochs', default=200, type=int, help='number of total epochs for training')
parser.add_argument('--train_batch', default=16, type=int, help='train batch size')
parser.add_argument('--valid_batch', default=16, type=int, help='valid batch size')
parser.add_argument('--init_acc', default=0, type=float, help='initial accuracy for training')
parser.add_argument('--result', default=None, type=str, help='Path to save result log')
args = parser.parse_args()
def train_one_epoch(epoch, net, train_loader, loss_fc, optimizer):
    """Run one training epoch; shows running loss/accuracy on a tqdm bar."""
    net.train()
    total_loss = 0
    total_acc = 0
    pbar = tqdm(enumerate(train_loader), total = len(train_loader))
    for step, (images, labels) in pbar:
        # Move the batch to the GPU.  NOTE(review): torch.autograd.Variable
        # is deprecated on modern PyTorch (tensors work directly) -- kept
        # presumably for compatibility with an older torch version.
        images = Variable(images).cuda()
        labels = Variable(labels).cuda()
        optimizer.zero_grad()
        outputs = net(images)
        # Batch accuracy in percent, computed on detached CPU copies so the
        # metric does not participate in autograd.
        preds = torch.argmax(outputs, 1).detach().cpu().numpy()
        targets = labels.detach().cpu().numpy()
        acc = (preds == targets).mean()*100
        loss = loss_fc(outputs, labels)
        loss.backward()
        optimizer.step()
        # Running means over the steps seen so far, shown on the bar.
        total_loss += loss.item()
        total_acc += acc
        description = f'epoch {epoch} || Loss: {total_loss/(step+1):.6f} | Acc: {total_acc/(step+1):.6}'
        pbar.set_description(description)
def valid_one_epoch(epoch, net, valid_loader, loss_fc):
    """Run one validation pass and return the mean accuracy in percent.

    NOTE(review): gradients are only disabled when the caller wraps this
    call in torch.no_grad() -- the __main__ block below does exactly that.
    """
    net.eval()
    total_loss = 0
    total_acc = 0
    pbar = tqdm(enumerate(valid_loader), total = len(valid_loader))
    for step, (images, labels) in pbar:
        images = Variable(images).cuda()
        labels = Variable(labels).cuda()
        outputs = net(images)
        # Batch accuracy in percent on detached CPU copies.
        preds = torch.argmax(outputs, 1).detach().cpu().numpy()
        targets = labels.detach().cpu().numpy()
        acc = (preds == targets).mean()*100
        loss = loss_fc(outputs, labels)
        total_loss += loss.item()
        total_acc += acc
        description = f'epoch {epoch} || Loss: {total_loss/(step+1):.6f} | Acc: {total_acc/(step+1):.6}'
        pbar.set_description(description)
    # `step` keeps its final loop value here: mean accuracy over all steps.
    return total_acc/(step+1)
if __name__ == '__main__':
    train_loader, valid_loader = prepare_dataloader(AnimalDataset, args.train_csv, args.valid_csv, args.train_batch, args.valid_batch)
    # Transfer learning: ImageNet-pretrained AlexNet with a fresh 10-class head.
    net = models.alexnet(pretrained = True)
    num_ftrs = net.classifier[6].in_features
    net.classifier[6] = nn.Linear(num_ftrs, 10)
    net.cuda()
    loss = nn.CrossEntropyLoss()
    # Only parameters with requires_grad are optimized.  NOTE(review): no
    # layers are frozen above, so the filter passes everything -- confirm.
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr = args.lr, momentum=0.9, weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.period, args.gamma, verbose = True)
    # Optional text log of checkpoint events.
    if args.result:
        f = open(args.result, "w")
    # training
    print('='*30)
    print('Start training ...')
    best_acc = args.init_acc
    for epoch in range(args.epochs):
        train_one_epoch(epoch, net, train_loader, loss, optimizer)
        with torch.no_grad():
            acc = valid_one_epoch(epoch, net, valid_loader, loss)
        # Checkpoint only when validation accuracy improves.
        if acc > best_acc:
            best_acc = acc
            torch.save(net.state_dict(), args.checkpoint)
            print('Save checkpoint ... Best accuracy {:.3f}'.format(best_acc))
            if args.result:
                f.write("Epoch: " + str(epoch) + ', best acc save: ' + str(best_acc) + '\n')
        scheduler.step()
    if args.result:
        f.close()
"huynhminhtuan6429@gmail.com"
] | huynhminhtuan6429@gmail.com |
64dcffc9f3b11462172adb89a4680a202824afe1 | 669b9fd39398de1fc55ad7da8e7f3182c3d25ade | /sonar.py | aa1179e0e5de9bb69342c6ea8306a32da0a67705 | [
"Apache-2.0"
] | permissive | milos85vasic/Website-Sonar | 3fa811082221f90225d174b17a8dce31c69d05f3 | 406fa3f1baa82d1a7279c0d50dada9b141dec506 | refs/heads/master | 2020-04-15T16:11:10.500880 | 2019-01-31T13:59:25 | 2019-01-31T13:59:25 | 164,824,149 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,171 | py | import os
import sys
import time
import json
import urllib2
import requests
import logging
import os.path
from requests import ConnectionError
from logging.handlers import RotatingFileHandler
# Command-line switch and default file used to locate the JSON configuration.
param_configuration_name = '--configuration'
configuration_default_file = 'configuration.json'
# Mutable module state: the loaded configuration, per-site elapsed timers,
# and the list of sites currently known to be down.
configuration = {}
elapsed_times = {}
unreachable_websites = []
# Keys expected in the configuration JSON.
key_websites = 'websites'
key_overrides = 'overrides'
key_notification = 'notification'
key_connectivity_verification_website = 'connectivity_verification_website'
def load_configuration():
    """Populate the global `configuration` and `elapsed_times` from JSON.

    The file defaults to `configuration_default_file`; a command-line
    argument of the form --configuration=<name> overrides it (<name>.json
    is loaded).  Returns True when the file was parsed and contains the
    mandatory keys (websites plus overrides.connectivity_verification_website),
    False otherwise.
    """
    configuration_file = configuration_default_file
    # Skip argv[0] (the script path) directly instead of calling
    # sys.argv.index(arg), which rescans the list on every iteration and
    # mishandles repeated argument values.
    for arg in sys.argv[1:]:
        if param_configuration_name in arg:
            configuration_name = arg.replace(param_configuration_name, "")
            configuration_name = configuration_name.replace(".json", "")
            configuration_name = configuration_name.replace("=", "")
            configuration_name = configuration_name.replace("'", "")
            configuration_name = configuration_name.replace("\"", "")
            configuration_name = configuration_name.replace(" ", "")
            configuration_file = configuration_name + ".json"
    log("Starting Website Sonar (version: " + version + "). Configuration file: " + configuration_file + ".")
    if os.path.isfile(configuration_file):
        try:
            # `with` guarantees the handle is closed (the original leaked it).
            with open(configuration_file) as json_file:
                loaded = json.loads(json_file.read())
            configuration.update(loaded)
            if key_websites in configuration and key_overrides in configuration and \
                    key_connectivity_verification_website in configuration[key_overrides]:
                # Start every monitored website with a zeroed elapsed timer.
                for item in configuration[key_websites]:
                    elapsed_times[item] = 0
                return True
        except Exception as e:
            log("Error: " + str(e))
            return False
    return False
app_log = logging.getLogger('root')
# Runtime switches: debug shortens timings, verbose echoes to stdout,
# do_logging enables the rotating file log.
debug = False
verbose = True
do_logging = True
version = "1.2.0"
# Seconds between main-loop ticks (overridable via overrides.working_frequency).
working_frequency = 1
# Per-site configuration keys.
key_frequency = 'frequency'
key_verification = 'verification'
key_working_frequency = 'working_frequency'
# Default seconds between checks of one site: 10 min (10 s in debug builds).
default_frequency = 10 * 60 if not debug else 10
# Supported notification mechanism names, matched against the configuration.
key_notification_mechanism_println = "Println"
key_notification_mechanism_slack = "Slack-Notifier"
key_notification_mechanism_email = "Email-Notifier"
headers = {'user-agent': 'Website Sonar, version: ' + version}
# Rotating log settings: 10 files of 5 MB (5 files of 1 KB in debug builds).
log_filename = 'website-sonar.log'
log_files_count = 10 if not debug else 5
log_max_file_size = 5 * 1024 * 1024 if not debug else 1024
def log(what):
    # Echo to stdout when verbose, and to the rotating file log when enabled.
    # NOTE(review): Python 2 print-statement syntax; this file is Python 2.
    if verbose:
        print what
    if do_logging:
        app_log.info(what)
def internet_on():
    """Probe the configured connectivity-check URL; True when reachable."""
    probe_url = configuration[key_overrides][key_connectivity_verification_website]
    try:
        urllib2.urlopen(probe_url, timeout=1)
    except urllib2.URLError:
        return False
    return True
def check(website, website_configuration):
    """Fetch a website and decide whether it looks healthy.

    Healthy means an HTTP 200/201 response whose body contains every
    string listed under the site's `verification` key (when present).
    Returns False on any connection failure.
    """
    log("Checking: " + website)
    if "http" not in website:
        # Substring test, so any URL already carrying http/https passes through.
        log("No schema defined for: " + website + ", falling back to default: http:// schema.")
        website = "http://" + website
    try:
        response = requests.get(website, headers=headers)
        if response.status_code not in (200, 201):
            return False
        body = response.text
        for criteria in website_configuration.get(key_verification, []):
            if criteria not in body:
                return False
    except ConnectionError:
        return False
    return True
def perform_check(website):
    # Probe one website and reconcile its up/down state with the
    # unreachable_websites list, notifying only on state transitions.
    if check(website, configuration[key_websites][website]):
        message = "Website " + website + " is ok."
        if website in unreachable_websites:
            # Recovery: the site was down before, announce it is back.
            message = "Website " + website + " is reachable again."
            unreachable_websites.remove(website)
            notify(message)
        log(message)
    else:
        if website not in unreachable_websites:
            # First failure: record it and send the outage alert.
            fail(website)
        else:
            # Already known to be down: just log, do not re-notify.
            log("Website is still not reachable: " + website)
def run(what):
    """Execute each shell command string in `what` with os.system, in order."""
    # NOTE(review): commands go through the shell; callers must pass trusted text.
    for command in what:
        os.system(command)
def fail(website):
    # Mark the website as unreachable and broadcast the outage once;
    # perform_check() suppresses repeat notifications while it stays down.
    unreachable_websites.append(website)
    message = "Website is not reachable: " + website
    log(message)
    notify(message)
    return
def notify(message):
    """Fan the message out to every notification mechanism enabled in the
    configuration: Slack, e-mail, and/or plain console output."""
    if key_notification not in configuration:
        return
    for mechanism in configuration[key_notification]:
        if mechanism == key_notification_mechanism_slack:
            slack(message)
        elif mechanism == key_notification_mechanism_email:
            email(message)
        elif mechanism == key_notification_mechanism_println:
            print ("MSG :: " + message)
def slack(message):
    # Delegate to the external Slack notifier script, but only when online.
    # NOTE(review): the message is interpolated into a shell command line;
    # quotes inside the message would break or inject into the command.
    command = [
        "python Slack/notify.py \"" + message + "\""
    ]
    if internet_on():
        run(command)
def email(message):
    # Delegate to the external e-mail notifier script, but only when online.
    # NOTE(review): same shell-interpolation caveat as slack() above applies
    # to the message text.
    command = [
        "python Email/notify.py \"" + message + "\""
    ]
    if internet_on():
        run(command)
def run_sonar():
    """Entry point: set up logging, load configuration, then poll forever."""
    # Configure both basicConfig and an explicit rotating handler on app_log.
    if do_logging:
        logging.basicConfig(
            filename="website-sonar.log",
            filemode='a',
            format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
            datefmt='%H:%M:%S',
            level=logging.DEBUG
        )
        formatter = logging.Formatter('%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s')
        handler = RotatingFileHandler(
            log_filename, mode='a', maxBytes=log_max_file_size, backupCount=log_files_count, encoding=None, delay=0
        )
        handler.setFormatter(formatter)
        handler.setLevel(logging.DEBUG)
        app_log.setLevel(logging.DEBUG)
        app_log.addHandler(handler)
    if not load_configuration():
        log("Website Sonar (version: " + version + ") could not be started. Could not load configuration JSON.")
        sys.exit(1)
    # Announce startup by e-mail when that mechanism is configured.
    start_message = "Website Sonar (version: " + version + ") is STARTED."
    if key_notification in configuration and key_notification_mechanism_email in configuration[key_notification]:
        email(start_message)
    log(start_message)
    # Main-loop tick length, overridable from the configuration file.
    frequency = working_frequency
    if key_working_frequency in configuration[key_overrides]:
        frequency = configuration[key_overrides][key_working_frequency]
    # Poll loop: every tick advances each site's elapsed timer; a site is
    # actually checked when its timer reaches its (per-site or default)
    # check frequency, and only when the internet connection is up.
    while True:
        time.sleep(frequency)
        for website in elapsed_times:
            elapsed_times[website] = elapsed_times[website] + frequency
            if debug:
                log("Tick. " + str(elapsed_times[website]))
            expected_frequency = default_frequency
            if key_frequency in configuration[key_websites][website]:
                expected_frequency = configuration[key_websites][website][key_frequency]
            if elapsed_times[website] >= expected_frequency:
                elapsed_times[website] = 0
                if not internet_on():
                    log("No internet connection available.")
                    continue
                perform_check(website)
# Script entry point: run the monitoring loop forever.
if __name__ == '__main__':
    run_sonar()
| [
"milos85vasic@gmail.com"
] | milos85vasic@gmail.com |
9c7c5a026f89ccbfb7ccfb066f21b2da5e6310a4 | 03644227f51ff3ebfd0b5321d40c7d392dfcd315 | /exchange_plane/venv/Scripts/easy_install-script.py | 272aecf33bbda4ac76aa86b4c487a6da0a400aa8 | [] | no_license | wufeipku/python | 32fc26b85bafad15fe8f873d9806b6ab8d699310 | 1eb16a01d11aecd23097172a45caf79a4042f3bf | refs/heads/master | 2020-04-29T15:11:11.065925 | 2019-03-18T07:28:45 | 2019-03-18T07:28:45 | 176,220,111 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | #!E:\python36\Pycharmproject\exchange_plane\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"wufei.pku@163.com"
] | wufei.pku@163.com |
c34cf6fe9e2f299d9e4c23ed305ee16c98656660 | 20257efe43389be6da440cf0ae1d28d203cc49c0 | /label/label_V1/srcs/photos.py | 514f826c196a235e8fd79e8737b31042153db91e | [] | no_license | Sebds/Patate | 26a7b8590edffe7a08dae8696d949ab8c5cb36da | 7676189d6051a4530bd47392639e4169217b61c5 | refs/heads/master | 2020-03-26T21:33:29.513474 | 2018-08-19T17:33:00 | 2018-08-19T17:33:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,561 | py | import os
import time
import srcs.color as c
from tkinter import *
from PIL import ImageTk, Image
class Photos(object):
    """Small Tkinter tool to review, label and triage a folder of photos.

    Key bindings (see init_key): digits 0-9 set the current photo's label,
    Up toggles its delete mark, Left/Right navigate, Backspace clears the
    label, Ctrl-S applies all pending moves, Escape applies them and quits.
    Nothing touches the filesystem until save() runs: deletions move files
    into trash_dir, labelled photos move into lab_photos_dir with the label
    prepended to the file name.
    """

    def __init__(self, photos, lab_photos, trash='trash/', auto_next=False, width=800, height=600):
        # Directory paths (expected to end with '/': they are concatenated).
        self.photos_dir = photos
        self.lab_photos_dir = lab_photos
        self.trash_dir = trash
        # Index of the photo currently displayed.
        self.photo_act = 0
        # Tk widget handles, created lazily by init_win()/print_win().
        self.fen = {
            'fen' : None,
            'lab_photo' : None,
            'photo' : None,
            'lab_info' : None
        }
        self.width = width
        self.height = height
        # Image area: full width, window height minus the 50 px info bar.
        self.width_img = self.width
        self.height_img = self.height - 50
        # When True, labelling/marking automatically advances to the next photo.
        self.auto_next = auto_next

    def load(self):
        # (Re)scan the photo directory and reset per-photo status.
        self.photos = os.listdir(self.photos_dir)
        self.photos_inf = [{'del' : False, 'label' : ''} for i in range(len(self.photos))]
        self.photo_act = 0

    def init_win(self, width=0, height=0):
        # Build the Tk window and start the event loop; 0 keeps the size
        # that was given to __init__.
        self.width = width if width != 0 else self.width
        self.width_img = width if width != 0 else self.width_img
        self.height_img = height - 50 if height != 0 else self.height_img
        self.fen['fen'] = Tk()
        self.fen['fen'].title('lab_photo')
        self.print_win()
        self.init_key()
        self.fen['fen'].mainloop()

    def init_key(self):
        # Wire the keyboard shortcuts described in the class docstring.
        self.fen['fen'].bind("<Escape>", self.quit_win)
        self.fen['fen'].bind("<BackSpace>", self.del_label)
        self.fen['fen'].bind("<Control-Key-s>", self.save)
        self.fen['fen'].bind("<Right>", self.next_photo)
        self.fen['fen'].bind("<Left>", self.last_photo)
        self.fen['fen'].bind("<Up>", self.del_photo)
        self.fen['fen'].bind("<KeyPress>", self.event_win)

    def save(self, event):
        # Apply the pending deletions and labellings, then rescan the folder.
        for i in range(len(self.photos)):
            if self.photos_inf[i]['del'] == True:
                print(c.RED + 'DELETE -> ' + c.EOC + self.photos[i])
                os.rename(self.photos_dir + self.photos[i], self.trash_dir + self.photos[i])
            # BUGFIX: this used to be a second independent `if`; when a photo
            # was marked both deleted AND labelled the first rename had
            # already moved the file, so the second os.rename crashed.
            # Deletion now takes precedence over labelling.
            elif self.photos_inf[i]['label'] != '':
                print(c.GREEN + 'LABEL -> ' + c.EOC + self.photos_inf[i]['label'] + '_' + self.photos[i])
                os.rename(self.photos_dir + self.photos[i], self.lab_photos_dir + self.photos_inf[i]['label'] + '_' + self.photos[i])
        self.load()

    def quit_win(self, event):
        # Save pending work, then tear the window down.
        self.save(None)
        self.fen['fen'].destroy()
        self.fen['fen'].quit()

    def del_label(self, event):
        # Backspace: clear the current photo's label.
        self.photos_inf[self.photo_act]['label'] = ''
        if self.auto_next == True:
            self.next_photo(None)
        else:
            self.print_win()

    def event_win(self, event):
        # Digit keys assign the pressed digit as the photo's label.
        if event.char in ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9'):
            self.photos_inf[self.photo_act]['label'] = event.char
            if self.auto_next == True:
                self.next_photo(None)
            else:
                self.print_win()

    def del_photo(self, event):
        # Up arrow: toggle the delete mark on the current photo.
        if self.photos_inf[self.photo_act]['del'] == True:
            self.photos_inf[self.photo_act]['del'] = False
        else:
            self.photos_inf[self.photo_act]['del'] = True
        if self.auto_next == True:
            self.next_photo(None)
        else:
            self.print_win()

    def last_photo(self, event):
        # Left arrow: previous photo, wrapping to the end.
        self.photo_act -= 1
        if self.photo_act < 0:
            self.photo_act = len(self.photos) - 1
        self.print_win()

    def next_photo(self, event):
        # Right arrow: next photo, wrapping to the start.
        self.photo_act += 1
        if self.photo_act >= len(self.photos):
            self.photo_act = 0
        self.print_win()

    def print_win(self):
        # Redraw the photo and the info bar for the current photo.
        if self.fen['lab_photo'] != None:
            self.fen['lab_photo'].destroy()
        if self.fen['lab_info'] != None:
            self.fen['lab_info'].destroy()
        image = Image.open(self.photos_dir + self.photos[self.photo_act])
        image = image.resize((self.width_img, self.height_img), Image.ANTIALIAS)
        self.fen['photo'] = ImageTk.PhotoImage(image)
        self.fen['lab_photo'] = Label(self.fen['fen'], image=self.fen['photo'])
        self.fen['lab_photo'].pack(side=TOP)
        self.fen['lab_info'] = Label(self.fen['fen'], width=32, height=2, font=("Courier", 40))
        # Red background flags a photo marked for deletion.
        if self.photos_inf[self.photo_act]['del'] == True:
            self.fen['lab_info'].configure(bg='red')
        else:
            self.fen['lab_info'].configure(bg='white')
        self.fen['lab_info']['text'] = self.photos_inf[self.photo_act]['label'] + '\t\t' + str(self.photo_act) + '/' + str(len(self.photos))
        self.fen['lab_info'].pack(side=BOTTOM)
| [
"tnicolas@student.42.fr"
] | tnicolas@student.42.fr |
60d4e232d5fa663fa88d5d6da7e0953144542f33 | 9ef0f266173887eafd5c797d13a6538733b39002 | /trimesh/path/entities.py | de2166781a4699322e91ad3e70b13e8fccd4f1c4 | [
"MIT"
] | permissive | MiaoLi/trimesh | a850e3a922e43ce6500085eeaf16df8404ad0f17 | 8f6e537151d914d23180a1c1152d849c41d2c1fa | refs/heads/master | 2021-01-14T12:36:02.831270 | 2015-10-17T01:36:33 | 2015-10-17T01:36:33 | 44,636,986 | 2 | 0 | null | 2015-10-20T21:52:11 | 2015-10-20T21:52:10 | null | UTF-8 | Python | false | false | 5,072 | py | '''
entities.py: basic geometric primitives
Design intent: only store references to vertex indices and pass the vertex
array back to functions that require it.
This keeps all vertices in one external list.
'''
import numpy as np
from .arc import discretize_arc, arc_center
from .curve import discretize_bezier, discretize_bspline
from ..points import unitize
from ..util import replace_references
_HASH_LENGTH = 5
class Entity(object):
    '''
    Base class for path entities (lines, arcs, curves).

    Entities store only *indices* into an external vertex array; callers
    pass the vertex array back in to any method that needs coordinates.
    This keeps all vertices in one shared list owned by the path.
    '''
    def __init__(self,
                 points,
                 closed = False):
        self.points = np.array(points)
        self.closed = closed

    @property
    def _class_id(self):
        '''
        Return an integer that is unique to the class type.
        Note that this implementation will fail if a class is defined
        that starts with the same letter as an existing class.
        Since this function is called a lot, it is a tradeoff between
        speed and robustness where speed won.
        '''
        return ord(self.__class__.__name__[0])

    @property
    def hash(self):
        '''
        Returns a fixed-length integer array unique to the entity.
        If two identical entities exist, they can be removed
        by comparing the value returned by this function.
        '''
        # dtype=int replaces the deprecated np.int alias (removed in
        # NumPy 1.24) and yields the same default integer dtype.
        hash = np.zeros(_HASH_LENGTH, dtype=int)
        hash[-2:] = self._class_id, int(self.closed)
        points_count = np.min([3, len(self.points)])
        hash[0:points_count] = np.sort(self.points)[-points_count:]
        return hash

    def to_dict(self):
        '''
        Returns a dictionary with all of the information about the entity.
        '''
        return {'type'  : self.__class__.__name__,
                'points': self.points.tolist(),
                'closed': self.closed}

    def rereference(self, replacement):
        '''
        Given a replacement dictionary, change points to reflect the dictionary.
        eg, if replacement = {0:107}, self.points = [0,1902] becomes [107, 1902]
        '''
        self.points = replace_references(self.points, replacement)

    @property
    def nodes(self):
        '''
        Returns an (n,2) list of nodes, or vertices on the path.
        Note that this generic class function assumes that all of the reference
        points are on the path, which is true for lines and three point arcs.
        If you were to define another class where that wasn't the case
        (for example, the control points of a bezier curve),
        you would need to implement an entity- specific version of this function.

        The purpose of having a list of nodes is so that they can then be added
        as edges to a graph, so we can use functions to check connectivity,
        extract paths, etc.

        The slicing on this function is essentially just tiling points
        so the first and last vertices aren't repeated. Example:

        self.points = [0,1,2]
        returns:      [[0,1], [1,2]]
        '''
        return np.column_stack((self.points,
                                self.points)).reshape(-1)[1:-1].reshape((-1,2))

    @property
    def end_points(self):
        '''
        Returns the first and last points. Also note that if you
        define a new entity class where the first and last vertices
        in self.points aren't the endpoints of the curve you need to
        implement this function for your class.

        self.points = [0,1,2]
        returns:      [0,2]
        '''
        return self.points[[0,-1]]
class Arc(Entity):
    def discrete(self, vertices, scale=1.0):
        # Sample the three-point arc into a polyline; closed arcs are
        # discretized as a full circle by the helper.
        return discretize_arc(vertices[self.points],
                              close = self.closed,
                              scale = scale)
    def center(self, vertices):
        # Center of the circle fitted through the three referenced points.
        return arc_center(vertices[self.points])
class Line(Entity):
    def discrete(self, vertices, scale=1.0):
        # A line segment's discrete form is just its referenced vertices.
        return vertices[self.points]
class Curve(Entity):
    @property
    def _class_id(self):
        # Sum of all character codes: needed because Bezier and BSpline both
        # start with 'B', which defeats Entity's first-letter scheme.
        return sum([ord(i) for i in self.__class__.__name__])
    @property
    def nodes(self):
        # Only the first, second and last control points are treated as path
        # nodes; interior control points are not on the path itself.
        return [[self.points[0],
                 self.points[1]],
                [self.points[1],
                 self.points[-1]]]
class Bezier(Curve):
    def discrete(self, vertices, scale=1.0):
        # Evaluate the bezier defined by the referenced control points.
        return discretize_bezier(vertices[self.points], scale=scale)
class BSpline(Curve):
    # B-spline curve: control point references plus a knot vector.
    def __init__(self, points, knots, closed=False):
        self.points = points
        self.knots = knots
        self.closed = closed
    def discrete(self, vertices, count=None, scale=1.0):
        # Evaluate the spline; count is forwarded to the discretizer
        # (presumably the number of samples -- see curve.discretize_bspline).
        result = discretize_bspline(control = vertices[self.points],
                                    knots = self.knots,
                                    count = count,
                                    scale = scale)
        return result
| [
"mik3dh@gmail.com"
] | mik3dh@gmail.com |
2b44e8f347c687e6cfc33ec6220eb5f18acfb6ef | e287d17181ca901f52f81662dddcb6e6e34af9d0 | /Fallout's Hacking Game.py | 1361bf710d7eaca2e9b01500a45c519e57fff22b | [] | no_license | MarkMillerKeene/DailyProgrammer | 92819771dce19e7e4671f34198f57127bed9d39e | f03b63051c84827e386c08f96b3f95df393317c3 | refs/heads/master | 2021-01-17T11:54:53.355033 | 2014-11-05T20:48:17 | 2014-11-05T20:48:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | __author__ = 'peiggs'
| [
"mark.miller@ksc.keene.edu"
] | mark.miller@ksc.keene.edu |
611e6bea09e4fc1314eb651ee69043dad69aec8d | 0af76aee48453b64d2f09dfadeb79f4a4ac6fef3 | /solution/practice/data-structures/multiple-choice/how-well-do-you-know-trees/solution.py | fc9723e63948c87114dd0dc2b22b2b8d0c594e5f | [
"MIT"
] | permissive | Abhishek2019/HackerRank | 99ee5d437eb3abe8f041a04bea3968848605a811 | d8a297e2707545957452d07ca564086e3e34a527 | refs/heads/master | 2021-05-03T12:06:18.488528 | 2019-10-30T17:19:59 | 2019-10-30T17:19:59 | 120,493,921 | 0 | 1 | MIT | 2019-10-30T17:20:05 | 2018-02-06T17:08:34 | Python | UTF-8 | Python | false | false | 13 | py | print("n-1")
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
e08349ddfbec65c303385ec355d6356a79d8056f | f9ca6da37554c16211eae83d280765205d98a72d | /gesture_recognizer/picture_cropper.py | e8a989109264ea1d288cc96b69918eec6dbe2f3e | [] | no_license | kgalloway2/VSC-Code | 03f8955f0c6b630ad97dd1d42ca83af64317e6f8 | 7c3d321de7a4880a9c92f57c153cd23a154390f6 | refs/heads/master | 2023-07-09T12:20:02.147818 | 2021-08-09T13:50:06 | 2021-08-09T13:50:06 | 291,090,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | from PIL import Image
# 640x480
# Crop a fixed box out of every capture (test_hand0 .. test_hand105) and
# save the result alongside with a 'cropped_' prefix.
i = 0
while i <= 105:
    img=Image.open("C:/Users/kgtrm/Documents/VSC Code/gesture_recognizer/screenshots/test_hands/test_hand" + str(i) + ".jpg")
    # PIL box is (left, upper, right, lower): a 530x380 region.
    c_i = img.crop(box=(20,20,550,400))
    c_i.save("C:/Users/kgtrm/Documents/VSC Code/gesture_recognizer/screenshots/test_hands/cropped_test_hand" + str(i) + ".jpg")
    i += 1
| [
"keatongalloway@yahoo.com"
] | keatongalloway@yahoo.com |
dee362941322f9741b27f098fc60916cc88f260a | b5e3b4b8e2c70e06e3b19bcd86789b83028da78f | /django_project/blog/migrations/0009_auto_20190620_2016.py | fd3d4764fd5b6fae3dd1dfcc853bd20a330c5b05 | [] | no_license | feridbedelov/Django_Project_Blog_Aurora | 334593d2d523f38e7c472b6e8439cd19f777ec6a | 130f3db455590333c45d40c042722f5908e7bb32 | refs/heads/master | 2020-07-31T15:47:39.431799 | 2019-09-24T17:41:33 | 2019-09-24T17:41:33 | 210,662,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # Generated by Django 2.2.1 on 2019-06-20 16:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: alters Post.rating to a FloatField
    # with default 5.5.  NOTE(review): max_length is not a FloatField option
    # and is ignored by Django; kept exactly as generated.
    dependencies = [
        ('blog', '0008_post_rating'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='rating',
            field=models.FloatField(default=5.5, max_length=20),
        ),
    ]
| [
"User@DESKTOP-K24KG53"
] | User@DESKTOP-K24KG53 |
76732c90be1e6c89d923ed2aabebc32359ae7817 | b73b77dbbd6b4b2c216c1c1e08e5d92c734e545c | /hotel/migrations/0102_auto_20200414_1402.py | 4c95c54d31333b48f288d476d6df915d58142931 | [] | no_license | aadarshachapagain/hotel_booking | 0cf248b78a03277a5208aecb1a72aa1282319ead | 58503c57d2fd6d07fdbe6b7eb113954a0282dc3d | refs/heads/main | 2023-08-27T01:53:21.176194 | 2021-10-01T03:13:42 | 2021-10-01T03:13:42 | 412,294,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # Generated by Django 2.1.5 on 2020-04-14 08:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes BedType.description optional
    # (blank/null allowed) with max_length=500.
    dependencies = [
        ('hotel', '0101_bedtype_status'),
    ]
    operations = [
        migrations.AlterField(
            model_name='bedtype',
            name='description',
            field=models.TextField(blank=True, max_length=500, null=True),
        ),
    ]
| [
"aadarshachapagain@gmail.com"
] | aadarshachapagain@gmail.com |
26464ce47e44f9c4fe06a0efc68c7c7a9c866238 | f3d3ba2921e65a352e6f78fe02f04ddb8a55a8cd | /data/presets.py | 3f876e79971ed19e57a852fb53b9cf01c931a3aa | [] | no_license | ejtalbot/piscripts | ac9f68abce1c2c0711cfb7187cae42fa396feee8 | b5f86edaa8d748108b8316c2b21c79bc2d029071 | refs/heads/main | 2023-07-02T22:23:02.573466 | 2021-08-07T02:16:08 | 2021-08-07T02:16:08 | 341,776,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | snake_templates = {
"rainbow": ["red", "orange_red", "yellow", "electric_green", "blue", "violet"],
"purple_pink": ["pink_orange", "magenta", "purple_pizzazz", "violet"],
"hot": ["red", "persian_red", "orange_red", "selective_yellow"],
"cool": ["aquamarine", "teal", "blue", "violet"],
}
| [
"erikjamestalbot@gmail.com"
] | erikjamestalbot@gmail.com |
900753b09ad104145d9f0ffbfa579ec628962275 | f62cf89d4e87a053c442e24f50cef4eb0ada2263 | /01-Python3基础语法.py | ed343984a27df1a3348f6ad4e7e5b2fdf7dd6e7e | [] | no_license | Yushallchao/PythonPractise | e9024a45b01658805ad39c47c86574d241b11f5e | 4d5fe0a7870af9b38569d715d73f8f057ce9b37d | refs/heads/main | 2023-01-31T20:54:54.449565 | 2020-12-10T06:10:57 | 2020-12-10T06:10:57 | 316,146,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | #默认情况下,Python 3 源码文件以 UTF-8 编码,所有字符串都是 unicode 字符串。 当然你也可以为源码文件指定不同的编码:
# -*- coding: utf-8 -*-
#!/usr/bin/python3
# coding=utf-8
# 第一个注释
print ("Hello, Python3!") # 第二个注释
'''
第三注释
'''
"""
第四注释
"""
if True:
print ("True")
print("Hello, Python3! again")
else:
print ("False")
str = 'Runoob'#python中单引号和双引号使用完全相同 (''="")
print(str)
print(str[0:-1]) # 输出第一个到倒数第二个的所有字符,从右往左以-1开始
print(str[0]) # 输出字符串第一个字符
print(str[2:5]) # 输出从第三个开始到第五个的字符
print(str[2:]) # 输出从第三个开始后的所有字符
print(str * 2) # 输出字符串两次
print(str + '你好') # 连接字符串
print('------------------------------')
print('hello\nrunoob') # 使用反斜杠(\)+n转义特殊字符
print(r'hello\nrunoob') # 在字符串前面添加一个 r,表示原始字符串,不会发生转义
p = input()
print(p)
#Python可以在同一行中使用多条语句,语句之间使用分号(;)分割
import sys;x = 'runoob'; sys.stdout.write(x + '\n')
## 不换行输出在变量末尾加上 end=""
print(str, end="")
print(str, end="")
from sys import argv,path # 导入特定的成员
print('================python from import===================================')
print('path:',path) # 因为已经导入path成员,所以此处引用时不需要加sys.path | [
"yushallchao@163.com"
] | yushallchao@163.com |
3d614af7145d14a806ab6a25d4ba583b74ca5e28 | b7fd24b41333575264a18f4631a0054b8eecea40 | /schema/zst_alarm.py | 7208762ef2097495930ac7fb6ba35582680e3203 | [] | no_license | zhouwanchun/zst_online_server | 5d430d4fe928cb30fed0060f12839eb9519890ca | 955f7ad62f020128c40a3f0ca14848034a3e7bbd | refs/heads/master | 2023-03-17T21:50:35.792788 | 2021-08-18T14:06:54 | 2021-08-18T14:06:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,055 | py | import json
import logging
import time
import requests
# Get an instance of a logger
logger = logging.getLogger(__name__)
class WexinAlarm:
    """Sends alarm messages through the WeChat Work (qyapi.weixin.qq.com)
    REST API, caching the access token until it expires."""
    def __init__(self):
        # Cached access token and its expiry time in unix seconds; the
        # initial expiry equals "now" so the first call always refreshes.
        self.token = ""
        self.expired = int(time.time())

    def refresh_token(self):
        # Fetch a fresh access token unless the cached one is still valid.
        now = int(time.time())
        if now < self.expired and len(self.token) > 0:
            return

        # TODO: move the corp id / secret into the configuration file.
        # NOTE(review): a live API secret is hardcoded in source control here.
        url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=ww2ef294fd1f043429&corpsecret=deLb5gd4hiP-l5ekwbEZ6h1WZbGz43VPOWgqwRrfqIM"
        response = requests.request("GET", url, headers={}, data={})
        if response.status_code > 300:
            logger.error("error status code for weixin token: %d", response.status_code)
            return
        resp_obj = json.loads(response.text)
        # The API signals errors in the body (errcode != 0), not via HTTP status.
        if resp_obj['errcode'] != 0:
            logger.error("failed to get token: %s", resp_obj['errmsg'])
            return
        self.token = resp_obj['access_token']
        self.expired = int(time.time()) + resp_obj['expires_in']

    def send_msg(self, users, msg):
        # Push a plain-text message to the given users via agent 1000002.
        self.refresh_token()
        url = "https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=" + self.token
        payload = {
            "touser": users,
            "toparty": "1",
            "msgtype": "text",
            "agentid": 1000002,
            "text": {
                "content": msg
            },
            "safe": 0,
            "enable_id_trans": 0,
            "enable_duplicate_check": 0,
            "duplicate_check_interval": 1800
        }
        # When sending JSON the 'Content-Type': 'application/json' header
        # must be set; without it the API may answer with:
        # 400 bad request
        # 415 unsupported media type
        headers = {
            'Content-Type': 'application/json'
        }
        resp = requests.request("POST", url, headers=headers, data=json.dumps(payload))
        if resp.status_code >= 300:
            logger.error('failed to send message to wechat: %s', resp.text)
| [
"text.zwb@gmail.com"
] | text.zwb@gmail.com |
d772e8a81b0341e954f8e91fbfad37c97cf003c4 | b7fa6ec316abd8b0df7a873f2a0f82ed55e13c0e | /Datos/Operator_In-Contando_Vocales.py | dfd28a47d4377f0b3ad975603c990ad5a5576c59 | [
"MIT"
] | permissive | CaosMx/Code-Exercises-in-Python-Language | 5230ec32f9606563bc92d77415f11b12946803f4 | 0693e445a48cf8b06432affbf72c9182ce9cfb20 | refs/heads/main | 2023-02-01T16:03:05.763750 | 2020-12-14T02:41:29 | 2020-12-14T02:41:29 | 321,198,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | # CaosMX
# Dic 2020
# Ex Python Practica
'''
Given a word, count how many vowels it contains.

Uses the `in` operator, which tells us whether a character taken from one
string exists inside another, predefined string.
Following the Python course by Manuel Gonzalez:
https://www.youtube.com/channel/UCQLWbxZbgftDIhw21i6q_OA/featured
https://programarescomounjuego.blogspot.com
'''
# Input: the word to analyse.
palabra = input("Dame un una palabra: ")
# Reference string holding the vowels (plain and accented).
vocales = "aeiouáéíóú"
# Vowel counter.
num_vocales = 0
# Iterate directly over the characters: the idiomatic `for` replaces the
# original manual-index `while` loop and cannot go out of bounds.
for letra in palabra:
    if letra in vocales:
        num_vocales += 1
print("La cantidad de vocales es: ", num_vocales)
print ("La cantidad de vocales es: ", num_vocales) | [
"ing.manuel.arreola@gmail.com"
] | ing.manuel.arreola@gmail.com |
f92c4c2d30adeab12a1909fe55ab12ef7f60d039 | 0bff1f5481f5f83d2053a165839489a1f787e433 | /tweets/models.py | 0f0457fe9ac4c66c0ead9896570035fa931205ed | [] | no_license | tong1yi/my-django-twitter | d92b81a5f74f1251fae273ee11cb07851bd97565 | c1909b7f541fe8062bed6e5add068b0e855cfec9 | refs/heads/main | 2023-06-06T06:03:39.629167 | 2021-06-24T03:14:45 | 2021-06-24T03:14:45 | 380,594,421 | 0 | 0 | null | 2021-06-26T20:54:36 | 2021-06-26T20:54:35 | null | UTF-8 | Python | false | false | 3,888 | py | from django.db import models
from django.contrib.auth.models import User
from utils.time_helpers import utc_now
from django.contrib.contenttypes.models import ContentType
from likes.models import Like
from tweets.constants import TweetPhotoStatus, TWEET_PHOTO_STATUS_CHOICES
# https://stackoverflow.com/questions/35129697/difference-between-model-fieldsin-django-and-serializer-fieldsin-django-rest
# Create your models here.
class Tweet(models.Model):
    """A short user-authored post.

    Exposes `hours_to_now` (age of the tweet in whole hours) and
    `like_set` (all likes on this tweet, newest first).
    """

    user = models.ForeignKey(
        User,
        on_delete=models.SET_NULL,
        null=True,
        help_text="This user refers to the user who posts this tweet.",
        verbose_name=u"谁发了这个帖子",
    )
    content = models.CharField(max_length=255)
    # Timezone-aware (uses the vagrant/server timezone).
    created_at = models.DateTimeField(auto_now_add=True)

    # Meta carries configuration applied when the model's table is created.
    class Meta:
        # Compound index — conceptually a hidden sorted table like:
        # [
        #   ('user', 'created_at', 'id'),
        #   ...
        # ]
        # Adding/changing an index still requires makemigrations + migrate.
        index_together = (
            ('user', 'created_at'),
        )
        # Default ordering for Tweet querysets that don't specify order_by().
        # This only affects query-result ordering, not the database schema.
        ordering = ('user', '-created_at')

    @property
    def hours_to_now(self):
        # utc_now() is timezone-aware, matching the aware created_at.
        # Fixed: timedelta.seconds only holds the sub-day remainder, so the
        # previous `.seconds // 3600` was wrong for tweets older than 24
        # hours; the .days component must be included as well.
        delta = utc_now() - self.created_at
        return delta.days * 24 + delta.seconds // 3600

    def __str__(self):
        # Shown when doing print(tweet_instance).
        return f'{self.created_at} {self.user}: {self.content}'

    @property
    def like_set(self):
        # All likes attached to this tweet, newest first.
        return Like.objects.filter(
            content_type=ContentType.objects.get_for_model(Tweet),
            object_id=self.id,
        ).order_by('-created_at')
class TweetPhoto(models.Model):
    """A photo attached to a tweet, with review status and soft delete."""

    # Which tweet this photo belongs to.
    tweet = models.ForeignKey(Tweet, on_delete=models.SET_NULL, null=True)
    # Who uploaded the photo. Although this can be derived from the tweet,
    # denormalizing it here is convenient in practice: e.g. a user who often
    # uploads illegal photos can have new uploads flagged for closer review,
    # and banning all photos from one user can be filtered quickly through
    # this model.
    user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
    # The image file itself.
    file = models.FileField()
    order = models.IntegerField(default=0)
    # Photo status, used for moderation/review workflows.
    status = models.IntegerField(
        default=TweetPhotoStatus.PENDING,
        choices=TWEET_PHOTO_STATUS_CHOICES,
    )
    # Soft-delete flag: when a photo is deleted it is first only marked as
    # deleted and is truly removed some time later. Hard-deleting photos
    # synchronously when a tweet is removed would take noticeable time and
    # hurt efficiency; an async background task can do the real delete.
    has_deleted = models.BooleanField(default=False)
    deleted_at = models.DateTimeField(null=True)
    created_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        index_together = (
            ('user', 'created_at'),
            ('has_deleted', 'created_at'),
            ('status', 'created_at'),
            ('tweet', 'order'),
        )

    def __str__(self):
        return f'{self.tweet_id}: {self.file}'
| [
"wilburzjh@gmail.com"
] | wilburzjh@gmail.com |
acbeb910b65258b18b71182806b2cc75e84ffa03 | 3b1efdd0aacc98738f3b8b9ee09c6ff59cccc14e | /ietf/person/factories.py | e076b4ef72e4bec53e2bc6a55c5798054d06ced0 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | unofficial-mirror/ietfdb | 15beb6bf17b1d4abb257ee656ac6b7488339d331 | ce54adb30dc7299c6eb4d42b9aa9d2c2929c1a81 | refs/heads/master | 2020-08-06T17:24:13.966746 | 2019-10-04T20:54:05 | 2019-10-04T20:54:05 | 213,088,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,456 | py | # Copyright The IETF Trust 2015-2019, All Rights Reserved
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import factory
import faker
import faker.config
import os
import random
import shutil
from unidecode import unidecode
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.text import slugify
from django.utils.encoding import force_text
import debug # pyflakes:ignore
from ietf.person.models import Person, Alias, Email
from ietf.person.name import normalize_name, unidecode_name
fake = faker.Factory.create()
def random_faker():
    """Return a ``faker.Faker`` instance built with one randomly chosen locale.

    The transliteration of some Arabic and Devanagari names introduces
    non-alphabetic characters that don't work with the draft author
    extraction code, and also don't seem to match the way people with
    Arabic names romanize them. Those locales are excluded from name
    generation in order to avoid test failures.
    """
    locales = [
        locale for locale in faker.config.AVAILABLE_LOCALES
        if not (locale.startswith('ar_') or locale.startswith('sg_'))
    ]
    # Fixed: random.sample() on a set is deprecated since Python 3.9 and
    # raises TypeError on 3.11+; pick from a list with random.choice instead.
    return faker.Faker(random.choice(locales))
class UserFactory(factory.DjangoModelFactory):
    """Factory for django.contrib.auth User objects with faker-built data."""

    class Meta:
        model = User
        django_get_or_create = ('username',)
        # `faker` is a factory-internal helper only; it must not be passed
        # to the User() constructor.
        exclude = ['faker', ]

    # A per-instance Faker with a random locale, so generated names vary
    # across locales.
    faker = factory.LazyFunction(random_faker)
    first_name = factory.LazyAttribute(lambda o: o.faker.first_name())
    last_name = factory.LazyAttribute(lambda o: o.faker.last_name())
    # Email embeds the slugified (ASCII-safe) names plus the factory
    # sequence number `n` to keep addresses unique.
    email = factory.LazyAttributeSequence(lambda u, n: '%s.%s_%d@%s'%( slugify(unidecode(u.first_name)),
                                                slugify(unidecode(u.last_name)), n, fake.domain_name()))
    username = factory.LazyAttribute(lambda u: u.email)

    @factory.post_generation
    def set_password(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
        # Deterministic password derived from the username — convenient for
        # logging in as the generated user in tests.
        obj.set_password( '%s+password' % obj.username ) # pylint: disable=no-value-for-parameter
class PersonFactory(factory.DjangoModelFactory):
    """Factory for Person objects, backed by a generated User.

    Post-generation hooks add the default Alias rows, the default Email,
    and (when a biography is present) a placeholder photo.
    """

    class Meta:
        model = Person

    user = factory.SubFactory(UserFactory)
    # Person name derived from the generated user's first/last name.
    name = factory.LazyAttribute(lambda p: normalize_name('%s %s'%(p.user.first_name, p.user.last_name)))
    ascii = factory.LazyAttribute(lambda p: force_text(unidecode_name(p.name)))

    class Params:
        # PersonFactory(with_bio=True) also fills in a fake biography.
        with_bio = factory.Trait(biography = "\n\n".join(fake.paragraphs()))

    @factory.post_generation
    def default_aliases(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
        # Aliases for the full name, its ASCII form, and the "plain"
        # variants when they differ from the former two.
        make_alias = getattr(AliasFactory, 'create' if create else 'build')
        make_alias(person=obj,name=obj.name)
        make_alias(person=obj,name=obj.ascii)
        if obj.name != obj.plain_name():
            make_alias(person=obj,name=obj.plain_name())
        if obj.ascii != obj.plain_ascii():
            make_alias(person=obj,name=obj.plain_ascii())

    @factory.post_generation
    def default_emails(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
        # Pass default_emails=False to skip creating the Email row.
        if extracted is None:
            extracted = True
        if create and extracted:
            make_email = getattr(EmailFactory, 'create' if create else 'build')
            make_email(person=obj, address=obj.user.email)

    @factory.post_generation
    def default_photo(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
        import atexit
        # Only people with a biography get a (placeholder) photo.
        if obj.biography:
            photo_name = obj.photo_name()
            media_name = "%s/%s.jpg" % (settings.PHOTOS_DIRNAME, photo_name)
            obj.photo = media_name
            obj.photo_thumb = media_name
            photosrc = os.path.join(settings.TEST_DATA_DIR, "profile-default.jpg")
            photodst = os.path.join(settings.PHOTOS_DIR, photo_name + '.jpg')
            if not os.path.exists(photodst):
                shutil.copy(photosrc, photodst)
            # Clean up the copied placeholder photo at interpreter exit.
            def delete_file(file):
                os.unlink(file)
            atexit.register(delete_file, photodst)
class AliasFactory(factory.DjangoModelFactory):
    """Factory for Alias rows that skips duplicates per person.

    Note: ``_create`` returns None (rather than an Alias instance) when an
    alias with the same name already exists for the person.
    """

    @classmethod
    def _create(cls, model_class, *args, **kwargs):
        person = kwargs['person']
        name = kwargs['name']
        # Only create the alias if this person does not already have one
        # with the same name.
        existing_aliases = set(model_class.objects.filter(person=person).values_list('name', flat=True))
        if name not in existing_aliases:
            obj = model_class(*args, **kwargs)
            obj.save()
            return obj

    class Meta:
        model = Alias

    name = factory.Faker('name')
def fake_email_address(n):
    """Generate a fake email address short enough for Email.address.

    `n` is the factory sequence number, mixed into the local part for
    uniqueness. Raises RuntimeError if 10 attempts in a row all exceed
    the column's max_length.
    """
    address_field = [ f for f in Email._meta.fields if f.name == 'address'][0]
    count = 0
    while True:
        address = '%s.%s_%d@%s' % (
            slugify(unidecode(fake.first_name())),
            slugify(unidecode(fake.last_name())),
            n, fake.domain_name()
        )
        count += 1
        if len(address) <= address_field.max_length:
            break
        if count >= 10:
            # Fixed typo: was `address_field.max_lenth`, which raised
            # AttributeError instead of the intended RuntimeError.
            raise RuntimeError("Failed generating a fake email address to fit in Email.address(max_length=%s)"%address_field.max_length)
    return address
class EmailFactory(factory.DjangoModelFactory):
    """Factory for Email objects; addresses come from fake_email_address()."""

    class Meta:
        model = Email
        django_get_or_create = ('address',)

    address = factory.Sequence(fake_email_address)
    person = factory.SubFactory(PersonFactory)
    active = True
    primary = False
    # Record where the address came from: the owning user's username if the
    # person has a user, otherwise the empty string.
    origin = factory.LazyAttribute(lambda obj: obj.person.user.username if obj.person.user else '')
| [
"henrik@levkowetz.com"
] | henrik@levkowetz.com |
ebbb121bfb497e7f272ba80d191c5af2c3a9b31d | 04adc1a7ae0f9577076321a5931b7816cacc980b | /Exercicios/media-de-lista-com-input-e-1-while.py | 54a19fcbeed414daebbc49a459bf230dbdd8f22b | [] | no_license | jacquesfelipe/python-learning | 455db8ab474edf3e230c20667aa54d194381b7dd | 668a0c6393655e18841c5ca76bfed9de54d13f32 | refs/heads/main | 2023-06-30T11:11:05.384635 | 2021-08-08T21:34:59 | 2021-08-08T21:34:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | """Média de lista com input"""
notas = []
x = 0
soma = 0
while x <= 3:
notas.append(float(input("Digite qual nota gostaria de adicionar: ")))
soma = soma + notas[x]
x = x + 1
media = soma / (len(notas))
print(f"A média das notas: {[notas]} é: {media}")
| [
"jacquesfelipej@gmail.com"
] | jacquesfelipej@gmail.com |
5352686f4e473327fc059b46ee1eb30a3308f534 | 8efd2eccd36946f430f1243e13070685d4695bfe | /satfire/tests/test_utils.py | bc3d156a7a881af9851ed304324705627a97bc8a | [] | no_license | pytroll/satfire | 5ca99ccb2b346692eb5fd136c917fd74e55d36d5 | f8bc309ed84aa92673cc02c61eeef0cc997b662b | refs/heads/master | 2020-05-05T13:36:52.702079 | 2020-02-04T13:29:32 | 2020-02-04T13:29:32 | 180,085,989 | 4 | 2 | null | 2020-02-04T13:29:33 | 2019-04-08T06:40:30 | Python | UTF-8 | Python | false | false | 8,356 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, Panu Lahtinen / FMI
#
# Author(s):
#
# Panu Lahtinen <panu.lahtinen@fmi.fi>
"""Unit testing for utils
"""
import sys
import os.path
from collections import OrderedDict
import numpy as np
from satfire import utils
from posttroll.message import Message
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class TestUtils(unittest.TestCase):
yaml_config = """config:
item_1: 1
item_2: 2
"""
def test_ordered_load(self):
fid = StringIO(self.yaml_config)
res = utils.ordered_load(fid)
fid.close()
self.assertTrue(list(res.keys())[0] == "config")
keys = list(res["config"].keys())
self.assertTrue(keys[0] == "item_1")
self.assertTrue(res["config"][keys[0]] == 1)
self.assertTrue(keys[1] == "item_2")
self.assertTrue(res["config"][keys[1]] == 2)
def test_read_config(self):
config = utils.read_config(os.path.join(os.path.dirname(__file__),
"test_data", "config.yaml"))
self.assertTrue(len(config) > 0)
keys = list(config.keys())
self.assertTrue(isinstance(config, OrderedDict))
self.assertEqual(keys[0], 'item_1')
self.assertTrue(isinstance(config['item_1'], str))
self.assertTrue(isinstance(config['item_2'], list))
self.assertTrue(isinstance(config['item_3'], OrderedDict))
self.assertTrue(isinstance(config['item_4'], int))
def test_get_filenames_from_msg(self):
config = {"cma_message_tag": "pps",
"sat_message_tag": "hrpt"}
cma_fname = "/tmp/foo.nc"
sat_fname = "/tmp/bar.l1b"
# Both files present
data = {"collection":
{"pps":
{"dataset":
[{"uri": cma_fname}]},
"hrpt":
{"dataset":
[{"uri": sat_fname}]}}}
msg = Message("/topic", "collection", data)
sat, cma = utils.get_filenames_from_msg(msg, config)
self.assertEqual(sat, sat_fname)
self.assertEqual(cma, cma_fname)
# Only satellite file
data = {"collection":
{"hrpt":
{"dataset":
[{"uri": sat_fname}]}}}
msg = Message("/topic", "collection", data)
sat, cma = utils.get_filenames_from_msg(msg, config)
self.assertEqual(sat, sat_fname)
self.assertIsNone(cma)
# Only cloud mask file
data = {"collection":
{"pps":
{"dataset":
[{"uri": cma_fname}]}}}
msg = Message("/topic", "collection", data)
sat, cma = utils.get_filenames_from_msg(msg, config)
self.assertEqual(cma, cma_fname)
self.assertIsNone(sat)
# No files
data = {"collection": {}}
msg = Message("/topic", "dataset", data)
sat, cma = utils.get_filenames_from_msg(msg, config)
self.assertIsNone(cma)
self.assertIsNone(sat)
def test_get_idxs_around_location(self):
side = 5
# Note that the centre pixel is always masked out
y_cor = np.array([0, 1, 2, 3, 4,
0, 1, 2, 3, 4,
0, 1, 3, 4,
0, 1, 2, 3, 4,
0, 1, 2, 3, 4])
x_cor = np.array([0, 0, 0, 0, 0,
1, 1, 1, 1, 1,
2, 2, 2, 2,
3, 3, 3, 3, 3,
4, 4, 4, 4, 4])
y_res, x_res = utils.get_idxs_around_location(2, 2, side,
remove_neighbours=False)
self.assertTrue(y_res.size == 24)
self.assertTrue(x_res.size == 24)
self.assertTrue((y_cor == y_res).all())
self.assertTrue((x_cor == x_res).all())
side = 5
y_cor = np.array([0, 1, 2, 3, 4,
0, 4,
0, 4,
0, 4,
0, 1, 2, 3, 4])
x_cor = np.array([0, 0, 0, 0, 0,
1, 1,
2, 2,
3, 3,
4, 4, 4, 4, 4])
y_res, x_res = utils.get_idxs_around_location(2, 2, side,
remove_neighbours=True)
self.assertTrue(y_res.size == side * side - 9)
self.assertTrue(x_res.size == side * side - 9)
self.assertTrue((y_cor == y_res).all())
self.assertTrue((x_cor == x_res).all())
def test_calc_footprint_size(self):
sat_zens = np.array([0, 68.5])
ifov = 1.4e-3
sat_alt = 830.
max_swath_width = 1446.58
along, across = utils.calc_footprint_size(sat_zens, ifov, sat_alt,
max_swath_width)
self.assertAlmostEqual(along[0], 1.16, 2)
self.assertAlmostEqual(along[1], 2.46, 2)
self.assertAlmostEqual(across[0], 1.16, 2)
self.assertAlmostEqual(across[1], 6.70, 2)
def test_haversine(self):
lon1, lat1 = 25., 60.
lon2, lat2 = 21.3, 68.3
dists, bearings = utils.haversine(lon1, lat1, lon2, lat2,
calc_bearings=True)
self.assertAlmostEqual(dists[0], 939.8, 1)
self.assertAlmostEqual(bearings[0], 350.66, 2)
lon1, lat1 = 0, 0
lon2, lat2 = 0, 90
dists, bearings = utils.haversine(lon1, lat1, lon2, lat2,
calc_bearings=True)
self.assertAlmostEqual(dists[0], 10007.9, 1)
self.assertAlmostEqual(bearings[0], 0.0, 1)
lon1, lat1 = 0, 0
lon2, lat2 = 90, 0
dists, bearings = utils.haversine(lon1, lat1, lon2, lat2,
calc_bearings=True)
self.assertAlmostEqual(dists[0], 10007.9, 1)
self.assertAlmostEqual(bearings[0], 90.0, 1)
lon1, lat1 = 0, 0
lon2, lat2 = -90, 0
dists, bearings = utils.haversine(lon1, lat1, lon2, lat2,
calc_bearings=True)
self.assertAlmostEqual(dists[0], 10007.9, 1)
self.assertAlmostEqual(bearings[0], 270.0, 1)
lon1, lat1 = 0, 0
lon2, lat2 = 0, -90
dists, bearings = utils.haversine(lon1, lat1, lon2, lat2,
calc_bearings=True)
self.assertAlmostEqual(dists[0], 10007.9, 1)
self.assertAlmostEqual(bearings[0], 180.0, 1)
lon1, lat1 = 0, 0
lon2, lat2 = 0, -90
dists, bearings = utils.haversine(lon1, lat1, lon2, lat2,
calc_bearings=False)
self.assertAlmostEqual(dists[0], 10007.9, 1)
self.assertIsNone(bearings)
def test_ensure_numpy(self):
res = utils.ensure_numpy(1, dtype=None)
self.assertTrue(isinstance(res, np.ndarray))
self.assertTrue(res.dtype == np.int64)
self.assertEqual(res[0], 1)
res = utils.ensure_numpy(1, dtype=np.float32)
self.assertTrue(isinstance(res, np.ndarray))
self.assertTrue(res.dtype == np.float32)
self.assertEqual(res[0], 1.0)
res = utils.ensure_numpy([1], dtype=np.float32)
self.assertTrue(isinstance(res, np.ndarray))
self.assertTrue(res.dtype == np.float32)
self.assertEqual(res[0], 1.0)
res = utils.ensure_numpy(np.array([1]), dtype=np.float32)
self.assertTrue(isinstance(res, np.ndarray))
self.assertTrue(res.dtype == np.float32)
self.assertEqual(res[0], 1.0)
res = utils.ensure_numpy(np.array(1), dtype=np.float32)
self.assertTrue(isinstance(res, np.ndarray))
self.assertTrue(res.dtype == np.float32)
self.assertEqual(res[0], 1.0)
def suite():
"""The suite for test_utils
"""
loader = unittest.TestLoader()
mysuite = unittest.TestSuite()
mysuite.addTest(loader.loadTestsFromTestCase(TestUtils))
return mysuite
if __name__ == "__main__":
unittest.TextTestRunner(verbosity=2).run(suite())
| [
"pnuu+git@iki.fi"
] | pnuu+git@iki.fi |
d872427d19cab578ba3812d427c71e3f1ce07cee | efea54ec2c6b63ca8904fb3fcbee94102aa256ed | /AprilCookpff/1.py | 3cad934b5a7b7e8eff188c6aa3f4ffa12dc55f2b | [] | no_license | ArefinMizan/Codechef-Solutions | 427198e736da8089001818b96109ab7a2e637497 | 01dd0caab636c3c9d39be87ee57ba867f3ea4c87 | refs/heads/master | 2023-03-15T23:00:13.347656 | 2020-01-20T09:59:17 | 2020-01-20T09:59:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | def main():
for _ in range(int(input())):
(n,m) = map(int, input().split())
print((n-1)*(m-1))
if __name__ == '__main__':
main() | [
"dillu9878@gmail.com"
] | dillu9878@gmail.com |
45b19b1cab318c1d3e4c9a7783e0ebccc5e46929 | ff5d86192ad048737716528d4d59e0bc506e0bfd | /76.py | d4a3a4d3ce102fc5e8151c223ffe9deb4eddfdb7 | [] | no_license | ggddessgxh/aron | 4c7d0b42ee8a8ef9c8edf5e2528beb36cf5b632f | 69c42a0269d46d88287bc753a4e860d3ea5311f7 | refs/heads/master | 2020-04-17T11:46:54.915655 | 2019-01-31T13:36:27 | 2019-01-31T13:36:27 | 166,555,227 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | a = 809
# Classic digit puzzle: find every two-digit i where i * a (a == 809, set
# just above) gives a four-digit product, 8*i is still two digits, and 9*i
# already reaches three digits.
for i in range(10, 100):
    b = i * a
    four_digit_product = 1000 <= b <= 10000
    partials_ok = 8 * i < 100 and 9 * i >= 100
    if four_digit_product and partials_ok:
        print(i)
        print(i * 809)
| [
"1249440711@qq.com"
] | 1249440711@qq.com |
bbadb5f2b9ced965d16217dd49cadebc56d5713c | 2a720f618609e6e28a01cba5f915c5b52285db53 | /donghyeon's python/python.review(1).py | 04d0623a491368075bdddff1b1625e2910522e80 | [] | no_license | kai3n/fastcampus | 87f923dda0712a42644f67581650ccd99a1cd2e8 | 9363c948b50e1789a64f58ce206d49d71d93c801 | refs/heads/master | 2021-01-10T09:34:56.222510 | 2016-02-16T11:22:02 | 2016-02-16T11:22:02 | 49,542,285 | 11 | 4 | null | 2016-01-13T13:45:25 | 2016-01-13T02:01:23 | Python | UTF-8 | Python | false | false | 23,920 | py | '''파이썬 정리 파일
1 . 파이썬에서 변수 혹은 리터럴값의 타입을 알고싶다면 type()를 사용하면된다''' #tpye()
a= 7
print('----1번----')
print(type(a))
print(type(58))
print(type(99.9))
print(type('abc'))
"""2 . 파이썬의 연산자 특징 알아보기""" #연산자특징
print('----2번----')
print(5 + 8) #덧셈 연산자
print(90 - 10) #뺄셈 연산자
print(4 * 7) #곱셈 연산자
print(7 / 2) #나눗셈 연산자
print(7 // 2) # 나눗셈을 한후 나머지값은 버린체 결과 나옴
print(7 % 3) # 나눗셈을 한수 나머지의 결과값만 나옴
print(3 ** 4) #3의 4제곱을 의미함
""" 3. 형변환 (다른데이터타입을 정수형으로 변환 시킬려면 int()함수를 사용한다 이함수는 소수점을 버리고 정수를 반환한다""" #형변환
print("----3번------")
print(int(True))
print(int(False))
print(int(98.6))
print(int(1.0e4))
print(int(4+7.0)) #<< 숫자 타입을 적어도 자동으로 형변환!(파이썬의 위대함)
#부동소수점수로 형변환
print(float(True))
print(float(98))
print(float('1.0e4'))
#str() 를 이용하여 데이터타입을 문자열로 변환
print(str(102))
print(str(True)) # 여기서 boolean 값이랑 프린트된 True 값은 다르다
""" 4. 문자열 """ #문자
#문자 앞에 백슬레시(\)기호를 붙임으로써 특별한 의미를 줄수있다.
palindrome = 'A man,\nA plan,\nA canal:\nPanama.' #\n 은 new line 이란소리로 프린트될때 다음줄로 옮겨진다.
print('---4번----')
print(palindrome)
# 4-1. \t = tab
print('------4-1---')
print('\tabc')
print('a\tbc')
print('ab\tc')
print('abc\t')
#4-2. 결합하고 복제하기
print('Release the Kraken!' + 'At once@') # 결합
start = "Na" * 4 + '\n'
middle = "Hey" * 3 + '\n'
end = 'Goodbye'
print('-------4-2-------')
print(start+start+middle+end)
#4-3. 문자 추출
letters = 'abcdefghijklnmopqrstuvwxyz'
print('------4-3------')
print(letters[0]) # [] 안에 보고 싶은 인덴트를 넣는다
print(letters[-2])
print(letters[25])
#4-4 슬라이스 (문자열에서 문자열 일부를 추출한다)
# [:] - 처음부터 끝까지 전체 시퀀스 추출
# [start:] - start 오프셋(자리)부터 끝까지 시퀀스를 추출
# [:end] - 처음부터 end-1 오프셋(자리) 까지 시퀀스 추출
# [start:end] - start 오프셋부터 end-1 오프셋 까지 시퀀스를 추출
# [start:end:step] - step 만큼 문자를 건너 뛰면서 start 오프셋부터 end-1 오프셋 까지 시퀀스 추출
print('--------4-4-------')
print(letters[:])
print(letters[20:])
print(letters[:20])
print(letters[-3:])
print(letters[::7])
print(letters[4:20:3])
print(letters[19::4])
#4-5 len() << 길이알려주는
print('------4-5-----')
print(len(letters),':'," 알파벳 a~z 몇개인지")
#4-6 문자열 결합하고 분리하는 join() , split() 함수
print('-------4-6---------')
crypto_list = ['Yetl', 'Bigfoot', 'Loch Ness Monster']
crypto_string = ','.join(crypto_list)
print("Found and signing book deals:", crypto_string)
todos = 'get gloves, get mask, give cat vitamins,call ambulance'
print('------------------')
print(todos.split(',')), print('쉼표를 기준으로 분류함 ')
print(todos.split()), print('공백을 기준으로 분류함')
#4-7 문자열을 자유롭게 다뤄보자
# .startswith(' ') = ' '로 시작하는가
# .endswith(' ') = ' ' 로 끝나는가
# .find(' ') = ' ' 값이 첫번째로 나오는값 찾기
# .rfind(' ') = ' ' 마지막으로 나오는값 찾기
# .count(' ') = ' ' 몇번나오는가
# .isalbum() = ' ' 알파뱃과 숫자로만 이루어져있는가
poem = "All that doth flow we cannot liquid nameor else would fire " \
"and water be the same; But that is liquid Which is moist and " \
"wetFire that property can never get.Then 'tis not cold tha doth " \
"the fire put outBut 'tis the wet that makes it die, no doubt.'"
print('-----------4-7------------')
#처음 13자 출력
print(poem[:13])
# 이시는 몇글자로 되있는가?
print(len(poem))
#이시는 All 로 시작하는가 ?
print(poem.startswith('All'))
#이시는 That's all,folks! 로 끝나는가?
print(poem.endswith("That's all,folks!"))
#이시에서 첫번째로 the 가 나오는 오프셋은?
print(poem.find("the"))
#이시에서 마지막으로 the 가 나오는 오프셋은?
print(poem.rfind("the"))
#세글자 the 가 몇번 나오는가 ?
print(poem.count("the"))
# 이시는 글자와 숫자로만 이루어져 있는가?
print(poem.isalnum())
#4-8
# .strip('.') 마침표 제거 앞과 끝에서부터 찾는다 바로 못찾으면 실행안됨
# .capitalize() 앞에있는거 대문자 바꾸기
# .title() 모든단어 첫글자 대문자로바꾸기
# .upper() 전체 대문자
# .lower() 전체 소문자
# .replace('a','b') 기존에 있던 a를 b로 바꿈
setup = 'a duck goes into a bar...'
print('------4-8------')
#','들 제거 해보기
print(setup.strip('.'))
print(setup.strip('into')) #사이에 낀 into 를 찾을수없는걸로봐서 바로앞과 뒤의 위치만 성립되는듯
# 첫단어를 대문자로 만들기
print(setup.capitalize())
# 모든단어의 첫글자를 다 대문자로
print(setup.title())
# 글자를 모두 대문자로
print(setup.upper())
# 글자를 모두 소문자로
print(setup.lower())
# duck 를 marmoset 으로 바꾸기
print(setup.replace('duck','marmoset'))
"""5 파이썬에서는 두가지 다른 시퀀스(순서되로 저장되는) 구조가 있다. 튜플과 리스트이다. 파이썬은 왜 이두가지를 #리스트
모두 포함하고 있을까?
튜플은 불변한다. 즉 튜플에 항목을 할당하고 이를 바꿀수 없다. - 활용 :절대바뀌면안되는값을 만들때
리스트는 항목을 할당하고 자유롭게 수정하거나 삭제할 수 있다.
우선 리스트부터
"""
#5리스트 : 항목할당후 자유롭게 수정, 리스트는 데이터를 순차적으로 파악하는데 유용하다. 특히 데이터의 순서가 바뀔수 있다는
#점에서 유용하다. 리스트를 묶을땐 [ ] <<사용한다
empty_list = []
weekdays = ['MON','TUE','WED','THU','FRI']
big_birds = ['emu','ostrich','cassowary']
first_name = ['graham','john','terry','Michael']
another_empty_list = list()
print('----------5------------')
print(empty_list)
print(weekdays)
print(big_birds)
print(first_name)
print(another_empty_list)
# 5-1 데이터 타입>> 리스트
# 5-2 튜플 >> 리스트
# 5-3 split()으로나뉜 문자열 >> 리스트
print('---------5-1------')
print(list('cat'))
print('---------5-2------')
a_tuple = ('ready','fire','aim')
print(list(a_tuple))
print('---------5-3------')
birth_day = '9/1/1993'
print(birth_day.split('/')) #split() 자체가 리스트를 만들어주는거같음.
# 5-4 리스트는 오프셋 으로 하나의 특정값을 추출할수 있다 .
# A=[오프셋넘버]
print('---------5-4------')
marxes = ['groucho','Chico', 'Harpo'] # 인덱스 1,2,3 이아니라 0, 1, 2 이런식
print(marxes[2])
print(marxes[-1])
print(marxes[1])
'''5-5
리스트는 다음과 같이 리스트 뿐만아니라 다른타입의 요소도 포함할 수 있다. (리스트 안에 리스트 중복가능)
그리고 인덱스 추출시 name_of_list[index_1][index_2] 이런식 '''
print('---------5-5------')
small_birds = ['hummingbird','finch']
extinct_birds = ['dodo','passenger pigeon','Norwegian Blue']
carol_birds = [3,'French hens','2','turtledoves']
all_birds = [small_birds, extinct_birds, 'rmacaw', carol_birds]
print(all_birds)
print(all_birds[1][0]) #dodo >> 두인덱스 사용해서 추출하기 .
""" 5-6
오프셋으로 항목을 얻어서 바꿀수있다.
슬라이스로 항목 추출하기
append()함수를 사용하여 리스트의 끝에 항목추가하기 """
print('---------5-6------')
marxes = ['Groucho','Chico','Harpo']
marxes[2] = 'Wanda' #marxes 리스트의 2번째 인덱스자리에오는 항목을 'wanda'로 바꿈
print(marxes)
print(marxes[0:2]) # 슬라이스로 이범위내에있는 항목만 추출하기
marxes.append('Zeppo') #가장자리에 'Zeppo' 추가하기
print(marxes)
""" 5-7
Extend() 를 사용하여 다른리스트를 병합해보자
+= 로도 병합할수있다.
append() 를 사용하면 항목을 병합하지않고 리스트 전체가 추가된다.
**extend() 와 append() 가 각각 어떤결과를 추출하는지 확인하자**
insert() 로 항목추가하기 """
print('---------5-7------')
marxes = ['Groucho','Chico','Harpo','Zeppo']
others = ['Gummo','Karl']
marxes.extend(others)
print(marxes)
print('--------cf------')
marxes = ['Groucho','Chico','Harpo','Zeppo']
others = ['Gummo','Karl']
marxes += others
print(marxes)
print('------cf_1-----')
marxes = ['Groucho','Chico','Harpo','Zeppo']
others = ['Gummo','Karl']
marxes.append(others)
print(marxes)
print('------cf_2-----')
marxes = ['Groucho','Chico','Harpo','Zeppo']
marxes.insert(3,'Gummo') # insert(위치,추가할항목)
print(marxes)
print('------cf_3-----')
# cf_1 에서 리스트 안에있는 리스트에 항목추가하기
marxes =['Groucho', 'Chico', 'Harpo', 'Zeppo', ['Gummo', 'Karl']]
marxes[4].insert(1,'개새')
print(marxes)
""" 5-8
오프셋으로 항목삭제하기 del name_of_list[index]
삭제할 항목의 인덱스를 모를때 remove(" ")
항목을 가져오는 동시에 그항목을 삭제하는 pop() """
print('---------5-8------')
marxes = ['Groucho', 'Chico', 'Harpo', 'Zeppo', 'Gummo', 'Karl']
del marxes[-1]
print(marxes)
print('------cf-1-------')
marxes = ['Groucho', 'Chico', 'Harpo', 'Zeppo', 'Gummo', 'Karl']
marxes.remove("Karl")
print(marxes)
print('-----cf-2----------')
marxes = ['Groucho', 'Chico', 'Harpo', 'Zeppo', 'Gummo', 'Karl']
print(marxes.pop()) #항목을 가져오는 동시에 삭제함 , pop() 괄호안에 아무것도 없을땐 마지막항목을 가져오고 사라짐
print(marxes)
""" 5-9
항목값의 리스트 오프셋을 알고싶다면 index()를 사용하면된다.
리스트에 어떤값의 존재를 확인할려면 in 을 사용하면된다
리스트에서 값이 적어도 하나이면 존재하면 in 은 true 를 반환한다. 즉(같은값이 2개 이상있어도 Ture 를 반환함
리스트에서 항목수를 알고 싶다면 len() 함수를쓰자 """
print('---------5-9------')
marxes = ['Groucho', 'Chico', 'Harpo', 'Zeppo','Zeppo']
print(marxes.index('Chico'),':'," 인덱스넘버") #인덱스 확인
print('Groucho' in marxes) # 존재여부확인
print('BoB' in marxes)
print('Zeppo' in marxes) # 동일한 항목이 2개이상이여도 True 를 반환함
print(len(marxes))
""" 5-10
sort 와 sorted 의 차이
기본적으로 두기능은 리스트 자체를 내부적인 순서(숫자: 오름차순 , 문자 : 알파벳순 )에의해 정렬해준다
sort : 기존의 리스트 값 즉 순서가 바뀜 [[ name_of_list.sort() ]]
sorted 기존에 있던 순서는 안바뀜 즉 정렬된 복사본을 반환함 [[ list_copied = sorted(name_of_list
"""
print('---------5-10------')
A = ['b','c','a']
A.sort()
print(A)
print('------cf_1----')
A = ['b','c','a']
B = sorted(A)
print(B)
print(A)
""" 5-11 b = a 의 관계에대해
완전히 새로운 리스트로 복사하고싶으면 name_of_list.copy() , new_list = list(name_of_list)
"""
print('----------5-11------')
a = [1,2,3]
b = a # a 를 비에 넣는다는 뜻임, 데이터를 넣었을때 같은 메모리상에 저장됨
print(b)
a[0] = 'surprise'
print(a) # 그결과로 a 의 값도 변경됨
print(b)
"""5-12
copy() 함수
list() 변환 함수
슬라이드 [:]
"""
print('--------5-12-----')
a = [1,2,3]
b = a.copy() #name_of_list.copy()
c = list(a) # list(name_of_list)
d = a[:]
a[0] = 'integer lists are boring'
print(a) # b,c,d 값은 디폴트를 유지한다.
print(b)
print(c)
print(d)
""" 6_1 , 6_2, 6_3 6_4 튜플
튜플!! 임의적인 항목의 시퀀스, 리스트와 다르게 튜플은 불변한다. 즉 튜플을 정의한후에는
추가, 삭제, 수정을 할수없다는 것을 의미한다.그러므로 튜플은 상수의 리스트라고 할수있다.
-튜플생성
-튜플언패킹
-값교환 튜플
-다른객체를 튜플로 만들기
"""
print('------6-1-----')
empty_tuple = ()
print(empty_tuple)
print('------cf_1----')
one_marx = 'Groucho'
one_marx_tuple = 'Groucho', # 하나 이상의 요소가 있는 튜플을 만들기 위해서는 각 요소 뒤에 콤마(,)를 붙인다.
print(one_marx) # 한개만 있을때는 뒤에 , 가있다는게 튜플
print(one_marx_tuple)
print('------cf_2-----')
marx_tuple = 'Groucho','Chico','Harpo' #두개 이상의 요소가 있을경우, 마지막 요소에는 콤마를 붙이지 않는다.
print(marx_tuple)
marx_tuple = ('Groucho','Chico','Harpo') #파이썬은 튜플을 출력할때 괄호를 포함한다.
print(marx_tuple)
print('-------6_2----')
marx_tuple = ('Groucho','Chico','Harpo')
a,b,c = marx_tuple # 튜플은 한 번에 여러 변수를 할당할 수 있다. -튜플 언패킹-
print(a)
print(b)
print(c)
print('------6_3-----')
password = '1234567810'
icecream = 'tuttifrutti'
password,icecream = icecream,password # 한문장에서 값을 교환하기위해 임시변수를 사용하지 않고 튜플을 사용할수있다.
print(password)
print(icecream)
A = [1] #리스트도되네 ?
B = [2]
A,B=B,A
print(A , '리스트 값변경')
print(B , "리스트 값변경")
print('------6_4-----')
marx_list = ['Groucho','Chico','Harpo'] #list>>tuple tulpe() 은 다른객체를 튜플로 만들어준다.
tuple(marx_list)
""" 튜플과 리스트
리스트를 대신해서 튜플을 사용할 수가 있다. 하지만 튜플은 리스트의 append(),insert()등과 같은 함수가없고
함수의 수가 매우 적다. 튜플을 생성한 후에는 수정할 수가 없기 때문이다 그러면 리스트를 사용하면 되지,
왜튜플을 사용할까?
- 튜플은 더 적은 공간을 사용한다
- 실수로 튜플 항목이 손상될 염려가 없다.
- 튜플은 딕셔너리 키로 사용할수있다.
- Named tuple 은 객체의 단순한 대안이 될 수 있다.
- 함수의 인자들은 튜플로 전달된다 .**** #근데 일반적으로 리스트와 딕셔너리를 많이씀
"""
"""딕셔너리
딕셔너리는 리스트와 비슷하다. 다른 점은 항목의 순서를 따지지 않으며 (there is no index) 0또는 1과같은 오프셋으로 항목
을선택할 수 없다. 대신 값에 상응하는 고유한 키(보통은 문자열)를 저장한다. 이키는 대부분 문자열이지만,
불변하는 파이썬의 어떤 타입이 될수있다. 딕셔너리는 변경 가능하므로 키-값 요소를 추가, 삭제, 수정 할수있다.
다른 언어에서는 딕셔너리를 연관 배열 해시 해시맵 이라고 부른다
"""
""" 7-1 7-2
딕셔너리 생성하기
딕셔너리 변환하기
"""
print('---------7-1------')
empty_dict = {}
print(empty_dict)
bierce = {
"day" : "A period of twenty-four hours, mostly misspent",
"positive" : "Mistaken at the top of one's voice",
"misfortune" : "The kind of fortune that never misses",
}
print(bierce)
print('-------7-2------')
# dict() < 사용 = 딕셔너리로 변환
lol = [['a','b'],['c','d'],['e','f']] #리스트로된 리스트
lot = [('a','b'),('c','d'),('e','f')] #튜플로된 리스트
tol = (['a','b'],['c','d'],['e','f']) #리스트로된 튜플
los = ['ab','cd','ef'] #문자열로된 리스트
tos = ('ab','cd','ef') #문자열로된 튜플
print(lol)
print(lot)
print(tol)
print(los)
print(tos)
print(dict(lol))
print(dict(lot))
print(dict(tol))
print(dict(los))
print(dict(tos))
"""7-3 7-4 딕셔너리
딕셔너리에 항목추가하기
딕셔너리에 항목을 추가하는 것은 간단하다. 키에 의해 참조되는 항목에 값을 할당하면 된다.
키가 이미 있는 경우는 그값은 새값으로 대체된다.
-항목추가
-update() 함수와 비교해보기
"""
print('----------7-3-------')
pythons = {
'Chapman' : 'Graham',
'Cleese' : 'John',
'Idle' : 'Eric',
'Jones' : 'Terry',
'Palin' : 'Michael',
}
print(pythons)
print('------------------------------------------------------------------------------------------------------------')
pythons["Gilliam"] = "Gerry" #값변경 name_of_dict['key'] = 'value'
print(pythons)
print('------------------------------------------------------------------------------------------------------------')
pythons["Gilliam"] = "Terry" #딕셔너리의 키들은 반드시 유일해야함, 만약 같은 키를 두번 이상 사용하면 마지막 값이 덮어씀
print(pythons)
print('---------7-4---------')
pythons = {
'Chapman' : 'Graham',
'Cleese' : 'John',
'Giliam' : 'Terry',
'Idle' : 'Eric',
'Jones' : 'Terry',
'Palin' : 'Michael',
}
print(pythons)
print('-----------------------')
others= {'Marx':'Groucho','Howard':'Moe'}
pythons.update(others) #name_of_dict.update(new_dict)
print(pythons) #다른 딕셔너리를 결합할때 사용함
""" 7-5
1.키와 del로 항목삭제하기. del name_of_dict['key']
2.모든 항목삭제하기. name_of_dict()
3.in 으로 키멤버십 테스트하기 'key' in name_of_dict
4.에러 방지를 위해 in 을 사용하자
(A['key'] 도키를찾을수있지만 만약 키가없으면 traceback 오류가남
그래서 왼만하면 in을 써서 에러를 방지하자.
5. get() 함수 사용하는방법 키가 존재하지 않을때 옵션 값을 지정해서 이를 출력한다.
"""
print('-----------7-5-1---------')
del pythons['Marx']
print(pythons) #'Marx'와 그의 value 값 삭제하기
del pythons['Howard']
print(pythons)
print('---------7-5-2-----------')
pythons.clear() # 모든항목삭제 A.clear()
print(pythons)
print('--------7-5-3------------')
pythons = {'A':'a','B':'b','C':'c'}
print('A' in pythons) #'key' in A
print('B' in pythons) #결과는 True
print('c' in pythons) # False
print('-----------7-5-4----------')
print(pythons['A'])
#print(pythons['Z']) << 딕셔너리에 키가 존재하지 않으면 에러가남 !!!
#이러한 애러를 방지하기위해 in 으로 키에 대한 멤버십 테스트를 하는방법이다.
print('---------7-5-5-------------')
print(pythons.get('B'))
print(pythons.get('Z','Not a python')) # 만약 키가 딕셔너리에 없을떄 오류가안나고 옵션값을 호출함
print(pythons.get('Z')) # 옵션 값을 지정하지않으면 None을 얻음
""" 7-6
1. 모든 키 가져오기 name_of_dict.key()
2. 모든 값 가져오기 name_of_dict.value()
3. 모든 쌍의 키와값 가져오기 name_of_dict.items()
"""
signals = {'green':'go','yellow':'go faster','red':'stop'}
print('-------7-6-1--------')
print(signals.keys())
print(list(signals.keys())) #딕셔너리를 리스트로 변환하기위해
print('-------7-6-2--------')
print(list(signals.values()))
print('-------7-6-3--------')
print(list(signals.items()))
"""셋 셋은 값을 버리고 키만 남은 딕셔너리라고 생각하면된다. 딕셔너리와 마찬가지로 각 키는 유일해야한다. 어떤 것이 셋
존재하는지 여부만 판단하기 위해서는 셋을사용한다 그리고 여기에 어떤 정보를 첨부해서 그 결과를 얻고 싶으면 딕셔너리를
사용한다.
"""
""" 8-1
셋을 생성할떄는 set()함수 혹은 {}안에 콥마로 구분된 하나 이상의 값을 넣으면된다.
"""
print('--------8-1-----------')
empty_set = set()
print(empty_set)
print('---------')
even_number = {0,2,4,6,8} # 딕셔너리 키와 마찬가지로 셋은 순서가 없다.
print(even_number)
print('---------')
print(set('letter')) #중복된 값을 버린 셋을 생성한다 [[ set() ]]
""" 8-2
1. 리스트 >> 셋으로
2. 튜플을 셋으로
3. 딕셔너리 >> 셋으로 (딕셔너리에 set()을 사용하면 키만 사용한다)
set( ('string') )
( (list) )
( (tuple) )
({'a:b','c:d'})
"""
print('-------8-2-1--------')
print(set(['Dasher','Dancer','Prancer','Mason-Dixon']))
print('-------8-2-2--------')
print(set(("Ummagumma","Echoes","Atom Heart Mother")))
print('-------8-2-3--------')
print(set({"apple":"red","orange":"orange","cherry":"red"})) # 키값만 호출
""" 8-3 셋연산
셋연산
1. & 연산자와 intersection() 함수 &:교집합 a&b = a.intersection(b)
2. | 연산자와 union() 함수 |:합집합 a|b = a.union(b)
3. - 연산자와 difference() 함수 - : 차집합 a-b = a.different(b)
4. ^ 연산자와 symmetric_difference() 함수 ^ : 대칭차집합(교집합을제외한나머지) a^b = a.symmetric_difference(b)
5.<= 연산자와 issubset()함수 <= : a셋이 b셋의 부분집합 a<=b = a.issubset(b)
"""
print('--------8-3-1------------')
a = {1,2}
b = {2,3}
print(a&b)
print(a.intersection(b))
print('--------8-3-2-------------')
print(a|b)
print(a.union(b))
print('--------8-3-3-------------')
print(a-b)
print(a.difference(b))
print('------8-3-4--------------')
print(a^b)
print(a.symmetric_difference(b))
print('-------8-3-5-------------')
print(a <= b)
print(a.issubset(b))
A ={2,3}
B ={2,3,4,5}
print('---8-3-5(cf)')
print(A <= b)
print(A.issubset(B)) | [
"jkoon2013@gmail.com"
] | jkoon2013@gmail.com |
4e6028989cc9120f76b6ac9bca31ed716230e93f | 001b95da5e994198a53c21f39f0e5b2c88fcd885 | /apiloaderv2.py | 7ad66da9d150f4797a02b8be4897272e256e9e83 | [
"Apache-2.0"
] | permissive | vunetsys/conf-analysis | 43ed78a4e5859ece4b94067156825c0478a6833e | 20945710646ac346caff5d2d7b45a44402721426 | refs/heads/master | 2022-12-05T15:11:04.506836 | 2020-08-28T11:06:48 | 2020-08-28T11:06:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,582 | py | import csv
import json
import requests
from papertracker.models import ConfPaper, ConfAuthor, Conference
csvfile = open('C:/Users/Mattia/Pictures/csrankings.csv', encoding='utf-8')
a = requests.get('https://dblp.org/search/publ/api/?q=conf/fast/2011$&format=json&h=1000')
c = a.json()
conf = Conference.objects.get(id=715)
spamreader = csv.reader(csvfile)
inst = 'None'
for items in c['result']['hits']['hit']:
if 'venue' in items['info']:
if items['info']['venue'] == 'FAST':
#if 'FSE' in items['info']['venue']:
cc = ConfPaper.objects.create(conf=conf, title=items['info']['title'])
cc.save()
for it in items['info']:
if 'authors' in it:
for i in items['info']['authors']['author']:
check = False
if isinstance(i, dict):
for row in spamreader:
if row[0] == i['text']:
check = True
inst = row[1]
csvfile.seek(0)
if check == True:
au = ConfAuthor.objects.create(paper=cc, name=i['text'], institution=inst)
au.save()
else:
au = ConfAuthor.objects.create(paper=cc, name=i['text'], institution='None')
au.save()
inst = 'None'
elif isinstance(i, str):
if i == 'text':
for row in spamreader:
if row[0] == items['info']['authors']['author']['text']:
check = True
inst = row[1]
csvfile.seek(0)
if check == True:
au = ConfAuthor.objects.create(paper=cc, name=items['info']['authors']['author']['text'], institution=inst)
au.save()
else:
au = ConfAuthor.objects.create(paper=cc, name=items['info']['authors']['author']['text'], institution='None')
au.save()
inst = 'None'
| [
"mtt.manzaroli@gmail.com"
] | mtt.manzaroli@gmail.com |
8ab113cf60a3a4a75b3d8b50adeeef8e0c253799 | 22b78677bfe20f4c548a8c6cadfaeebcc635a22e | /venv/bin/pip2 | e606e424a07a9bdbdd662dc790e5b6d64708c181 | [] | no_license | mr-kaveh/flasksocialapp | 57778db7bab285d514502d4dd0ef43245a0f1d5c | d9fa096c53b3a202191d2d9e0373ff1b39663421 | refs/heads/master | 2020-04-19T02:33:34.151348 | 2019-02-01T05:12:25 | 2019-02-01T05:12:25 | 167,907,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | #!/home/hossein/myScripts/socialApp/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"mr.hdavoodi@gmail.com"
] | mr.hdavoodi@gmail.com | |
344bf52717197625def8b9d5930b228ba011b004 | 27db9295cde6fe07ae4888e3a9c151864642c673 | /blobs.py | f3c880b683470662d5e89f90f7171551e532be87 | [] | no_license | dariodotti/tracking_traj_experiment_indoor_outdoor_dataset | 2ec9d558c8276b9327505c27c9ab0b04d27bb9ad | 0acd8c3f8f138844ee93c4291111dd6fa9f31666 | refs/heads/master | 2021-01-12T10:25:59.095397 | 2016-12-14T11:20:14 | 2016-12-14T11:20:14 | 76,453,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,894 | py | import cv2
import pickle
import numpy as np
from multiprocessing.dummy import Pool as ThreadPool
from os import listdir
from os.path import isfile, join
import img_proc as my_img_proc
import main as main_camera017
def my_function(k):
print k
temp_track = []
temp_track_append= temp_track.append
map(lambda i: temp_track_append(file_content[i]) if ids[i] == k else False,xrange(len(file_content)))
return temp_track
def read_data_tracklets(file,multiThread):
global file_content
with open(file,'r')as f:
file_content = f.read().split('\n')
global ids
ids =map(lambda line: int(line.split(' ')[0]),file_content)
keys = list(set(ids))
keys = sorted(keys,key=lambda x: x)
print len(keys)
####MULTI-THREAD VERSION######
if multiThread:
cores = 6
pool = ThreadPool(cores)
print 'n cores: '+str(cores)
tracklets = pool.map(lambda k: my_function(k) ,keys)
#close the pool and wait for the work to finish
pool.close()
pool.join()
###########################
else:
# keys= keys[:500]
tracklets = []
tracklets_append = tracklets.append
for k in keys:
print k
temp_track = []
temp_track_append= temp_track.append
map(lambda i: temp_track_append(file_content[i]) if ids[i] == k else False,xrange(len(file_content)))
tracklets_append(temp_track)
return tracklets
def get_coordinate_points(occurance):
frames =map(lambda line: str(line.split(' ')[0]),occurance)
center_xs = map(lambda line: int(float(line.split(' ')[1])),occurance)
center_ys = map(lambda line: int(float(line.split(' ')[2])),occurance)
bb_width = map(lambda line: int(float(line.split(' ')[3])),occurance)
bb_height = map(lambda line: int(float(line.split(' ')[4])),occurance)
#list_points = []
#list_points_append = list_points.append
#map(lambda c: list_points_append((xs[c],ys[c])),xrange(0,len(xs)))
#apply filter to cancel noise
#x_f,y_f =my_img_proc.median_filter(list_points)
return frames,center_xs,center_ys,bb_width,bb_height
def main():
##divide image into patches(polygons) and get the positions of each one
scene = cv2.imread('C:/Users/dario.dotti/Documents/LOST_dataset/camera017.jpg')
list_poly = my_img_proc.divide_image(scene)
mypath= 'C:/Users/dario.dotti/Documents/LOST_dataset/8_2013-12_2012_camera001/pedestrian_cars/training/blobs/'
only_files=[f for f in listdir(mypath) if isfile(join(mypath, f))]
for f in only_files:
day = f.split('_')[0]
month = f.split('_')[1]
my_file= ''.join([mypath,f])
slices=read_data_tracklets(my_file,0)
with open('C:/Users/dario.dotti/Documents/LOST_dataset/8_2013-12_2012_camera001/pedestrian_cars/training/blobs_forTraining/blobs_org_by_frames_'+day+'_'+month+'.txt', 'wb') as handle:
pickle.dump(slices,handle)
return False
with open('C:/Users/dario.dotti/Documents/LOST_dataset/22_9_2014-1_10_2013_camera017/pedestrian_cars/classification/blobs_org_by_frames_7_2.txt', 'rb') as handle:
slices = pickle.load(handle)
for n,slice in enumerate(slices):
temp_img = scene.copy()
frames,center_xs,center_ys,bb_width,bb_height = get_coordinate_points(slice)
for i in range(0,len(center_ys)):
if bb_width[i] > 15 or bb_height[i] >15:
vertex_1 = (center_xs[i]-(bb_width[i]/2)),(center_ys[i]-(bb_height[i]/2))
vertex_2 = (center_xs[i]+(bb_width[i]/2)),(center_ys[i]+(bb_height[i]/2))
cv2.rectangle(temp_img,vertex_1,vertex_2,0,1)
cv2.putText(temp_img,frames[0],(30,30),cv2.FONT_HERSHEY_SIMPLEX, 1, 0)
cv2.imshow('ciao',temp_img)
cv2.waitKey(0)
if __name__ == '__main__':
main() | [
"dario.dotti@maastrichtuniversity.nl"
] | dario.dotti@maastrichtuniversity.nl |
7cfa51f0591a736e57700f3cb0a8d61f4217297e | f3af403b0f17ba952bdca1554d5d7bcba0b95c05 | /virtual/bin/flask | 4272a265fad124463ed730b2b231cdae1d819d57 | [
"MIT"
] | permissive | Daniel-darnell/Pitchworld | 3157256470b49a24e770718a3cc2dbe713ac475a | 7b9b53a0bbf0a6c191189c5780fdcaabcf89b398 | refs/heads/master | 2023-01-06T02:52:44.270973 | 2020-11-02T09:14:31 | 2020-11-02T09:14:31 | 309,213,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/home/moringa/Desktop/FullStack/Projects/Pitch/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"darnelldanny1997@gmail.com"
] | darnelldanny1997@gmail.com | |
c2329e1d0a37e88a0fcbfb5d6a743b80e8753c28 | df3853b41ed05d86f5bcd992fcc265f637c67784 | /big_deal/test2/14.py | d79e788612e926b9cf62a3a53eddc0a537b10ca5 | [] | no_license | KseniaMIPT/Adamasta | 6ab0121519581dbbbf6ae788d1da85f545f718d1 | e91c34c80834c3f4bf176bc4bf6bf790f9f72ca3 | refs/heads/master | 2021-01-10T16:48:31.141709 | 2016-11-23T21:02:25 | 2016-11-23T21:02:25 | 43,350,507 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py | def digraph_from_input():
N = int(input())
digraph = {}
for i in range(N-1):
line = input().split()
if line[1] not in digraph:
digraph[line[1]] = {line[0]}
else:
digraph[line[1]].add(line[0])
if line[0] not in digraph:
digraph[line[0]] = set()
return digraph
digraph = digraph_from_input()
start_node = str(input())
def bfs_fire(g, start, fired=set(), tree =[]):
"""Функция выделяет остовое дерево методом обхода в ширину.
:param g: основной граф
:param start: начальная вершина
:param fired: множество уже имеющихся в графе вершин
:return tree: остовое дерево
"""
fired.add(start)
queue = [start]
while queue:
current = queue.pop(0)
for neighbour in g[current]:
if neighbour not in fired:
fired.add(neighbour)
queue.append(neighbour)
tree.append([current, neighbour])
return tree
tree = bfs_fire(digraph, start_node)
| [
"ksenia22.11@yandex.ru"
] | ksenia22.11@yandex.ru |
f7876ee7e8a2e78ce0603729c772cba69f9f259d | f61db5940e29773aba8fc342a21de00e91a5ab2e | /base/day15/note/demo2/testcases.py | d496be253d9081853b34930bf67e2d3b34b715c9 | [] | no_license | liyaozr/project | c17a9dcbcda38fe9a15ec4c41a01242a13695991 | 0b0fc10e267ceb19f6792b490fede177035459fe | refs/heads/master | 2020-11-29T18:38:03.297369 | 2020-03-10T01:11:00 | 2020-03-10T01:11:00 | 230,190,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,518 | py | """
============================
Author:柠檬班-木森
Time:2020/2/7 21:29
E-mail:3247119728@qq.com
Company:湖南零檬信息技术有限公司
============================
"""
import unittest
from py26_15day.demo2.register import register
from py26_15day.demo2.login import login_check
from py26_15day.demo2.readexcel import ReadExcel
class RegisterTestCase(unittest.TestCase):
excel = ReadExcel("cases.xlsx", "register")
def __init__(self, methodName, case_data):
self.case_data = case_data
# 调用父类的init的方法
super().__init__(methodName)
def test_register(self):
# 第一步:准备用例的数据
# 预期结果:
excepted = eval(self.case_data["expected"])
# 参数:data
data = eval(self.case_data["data"])
# 用例所在行
row = self.case_data["case_id"] + 1
# 第二步:调用被测试的功能函数,传入参数,获取实际结果:
res = register(*data)
# 第三步:断言(比对预期结果和实际结果)
try:
self.assertEqual(excepted, res)
except AssertionError as e:
# 在excel中写入用例未通过
self.excel.write_data(row=row, column=5, value="未通过")
raise e
else:
# 在excel中写入用例通过
self.excel.write_data(row=row, column=5, value="通过")
class LoginTestCase(unittest.TestCase):
excel = ReadExcel("cases.xlsx", "login")
def __init__(self, methodName, case_data):
self.case_data = case_data
# 调用父类的init的方法
super().__init__(methodName)
def test_login(self):
# 第一步:准备用例的数据
# 预期结果:
expected = eval(self.case_data["expected"])
# 参数:data
data = eval(self.case_data["data"])
# 用例所在行
row = self.case_data["case_id"] + 1
# 第二步:调用被测试的功能函数,传入参数,获取实际结果:
res = login_check(*data)
# 第三步:断言(比对预期结果和实际结果)
try:
self.assertEqual(expected, res)
except AssertionError as e:
# 在excel中写入用例未通过
self.excel.write_data(row=row, column=5, value="未通过")
raise e
else:
# 在excel中写入用例通过
self.excel.write_data(row=row, column=5, value="通过")
| [
"lyz_fordream@163.com"
] | lyz_fordream@163.com |
16a90710f419b70d6f28a6bc8e178229f4dd5d27 | aeae1f547225452774a109f2e9a5a2c55f4d866b | /tvm_cudnn/lstm.py | 391ebf12c076b61203b4fde8f61cb7e7788ae2b7 | [] | no_license | ybai62868/MixPrecisionTensorCore | afb73883593f5c93618d1a626eebb9837e630e2d | 9466d378186adb21156b7e50636f74e5144539e4 | refs/heads/main | 2023-08-14T11:12:11.634726 | 2021-10-18T15:02:27 | 2021-10-18T15:02:27 | 401,226,540 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,989 | py | from __future__ import print_function
import argparse
import time
import torch
from torch.autograd import Variable
torch.backends.cudnn.benchmark = True
def update_progress(progress):
print("\rProgress: [{0:50s}] {1:.1f}%".format('#' * int(progress * 50),
progress * 100), end="")
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--seconds', type=int, default=15)
parser.add_argument('--dry_runs', type=int, default=50)
parser.add_argument('--runs', type=int, default=50)
parser.add_argument('--num_layers', type=int, default=1)
parser.add_argument('--hidden_size', default=640, type=int)
parser.add_argument('--half', action='store_true', dest='half')
args = parser.parse_args()
hidden_size = args.hidden_size
input = Variable(torch.randn(750, args.batch_size,
hidden_size).cuda()) # seq_length based on max deepspeech length 15 seconds
model = torch.nn.LSTM(hidden_size, hidden_size, num_layers=args.num_layers).cuda()
if args.half:
input = input.half()
model = model.half()
model.eval()
def run_benchmark():
for n in range(args.dry_runs):
output, (hx, cx) = model(input)
# grad = output.data.clone().normal_()
# output.backward(grad)
update_progress(n / (float(args.dry_runs) - 1))
print('\nDry runs finished, running benchmark')
avg_fwd_time = 0
torch.cuda.synchronize()
for n in range(args.runs):
torch.cuda.synchronize()
start = time.time()
output, (hx, cx) = model(input)
torch.cuda.synchronize()
end = time.time()
fwd_time = end - start
avg_fwd_time += fwd_time
return avg_fwd_time * 1000 / float(args.runs)
if args.half:
print("Running half precision benchmark")
else:
print("Running standard benchmark")
avg_fwd_time = run_benchmark()
print('\n')
print("Avg Forward time: %.2fms " % avg_fwd_time) | [
"ybai62868@gmail.com"
] | ybai62868@gmail.com |
75b886785f83e8dc3312498f8d4259af161c02b6 | 337976db44254cb997c721139298328416af4086 | /study/part1/dump_db_classes.py | f53647f2e7bbacd3e73ac19d7d18643ad1fcd836 | [] | no_license | vzhukov85/python-study | 3f5d00aa2f84a9b01432d0c0fb378a4b79f46442 | 28b84be1ce50247b8f0b89a8a4b285029c924cde | refs/heads/master | 2020-09-25T23:12:00.044547 | 2020-01-15T06:46:13 | 2020-01-15T06:46:13 | 226,102,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | import shelve
db = shelve.open('class-shelve')
for key in db:
print(key, '=>\n', db[key].name, db[key].pay)
bob = db['bob']
print(bob.lastName())
print(db['tom'].lastName())
| [
"erzhukov@i-teco.ru"
] | erzhukov@i-teco.ru |
5fe73d8da91faf5bfb74762110c33cfcca7d1602 | c9da70f24fc278adf5585c9d8e67163e428d1ccc | /payslip/ex_1/cli.py | 12fc9129d2277fbbd2ddede785ca689b55d47f62 | [] | no_license | irinaBaB/payslip_task | 71f736daf8bae392aa275cf04e25788fee055f96 | 2ffb910d50cf1292bfbc49dad9877e9b39185643 | refs/heads/main | 2023-01-13T03:36:44.470235 | 2020-11-18T04:18:01 | 2020-11-18T04:18:01 | 301,219,808 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | from pay_period import pay_period
from tax_calculation import *
from user_details import user_details
print("Welcome to payslip generator!")
user_name, surname = user_details()
gross_inc, annual_sal = gross_income()
my_super = super_k(gross_inc)
inc_tax = income_tax(annual_sal)
net_inc = net_income(gross_inc, inc_tax)
start_date = pay_period()
print('\n')
print(f'PAY PERIOD:\t 1st of {start_date} - 30st of {start_date}')
print("Your payslip has been generated")
print(f'Name: {user_name} {surname}')
print(f"Gross Income:\t{gross_inc}")
print(f"Income Tax: \t{inc_tax}")
print(f"Net Income: \t{net_inc}")
print(f"Super:\t\t{my_super}")
print('\n')
print('\n')
print("Thank you for using MYOB")
print('\n')
| [
"irina.babicheva@myob.com"
] | irina.babicheva@myob.com |
6343e86e13ef3b29cc0d65db953cb6ba85f7283a | d8f9b8131cfac411bf551a20e9a5b863160ffb79 | /PreProcessing.py | 76985ef3acbd2cd67be4f32d2d4c04f3a3344065 | [] | no_license | artificial-inteligence/AnacondaTest | dbc59b923a7de843ae3adb81b354c73da5a12e4c | eebde6a95d6f6f65593c5a1e4e50f9296a917dbc | refs/heads/master | 2020-04-21T07:41:30.847875 | 2019-02-11T16:52:32 | 2019-02-11T16:52:32 | 169,398,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | # openCV
import cv2
import numpy as np
from matplotlib import pyplot as plt
from skimage.color import rgb2gray
class PreProcessor:
def __init__(self):
pass
def applybilateralfilter(self, img):
# apply bilateral filter on image
blur = cv2.bilateralFilter(img, 9, 75, 75)
return blur
def applygreyscale(self, img):
greyscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return greyscale
#
# # display image
# plt.subplot(121), plt.imshow(img), plt.title('Original')
# plt.xticks([]), plt.yticks([])
# plt.subplot(122), plt.imshow(blur), plt.title('Blurred')
# plt.xticks([]), plt.yticks([])
# plt.show()
| [
"stbbap@gmail.com"
] | stbbap@gmail.com |
abf58fb31e51c78bb90abe08fcf94e44fc5f36c0 | 1985d1a7462d537e1f43055e3c75d91145407ff9 | /Next_Permutation.py | fcc699c978f678ede7468f2b601e8c68627e87c9 | [] | no_license | yeonnseok/algorithm_practice | d95425e59b7b579a70dbbd932e4fb691c57f4534 | c1468f23b2c077ecadac1fa843180674b6ea3295 | refs/heads/master | 2020-04-28T08:51:32.728010 | 2019-04-05T03:20:44 | 2019-04-05T03:20:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | def swap(list, a, b):
temp = list[b]
list[b] = list[a]
list[a] = temp
def next_permutation(c_list, n):
i = n - 1
while c_list[i - 1] >= c_list[i]:
i -= 1
if i <= 0: return False
j = n - 1
while c_list[j] <= c_list[i - 1]:
j -= 1
swap(c_list, j, i - 1)
j = n - 1
while i < j:
swap(c_list, j, i)
i += 1
j -= 1
return c_list
c_list = [7, 2, 3, 6, 5, 4, 1]
n = len(c_list)
print(next_permutation(c_list, n))
| [
"smr603@snu.ac.kr"
] | smr603@snu.ac.kr |
196c4f27e79af13c60f2efd7ff86c0e6b8733c45 | f31391ec70caf12b5c04634c6375f768b7ddc854 | /Full_Project/PyMongo/Main.py | 808e97469b36fdbbec9610da01b09f07a5f9b9e7 | [] | no_license | poklj/Python | 8daebeff851a494b35c3ef0561bd7dfb5ac4ea94 | acbf3b8705220fb7c0afe8ccb40381f9e337838d | refs/heads/master | 2021-08-23T10:53:44.729608 | 2017-12-04T15:38:07 | 2017-12-04T15:38:07 | 112,198,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | from PyMongo import *
def Main():
pass
if __name__ == "__main__":
Main() | [
"compgeek223@gmail.com"
] | compgeek223@gmail.com |
9df82940e19f038b7e3d640228efb7fbca2b4f1d | e16d84730ddddd964e13b1aed9cea1df5875cd5b | /flaskProject/venv/Lib/site-packages/elementpath/xpath30/xpath30_parser.py | d29f151ac7bbc3b34148e130ce187dc1d31c20db | [] | no_license | MarkBenjamins/NHL-Stenden-RestAPI | 0b98c539bb578cb832525275f36d03889b567a02 | 2756a2f5654cbbeab4e39c761e64329f8dd78014 | refs/heads/main | 2023-03-28T04:54:08.922928 | 2021-04-02T09:58:59 | 2021-04-02T09:58:59 | 339,778,545 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,479 | py | #
# Copyright (c), 2018-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
XPath 3.0 implementation
Refs:
- https://www.w3.org/TR/2014/REC-xpath-30-20140408/
- https://www.w3.org/TR/xpath-functions-30/
"""
import os
import re
import codecs
import math
import xml.etree.ElementTree as ElementTree
from copy import copy
from urllib.parse import urlsplit
from ..helpers import XML_NEWLINES_PATTERN, is_xml_codepoint
from ..namespaces import XPATH_FUNCTIONS_NAMESPACE, XPATH_MATH_FUNCTIONS_NAMESPACE, \
XSLT_XQUERY_SERIALIZATION_NAMESPACE
from ..xpath_nodes import etree_iterpath, is_xpath_node, is_element_node, \
is_document_node, is_etree_element, is_schema_node, TypedElement, \
TextNode, AttributeNode, TypedAttribute, NamespaceNode, XPathNode
from ..xpath_token import ValueToken, XPathFunction
from ..xpath_context import XPathSchemaContext
from ..xpath2 import XPath2Parser
from ..datatypes import NumericProxy, QName, Date10, DateTime10, Time
from ..regex import translate_pattern, RegexError
# XSLT and XQuery Serialization parameters
SERIALIZATION_PARAMS = '{%s}serialization-parameters' % XSLT_XQUERY_SERIALIZATION_NAMESPACE
SER_PARAM_OMIT_XML_DECLARATION = '{%s}omit-xml-declaration' % XSLT_XQUERY_SERIALIZATION_NAMESPACE
SER_PARAM_USE_CHARACTER_MAPS = '{%s}use-character-maps' % XSLT_XQUERY_SERIALIZATION_NAMESPACE
SER_PARAM_CHARACTER_MAP = '{%s}character-map' % XSLT_XQUERY_SERIALIZATION_NAMESPACE
SER_PARAM_METHOD = '{%s}method' % XSLT_XQUERY_SERIALIZATION_NAMESPACE
SER_PARAM_INDENT = '{%s}indent' % XSLT_XQUERY_SERIALIZATION_NAMESPACE
SER_PARAM_VERSION = '{%s}version' % XSLT_XQUERY_SERIALIZATION_NAMESPACE
SER_PARAM_CDATA = '{%s}cdata-section-elements' % XSLT_XQUERY_SERIALIZATION_NAMESPACE
SER_PARAM_NO_INDENT = '{%s}suppress-indentation' % XSLT_XQUERY_SERIALIZATION_NAMESPACE
SER_PARAM_STANDALONE = '{%s}standalone' % XSLT_XQUERY_SERIALIZATION_NAMESPACE
SER_PARAM_ITEM_SEPARATOR = '{%s}item-separator' % XSLT_XQUERY_SERIALIZATION_NAMESPACE
class XPath30Parser(XPath2Parser):
    """
    XPath 3.0 expression parser class. Accepts all XPath 2.0 options as keyword
    arguments, but the *strict* option is ignored because XPath 3.0+ has braced
    URI literals and the expanded name syntax is not compatible.

    :param args: the same positional arguments of class :class:`XPath2Parser`.
    :param decimal_formats: a mapping with statically known decimal formats.
    :param kwargs: the same keyword arguments of class :class:`XPath2Parser`.
    """
    version = '3.0'

    # Tokens added by XPath 3.0 on top of the XPath 2.0 symbol set.
    SYMBOLS = XPath2Parser.SYMBOLS | {
        'Q{',  # see BracedURILiteral rule
        '||',  # concat operator
        '!',   # Simple map operator

        # Math functions (trigonometric and exponential)
        'pi', 'exp', 'exp10', 'log', 'log10', 'pow', 'sqrt',
        'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'atan2',

        # Formatting functions
        'format-integer', 'format-number', 'format-dateTime',
        'format-date', 'format-time',

        # String functions that use regular expressions
        'analyze-string',

        # Functions and operators on nodes
        'path', 'has-children', 'innermost', 'outermost',

        # Functions and operators on sequences
        'head', 'tail', 'generate-id', 'uri-collection',
        'unparsed-text', 'unparsed-text-lines', 'unparsed-text-available',
        'environment-variable', 'available-environment-variables',

        # Parsing and serializing
        'parse-xml', 'parse-xml-fragment', 'serialize',

        # Higher-order functions
        'function-lookup', 'function-name', 'function-arity', '#', '?',
        'for-each', 'filter', 'fold-left', 'fold-right', 'for-each-pair',

        # Expressions and node type functions
        'function', 'let', ':=',  # 'namespace-node', 'switch',
    }

    # The 'math' prefix is statically known in XPath 3.0.
    DEFAULT_NAMESPACES = {
        'math': XPATH_MATH_FUNCTIONS_NAMESPACE, **XPath2Parser.DEFAULT_NAMESPACES
    }

    # Own copy, so registrations made by this class don't leak into the
    # XPath 2.0 parser's shared mapping.
    function_signatures = XPath2Parser.function_signatures.copy()

    def __init__(self, *args, decimal_formats=None, **kwargs):
        kwargs.pop('strict', None)  # 'strict' is meaningless for XPath 3.0+
        super().__init__(*args, **kwargs)  # plain super(): Python 3 idiom
        # Keep the caller's mapping when provided (so later external updates
        # are visible), otherwise start from an empty dict.
        self.decimal_formats = decimal_formats if decimal_formats is not None else {}
##
# XPath 3.0 definitions

# Module-level shortcuts for the parser's registration API, used by all
# the token/function definitions that follow in this module.
register = XPath30Parser.register
literal = XPath30Parser.literal
infix = XPath30Parser.infix
method = XPath30Parser.method
function = XPath30Parser.function
register(':=')  # binding operator of 'let' expressions, consumed by the 'let' token

# Re-register '?' on a ValueToken base: in XPath 3.0 a bare '?' also acts
# as an argument placeholder in function calls (partial application).
XPath30Parser.unregister('?')
register('?', bases=(ValueToken,))
@method('?')
def nud(self):
    # Null denotation: the placeholder token simply parses as itself.
    return self
###
# Braced/expanded QName(s): the 'Q{uri}local' syntax (BracedURILiteral rule).
# The XPath 2.0 '{' token is duplicated as 'Q{' and then '{' / '}' are
# re-registered so that the two forms are parsed as distinct symbols.
XPath30Parser.duplicate('{', 'Q{')
XPath30Parser.unregister('{')
XPath30Parser.unregister('}')
register('{')
register('}', bp=100)
XPath30Parser.unregister('(')
# NOTE(review): 'rpb' below looks like a typo for 'rbp' (right binding
# power) — verify against register()'s keyword arguments before fixing,
# since changing it alters expression binding behavior.
@method(register('(', lbp=80, rpb=80, label='expression'))
def nud(self):
    # Parenthesized expression: '()' leaves the token empty, which is
    # evaluated as the empty sequence.
    if self.parser.next_token.symbol != ')':
        self[:] = self.parser.expression(),
    self.parser.advance(')')
    return self
@method('(')
def led(self, left):
    # Left denotation: '(' following an expression is a dynamic function
    # call. A bare (optionally prefixed) name on the left cannot be bound
    # to a function here, so it's reported as unknown (err:XPST0017).
    if left.symbol == '(name)' or left.symbol == ':' and left[1].symbol == '(name)':
        raise self.error('XPST0017', 'unknown function {!r}'.format(left.value))
    if self.parser.next_token.symbol != ')':
        self[:] = left, self.parser.expression()
    else:
        self[:] = left,  # call with no argument
    self.parser.advance(')')
    return self
@method('(')
def evaluate(self, context=None):
    """
    Evaluate a parenthesized expression or a dynamic function call.

    With fewer than two children this token is a plain parenthesized
    expression ('()' gives the empty sequence); with two children the
    first must yield an XPath function, which is applied to the second.
    """
    if len(self) < 2:
        if not self:
            return []
        return self[0].evaluate(context)

    func = self[0].evaluate(context)
    # Unwrap a singleton sequence produced by the left operand.
    if isinstance(func, list) and len(func) == 1:
        func = func[0]
    if not isinstance(func, XPathFunction):
        msg = 'an XPath function expected, not {!r}'.format(type(func))
        raise self.error('XPST0017', msg)
    return func(context, self[1])
@method('(')
def select(self, context=None):
    if len(self) < 2:
        # Plain parenthesized expression: delegate to the inner
        # expression, or yield nothing for '()'.
        yield from self[0].select(context) if self else iter(())
    else:
        # Dynamic function call: the first child must evaluate to a
        # function, which is then applied to the second child.
        value = self[0].evaluate(context)
        if not isinstance(value, XPathFunction):
            raise self.error('XPST0017', 'an XPath function expected, not {!r}'.format(type(value)))
        result = value(context, self[1])
        if isinstance(result, list):
            yield from result
        else:
            yield result
@method(infix('||', bp=32))
def evaluate(self, context=None):
    """String concatenation operator: joins the string values of both operands."""
    left_part = self.string_value(self.get_argument(context))
    right_part = self.string_value(self.get_argument(context, index=1))
    return left_part + right_part
@method(infix('!', bp=72))
def select(self, context=None):
    """Simple map operator: apply the right operand to each item of the left."""
    if context is None:
        raise self.missing_context()

    # Each item selected by the left operand becomes the focus item for
    # the evaluation of the right operand.
    for context.item in context.inner_focus_select(self[0]):
        for result in self[1].select(copy(context)):
            if not isinstance(result, (tuple, XPathNode)) and not hasattr(result, 'tag'):
                # Non-node result (atomic value): yield as-is.
                yield result
            elif isinstance(result, TypedElement):
                yield result
            elif isinstance(result, TypedAttribute):
                yield result
            else:
                yield result
                if isinstance(context, XPathSchemaContext):
                    # During schema-based static analysis, record the XSD
                    # type inferred for untyped node results.
                    self[1].add_xsd_type(result)
###
# 'let' expressions
@method(register('let', lbp=20, rbp=20, label='let expression'))
def nud(self):
    # Parse: let $v1 := E1, $v2 := E2, ... return E
    # Children are stored flat as [var1, expr1, var2, expr2, ..., return-expr].
    del self[:]
    if self.parser.next_token.symbol != '$':
        # 'let' not followed by a variable: treat it as an ordinary name.
        token = self.parser.symbol_table['(name)'](self.parser, self.symbol)
        return token.nud()
    while True:
        self.parser.next_token.expected('$')
        variable = self.parser.expression(5)
        self.append(variable)
        self.parser.advance(':=')
        expr = self.parser.expression(5)
        self.append(expr)
        if self.parser.next_token.symbol != ',':
            break
        self.parser.advance()
    self.parser.advance('return')
    self.append(self.parser.expression(5))  # the 'return' expression
    return self
@method('let')
def select(self, context=None):
    """
    Evaluate a 'let' expression, yielding the items selected by its
    'return' expression with the declared variables in scope.

    Bindings are evaluated sequentially, so a binding expression can
    reference variables declared by previous clauses (e.g.
    ``let $x := 1, $y := $x + 1 return $y``), as XPath 3.0 requires.
    The previous implementation evaluated every binding against the
    original context, so dependent bindings could not be resolved.
    """
    if context is None:
        raise self.missing_context()

    context = copy(context)
    # Children layout (see the 'let' nud): even positions hold the
    # variable tokens, the following odd positions their binding
    # expressions, and the last child is the 'return' expression.
    for k in range(0, len(self) - 1, 2):
        varname = self[k][0].value
        # Evaluate on a copy so the binding expression cannot disturb the
        # focus used by the 'return' expression; the copy carries the
        # variables bound so far, making earlier bindings visible.
        context.variables[varname] = self[k + 1].evaluate(copy(context))
    yield from self[-1].select(context)
###
# 'inline function' expression
@method(register('function', bp=90, label='inline function', bases=(XPathFunction,)))
def nud(self):
    # Parse: function($p1 as T1, ...) as TR { body }
    # Parameter tokens become children; their declared types plus the
    # return type are collected in self.sequence_types; the body token is
    # stored in self.expr.
    if self.parser.next_token.symbol != '(':
        # 'function' not followed by '(': treat it as an ordinary name.
        token = self.parser.symbol_table['(name)'](self.parser, self.symbol)
        return token.nud()
    self.parser.advance('(')
    self.sequence_types = []
    while self.parser.next_token.symbol != ')':
        self.parser.next_token.expected('$')
        param = self.parser.expression(5)
        self.append(param)
        if self.parser.next_token.symbol == 'as':
            # Explicit parameter type declaration.
            self.parser.advance('as')
            token = self.parser.expression(5)
            sequence_type = token.source
            if not self.parser.is_sequence_type(sequence_type):
                raise token.error('XPST0003', "a sequence type expected")
            self.sequence_types.append(sequence_type)
        else:
            # Default parameter type: any sequence of items.
            self.sequence_types.append('item()*')
        self.parser.next_token.expected(')', ',')
        if self.parser.next_token.symbol == ',':
            self.parser.advance()
            self.parser.next_token.unexpected(')')  # no trailing comma allowed
    self.parser.advance(')')
    # Add function return type
    if self.parser.next_token.symbol != 'as':
        self.sequence_types.append('item()*')
    else:
        self.parser.advance('as')
        if self.parser.next_token.label not in ('kind test', 'sequence type'):
            self.parser.expected_name('(name)', ':')
        token = self.parser.expression(rbp=90)
        next_symbol = self.parser.next_token.symbol
        if token.symbol != 'empty-sequence' and next_symbol in {'?', '*', '+'}:
            # Consume an occurrence indicator and append it to the type.
            self.parser.symbol_table[next_symbol](self.parser),  # Add nullary token
            self.parser.advance()
            sequence_type = token.source + next_symbol
        else:
            sequence_type = token.source
        if not self.parser.is_sequence_type(sequence_type):
            raise token.error('XPST0003', "a sequence type expected")
        self.sequence_types.append(sequence_type)
    self.parser.advance('{')
    self.expr = self.parser.expression()  # the function body
    self.parser.advance('}')
    return self
@method('function')
def evaluate(self, context=None):
    """Evaluate the body of an inline function against the given context."""
    if context is not None:
        return self.expr.evaluate(context)
    raise self.missing_context()
###
# Mathematical functions
@method(function('pi', label='math function', nargs=0, sequence_types=('xs:double',)))
def evaluate(self, context):
    """math:pi() — an approximation of the mathematical constant π."""
    return math.pi
@method(function('exp', label='math function', nargs=1,
                 sequence_types=('xs:double?', 'xs:double?')))
def evaluate(self, context):
    """
    math:exp() — *e* raised to the power of the argument.

    :returns: an xs:double value, or None (the empty sequence) when
      the argument is the empty sequence.
    """
    arg = self.get_argument(context, cls=NumericProxy)
    if arg is None:
        return None
    try:
        return math.exp(arg)
    except OverflowError:
        # math.exp() raises for arguments over ~709.78, but xs:double
        # arithmetic overflows to +INF instead of failing.
        return float('inf')
@method(function('exp10', label='math function', nargs=1,
                 sequence_types=('xs:double?', 'xs:double?')))
def evaluate(self, context):
    """
    math:exp10() — 10 raised to the power of the argument.

    :returns: an xs:double value, or None (the empty sequence) when
      the argument is the empty sequence.
    """
    arg = self.get_argument(context, cls=NumericProxy)
    if arg is None:
        return None
    try:
        return float(10 ** arg)
    except OverflowError:
        # Large exponents overflow the float conversion, but xs:double
        # arithmetic overflows to +INF instead of failing.
        return float('inf')
@method(function('log', label='math function', nargs=1,
                 sequence_types=('xs:double?', 'xs:double?')))
def evaluate(self, context):
    """
    math:log() — the natural logarithm of the argument.

    Returns -INF for zero and NaN for any negative argument. The previous
    condition (``arg <= -1``) let values in the open interval (-1, 0)
    reach math.log(), which raises ValueError for negative numbers.
    """
    arg = self.get_argument(context, cls=NumericProxy)
    if arg is None:
        return None
    if not arg:
        return float('-inf')
    if arg < 0:
        return float('nan')
    return math.log(arg)  # NaN input falls through here and yields NaN
@method(function('log10', label='math function', nargs=1,
                 sequence_types=('xs:double?', 'xs:double?')))
def evaluate(self, context):
    """
    math:log10() — the base-ten logarithm of the argument.

    Returns -INF for zero and NaN for any negative argument. The previous
    condition (``arg <= -1``) let values in the open interval (-1, 0)
    reach math.log10(), which raises ValueError for negative numbers.
    """
    arg = self.get_argument(context, cls=NumericProxy)
    if arg is None:
        return None
    if not arg:
        return float('-inf')
    if arg < 0:
        return float('nan')
    return math.log10(arg)  # NaN input falls through here and yields NaN
@method(function('pow', label='math function', nargs=2,
                 sequence_types=('xs:double?', 'numeric', 'xs:double?')))
def evaluate(self, context):
    """math:pow() — the first argument raised to the power of the second."""
    x = self.get_argument(context, cls=NumericProxy)
    y = self.get_argument(context, index=1, required=True, cls=NumericProxy)
    if x is not None:
        if not x and y < 0:
            # Zero base with a negative exponent gives ±INF: for an odd
            # exponent the sign of the (possibly negative) zero base is
            # preserved via copysign, otherwise the result is +INF.
            return math.copysign(float('inf'), x) if (y % 2) == 1 else float('inf')
        # NOTE(review): very large results raise OverflowError here
        # instead of returning ±INF — verify against xs:double overflow
        # semantics before relying on extreme inputs.
        try:
            return float(x ** y)
        except TypeError:
            # A negative base with a non-integral exponent yields a
            # complex number in Python; float() then raises TypeError,
            # which is mapped to NaN.
            return float('nan')
@method(function('sqrt', label='math function', nargs=1,
                 sequence_types=('xs:double?', 'xs:double?')))
def evaluate(self, context):
    """math:sqrt() — the non-negative square root; NaN for negative arguments."""
    value = self.get_argument(context, cls=NumericProxy)
    if value is None:
        return None
    return float('nan') if value < 0 else math.sqrt(value)
@method(function('sin', label='math function', nargs=1,
                 sequence_types=('xs:double?', 'xs:double?')))
def evaluate(self, context):
    """math:sin($arg): sine of an angle in radians; NaN for infinite input."""
    value = self.get_argument(context, cls=NumericProxy)
    if value is None:
        return None
    return float('nan') if math.isinf(value) else math.sin(value)


@method(function('cos', label='math function', nargs=1,
                 sequence_types=('xs:double?', 'xs:double?')))
def evaluate(self, context):
    """math:cos($arg): cosine of an angle in radians; NaN for infinite input."""
    value = self.get_argument(context, cls=NumericProxy)
    if value is None:
        return None
    return float('nan') if math.isinf(value) else math.cos(value)


@method(function('tan', label='math function', nargs=1,
                 sequence_types=('xs:double?', 'xs:double?')))
def evaluate(self, context):
    """math:tan($arg): tangent of an angle in radians; NaN for infinite input."""
    value = self.get_argument(context, cls=NumericProxy)
    if value is None:
        return None
    return float('nan') if math.isinf(value) else math.tan(value)
@method(function('asin', label='math function', nargs=1,
                 sequence_types=('xs:double?', 'xs:double?')))
def evaluate(self, context):
    """math:asin($arg): arc sine in radians; NaN outside the domain [-1, 1]."""
    value = self.get_argument(context, cls=NumericProxy)
    if value is None:
        return None
    return float('nan') if not (-1 <= value <= 1) else math.asin(value)


@method(function('acos', label='math function', nargs=1,
                 sequence_types=('xs:double?', 'xs:double?')))
def evaluate(self, context):
    """math:acos($arg): arc cosine in radians; NaN outside the domain [-1, 1]."""
    value = self.get_argument(context, cls=NumericProxy)
    if value is None:
        return None
    return float('nan') if not (-1 <= value <= 1) else math.acos(value)
@method(function('atan', label='math function', nargs=1,
                 sequence_types=('xs:double?', 'xs:double?')))
def evaluate(self, context):
    """math:atan($arg): arc tangent in radians; empty in -> empty out."""
    value = self.get_argument(context, cls=NumericProxy)
    return None if value is None else math.atan(value)
@method(function('atan2', label='math function', nargs=2,
                 sequence_types=('xs:double', 'xs:double', 'xs:double')))
def evaluate(self, context):
    """math:atan2($y, $x): the angle of the point (x, y) in radians.

    Note the XPath argument order: the first argument is the ordinate $y.
    """
    y = self.get_argument(context, cls=NumericProxy)
    x = self.get_argument(context, index=1, required=True, cls=NumericProxy)
    return math.atan2(y, x)
###
# TODO: Formatting functions
@method(function('format-integer', nargs=(2, 3),
                 sequence_types=('xs:integer?', 'xs:string', 'xs:string?', 'xs:string')))
def evaluate(self, context):
    """fn:format-integer: partial implementation.

    Only single-character picture tokens are handled, by offsetting the
    token's character code; any other picture currently falls through and
    returns None.  NOTE(review): the non-positive branch offsets by value
    without the -1 correction used for positives - confirm this is intended.
    """
    value = self.get_argument(context, cls=NumericProxy)
    picture = self.get_argument(context, index=1, required=True, cls=str)
    if value is None:
        return ''
    # A picture may carry a format modifier after the last ';'.
    if ';' not in picture:
        fmt_token, fmt_modifier = picture, ''
    else:
        fmt_token, fmt_modifier = picture.rsplit(';', 1)
    if len(fmt_token) == 1:
        if value > 0:
            return chr(ord(fmt_token) + value - 1)
        else:
            return '-{}'.format(chr(ord(fmt_token) + value))
@method(function('format-number', nargs=(2, 3),
                 sequence_types=('numeric?', 'xs:string', 'xs:string?', 'xs:string')))
def evaluate(self, context):
    """fn:format-number: stub - only the empty-value case is implemented,
    returning the empty string; the picture argument is not yet used."""
    value = self.get_argument(context, cls=NumericProxy)
    # picture = self.get_argument(context, index=1, required=True, cls=str)
    if value is None:
        return ''
@method(function('format-dateTime', nargs=(2, 5),
                 sequence_types=('xs:dateTime?', 'xs:string', 'xs:string?',
                                 'xs:string?', 'xs:string?', 'xs:string?')))
def evaluate(self, context):
    """fn:format-dateTime: stub - only the empty-value case is implemented,
    returning the empty string; the picture argument is not yet used."""
    value = self.get_argument(context, cls=DateTime10)
    # picture = self.get_argument(context, index=1, required=True, cls=str)
    if value is None:
        return ''
@method(function('format-date', nargs=(2, 5),
                 sequence_types=('xs:date?', 'xs:string', 'xs:string?',
                                 'xs:string?', 'xs:string?', 'xs:string?')))
def evaluate(self, context):
    """fn:format-date: stub - only the empty-value case is implemented.

    Returns the empty string for an empty value, consistent with the
    sibling format-dateTime/format-time/format-number stubs (this one
    previously returned None, the only inconsistency among them).
    """
    value = self.get_argument(context, cls=Date10)
    # picture = self.get_argument(context, index=1, required=True, cls=str)
    if value is None:
        return ''
@method(function('format-time', nargs=(2, 5),
                 sequence_types=('xs:time?', 'xs:string', 'xs:string?',
                                 'xs:string?', 'xs:string?', 'xs:string?')))
def evaluate(self, context):
    """fn:format-time: stub - only the empty-value case is implemented,
    returning the empty string; the picture argument is not yet used."""
    value = self.get_argument(context, cls=Time)
    # picture = self.get_argument(context, index=1, required=True, cls=str)
    if value is None:
        return ''
###
# String functions that use regular expressions
@method(function('analyze-string', nargs=(2, 3),
                 sequence_types=('xs:string?', 'xs:string', 'xs:string',
                                 'element(fn:analyze-string-result)')))
def evaluate(self, context=None):
    """fn:analyze-string: split a string into match/non-match parts.

    Builds an <analyze-string-result> element whose children record, in
    order, the substrings that matched the pattern (with nested <group>
    elements for capture groups) and those that did not.
    """
    input_string = self.get_argument(context, default='', cls=str)
    pattern = self.get_argument(context, 1, required=True, cls=str)
    # Translate the optional XPath flags string into Python re flags.
    flags = 0
    if len(self) > 2:
        for c in self.get_argument(context, 2, required=True, cls=str):
            if c in 'smix':
                flags |= getattr(re, c.upper())
            else:
                raise self.error('FORX0001', "Invalid regular expression flag %r" % c)
    # Translate the XPath regex to a Python one and compile it.
    try:
        python_pattern = translate_pattern(pattern, flags, self.parser.xsd_version)
        compiled_pattern = re.compile(python_pattern, flags=flags)
    except (re.error, RegexError) as err:
        msg = "Invalid regular expression: {}"
        raise self.error('FORX0002', msg.format(str(err))) from None
    except OverflowError as err:
        raise self.error('FORX0002', err) from None
    if compiled_pattern.match('') is not None:
        raise self.error('FORX0003', "pattern matches a zero-length string")
    # Scan the compiled pattern once to record the nesting level of each
    # '(' group, skipping escaped characters and character classes; this is
    # used later to decide when nested <group> elements must be closed.
    level = 0
    escaped = False
    char_class = False
    group_levels = [0]
    for s in compiled_pattern.pattern:
        if escaped:
            escaped = False
        elif s == '\\':
            escaped = True
        elif char_class:
            if s == ']':
                char_class = False
        elif s == '[':
            char_class = True
        elif s == '(':
            group_levels.append(level)
            level += 1
        elif s == ')':
            level -= 1
    etree = ElementTree if context is None else context.etree
    # The result document is assembled as text and parsed at the end.
    lines = ['<analyze-string-result xmlns="{}">'.format(XPATH_FUNCTIONS_NAMESPACE)]
    k = 0
    while k < len(input_string):
        match = compiled_pattern.search(input_string, k)
        if match is None:
            lines.append('<non-match>{}</non-match>'.format(input_string[k:]))
            break
        elif not match.groups():
            # Simple case: no capture groups, emit non-match/match pairs.
            start, stop = match.span()
            if start > k:
                lines.append('<non-match>{}</non-match>'.format(input_string[k:start]))
            lines.append('<match>{}</match>'.format(input_string[start:stop]))
            k = stop
        else:
            start, stop = match.span()
            if start > k:
                lines.append('<non-match>{}</non-match>'.format(input_string[k:start]))
                k = start
            # Walk the capture groups in index order, emitting <group>
            # elements and closing them according to the recorded nesting.
            match_items = []
            group_tmpl = '<group nr="{}">{}'
            empty_group_tmpl = '<group nr="{}"/>'
            unclosed_groups = 0
            for idx in range(1, compiled_pattern.groups + 1):
                start, stop = match.span(idx)
                if start < 0:
                    continue
                elif start > k:
                    if unclosed_groups:
                        for _ in range(unclosed_groups):
                            match_items.append('</group>')
                        unclosed_groups = 0
                    match_items.append(input_string[k:start])
                if start == stop:
                    if group_levels[idx] <= group_levels[idx - 1]:
                        for _ in range(unclosed_groups):
                            match_items.append('</group>')
                        unclosed_groups = 0
                    match_items.append(empty_group_tmpl.format(idx))
                    k = stop
                elif idx == compiled_pattern.groups:
                    k = stop
                    match_items.append(group_tmpl.format(idx, input_string[start:k]))
                    match_items.append('</group>')
                else:
                    # Decide from the next group's start whether this group
                    # closes now or stays open to contain the next one.
                    next_start = match.span(idx + 1)[0]
                    if next_start < 0 or stop < next_start or stop == next_start \
                            and group_levels[idx + 1] <= group_levels[idx]:
                        k = stop
                        match_items.append(group_tmpl.format(idx, input_string[start:k]))
                        match_items.append('</group>')
                    else:
                        k = next_start
                        match_items.append(group_tmpl.format(idx, input_string[start:k]))
                        unclosed_groups += 1
            for _ in range(unclosed_groups):
                match_items.append('</group>')
            lines.append('<match>{}</match>'.format(''.join(match_items)))
    lines.append('</analyze-string-result>')
    return etree.XML(''.join(lines))
###
# Functions and operators on nodes
@method(function('path', nargs=(0, 1), sequence_types=('node()?', 'xs:string?')))
def evaluate(self, context=None):
    """fn:path: an XPath expression that identifies the node's position,
    built by walking the tree from the context root."""
    if context is None:
        raise self.missing_context()
    elif isinstance(context, XPathSchemaContext):
        return
    elif not self:
        if context.item is None:
            return '/'
        item = context.item
    else:
        item = self.get_argument(context)
        if item is None:
            return
    if is_document_node(item):
        return '/'
    elif isinstance(item, TypedElement):
        elem = item.elem
    elif is_etree_element(item):
        elem = item
    else:
        # NOTE(review): fallback for non-element items; presumably self._elem
        # holds the element a non-element node belongs to - confirm.
        elem = self._elem
    try:
        root = context.root.getroot()
    except AttributeError:
        # The root is a bare element (no document node), so paths are
        # anchored at Q{...}root() per the fn:path specification.
        root = context.root
        path = 'Q{%s}root()' % XPATH_FUNCTIONS_NAMESPACE
    else:
        path = '/%s' % root.tag
    for e, path in etree_iterpath(root, path):
        if e is elem:
            return path
@method(function('has-children', nargs=(0, 1), sequence_types=('node()?', 'xs:boolean')))
def evaluate(self, context=None):
    """fn:has-children: whether a node has any child node (element children
    or a text child); uses the context item when called without arguments."""
    if context is None:
        raise self.missing_context()
    elif not self:
        if context.item is None:
            # With no context item the root document node is tested.
            return is_document_node(context.root)
        item = context.item
        if not is_xpath_node(item):
            raise self.error('XPTY0004', 'context item must be a node')
    else:
        item = self.get_argument(context)
        if item is None:
            return False
        elif not is_xpath_node(item):
            raise self.error('XPTY0004', 'argument must be a node')
    return is_document_node(item) or \
        is_element_node(item) and (len(item) > 0 or item.text is not None) or \
        isinstance(item, TypedElement) and (len(item.elem) > 0 or item.elem.text is not None)
@method(function('innermost', nargs=1, sequence_types=('node()*', 'node()*')))
def select(self, context=None):
    """fn:innermost: the input nodes that have no descendant in the input
    sequence, in document order."""
    if context is None:
        raise self.missing_context()
    context = context.copy()
    nodes = [e for e in self[0].select(context)]
    if any(not is_xpath_node(x) for x in nodes):
        raise self.error('XPTY0004', 'argument must contain only nodes')
    # The comprehension target rebinds context.item so iter_ancestors()
    # walks upward from each input node in turn; a node that is an ancestor
    # of another input node is then excluded from the result.
    ancestors = {x for context.item in nodes for x in context.iter_ancestors(axis='ancestor')}
    yield from context.iter_results([x for x in nodes if x not in ancestors])
@method(function('outermost', nargs=1, sequence_types=('node()*', 'node()*')))
def select(self, context=None):
    """fn:outermost: the input nodes that have no ancestor in the input
    sequence, in document order."""
    if context is None:
        raise self.missing_context()
    context = context.copy()
    nodes = {e for e in self[0].select(context)}
    if any(not is_xpath_node(x) for x in nodes):
        raise self.error('XPTY0004', 'argument must contain only nodes')
    # The comprehension target rebinds context.item so iter_ancestors()
    # walks upward from each candidate node; only nodes with no ancestor
    # in the input set are kept.
    yield from context.iter_results([
        context.item for context.item in nodes
        if all(x not in nodes for x in context.iter_ancestors(axis='ancestor'))
    ])
##
# Functions and operators on sequences
@method(function('head', nargs=1, sequence_types=('item()*', 'item()?')))
def evaluate(self, context=None):
    """fn:head: the first item of the sequence, or the empty sequence."""
    return next(iter(self[0].select(context)), None)
@method(function('tail', nargs=1, sequence_types=('item()*', 'item()?')))
def select(self, context=None):
    """fn:tail: every item of the sequence except the first."""
    items = iter(self[0].select(context))
    next(items, None)  # drop the head, tolerating an empty sequence
    yield from items
@method(function('generate-id', nargs=(0, 1), sequence_types=('node()?', 'xs:string')))
def evaluate(self, context=None):
    """fn:generate-id: a string uniquely identifying a node, derived from
    the node object's id(); empty input yields the empty string."""
    arg = self.get_argument(context, default_to_context=True)
    if arg is None:
        return ''
    if is_xpath_node(arg):
        return 'ID-{}'.format(id(arg))
    if self:
        raise self.error('XPTY0004', "argument is not a node")
    raise self.error('XPTY0004', "context item is not a node")
@method(function('uri-collection', nargs=(0, 1),
                 sequence_types=('xs:string?', 'xs:anyURI*')))
def evaluate(self, context=None):
    """fn:uri-collection: the sequence of URIs of a resource collection.

    Without an argument (or with an empty one) the context's default
    resource collection is used; otherwise the URI is made absolute and
    looked up in context.resource_collections.
    """
    uri = self.get_argument(context)
    if context is None:
        raise self.missing_context()
    elif isinstance(context, XPathSchemaContext):
        return
    elif not self or uri is None:
        if context.default_resource_collection is None:
            raise self.error('FODC0002', 'no default resource collection has been defined')
        resource_collection = context.default_resource_collection
    else:
        uri = self.get_absolute_uri(uri)
        try:
            resource_collection = context.resource_collections[uri]
        except (KeyError, TypeError):
            # Distinguish a directory-like file URI (FODC0003) from a
            # plain unknown-collection miss (FODC0002).
            url_parts = urlsplit(uri)
            if url_parts.scheme in ('', 'file') and \
                    not url_parts.path.startswith(':') and url_parts.path.endswith('/'):
                raise self.error('FODC0003', 'collection URI is a directory')
            raise self.error('FODC0002', '{!r} collection not found'.format(uri)) from None
    if not self.parser.match_sequence_type(resource_collection, 'xs:anyURI*'):
        raise self.wrong_sequence_type("Type does not match sequence type xs:anyURI*")
    return resource_collection
@method(function('unparsed-text', nargs=(1, 2),
                 sequence_types=('xs:string?', 'xs:string', 'xs:string?')))
@method(function('unparsed-text-lines', nargs=(1, 2),
                 sequence_types=('xs:string?', 'xs:string', 'xs:string*')))
def evaluate(self, context=None):
    """fn:unparsed-text / fn:unparsed-text-lines: fetch a resource as text.

    Retrieves the resource at $href, decodes it with the given encoding
    (default UTF-8, with a UTF-16 fallback when no encoding argument was
    supplied) and returns the text, or for unparsed-text-lines the list of
    lines split on XML newlines.  Raises FOUT1170 for retrieval/URI errors
    and FOUT1190 for encoding errors.
    """
    from urllib.request import urlopen  # optional because it consumes ~4.3 MiB
    from urllib.error import URLError
    href = self.get_argument(context, cls=str)
    if href is None:
        return
    elif urlsplit(href).fragment:
        # A fragment identifier is not allowed on the resource URI.
        raise self.error('FOUT1170')
    if len(self) > 1:
        encoding = self.get_argument(context, index=1, required=True, cls=str)
    else:
        encoding = 'UTF-8'
    try:
        uri = self.get_absolute_uri(href)
    except ValueError:
        raise self.error('FOUT1170') from None
    try:
        codecs.lookup(encoding)
    except LookupError:
        raise self.error('FOUT1190') from None
    try:
        with urlopen(uri) as rp:
            obj = rp.read()
    except (ValueError, URLError) as err:
        # Map "resource not found" style failures to FOUT1170, anything
        # else to FOUT1190.
        message = str(err)
        if 'No such file' in message or \
                'unknown url type' in message or 'HTTP Error 404' in message:
            raise self.error('FOUT1170') from None
        raise self.error('FOUT1190') from None
    try:
        text = codecs.decode(obj, encoding)
    except UnicodeDecodeError:
        if len(self) > 1:
            # An explicitly requested encoding must succeed.
            raise self.error('FOUT1190') from None
        try:
            text = codecs.decode(obj, 'UTF-16')
        except UnicodeDecodeError:
            raise self.error('FOUT1190') from None
    if not all(is_xml_codepoint(ord(s)) for s in text):
        raise self.error('FOUT1190')
    text = text.lstrip('\ufeff')  # strip a leading BOM
    if self.symbol == 'unparsed-text-lines':
        lines = XML_NEWLINES_PATTERN.split(text)
        return lines[:-1] if lines[-1] == '' else lines
    return text
@method(function('unparsed-text-available', nargs=(1, 2),
                 sequence_types=('xs:string?', 'xs:string', 'xs:boolean')))
def evaluate(self, context=None):
    """fn:unparsed-text-available: True if the resource at $href can be
    retrieved and decoded as text (mirrors fn:unparsed-text, but every
    failure maps to False instead of an error)."""
    from urllib.request import urlopen  # optional because it consumes ~4.3 MiB
    from urllib.error import URLError
    href = self.get_argument(context, cls=str)
    if href is None:
        return False
    elif urlsplit(href).fragment:
        return False
    if len(self) > 1:
        encoding = self.get_argument(context, index=1, required=True, cls=str)
    else:
        encoding = 'UTF-8'
    try:
        uri = self.get_absolute_uri(href)
        codecs.lookup(encoding)
        with urlopen(uri) as rp:
            obj = rp.read()
    except (ValueError, URLError, LookupError):
        return False
    try:
        return all(is_xml_codepoint(ord(s)) for s in codecs.decode(obj, encoding))
    except UnicodeDecodeError:
        if len(self) > 1:
            return False
        # No explicit encoding was given: retry with the UTF-16 fallback.
        try:
            return all(is_xml_codepoint(ord(s)) for s in codecs.decode(obj, 'UTF-16'))
        except UnicodeDecodeError:
            return False
@method(function('environment-variable', nargs=1,
                 sequence_types=('xs:string', 'xs:string?')))
def evaluate(self, context=None):
    """fn:environment-variable: the value of an OS environment variable,
    or the empty sequence when unset or when environment access is
    disabled on the context."""
    name = self.get_argument(context, required=True, cls=str)
    if context is None:
        raise self.missing_context()
    if not context.allow_environment:
        return None
    return os.environ.get(name)
@method(function('available-environment-variables', nargs=0,
                 sequence_types=('xs:string*',)))
def evaluate(self, context=None):
    """fn:available-environment-variables: the names of all environment
    variables, or the empty sequence when environment access is disabled."""
    if context is None:
        raise self.missing_context()
    if not context.allow_environment:
        return None
    return list(os.environ)
###
# Parsing and serializing
@method(function('parse-xml', nargs=1,
                 sequence_types=('xs:string?', 'document-node(element(*))?')))
@method(function('parse-xml-fragment', nargs=1,
                 sequence_types=('xs:string?', 'document-node()?')))
def evaluate(self, context=None):
    """fn:parse-xml / fn:parse-xml-fragment: parse a string to a document.

    TODO: resolve relative entity references with static base URI.
    """
    arg = self.get_argument(context, cls=str)
    if arg is None:
        return None
    etree = ElementTree if context is None else context.etree
    if self.symbol == 'parse-xml-fragment':
        # A fragment may have several top-level elements, so wrap it in a
        # fake single-root document before parsing.
        arg = '<document>{}</document>'.format(arg)
    try:
        root = etree.XML(arg)
    except etree.ParseError:
        raise self.error('FODC0006')
    return etree.ElementTree(root)
@method(function('serialize', nargs=(1, 2), sequence_types=(
        'item()*', 'element(output:serialization-parameters)?', 'xs:string')))
def evaluate(self, context=None):
    """fn:serialize: serialize a sequence to a string, honouring a subset
    of the output:serialization-parameters element (omit-xml-declaration,
    method, item separator and character maps)."""
    # TODO full implementation of serialization with
    # https://www.w3.org/TR/xpath-functions-30/#xslt-xquery-serialization-30
    # Fall back to an empty parameters element when none is supplied.
    params = self.get_argument(context, index=1) if len(self) == 2 else None
    if params is None:
        tmpl = '<output:serialization-parameters xmlns:output="{}"/>'
        params = ElementTree.XML(tmpl.format(XSLT_XQUERY_SERIALIZATION_NAMESPACE))
    elif not is_etree_element(params):
        pass
    elif params.tag != SERIALIZATION_PARAMS:
        raise self.error('XPTY0004', 'output:serialization-parameters tag expected')
    # Pick the ElementTree implementation and propagate known prefixes.
    if context is None:
        etree = ElementTree
    else:
        etree = context.etree
        if context.namespaces:
            for pfx, uri in context.namespaces.items():
                etree.register_namespace(pfx, uri)
        else:
            for pfx, uri in self.parser.namespaces.items():
                etree.register_namespace(pfx, uri)
    item_separator = '\n'
    kwargs = {}
    character_map = {}
    # Decode the supported serialization parameters; duplicated parameter
    # elements are rejected with SEPM0019.
    if len(params):
        if len(params) > len({e.tag for e in params}):
            raise self.error('SEPM0019')
        for child in params:
            if child.tag == SER_PARAM_OMIT_XML_DECLARATION:
                value = child.get('value')
                if value not in {'yes', 'no'} or len(child.attrib) > 1:
                    raise self.error('SEPM0017')
                elif value == 'no':
                    kwargs['xml_declaration'] = True
            elif child.tag == SER_PARAM_USE_CHARACTER_MAPS:
                if len(child.attrib):
                    raise self.error('SEPM0017')
                for e in child:
                    if e.tag != SER_PARAM_CHARACTER_MAP:
                        raise self.error('SEPM0017')
                    try:
                        character = e.attrib['character']
                        if character in character_map:
                            msg = 'duplicate character {!r} in character map'
                            raise self.error('SEPM0018', msg.format(character))
                        elif len(character) != 1:
                            msg = 'invalid character {!r} in character map'
                            raise self.error('SEPM0017', msg.format(character))
                        character_map[character] = e.attrib['map-string']
                    except KeyError as key:
                        msg = "missing {} in character map"
                        raise self.error('SEPM0017', msg.format(key)) from None
                    else:
                        if len(e.attrib) > 2:
                            msg = "invalid attribute in character map"
                            raise self.error('SEPM0017', msg)
            elif child.tag == SER_PARAM_METHOD:
                value = child.get('value')
                if value not in {'html', 'xml', 'xhtml', 'text'} or len(child.attrib) > 1:
                    raise self.error('SEPM0017')
                kwargs['method'] = value if value != 'xhtml' else 'html'
            elif child.tag == SER_PARAM_INDENT:
                value = child.get('value')
                if value not in {'yes', 'no'} or len(child.attrib) > 1:
                    raise self.error('SEPM0017')
            elif child.tag == SER_PARAM_ITEM_SEPARATOR:
                try:
                    item_separator = child.attrib['value']
                except KeyError:
                    raise self.error('SEPM0017') from None
            # TODO params
            elif child.tag == SER_PARAM_CDATA:
                pass
            elif child.tag == SER_PARAM_NO_INDENT:
                pass
            elif child.tag == SER_PARAM_STANDALONE:
                pass
            elif child.tag.startswith(f'{{{XSLT_XQUERY_SERIALIZATION_NAMESPACE}'):
                raise self.error('SEPM0017')
    # Serialize every item of the sequence; attribute and namespace nodes
    # cannot be serialized (SENR0001), schema nodes are skipped.
    chunks = []
    for item in self[0].select(context):
        if is_document_node(item):
            item = item.getroot()
        elif isinstance(item, TypedElement):
            item = item.elem
        elif isinstance(item, (AttributeNode, TypedAttribute, NamespaceNode)):
            raise self.error('SENR0001')
        elif isinstance(item, TextNode):
            chunks.append(item.value)
            continue
        elif not is_etree_element(item):
            chunks.append(str(item))
            continue
        elif hasattr(item, 'xsd_version') or is_schema_node(item):
            continue  # XSD schema or schema node
        try:
            chunks.append(etree.tostring(item, encoding='utf-8', **kwargs).decode('utf-8'))
        except TypeError:
            # The backend may not accept the extra keyword arguments.
            chunks.append(etree.tostring(item, encoding='utf-8').decode('utf-8'))
    # Apply the character map substitutions to the joined result.
    if not character_map:
        return item_separator.join(chunks)
    result = item_separator.join(chunks)
    for character, map_string in character_map.items():
        result = result.replace(character, map_string)
    return result
###
# Higher-order functions
@method(function('function-lookup', nargs=2,
                 sequence_types=('xs:QName', 'xs:integer', 'function(*)?')))
def evaluate(self, context=None):
    """fn:function-lookup: obtain a function item by QName and arity."""
    qname = self.get_argument(context, cls=QName)
    arity = self.get_argument(context, index=1, cls=int)
    # TODO: complete function signatures
    # if (qname, arity) not in self.parser.function_signatures:
    #     raise self.error('XPST0017')
    try:
        return self.parser.symbol_table[qname.local_name](self.parser, nargs=arity)
    except (KeyError, TypeError):
        raise self.error('XPST0017', "unknown function {}".format(qname.local_name))
@method(function('function-name', nargs=1, sequence_types=('function(*)', 'xs:QName?')))
def evaluate(self, context=None):
    """fn:function-name: the QName of a function item, or the empty
    sequence for an anonymous function."""
    if isinstance(self[0], XPathFunction):
        func = self[0]
    else:
        func = self.get_argument(context, cls=XPathFunction)
    if func.name is None:
        return []
    return func.name
@method(function('function-arity', nargs=1, sequence_types=('function(*)', 'xs:integer')))
def evaluate(self, context=None):
    """fn:function-arity: the number of parameters of a function item."""
    func = self[0] if isinstance(self[0], XPathFunction) else \
        self.get_argument(context, cls=XPathFunction)
    return func.arity
@method('#', bp=50)
def led(self, left):
    """Parse a named function reference: EQName '#' IntegerLiteral."""
    left.expected(':', '(name)', 'Q{')
    self[:] = left, self.parser.expression(rbp=90)
    self[1].expected('(integer)')
    return self
@method('#')
def evaluate(self, context=None):
    """Resolve a named function reference (name#arity) to a function token."""
    if self[0].symbol == ':':
        qname = QName(self[0][1].namespace, self[0].value)
    elif self[0].symbol == 'Q{':
        qname = QName(self[0][0].value, self[0][1].value)
    else:
        # An unprefixed name defaults to the XPath functions namespace.
        qname = QName(XPATH_FUNCTIONS_NAMESPACE, self[0].value)
    arity = self[1].value
    # TODO: complete function signatures
    # if (qname, arity) not in self.parser.function_signatures:
    #     raise self.error('XPST0017')
    try:
        return self.parser.symbol_table[qname.local_name](self.parser, nargs=arity)
    except (KeyError, TypeError):
        raise self.error('XPST0017', "unknown function {}".format(qname.local_name))
@method(function('for-each', nargs=2,
                 sequence_types=('item()*', 'function(item()) as item()*', 'item()*')))
def select(self, context=None):
    """fn:for-each: apply a function to every item, concatenating results."""
    func = self[1][1] if self[1].symbol == ':' else self[1]
    if not isinstance(func, XPathFunction):
        func = self.get_argument(context, index=1, cls=XPathFunction)
    for value in self[0].select(copy(context)):
        mapped = func(context, argument_list=[value])
        if not isinstance(mapped, list):
            yield mapped
        else:
            yield from mapped
@method(function('filter', nargs=2,
                 sequence_types=('item()*', 'function(item()) as xs:boolean', 'item()*')))
def select(self, context=None):
    """fn:filter: keep the items for which the predicate function is true."""
    predicate = self[1][1] if self[1].symbol == ':' else self[1]
    if not isinstance(predicate, XPathFunction):
        predicate = self.get_argument(context, index=1, cls=XPathFunction)
    for value in self[0].select(copy(context)):
        keep = self.boolean_value(predicate(context, argument_list=[value]))
        if keep:
            yield value
@method(function('fold-left', nargs=3,
                 sequence_types=('item()*', 'item()*',
                                 'function(item()*, item()) as item()*', 'item()*')))
def select(self, context=None):
    """fn:fold-left: left-to-right reduction with an initial accumulator."""
    func = self[2][1] if self[2].symbol == ':' else self[2]
    if not isinstance(func, XPathFunction):
        func = self.get_argument(context, index=2, cls=XPathFunction)
    accumulator = self.get_argument(context, index=1)
    for value in self[0].select(copy(context)):
        accumulator = func(context, argument_list=[accumulator, value])
    if isinstance(accumulator, list):
        yield from accumulator
    else:
        yield accumulator
@method(function('fold-right', nargs=3,
                 sequence_types=('item()*', 'item()*',
                                 'function(item()*, item()) as item()*', 'item()*')))
def select(self, context=None):
    """fn:fold-right: right-to-left reduction with an initial accumulator."""
    func = self[2][1] if self[2].symbol == ':' else self[2]
    if not isinstance(func, XPathFunction):
        func = self.get_argument(context, index=2, cls=XPathFunction)
    accumulator = self.get_argument(context, index=1)
    # The whole sequence must be materialized to traverse it backwards.
    for value in reversed(list(self[0].select(copy(context)))):
        accumulator = func(context, argument_list=[value, accumulator])
    if isinstance(accumulator, list):
        yield from accumulator
    else:
        yield accumulator
@method(function('for-each-pair', nargs=3,
                 sequence_types=('item()*', 'item()*',
                                 'function(item(), item()) as item()*', 'item()*')))
def select(self, context=None):
    """fn:for-each-pair: zip two sequences and apply a binary function to
    each pair, concatenating the results (stops at the shorter sequence)."""
    func = self[2][1] if self[2].symbol == ':' else self[2]
    if not isinstance(func, XPathFunction):
        func = self.get_argument(context, index=2, cls=XPathFunction)
    if not isinstance(func, XPathFunction):
        raise self.error('XPTY0004', "invalid type for 3rd argument {!r}".format(func))
    if func.arity != 2:
        raise self.error('XPTY0004', "function arity of 3rd argument must be 2")
    pairs = zip(self[0].select(copy(context)), self[1].select(copy(context)))
    for first, second in pairs:
        mapped = func(context, argument_list=[first, second])
        if not isinstance(mapped, list):
            yield mapped
        else:
            yield from mapped
###
# Redefined or extended functions
XPath30Parser.unregister('string-join')


@method(function('string-join', nargs=(1, 2),
                 sequence_types=('xs:string*', 'xs:string', 'xs:string')))
def evaluate(self, context=None):
    """XPath 3.0 fn:string-join: the separator argument is optional and
    defaults to the empty string."""
    chunks = [self.string_value(item) for item in self[0].select(context)]
    separator = self.get_argument(context, 1, default='', cls=str)
    return separator.join(chunks)
| [
"55874150+MarkBenjamins@users.noreply.github.com"
] | 55874150+MarkBenjamins@users.noreply.github.com |
1d0f393d97c018895eba718afbf13b7965b7ddfb | b3ba88b0ada02029258cb9d7bbce49086050870a | /src/utils/common.py | 92a3d5c4fe10d5d3c5e8365e10abe75585422f0a | [
"MIT"
] | permissive | rexlow/Tornado-Server-Boilerplate | 504f7b80d694444bd246d04ff9ead0c8a697155b | a8852c46ef2688984b14ce83cd40f3263e57f04b | refs/heads/master | 2021-06-12T01:24:44.407798 | 2019-02-26T08:38:05 | 2019-02-26T08:38:05 | 166,561,456 | 3 | 1 | MIT | 2021-06-01T23:18:18 | 2019-01-19T15:26:01 | Python | UTF-8 | Python | false | false | 319 | py | #!/usr/bin/python3
import urllib
import urllib.parse

from .DotDict import DotDict
def parseEncodedString(encodedString: str) -> str:
    """Decode a doubly percent-encoded string back to plain text.

    The value is unquoted twice because it arrives URL-encoded two times
    (e.g. '%2520' -> '%20' -> ' ').  Requires urllib.parse: a bare
    `import urllib` does not reliably expose the parse submodule.
    """
    return urllib.parse.unquote(urllib.parse.unquote(encodedString))
def unloadRequestParams(data):
    """Flatten request arguments: keep the first value of each key and
    decode it from UTF-8 bytes to str."""
    return {key: values[0].decode(encoding="utf-8") for key, values in data.items()}
"qiweilow950823@gmail.com"
] | qiweilow950823@gmail.com |
efb691981ff05fe7bcb03faa225d88b4bee1bde0 | 084d1b9cb341a1b943f95e98ee3cf680df502ba9 | /Products/mediaPage/tests/base.py | b0e818b0d28196ee7fc5c4b6020c8236190fd002 | [] | no_license | intk/Products.mediaPage | 629aa7c8f98e308b536f997cafbab177ba6ae1a5 | a3f4b0c900565b438593888a3009f8e7e4867792 | refs/heads/master | 2016-09-06T13:57:17.209247 | 2014-09-18T08:56:37 | 2014-09-18T08:56:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,990 | py | """Test setup for integration and functional tests.
When we import PloneTestCase and then call setupPloneSite(), all of
Plone's products are loaded, and a Plone site will be created. This
happens at module level, which makes it faster to run each test, but
slows down test runner startup.
"""
from Products.Five import zcml
from Products.Five import fiveconfigure
from Testing import ZopeTestCase as ztc
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import onsetup
# When ZopeTestCase configures Zope, it will *not* auto-load products
# in Products/. Instead, we have to use a statement such as:
# ztc.installProduct('SimpleAttachment')
# This does *not* apply to products in eggs and Python packages (i.e.
# not in the Products.*) namespace. For that, see below.
# All of Plone's products are already set up by PloneTestCase.
@onsetup
def setup_product():
    """Load this package's ZCML and register it with the test framework.

    The @onsetup decorator defers execution of this body until the Plone
    site testing layer is set up, which is the simplest way to hook the
    product initialization into Plone integration tests.
    """
    # Load the package ZCML (which may <include /> further packages) with
    # Five debug mode enabled so configuration errors surface clearly.
    fiveconfigure.debug_mode = True
    import Products.mediaPage
    zcml.load_config('configure.zcml', Products.mediaPage)
    fiveconfigure.debug_mode = False

    # Products.mediaPage is declared as a Zope 2 product outside the
    # Products.* namespace (via <five:registerPackage />), so it must be
    # made available with installPackage() after its ZCML is loaded.
    # Further dependencies would be loaded here too,
    # e.g. ztc.installPackage('borg.localrole').
    ztc.installPackage('Products.mediaPage')
# The order here is important: We first call the (deferred) function
# which installs the products we need for this product. Then, we let
# PloneTestCase set up this product on installation.
# Install our product (deferred via @onsetup) first, then create the Plone
# test site with the product pre-installed.
setup_product()
ptc.setupPloneSite(products=['Products.mediaPage'])
class TestCase(ptc.PloneTestCase):
    """Base class for the package's unit test cases.

    Common utility or setup code shared by plain unit tests belongs here.
    """
class FunctionalTestCase(ptc.FunctionalTestCase):
    """Base class for doctest-style functional integration tests.

    Basic shared utility or setup code for functional tests belongs here.
    """

    def afterSetUp(self):
        # Register a 'contributor' member so functional doctests can log
        # in with contributor rights.
        member_roles = ('Member', 'Contributor')
        membership = self.portal.portal_membership
        membership.addMember('contributor', 'secret', member_roles, [])
| [
"andreslb1@gmail.com"
] | andreslb1@gmail.com |
11120973f76467afd73c838f90f5d5d2a6539f40 | 21fb99baaaeaed674cd99fb1a5d39ea54a1db827 | /CmsHi/HiHLTAlgos/python/__init__.py | 55ffe5e80af7d2e370ef2a9f4aa62afbf98ad891 | [] | no_license | kurtejung/PurdueForest | e413d4bc953df386a50d01ca09d1b648d07a926c | 9c7de9e7452b0837a872cfdd428244cb46b55322 | refs/heads/master | 2020-05-16T20:32:30.896495 | 2014-06-02T18:42:18 | 2014-06-02T18:42:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | #Automatically created by SCRAM
import os
# Extend this namespace package's search path with the SCRAM-generated
# cfipython area so the generated configuration modules are importable
# from CmsHi.HiHLTAlgos as well.
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/CmsHi/HiHLTAlgos/',1)[0])+'/cfipython/slc5_amd64_gcc462/CmsHi/HiHLTAlgos')
| [
"kurtejung@gmail.com"
] | kurtejung@gmail.com |
a47988e12caea650f9b6dc78153c6e2a74602047 | 5aa0e5f32d529c3321c28d37b0a12a8cf69cfea8 | /client/local_objects/ClientPlayerManager.py | 8acf4ecba25471df1e138e3be612cc0741d8054f | [] | no_license | sheepsy90/survive | 26495f1ff2d8247fbb9470882f8be9f5272e7f2c | 0eddf637be0eacd34415761b78fc2c9d50bc1528 | refs/heads/master | 2021-01-09T05:55:16.546762 | 2017-02-03T20:15:28 | 2017-02-03T20:15:28 | 80,864,391 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 974 | py | # -*- coding:utf-8 -*-
from client.local_objects.PlayerModel import PlayerModel
class ClientPlayerManager(object):
    """Client-side registry of all known players, including the local one."""

    def __init__(self):
        # player_id -> PlayerModel for every player the client knows about.
        self.players = {}
        # The PlayerModel of the local player; None until the server has
        # identified it via add_new_player_position(..., is_me=True).
        self.me = None

    def add_new_player_position(self, player_id, player_name, position, is_moving, is_me):
        """Create or update a player's model and track the local player."""
        if player_id not in self.players:
            self.players[player_id] = PlayerModel(player_id, player_name, position, is_moving)
        else:
            self.players[player_id].update_position(position, is_moving)
        if is_me:
            self.me = self.players[player_id]

    def has_me(self):
        """Return True once the local player's model is known."""
        return self.me is not None

    def get_players(self):
        """Return all known player models."""
        return self.players.values()

    def remove_player(self, name):
        """Drop a player from the registry (e.g. on disconnect)."""
        # Single-argument call form works on both Python 2 and 3; the old
        # py2-only `print "..."` statement was a SyntaxError under py3.
        print("REMOVE PLAYER FROM CLIENT")
        del self.players[name]

    def get_me(self):
        """Return the local player's model, or None if not yet identified."""
        return self.me

    def set_my_character_condition(self, blurriness, redness):
        """Forward the local player's visual condition to its model."""
        self.me.set_character_condition(blurriness, redness)
"robert.kessler@klarna.com"
] | robert.kessler@klarna.com |
49915c6689b5cfb63c853499fc46782cbfb4e004 | 02d6aa27cffce7620975cc1750b1fdc33cfb4a52 | /gsf/processed/gsf_sub_routine_run_seqs/func_code.py | cf44c59a8295fee0c9b6d81b1e4b41edab648578 | [] | no_license | AlgorithmicAmoeba/picklejar | d8d54216e35b8f10c814a8837b536d480e3ced63 | 8b44829149f39c6e7538b52ae1fae62be3270d93 | refs/heads/master | 2022-12-28T18:33:25.397765 | 2020-10-21T07:43:53 | 2020-10-21T07:43:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | # first line: 501
@PickleJar.pickle(path='gsf/processed')
def gsf_sub_routine_run_seqs():
    """Returns the run sequences for the predict, update and resample subroutines

    Returns
    -------
    run_seqss : List
        [predict; update; resample] x [N_particles; run_seq]
    """
    # Particle counts: powers of two from 2**1 to just below 2**19, in
    # half-exponent steps.
    particle_counts = numpy.array([int(n) for n in 2 ** numpy.arange(1, 19, 0.5)])
    return [
        predict_subs_run_seq(particle_counts, 100),
        update_subs_run_seq(particle_counts, 100),
        resample_subs_run_seq(particle_counts, 100),
    ]
| [
"29543948+darren-roos@users.noreply.github.com"
] | 29543948+darren-roos@users.noreply.github.com |
d953b09cacf2af04049f33692d31022f851a9c4b | 29408c953766ac1f36b1a94c683bcd81729beb05 | /modin/engines/base/frame/data.py | 538f7dcf997100d3cba74d153e2656a651f0f8f2 | [
"Apache-2.0"
] | permissive | dineshsonachalam/modin | bd7e4081a0aafa3e5db3054a524475fb53324bb6 | 453024764ffdad5000047a4cfaa1e322d9941b6e | refs/heads/master | 2020-07-15T13:51:01.063607 | 2019-08-30T05:46:50 | 2019-08-30T05:46:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,076 | py | from itertools import groupby
import numpy as np
from operator import itemgetter
import pandas
from pandas.core.dtypes.cast import find_common_type
from pandas.core.index import ensure_index
from pandas.core.dtypes.common import is_numeric_dtype
from modin.backends.pandas.query_compiler import PandasQueryCompiler
from modin.error_message import ErrorMessage
class BasePandasFrame(object):
    """A partitioned frame of pandas blocks, the core data layer of Modin."""

    # Engine-specific partition manager class; set by concrete subclasses.
    _frame_mgr_cls = None
    # Query compiler class layered on top of this frame.
    _query_compiler_cls = PandasQueryCompiler
@property
def __constructor__(self):
"""The constructor for this object. A convenience method"""
return type(self)
    def __init__(
        self,
        partitions,
        index,
        columns,
        row_lengths=None,
        column_widths=None,
        dtypes=None,
    ):
        """Initialize a dataframe.

        Args:
            partitions: A 2D numpy array of partitions. Must contain partition objects.
            index: The index object for the dataframe. Converts to a pandas.Index.
            columns: The columns object for the dataframe. Converts to a pandas.Index.
            row_lengths: (optional) The lengths of each partition in the rows. The
                "height" of each of the block partitions. Is computed if not provided.
            column_widths: (optional) The width of each partition in the columns. The
                "width" of each of the block partitions. Is computed if not provided.
            dtypes: (optional) The data types for the dataframe.
        """
        # 2D grid of block partitions; rows index partition rows, cols partition cols.
        self._partitions = partitions
        # Axis labels, normalized to pandas Index objects.
        self._index_cache = ensure_index(index)
        self._columns_cache = ensure_index(columns)
        # Per-block dimensions; None means "compute lazily on first access".
        self._row_lengths_cache = row_lengths
        self._column_widths_cache = column_widths
        # Cached dtypes Series; None means "compute lazily on first access".
        self._dtypes = dtypes
        # Drop any zero-sized partitions up front so later ops never see them.
        self._filter_empties()
@property
def _row_lengths(self):
"""Compute the row lengths if they are not cached.
Returns:
A list of row lengths.
"""
if self._row_lengths_cache is None:
self._row_lengths_cache = [obj.length() for obj in self._partitions.T[0]]
return self._row_lengths_cache
@property
def _column_widths(self):
"""Compute the column widths if they are not cached.
Returns:
A list of column widths.
"""
if self._column_widths_cache is None:
self._column_widths_cache = [obj.width() for obj in self._partitions[0]]
return self._column_widths_cache
@property
def dtypes(self):
"""Compute the data types if they are not cached.
Returns:
A pandas Series containing the data types for this dataframe.
"""
if self._dtypes is None:
self._dtypes = self._compute_dtypes()
return self._dtypes
def _compute_dtypes(self):
"""Compute the dtypes via MapReduce.
Returns:
The data types of this dataframe.
"""
def dtype_builder(df):
return df.apply(lambda row: find_common_type(row.values), axis=0)
map_func = self._build_mapreduce_func(0, lambda df: df.dtypes)
reduce_func = self._build_mapreduce_func(0, dtype_builder)
# For now we will use a pandas Series for the dtypes.
if len(self.columns) > 0:
dtypes = self._map_reduce(0, map_func, reduce_func).to_pandas().iloc[0]
else:
dtypes = pandas.Series([])
# reset name to None because we use "__reduced__" internally
dtypes.name = None
return dtypes
_index_cache = None
_columns_cache = None
def _validate_set_axis(self, new_labels, old_labels):
"""Validates the index or columns replacement against the old labels.
Args:
new_labels: The labels to replace with.
old_labels: The labels to replace.
Returns:
The validated labels.
"""
new_labels = ensure_index(new_labels)
old_len = len(old_labels)
new_len = len(new_labels)
if old_len != new_len:
raise ValueError(
"Length mismatch: Expected axis has %d elements, "
"new values have %d elements" % (old_len, new_len)
)
return new_labels
def _get_index(self):
"""Gets the index from the cache object.
Returns:
A pandas.Index object containing the row labels.
"""
return self._index_cache
def _get_columns(self):
"""Gets the columns from the cache object.
Returns:
A pandas.Index object containing the column labels.
"""
return self._columns_cache
def _set_index(self, new_index):
"""Replaces the current row labels with new labels.
Args:
new_index: The replacement row labels.
"""
if self._index_cache is None:
self._index_cache = ensure_index(new_index)
else:
new_index = self._validate_set_axis(new_index, self._index_cache)
self._index_cache = new_index
self._apply_index_objs(axis=0)
def _set_columns(self, new_columns):
"""Replaces the current column labels with new labels.
Args:
new_columns: The replacement column labels.
"""
if self._columns_cache is None:
self._columns_cache = ensure_index(new_columns)
else:
new_columns = self._validate_set_axis(new_columns, self._columns_cache)
self._columns_cache = new_columns
if self._dtypes is not None:
self._dtypes.index = new_columns
self._apply_index_objs(axis=1)
columns = property(_get_columns, _set_columns)
index = property(_get_index, _set_index)
@property
def axes(self):
"""The index, columns that can be accessed with an `axis` integer."""
return [self.index, self.columns]
def _filter_empties(self):
"""Removes empty partitions to avoid triggering excess computation."""
self._column_widths_cache = [w for w in self._column_widths if w > 0]
self._row_lengths_cache = [r for r in self._row_lengths if r > 0]
self._partitions = np.array(
[
[
self._partitions[i][j]
for j in range(len(self._partitions[i]))
if j < len(self._column_widths)
]
for i in range(len(self._partitions))
if i < len(self._row_lengths)
]
)
    def _apply_index_objs(self, axis=None):
        """Lazily applies the index object (Index or Columns) to the partitions.

        Args:
            axis: The axis to apply to, None applies to both axes.

        Returns:
            A new 2D array of partitions that have the index assignment added to the
            call queue.
        """
        self._filter_empties()
        # Prefix sums of block sizes give each block its slice of global labels.
        if axis is None or axis == 0:
            cum_row_lengths = np.cumsum([0] + self._row_lengths)
        if axis is None or axis == 1:
            cum_col_widths = np.cumsum([0] + self._column_widths)
        if axis is None:
            # Assign both index and columns to every block.
            def apply_idx_objs(df, idx, cols):
                df.index, df.columns = idx, cols
                return df
            # `add_to_apply_calls` defers the assignment into the block's lazy
            # call queue rather than materializing the data now.
            self._partitions = np.array(
                [
                    [
                        self._partitions[i][j].add_to_apply_calls(
                            apply_idx_objs,
                            idx=self.index[
                                slice(cum_row_lengths[i], cum_row_lengths[i + 1])
                            ],
                            cols=self.columns[
                                slice(cum_col_widths[j], cum_col_widths[j + 1])
                            ],
                        )
                        for j in range(len(self._partitions[i]))
                    ]
                    for i in range(len(self._partitions))
                ]
            )
        elif axis == 0:
            # Row labels only.
            def apply_idx_objs(df, idx):
                df.index = idx
                return df
            self._partitions = np.array(
                [
                    [
                        self._partitions[i][j].add_to_apply_calls(
                            apply_idx_objs,
                            idx=self.index[
                                slice(cum_row_lengths[i], cum_row_lengths[i + 1])
                            ],
                        )
                        for j in range(len(self._partitions[i]))
                    ]
                    for i in range(len(self._partitions))
                ]
            )
        elif axis == 1:
            # Column labels only.
            def apply_idx_objs(df, cols):
                df.columns = cols
                return df
            self._partitions = np.array(
                [
                    [
                        self._partitions[i][j].add_to_apply_calls(
                            apply_idx_objs,
                            cols=self.columns[
                                slice(cum_col_widths[j], cum_col_widths[j + 1])
                            ],
                        )
                        for j in range(len(self._partitions[i]))
                    ]
                    for i in range(len(self._partitions))
                ]
            )
        # Any axis value other than None/0/1 is a caller bug.
        ErrorMessage.catch_bugs_and_request_email(
            axis is not None and axis not in [0, 1]
        )
    def mask(
        self,
        row_indices=None,
        row_numeric_idx=None,
        col_indices=None,
        col_numeric_idx=None,
    ):
        """Lazily select columns or rows from given indices.

        Note: If both row_indices and row_numeric_idx are set, row_indices will be used.
            The same rule applied to col_indices and col_numeric_idx.

        Args:
            row_indices: The row labels to extract.
            row_numeric_idx: The row indices to extract.
            col_indices: The column labels to extract.
            col_numeric_idx: The column indices to extract.

        Returns:
            A new dataframe.
        """
        # Nothing requested: selecting everything is just a copy.
        if (
            row_indices is None
            and row_numeric_idx is None
            and col_indices is None
            and col_numeric_idx is None
        ):
            return self.copy()
        # Labels win over positions; translate labels to positions first.
        if row_indices is not None:
            row_numeric_idx = self.index.get_indexer_for(row_indices)
        if row_numeric_idx is not None:
            # `_get_dict_of_block_index` uses 1 for the row axis; ordered=True
            # preserves the caller's requested row order.
            row_partitions_list = self._get_dict_of_block_index(
                1, row_numeric_idx, ordered=True
            )
            new_row_lengths = [len(indices) for _, indices in row_partitions_list]
            new_index = self.index[row_numeric_idx]
        else:
            # No row selection: keep every row partition whole (full slice).
            row_partitions_list = [
                (i, slice(None)) for i in range(len(self._row_lengths))
            ]
            new_row_lengths = self._row_lengths
            new_index = self.index
        if col_indices is not None:
            col_numeric_idx = self.columns.get_indexer_for(col_indices)
        if col_numeric_idx is not None:
            col_partitions_list = self._get_dict_of_block_index(
                0, col_numeric_idx, ordered=True
            )
            new_col_widths = [len(indices) for _, indices in col_partitions_list]
            new_columns = self.columns[col_numeric_idx]
            new_dtypes = self.dtypes[col_numeric_idx]
        else:
            col_partitions_list = [
                (i, slice(None)) for i in range(len(self._column_widths))
            ]
            new_col_widths = self._column_widths
            new_columns = self.columns
            new_dtypes = self.dtypes
        # Mask each intersecting block with its block-local indices, skipping
        # blocks that contribute no rows/columns (a full slice always counts).
        new_partitions = np.array(
            [
                [
                    self._partitions[row_idx][col_idx].mask(
                        row_internal_indices, col_internal_indices
                    )
                    for col_idx, col_internal_indices in col_partitions_list
                    if isinstance(col_internal_indices, slice)
                    or len(col_internal_indices) > 0
                ]
                for row_idx, row_internal_indices in row_partitions_list
                if isinstance(row_internal_indices, slice)
                or len(row_internal_indices) > 0
            ]
        )
        return self.__constructor__(
            new_partitions,
            new_index,
            new_columns,
            new_row_lengths,
            new_col_widths,
            new_dtypes,
        )
def copy(self):
"""Copy this object.
Returns:
A copied version of this object.
"""
return self.__constructor__(
self._partitions,
self.index.copy(),
self.columns.copy(),
self._row_lengths,
self._column_widths,
self._dtypes,
)
@classmethod
def combine_dtypes(cls, list_of_dtypes, column_names):
"""Describes how data types should be combined when they do not match.
Args:
list_of_dtypes: A list of pandas Series with the data types.
column_names: The names of the columns that the data types map to.
Returns:
A pandas Series containing the finalized data types.
"""
# Compute dtypes by getting collecting and combining all of the partitions. The
# reported dtypes from differing rows can be different based on the inference in
# the limited data seen by each worker. We use pandas to compute the exact dtype
# over the whole column for each column.
dtypes = (
pandas.concat(list_of_dtypes, axis=1)
.apply(lambda row: find_common_type(row.values), axis=1)
.squeeze(axis=0)
)
dtypes.index = column_names
return dtypes
    def astype(self, col_dtypes):
        """Converts columns dtypes to given dtypes.

        Args:
            col_dtypes: Dictionary of {col: dtype,...} where col is the column
                name and dtype is a numpy dtype.

        Returns:
            dataframe with updated dtypes.
        """
        columns = col_dtypes.keys()
        # Create Series for the updated dtypes
        new_dtypes = self.dtypes.copy()
        for i, column in enumerate(columns):
            dtype = col_dtypes[column]
            # Only record a change when the requested dtype actually differs
            # from the current one.
            if (
                not isinstance(dtype, type(self.dtypes[column]))
                or dtype != self.dtypes[column]
            ):
                # Update the new dtype series to the proper pandas dtype
                try:
                    new_dtype = np.dtype(dtype)
                except TypeError:
                    # Not a numpy-convertible dtype (e.g. a pandas extension
                    # dtype); keep it as given.
                    new_dtype = dtype
                # Widen implicitly-produced 32-bit dtypes to 64-bit unless the
                # caller explicitly asked for the 32-bit type. NOTE(review):
                # presumably normalizes platform-dependent numpy defaults
                # (e.g. Windows int32) -- confirm intent.
                if dtype != np.int32 and new_dtype == np.int32:
                    new_dtype = np.dtype("int64")
                elif dtype != np.float32 and new_dtype == np.float32:
                    new_dtype = np.dtype("float64")
                new_dtypes[column] = new_dtype
        # Update partitions for each dtype that is updated
        def astype_builder(df):
            # Each block converts only the requested columns it actually holds.
            return df.astype({k: v for k, v in col_dtypes.items() if k in df})
        new_frame = self._frame_mgr_cls.map_partitions(self._partitions, astype_builder)
        return self.__constructor__(
            new_frame,
            self.index,
            self.columns,
            self._row_lengths,
            self._column_widths,
            new_dtypes,
        )
# Metadata modification methods
def add_prefix(self, prefix, axis):
"""Add a prefix to the current row or column labels.
Args:
prefix: The prefix to add.
axis: The axis to update.
Returns:
A new dataframe with the updated labels.
"""
new_labels = self.axes[axis].map(lambda x: str(prefix) + str(x))
new_frame = self.copy()
if axis == 0:
new_frame.index = new_labels
else:
new_frame.columns = new_labels
return new_frame
def add_suffix(self, suffix, axis):
"""Add a suffix to the current row or column labels.
Args:
suffix: The suffix to add.
axis: The axis to update.
Returns:
A new dataframe with the updated labels.
"""
new_labels = self.axes[axis].map(lambda x: str(x) + str(suffix))
new_frame = self.copy()
if axis == 0:
new_frame.index = new_labels
else:
new_frame.columns = new_labels
return new_frame
# END Metadata modification methods
def _numeric_columns(self, include_bool=True):
"""Returns the numeric columns of the Manager.
Returns:
List of index names.
"""
columns = []
for col, dtype in zip(self.columns, self.dtypes):
if is_numeric_dtype(dtype) and (
include_bool or (not include_bool and dtype != np.bool_)
):
columns.append(col)
return columns
    def _get_dict_of_block_index(self, axis, indices, ordered=False):
        """Convert indices to a dict of block index to internal index mapping.

        Note: See `_get_blocks_containing_index` for primary usage. This method
            accepts a list of indices rather than just a single value, and uses
            `_get_blocks_containing_index`.

        Args:
            axis: The axis along which to get the indices
                (0 - columns, 1 - rows)
            indices: A list of global indices to convert.

        Returns
            For unordered: a dictionary of {block index: list of local indices}.
            For ordered: a list of tuples mapping block index: list of local indices.
        """
        # When order does not matter, sorting enables the fast single-pass path.
        if not ordered:
            indices = np.sort(indices)
        else:
            indices = np.array(indices)
        # Note the inverted axis convention here: 0 means columns, 1 means rows.
        if not axis:
            # INT_MAX to make sure we don't try to compute on partitions that don't
            # exist.
            cumulative = np.array(
                np.append(self._column_widths[:-1], np.iinfo(np.int32).max)
            ).cumsum()
        else:
            cumulative = np.array(
                np.append(self._row_lengths[:-1], np.iinfo(np.int32).max)
            ).cumsum()
        # Translate global positions into positions local to `block_idx`.
        def internal(block_idx, global_index):
            return (
                global_index
                if not block_idx
                else np.subtract(
                    global_index, cumulative[min(block_idx, len(cumulative) - 1) - 1]
                )
            )
        # Each index falls into the block whose cumulative boundary it crosses.
        partition_ids = np.digitize(indices, cumulative)
        # If the output order doesn't matter or if the indices are monotonically
        # increasing, the computation is significantly simpler and faster than doing
        # the zip and groupby.
        if not ordered or np.all(np.diff(indices) > 0):
            count_for_each_partition = np.array(
                [(partition_ids == i).sum() for i in range(len(cumulative))]
            ).cumsum()
            # Compute the internal indices and pair those with the partition index.
            # If the first partition has any values we need to return, compute those
            # first to make the list comprehension easier. Otherwise, just append the
            # rest of the values to an empty list.
            if count_for_each_partition[0] > 0:
                first_partition_indices = [
                    (0, internal(0, indices[slice(count_for_each_partition[0])]))
                ]
            else:
                first_partition_indices = []
            partition_ids_with_indices = first_partition_indices + [
                (
                    i,
                    internal(
                        i,
                        indices[
                            slice(
                                count_for_each_partition[i - 1],
                                count_for_each_partition[i],
                            )
                        ],
                    ),
                )
                for i in range(1, len(count_for_each_partition))
                if count_for_each_partition[i] > count_for_each_partition[i - 1]
            ]
            return (
                dict(partition_ids_with_indices)
                if not ordered
                else partition_ids_with_indices
            )
        all_partitions_and_idx = zip(partition_ids, indices)
        # In ordered, we have to maintain the order of the list of indices provided.
        # This means that we need to return a list instead of a dictionary.
        # itertools.groupby only groups *consecutive* runs, which is exactly
        # what preserves the caller's ordering here.
        return [
            (k, internal(k, [x for _, x in v]))
            for k, v in groupby(all_partitions_and_idx, itemgetter(0))
        ]
def _join_index_objects(self, axis, other_index, how, sort):
"""Joins a pair of index objects (columns or rows) by a given strategy.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other_index: The other_index to join on.
how: The type of join to join to make (e.g. right, left).
Returns:
Joined indices.
"""
if isinstance(other_index, list):
joined_obj = self.columns if not axis else self.index
# TODO: revisit for performance
for obj in other_index:
joined_obj = joined_obj.join(obj, how=how, sort=sort)
return joined_obj
if not axis:
return self.columns.join(other_index, how=how, sort=sort)
else:
return self.index.join(other_index, how=how, sort=sort)
# Internal methods
# These methods are for building the correct answer in a modular way.
# Please be careful when changing these!
def _build_mapreduce_func(self, axis, func):
"""Properly formats a MapReduce result so that the partitioning is correct.
Note: This should be used for any MapReduce style operation that results in a
reduced data dimensionality (dataframe -> series).
Args:
axis: The axis along which to apply the function.
func: The function to apply.
Returns:
A function to be shipped to the partitions to be executed.
"""
def _map_reduce_func(df):
series_result = func(df)
if axis == 0 and isinstance(series_result, pandas.Series):
# In the case of axis=0, we need to keep the shape of the data
# consistent with what we have done. In the case of a reduction, the
# data for axis=0 should be a single value for each column. By
# transposing the data after we convert to a DataFrame, we ensure that
# the columns of the result line up with the columns from the data.
# axis=1 does not have this requirement because the index already will
# line up with the index of the data based on how pandas creates a
# DataFrame from a Series.
return pandas.DataFrame(series_result).T
return pandas.DataFrame(series_result)
return _map_reduce_func
def _compute_map_reduce_metadata(self, axis, new_parts):
if axis == 0:
columns = self.columns
index = ["__reduced__"]
new_lengths = [1]
new_widths = self._column_widths
new_dtypes = self._dtypes
else:
columns = ["__reduced__"]
index = self.index
new_lengths = self._row_lengths
new_widths = [1]
if self._dtypes is not None:
new_dtypes = pandas.Series(
np.full(1, find_common_type(self.dtypes.values)),
index=["__reduced__"],
)
else:
new_dtypes = self._dtypes
return self.__constructor__(
new_parts, index, columns, new_lengths, new_widths, new_dtypes
)
def _map_reduce_full_axis(self, axis, func):
"""Applies map that reduce Manager to series but require knowledge of full axis.
Args:
func: Function to reduce the Manager by. This function takes in a Manager.
axis: axis to apply the function to.
Return:
Pandas series containing the reduced data.
"""
func = self._build_mapreduce_func(axis, func)
new_parts = self._frame_mgr_cls.map_axis_partitions(
axis, self._partitions, func
)
return self._compute_map_reduce_metadata(axis, new_parts)
def _map_reduce(self, axis, map_func, reduce_func=None):
"""Apply function that will reduce the data to a Pandas Series.
Args:
axis: 0 for columns and 1 for rows. Default is 0.
map_func: Callable function to map the dataframe.
reduce_func: Callable function to reduce the dataframe. If none,
then apply map_func twice.
Return:
A new dataframe.
"""
map_func = self._build_mapreduce_func(axis, map_func)
if reduce_func is None:
reduce_func = map_func
else:
reduce_func = self._build_mapreduce_func(axis, reduce_func)
map_parts = self._frame_mgr_cls.map_partitions(self._partitions, map_func)
reduce_parts = self._frame_mgr_cls.map_axis_partitions(
axis, map_parts, reduce_func
)
return self._compute_map_reduce_metadata(axis, reduce_parts)
def _map(self, func, dtypes=None):
"""Perform a function that maps across the entire dataset.
Args:
func: The function to apply.
dtypes: (optional) The data types for the result. This is an optimization
because there are functions that always result in a particular data
type, and allows us to avoid (re)computing it.
Returns:
A new dataframe.
"""
new_partitions = self._frame_mgr_cls.map_partitions(self._partitions, func)
if dtypes == "copy":
dtypes = self._dtypes
elif dtypes is not None:
dtypes = pandas.Series(
[np.dtype(dtypes)] * len(self.columns), index=self.columns
)
return self.__constructor__(
new_partitions,
self.index,
self.columns,
self._row_lengths,
self._column_widths,
dtypes=dtypes,
)
def _map_full_axis(self, axis, func):
"""Perform a function across an entire axis.
Note: The data shape is not changed (length and width of the table).
Args:
axis: The axis to apply over.
func: The function to apply.
Returns:
A new dataframe.
"""
new_partitions = self._frame_mgr_cls.map_axis_partitions(
axis, self._partitions, func
)
return self.__constructor__(
new_partitions,
self.index,
self.columns,
self._row_lengths,
self._column_widths,
)
    def _apply_full_axis(
        self, axis, func, new_index=None, new_columns=None, dtypes=None
    ):
        """Perform a function across an entire axis.

        Note: The data shape may change as a result of the function.

        Args:
            axis: The axis to apply over.
            func: The function to apply.
            new_index: (optional) The index of the result. We may know this in advance,
                and if not provided it must be computed.
            new_columns: (optional) The columns of the result. We may know this in
                advance, and if not provided it must be computed.
            dtypes: (optional) The data types of the result. This is an optimization
                because there are functions that always result in a particular data
                type, and allows us to avoid (re)computing it.

        Returns:
            A new dataframe.
        """
        new_partitions = self._frame_mgr_cls.map_axis_partitions(
            axis, self._partitions, func
        )
        # Index objects for new object creation. This is shorter than if..else
        if new_columns is None:
            new_columns = self._frame_mgr_cls.get_indices(
                1, new_partitions, lambda df: df.columns
            )
        if new_index is None:
            # NOTE(review): the index labels are gathered with axis=1 here, the
            # same axis used for columns above; later modin versions pass 0 for
            # `df.index`. Confirm `get_indices` axis semantics before changing.
            new_index = self._frame_mgr_cls.get_indices(
                1, new_partitions, lambda df: df.index
            )
        # Length objects for new object creation. This is shorter than if..else
        # This object determines the lengths and widths based on the given parameters
        # and builds a dictionary used in the constructor below. 0 gives the row lengths
        # and 1 gives the column widths. Since the dimension of `axis` given may have
        # changed, we current just recompute it.
        lengths_objs = {
            axis: None,
            axis ^ 1: [self._row_lengths, self._column_widths][axis ^ 1],
        }
        if dtypes == "copy":
            dtypes = self._dtypes
        elif dtypes is not None:
            dtypes = pandas.Series(
                [np.dtype(dtypes)] * len(new_columns), index=new_columns
            )
        return self.__constructor__(
            new_partitions,
            new_index,
            new_columns,
            lengths_objs[0],
            lengths_objs[1],
            dtypes,
        )
    def _apply_full_axis_select_indices(
        self,
        axis,
        func,
        apply_indices=None,
        numeric_indices=None,
        new_index=None,
        new_columns=None,
        keep_remaining=False,
    ):
        """Apply a function across an entire axis for a subset of the data.

        Args:
            axis: The axis to apply over.
            func: The function to apply
            apply_indices: The labels to apply over.
            numeric_indices: The indices to apply over.
            new_index: (optional) The index of the result. We may know this in advance,
                and if not provided it must be computed.
            new_columns: (optional) The columns of the result. We may know this in
                advance, and if not provided it must be computed.
            keep_remaining: Whether or not to drop the data that is not computed over.

        Returns:
            A new dataframe.
        """
        # One of the two index forms must be given.
        assert apply_indices is not None or numeric_indices is not None
        # Convert indices to numeric indices
        old_index = self.index if axis else self.columns
        if apply_indices is not None:
            numeric_indices = old_index.get_indexer_for(apply_indices)
        # Map the global positions onto (block, local position) pairs.
        dict_indices = self._get_dict_of_block_index(axis, numeric_indices)
        new_partitions = self._frame_mgr_cls.apply_func_to_select_indices_along_full_axis(
            axis, self._partitions, func, dict_indices, keep_remaining=keep_remaining
        )
        # TODO Infer columns and index from `keep_remaining` and `apply_indices`
        # None means "compute the labels lazily from the result".
        if new_index is None:
            new_index = self.index if axis == 1 else None
        if new_columns is None:
            new_columns = self.columns if axis == 0 else None
        # Length objects for new object creation. This is shorter than if..else
        # This object determines the lengths and widths based on the given parameters
        # and builds a dictionary used in the constructor below. 0 gives the row lengths
        # and 1 gives the column widths. Since the dimension of `axis` given may have
        # changed, we currently just recompute it.
        # TODO Determine lengths from current lengths if `keep_remaining=False`
        lengths_objs = {
            axis: None
            if not keep_remaining
            else [self._row_lengths, self._column_widths][axis],
            axis ^ 1: [self._row_lengths, self._column_widths][axis ^ 1],
        }
        return self.__constructor__(
            new_partitions, new_index, new_columns, lengths_objs[0], lengths_objs[1]
        )
    def _apply_select_indices(
        self,
        axis,
        func,
        apply_indices=None,
        row_indices=None,
        col_indices=None,
        new_index=None,
        new_columns=None,
        keep_remaining=False,
        item_to_distribute=None,
    ):
        """Apply a function for a subset of the data.

        Args:
            axis: The axis to apply over.
            func: The function to apply
            apply_indices: (optional) The labels to apply over. Must be given if axis is
                provided.
            row_indices: (optional) The row indices to apply over. Must be provided with
                `col_indices` to apply over both axes.
            col_indices: (optional) The column indices to apply over. Must be provided
                with `row_indices` to apply over both axes.
            new_index: (optional) The index of the result. We may know this in advance,
                and if not provided it must be computed.
            new_columns: (optional) The columns of the result. We may know this in
                advance, and if not provided it must be computed.
            keep_remaining: Whether or not to drop the data that is not computed over.
            item_to_distribute: (optional) The item to split up so it can be applied
                over both axes.

        Returns:
            A new dataframe.
        """
        # TODO Infer columns and index from `keep_remaining` and `apply_indices`
        # None means "compute the labels lazily from the result".
        if new_index is None:
            new_index = self.index if axis == 1 else None
        if new_columns is None:
            new_columns = self.columns if axis == 0 else None
        if axis is not None:
            # Single-axis mode: labels are required and are translated to
            # per-block local positions.
            assert apply_indices is not None
            # Convert indices to numeric indices
            old_index = self.index if axis else self.columns
            numeric_indices = old_index.get_indexer_for(apply_indices)
            dict_indices = self._get_dict_of_block_index(axis, numeric_indices)
            new_partitions = self._frame_mgr_cls.apply_func_to_select_indices(
                axis,
                self._partitions,
                func,
                dict_indices,
                keep_remaining=keep_remaining,
            )
            # Length objects for new object creation. This is shorter than if..else
            # This object determines the lengths and widths based on the given
            # parameters and builds a dictionary used in the constructor below. 0 gives
            # the row lengths and 1 gives the column widths. Since the dimension of
            # `axis` given may have changed, we current just recompute it.
            # TODO Determine lengths from current lengths if `keep_remaining=False`
            lengths_objs = {
                axis: [len(apply_indices)]
                if not keep_remaining
                else [self._row_lengths, self._column_widths][axis],
                axis ^ 1: [self._row_lengths, self._column_widths][axis ^ 1],
            }
            return self.__constructor__(
                new_partitions, new_index, new_columns, lengths_objs[0], lengths_objs[1]
            )
        else:
            # We are apply over both axes here, so make sure we have all the right
            # variables set.
            assert row_indices is not None and col_indices is not None
            assert keep_remaining
            assert item_to_distribute is not None
            # Note the helper's inverted axis convention: 1 = rows, 0 = columns.
            row_partitions_list = self._get_dict_of_block_index(1, row_indices).items()
            col_partitions_list = self._get_dict_of_block_index(0, col_indices).items()
            new_partitions = self._frame_mgr_cls.apply_func_to_indices_both_axis(
                self._partitions,
                func,
                row_partitions_list,
                col_partitions_list,
                item_to_distribute,
            )
            # Shape is unchanged in this mode, so the cached lengths carry over.
            return self.__constructor__(
                new_partitions,
                new_index,
                new_columns,
                self._row_lengths_cache,
                self._column_widths_cache,
            )
    def _copartition(self, axis, other, how, sort, force_repartition=False):
        """Copartition two dataframes.

        Args:
            axis: The axis to copartition along.
            other: The other dataframes(s) to copartition against.
            how: How to manage joining the index object ("left", "right", etc.)
            sort: Whether or not to sort the joined index.
            force_repartition: Whether or not to force the repartitioning. By default,
                this method will skip repartitioning if it is possible. This is because
                reindexing is extremely inefficient. Because this method is used to
                `join` or `append`, it is vital that the internal indices match.

        Returns:
            A tuple (left data, right data list, joined index).
        """
        # Normalize to a list so one frame and many frames share a code path.
        if isinstance(other, type(self)):
            other = [other]
        index_obj = [o.axes[axis] for o in other]
        joined_index = self._join_index_objects(axis ^ 1, index_obj, how, sort)
        # We have to set these because otherwise when we perform the functions it may
        # end up serializing this entire object.
        left_old_idx = self.axes[axis]
        right_old_idxes = index_obj
        # Start with this and we'll repartition the first time, and then not again.
        if not left_old_idx.equals(joined_index) or force_repartition:
            reindexed_self = self._frame_mgr_cls.map_axis_partitions(
                axis, self._partitions, lambda df: df.reindex(joined_index, axis=axis)
            )
        else:
            # Already aligned: reuse the existing partitions untouched.
            reindexed_self = self._partitions
        reindexed_other_list = []
        for i in range(len(other)):
            if right_old_idxes[i].equals(joined_index) and not force_repartition:
                reindexed_other = other[i]._partitions
            else:
                reindexed_other = other[i]._frame_mgr_cls.map_axis_partitions(
                    axis,
                    other[i]._partitions,
                    lambda df: df.reindex(joined_index, axis=axis),
                )
            reindexed_other_list.append(reindexed_other)
        return reindexed_self, reindexed_other_list, joined_index
def _binary_op(self, op, right_frame, join_type="outer"):
"""Perform an operation that requires joining with another dataframe.
Args:
op: The function to apply after the join.
right_frame: The dataframe to join with.
join_type: (optional) The type of join to apply.
Returns:
A new dataframe.
"""
left_parts, right_parts, joined_index = self._copartition(
0, right_frame, join_type, sort=True
)
# unwrap list returned by `copartition`.
right_parts = right_parts[0]
new_frame = self._frame_mgr_cls.binary_operation(
1, left_parts, lambda l, r: op(l, r), right_parts
)
new_columns = self.columns.join(right_frame.columns, how=join_type)
return self.__constructor__(new_frame, self.index, new_columns, None, None)
def _concat(self, axis, others, how, sort):
"""Concatenate this dataframe with one or more others.
Args:
axis: The axis to concatenate over.
others: The list of dataframes to concatenate with.
how: The type of join to use for the axis.
sort: Whether or not to sort the result.
Returns:
A new dataframe.
"""
# TODO Update to no longer force repartition
# Requires pruning of the partitions after they have been changed
left_parts, right_parts, joined_index = self._copartition(
axis ^ 1, others, how, sort, force_repartition=True
)
new_partitions = self._frame_mgr_cls.concat(axis, left_parts, right_parts)
if axis == 0:
new_index = self.index.append([other.index for other in others])
new_columns = joined_index
else:
new_columns = self.columns.append([other.columns for other in others])
new_index = joined_index
return self.__constructor__(new_partitions, new_index, new_columns)
    def groupby_reduce(
        self, axis, by, map_func, reduce_func, new_index=None, new_columns=None
    ):
        """Groupby another dataframe and aggregate the result.

        Args:
            axis: The axis to groupby and aggregate over.
            by: The dataframe to group by.
            map_func: The map component of the aggregation.
            reduce_func: The reduce component of the aggregation.
            new_index: (optional) The index of the result. We may know this in advance,
                and if not provided it must be computed.
            new_columns: (optional) The columns of the result. We may know this in
                advance, and if not provided it must be computed.

        Returns:
            A new dataframe.
        """
        new_partitions = self._frame_mgr_cls.groupby_reduce(
            axis, self._partitions, by._partitions, map_func, reduce_func
        )
        if new_columns is None:
            new_columns = self._frame_mgr_cls.get_indices(
                1, new_partitions, lambda df: df.columns
            )
        if new_index is None:
            # NOTE(review): the index labels are gathered with axis=1 here, the
            # same axis used for columns above; later modin versions pass 0 for
            # `df.index`. Confirm `get_indices` axis semantics before changing.
            new_index = self._frame_mgr_cls.get_indices(
                1, new_partitions, lambda df: df.index
            )
        # Grouping along an axis collapses that axis, so only the other axis's
        # cached block dimensions remain valid.
        if axis == 0:
            new_widths = self._column_widths
            new_lengths = None
        else:
            new_widths = None
            new_lengths = self._row_lengths
        return self.__constructor__(
            new_partitions, new_index, new_columns, new_lengths, new_widths
        )
@classmethod
def from_pandas(cls, df):
"""Improve simple Pandas DataFrame to an advanced and superior Modin DataFrame.
Args:
df: Pandas DataFrame object.
Returns:
A new dataframe.
"""
new_index = df.index
new_columns = df.columns
new_dtypes = df.dtypes
new_frame, new_lengths, new_widths = cls._frame_mgr_cls.from_pandas(df, True)
return cls(
new_frame,
new_index,
new_columns,
new_lengths,
new_widths,
dtypes=new_dtypes,
)
def to_pandas(self):
"""Converts Modin DataFrame to Pandas DataFrame.
Returns:
Pandas DataFrame.
"""
df = self._frame_mgr_cls.to_pandas(self._partitions)
if df.empty:
if len(self.columns) != 0:
df = pandas.DataFrame(columns=self.columns).astype(self.dtypes)
else:
df = pandas.DataFrame(columns=self.columns, index=self.index)
df.index.name = self.index.name
return df
def to_numpy(self):
"""Converts Modin DataFrame to a 2D numpy array.
Returns:
Numpy array.
"""
return self._frame_mgr_cls.to_numpy(self._partitions)
def transpose(self):
    """Transpose the index and columns of this dataframe.

    Returns:
        A new dataframe.
    """
    # Transpose every partition lazily, then transpose the partition grid
    # itself so the blocks line up with the swapped axes.
    transposed_parts = self._frame_mgr_cls.lazy_map_partitions(
        self._partitions, lambda part: part.T
    ).T
    # After the transpose every new column mixes all original columns, so
    # each one gets the common dtype of the original columns.
    common_dtype = find_common_type(self.dtypes.values)
    transposed_dtypes = pandas.Series(
        np.full(len(self.index), common_dtype), index=self.index
    )
    # Index/columns and lengths/widths swap roles in the new frame.
    return self.__constructor__(
        transposed_parts,
        self.columns,
        self.index,
        self._column_widths,
        self._row_lengths,
        dtypes=transposed_dtypes,
    )
# Head/Tail/Front/Back
@staticmethod
def _compute_lengths(lengths_list, n, from_back=False):
"""Computes the new lengths based on the lengths/widths of the previous and `n`.
Args:
lengths_list: The list of lengths or widths.
n: The number of rows or columns extracted.
from_back: Whether or not to compute from the back. Used in `tail`/`back`
Returns:
A list of lengths or widths of the resulting dataframe.
"""
if not from_back:
idx = np.digitize(n, np.cumsum(lengths_list))
if idx == 0:
return [n]
return [
lengths_list[i] if i < idx else n - sum(lengths_list[:i])
for i in range(len(lengths_list))
if i <= idx
]
else:
lengths_list = [i for i in lengths_list if i > 0]
idx = np.digitize(sum(lengths_list) - n, np.cumsum(lengths_list))
if idx == len(lengths_list) - 1:
return [n]
return [
lengths_list[i] if i > idx else n - sum(lengths_list[i + 1 :])
for i in range(len(lengths_list))
if i >= idx
]
def head(self, n):
    """Returns the first n rows.

    Args:
        n: Integer containing the number of rows to return.

    Returns:
        A new dataframe.
    """
    # A negative n counts back from the end of the axis, clamped at zero.
    if n < 0:
        n = max(0, len(self.index) + n)
    taken = self._frame_mgr_cls.take(0, self._partitions, self._row_lengths, n)
    return self.__constructor__(
        taken,
        self.index[:n],
        self.columns,
        self._compute_lengths(self._row_lengths, n),
        self._column_widths,
        self.dtypes,
    )
def tail(self, n):
    """Returns the last n rows.

    Args:
        n: Integer containing the number of rows to return.

    Returns:
        A new dataframe.
    """
    # A negative n counts forward from the start, clamped at zero
    # (same convention as `head`).
    if n < 0:
        n = max(0, len(self.index) + n)
    # A negative count tells the partition manager to take from the back.
    taken = self._frame_mgr_cls.take(0, self._partitions, self._row_lengths, -n)
    return self.__constructor__(
        taken,
        self.index[-n:],
        self.columns,
        self._compute_lengths(self._row_lengths, n, from_back=True),
        self._column_widths,
        self.dtypes,
    )
def front(self, n):
    """Returns the first n columns.

    Args:
        n: Integer containing the number of columns to return.

    Returns:
        A new dataframe.
    """
    taken = self._frame_mgr_cls.take(1, self._partitions, self._column_widths, n)
    # Columns, widths and dtypes are all trimmed to the leading n entries.
    return self.__constructor__(
        taken,
        self.index,
        self.columns[:n],
        self._row_lengths,
        self._compute_lengths(self._column_widths, n),
        self.dtypes[:n],
    )
def back(self, n):
    """Returns the last n columns.

    Args:
        n: Integer containing the number of columns to return.

    Returns:
        A new dataframe.
    """
    new_col_lengths = self._compute_lengths(self._column_widths, n, from_back=True)
    new_partitions = self._frame_mgr_cls.take(
        1, self._partitions, self._column_widths, -n
    )
    return self.__constructor__(
        new_partitions,
        self.index,
        self.columns[-n:],
        self._row_lengths,
        new_col_lengths,
        # BUGFIX: must keep the dtypes of the *last* n columns to match
        # `self.columns[-n:]`; the previous `self.dtypes[n:]` dropped the
        # first n entries instead, misaligning dtypes with columns
        # whenever len(columns) != 2 * n.
        self.dtypes[-n:],
    )
# End Head/Tail/Front/Back
| [
"12377941+williamma12@users.noreply.github.com"
] | 12377941+williamma12@users.noreply.github.com |
ed8c4e4a14c88aebc9af0bec316c78844f6d49f5 | 2e568d6e7f18a404d844ab22e949c685c2798c6a | /PyMaforo/run.py | 077cc123ffbdcc9b451aae2db18fcb81e6145c98 | [] | no_license | JoeArtisan/PyMaforo | 7c4caabc535f18f7cc698da83b0c21f7c6aa072b | ecadc1aafa500dbad2905fc40a9eae9b9d706f41 | refs/heads/master | 2023-04-20T03:17:54.126123 | 2021-05-17T21:00:00 | 2021-05-17T21:00:00 | 220,867,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,309 | py | import serial
import time
import os


# Clear the console. PEP 8 (E731) prefers `def` over assigning a lambda so
# the function has a real name in tracebacks. Uses the Windows `cls`
# command — this script targets Windows.
def clear():
    os.system('cls')


# Serial connection settings for the Arduino board.
serial_port = 'COM4'
baudrate = 115200
# Main-menu loop flag; set to False to exit the program.
go = True
def _ConnectToArduino():
    """Open the serial link to the Arduino and return the connection."""
    connection = serial.Serial(serial_port, baudrate, timeout=.1)
    # Give the board a moment to reset after the port opens.
    time.sleep(2)
    return connection
def _sendData(arduino,option):
try:
with arduino:
arduino.write(option.encode()) #read the data from the arduino
except:
print("Failed to connect on")
# Interactive console menu: navigate to an area and send traffic-light
# commands to the Arduino until the user chooses to quit.
while go:
    print("< ================================= >")
    print("Proyecto PySemaforo - Inicio")
    print("MENU")
    print("1. Ver Semaforos")
    print("0. Cerrar")
    choice = input("Ingrese la opción: ")

    if choice == '0':
        # Shutdown: short countdown, then leave the main loop.
        clear()
        print("El sistema se cerrará en:")
        for remaining in range(3, 0, -1):
            print(remaining)
            time.sleep(0.5)
        time.sleep(2)
        print("Sistema cerrado correctamente :V")
        go = False
        continue
    if choice != '1':
        print("Opción incorrecta, intente nuevamente.")
        continue

    # Semaphore area submenu.
    clear()
    print("< ================================= >")
    print("Proyecto PySemaforo - Semaforos")
    print("MENU")
    print("1. Area 201")
    print("2. Volver")
    area_choice = input("Ingrese la opción: ")
    if area_choice == '2':
        print("Listo")
        continue
    if area_choice != '1':
        print("Opción incorrecta, intente nuevamente.")
        continue

    # Action submenu for area 201.
    clear()
    print("< ================================= >")
    print("Proyecto PySemaforo - Area 201")
    print("MENU")
    print("1. Dar Paso")
    print("2. Dar Paso con Prioridad")
    print("3. Volver")
    action = input("Ingrese la opción: ")
    if action == '1':
        clear()
        print("Inicializando Función Dar Paso")
        _sendData(_ConnectToArduino(), 'A')
    elif action == '2':
        clear()
        print("Inicializando Función Dar Paso con Prioridad")
        _sendData(_ConnectToArduino(), 'B')
    else:
        # NOTE(review): option '3' (Volver) also lands here in the
        # original flow and prints the invalid-option message.
        print("Opción incorrecta, intente nuevamente.")
| [
"noreply@github.com"
] | noreply@github.com |
d23f0fdc9f79350dc59b7bbff909a0248f0ab93b | 4e59f5fbd1e777f2488eb2a46deca34acf813979 | /clients/admin.py | b19f3d0c3e98075355f6e14f4524c33f0aa4eac9 | [] | no_license | BoughezalaMohamedAimen/laser | f8c051be5c85be8f09b3ac4272065ce24af26555 | 1ac9c97b8ead4edcfcadeaafa0ee567f3f3d3d0d | refs/heads/master | 2020-08-09T19:26:21.516671 | 2019-10-10T10:30:54 | 2019-10-10T10:30:54 | 214,154,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | from django.contrib import admin
# Register your models here.
# BUGFIX/idiom: replaced the wildcard `from .models import *` with explicit
# imports so the names registered below are visible and lint-checkable.
from .models import Abonnement, SeanceHistorique

# Expose the client models in the Django admin site.
admin.site.register(SeanceHistorique)
admin.site.register(Abonnement)
| [
"mamoumou121@gmail.com"
] | mamoumou121@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.