Dataset schema (column, type, length/range):

| Column | Type | Length / range |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |
a12f73648f46854614cd7f814a32252da0ca2ac6
|
ae6f85863eb6ebecca2950b4fbff394a4edcfca6
|
/chataggregator.py
|
79fa3b314e31448072f444fc877169aab9c6829d
|
[
"MIT"
] |
permissive
|
m-henderson/ChatAggregator
|
79ee32ae7a851b0ae5136184d55f5ea596eaab85
|
2692e9e5dfd5d78bbf5d658731b57630099ec3e4
|
refs/heads/master
| 2020-09-12T04:13:47.784589
| 2019-11-17T19:20:39
| 2019-11-17T19:20:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 132
|
py
|
import tklib
import tkinter as tk
root = tk.Tk()
root.title("Chatbox")
gui = tklib.main.MainWindow(root)
gui.grid()
root.mainloop()
|
[
"Dogeek@users.noreply.github.com"
] |
Dogeek@users.noreply.github.com
|
0b9f8c69a516049fee15317748749ab9d00bb152
|
e17966d3831e9f99527fb44c50c77c90e08694de
|
/cmasher/colormaps/cosmic/cosmic.py
|
22748307b61c1a4f48607bd8bc7069e7557c2c79
|
[
"BSD-3-Clause"
] |
permissive
|
ajdittmann/CMasher
|
85e4f94e45f962c247d2f8e04a1e966b01b77248
|
1ad6a5eaa3b473ef5eb3d56dfd3ecb8e887f16cd
|
refs/heads/master
| 2023-03-27T12:46:30.052679
| 2021-03-22T23:56:47
| 2021-03-22T23:56:47
| 277,986,917
| 0
| 0
|
BSD-3-Clause
| 2020-07-08T04:10:55
| 2020-07-08T04:10:54
| null |
UTF-8
|
Python
| false
| false
| 13,150
|
py
|
# %% IMPORTS
# Package imports
from matplotlib.cm import register_cmap
from matplotlib.colors import ListedColormap
# All declaration
__all__ = ['cmap']
# Author declaration
__author__ = "Ellert van der Velden (@1313e)"
# Package declaration
__package__ = 'cmasher'
# %% GLOBALS AND DEFINITIONS
# Type of this colormap
cm_type = 'sequential'
# RGB-values of this colormap
cm_data = [[0.00000000, 0.00000000, 0.00000000],
[0.00022254, 0.00017671, 0.00025009],
[0.00078387, 0.00060500, 0.00090179],
[0.00164763, 0.00123817, 0.00193717],
[0.00280144, 0.00205342, 0.00336061],
[0.00423939, 0.00303533, 0.00518202],
[0.00595880, 0.00417200, 0.00741444],
[0.00795875, 0.00545370, 0.01007259],
[0.01023937, 0.00687218, 0.01317220],
[0.01280170, 0.00842008, 0.01673051],
[0.01564747, 0.01009067, 0.02076595],
[0.01877857, 0.01187795, 0.02529699],
[0.02219739, 0.01377623, 0.03034346],
[0.02590682, 0.01577996, 0.03592673],
[0.02990978, 0.01788399, 0.04202298],
[0.03420930, 0.02008340, 0.04820472],
[0.03880874, 0.02237336, 0.05439934],
[0.04358445, 0.02474908, 0.06061196],
[0.04832042, 0.02720564, 0.06684805],
[0.05302794, 0.02973858, 0.07311083],
[0.05771012, 0.03234333, 0.07940403],
[0.06236976, 0.03501524, 0.08573111],
[0.06700933, 0.03774971, 0.09209532],
[0.07163105, 0.04054044, 0.09849968],
[0.07623693, 0.04327934, 0.10494709],
[0.08082877, 0.04595941, 0.11144026],
[0.08540827, 0.04858162, 0.11798198],
[0.08997690, 0.05114686, 0.12457468],
[0.09453596, 0.05365588, 0.13122062],
[0.09908671, 0.05610920, 0.13792216],
[0.10363029, 0.05850720, 0.14468161],
[0.10816775, 0.06085010, 0.15150121],
[0.11270017, 0.06313785, 0.15838356],
[0.11722839, 0.06537049, 0.16533065],
[0.12175321, 0.06754789, 0.17234460],
[0.12627538, 0.06966979, 0.17942753],
[0.13079560, 0.07173582, 0.18658154],
[0.13531450, 0.07374552, 0.19380872],
[0.13983264, 0.07569829, 0.20111112],
[0.14435069, 0.07759327, 0.20849123],
[0.14886940, 0.07942918, 0.21595220],
[0.15338885, 0.08120569, 0.22349476],
[0.15790940, 0.08292181, 0.23112093],
[0.16243170, 0.08457592, 0.23883387],
[0.16695619, 0.08616646, 0.24663627],
[0.17148270, 0.08769287, 0.25452854],
[0.17601191, 0.08915275, 0.26251464],
[0.18054395, 0.09054460, 0.27059648],
[0.18507865, 0.09186716, 0.27877510],
[0.18961684, 0.09311710, 0.28705556],
[0.19415775, 0.09429396, 0.29543682],
[0.19870231, 0.09539359, 0.30392490],
[0.20324973, 0.09641511, 0.31251899],
[0.20780042, 0.09735485, 0.32122351],
[0.21235420, 0.09821004, 0.33004063],
[0.21691053, 0.09897843, 0.33897132],
[0.22146970, 0.09965550, 0.34802028],
[0.22603124, 0.10023806, 0.35718935],
[0.23059455, 0.10072284, 0.36648009],
[0.23515923, 0.10110562, 0.37589528],
[0.23972479, 0.10138193, 0.38543762],
[0.24429061, 0.10154697, 0.39510979],
[0.24885596, 0.10159568, 0.40491433],
[0.25341998, 0.10152262, 0.41485371],
[0.25798164, 0.10132206, 0.42493024],
[0.26253975, 0.10098790, 0.43514603],
[0.26709296, 0.10051368, 0.44550301],
[0.27163969, 0.09989258, 0.45600280],
[0.27617812, 0.09911739, 0.46664673],
[0.28070701, 0.09817741, 0.47743992],
[0.28522342, 0.09706670, 0.48837993],
[0.28972520, 0.09577455, 0.49946960],
[0.29420947, 0.09429133, 0.51070913],
[0.29867327, 0.09260527, 0.52210001],
[0.30311267, 0.09070667, 0.53363982],
[0.30752387, 0.08858214, 0.54532909],
[0.31190255, 0.08621737, 0.55716750],
[0.31624329, 0.08360013, 0.56915082],
[0.32054033, 0.08071586, 0.58127560],
[0.32478720, 0.07754942, 0.59353712],
[0.32897683, 0.07408316, 0.60593109],
[0.33310058, 0.07030445, 0.61844686],
[0.33714920, 0.06619871, 0.63107428],
[0.34111255, 0.06174960, 0.64380328],
[0.34497874, 0.05694953, 0.65661652],
[0.34873478, 0.05179404, 0.66949545],
[0.35236629, 0.04628711, 0.68241857],
[0.35585736, 0.04044955, 0.69535960],
[0.35919048, 0.03450248, 0.70828712],
[0.36234671, 0.02894281, 0.72116432],
[0.36530584, 0.02392884, 0.73395112],
[0.36804648, 0.01964477, 0.74659946],
[0.37054659, 0.01629018, 0.75905887],
[0.37278395, 0.01408742, 0.77127140],
[0.37473691, 0.01326930, 0.78317727],
[0.37638521, 0.01407636, 0.79471435],
[0.37771093, 0.01674823, 0.80582046],
[0.37869941, 0.02151387, 0.81643596],
[0.37934000, 0.02858112, 0.82650669],
[0.37962694, 0.03812858, 0.83598519],
[0.37955939, 0.04951968, 0.84483436],
[0.37914154, 0.06144082, 0.85302802],
[0.37838226, 0.07366794, 0.86055174],
[0.37729438, 0.08604353, 0.86740266],
[0.37589428, 0.09845369, 0.87358820],
[0.37420030, 0.11081496, 0.87912536],
[0.37223249, 0.12306544, 0.88403831],
[0.37001170, 0.13515937, 0.88835682],
[0.36755886, 0.14706339, 0.89211456],
[0.36489450, 0.15875385, 0.89534754],
[0.36203830, 0.17021480, 0.89809281],
[0.35900949, 0.18143572, 0.90038753],
[0.35582553, 0.19241145, 0.90226812],
[0.35250263, 0.20314042, 0.90376962],
[0.34905654, 0.21362321, 0.90492566],
[0.34550110, 0.22386312, 0.90576779],
[0.34184894, 0.23386511, 0.90632553],
[0.33811203, 0.24363489, 0.90662650],
[0.33430129, 0.25317905, 0.90669631],
[0.33042661, 0.26250480, 0.90655861],
[0.32649697, 0.27161973, 0.90623512],
[0.32252050, 0.28053167, 0.90574576],
[0.31850454, 0.28924853, 0.90510876],
[0.31445576, 0.29777822, 0.90434077],
[0.31038061, 0.30612826, 0.90345722],
[0.30628532, 0.31430588, 0.90247243],
[0.30217392, 0.32231916, 0.90139844],
[0.29805278, 0.33017428, 0.90024789],
[0.29392464, 0.33787908, 0.89903036],
[0.28979522, 0.34543927, 0.89775663],
[0.28566704, 0.35286192, 0.89643473],
[0.28154364, 0.36015310, 0.89507278],
[0.27742935, 0.36731812, 0.89367887],
[0.27332615, 0.37436312, 0.89225902],
[0.26923690, 0.38129344, 0.89081940],
[0.26516428, 0.38811418, 0.88936560],
[0.26111077, 0.39483025, 0.88790274],
[0.25707865, 0.40144635, 0.88643542],
[0.25307008, 0.40796697, 0.88496784],
[0.24908704, 0.41439642, 0.88350381],
[0.24513141, 0.42073882, 0.88204676],
[0.24120493, 0.42699810, 0.88059982],
[0.23730925, 0.43317804, 0.87916579],
[0.23344590, 0.43928225, 0.87774724],
[0.22961633, 0.44531416, 0.87634645],
[0.22582190, 0.45127709, 0.87496549],
[0.22206389, 0.45717419, 0.87360621],
[0.21834349, 0.46300848, 0.87227029],
[0.21466236, 0.46878272, 0.87095954],
[0.21102119, 0.47449978, 0.86967506],
[0.20742080, 0.48016236, 0.86841794],
[0.20386210, 0.48577303, 0.86718919],
[0.20034666, 0.49133403, 0.86599021],
[0.19687454, 0.49684789, 0.86482134],
[0.19344637, 0.50231688, 0.86368323],
[0.19006344, 0.50774301, 0.86257688],
[0.18672570, 0.51312849, 0.86150244],
[0.18343350, 0.51847533, 0.86046025],
[0.18018787, 0.52378532, 0.85945104],
[0.17698826, 0.52906048, 0.85847458],
[0.17383532, 0.53430249, 0.85753132],
[0.17072887, 0.53951311, 0.85662124],
[0.16766875, 0.54469402, 0.85574432],
[0.16465509, 0.54984675, 0.85490071],
[0.16168705, 0.55497296, 0.85409003],
[0.15876483, 0.56007400, 0.85331248],
[0.15588710, 0.56515145, 0.85256749],
[0.15305387, 0.57020657, 0.85185517],
[0.15026358, 0.57524082, 0.85117488],
[0.14751579, 0.58025541, 0.85052658],
[0.14480878, 0.58525167, 0.84990960],
[0.14214173, 0.59023075, 0.84932376],
[0.13951263, 0.59519388, 0.84876836],
[0.13692028, 0.60014214, 0.84824310],
[0.13436243, 0.60507669, 0.84774724],
[0.13183741, 0.60999854, 0.84728036],
[0.12934276, 0.61490877, 0.84684171],
[0.12687634, 0.61980834, 0.84643072],
[0.12443548, 0.62469823, 0.84604664],
[0.12201756, 0.62957937, 0.84568878],
[0.11961967, 0.63445266, 0.84535639],
[0.11723878, 0.63931897, 0.84504866],
[0.11487170, 0.64417916, 0.84476479],
[0.11251507, 0.64903403, 0.84450394],
[0.11016531, 0.65388437, 0.84426522],
[0.10781885, 0.65873094, 0.84404775],
[0.10547170, 0.66357448, 0.84385054],
[0.10312014, 0.66841567, 0.84367273],
[0.10075970, 0.67325523, 0.84351320],
[0.09838664, 0.67809378, 0.84337110],
[0.09599597, 0.68293198, 0.84324519],
[0.09358395, 0.68777038, 0.84313463],
[0.09114522, 0.69260963, 0.84303810],
[0.08867599, 0.69745020, 0.84295474],
[0.08617051, 0.70229269, 0.84288313],
[0.08362511, 0.70713753, 0.84282243],
[0.08103357, 0.71198527, 0.84277110],
[0.07839237, 0.71683626, 0.84272829],
[0.07569525, 0.72169103, 0.84269246],
[0.07293844, 0.72654989, 0.84266263],
[0.07011617, 0.73141325, 0.84263735],
[0.06722404, 0.73628145, 0.84261542],
[0.06425764, 0.74115477, 0.84259561],
[0.06121130, 0.74603356, 0.84257635],
[0.05808247, 0.75091799, 0.84255664],
[0.05486570, 0.75580838, 0.84253483],
[0.05155846, 0.76070488, 0.84250968],
[0.04815913, 0.76560763, 0.84247998],
[0.04466405, 0.77051685, 0.84244395],
[0.04107468, 0.77543259, 0.84240043],
[0.03741096, 0.78035490, 0.84234809],
[0.03385282, 0.78528385, 0.84228537],
[0.03043137, 0.79021949, 0.84221065],
[0.02716948, 0.79516171, 0.84212275],
[0.02409051, 0.80011044, 0.84202021],
[0.02122047, 0.80506559, 0.84190153],
[0.01858815, 0.81002699, 0.84176524],
[0.01622412, 0.81499451, 0.84160961],
[0.01416537, 0.81996783, 0.84143342],
[0.01245080, 0.82494667, 0.84123518],
[0.01112317, 0.82993067, 0.84101341],
[0.01022949, 0.83491944, 0.84076663],
[0.00982139, 0.83991250, 0.84049339],
[0.00995551, 0.84490931, 0.84019224],
[0.01069215, 0.84990934, 0.83986145],
[0.01210010, 0.85491187, 0.83949971],
[0.01425466, 0.85991610, 0.83910577],
[0.01723795, 0.86492117, 0.83867832],
[0.02113724, 0.86992620, 0.83821555],
[0.02605147, 0.87493010, 0.83771618],
[0.03209157, 0.87993158, 0.83717944],
[0.03937186, 0.88492950, 0.83660298],
[0.04753062, 0.88992218, 0.83598667],
[0.05607917, 0.89490805, 0.83532877],
[0.06500234, 0.89988525, 0.83462804],
[0.07428800, 0.90485162, 0.83388399],
[0.08393083, 0.90980477, 0.83309565],
[0.09393285, 0.91474203, 0.83226231],
[0.10430211, 0.91966031, 0.83138359],
[0.11505191, 0.92455607, 0.83045962],
[0.12619999, 0.92942531, 0.82949010],
[0.13776962, 0.93426332, 0.82847628],
[0.14978894, 0.93906464, 0.82741934],
[0.16229169, 0.94382285, 0.82632213],
[0.17531808, 0.94853039, 0.82518803],
[0.18891548, 0.95317821, 0.82402273],
[0.20313976, 0.95775546, 0.82283421],
[0.21805616, 0.96224898, 0.82163430],
[0.23374218, 0.96664270, 0.82043899],
[0.25028497, 0.97091692, 0.81927329],
[0.26778032, 0.97504765, 0.81817391],
[0.28632927, 0.97900591, 0.81719338],
[0.30601898, 0.98275811, 0.81640936],
[0.32689118, 0.98626848, 0.81593233],
[0.34888332, 0.98950593, 0.81591102],
[0.37176267, 0.99245625, 0.81652140]]
# Create ListedColormap object for this colormap
cmap = ListedColormap(cm_data, name='cmr.cosmic', N=len(cm_data))
cmap_r = cmap.reversed()
# Register (reversed) cmap in MPL
register_cmap(cmap=cmap)
register_cmap(cmap=cmap_r)
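# Minimal usage sketch, assuming matplotlib is available: once this module has
# been imported, the registered names can be passed wherever matplotlib accepts
# a cmap, e.g. plt.imshow(data, cmap='cmr.cosmic'), or cmap='cmr.cosmic_r' for
# the reversed map.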
|
[
"ellert_vandervelden@outlook.com"
] |
ellert_vandervelden@outlook.com
|
e459a8bed2ebf847119b43f03585274b9d6d301d
|
d8ce486989fe13074365a20b77743dc333b37b65
|
/python/paradrop/backend/exc/runtime.py
|
ff7817cab128621782d10a037dca7d526d2ffdfc
|
[] |
no_license
|
damouse/AnotherParadrop
|
a1ae8e8d91d54b091a1b8f4d564e4e6339e2c60f
|
7c3ca7a149b8e30ba11f6d6b6e4d953374bf8708
|
refs/heads/master
| 2021-01-17T06:52:55.933794
| 2016-07-11T01:10:00
| 2016-07-11T01:10:00
| 62,856,876
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,561
|
py
|
###################################################################
# Copyright 2013-2015 All Rights Reserved
# Authors: The Paradrop Team
###################################################################
from pdtools.lib.output import out
from paradrop.backend.exc import plangraph
from paradrop.lib import config
def generatePlans(update):
"""
This function looks at a diff of the current Chute (in @chuteStor) and the @newChute,
then adds Plan() calls to make the Chute match the @newChute.
Returns:
True: abort the plan generation process
"""
out.verbose("%r\n" % (update))
# Generate virt start script, stored in cache (key: 'virtPreamble')
update.plans.addPlans(plangraph.RUNTIME_GET_VIRT_PREAMBLE, (config.dockerconfig.getVirtPreamble, ))
# If the user specifies DHCP then we need to generate the config and store it to disk
update.plans.addPlans(plangraph.RUNTIME_GET_VIRT_DHCP, (config.dhcp.getVirtDHCPSettings, ))
update.plans.addPlans(plangraph.RUNTIME_SET_VIRT_DHCP, (config.dhcp.setVirtDHCPSettings, ))
# Reload configuration files
todoPlan = (config.configservice.reloadAll, )
abtPlan = [(config.osconfig.revertConfig, "dhcp"),
(config.osconfig.revertConfig, "firewall"),
(config.osconfig.revertConfig, "network"),
(config.osconfig.revertConfig, "wireless"),
(config.configservice.reloadAll, )]
update.plans.addPlans(plangraph.RUNTIME_RELOAD_CONFIG, todoPlan, abtPlan)
return None
|
[
"damouse007@gmail.com"
] |
damouse007@gmail.com
|
69e938ae8597f11320f9ad83b8e276f8401c8d4a
|
074dcf2274f2864710264db115b840c838b64ffd
|
/leet_0985_sum_of_even_numbers_after_queries.py
|
32e67b96e33cb9081f2f1c235692f83e8e231335
|
[] |
no_license
|
kkaixiao/pythonalgo2
|
212da2dac8d3a38fd06b0d1f5895cb9a768da43e
|
92b4b7c6b69d39bf79a9e20a9fc947304c2a1de5
|
refs/heads/master
| 2021-06-23T07:20:13.659422
| 2021-05-10T05:16:30
| 2021-05-10T05:16:30
| 224,455,645
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,614
|
py
|
"""
We have an array A of integers, and an array queries of queries.
For the i-th query val = queries[i][0], index = queries[i][1], we add val to A[index]. Then,
the answer to the i-th query is the sum of the even values of A.
(Here, the given index = queries[i][1] is a 0-based index, and each query permanently modifies
the array A.)
Return the answer to all queries. Your answer array should have answer[i] as the answer to the
i-th query.
Example 1:
Input: A = [1,2,3,4], queries = [[1,0],[-3,1],[-4,0],[2,3]]
Output: [8,6,2,4]
Explanation:
At the beginning, the array is [1,2,3,4].
After adding 1 to A[0], the array is [2,2,3,4], and the sum of even values is 2 + 2 + 4 = 8.
After adding -3 to A[1], the array is [2,-1,3,4], and the sum of even values is 2 + 4 = 6.
After adding -4 to A[0], the array is [-2,-1,3,4], and the sum of even values is -2 + 4 = 2.
After adding 2 to A[3], the array is [-2,-1,3,6], and the sum of even values is -2 + 6 = 4.
Note:
1 <= A.length <= 10000
-10000 <= A[i] <= 10000
1 <= queries.length <= 10000
-10000 <= queries[i][0] <= 10000
0 <= queries[i][1] < A.length
"""
from typing import List
class Solution:
def sumEvenAfterQueries(self, A: List[int], queries: List[List[int]]) -> List[int]:
res = []
isEven = lambda x: x % 2 == 0
evenSum = sum(filter(isEven, A))
for query in queries:
numToAdd, idx = query[0], query[1]
if isEven(A[idx]):
evenSum -= A[idx]
A[idx] += numToAdd
if isEven(A[idx]):
evenSum += A[idx]
res.append(evenSum)
return res
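# Quick check against the docstring example (a sketch, not part of the original
# solution); expected output: [8, 6, 2, 4]
if __name__ == "__main__":
    print(Solution().sumEvenAfterQueries([1, 2, 3, 4], [[1, 0], [-3, 1], [-4, 0], [2, 3]]))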
|
[
"show-key@qq.com"
] |
show-key@qq.com
|
8a69e7b744e52b3f84dd74831ee475bb703574fe
|
baacad8007b78f105b41030fd48d643820142ba1
|
/hw-pca.py
|
22172f834d3d4ddc3c6bcb97c164458cb32c3d54
|
[] |
no_license
|
baoaya/point-cloud
|
b31ef7ca19eb3f2255876a0ed08fc0ccd67e5e6a
|
6c35cf38bfcfafbf25b5725ce2b13d0e8f0153c6
|
refs/heads/master
| 2023-03-21T03:21:56.844632
| 2021-03-07T11:42:07
| 2021-03-07T11:42:07
| 344,090,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,462
|
py
|
import numpy as np
import open3d as o3d
from matplotlib import pyplot as plt
# PCA
# x is the point-cloud matrix; returns the eigenvectors sorted by eigenvalue
def pca(x):
    x_mean = np.mean(x, axis=0)
    normalize_x = x - x_mean
    normalize_x = normalize_x.T
    h = normalize_x.dot(normalize_x.T)
    eigen_values, eigen_vectors = np.linalg.eig(h)
    # Sort by descending eigenvalue so the principal axes come first
    order = np.argsort(eigen_values)[::-1]
    return eigen_vectors[:, order]
def hw_pca(x):
    u = pca(x)
    # Project onto the first two principal axes (2-D coordinates)
    projection_matrix = u[:, :2]
    x_pca = x.dot(projection_matrix)
    return x_pca
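# Quick shape check (a sketch, assuming a random (N, 3) point cloud):
#   hw_pca(np.random.rand(100, 3)).shape == (100, 2)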
# Compute surface normals
# x is the point-cloud matrix, n is the number of neighbors to use
def hw_surface_normal(x, n):
    point_cloud_o3d = o3d.geometry.PointCloud()
    point_cloud_o3d.points = o3d.utility.Vector3dVector(x)
    pcd_tree = o3d.geometry.KDTreeFlann(point_cloud_o3d)  # build a k-d tree over the point cloud for fast search
    normals = []
    print(x.shape[0])  # 10000
    for i in range(x.shape[0]):
        # search_knn_vector_3d takes [a query point, n] and returns [int, open3d.utility.IntVector, open3d.utility.DoubleVector]
        [_, idx, _] = pcd_tree.search_knn_vector_3d(point_cloud_o3d.points[i], n)
        # np.asarray behaves like np.array but avoids copying, saving memory
        # Take each point's n nearest neighbors (roughly a local surface patch), run PCA on them,
        # and use the eigenvector with the smallest eigenvalue as the normal
        k_nearest_point = np.asarray(point_cloud_o3d.points)[idx, :]
        eigen_vectors = pca(k_nearest_point)
        # Take the last column (smallest eigenvalue)
        normals.append(eigen_vectors[:, 2])
    return normals
# Downsampling
# x is the point-cloud matrix
# r is the size (resolution) of the voxel grid
# take is the point-selection mode: "random" (default) picks a random point, "centroid" takes the center point
def hw_downsampling(x, r, take="random"):
    max = np.max(x, axis=0)
    min = np.min(x, axis=0)
    # x_min, y_min, z_min = min[0], min[1], min[2]
    # x_max, y_max, z_max = max[0], max[1], max[2]
    d = (max - min) / r
    # dx, dy, dz = (x_max - x_min) / r, (y_max - y_min) / r, (z_max - z_min) / r
    hash = {}
    # TODO: voxel binning and per-voxel point selection are not implemented yet
    return
def main():
    # Load the point-cloud file
    x = np.loadtxt('/Users/jimmy/Desktop/DeepLearning/homework/1/data/airplane_0027.txt', delimiter=',')[:, 0:3]
    x_pca = hw_pca(x)
    # Display the 2-D projection
    plt.scatter(x_pca[:, 0], x_pca[:, 1])
    plt.show()
    normals = hw_surface_normal(x, 10)  # 10 nearest neighbors
    ds_random = hw_downsampling(x, 10, take="random")  # 10 is an arbitrary voxel resolution
    ds_centroid = hw_downsampling(x, 10, take="centroid")
    return
if __name__ == "__main__":
main()
|
[
"wuhaojie@gmail.com"
] |
wuhaojie@gmail.com
|
1a8f7f2c0353ac2b624f1e8636896917b3309abb
|
d62e60b26fdb5373f72c2cf5ddcd7ad99b835f23
|
/eee/koans/about_control_statements.py
|
40b5444ef520f4ae763da2e1a9980d390e85d91f
|
[] |
no_license
|
glambertation/Paper
|
4cf6e605235eb0bbe7e6ebe24a916e5a34935e3d
|
7a4945490a53cc409641720621c7710b7ab9f2ae
|
refs/heads/master
| 2021-09-04T13:23:06.385324
| 2018-01-19T04:45:25
| 2018-01-19T04:45:25
| 105,601,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,258
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutControlStatements(Koan):
def test_if_then_else_statements(self):
if True:
result = 'true value'
else:
result = 'false value'
self.assertEqual('true value', result)
def test_if_then_statements(self):
result = 'default value'
if True:
result = 'true value'
self.assertEqual('true value', result)
def test_if_then_elif_else_statements(self):
if False:
result = 'first value'
elif True:
result = 'true value'
else:
result = 'default value'
self.assertEqual('true value', result)
def test_while_statement(self):
i = 1
result = 1
while i <= 10:
result = result * i
i += 1
self.assertEqual(3628800, result)
def test_break_statement(self):
i = 1
result = 1
while True:
if i > 10: break
result = result * i
i += 1
self.assertEqual(3628800, result)
def test_continue_statement(self):
i = 0
result = []
while i < 10:
i += 1
if (i % 2) == 0: continue
result.append(i)
self.assertEqual([1,3,5,7,9], result)
def test_for_statement(self):
phrase = ["fish", "and", "chips"]
result = []
for item in phrase:
result.append(item.upper())
self.assertEqual(['FISH','AND','CHIPS'], result)
def test_for_statement_with_tuples(self):
round_table = [
("Lancelot", "Blue"),
("Galahad", "I don't know!"),
("Robin", "Blue! I mean Green!"),
("Arthur", "Is that an African Swallow or European Swallow?")
]
result = []
for knight, answer in round_table:
result.append("Contestant: '" + knight + \
"' Answer: '" + answer + "'")
text = __
self.assertMatch("Contestant: 'Robin' Answer: 'Blue! I mean Green!'", result[2])
self.assertNoMatch(text, result[0])
self.assertNoMatch(text, result[1])
self.assertNoMatch(text, result[3])
|
[
"songhaiyun@bytedance.com"
] |
songhaiyun@bytedance.com
|
33d4076089c5e19b3838fc02d813cc695a5951d3
|
9c8ab08d961dcc8087c34f90561f4e3d304f6bf8
|
/Exotag_SW_uploader/sw_uploader.py
|
a5a9a92cf05de66925c9fde56a7d175082c0d5ea
|
[] |
no_license
|
kdubovenko/eclipseRepo
|
230e249749175a7fb4e3e30308aca2ba378ff2d4
|
081eb845dc5e8328ed2b7454003506bcb0b79c16
|
refs/heads/master
| 2021-01-16T21:14:19.890046
| 2020-05-13T22:57:39
| 2020-05-13T22:57:39
| 100,223,402
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,865
|
py
|
#!/usr/bin/python
from __future__ import print_function
import sys, binascii, serial, time, getopt, io, struct
from intelhex import IntelHex
def main(argv):
inputfile = ''
port = ''
split = 1024
usage_string = 'huzzah_exotag_firmware.py -i <inputfile> -p <comm port:/dev/ttyUSB or \\\.\COM12>'
try:
opts, args = getopt.getopt(argv,"hi:p:s:",["file=","port=","split="])
    except getopt.GetoptError:
        print (usage_string)
        sys.exit(2)
if not opts:
print ("no arguments provided ")
print (usage_string)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print (usage_string)
sys.exit()
elif opt in ("-i", "--file"):
inputfile = arg
elif opt in ("-p", "--port"):
port = arg
elif opt in ("-s", "--split"):
split = int(arg)
else:
print ("wrong arguments: " + opt)
print (usage_string)
print('input file: '+inputfile)
print('output port: '+port)
print('split in bytes: %d' % split)
if inputfile.endswith('.hex'):
ih = IntelHex() # create empty object
ih.padding=0xFF
ih.fromfile(inputfile,format='hex') # also load from hex
ascii_bin = binascii.hexlify(ih.tobinstr())
#sys.exit()
elif inputfile.endswith('.bin'): #reconsider the extension here, the routine actually handles ascii
        with open(inputfile, 'r') as f:
            read_data = f.read()
        read_data = read_data.rstrip('\r\n')
print (read_data)
print ([n for n in read_data])
ascii_bin = read_data
#binascii.unhexlify(read_data)
#sys.exit()
#print(ascii_bin)
crc_result=binascii.crc32(ascii_bin) & 0xffffffff
print("CRC = %08x" % crc_result)
#sys.exit()
exotag_uart = serial.Serial(port, 115200, timeout=1)
exotag_uart_io = io.TextIOWrapper(io.BufferedRWPair(exotag_uart, exotag_uart))
print('send start update: ' + unicode('exotag=update\r'))
exotag_uart_io.write(unicode('exotag=update'.rstrip()+'\r'))
exotag_uart_io.flush()
time.sleep(1)
#print (exotag_uart.readline())
print('write')
for i in range(0, len(ascii_bin), split):
print((ascii_bin[i:i + split]))
exotag_uart.write((ascii_bin[i:i + split]))
exotag_uart_io.flush()
time.sleep(1)
print( binascii.hexlify(struct.pack("<I", crc_result)) )
exotag_uart.write('!'.encode()+struct.pack("<I", crc_result))
exotag_uart_io.flush()
#time.sleep(1)
print('send end of firmware update')
exotag_uart_io.write(unicode('~'))
exotag_uart_io.flush()
print('read exotag messages')
exotag_string = exotag_uart_io.read()
print("***********************\r\n"+exotag_string+"\r\n***********************")
if (-1 != exotag_string.find('firmware update: SUCCESS')):
print('tell huzzah to push update to exotag')
exotag_uart_io.write(unicode('exotag=program'.rstrip()+'\r'))
exotag_uart_io.flush()
#time.sleep(10)
print('read exotag messages')
time.sleep(5)
exotag_string = exotag_uart_io.read()
print("***********************\r\n"+exotag_string+"\r\n***********************")
if (-1 != exotag_string.find('EFM8 flash write read-back verify success')):
print('EFM8 flash write read-back verify success - indicates SUCCESS')
print('reset exotag')
exotag_uart_io.write(unicode('exotag=reset'.rstrip()+'\r'))
exotag_uart_io.flush()
time.sleep(5)
exotag_string = exotag_uart_io.read()
print("***********************\r\n"+exotag_string+"\r\n***********************")
exotag_uart.close()
if __name__ == "__main__":
main(sys.argv[1:])
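# Run sketch (Python 2 script; the file name below is hypothetical):
#   python sw_uploader.py -i firmware.hex -p /dev/ttyUSB0 -s 1024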
|
[
"kdubovenko@gmail.com"
] |
kdubovenko@gmail.com
|
058c007c891929098cb59e8d6ed13d88a8135a36
|
668188a66168ae01a84efa03a3a08d0618885e86
|
/classify/utils/d_process.py
|
353563784d96da27f4457ed499cd6518332d8600
|
[] |
no_license
|
Xie-JunWei/lstm-network
|
c59c939765c9362750922374083a827cf32a8671
|
3f4cc841e41e3b48a4cef427a5fe373355f38d1b
|
refs/heads/main
| 2023-02-26T02:20:53.668018
| 2021-02-07T14:13:55
| 2021-02-07T14:13:55
| 336,803,296
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,621
|
py
|
# Preprocessing: add random noise, normalize
import numpy as np
def dp(traj_,max_len_):
x, x_v, y, y_v ,z ,z_v= [],[],[],[],[],[]
ini = 0
# count = 1201
for line in traj_:
if ini==0:
x_0=line[0]
y_0=line[1]
z_0=line[2]
ini+=1
else:
rand_r = np.random.normal(0, 145, 3)
rand_v = np.random.normal(0, 23.2, 3)
np.random.shuffle(rand_r)
np.random.shuffle(rand_v)
            # Add random noise (left commented out)
# x.append((line[0]-x_0+rand_r[0])/10e3)
# y.append((line[1]-y_0+rand_r[1])/10e3)
# z.append((line[2]-z_0+rand_r[2])/10e3)
# x_v.append(line[3]+rand_v[0])
# y_v.append(line[4]+rand_v[1])
# z_v.append(line[5]+rand_v[2])
x.append((line[0] - x_0) / 10e5)
y.append((line[1] - y_0) / 10e5)
z.append((line[2] - z_0) / 10e5)
x_v.append(line[3] / 10e2)
y_v.append(line[4] / 10e2)
z_v.append(line[5] / 10e2)
max_len_=max_len_-1
if max_len_<0:
break
xnew, ynew, znew, x_vnew, y_vnew ,z_vnew=np.array(x), np.array(y),np.array(z),\
np.array(x_v),np.array(y_v),np.array(z_v)
new = np.transpose(np.vstack((xnew,ynew,znew,x_vnew,y_vnew,z_vnew)))
return new
class data_process(object):
def __init__(self, max_len):
self.max_len = max_len
def __call__(self, traj):
traj=dp(traj,self.max_len)
return traj
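# Usage sketch (assuming traj is an iterable of rows [x, y, z, vx, vy, vz], as
# indexed above):
#   process = data_process(max_len=1200)
#   normalized_traj = process(traj)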
|
[
"noreply@github.com"
] |
Xie-JunWei.noreply@github.com
|
2aad664b1d1fb518045bd6f2279ab4277a41eb31
|
f2e3b1181c13bded5baaedda0813129838118971
|
/IOT-Web-Services/src/config.py
|
99f9f24404dd4126034502428ed1b7477644670f
|
[] |
no_license
|
zubairshakoorarbisoft/iot-api-usama
|
bf1627131c58a106139755d75e26a48de8a38219
|
d623745e35b24cf00f5d95389193045b16ef4f5a
|
refs/heads/main
| 2023-08-31T10:09:29.785934
| 2021-09-26T20:36:29
| 2021-09-26T20:36:29
| 410,654,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
from pydantic import BaseSettings
class Settings(BaseSettings):
pgsql_host: str
pgsql_dns: str
pgsql_port: int
pgsql_user: str
pgsql_password: str
pgsql_db_name: str
class Config:
env_prefix = ''
env_file = '../.env'
settings = Settings()
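# Sketch of the ../.env file these settings load from (values are placeholders;
# the field names must match the attributes above):
#   pgsql_host=localhost
#   pgsql_dns=postgresql://localhost
#   pgsql_port=5432
#   pgsql_user=user
#   pgsql_password=secret
#   pgsql_db_name=iot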
|
[
"noreply@github.com"
] |
zubairshakoorarbisoft.noreply@github.com
|
2b17eb37f580aae31898a48853bef496335a74aa
|
bfe3c294db999eb46af04f1c3c6451c974a4a7eb
|
/PyPoll/main.py
|
16d95d67b9b898a4e524d21261092d068e2c12c5
|
[] |
no_license
|
diazdez/Python-Challenge
|
5fb06331a4edae06254169e3396496ab09e19d93
|
1a753dd82a91ca52563fb0d4fd9422dd68865e15
|
refs/heads/master
| 2022-12-22T10:16:40.641322
| 2020-09-22T21:51:54
| 2020-09-22T21:51:54
| 296,190,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,740
|
py
|
# PYPOLL: Homework
# PyPoll CSV File: election_data.csv
# CSV has a header with 3 categories: "Voter ID", "County" & "Candidate"
# The total number of votes cast
# A complete list of candidates who received votes
# The percentage of votes each candidate won
# The total number of votes each candidate won
# The winner of the election based on popular vote.
# Put together final script should both print the analysis to the terminal
# Export a text file with the analysis/results.
# Identify Variables
vote_count = 0
candidates_with_votes = []
candidate_vote_num = [0,0,0,0]
pct_of_votes = []
# create file path across Operating Systems (os)
# module for reading the csv file
import os
import csv
# file path
csvpath = os.path.join('Resources', 'election_data.csv')
# open/read file
with open(csvpath) as csvfile:
csvreader= csv.reader(csvfile, delimiter=',')
csvheader = next(csvreader)
for row in csvreader:
# number of rows equals the number of votes:
vote_count = vote_count + 1
# Candidates are listed in Column 3 (Index = 2)
listed_candidate = (row[2])
# Need to find the different candidates in the Column 3
# Need to find the vote count for each candidate
if listed_candidate not in candidates_with_votes:
#index the candidate to the list
candidates_with_votes.append(listed_candidate)
else:
pass
# print(candidates_with_votes) = found a total of 4 different types of candidates
#increase vote count by 1
with open(csvpath) as csvfile:
csvreader= csv.reader(csvfile, delimiter=',')
csvheader = next(csvreader)
for row in csvreader:
if row[2] == candidates_with_votes[0]:
candidate_vote_num[0] = candidate_vote_num[0] +1
elif row[2] == candidates_with_votes[1]:
candidate_vote_num[1] = candidate_vote_num[1] +1
elif row[2] == candidates_with_votes[2]:
candidate_vote_num[2] = candidate_vote_num[2] +1
elif row[2] == candidates_with_votes[3]:
candidate_vote_num[3] = candidate_vote_num[3] +1
#print(candidate_vote_num)
high_vote_count = 0
votecount_index = 0
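# Worked check of the percentage formula used in the loop below: with 4,000 total
# votes and a candidate holding 1,000 of them, round(1000/4000*100, 3) == 25.0.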
for x in range(len(candidates_with_votes)):
#calculate the percentage of votes received per candidate: (CandidateVote#/TotalVote#)*100
pct = round(candidate_vote_num[x]/vote_count*100, 3)
pct_of_votes.append(pct)
if candidate_vote_num[x] > high_vote_count:
high_vote_count = candidate_vote_num[x]
votecount_index = x
winner = candidates_with_votes[votecount_index]
# print(winner)
# RESULTS
print()
print("Election Results")
print("-------------------------")
print("Total Votes: " + str(vote_count))
print("-------------------------")
for x in range(len(candidates_with_votes)):
print(str(candidates_with_votes[x]) + ": " +str(pct_of_votes[x])+"%" + " "+ "(" +str(candidate_vote_num[x]) +")")
print("-------------------------")
print("Winner: " + (winner))
print("-------------------------")
# # save output file path as text
pypoll_output_file = os.path.join("..", "pypoll_output.txt")
# # open the output file with write mode
with open(pypoll_output_file,"w") as text:
text.write(("Election Results")+ '\n')
text.write(("-------------------------")+ '\n')
text.write(("Total Votes: " + str(vote_count))+ '\n')
text.write(("-------------------------")+ '\n')
for x in range(len(candidates_with_votes)):
text.write((str(candidates_with_votes[x]) + ": " +str(pct_of_votes[x])+"%" + " "+ "(" +str(candidate_vote_num[x]) +")") + '\n')
text.write((" ")+ '\n')
text.write(("-------------------------")+ '\n')
text.write(("Winner: " + (winner)) + '\n')
text.write(("-------------------------")+ '\n')
|
[
"69435783+diazdez@users.noreply.github.com"
] |
69435783+diazdez@users.noreply.github.com
|
0b9ad678fafff5f6858e34778dcf49f9e2da81f1
|
dcf1478d7a3c2338f5daa04240ac6e90f6e6cce2
|
/application/routes.py
|
5af5ae553545eb5cbaa1778edd19170c79cf9e0c
|
[] |
no_license
|
LeoKnox/flask_py
|
555968ad90cb092e6ba383cb76c47931f8d49c98
|
68e8cc9159584bab33220177dc2003c699489095
|
refs/heads/master
| 2023-03-25T08:21:37.124324
| 2020-05-25T05:50:58
| 2020-05-25T05:50:58
| 260,389,671
| 0
| 0
| null | 2021-03-20T03:40:42
| 2020-05-01T05:53:14
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,653
|
py
|
from application import app, db, mycol
from application.models import Room
from application.forms import CreateRoomForm
from flask import render_template, request
@app.route("/")
@app.route("/index")
def index():
return render_template("index.html", index="active")
@app.route("/dungeon")
def dungeon():
info = Room.objects.all()
return render_template("dungeon.html", dungeon="active", info=info)
@app.route("/room/", methods=["GET", "POST"])
@app.route("/room/<room_name>", methods=["GET", "POST"])
def room(room_name="Entry"):
if not room_name:
room_name="Entry"
room_data = Room.objects.get(room_name=room_name)
form = CreateRoomForm()
if form.validate_on_submit():
room_name = form.room_name.data
length = form.length.data
width = form.width.data
pos_x = form.pos_x.data
pos_y = form.pos_y.data
room = Room(room_name=room_name,length=length,width=width,pos_x=pos_x,pos_y=pos_y)
Room.objects(room_name=room_name).update(pos_y=6)
return render_template("room.html", room="active", info=room_data, form=form)
@app.route("/map")
def map():
x = 'Entry'
y = {'room_name':x}
newvalues = {"$set": {"pos_x":5,"pos_y":5,"doors":{"wall":1,"position":3}}}
z = mycol.update_many(y,newvalues)
return render_template("map.html", map="active")
@app.route("/monsters")
def monsters():
return render_template("monsters.html", monsters="active")
@app.route("/treasure")
def treasure():
return render_template("treasure.html", treasure="active")
|
[
"noreply@github.com"
] |
LeoKnox.noreply@github.com
|
a6db7fdd687e1c3b4a48e8e4f97344c121c8d0c1
|
3eb0d7197bbbb5f4677ce30620eda571decedf12
|
/pygbif/registry/__init__.py
|
00e3d4ea2ee4eabc0c402754a4534355c57f24d9
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
faroit/pygbif
|
f38e34571639f8135b15c749da51778ff36c0d01
|
4bfdb7bc87c372c59f1f0e81843a18585a3056f4
|
refs/heads/master
| 2023-01-22T11:10:40.686527
| 2020-09-29T23:41:18
| 2020-09-29T23:41:18
| 297,910,320
| 0
| 0
|
MIT
| 2020-09-23T08:53:46
| 2020-09-23T08:53:45
| null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
"""
GBIF registry APIs methods
* `organizations`: Organizations metadata
* `nodes`: Nodes metadata
* `networks`: Networks metadata
* `installations`: Installations metadata
* `datasets`: Search for datasets and dataset metadata
* `dataset_metrics`: Get details/metrics on a GBIF dataset
* `dataset_suggest`: Search that returns up to 20 matching datasets
* `dataset_search`: Full text search across all datasets
"""
from .nodes import nodes
from .networks import networks
from .installations import installations
from .datasets import datasets, dataset_metrics, dataset_suggest, dataset_search
from .organizations import organizations
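# Usage sketch (a sketch only; parameter names are assumptions, check the pygbif
# docs before relying on them):
#   from pygbif import registry
#   registry.dataset_suggest(q='Amazon')  # search returning up to 20 matching datasets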
|
[
"myrmecocystus@gmail.com"
] |
myrmecocystus@gmail.com
|
afdb85dbdce3a8016eb733c7a9673913e4f8feb2
|
a6c12a2790c7fb7b032f43eeafb12a79138376c2
|
/app2/views.py
|
6aeedd991680e7f96036e4e6272d08673b6bd703
|
[] |
no_license
|
karthi-chala/pro22
|
12fbb735b23acfea1bc017c86a7f91dad9d4cb00
|
52b913c8f865cee63a73910910983f18c22b53f8
|
refs/heads/main
| 2023-03-31T11:56:13.018840
| 2021-04-03T12:24:28
| 2021-04-03T12:24:28
| 354,331,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return HttpResponse('<h1>welcome to index of app2</h1>')
def sample(request):
return render(request,'app2/sample.html')
|
[
"karthi.sacred27@gmail.com"
] |
karthi.sacred27@gmail.com
|
6dd5ad61bfdc032115e548d265370f29b51aac01
|
6e51ff71f150ff964caaaba3af6550f8258e975a
|
/news_scraper/spiders/inside.py
|
9ea59a93668ebb232a848afc399fa679186f243b
|
[
"MIT"
] |
permissive
|
yungjr/news-scraper
|
289fbab2f2c37f515016f346ece8953c669b0fc3
|
f10c7a7f4c456ec950cd5f8c12ee202e2d0be793
|
refs/heads/main
| 2023-03-16T01:03:24.146905
| 2021-02-28T09:01:31
| 2021-02-28T09:01:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,435
|
py
|
import scrapy
class InsideSpider(scrapy.Spider):
name = 'inside'
allowed_domains = ['www.inside.com.tw']
start_urls = ['https://www.inside.com.tw/tag/ai']
    count = 1  # number of listing pages fetched
    def parse(self, response):
        yield from self.scrape(response)  # scrape the page content
        # Locate the "next page" button element
        next_page_url = response.xpath(
            "//a[@class='pagination_item pagination_item-next']/@href")
        if next_page_url:
            url = next_page_url.get()  # get the URL of the next page
            InsideSpider.count += 1
            if InsideSpider.count <= 3:
                yield scrapy.Request(url, callback=self.parse)  # send the request
    def scrape(self, response):
        # Scrape article titles
        post_titles = response.xpath(
            "//h3[@class='post_title']/a[@class='js-auto_break_title']/text()"
        ).getall()
        # Scrape publication dates
        post_dates = response.xpath(
            "//li[@class='post_date']/span/text()"
        ).getall()
        # Scrape authors
        post_authors = response.xpath(
            "//span[@class='post_author']/a/text()"
        ).getall()
for data in zip(post_titles, post_dates, post_authors):
NewsScraperItem = {
"post_title": data[0],
"post_date": data[1],
"post_author": data[2]
}
yield NewsScraperItem
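# Run sketch (assuming this spider lives in a standard Scrapy project):
#   scrapy crawl inside -o news.json
# The spider follows the "next page" link at most twice (count <= 3), so at most
# three listing pages are scraped.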
|
[
"mikekuforgit@gmail.com"
] |
mikekuforgit@gmail.com
|
3b01306425299986ae69ee9a5f0da7376a8f7ff7
|
a920c8dfcbf789c37b0207987070b81512217136
|
/graph.py
|
6c7db432592baa9a3e3d1c4c20cc6ee7650e83da
|
[] |
no_license
|
carter-yagemann/BeijingAir
|
affc7cdfbaf53fc5d1ea3dcf42758219a97821df
|
c08061382f968569fad7d77090012be85f7d7deb
|
refs/heads/master
| 2021-01-11T21:51:35.778990
| 2015-03-10T04:05:34
| 2015-03-10T04:05:34
| 78,866,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,393
|
py
|
#!/usr/bin/python
'''
BeijingAir
graph.py
Copyright Carter Yagemann 2015
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sys
import matplotlib.pyplot as plt
#-----------------------------------------------------#
# Helper Functions #
#-----------------------------------------------------#
def usage():
print 'Usage: python graph.py <csv_input_data>'
############################################
## Main ##
############################################
if len(sys.argv) != 2:
usage()
sys.exit()
filename = sys.argv[1]
try:
file = open(filename, 'r')
y_array = []
x_array = []
# Generate array
while True:
        nextline = file.readline()
# Have we hit EOF?
if nextline == '':
break
# Convert csv to array
parsedline = nextline.split(', ')
# Exclude averages, No Data, or unexpected lines
if len(parsedline) < 3:
continue
if 'No Data' in parsedline[2]:
continue
if '24hr avg' in parsedline[1]:
continue
# Passed validation, store data point
y_array.append(parsedline[2])
x_array.append(parsedline[0])
file.close()
# Display graph
plt.title('Beijing Air Quality\n' + x_array[0] + ' to ' + x_array[-1])
plt.plot(y_array)
plt.ylabel('PM2.5')
frame = plt.gca()
frame.axes.get_xaxis().set_visible(False)
fig = plt.gcf()
fig.canvas.set_window_title('BeijingAir')
plt.show()
except KeyboardInterrupt:
print "\nCaught keyboard interrupt, exiting."
sys.exit()
except:
print "\nUnexpected Exception:", str(sys.exc_info()[1])
sys.exit()
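# Input sketch (CSV format inferred from the parsing above; the sample line is
# hypothetical):
#   2015-03-09 22:00, 1hr, 85
# Rows with fewer than three fields, 'No Data' readings, and '24hr avg' entries
# are skipped before plotting.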
|
[
"yager.code@gmail.com"
] |
yager.code@gmail.com
|
a9d30b879ccaabca5abc2e5f1cbc6ca98d86d3c3
|
9d39f6ec24ea355ee82adfd4487453172953dd37
|
/tao_detection_release/configs/baselines/untitled.py
|
8638d6ce39fea718525ea919f636993395a6144e
|
[
"Apache-2.0"
] |
permissive
|
feiaxyt/Winner_ECCV20_TAO
|
d69c0efdb1b09708c5d95c3f0a38460dedd0e65f
|
dc36c2cd589b096d27f60ed6f8c56941b750a0f9
|
refs/heads/main
| 2023-03-19T14:17:36.867803
| 2021-03-16T14:04:31
| 2021-03-16T14:04:31
| 334,864,331
| 82
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,591
|
py
|
_base_ = '../detectors/detectors_htc_r101_64x4d_1x_coco.py'
model = dict(
pretrained='open-mmlab://resnext101_64x4d',
backbone=dict(
type='DetectoRS_ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
output_img=True),
neck=dict(
type='RFP',
rfp_steps=2,
aspp_out_channels=64,
aspp_dilations=(1, 3, 6, 1),
rfp_backbone=dict(
rfp_inplanes=256,
type='DetectoRS_ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
pretrained='open-mmlab://resnext101_64x4d',
style='pytorch')),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1230,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1230,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1230,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=[
dict(
type='HTCMaskHead',
with_conv_res=False,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=1230,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=1230,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=1230,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
]))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
],
stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.0001,
nms=dict(type='nms', iou_thr=0.5),
# LVIS allows up to 300
max_per_img=300,
mask_thr_binary=0.5)
)
# dataset settings
dataset_type = 'LVISDataset'
data_root = 'data/lvis/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True, poly2mask=False),
dict(
type='Resize',
img_scale=[(1600, 400), (1600, 1400)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=1,
train=dict(
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'lvis_v0.5_train.json',
seg_prefix=data_root + 'stuffthingmaps/train2017/',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'lvis_v0.5_val.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'lvis_v0.5_val.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[16, 19])
total_epochs = 20
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
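# Run sketch (assuming the standard mmdetection tools/ layout; the relative config
# path below mirrors this file's location in the repo):
#   python tools/train.py configs/baselines/untitled.py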
|
[
"feiaxyt@163.com"
] |
feiaxyt@163.com
|
701944f9a0da6fa0588f63bf6e177d303b98160d
|
ded10c2f2f5f91c44ec950237a59225e8486abd8
|
/.history/2/matrix_squaring_20200420170220.py
|
c2d708f28a6dde0df64200c0ef45ffffb76e08f1
|
[] |
no_license
|
jearistiz/Statistical-Physics-Projects
|
276a86407b32ded4e06b32efb2fadbd8eff8daed
|
d9c5b16a50856e148dc8604d92b6de3ea21fc552
|
refs/heads/master
| 2022-11-05T03:41:23.623050
| 2020-06-28T06:36:05
| 2020-06-28T06:36:05
| 254,909,897
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,545
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import os
import numpy as np
import matplotlib.pyplot as plt
from time import time
import pandas as pd
# Author: Juan Esteban Aristizabal-Zuluaga
# date: 20200414
def rho_free(x,xp,beta):
    """Usage: returns the density-matrix element for a free particle on an infinite torus."""
    return (2.*np.pi*beta)**(-0.5) * np.exp(-(x-xp)**2 / (2 * beta) )
def harmonic_potential(x):
    """Returns the value of the harmonic potential at a given position x"""
    return 0.5*x**2
def anharmonic_potential(x):
    """Returns the value of the anharmonic potential at a given position x"""
    # return np.abs(x)*(1+np.cos(x))  # this potential gives an interesting result
    return 0.5*x**2 - x**3 + x**4
def QHO_canonical_ensemble(x,beta):
    """
    Usage: computes the theoretical quantum probability of finding the harmonic
    oscillator (immersed in a thermal bath at inverse temperature beta) at position x.
    Receives:
        x: float        -> position
        beta: float     -> inverse temperature in reduced units, beta = 1/T.
    Returns:
        theoretical quantum probability at position x for inverse temperature beta.
    """
    return (np.tanh(beta/2.)/np.pi)**0.5 * np.exp(- x**2 * np.tanh(beta/2.))
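# High-temperature (Trotter) factorization used in rho_trotter below:
#   rho(x, x'; beta) ~ rho_free(x, x'; beta) * exp(-beta * (V(x) + V(x')) / 2)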
def rho_trotter(x_max = 5., nx = 101, beta=1, potential=harmonic_potential):
    """
    Usage: returns the density matrix in the Trotter approximation for high
    temperatures, under the influence of the potential "potential".
    Receives:
        x_max: float        -> x values lie in the interval (-x_max, x_max).
        nx: int             -> number of (equally spaced) x values considered.
        beta: float         -> inverse temperature in reduced units.
        potential: func     -> interaction potential. Must be a function of x.
    Returns:
        rho: numpy array, shape=(nx,nx)     -> density matrix in the Trotter approximation
                                               for high temperatures and the given potential.
        grid_x: numpy array, shape=(nx,)    -> x values at which rho is evaluated.
        dx: float                           -> separation between contiguous grid_x values.
    """
    # Position discretization step given the x_max and nx inputs
    dx = 2. * x_max / (nx - 1)
    # List of x values given the discretization and x_max
    grid_x = np.array([i*dx for i in range(-int((nx-1)/2), int(nx/2 + 1))])
    # Build the density matrix given by the Trotter approximation
rho = np.array([ [ rho_free(x , xp, beta) * np.exp(-0.5*beta*(potential(x)+potential(xp))) for x in grid_x] for xp in grid_x])
return rho, grid_x, dx
def density_matrix_squaring(rho, grid_x, N_iter = 1, beta_ini = 1, print_steps=True):
    """
    Usage: returns the density matrix after applying the matrix squaring algorithm
    N_iter times. The first iteration uses the density matrix given by the input rho
    (at inverse temperature beta_ini); subsequent iterations use the density matrix
    generated by the immediately preceding iteration. The system associated with the
    resulting density matrix (once the algorithm finishes) is at inverse temperature
    beta_fin = beta_ini * 2**(N_iter).
    Receives:
        rho: numpy array, shape=(nx,nx)     -> density matrix discretized at the values
                                               given by grid_x.
        grid_x: numpy array, shape=(nx,)    -> x values at which rho is evaluated.
        N_iter: int                         -> number of iterations of the algorithm.
        beta_ini: float                     -> inverse temperature associated with the
                                               input density matrix rho.
        print_steps: bool                   -> whether to print beta values at each
                                               iteration.
    Returns:
        rho: numpy array, shape=(nx,nx)     -> density matrix of the state at inverse
                                               temperature beta_fin.
        trace_rho: float                    -> trace of the density matrix at inverse
                                               temperature beta_fin. With the definition
                                               of rho we use, this equals the partition
                                               function at that temperature.
        beta_fin: float                     -> inverse temperature of the system
                                               associated with rho.
    """
    # Position discretization step
    dx = grid_x[1] - grid_x[0]
    # Compute beta_fin from the beta_ini and N_iter inputs
    beta_fin = beta_ini * 2 ** N_iter
    # Print relevant information
    if print_steps:
        print('\nbeta_ini = %.3f'%beta_ini,
              '\n----------------------------------------------------------------')
    # Iterate the matrix squaring algorithm
    for i in range(N_iter):
        rho = dx * np.dot(rho,rho)
        # Print relevant information
        if print_steps:
            print(u'Iteration %d) 2^%d * beta_ini --> 2^%d * beta_ini'%(i, i, i+1))
    if print_steps:
        print('----------------------------------------------------------------\n' +
              u'beta_fin = %.3f'%beta_fin)
    # Compute the trace of rho
    trace_rho = np.trace(rho)*dx
return rho, trace_rho, beta_fin
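# Worked example of the temperature bookkeeping above: the script below uses
# beta_fin = 4 with N_iter = 7, so beta_ini = 4 * 2**(-7) = 0.03125 and each of
# the 7 squarings doubles beta: 0.03125 -> 0.0625 -> ... -> 4.0.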
def save_csv(data, data_headers=None, file_name='file.csv', relevant_info=None, print_data=True):
    """
    Usage: data must contain lists that become the columns of a CSV file saved
    under the name file_name.
    Receives:
        data: array of arrays, shape=(nx,ny)    -> each list is a column of the file.
        data_headers: numpy array, shape=(nx,)  -> column names.
        file_name: str                          -> name of the file the data is saved to.
        relevant_info: list of str              -> information added as comments in the
                                                   first lines. Each element of this list
                                                   is added as a new line.
        print_data: bool                        -> whether to print the saved data on screen.
    Returns:
        data_pdDF: pd.DataFrame     -> the data as a pandas DataFrame.
        Saves a file with the data and the relevant information in its first lines.
    """
    # If no explicit name is given, save next to this script.
    if file_name=='file.csv':
        script_dir = os.path.dirname(os.path.abspath(__file__))  # full path of this script
        file_name = script_dir + '/' + file_name
    if data_headers is None or len(data_headers)!=len(data):
        data_headers = range(len(data))
        print( 'Note: not enough headers in data_headers for save_csv().\n'+
               'The headers used in the file will be the numbers 0, 1, 2,...')
    data_dict = {}
    for i,column in enumerate(data):
        data_dict[data_headers[i]] = column
    # Convert the data to a pandas DataFrame.
    data_pdDF = pd.DataFrame(data=data_dict)
    # Create the .csv file and add the relevant comments given as input
    if relevant_info is not None:
        with open(file_name,mode='w') as file_csv:
            for info in list(relevant_info):
                file_csv.write('# '+info+'\n')
        # Use pandas to append the data in csv format.
        with open(file_name,mode='a') as file_csv:
            data_pdDF.to_csv(file_csv)
    else:
        with open(file_name,mode='w') as file_csv:
            data_pdDF.to_csv(file_csv)
    # Print the data on screen.
    if print_data==True:
        print(data_pdDF)
    return data_pdDF
def run_pi_x_sq_trotter(x_max=5., nx=201, N_iter=7, beta_fin=4, potential=harmonic_potential,
potential_string = 'harmonic_potential', print_steps=True,
save_data=True, file_name=None, relevant_info=None,
plot=True, save_plot=True, show_plot=True):
"""
Uso: corre algoritmo matrix squaring iterativamente (N_iter veces). En la primera
iteración se usa una matriz densidad en aproximación de Trotter a temperatura
inversa beta_ini = beta_fin * 2**(-N_iter) para potencial dado por potential;
en las siguientes iteraciones se usa matriz densidad generada por la iteración
inmediatamente anterior. Además ésta función guarda datos de pi(x;beta) vs. x
en archivo de texto y grafica pi(x;beta) comparándolo con teoría para el oscilador
armónico cuántico.
Recibe:
x_max: float -> los valores de x estarán en el intervalo (-x_max,x_max).
nx: int -> número de valores de x considerados.
N_iter: int -> número de iteraciones del algoritmo matrix squaring.
beta_ini: float -> valor de inverso de temperatura que queremos tener al final de
aplicar el algoritmo matrix squaring iterativamente.
potential: func -> potencial de interacción usado en aproximación de trotter. Debe
ser función de x.
potential_string: str -> nombre del potencial (con éste nombramos los archivos que
se generan).
print_steps: bool -> decide si imprime los pasos del algoritmo matrix squaring.
save_data: bool -> decide si guarda los datos en archivo .csv.
plot: bool -> decide si grafica.
save_plot: bool -> decide si guarda la figura.
show_plot: bool -> decide si muestra la figura en pantalla.
Devuelve:
rho: numpy array, shape=(nx,nx) -> matriz densidad de estado rho a temperatura
inversa igual a beta_fin.
trace_rho: float -> traza de la matriz densidad a temperatura inversa
igual a beta_fin. Por la definición que tomamos
de "rho", ésta es equivalente a la función
partición en dicha temperatura.
grid_x: numpy array, shape=(nx,) -> valores de x en los que está evaluada rho.
"""
    # Compute beta_ini from the beta_fin and N_iter inputs
    beta_ini = beta_fin * 2**(-N_iter)
    # Compute rho in the Trotter approximation
    rho, grid_x, dx = rho_trotter(x_max, nx, beta_ini, potential)
    # Approximate rho by matrix squaring iterated N_iter times.
rho, trace_rho, beta_fin_2 = density_matrix_squaring( rho, grid_x, N_iter,
beta_ini, print_steps )
print( '----------------------------------------------------------------\n' +
u'Matrix squaring: beta_ini = %.3f --> beta_fin = %.3f'%(beta_ini, beta_fin_2) +
u' N_iter = %d Z(beta_fin) = Tr(rho(beta_fin)) = %.3E \n'%(N_iter,trace_rho))
    # Normalize rho to 1 and compute probability densities at the grid_x values.
rho_normalized = np.copy(rho)/trace_rho
x_weights = np.diag(rho_normalized)
    # Save data to a .csv file.
    script_dir = os.path.dirname(os.path.abspath(__file__))  # full path of this script
if save_data==True:
        # Name of the .csv file where pi(x;beta_fin) values are saved.
if file_name is None:
file_name = script_dir+u'/pi_x-ms-%s-x_max_%.3f-nx_%d-N_iter_%d-beta_fin_%.3f.csv'\
%(potential_string,x_max,nx,N_iter,beta_fin)
else:
file_name = script_dir + u'/pi_x-ms-' + file_name +'.csv'
        # Relevant information to add as comments to the csv file.
if relevant_info is None:
relevant_info = [ 'pi(x;beta_fin) computed using matrix squaring algorithm and' + \
' Trotter approximation. Parameters:',
u'%s x_max = %.3f nx = %d '%(potential_string,x_max,nx) + \
u'N_iter = %d beta_ini = %.3f '%(N_iter,beta_ini,) + \
u'beta_fin = %.3f'%beta_fin ]
        # Save pi(x;beta_fin) values to the csv file.
        pi_x_data = [grid_x.copy(),x_weights.copy()]
        pi_x_data_headers = ['position_x','prob_density']
        pi_x_data = save_csv(pi_x_data,pi_x_data_headers,csv_file_name,relevant_info,print_data=0)
    # Plot and compare with theory
if plot == True:
plt.figure(figsize=(8,5))
plt.plot(grid_x, x_weights, label = 'Matrix squaring +\nfórmula de Trotter.\n$N=%d$ iteraciones\n$dx=%.3E$'%(N_iter,dx))
plt.plot(grid_x, QHO_canonical_ensemble(grid_x,beta_fin), label=u'Valor teórico QHO')
plt.xlabel(u'x')
        plt.ylabel(r'$\pi^{(Q)}(x;\beta)$')
plt.legend(loc='best',title=u'$\\beta=%.2f$'%beta_fin)
plt.tight_layout()
if save_plot==True:
if file_name is None:
plot_name = script_dir+u'/pi_x-ms-plot-%s-x_max_%.3f-nx_%d-N_iter_%d-beta_fin_%.3f.eps'%(potential_string,x_max,nx,N_iter,beta_fin)
else:
plot_name = script_dir+u'/pi_x-ms-plot-'+file_name+'.eps'
plt.savefig(plot_name)
if show_plot==True:
plt.show()
plt.close()
return rho, trace_rho, grid_x
def Z_several_values( temp_min=1./10, temp_max=1/2., N_temp=10, save_Z_csv=True,
Z_file_name = 'Z.csv', relevant_info = None, print_Z_data = True,
x_max=7., nx=201, N_iter=7, potential = harmonic_potential,
potential_string = 'harmonic_potential', print_steps=False,
save_pi_x_data=False, plot=False, save_plot=False, show_plot=False ):
"""
"""
beta_max = 1./temp_min
beta_min = 1./temp_max
N_temp = int(N_temp)
beta_array = np.linspace(beta_max,beta_min,N_temp)
Z = []
for beta_fin in beta_array:
rho, trace_rho, grid_x = \
run_pi_x_sq_trotter(x_max=x_max, nx=nx, N_iter=N_iter, beta_fin=beta_fin,
potential=potential, potential_string=potential_string,
print_steps=print_steps, save_data=save_pi_x_data, plot=plot,
save_plot=save_plot, show_plot=show_plot)
Z.append(trace_rho)
Z_data = [beta_array.copy(),1./beta_array.copy(),Z.copy()]
if Z_file_name == 'Z.csv':
        script_dir = os.path.dirname(os.path.abspath(__file__)) # full path of this script
Z_file_name = script_dir + '/' + Z_file_name
if save_Z_csv == True:
Z_data_headers = ['beta','temperature','Z']
Z_data = save_csv( Z_data, Z_data_headers, relevant_info=relevant_info,
file_name = Z_file_name, print_data = False )
if print_Z_data == True:
print(Z_data)
return Z_data
# Increase the font size in generated figures
plt.rcParams.update({'font.size':15})
# Run the matrix squaring algorithm
run_algorithm = True
# Physical parameters of the algorithm
x_max=5.
nx=201
N_iter=7
beta_fin=4
potential, potential_string = harmonic_potential, 'harmonic_potential'
# Technical parameters
print_steps=True
save_data=True
file_name=None
relevant_info=None
plot=True
save_plot=True
show_plot=True
if run_algorithm:
rho, trace_rho, grid_x = \
run_pi_x_sq_trotter( x_max, nx, N_iter, beta_fin, potential, potential_string,
print_steps, save_data, file_name, relevant_info, plot,
save_plot, show_plot)
# Draft: internal (average) energy calculation
calculate_avg_energy = False
script_dir = os.path.dirname(os.path.abspath(__file__)) # full path of this script
Z_file_name = script_dir+'/'+'partition-function-test-2.csv'
temp_min = 1./10
temp_max = 1./2
N_temp = 10
if calculate_avg_energy:
t_0 = time()
Z_data = Z_several_values()
t_1= time()
print('<E(beta)> --> %.3f sec.'%(t_1-t_0))
# READ DATA IS OK
Z_file_name = script_dir+'/'+'partition-function-test-2.csv'
Z_file_read = pd.read_csv(Z_file_name, index_col=0, comment='#')
beta_read = Z_file_read['beta']
beta_read = beta_read.to_numpy()
temp_read = Z_file_read['temperature']
temp_read = temp_read.to_numpy()
Z_read = Z_file_read['Z']
Z_read = Z_read.to_numpy()
E_avg = np.gradient(-np.log(Z_read),beta_read)
def Z_QHO(beta):
return 0.5/np.sinh(beta/2)
def E_QHO_avg_theo(beta):
return 0.5/np.tanh(0.5*beta)
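# Consistency check (sketch): with Z(beta) = 1/(2*sinh(beta/2)) one gets
# <E> = -d(ln Z)/d(beta) = 1/(2*tanh(beta/2)), i.e. exactly E_QHO_avg_theo, so the
# numerical gradient above should approach it on a fine beta grid, e.g.:
#   beta_test = np.linspace(2., 10., 1000)
#   np.allclose(np.gradient(-np.log(Z_QHO(beta_test)), beta_test),
#               E_QHO_avg_theo(beta_test), atol=1e-2)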
plt.figure()
plt.plot(temp_read,E_avg,label=u'$< E > Path Integral$')
plt.plot(temp_read,E_QHO_avg_theo(beta_read),label=u'$< E > theory$')
plt.plot(temp_read,Z_read,'v-',label=u'$ Z(T) $')
plt.legend(loc='best')
plt.xlabel(u'$T$')
plt.ylabel(u'$< E >$ or $Z(T)$')
plt.show()
plt.close()
|
[
"jeaz.git@gmail.com"
] |
jeaz.git@gmail.com
|
11f52d314437ea29e602494b2f3a7ab5684aebbd
|
9681736f96d9fd8e402600272f8a8b58fce20694
|
/melspec.py
|
aaac9b56fe678e438c386c2a1d8d106199c354de
|
[] |
no_license
|
akio-kobayashi/audio_processing_pt2
|
b988b7d4e553cc84eb83627d101781f5742d7ad3
|
7eaa2d2e1b4d1f33dbbb280c4f89546f300a61a7
|
refs/heads/master
| 2022-11-22T09:22:18.411992
| 2020-07-17T03:35:45
| 2020-07-17T03:35:45
| 276,017,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,237
|
py
|
#!/usr/bin/python3
import os
import numpy as np
import scipy
import h5py
import re
import csv
import librosa
import argparse
def compute_melspec(signal):
    melspec=librosa.feature.melspectrogram(y=signal, sr=16000, S=None, n_fft=512, hop_length=200, win_length=400,
                                           window='hamming', center=True, pad_mode='reflect', power=2.0, n_mels=40)
melspec = np.log(melspec+1.0e-8)
return melspec
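# Rough shape check (sketch): a 5 s clip at 16 kHz has 80000 samples; with
# hop_length=200 and center=True that gives 80000/200 + 1 = 401 frames, so the
# returned log-mel spectrogram has shape (40, 401).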
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--csv', type=str, required=True)
parser.add_argument('--train', type=str, required=True)
parser.add_argument('--valid', type=str, required=True)
args = parser.parse_args()
data={}
keys=[]
    # read the metadata csv once (the original opened the file twice and left the
    # second handle unclosed)
    with open(args.csv) as csv_file:
        df = csv.DictReader(csv_file)
        for row in df:
            keys.append(row['filename'])
            data[row['filename']] = row
counts={}
for n in range(5):
counts[n]=0
with h5py.File(args.valid, 'w') as valid:
with h5py.File(args.train, 'w') as train:
for key in keys:
if data[key]['category'] == 'cat':
label=0
elif data[key]['category'] == 'cow':
label=1
elif data[key]['category'] == 'dog':
label=2
elif data[key]['category'] == 'frog':
label=3
elif data[key]['category'] == 'pig':
label=4
else:
continue
counts[label] += 1
path=os.path.join('./ESC-50-master/audio',data[key]['filename'])
                wav,sr=librosa.load(path, sr=16000) # resample to 16 kHz to match the sr assumed in compute_melspec
mels=compute_melspec(wav)
if counts[label] > 30:
valid.create_group(key)
valid.create_dataset(key+'/feature', data=mels)
valid.create_dataset(key+'/label', data=label)
else:
train.create_group(key)
train.create_dataset(key+'/feature', data=mels)
train.create_dataset(key+'/label', data=label)
if __name__ == "__main__":
main()
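# A minimal sketch (assumed reader, not part of the original script) of how the
# generated HDF5 files can be consumed later:
#   with h5py.File('train.h5', 'r') as h5:
#       for key in h5.keys():
#           feature = h5[key]['feature'][()]   # (40, n_frames) log-mel features
#           label = int(h5[key]['label'][()])  # class id in [0, 4]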
|
[
"a-kobayashi@a.tsukuba-tech.ac.jp"
] |
a-kobayashi@a.tsukuba-tech.ac.jp
|
72e0e6a314371822134735a4588f6f9f0bd8ebb6
|
dabc28b01defdec0dd1978f39486df000ebc8c23
|
/iptFilter-range.py
|
9fc74add06d32ce20e61a00915655cbb7a21b224
|
[] |
no_license
|
jaberansariali/my-iptables-python
|
db10e92be3298cdb4df67f6d84ce4d892a1db0a8
|
0ff34f8c535e280f555ff9e3949e0937e2f0a1a5
|
refs/heads/master
| 2020-12-06T19:50:23.019142
| 2020-01-11T11:34:07
| 2020-01-11T11:34:07
| 232,537,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,907
|
py
|
#!/usr/bin/python3.6
##############################################################################################################################
######### WRITE BY A.ANSARI ##
######### ##
##############################################################################################################################
##########Do not change any thing ###########################
import iptc
import sys
############################################Discription###################################
# These are the input parameters: 1#Ch:input/output 2#Po:tcp/udp/all 3#: interface/all 4#S:ip/subnet or all 5#D:ip/subnet or all 6# source range ip 7# destination range ip 8#SP:portnumber/all 9#DP:portnumber/all 10#ACCEPT/DROP
if len(sys.argv) > 11:
print('You have specified too many arguments')
sys.exit()
if len(sys.argv) < 11:
    print('You have specified too few arguments; all 10 are required')
    sys.exit()
Chain = sys.argv[1]
protocol = sys.argv[2]
interface = sys.argv[3]
source = sys.argv[4]
destination = sys.argv[5]
source_Range = sys.argv[6]
destination_Range = sys.argv[7]
source_port = sys.argv[8]
destination_port = sys.argv[9]
targets = sys.argv[10]
#print(len(sys.argv))
def drop():
# chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), Chain)
# rule = iptc.Rule()
# rule.protocol = protocol
# rule.in_interface = interface
# rule.add_match(match)
# match = iptc.Match(rule, "iprange")
# match.src_range = "192.168.1.100-192.168.1.200"
# match.dst_range = "172.22.33.106"
# rule.add_match(match)
# target = iptc.Target(rule, targets)
# rule.target = target
# chain.insert_rule(rule)
rule = iptc.Rule()
rule.protocol = protocol
match = iptc.Match(rule, protocol)
match.sport = source_port
match.dport = destination_port
rule.add_match(match)
match = iptc.Match(rule, "iprange")
match.src_range = source_Range
match.dst_range = destination_Range
rule.src = source
rule.dst = destination
rule.add_match(match)
rule.target = iptc.Target(rule, targets)
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), Chain)
chain.insert_rule(rule)
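# Example invocation (sketch; argument order as documented at the top):
#   ./iptFilter-range.py INPUT tcp eth0 0.0.0.0/0 0.0.0.0/0 \
#       192.168.1.100-192.168.1.200 10.0.0.1-10.0.0.50 1024:65535 80 DROP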
def allowLoopback():
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
rule = iptc.Rule()
rule.in_interface = "lo"
target = iptc.Target(rule, "ACCEPT")
rule.target = target
chain.insert_rule(rule)
def allowEstablished():
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'INPUT')
rule = iptc.Rule()
match = rule.create_match('state')
match.state = "RELATED,ESTABLISHED"
rule.target = iptc.Target(rule, 'ACCEPT')
chain.insert_rule(rule)
drop()
#allowLoopback()
#allowEstablished()
#print (sys.argv[1])
#print (sys.argv[2])
|
[
"a.ansari@fwutech.com"
] |
a.ansari@fwutech.com
|
3c4f13f5bdab54fff953eb3c1894c035d1da94a8
|
958769d6794a840643c1d7a016d811118edfe494
|
/Test/event.py
|
c8cace4645560d3ecb691d0b257ee902313c48bd
|
[] |
no_license
|
d-schmitt/Rostersimulation
|
9a3ccd1d2ec0b4e63b9058a8985b6a1ec770bcaa
|
cb58c4be25403b9c41b0c55bcaf83138b1402f17
|
refs/heads/master
| 2021-09-04T00:58:01.610131
| 2018-01-13T17:35:24
| 2018-01-13T17:35:24
| 109,983,162
| 0
| 0
| null | 2018-01-03T12:25:07
| 2017-11-08T14:01:01
|
Python
|
UTF-8
|
Python
| false
| false
| 10,012
|
py
|
from datetime import datetime, timedelta
from random import randint
#--------------------------------------------------------------------------------------------------------------------------------
# Explanation of the different states
# working: the employee is currently on duty
# none: the employee is off duty and - as long as no restrictions would be violated - is available for rescheduling
# vacation: the employee is on vacation and therefore not available for rescheduling
# sick: the employee is ill and not available for rescheduling; while in the sick state, the employee cannot work
#       the previously assigned shifts, so a rescheduling has to take place.
#--------------------------------------------------------------------------------------------------------------------------------
# Event class for the simulation
class Event(object):
    # Constructor
    def __init__(self, eDate):
        self.date = eDate  # date for which the event is scheduled
#---------------------------------------------------------------------------------------------------------------------------------
# Section: shift begin & end
#---------------------------------------------------------------------------------------------------------------------------------
# Special event: shift begin
class shiftBegin(Event):
    # Constructor
    def __init__(self, r, currentTime):
        eTime = self.terminateEvent(r, currentTime)[0]
        self.type = self.terminateEvent(r, currentTime)[1]
        Event.__init__(self, eTime)  # pass the attribute on to the superclass
        self.sType = self.setShiftType(eTime, r)  # determine the shift type
    # Function to determine the shift type
def setShiftType(self, dt, r):
for st in r.shiftTypes:
if datetime.strptime(st["startTime"], '%H:%M').hour == dt.hour:
return st["name"]
    # Function to schedule the next shift begin
    def terminateEvent(self, r, currentTime):
        nextShift = r.getNextStartTime(currentTime.hour)
        nextShiftTime = nextShift[0]  # start time of the next shift
        currentDay0 = currentTime - timedelta(hours=currentTime.hour, minutes=currentTime.minute)  # reset the time to 0:00
        nextShiftDateTime = currentDay0 + timedelta(days=nextShift[2], hours=int(nextShiftTime[0:2]), minutes=int(nextShiftTime[3:5]))  # add the start time
        return(nextShiftDateTime, nextShift[1])
    # Change of the employees' state variables triggered by the shift begin
    def changeState(self, r, log_file):
        #print("State changes at shift begin " + self.sType + ":")
        workingList = r.getWorkingEmployees(self.date, self.sType)  # list of employees
alertMA = 999
for e in workingList:
if e.state != "sick":
r.changeEmployeeState(e.fName + " " + e.lName, "working")
else:
alertMA = e.eID
r.printStates(log_file, alertMA)
return(r)
# Special event: shift end
class shiftEnd(Event):
    # Constructor
    def __init__(self, sType, beginnTime, r):
        self.type = sType
        self.beginnTime = str(beginnTime)
        eTime = self.terminateEvent(r)
        Event.__init__(self, eTime)  # pass the attribute on to the superclass
        self.sType = self.setShiftType(eTime, r)  # determine the shift type
    # Function to determine the shift type
    def setShiftType(self, dt, r):
        for st in r.shiftTypes:
            if datetime.strptime(st["endTime"], '%H:%M').hour == dt.hour:
                return st["name"]
    def terminateEvent(self, r):
        endTime = r.getEndByStart(self.type)
        beginTimeForm = datetime.strptime(self.beginnTime, '%Y-%m-%d %H:%M:%S')
        currentDay0 = beginTimeForm - timedelta(hours=int(beginTimeForm.hour), minutes=int(beginTimeForm.minute))  # reset the time to 0:00
        nextEndDateTime = currentDay0 + timedelta(hours=int(endTime[0:2]), minutes=int(endTime[3:5]))  # add the end time
        return(nextEndDateTime)
    # Change of the employees' state variables triggered by the shift end
    def changeState(self, r, log_file):
        alertMA = 999
        #print("State changes at shift end " + self.sType + ":")
        beginTimeForm = datetime.strptime(self.beginnTime, '%Y-%m-%d %H:%M:%S')
        workingList = r.getWorkingEmployees(beginTimeForm, self.sType)  # list of employees
        # TODO: adapt the working-hours calculation
for e in workingList:
if e.state != "sick":
r.changeEmployeeState(e.fName + " " + e.lName, "none")
for st in r.shiftTypes:
if st["name"] == self.sType:
e.hoursWorked = e.hoursWorked + st["workingHours"]
log_file.write(str(e.fName) + ": +" + str(st["workingHours"]) + " Arbeitsstunden\n")
r.printStates(log_file, alertMA)
return(r)
#---------------------------------------------------------------------------------------------------------------------------------
# Section: vacation begin & end
#---------------------------------------------------------------------------------------------------------------------------------
# Special event: vacation begin
class vacationBegin(Event):
    # Constructor
    def __init__(self, r, currentTime):
        eTime, employees = self.terminateEvent(r, currentTime)
        Event.__init__(self, eTime)
        self.employees = employees
    # Schedule the time of the event
    def terminateEvent(self, r, currentTime):
        for key, value in r.vacationBegin.items():
            if key >= currentTime:
                return(key + timedelta(minutes=30), value)
        return("none","none")
    # Change of the state variables triggered by the vacation begin
    def changeState(self, r, log_file):
alertMA = 999
for e in self.employees:
r.changeEmployeeState(e, "vacation")
r.printStates(log_file, alertMA)
return(r)
# Special event: vacation end
class vacationEnd(Event):
    # Constructor
    def __init__(self, r, currentTime):
        eTime, employees = self.terminateEvent(r, currentTime)
        Event.__init__(self, eTime)
        self.employees = employees
    # Schedule the time of the event
    def terminateEvent(self, r, currentTime):
        for key, value in r.vacationEnd.items():
            if key >= currentTime:
                return(key + timedelta(hours=1), value)
        return("none","none")
    # Change of the state variables triggered by the vacation end
    def changeState(self, r, log_file):
alertMA = 999
for e in self.employees:
r.changeEmployeeState(e, "none")
r.printStates(log_file, alertMA)
return(r)
#---------------------------------------------------------------------------------------------------------------------------------
# Section: illness begin, re-scheduling & illness end
#---------------------------------------------------------------------------------------------------------------------------------
# Special event: illness begin
class illnessBegin(Event):
    # Constructor
    def __init__(self, r, currentTime, lastSick):
        eTime, eEmployee = self.terminateEvent(r, currentTime, lastSick)
        Event.__init__(self, eTime)
        self.employee = eEmployee
        self.duration = self.terminateDuration()
    # Determine the duration of the illness
    # TODO: better algorithm
    def terminateDuration(self):
        iTime = timedelta(days=randint(1,5))
        return(iTime)
    # Schedule the time of the event
    def terminateEvent(self, r, currentTime, lastSick):
        # lastSick ensures that no employee who is already sick falls sick again
        iEmployee = r.determineIllness(lastSick)
        iTime = r.determineIllnessTime(currentTime)
        return(iTime, iEmployee)
    # Perform the state change
    def changeState(self, r, log_file, currentTime):
        alertMA = 999
        # Keep this error message until the simulation results are 100% correct;
        # this case should never occur
        if self.employee.state == "sick":
print(self.employee.fName + " ist Waehrend der Krankheit nochmal krank geworden")
r.changeEmployeeState(self.employee.fName + " " + self.employee.lName, "sick")
r.printStates(log_file, alertMA)
r.addSickHours(self.employee.eID, currentTime, self.duration, log_file, currentTime)
return(r)
# Special event: illness end
class illnessEnd(Event):
    # Constructor
    def __init__(self, r, currentTime, employee, duration):
        eTime = self.terminateEvent(r, currentTime, duration)
        Event.__init__(self, eTime)
        self.employee = employee
    # Schedule the time of the event
    def terminateEvent(self, r, currentTime, duration):
        iTime = currentTime+duration-timedelta(hours=currentTime.hour, minutes=currentTime.minute)+timedelta(hours=23, minutes=30)
        return(iTime)
    # Perform the state change
def changeState(self, r, log_file):
alertMA = 999
r.changeEmployeeState(self.employee.fName + " " + self.employee.lName, "none")
r.printStates(log_file, alertMA)
return(r)
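# A minimal sketch (assumed driver, not part of this module) of how these event
# classes are typically consumed: keep the pending events ordered by date and let
# each one mutate the roster r via its changeState() method, e.g.:
#   for event in sorted(pending_events, key=lambda e: e.date):
#       r = event.changeState(r, log_file)  # illnessBegin additionally needs currentTime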
|
[
"noreply@github.com"
] |
d-schmitt.noreply@github.com
|
3ccde3ab4eb46be77a8bb20983463e9784ae1ba4
|
6b98d916935f03ac8f75e52a174de73040602336
|
/chapter 7 python/Second2.py
|
625609071021a156550f36b666ffba245ee78c3b
|
[] |
no_license
|
AungKyawZaww9/python
|
61e014d4b2c5bd8fd675a59b50ca210dd410a384
|
627afbec42f358dfcd5909bc787029e3fb30391c
|
refs/heads/master
| 2023-06-22T00:28:48.825211
| 2021-07-25T19:16:15
| 2021-07-25T19:16:15
| 389,271,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
from Second1 import Time
time1 = Time()
print("The initial military time is :",time1.printMilitary())
print("\nThe initial standard time is :",time1.printstandard())
time1.setTime( 7, 37, 40)
print("\n\nMilitary time after setime is :", time1.printMilitary())
print("\nStandard time after settime is :", time1.printstandard())
time1.setHour(4)
time1.setMinute(3)
time1.setSecond(6)
print("\nMilitary time after Set.Hour,Minute,Second :",time1.printMilitary())
print("\n\nStandard tme after :", time1.printMilitary())
|
[
"www.aungkyawzaww1999@gmail.com"
] |
www.aungkyawzaww1999@gmail.com
|
52c9c49a2314745c804a0f7e444d3616000acd05
|
479f1d9530cd7a969063957d1ef55768f4b060dc
|
/modules/Server.py
|
5aee92a2ab1073e662eb70ba8bcaff2d0b081a69
|
[] |
no_license
|
kenanismayilov335/FavePorn
|
4b1e1287a429a844dcd772e72229f1fbccdf9872
|
fc8ca60f86b48a3c2211d95b6d7c168b49548f36
|
refs/heads/main
| 2023-08-29T07:25:25.021818
| 2021-10-16T12:39:48
| 2021-10-16T12:39:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
import datetime
#SERVER STATUS ATTRIBUTES
isTesting= False
sessions= {}
whiteList= [
'KarmaKunh'
]
blackList= [
]
#SERVER STATISTICS ATTRIBUTES
pics_sent= 0
video_sent= 0
user_count= 0
all_video_research= {}
all_users= []
session_reset_time= datetime.datetime.now()
user_count_resetDate= datetime.datetime.now()
video_sent_resetDate = datetime.datetime.now()
pics_sent_resetDate = datetime.datetime.now()
all_video_research_resetDate = datetime.datetime.now()
all_users_resetDate = datetime.datetime.now()
#SERVER STATISTICS METHODS
def session_count():
return len( sessions)
def add_to_whiteList( username):
whiteList.append( username)
def remove_from_whiteList( username):
try:
whiteList.remove( username)
    except ValueError: # list.remove raises ValueError if the username is absent
print("error")
pass
def add_to_blackList( username):
blackList.append( username)
def remove_from_blackList( username):
try:
blackList.remove( username)
    except ValueError: # list.remove raises ValueError if the username is absent
print("error")
pass
|
[
"noreply@github.com"
] |
kenanismayilov335.noreply@github.com
|
40298913bb95315380f176d6bd947da284ffad40
|
c101831acd67a49a91259bf44a508520ffd72b06
|
/Assignment3/domain.py
|
5420824c6c7ececfbfb0bc2312f8b321be18f075
|
[] |
no_license
|
MihaiSilinc/Artificial-intelligence
|
6744989f01c2fe44d3dea5429ac88ea38ac47bdc
|
7576fd6aa9eff1d75da61efa2aabb5dbf76d6f56
|
refs/heads/master
| 2023-05-30T10:05:48.715575
| 2021-06-18T13:49:46
| 2021-06-18T13:49:46
| 378,163,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,813
|
py
|
# -*- coding: utf-8 -*-
from random import *
from utils import *
import numpy as np
from heapq import nlargest
import itertools
# the gene class can be replaced with int or float, or other types
# depending on your problem's representation
class gene:
def __init__(self):
# random initialise the gene according to the representation
self.__gene = choice([UP, DOWN, LEFT, RIGHT])
def get_direction(self):
return self.__gene
def set_direction(self, otherDirection):
if otherDirection not in [UP, DOWN, LEFT, RIGHT]:
raise Exception("Invalid direction!")
self.__gene= otherDirection
class Individual:
def __init__(self, size = 0):
self.__size = size
#chromosome
self.__chromozome = [gene() for i in range(self.__size)]
self.__fitness = None
def get_size(self):
return self.__size
def get_gene(self, genePosition):
if genePosition >= self.__size:
raise Exception("No gene!")
return self.__chromozome[genePosition]
def set_gene(self, genePosition, newGene):
if genePosition >= self.__size:
raise Exception("No gene!")
self.__chromozome[genePosition] = newGene
def get_chromosome(self):
return self.__chromozome
def set_chromosome(self, chromosome):
self.__chromozome = chromosome
def fitness(self, map, x, y):
# x, y represents the starting position of the drone.
posx, posy = x, y
copy_map = map.copy()
score = 0
score += copy_map.markVisible(x, y)
for gene in self.__chromozome:
direction = gene.get_direction()
if direction == UP:
posx = posx - 1
if not (0 <= posx <= 19) or not (0 <= posy <= 19) or (copy_map[posx][posy] == 1):
posx = posx + 1
continue
#score += copy_map.markVisible(posx, posy)
if direction == DOWN:
posx = posx + 1
if not (0 <= posx <= 19) or not (0 <= posy <= 19) or (copy_map[posx][posy] == 1):
posx = posx - 1
continue
#score += copy_map.markVisible(posx, posy)
if direction == LEFT:
posy = posy - 1
if not (0 <= posx <= 19) or not (0 <= posy <= 19) or (copy_map[posx][posy] == 1):
posy = posy + 1
continue
#score += copy_map.markVisible(posx, posy)
if direction == RIGHT:
posy = posy + 1
if not (0 <= posx <= 19) or not (0 <= posy <= 19) or (copy_map[posx][posy] == 1):
posy = posy - 1
continue
#score += copy_map.markVisible(posx, posy)
score += copy_map.markVisible(posx, posy)
self.__fitness = score
return self.__fitness
def mutate(self, mutateProbability = 0.04):
if random() < mutateProbability:
mutated_gene = randrange(self.__size)
self.__chromozome[mutated_gene].set_direction(choice([UP, DOWN, LEFT, RIGHT]))
# perform a mutation with respect to the representation
def crossover(self, otherParent, crossoverProbability = 0.7):
offspring1, offspring2 = Individual(self.__size), Individual(self.__size)
if random() < crossoverProbability:
border = randrange(0, self.__size)
for i in range(border):
offspring1.set_gene(i, self.get_gene(i))
offspring2.set_gene(i, otherParent.get_gene(i))
for j in range(border, self.__size):
offspring1.set_gene(j, otherParent.get_gene(j))
offspring2.set_gene(j, self.get_gene(j))
else:
offspring1.set_chromosome(self.get_chromosome())
offspring2.set_chromosome(otherParent.get_chromosome())
return offspring1, offspring2
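    # Example of the single-point crossover above: with size 6 and border 2,
    # parents ABCDEF and abcdef yield offspring ABcdef and abCDEF; with
    # probability 1 - crossoverProbability the parents are copied unchanged.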
class Population():
def __init__(self, chromozomeSize = 0, initialX=0, initialY=0, map = None):
self.__chromozomeSize = chromozomeSize # chromozome size
self.__individuals = []
self.__x = initialX
self.__y = initialY
self.map = map
self.__individuals_scores = {}
# for ind in self.__individuals:
# self.__individuals_scores[ind] = 0
self.__total = 0
self.__best = 0
self.__bestIndividual = None
def clear_individuals(self):
self.__individuals.clear()
self.__individuals_scores = {}
def evaluate(self):
# evaluates the population
self.__total = 0
self.__best = 0
self.__bestIndividual = None
for x in self.__individuals:
individual_score = x.fitness(self.map, self.__x, self.__y)
self.__individuals_scores[x] = individual_score
self.__total += individual_score
if individual_score > self.__best:
self.__best = individual_score
self.__bestIndividual = x
return self.__total, self.__best
def add_individuals_scores(self, individuals_scores):
# individuals_scores - dict with individuals and scores
for i in individuals_scores:
self.__individuals.append(i)
self.__individuals_scores[i] = individuals_scores[i]
if individuals_scores[i] >= self.__best:
self.__best = individuals_scores[i]
self.__bestIndividual = i
self.__total += individuals_scores[i]
def __len__(self):
return len(self.__individuals)
@property
def populationSize(self):
return len(self.__individuals)
@property
def average(self):
return self.__total / len(self.__individuals)
@property
def total(self):
return self.__total
@property
def best(self):
return self.__best
@property
def individuals(self):
return self.__individuals
@property
def individuals_with_scores(self):
return self.__individuals_scores
@property
def bestIndividual(self):
return self.__bestIndividual
def getStartingPosition(self):
return self.__x, self.__y
def get_chromozome_size(self):
return self.__chromozomeSize
def random_individuals(self, size):
# generate a population with given size
self.__individuals_scores = {}
self.__individuals = [Individual(self.__chromozomeSize) for i in range(size)]
self.evaluate()
def set_individuals(self, individuals):
# generate a population from list of individuals
self.__individuals_scores = {}
self.__individuals.clear()
for i in individuals:
if len(i.get_chromosome()) != self.__chromozomeSize:
raise Exception('Incompatible individuals!')
self.__individuals.append(i)
self.evaluate()
def selection(self, k = 0):
selected = set()
while(len(selected) != k):
individual = np.random.choice(self.__individuals, 1, False,
[(self.__individuals_scores[y] / self.__total) for y in self.__individuals])
selected.add(individual[0])
return selected
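    # The selection above is fitness-proportionate ("roulette wheel"): each draw
    # picks an individual with probability fitness/total, repeating until k
    # distinct individuals have been collected.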
def bestK(self, k = 2):
a = nlargest(k, self.__individuals_scores, key=self.__individuals_scores.get)
x1 = []
x2 = []
for i in self.__individuals:
x1.append(self.__individuals_scores[i])
for i in a:
x2.append(self.__individuals_scores[i])
x1.sort(reverse=True)
print(x1)
print(x2)
print(len(self.__individuals))
print(len(self.__individuals_scores))
print('---------------')
return a
def filter(self, k):
# filter , keep the best individuals
filtered = self.bestK(k)
survivors = {}
for ind in filtered:
survivors[ind] = self.__individuals_scores[ind]
self.clear_individuals()
self.__best = 0
self.__total = 0
self.__bestIndividual = None
self.add_individuals_scores(survivors)
def find_optimal_solution(self):
genes = [gene(), gene(), gene(), gene()]
genes[0].set_direction(UP)
genes[1].set_direction(DOWN)
genes[2].set_direction(LEFT)
genes[3].set_direction(RIGHT)
ALL_CHROMOSOMES = itertools.product(genes, repeat=self.__chromozomeSize)
best_score = 0
best_individual = None
i = 0
for c in ALL_CHROMOSOMES:
i += 1
print(i)
chromosome = list(c)
print(chromosome)
ind = Individual(self.__chromozomeSize)
ind.set_chromosome(chromosome)
score = ind.fitness(self.map, self.__x, self.__y)
if score > best_score:
best_individual = ind
best_score = score
return best_individual.get_chromosome(), best_score
# a = [LEFT, RIGHT, UP, DOWN]
# x = itertools.product(a, repeat=10)
# i = 0
# for e in x:
# print(e)
# i += 1
# print(i)
class Map():
def __init__(self, n = 20, m = 20):
self.n = n
self.m = m
self.surface = np.zeros((self.n, self.m))
# creates a random map of given size
def randomMap(self, fill = 0.2, n = 20, m = 20):
self.n = n
self.m = m
self.surface = np.zeros((self.n, self.m))
for i in range(self.n):
for j in range(self.m):
if random() <= fill:
self.surface[i][j] = 1
else:
self.surface[i][j] = 0
def __getitem__(self, key):
return self.surface[key]
def get_size(self):
return self.n, self.m
def __str__(self):
string=""
for i in range(self.n):
for j in range(self.m):
string = string + str(int(self.surface[i][j]))
string = string + "\n"
return string
def copy(self):
copy = Map(self.n, self.m)
copy.surface = np.array(self.surface, copy=True)
return copy
def readUDMSensors(self, x,y):
readings=[0,0,0,0]
# UP
xf = x - 1
while ((xf >= 0) and (self.surface[xf][y] == 0)):
xf = xf - 1
readings[UP] = readings[UP] + 1
# DOWN
xf = x + 1
while ((xf < self.n) and (self.surface[xf][y] == 0)):
xf = xf + 1
readings[DOWN] = readings[DOWN] + 1
# LEFT
yf = y + 1
while ((yf < self.m) and (self.surface[x][yf] == 0)):
yf = yf + 1
readings[LEFT] = readings[LEFT] + 1
# RIGHT
yf = y - 1
while ((yf >= 0) and (self.surface[x][yf] == 0)):
yf = yf - 1
readings[RIGHT] = readings[RIGHT] + 1
return readings
def markVisible(self, x, y):
marked = 0
if self.surface[x][y] == 0:
marked += 1
self.surface[x][y] = 2
# UP
xf = x - 1
while ((xf >= 0) and (self.surface[xf][y] != 1)):
# add to the count if it wasn't marked previously
if self.surface[xf][y] == 0:
marked += 1
self.surface[xf][y] = 2
xf = xf - 1
# DOWN
xf = x + 1
while ((xf < self.n) and (self.surface[xf][y] != 1)):
# add to the count if it wasn't marked previously
if self.surface[xf][y] == 0:
marked += 1
self.surface[xf][y] = 2
xf = xf + 1
# LEFT
yf = y + 1
while ((yf < self.m) and (self.surface[x][yf] != 1)):
# add to the count if it wasn't marked previously
if self.surface[x][yf] == 0:
marked += 1
self.surface[x][yf] = 2
yf = yf + 1
# RIGHT
yf = y - 1
while ((yf >= 0) and (self.surface[x][yf] != 1)):
# add to the count if it wasn't marked previously
if self.surface[x][yf] == 0:
marked += 1
self.surface[x][yf] = 2
yf = yf - 1
return marked
# def image(self, colour=BLUE, background=WHITE):
# imagine = pygame.Surface((400, 400))
# brick = pygame.Surface((20, 20))
# destination = pygame.Surface((20, 20))
# roadGreedy = pygame.Surface((20, 20))
# roadAStar = pygame.Surface((20, 20))
# common_road = pygame.Surface((20, 20))
# brick.fill(BLUE)
# imagine.fill(WHITE)
# destination.fill(RED)
#
# for i in range(self.n):
# for j in range(self.m):
# if (self.surface[i][j] == 1):
# imagine.blit(brick, (j * 20, i * 20))
# if (self.surface[i][j] == 2):
# imagine.blit(destination, (j * 20, i * 20))
# if (self.surface[i][j] == 3):
# imagine.blit(roadGreedy, (j * 20, i * 20))
# if (self.surface[i][j] == 4):
# imagine.blit(roadAStar, (j * 20, i * 20))
# if (self.surface[i][j] == 5):
# imagine.blit(common_road, (j * 20, i * 20))
#
# return imagine
def get_neighbours(self, xi, yi):
possibilities = [(xi + 1, yi), (xi - 1, yi), (xi, yi + 1), (xi, yi - 1)]
# squares have coordinates between 0 and 19
first_cut = list(filter(lambda t: (0 <= t[0] <= 19 and 0 <= t[1] <= 19), possibilities))
return list(filter(lambda t: (self.surface[t[0]][t[1]] == 0 or self.surface[t[0]][t[1]] >= 2), first_cut))
def convertChromozomeToPath(self, chromozome, x, y):
path = []
path.append([x,y])
posx = x
posy = y
for gene in chromozome:
direction = gene.get_direction()
if direction == UP:
posx = posx - 1
if not (0 <= posx <= 19) or not (0 <= posy <= 19) or (self.surface[posx][posy] == 1):
posx = posx + 1
continue
# score += copy_map.markVisible(posx, posy)
elif direction == DOWN:
posx = posx + 1
if not (0 <= posx <= 19) or not (0 <= posy <= 19) or (self.surface[posx][posy] == 1):
posx = posx - 1
continue
# score += copy_map.markVisible(posx, posy)
elif direction == LEFT:
posy = posy - 1
if not (0 <= posx <= 19) or not (0 <= posy <= 19) or (self.surface[posx][posy] == 1):
posy = posy + 1
continue
# score += copy_map.markVisible(posx, posy)
elif direction == RIGHT:
posy = posy + 1
if not (0 <= posx <= 19) or not (0 <= posy <= 19) or (self.surface[posx][posy] == 1):
posy = posy - 1
continue
# score += copy_map.markVisible(posx, posy)
print('added')
path.append([posx, posy])
return path
class Statistics:
def __init__(self):
self.runs = []
self.best = []
self.std = []
def add_generation_score(self, score):
self.runs.append(score)
def add_best_score(self, score):
self.best.append(score)
def add_standard_deviation(self, std):
self.std.append(std)
def get_scores(self):
return self.runs, self.best, self.std
|
[
"mihaisilinc@yahoo.com"
] |
mihaisilinc@yahoo.com
|
262a5197dd5ba8597678e06daaf76597fdf97d15
|
a4c9b7353a31d9aac6919fbac601cf365eee9cb7
|
/social_example_project/project_specific.py
|
19ff00b4d651ff7ae129143a36e8945577b6f673
|
[] |
no_license
|
crass/django-social
|
b885e788dd4ace57335141e7fef4106474b70c37
|
cce7516dbf16509ba1e6e4bb55c6c0b7c8b9c7d9
|
refs/heads/master
| 2020-04-01T18:43:31.889103
| 2012-03-05T07:44:28
| 2012-03-05T07:44:28
| 3,245,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,234
|
py
|
from django import shortcuts
from django.db.models import signals
from django.contrib.auth.models import User
from social.notification import Notification
# deploy contrib for a quick try
from social.contrib import comments
# auto subscribe users to objects they comment
comments.signals.comment_was_posted.connect(comments.comments_subscription)
# auto notify subscribers of an object when it receives a comment
# full blown, reusable:
# comments.signals.comment_was_posted.connect(comments.comment_notification)
# quickstart example:
def comment_notification(sender, comment=None, **kwargs):
Notification(comment=comment, template='comment_quickstart',
subscribers_of=comment.content_object).emit()
comments.signals.comment_was_posted.connect(comment_notification)
from social.contrib import auth
auth.signals.post_save.connect(auth.subscribe_user_to_himself, sender=User)
auth.subscribe_existing_users_to_themselves(None)
def user_detail(request, username,
template_name='auth/user_detail.html', extra_context=None):
context = {
'user': shortcuts.get_object_or_404(User, username=username)
}
context.update(extra_context or {})
return shortcuts.render(request, template_name, context)
|
[
"jamespic@gmail.com"
] |
jamespic@gmail.com
|
2968955f3edef66592f8efcbb0a5fc9bdd9b1e10
|
1bdbf12e6fa1091beeb4ce0d923664e779cda509
|
/tuples.py
|
7962b449b5c5faf5de4dbe4080c489c970d8225c
|
[] |
no_license
|
balajisaikumar2000/Python-Snippets
|
d61176f3a3752861c5a355c09b27c418727b90fd
|
b36a642b99cb2517438f773d21654ae4fef2cc05
|
refs/heads/master
| 2023-01-24T05:57:03.654796
| 2020-12-05T13:38:58
| 2020-12-05T13:38:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
#immutable
x = tuple(("apple","banana","cherry"))
y = list(x)
y[0] = "mango"
x = tuple(y)
print(x)
#creating single tuple
a = ("balaji") #this will cause an error
print(type(a))
b= ("balaji",) #correct process
print(type(b))
#count():
z = (1,2,4,1,2,6,7,8,4,6)
print(z.count(2))
#index():
m = (1,2,3,6,3,5,6,3,9,6,1)
print(m.index(6))
|
[
"balajisaikumar3@gmail.com"
] |
balajisaikumar3@gmail.com
|
8739c3d861b4a52332c4d5679e34725d6ed4d01e
|
bee554d289d8f18fb04dea4adaaccb03b28e5efb
|
/Re/Demo1.py
|
0b8c80a8a1a0dea001c3476e416326b17a91ca1f
|
[] |
no_license
|
loveCanopy/Python
|
f24e1fce68850e895dcf24b4da6b65928ba43caf
|
4bf8838b7b73596f502349b1450c7c48767e3a57
|
refs/heads/master
| 2021-01-10T08:17:05.101714
| 2017-03-14T08:59:42
| 2017-03-14T08:59:42
| 54,179,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
#compile match split sub
import re
pattern=re.compile(r'hello')
match=pattern.match("hello world")
m=re.match(r"hell",'hellworld')
n=re.match(r"world$","hellworld")
p=re.compile(r'\d+')
r=p.split("one1two2three3four4")
f=p.findall("one1two2three3four4")
l=re.search(r'he','worhell')
print(l)
p2=re.compile(r'[abc]')
k=p2.sub('O','Markmarkmarkmark',count=2)
print(k)
print(f)
print(r)
print(n)
print(m.group())
print(match.group())
|
[
"1039431583@qq.com"
] |
1039431583@qq.com
|
d63652dbb64052fd613ea28b3451203c2542516c
|
9fc4767cb81fa96c920ac2a59a74448c9875c574
|
/ABC/ABC067/a.py
|
b61046b2b452f1aff06aff4dacc332975578ee39
|
[] |
no_license
|
temp176/AtCoder
|
ce6a2070d8d3931256fae29303f6efc8d50902c8
|
70c5489cfdff95cb400791b63ff4a8fc3e9c3b9b
|
refs/heads/master
| 2021-06-08T17:45:59.475097
| 2020-05-17T05:32:46
| 2020-05-17T05:32:46
| 132,405,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
A,B=[int(i) for i in input().split()]
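# the check below succeeds when A, B, or A+B is divisible by 3, i.e. when one
# pile (or both combined) can be split into three equal parts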
if A % 3 == 0 or B % 3 == 0 or (A + B) % 3 == 0:
print('Possible')
else:
print('Impossible')
|
[
"teheodks@gmail.com"
] |
teheodks@gmail.com
|
0aab8557c40a843eb79d544ec0964ebe1827f7b2
|
8902f2f30a35f0a87b3d40196fda7c74a883a535
|
/core/tests/conftest.py
|
95fc6217fe80f533e8cf88898e4482ae83bb9759
|
[] |
no_license
|
sleonardoaugusto/scanpy
|
e970e89c74818b438689586cd181141cb37b9e9e
|
14d61d06f62a9840ad94b887512ab6da4b6744e9
|
refs/heads/main
| 2023-08-05T12:24:16.318014
| 2021-09-06T07:16:42
| 2021-09-07T21:55:16
| 398,474,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,172
|
py
|
from pathlib import Path
import pytest
from core.pages.pages import Login, Home, Settings
from core.webdriver import Driver
from scrapper.scrapper import BASE_URL
@pytest.fixture(scope='session')
def fix_user():
class User:
username = 'bobbybackupy'
password = 'Argyleawesome123!'
secret_key = 'J6PMJ5GNXMGVU47A'
return User()
@pytest.fixture(scope='session')
def fix_login_page():
with Driver(path='webdrivers/chromedriver') as driver:
login_page = Login(webdriver=driver, url=BASE_URL)
login_page.open()
yield login_page
@pytest.fixture(scope='session')
def fix_home_page(fix_login_page, fix_user):
webdriver = fix_login_page.webdriver
fix_login_page.login(username=fix_user.username, password=fix_user.password)
home = Home(webdriver)
yield home
@pytest.fixture(scope='session')
def fix_settings_page(fix_home_page, fix_user):
webdriver = fix_home_page.webdriver
fix_home_page.navbar.open_settings(fix_user.secret_key)
settings = Settings(webdriver)
yield settings
@pytest.fixture(autouse=True, scope='session')
def delete_logs():
    Path('log.log').unlink(missing_ok=True)  # avoid failing when the log does not exist yet (Python 3.8+)
|
[
"sleonardoaugusto@gmail.com"
] |
sleonardoaugusto@gmail.com
|
34cca73a90032fba4534e5ecaff98d67950e3f3b
|
3fef5ea4a88bc42a0acdb7ae581639ebbb3dd962
|
/tb_store/tb_store/apps/users/urls.py
|
417f3eb0393e93d10a2fd7acd9b0985ab7c96cbc
|
[] |
no_license
|
ducgt/Django-Store
|
96476597811c6db772e196d9cd56ef9516e1e287
|
f68b7dd657b2a9c5769a3a657be083dad5b2ff11
|
refs/heads/master
| 2021-04-21T20:54:54.811437
| 2019-07-22T08:16:44
| 2019-07-22T08:17:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
from django.conf.urls import url
from rest_framework import routers
from . import views
from rest_framework_jwt.views import obtain_jwt_token
urlpatterns = [
url(r'^usernames/(?P<username>\w{5,20})/count/$', views.UsernameCountView.as_view()),
url(r'^mobiles/(?P<mobile>1[3-9]\d{9})/count/$', views.MobileCountView.as_view()),
url(r'^usernames/$', views.UserView.as_view()),
# url(r'^authorizations/$', obtain_jwt_token),
url(r"^user/$", views.UserDetailView.as_view()),
url(r"^email/$", views.EmailView.as_view()),
url(r'^emails/verification/$', views.VerifyEmailView.as_view()),
url(r'^browse_histories/$', views.UserBrowsingHistoryView.as_view()),
url(r'^password/$', views.PassWord2.as_view()),
url(r'^authorizations/$', views.UserAuthorizeView.as_view()),
]
router = routers.DefaultRouter()
router.register(r'addresses', views.AddressViewSet, base_name='addresses')
urlpatterns += router.urls
# POST /addresses/ create -> create
# PUT /addresses/<pk>/ update -> update
# GET /addresses/ list -> list
# DELETE /addresses/<pk>/ delete -> destroy
# PUT /addresses/<pk>/status/ set default -> status
# PUT /addresses/<pk>/title/ set title -> title
|
[
"15670339118@qq.com"
] |
15670339118@qq.com
|
34e8b079677924bf95cde928c5a71e25e99cf66d
|
0e294ec96263fafc3f39aa2f4bb2fff49569582e
|
/bach_resize.py
|
370475c1ad81d44b6ac4d5448775c00103b339ee
|
[] |
no_license
|
dailing/mask_rcnn
|
6988f529b57fc2bfe3f79f060b3b6eeaec79330f
|
f499c8f98c5e9a9a23d7cbe8c1036ada81e16655
|
refs/heads/master
| 2020-05-25T10:37:22.463188
| 2020-04-19T10:32:04
| 2020-04-19T10:32:04
| 187,763,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
import os.path
import cv2
from tqdm import tqdm
from random import shuffle
from util.augment import Compose, FundusAOICrop, Resize, ResizeKeepAspectRatio
input_dir = '../dataset/'
out_dir = '../dataset/small_pic'
os.makedirs(out_dir, exist_ok=True)
files = os.listdir(input_dir)
shuffle(files)
for i in tqdm(files):
ofile = f'{out_dir}/{i}'
if os.path.exists(ofile):
continue
    transform = Compose(
        (ResizeKeepAspectRatio(299),)
    )
img = cv2.imread(f'{input_dir}/{i}', cv2.IMREAD_COLOR)
img = transform(img)
ok, content = cv2.imencode('.png', img)
assert ok is True
with open(ofile, 'wb') as f:
f.write(content)
# cv2.imwrite(ofile, img, dict(ext='png'))
|
[
"qzyz_dailing@163.com"
] |
qzyz_dailing@163.com
|
98d55bb8d3c2863135d11c2f7e2e4a05ccf5512e
|
95e42bbdd441f70bd5647668de0b609eb46d5edf
|
/Part 3 - Classification/Section 15 - K-Nearest Neighbors (K-NN)/my_knn.py
|
a2ee7ecebce220b0ade5b9dacd34cc208ee76e31
|
[] |
no_license
|
varunnaagaraj/ML-A-Z_Course
|
1b4f90e8ab4e00a670ea1d3fe3d0bd2575bf876f
|
ad36d9ebcae7d04e9a8015508b9dc87f26146283
|
refs/heads/master
| 2020-05-02T23:59:53.165017
| 2019-03-28T23:16:47
| 2019-03-28T23:16:47
| 178,296,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,568
|
py
|
# K Nearest Neighbors
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2,3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split # sklearn.cross_validation was removed in scikit-learn 0.20
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# Fitting the logistic regression model
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors=5, metric="minkowski", p=2)
classifier.fit(X_train, y_train)
#Prediction
y_pred = classifier.predict(X_test)
#Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
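# In this confusion matrix, rows are true classes and columns are predictions:
# the diagonal counts correct predictions, the off-diagonals count the errors.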
#Visualization
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('K-NN (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('K-NN (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
|
[
"varun.naagaraj@gmail.com"
] |
varun.naagaraj@gmail.com
|
8dfc8d52199cc913852b7553e3552f508e77e2ca
|
b9bd670454d308c49f80156d6f493be97d2ce696
|
/Zhihu/Zhihu/settings.py
|
2e5c9b9216d2481b14236bdc2e976664a2b6391d
|
[] |
no_license
|
CP-0/py_spider_codes
|
2cf3ee2dd8143891c0e28c5298a3e902b9a35fc4
|
d7875b33c21d06b3a9af952248bf9f3acd50db93
|
refs/heads/master
| 2021-09-20T01:13:38.914499
| 2018-08-02T05:54:30
| 2018-08-02T05:54:30
| 112,149,605
| 83
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,372
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for Zhihu project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Zhihu'
SPIDER_MODULES = ['Zhihu.spiders']
NEWSPIDER_MODULE = 'Zhihu.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'Zhihu (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
'authorization': 'oauth c3cef7c66a1843f8b3a9e6a1e3160e20',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'
}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'Zhihu.middlewares.ZhihuSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'Zhihu.middlewares.MyCustomDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'Zhihu.pipelines.MongoPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
MONGO_URI = 'localhost'
MONGO_DATABASE = 'zhihu'
|
[
"cyj001@cyj721001.uu.me"
] |
cyj001@cyj721001.uu.me
|
aa39e1ee08966f77ab522c5c903530e0749a334e
|
4114590f97ca3734862c9255f1a4bbc730ded8d7
|
/manage.py
|
968edc33bd4ccde3e25d2a75a4000a4e62c18250
|
[] |
no_license
|
filipe027/probex2018
|
dea369f784f3c3ab82787736a79d3ee0a09d79f8
|
37c5bb12987cb39c0fe682d5b912f25b8dcd92c7
|
refs/heads/master
| 2020-03-23T05:53:22.024489
| 2019-11-10T22:10:38
| 2019-11-10T22:10:38
| 141,173,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Chuvas.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"filipe.freitas@cear.ufpb.br"
] |
filipe.freitas@cear.ufpb.br
|
24b38a0832ea721afae965b47d43a1cd21e4b07a
|
d4b67c170af87fb407d8088e1fbb8e3600231533
|
/optimize_cage.py
|
abff3ee09cd1c2afe80a318c3456a2f3bcdd6245
|
[
"MIT"
] |
permissive
|
qingmeizhujiu/deep_cage
|
404708d1c6b1659ab9b7bab6c0c9cc32f051740d
|
da4c07dcc5c10d44c1b040fbbd0d897ea5ab1f98
|
refs/heads/master
| 2022-12-07T04:26:51.625134
| 2020-09-08T08:25:11
| 2020-09-08T08:25:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,963
|
py
|
""" Optimize the initial cage for a new source shape """
from __future__ import print_function
from pprint import pprint
import traceback
import sys
import datetime
import shutil
import torch
import os
import numpy as np
import pymesh
from pytorch_points.misc import logger
from pytorch_points.network.operations import faiss_knn
from pytorch_points.network.geo_operations import mean_value_coordinates_3D, edge_vertex_indices
from pytorch_points.utils.pc_utils import load, save_ply, save_ply_with_face, center_bounding_box
from pytorch_points.utils.geometry_utils import get_edge_points
from pytorch_points.utils.pytorch_utils import weights_init, check_values, save_network, load_network, \
clamp_gradient_norm, tolerating_collate, clamp_gradient, fix_network_parameters
from pytorch_points.network.model_loss import MeshLaplacianLoss
import network2 as networks
from common import loadInitCage, build_dataset, deform_with_MVC, read_trimesh
from losses import MVCRegularizer
from option import DeformationOptions
from matplotlib.colors import Normalize
from matplotlib import cm
import openmesh as om
import pandas as pd
class MyOptions(DeformationOptions):
def initialize(self, parser):
parser.add_argument(
"--model", type=str, default="/home/mnt/points/data/MPI-FAUST/training/registrations/tr_reg_010.ply")
parser.add_argument("--use_cage", type=str,
help="path to optimized cage")
parser.add_argument("--opt_lap", action="store_true",
help="optimize deformed shape using laplacian")
return super().initialize(parser)
def parse(self):
super().parse()
assert(self.opt.source_model is not None), "source model is required for optimize_cage"
if not isinstance(self.opt.source_model, str):
self.opt.source_model = self.opt.source_model[0]
return self.opt
def visualize_correspondence(opt, source_shape, source_face, target_shape, target_face, corres_1, corres_2):
"""
source_shape (1,N,3)
source_face (1,F,3)
target_shape (1,N2,3)
target_face (1,F2,3)
corres_face_1 (P)
corres_face_2 (P)
"""
# save these points with color codes
P = corres_2.shape[0]
assert(corres_1.shape[0] == corres_2.shape[0])
corres_1 = corres_1.cpu().numpy().reshape(-1)
corres_2 = corres_2.cpu().numpy().reshape(-1)
normalize = Normalize(vmin=0, vmax=corres_1.shape[0])
cmap = cm.get_cmap("jet")
colors_picked = cmap(normalize(np.arange(P, dtype=np.float32)))[:, :3]
colors_source = np.ones((source_face.shape[1], 3), dtype=np.float32)
colors_source[corres_1, :] = colors_picked
save_ply_with_face(source_shape[0].cpu().detach().numpy(), source_face[0].cpu().detach().numpy(),
os.path.join(opt.log_dir, opt.subdir, "source_corr.ply"), colors_source)
colors_target = np.ones((target_face.shape[1], 3), dtype=np.float32)
colors_target[corres_2, :] = colors_picked
save_ply_with_face(target_shape[0].cpu().detach().numpy(), target_face[0].cpu().detach().numpy(),
os.path.join(opt.log_dir, opt.subdir, "target_corr.ply"), colors_target)
def optimize(opt):
"""
weights are the same with the original source mesh
target=net(old_source)
"""
# load new target
if opt.is_poly:
target_mesh = om.read_polymesh(opt.model)
else:
target_mesh = om.read_trimesh(opt.model)
target_shape_arr = target_mesh.points()
target_shape = target_shape_arr.copy()
target_shape = torch.from_numpy(
target_shape[:, :3].astype(np.float32)).cuda()
target_shape.unsqueeze_(0)
target_faces_arr = target_mesh.face_vertex_indices()
target_faces = target_faces_arr.copy()
target_faces = torch.from_numpy(
target_faces[:, :3].astype(np.int64)).cuda()
target_faces.unsqueeze_(0)
states = torch.load(opt.ckpt)
if "states" in states:
states = states["states"]
cage_v = states["template_vertices"].transpose(1, 2).cuda()
cage_f = states["template_faces"].cuda()
shape_v = states["source_vertices"].transpose(1, 2).cuda()
shape_f = states["source_faces"].cuda()
if os.path.isfile(opt.model.replace(os.path.splitext(opt.model)[1], ".picked")) and os.path.isfile(opt.source_model.replace(os.path.splitext(opt.source_model)[1], ".picked")):
new_label_path = opt.model.replace(os.path.splitext(opt.model)[1], ".picked")
orig_label_path = opt.source_model.replace(os.path.splitext(opt.source_model)[1], ".picked")
logger.info("Loading picked labels {} and {}".format(orig_label_path, new_label_path))
new_label = pd.read_csv(new_label_path, delimiter=" ",skiprows=1, header=None)
orig_label = pd.read_csv(orig_label_path, delimiter=" ",skiprows=1, header=None)
orig_label_name = orig_label.iloc[:,5]
new_label_name = new_label.iloc[:,5].tolist()
new_to_orig_idx = []
for i, name in enumerate(new_label_name):
matched_idx = orig_label_name[orig_label_name==name].index
if matched_idx.size == 1:
new_to_orig_idx.append((i, matched_idx[0]))
new_to_orig_idx = np.array(new_to_orig_idx)
if new_label.shape[1] == 10:
new_vidx = new_label.iloc[:,9].to_numpy()[new_to_orig_idx[:,0]]
target_points = target_shape[:, new_vidx, :]
else:
new_label_points = torch.from_numpy(new_label.iloc[:,6:9].to_numpy().astype(np.float32))
target_points = new_label_points.unsqueeze(0).cuda()
target_points, new_vidx, _ = faiss_knn(1, target_points, target_shape, NCHW=False)
target_points = target_points.squeeze(2) # B,N,3
new_label[9] = new_vidx.squeeze(0).squeeze(-1).cpu().numpy()
new_label.to_csv(new_label_path, sep=" ", header=[str(new_label.shape[0])]+[""]*(new_label.shape[1]-1), index=False)
target_points = target_points[:, new_to_orig_idx[:,0], :]
target_points = target_points.cuda()
source_shape, _ = read_trimesh(opt.source_model)
source_shape = torch.from_numpy(source_shape[None, :,:3]).float()
if orig_label.shape[1] == 10:
orig_vidx = orig_label.iloc[:,9].to_numpy()[new_to_orig_idx[:,1]]
source_points = source_shape[:, orig_vidx, :]
else:
orig_label_points = torch.from_numpy(orig_label.iloc[:,6:9].to_numpy().astype(np.float32))
source_points = orig_label_points.unsqueeze(0)
# find the closest point on the original meshes
source_points, new_vidx, _ = faiss_knn(1, source_points, source_shape, NCHW=False)
source_points = source_points.squeeze(2) # B,N,3
orig_label[9] = new_vidx.squeeze(0).squeeze(-1).cpu().numpy()
orig_label.to_csv(orig_label_path, sep=" ", header=[str(orig_label.shape[0])]+[""]*(orig_label.shape[1]-1), index=False)
source_points = source_points[:,new_to_orig_idx[:,1],:]
_, source_center, _ = center_bounding_box(source_shape[0])
source_points -= source_center
source_points = source_points.cuda()
# # shift target so that the belly match
# try:
# orig_bellyUp_idx = orig_label_name[orig_label_name=="bellUp"].index[0]
# orig_bellyUp = orig_label_points[orig_bellyUp_idx, :]
# new_bellyUp_idx = [i for i, i2 in new_to_orig_idx if i2==orig_bellyUp_idx][0]
# new_bellyUp = new_label_points[new_bellyUp_idx,:]
# target_points += (orig_bellyUp - new_bellyUp)
# except Exception as e:
# logger.warn("Couldn\'t match belly to belly")
# traceback.print_exc(file=sys.stdout)
# source_points[0] = center_bounding_box(source_points[0])[0]
elif not os.path.isfile(opt.model.replace(os.path.splitext(opt.model)[1], ".picked")) and \
os.path.isfile(opt.source_model.replace(os.path.splitext(opt.source_model)[1], ".picked")):
logger.info("Could not find {}. Assuming SMPL model".format(opt.model.replace(os.path.splitext(opt.model)[1], ".picked")))
source_shape, source_faces = read_trimesh(opt.source_model)
assert(source_faces.shape[0] == target_faces.shape[1]), \
"opt.model must be a SMPL model with {} faces and {} vertices. Otherwise a correspondence file {} must be present.".format(
source_faces.shape[0], source_shape.shape[0], opt.model.replace(os.path.splitext(opt.model)[1], ".picked"))
# align faces not vertices
orig_label_path = opt.source_model.replace(os.path.splitext(opt.source_model)[1], ".picked")
logger.info("Loading picked labels {}".format(orig_label_path))
orig_label = pd.read_csv(orig_label_path, delimiter=" ",skiprows=1, header=None)
source_shape = torch.from_numpy(source_shape[None, :, :3]).cuda().float()
source_faces = torch.from_numpy(source_faces[None, :, :3]).cuda().long()
idx = torch.from_numpy(orig_label.iloc[:,1].to_numpy()).long()
source_points = torch.gather(source_shape.unsqueeze(1).expand(-1, idx.numel(), -1, -1), 2, source_faces[:, idx, :, None].expand(-1, -1, -1, 3))
source_points = source_points.mean(dim=-2)
target_points = torch.gather(target_shape.unsqueeze(1).expand(-1, idx.numel(), -1, -1), 2, target_faces[:, idx, :, None].expand(-1, -1, -1, 3))
target_points = target_points.mean(dim=-2)
_, source_center, _ = center_bounding_box(source_shape[0])
source_points -= source_center
elif not os.path.isfile(opt.model.replace(os.path.splitext(opt.model)[1], ".picked")):
logger.info("Could not find {}. Assuming SMPL model".format(opt.model.replace(os.path.splitext(opt.model)[1], ".picked")))
source_shape, source_faces = read_trimesh(opt.source_model)
assert(source_faces.shape[0] == target_faces.shape[1]), \
"opt.model must be a SMPL model with {} faces and {} vertices. Otherwise a correspondence file {} must be present.".format(
source_faces.shape[0], source_shape.shape[0], opt.model.replace(os.path.splitext(opt.model)[1], ".picked"))
        source_shape = torch.from_numpy(source_shape[None, :, :3]).cuda().float()
        source_faces = torch.from_numpy(source_faces[None, :, :3]).cuda().long()
        _, source_center, _ = center_bounding_box(source_shape[0])
        # sample a random subset of faces, otherwise optimization is too slow
        idx = torch.from_numpy(np.random.permutation(source_faces.shape[1])[:2048]).cuda().long()
        source_points = torch.gather(source_shape.unsqueeze(1).expand(-1, idx.numel(), -1, -1), 2, source_faces[:, idx, :, None].expand(-1, -1, -1, 3))
        source_points = source_points.mean(dim=-2)
        source_points -= source_center
        target_points = torch.gather(target_shape.unsqueeze(1).expand(-1, idx.numel(), -1, -1), 2, target_faces[:, idx, :, None].expand(-1, -1, -1, 3))
        target_points = target_points.mean(dim=-2)
target_shape[0], target_center, target_scale = center_bounding_box(target_shape[0])
_, _, source_scale = center_bounding_box(shape_v[0])
# scale according y axis (body height)
target_scale_factor = (source_scale/target_scale)[0,1]
target_shape *= target_scale_factor
target_points -= target_center
target_points = (target_points*target_scale_factor).detach()
# make sure test use the normalized
target_shape_arr[:] = target_shape[0].cpu().numpy()
om.write_mesh(os.path.join(opt.log_dir, opt.subdir, os.path.splitext(
os.path.basename(opt.model))[0]+"_normalized.obj"), target_mesh)
opt.model = os.path.join(opt.log_dir, opt.subdir, os.path.splitext(
os.path.basename(opt.model))[0]+"_normalized.obj")
pymesh.save_mesh_raw(os.path.join(opt.log_dir, opt.subdir, "template-initial.obj"),
shape_v[0].cpu().numpy(), shape_f[0].cpu().numpy())
pymesh.save_mesh_raw(os.path.join(opt.log_dir, opt.subdir, "cage-initial.obj"),
cage_v[0].cpu().numpy(), cage_f[0].cpu().numpy())
save_ply(target_points[0].cpu().numpy(), os.path.join(
opt.log_dir, opt.subdir, "target_points.ply"))
save_ply(source_points[0].cpu().numpy(), os.path.join(
opt.log_dir, opt.subdir, "source_points.ply"))
logger.info("Optimizing for {} corresponding vertices".format(
target_points.shape[1]))
cage_init = cage_v.clone().detach()
lap_loss = MeshLaplacianLoss(torch.nn.MSELoss(reduction="none"), use_cot=True,
use_norm=True, consistent_topology=True, precompute_L=True)
mvc_reg_loss = MVCRegularizer(threshold=50, beta=1.0, alpha=0.0)
cage_v.requires_grad_(True)
optimizer = torch.optim.Adam([cage_v], lr=opt.lr, betas=(0.5, 0.9))
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, int(opt.nepochs*0.4), gamma=0.5, last_epoch=-1)
if opt.dim == 3:
weights_ref = mean_value_coordinates_3D(
source_points, cage_init, cage_f, verbose=False)
else:
raise NotImplementedError
for t in range(opt.nepochs):
optimizer.zero_grad()
weights = mean_value_coordinates_3D(
target_points, cage_v, cage_f, verbose=False)
loss_mvc = torch.mean((weights-weights_ref)**2)
# reg = torch.sum((cage_init-cage_v)**2, dim=-1)*1e-4
reg = torch.tensor(0.0).cuda()
if opt.clap_weight > 0:
reg = lap_loss(cage_init, cage_v, face=cage_f)*opt.clap_weight
reg = reg.mean()
if opt.mvc_weight > 0:
reg += mvc_reg_loss(weights)*opt.mvc_weight
loss = loss_mvc + reg
if (t+1) % 50 == 0:
print("t {}/{} mvc_loss: {} reg: {}".format(t, opt.nepochs, loss_mvc.item(), reg.item()))
if loss_mvc.item() < 5e-6:
break
loss.backward()
optimizer.step()
scheduler.step()
return cage_v, cage_f
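# --- Illustrative note (not part of the original script) ---
# Mean value coordinates reproduce the enclosed points as an affine
# combination of the cage vertices, which is why matching the target
# weights to `weights_ref` transfers the source-to-cage relationship onto
# the target. A sketch of the property, assuming `weights` has shape
# (B, N, C) against cage vertices of shape (B, C, 3):
#   weights = mean_value_coordinates_3D(points, cage_v, cage_f)
#   reconstructed = torch.bmm(weights, cage_v)   # ~= points, rows sum to 1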
def test_one(opt, cage_shape, new_source, new_source_face, new_target, new_target_face):
states = torch.load(opt.ckpt)
if "states" in states:
states = states["states"]
pymesh.save_mesh_raw(os.path.join(opt.log_dir, opt.subdir, "template-initial.ply"),
states["source_vertices"][0].transpose(
0, 1).detach().cpu(),
states["source_faces"][0].detach().cpu())
# states["template_vertices"] = cage_shape.transpose(1, 2)
# states["source_vertices"] = new_source.transpose(1, 2)
# states["source_faces"] = new_source_face
pymesh.save_mesh_raw(os.path.join(opt.log_dir, opt.subdir, "template-Sa.ply"),
new_source[0].detach().cpu(), new_source_face[0].detach().cpu())
pymesh.save_mesh_raw(os.path.join(opt.log_dir, opt.subdir, "template-Sb.ply"),
new_target[0].detach().cpu(), new_target_face[0].detach().cpu())
net = networks.FixedSourceDeformer(opt, 3, opt.num_point, bottleneck_size=512,
template_vertices=cage_shape.transpose(1, 2), template_faces=states["template_faces"].cuda(),
source_vertices=new_source.transpose(1, 2), source_faces=new_source_face).cuda()
net.eval()
load_network(net, states)
outputs = net(new_target.transpose(1, 2).contiguous())
deformed = outputs["deformed"]
pymesh.save_mesh_raw(os.path.join(opt.log_dir, opt.subdir, "template-Sab.ply"),
deformed[0].detach().cpu(), new_target_face[0].detach().cpu())
def test_all(opt, new_cage_shape):
opt.phase = "test"
opt.target_model = None
print(opt.model)
if opt.is_poly:
source_mesh = om.read_polymesh(opt.model)
else:
source_mesh = om.read_trimesh(opt.model)
dataset = build_dataset(opt)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, drop_last=False,
collate_fn=tolerating_collate,
num_workers=0, worker_init_fn=lambda id: np.random.seed(np.random.get_state()[1][0] + id))
states = torch.load(opt.ckpt)
if "states" in states:
states = states["states"]
# states["template_vertices"] = new_cage_shape.transpose(1, 2)
# states["source_vertices"] = new_source.transpose(1,2)
# states["source_faces"] = new_source_face
# new_source_face = states["source_faces"]
om.write_mesh(os.path.join(opt.log_dir, opt.subdir,
"template-Sa.ply"), source_mesh)
net = networks.FixedSourceDeformer(opt, 3, opt.num_point, bottleneck_size=opt.bottleneck_size,
template_vertices=states["template_vertices"], template_faces=states["template_faces"].cuda(),
source_vertices=states["source_vertices"], source_faces=states["source_faces"]).cuda()
print(net)
load_network(net, states)
source_points = torch.from_numpy(
source_mesh.points().copy()).float().cuda().unsqueeze(0)
with torch.no_grad():
# source_face = net.source_faces.detach()
for i, data in enumerate(dataloader):
data = dataset.uncollate(data)
target_shape, target_filename = data["target_shape"], data["target_file"]
logger.info("", data["target_file"][0])
sample_idx = None
if "sample_idx" in data:
sample_idx = data["sample_idx"]
outputs = net(target_shape.transpose(1, 2), cage_only=True)
if opt.d_residual:
cage_offset = outputs["new_cage"]-outputs["cage"]
outputs["cage"] = new_cage_shape
outputs["new_cage"] = new_cage_shape+cage_offset
deformed = deform_with_MVC(outputs["cage"], outputs["new_cage"], outputs["cage_face"].expand(
outputs["cage"].shape[0], -1, -1), source_points)
for b in range(deformed.shape[0]):
t_filename = os.path.splitext(target_filename[b])[0]
source_mesh_arr = source_mesh.points()
                source_mesh_arr[:] = deformed[b].cpu().detach().numpy()
om.write_mesh(os.path.join(
opt.log_dir, opt.subdir, "template-{}-Sab.obj".format(t_filename)), source_mesh)
pymesh.save_mesh_raw(os.path.join(opt.log_dir, opt.subdir, "template-{}-Sb.ply".format(t_filename)),
data["target_mesh"][b].detach().cpu(), data["target_face"][b].detach().cpu())
pymesh.save_mesh_raw(
os.path.join(opt.log_dir, opt.subdir, "template-{}-cage1.ply".format(t_filename)),
outputs["cage"][b].detach().cpu(), outputs["cage_face"][b].detach().cpu(),
)
pymesh.save_mesh_raw(
os.path.join(opt.log_dir, opt.subdir, "template-{}-cage2.ply".format(t_filename)),
outputs["new_cage"][b].detach().cpu(), outputs["cage_face"][b].detach().cpu(),
)
if i % 20 == 0:
logger.success("[{}/{}] Done".format(i, len(dataloader)))
dataset.render_result(os.path.join(opt.log_dir, opt.subdir))
if __name__ == "__main__":
parser = MyOptions()
opt = parser.parse()
opt.log_dir = os.path.dirname(opt.ckpt)
os.makedirs(os.path.join(opt.log_dir, opt.subdir), exist_ok=True)
if opt.use_cage is None:
# optimize initial cage for the new target
cage_v, cage_f = optimize(opt)
pymesh.save_mesh_raw(os.path.join(opt.log_dir, opt.subdir, "optimized_template_cage.ply"),
cage_v[0].detach().cpu(), cage_f[0].detach().cpu())
else:
cage_v, cage_f = read_trimesh(opt.use_cage)
cage_v = torch.from_numpy(cage_v[:, :3].astype(np.float32)).cuda()
cage_f = torch.from_numpy(cage_f[:, :3].astype(np.int64)).cuda()
cage_v.unsqueeze_(0)
cage_f.unsqueeze_(0)
# # test using the new source and initial cage
# target_shape_pose, target_face_pose, _ = read_trimesh("/home/mnt/points/data/MPI-FAUST/training/registrations/tr_reg_002.ply")
# target_shape_pose = torch.from_numpy(target_shape_pose[:,:3].astype(np.float32)).cuda()
# target_face_pose = torch.from_numpy(target_face_pose[:,:3].astype(np.int64)).cuda()
# target_shape_pose, _, _ = center_bounding_box(target_shape_pose)
# target_shape_pose.unsqueeze_(0)
# target_face_pose.unsqueeze_(0)
# test_one(opt, cage_v, target_shape, target_face, target_shape_pose, target_face_pose)
test_all(opt, cage_v)
|
[
"yifan.wang@inf.ethz.ch"
] |
yifan.wang@inf.ethz.ch
|
ca84b1277c35cd6489cbf442d0400faa19000fa6
|
25d5b4790c94949bc4ac4e5575ef3b5e7e16c2ec
|
/api/models.py
|
f6df86e8908f2ab50fd7dea8e88700d379c6faa4
|
[] |
no_license
|
andre0shazam/DjangoRest
|
c5d8535ab159d7c225c817eb6c8a8399fdbbc0a5
|
421632c31538e14c6afffa0f214bf78e2f3f77f5
|
refs/heads/main
| 2023-06-16T06:28:04.931136
| 2021-07-09T03:36:36
| 2021-07-09T03:36:36
| 384,311,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
from django.db import models
class Client(models.Model):
nome = models.CharField(max_length=30)
sobrenome = models.CharField(max_length=30)
idade = models.CharField(max_length=30)
sexo = models.CharField(max_length=30)
# Create your models here.
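# --- Illustrative sketch (not part of the original file) ---
# A minimal DRF ModelSerializer for the Client model above; assumes
# djangorestframework is installed and simply exposes every model field.
from rest_framework import serializers
class ClientSerializer(serializers.ModelSerializer):
    class Meta:
        model = Client
        fields = ['id', 'nome', 'sobrenome', 'idade', 'sexo']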
|
[
"andredog058@gmail.com"
] |
andredog058@gmail.com
|
5f6fd827837ea66d8f74cc7cefddea9c8c314cc1
|
9a75cddf9eb2684dfd43cd38cd76878a0c4ff7db
|
/api/models.py
|
94473f53a9fe498eb19214bbe8eaaf0c8803848e
|
[] |
no_license
|
redcrix/amazon-scraper
|
9cf178b42815b664aec9d3a604c4227d4f3ef594
|
17145d1c7d362799d2500b463f438b090dd5d09e
|
refs/heads/master
| 2020-06-24T17:04:10.347983
| 2019-08-28T06:34:57
| 2019-08-28T06:34:57
| 199,024,434
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
from django.db import models
class Input(models.Model):
url=models.CharField(max_length=1000)
page_no=models.IntegerField()
def __str__(self):
return self.url
class Api(models.Model):
Product_name=models.CharField(max_length=50)
by_info = models.CharField(max_length=15)
Product_url = models.CharField(max_length=1000)
Product_img = models.CharField(max_length=1000)
Product_price = models.CharField(max_length=10)
rating = models.CharField(max_length=15)
total_review = models.CharField(max_length=15)
ans_ask = models.CharField(max_length=15)
prod_des = models.CharField(max_length=800)
feature = models.CharField(max_length=1000)
cust_review = models.CharField(max_length=5000)
def __str__(self):
return self.Product_name
|
[
"contact@redcrix.com"
] |
contact@redcrix.com
|
2a08c1264727f25d9e966fcfe5077a7a6a878d3c
|
12b6e1a471614339c6def409d374fa886823c829
|
/mne_nirs/statistics/tests/test_statsmodels.py
|
832f294e1f052fe1abacb380f927baf94980c38a
|
[] |
no_license
|
PiranitaGomez/mne-nirs
|
687913a2dbc9855987123c06ba9a58b6d815fbbb
|
cbf6bdcf61cccf35983006d53e3a9aff1e6fbd51
|
refs/heads/master
| 2023-03-11T05:38:05.913691
| 2021-02-26T02:43:54
| 2021-02-26T02:43:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,047
|
py
|
# Authors: Robert Luke <mail@robertluke.net>
#
# License: BSD (3-clause)
import numpy as np
from numpy.testing import assert_allclose
import pytest
import pandas as pd
import statsmodels.formula.api as smf
from ...simulation import simulate_nirs_raw
from ...experimental_design import make_first_level_design_matrix
from ...statistics import run_GLM, statsmodels_to_results
from ...utils._io import glm_to_tidy
@pytest.mark.parametrize('func', ('mixedlm', 'ols', 'rlm'))
@pytest.mark.filterwarnings('ignore:.*optimization.*:')
@pytest.mark.filterwarnings('ignore:.*on the boundary.*:')
@pytest.mark.filterwarnings('ignore:.*The Hessian matrix at the estimated.*:')
def test_statsmodel_to_df(func):
func = getattr(smf, func)
np.random.seed(0)
amplitude = 1.432
df_cha = pd.DataFrame()
for n in range(5):
raw = simulate_nirs_raw(sfreq=3., amplitude=amplitude,
sig_dur=300., stim_dur=5.,
isi_min=15., isi_max=45.)
design_matrix = make_first_level_design_matrix(raw, stim_dur=5.0)
glm_est = run_GLM(raw, design_matrix)
with pytest.warns(RuntimeWarning, match='Non standard source detect'):
cha = glm_to_tidy(raw, glm_est, design_matrix)
cha["ID"] = '%02d' % n
df_cha = df_cha.append(cha)
df_cha["theta"] = df_cha["theta"] * 1.0e6
roi_model = func("theta ~ -1 + Condition", df_cha,
groups=df_cha["ID"]).fit()
df = statsmodels_to_results(roi_model)
assert type(df) == pd.DataFrame
assert_allclose(df["Coef."]["Condition[A]"], amplitude, rtol=1e-12)
assert df["Significant"]["Condition[A]"]
assert df.shape == (8, 8)
roi_model = smf.rlm("theta ~ -1 + Condition", df_cha,
groups=df_cha["ID"]).fit()
df = statsmodels_to_results(roi_model)
assert type(df) == pd.DataFrame
assert_allclose(df["Coef."]["Condition[A]"], amplitude, rtol=1e-12)
assert df["Significant"]["Condition[A]"]
assert df.shape == (8, 8)
|
[
"noreply@github.com"
] |
PiranitaGomez.noreply@github.com
|
0df692a6adbec35cf1c008fd20438bfd4e45212a
|
6123fe5a8c0b1d7b890558c662a16450696363f4
|
/training/compare.py
|
10ab58c6a28b6a28506c055d8f89c322498f052e
|
[] |
no_license
|
RandallBalestriero/EMDGN
|
06023b6c5b09d3badc489b543e9d57d690948488
|
ce164e227e55b78b640f99d525367709a07abaed
|
refs/heads/master
| 2022-12-31T16:47:41.591652
| 2020-10-22T02:30:43
| 2020-10-22T02:30:43
| 249,811,056
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,547
|
py
|
import sys
sys.path.insert(0, "../../SymJAX")
sys.path.insert(0, "../")
import numpy as np
import symjax as sj
import symjax.tensor as T
import matplotlib.pyplot as plt
import utils
import networks
from tqdm import tqdm
import matplotlib
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--std', type=float, default=0.55)#0.15
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--dataset', type=str)
parser.add_argument('--model', type=str)
parser.add_argument('--network', type=str)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--leakiness', type=float, default=0.1)
parser.add_argument('--noise', type=float, default=0.1)
args = parser.parse_args()
np.random.seed(args.seed)
BS = 1000
DATA = networks.create_dataset(args.dataset, BS, noise_std=args.std)
if args.network == 'small':
Ds = [1, 8, DATA.shape[1]]
R = 16
elif args.network == 'large':
Ds = [1, 16, 16, DATA.shape[1]]
R = 64
graph = sj.Graph('test')
with graph:
if args.model != 'EM':
lr = sj.tensor.Variable(1., name='lr', trainable=False)
emt = networks.create_fns(BS, R, Ds, 1, var_x = np.ones(Ds[-1]),
lr=0.005, leakiness=args.leakiness)
model = networks.create_vae(50, Ds, args.seed, lr=lr,
leakiness=args.leakiness, scaler=1)
else:
model = networks.create_fns(BS, R, Ds, 1, lr=0.001, leakiness=args.leakiness,
var_x=args.std**2)
for RUN in range(20):
graph.reset()
# do the VAE case
if args.model != 'EM':
filename = 'nnsaving_likelihood_{}_{}_{}_{}_{}_{}.npz'
for lr_ in [0.005, 0.001, 0.0001]:
lr.assign(lr_)
out = networks.EM(model, DATA, epochs=args.epochs, n_iter=500, extra=emt)
np.savez(filename.format(args.dataset, args.epochs, args.model,
lr_, args.network, RUN), L=out[0], LL=out[1],
samples=model['sample'](4*BS),
noise=np.random.randn(4*BS,2) * np.sqrt(model['varx']()), data=DATA)
else:
filename = 'nnsaving_likelihood_{}_{}_{}_{}_{}.npz'
out = networks.EM(model, DATA, epochs=args.epochs, n_iter=100, update_var=4)
np.savez(filename.format(args.dataset, args.epochs, args.model, args.network, RUN), L=out,
samples=model['sample'](4*BS), noise=np.random.randn(4*BS,Ds[-1]) * np.sqrt(model['varx']()), data=DATA)
|
[
"randallbalestriero@gmail.com"
] |
randallbalestriero@gmail.com
|
93e9bc9a42dc775c374e0bc17ac9f423f8fe1b71
|
a69d8dd2a2f0ba514dc4137bec9e9a279af8cc2a
|
/links/migrations/0003_vote.py
|
01ad26a235fa08d23e59be0cc918049867743f3a
|
[] |
no_license
|
JosePedroZarate/API-react-apollo
|
db05e86d3fc3ed0aa74875a796e5a9f222326940
|
02dc6db4619135f9f9dbedd636612a5798905f3c
|
refs/heads/master
| 2023-05-17T21:28:43.285670
| 2021-06-14T19:07:49
| 2021-06-14T19:07:49
| 376,927,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 827
|
py
|
# Generated by Django 3.1.3 on 2021-06-10 02:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('links', '0002_link_posted_by'),
]
operations = [
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('link', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='links.link')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"jzaratespinosa2@gmail.com"
] |
jzaratespinosa2@gmail.com
|
59a949fbb8a08da9f8d058fcbe6e2e7aa3d462e6
|
e38913d34f512b2840c22e9cb46e20863d5ec991
|
/skills_kindregan.py
|
f008e5236f97c8a5d4a1f0b94953a7a6fef0b6d1
|
[] |
no_license
|
ultramarine7/self_assessment_4_10
|
fdfd631592ef5747f929fb1af8b50763b1661e88
|
b3ef7b44eca249249a4b58644ee11884e6da914f
|
refs/heads/master
| 2021-01-10T02:01:46.857975
| 2016-04-11T16:46:52
| 2016-04-11T16:46:52
| 55,943,382
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,416
|
py
|
"""Skills Assessment: Lists
Edit the function bodies until all of the doctests pass when you run this file.
"""
# Originally overthought this and was trying to write a code that reproduced a list. But assumed later
# the list is 1, 2, 7, -5.
# number_list = []
# i = 1
# while i < 3:
#     number_list.append(i)
#     i += 1
# i = -1
# while i > -3:
#     number_list.append(i)
#     i -= 1
def all_odd(number_list):
    """Return a list of only the odd numbers in the input list.

    >>> all_odd([1, 2, 7, -5])
    [1, 7, -5]

    >>> all_odd([2, -6, 8])
    []
    """
    odd_list = []
    for num in number_list:
        if num % 2 != 0:
            odd_list.append(num)
    return odd_list
##################################################################################
def all_even(number_list):
    """Return a list of only the even numbers in the input list.

    >>> all_even([2, 6, -1, -2])
    [2, 6, -2]

    >>> all_even([-1, 3, 5])
    []
    """
    even_list = []
    for num in number_list:
        if num % 2 == 0:
            even_list.append(num)
    return even_list
###################################################################
my_list = ["Toyota", "Jeep", "Volovo"]
def print_indexes(my_list):
"""Print the index of each item in the input_list, followed by the item itself.
Do this without using a counting variable---that is, don't do something
like this:
count = 0
for item in list:
print count
count = count + 1
Output should look like this:
>>> print_indexes(["Toyota", "Jeep", "Volvo"])
0 Toyota
1 Jeep
2 Volvo
"""
for item, index in enumerate(my_list):
print item, index
########################################################
word_list = ["all", "are", "tiny"]
long_word_list = []
def long_words(word_list):
"""Return all words in input list that are longer than 4 characters.
>>> long_words(["hello", "hey", "spam", "spam", "bacon", "bacon"])
['hello', 'bacon', 'bacon']
>>> long_words(["all", "are", "tiny"])
[]
"""
word_length = 4
for word in word_list:
if len(word) > word_length:
long_word_list.append(word)
# print long_word_list
return []
word_list = ["all", "are", "tiny"]
long_word_list = []
def long_words(word_list):
word_length = 4
for word in word_list:
if len(word) > word_length:
long_word_list.append(word)
return []
#########################################################
def smallest_int(number_list):
    """Find the smallest integer in a list of integers and return it.

    DO NOT USE the built-in function `min`!

    >>> smallest_int([-5, 2, -5, 7])
    -5

    >>> smallest_int([3, 7, 2, 8, 4])
    2

    If the input list is empty, return None:

    >>> smallest_int([]) is None
    True
    """
    smallest = None
    for number in number_list:
        if smallest is None or number < smallest:
            smallest = number
    return smallest
########################################################
def largest_int(number_list):
    """Find the largest integer in a list of integers and return it.

    DO NOT USE the built-in function `max`!

    >>> largest_int([-5, 2, -5, 7])
    7

    >>> largest_int([3, 7, 2, 8, 4])
    8

    If the input list is empty, return None:

    >>> largest_int([]) is None
    True
    """
    largest = None
    for number in number_list:
        if largest is None or number > largest:
            largest = number
    return largest
##########################################################################
def halvesies(number_list):
    """Return list of numbers from input list, each divided by two.

    >>> halvesies([2, 6, -2])
    [1.0, 3.0, -1.0]

    If any of the numbers are odd, make sure you don't round off the half:

    >>> halvesies([1, 5])
    [0.5, 2.5]
    """
    division_list = []
    for num in number_list:
        division_list.append(float(num) / 2)
    return division_list
###################################################################################
word_list = ["hello", "hey", "hello", "spam"]
length_list = []
def word_lengths(word_list):
"""Return the length of words in the input list.
>>> word_lengths(["hello", "hey", "hello", "spam"])
[5, 3, 5, 4]
"""
for word in word_list:
length = len(word)
length_list.append(length)
return length_list
##############################################################################
def sum_numbers(number_list):  # I had no idea there is sum() lol
    """Return the sum of all of the numbers in the list.

    Python has a built-in function, `sum()`, which already does this -- but for
    this exercise, you should not use it.

    >>> sum_numbers([1, 2, 3, 10])
    16

    Any empty list should return the sum of zero:

    >>> sum_numbers([])
    0
    """
    total = 0
    for num in number_list:
        total = total + num
    return total
#############################################################################################
def mult_numbers(number_list):
    """Return product (result of multiplication) of the numbers in the list.

    >>> mult_numbers([1, 2, 3])
    6

    Obviously, if there is a zero in the input, the product will be zero:

    >>> mult_numbers([10, 20, 0, 50])
    0

    As explained at http://en.wikipedia.org/wiki/Empty_product, if the list is
    empty, the product should be 1:

    >>> mult_numbers([])
    1
    """
    total = 1
    for num in number_list:
        total = total * num
    return total
############################################################################################
word_list = ["spam", "spam", "bacon", "balloonicorn"]
def join_strings(word_list):
"""Return a string of all input strings joined together.
Python has a built-in method on lists, `join`---but for this exercise, you
should not use it.
>>> join_strings(["spam", "spam", "bacon", "balloonicorn"])
'spamspambaconballoonicorn'
For an empty list, you should return an empty string:
>>> join_strings([])
''
"""
joined_list = ""
for word in range(0, len(word_list)):
joined_list = joined_list + word_list[word]
return joined_list
join_strings(word_list)
###############################################################################
def average(number_list):
    """Return the average (mean) of the list of numbers given.

    >>> average([2, 12, 3])
    5.666666666666667

    There is no defined answer if the list given is empty. It's fine if
    this raises an error when given an empty list.
    """
    total = 0
    for num in number_list:
        total = total + num
    return float(total) / len(number_list)
###########################################################################
list_of_words = ["Labrador", "Poodle", "French Bulldog"]
def join_strings_with_comma(list_of_words):
"""Return ['list', 'of', 'words'] like "list, of, words".
>>> join_strings_with_comma(["Labrador", "Poodle", "French Bulldog"])
'Labrador, Poodle, French Bulldog'
If there's only one thing in the list, it should return just that
thing, of course:
>>> join_strings_with_comma(["Pretzel"])
'Pretzel'
"""
for words in list_of_words:
joined_words = ", ".join(list_of_words)
return list_of_words
join_strings_with_comma(list_of_words)
##############################################################################
# END OF ASSIGNMENT: You can ignore everything below.
if __name__ == "__main__":
import doctest
print
result = doctest.testmod()
if not result.failed:
print "*** %s TESTS PASSED. GOOD WORK!" % result.attempted
print
|
[
"kindc005@riveter03-ml.swna.wdpr.disney.com"
] |
kindc005@riveter03-ml.swna.wdpr.disney.com
|
5e1b614ea755b33c5ce4f98f3cfb783b2e3a24c5
|
51dcd31096526bfa6aeae4baea9f0f45657c6623
|
/ocean/datasets/datasets.py
|
81bb5f5316ed602de9d5b6f81d5883b0d3b2b7a4
|
[] |
no_license
|
sopac/ocean-portal-docker
|
eba5de774e5a2b3e9b019440c39e7f0041715dd9
|
159aeba7143e66fdd9ed253de935407f898b4873
|
refs/heads/master
| 2021-01-20T08:07:58.698449
| 2017-09-10T09:24:04
| 2017-09-10T09:24:04
| 90,103,531
| 1
| 5
| null | 2017-12-13T03:30:45
| 2017-05-03T03:19:57
|
Python
|
UTF-8
|
Python
| false
| false
| 3,164
|
py
|
#
# (c) 2012 Commonwealth of Australia
# Australian Bureau of Meteorology, COSPPac COMP
# All Rights Reserved
#
# Authors: Danielle Madeley <d.madeley@bom.gov.au>
import cgi
import datetime
from ocean.config import regionConfig
from ocean.core import ReportableException
class MissingParameter(ReportableException):
pass
class ValidationError(ReportableException):
pass
class Dataset(object):
    # these are the possible parameters that can be passed to a dataset
    # and their types
__form_params__ = {
'dataset': str,
'variable': str,
'plot': str,
'date': datetime.date,
'period': str,
'area': str,
'step': str,
}
# these parameters are required, failure to include them is an exception
__required_params__ = [
'dataset',
'variable',
'plot',
'period',
]
__periods__ = [
]
__variables__ = [
]
__plots__ = [
]
@classmethod
def parse(self, validate=True):
form = cgi.FieldStorage()
output = {}
for k, t in self.__form_params__.items():
if k not in form:
continue
v = form[k].value
# coerce the form values into the right types
if hasattr(self, 'parse_%s' % k):
v = getattr(self, 'parse_%s' % k)(v)
if not isinstance(v, t):
raise TypeError("Form parameter '%s' is of type %s, expected %s" %
(k, type(v), t))
else:
v = t(v)
# run validation
# FIXME: should this be done afterwards with the entire param set?
if validate and hasattr(self, 'validate_%s' % k):
try:
getattr(self, 'validate_%s' % k)(v)
except AssertionError, e:
raise ValidationError(e)
output[k] = v
# check for required
if validate:
for k in self.__required_params__:
if k not in output:
raise MissingParameter("Expected parameter '%s'" % k)
return output
@classmethod
def parse_date(self, p):
if len(p) == 8:
day = int(p[6:8])
elif len(p) == 6:
day = 1
else:
raise TypeError("Length of date must be 6 or 8, not %i" % len(p))
return datetime.date(int(p[0:4]), int(p[4:6]), day)
@classmethod
def validate_variable(self, p):
if not p in self.__variables__:
raise ValidationError("Unknown variable '%s'" % p)
@classmethod
def validate_plot(self, p):
if not p in self.__plots__:
raise ValidationError("Unknown plot type '%s'" % p)
@classmethod
def validate_period(self, p):
if not p in self.__periods__:
raise ValidationError("Unknown period '%s'" % p)
@classmethod
def validate_area(self, p):
if p not in regionConfig.regions:
raise ValidationError("Unknown area '%s'" % p)
    def process(self, params):
        raise NotImplementedError
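# --- Illustrative sketch (not part of the original module) ---
# A hypothetical subclass showing how the validation hooks are meant to
# be filled in; the variable/plot/period names below are invented.
class ExampleSST(Dataset):
    __periods__ = ['daily', 'monthly']
    __variables__ = ['mean', 'anomaly']
    __plots__ = ['map']
    def process(self, params):
        return "plot %(variable)s as a %(plot)s for %(period)s" % params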
|
[
"sachindras@spc.int"
] |
sachindras@spc.int
|
3258e737d5e764ba89d735a746e74cee2d3960b6
|
c27f94e58f3b2cc6e3ae313d9860d24bebd55515
|
/src/training/dataprep/convert_to_parquet.py
|
b8699b0fbf8b1534457cd6f74be7f2865144f5b1
|
[
"Apache-2.0"
] |
permissive
|
mengdong/merlin-on-vertex
|
5cea099db676c932d776c0ca61b8bcbb1dbfbc66
|
0d8ac410f6afb01ff1b2ec70b51cb5caa28a66ad
|
refs/heads/main
| 2023-08-19T13:27:03.053988
| 2021-10-09T05:01:16
| 2021-10-09T05:01:16
| 415,205,910
| 0
| 0
|
Apache-2.0
| 2021-10-09T04:55:34
| 2021-10-09T04:55:33
| null |
UTF-8
|
Python
| false
| false
| 4,008
|
py
|
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import logging
import nvtabular as nvt
import numpy as np
import os
import shutil
import time
import utils
CRITEO_FILE_RE = 'day_*'
def convert(args):
"""Converts Criteo TSV files to Parquet."""
# Create Dask cluster
client = utils.create_dask_cluster(
gpus=args.devices.split(','),
device_memory_fraction=args.device_limit_frac,
device_pool_fraction=args.device_pool_frac,
local_directory=args.dask_path,
protocol=args.protocol
)
# Specify column names
cont_names = ["I" + str(x) for x in range(1, 14)]
cat_names = ["C" + str(x) for x in range(1, 27)]
cols = ["label"] + cont_names + cat_names
# Specify column dtypes.
dtypes = {}
dtypes["label"] = np.int32
for x in cont_names:
dtypes[x] = np.int32
for x in cat_names:
dtypes[x] = "hex"
# Create an NVTabular Dataset from Criteo TSV files
file_list = glob.glob(os.path.join(args.input_path, CRITEO_FILE_RE))
dataset = nvt.Dataset(
file_list,
engine="csv",
names=cols,
part_mem_fraction=args.part_mem_frac,
sep="\t",
dtypes=dtypes,
client=client,
)
# Convert to Parquet
dataset.to_parquet(
output_path=args.output_path,
preserve_files=True,
)
def parse_args():
parser = argparse.ArgumentParser(description=("Multi-GPU Criteo Preprocessing"))
parser.add_argument(
"--input_path",
type=str,
help="A path to Criteo TSV files")
parser.add_argument(
"--output_path",
type=str,
help="A path to Criteo Parquet files")
parser.add_argument(
"--dask_path",
type=str,
help="A path to Dask working directory")
parser.add_argument(
"-d",
"--devices",
type=str,
help='Comma-separated list of visible devices (e.g. "0,1,2,3"). '
)
parser.add_argument(
"-p",
"--protocol",
choices=["tcp", "ucx"],
default="tcp",
type=str,
help="Communication protocol to use (Default 'tcp')",
)
parser.add_argument(
"--device_limit_frac",
default=0.7,
type=float,
help="Worker device-memory limit as a fraction of GPU capacity (Default 0.8). "
)
parser.add_argument(
"--device_pool_frac",
default=0.9,
type=float,
help="RMM pool size for each worker as a fraction of GPU capacity (Default 0.9). "
"The RMM pool frac is the same for all GPUs, make sure each one has enough memory size",
)
parser.add_argument(
"--num_io_threads",
default=0,
type=int,
help="Number of threads to use when writing output data (Default 0). "
"If 0 is specified, multi-threading will not be used for IO.",
)
parser.add_argument(
"--part_mem_frac",
default=0.125,
type=float,
help="Maximum size desired for dataset partitions as a fraction "
"of GPU capacity (Default 0.125)",
)
args = parser.parse_args()
return args
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s %(message)s')
logging.root.setLevel(logging.NOTSET)
logging.getLogger('numba').setLevel(logging.WARNING)
logging.getLogger('asyncio').setLevel(logging.WARNING)
args = parse_args()
convert(args)
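# Example invocation (paths and device list are illustrative, not from the
# original repo; the flags match the argparse definitions above):
#   python convert_to_parquet.py \
#       --input_path /data/criteo/raw --output_path /data/criteo/parquet \
#       --dask_path /tmp/dask_workdir --devices 0,1 --protocol tcp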
|
[
"jarokaz@no-reply.com"
] |
jarokaz@no-reply.com
|
eb4e148f1f3df6fa2869bba533a6a3eea067e17f
|
fcc8102e3649ade30449e4ddb91536251b9896be
|
/code-1mwtt/ex12_bin_search.py
|
3a14fbc0aecb2f3208b9a905cdc67ca3a830f351
|
[
"MIT"
] |
permissive
|
shilpasayura/algohack
|
b8373da344acd5b78e92c34b748cabf3ab036463
|
74e88e558244464771a18d8ed5f466a0e2acec78
|
refs/heads/master
| 2023-04-10T07:17:31.626591
| 2023-03-17T15:50:58
| 2023-03-17T15:50:58
| 136,417,023
| 6
| 6
| null | 2018-09-15T16:27:54
| 2018-06-07T03:28:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,075
|
py
|
def bin_search(xs, x):
    # return the index of x in the sorted list xs (with duplicates, the
    # greatest matching index); returns 0 when xs is empty
low = 0
high = len(xs)
while low + 1 < high:
guess = (low + high) // 2
if x < xs[guess]:
high = guess
else:
low = guess
return low
def test(test_case_xs, test_case_x, expected):
actual = bin_search(test_case_xs, test_case_x)
if actual == expected:
print("Passed test for " + test_case_x)
else:
print("Didn't pass test for " + test_case_x)
print("The result was " + str(actual) + " but it should have been " + str(expected))
test([], "x", 0)
test(["code", "learn", "to"], "code", 0)
test(["code", "learn", "to"], "learn", 1)
test(["code", "learn", "to"], "to", 2)
sentence = "A brownish cloud descends every Friday, growing, hovering impressively, jeopardously keeping low, moving nimbly over populated quarters, returning silently to unknown, violently wild xylogenic yttriferous zones."
words = sentence.lower().split(" ")
for i in range(0, len(words)):
test(words, words[i], i)
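# --- Illustrative cross-check (not part of the original exercise) ---
# For a non-empty sorted list and x >= xs[0], bin_search agrees with the
# stdlib bisect module: it returns bisect_right(xs, x) - 1.
import bisect
for w in ["code", "learn", "to"]:
    assert bin_search(["code", "learn", "to"], w) == \
        bisect.bisect_right(["code", "learn", "to"], w) - 1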
|
[
"shilpasayura@gmail.com"
] |
shilpasayura@gmail.com
|
8ae9c07b59c89ec6ed4ff6ddaa00edd00dcc9bfe
|
e96ed9d92cea007f98b03b7213b5daab2e3f33ea
|
/World.py
|
06873071bd363a15c3efbe4a7c574d1187f9ce1f
|
[] |
no_license
|
SterlingPeet/PyBioSim
|
70a4add7fda16db1427bfbdde169c512199beb0c
|
b86fdff97f42cde661bb5bef584cb7e9eeaee067
|
refs/heads/master
| 2021-01-15T19:13:55.579793
| 2015-03-05T22:17:34
| 2015-03-05T22:17:34
| 30,716,717
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
'''
Created on Jan 16, 2015
@author: Arindam
'''
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from pylab import *
import os
class World(object):
def __init__(self, width, height):
self.agents=[]
self.obstacles=[]
self.balls=[]
self.width = width
self.height = height
def draw(self, ax):
ax.set_xlim3d(-self.width,self.width)
ax.set_ylim3d(-self.width,self.width)
ax.set_zlim3d(-self.height,self.height)
for agent in self.agents:
agent.draw(ax)
for obstacle in self.obstacles:
obstacle.draw(ax)
for ball in self.balls:
ball.draw(ax)
return ax
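# --- Illustrative usage (not part of the original file) ---
# Renders an empty 10x5 world; assumes an interactive matplotlib backend.
if __name__ == '__main__':
    world = World(10, 5)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    world.draw(ax)
    plt.show()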
|
[
"arindam.b@gatech.edu"
] |
arindam.b@gatech.edu
|
c383feae171c9937de1ed0b92f573b81edbc2d96
|
eb69eff59cfee055cdb831c886a98b27fc2c03df
|
/count_words_in_books/without_asyncio.py
|
49e47f2f0c75dec7861d944c1d9556c8bf4bd7ee
|
[] |
no_license
|
overfl0/depcon_asyncio
|
eb1f1600a727828f3b04b7d14184c3cf4381df5c
|
09ba2318bdc866a83550530ecba5f81d2a411370
|
refs/heads/master
| 2021-07-01T14:53:34.508406
| 2019-12-06T10:04:49
| 2019-12-06T10:04:49
| 226,342,907
| 0
| 0
| null | 2021-02-26T02:42:29
| 2019-12-06T14:07:07
|
Python
|
UTF-8
|
Python
| false
| false
| 373
|
py
|
from count_words_in_books.common import links_wl
from urllib.request import urlopen
def count_words(text):
    wordcount = {}
    for word in text.split():
        if word not in wordcount:
            wordcount[word] = 1
        else:
            wordcount[word] += 1
    return wordcount
def main():
    for link in links_wl:
        # urlopen().read() returns bytes; decode before splitting into words
        data = urlopen(link).read().decode('utf-8')
        count_words(data)
if __name__ == '__main__':
    main()
|
[
"lukaszchojnacki2@gmail.com"
] |
lukaszchojnacki2@gmail.com
|
d67673100f1b976c27b6772711072121e573ee42
|
0ff58efd704400dd8ed21b1ce5781b93be7ddc80
|
/wiki_split_3.py
|
a7f55758bb2c5badc8ed28ab64d237b19b1bdecf
|
[
"MIT"
] |
permissive
|
danieleandreatta/wiki_page_stats
|
452ff39b747a18c008e8ed989ea1f064e295513f
|
02f342f4bf73497ffc3ca1fc2c163eb1627e72b0
|
refs/heads/master
| 2021-01-19T21:28:22.494149
| 2015-02-28T10:44:53
| 2015-02-28T10:44:53
| 29,706,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
#!/usr/bin/python3.4
import time
import sys
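# Input is expected in Wikimedia pagecounts format, one line per page:
#   <project> <page_title> <view_count> <bytes_transferred>
# e.g. "en Main_Page 2923175 24600604517" (illustrative numbers).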
t0 = time.clock()
pages = []
with open(sys.argv[1], 'rb') as f:
for line in f:
ws = line.split()
if ws[0] == b'en' and int(ws[2]) > 500:
pages.append((ws[1].decode('ascii'), int(ws[2])))
pages.sort(reverse=True, key=lambda x:x[1])
t1 = time.clock()
print('Query took %.2f seconds' % (t1-t0))
for i in range(min(10, len(pages))):
print('%s (%d)' % pages[i])
|
[
"d_andreatta@hotmail.com"
] |
d_andreatta@hotmail.com
|
361dcd2a8a5f84b0725ec737cdeb87b13b46a64e
|
1207c58fa92dad30050b9f3bcc1173d7e7034c73
|
/mnist_tools.py
|
127a5ccc739ea8502a23197d9b85c1bbf5fe0bd9
|
[] |
no_license
|
chagge/rethinking-generalization
|
b49cf59c8d4d2c3607fa2074a80f86d8e682150c
|
317c1ae29ae119d7399e8e04e95eb903f4d1c045
|
refs/heads/master
| 2021-01-22T09:04:55.449746
| 2017-02-12T16:41:09
| 2017-02-12T16:41:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,871
|
py
|
# -*- coding: utf-8 -*-
import gzip, os, six, sys
from six.moves.urllib import request
from PIL import Image
import numpy as np
parent = "http://yann.lecun.com/exdb/mnist"
train_images_filename = "train-images-idx3-ubyte.gz"
train_labels_filename = "train-labels-idx1-ubyte.gz"
test_images_filename = "t10k-images-idx3-ubyte.gz"
test_labels_filename = "t10k-labels-idx1-ubyte.gz"
n_train = 60000
n_test = 10000
dim = 28 * 28
def load_mnist(data_filename, label_filename, num):
images = np.zeros(num * dim, dtype=np.uint8).reshape((num, dim))
label = np.zeros(num, dtype=np.uint8).reshape((num, ))
with gzip.open(data_filename, "rb") as f_images, gzip.open(label_filename, "rb") as f_labels:
f_images.read(16)
f_labels.read(8)
for i in six.moves.range(num):
label[i] = ord(f_labels.read(1))
for j in six.moves.range(dim):
images[i, j] = ord(f_images.read(1))
if i % 100 == 0 or i == num - 1:
sys.stdout.write("\rloading images ... ({} / {})".format(i + 1, num))
sys.stdout.flush()
sys.stdout.write("\n")
return images, label
def load_train_images():
if not os.path.exists("../" + train_images_filename):
download_mnist_data()
images, labels = load_mnist("../" + train_images_filename, "../" + train_labels_filename, n_train)
return images, labels
def load_test_images():
if not os.path.exists("../" + test_images_filename):
download_mnist_data()
images, labels = load_mnist("../" + test_images_filename, "../" + test_labels_filename, n_test)
return images, labels
def download_mnist_data():
print("Downloading {} ...".format(train_images_filename))
request.urlretrieve("{}/{}".format(parent, train_images_filename), "../" + train_images_filename)
print("Downloading {} ...".format(train_labels_filename))
request.urlretrieve("{}/{}".format(parent, train_labels_filename), "../" + train_labels_filename)
print("Downloading {} ...".format(test_images_filename))
request.urlretrieve("{}/{}".format(parent, test_images_filename), "../" + test_images_filename)
print("Downloading {} ...".format(test_labels_filename))
request.urlretrieve("{}/{}".format(parent, test_labels_filename), "../" + test_labels_filename)
print("Done")
def extract_bitmaps():
train_dir = "train_images"
test_dir = "test_images"
try:
os.mkdir(train_dir)
os.mkdir(test_dir)
except:
pass
    data_train, label_train = load_train_images()
    data_test, label_test = load_test_images()
    print("Saving training images ...")
    for i in six.moves.range(data_train.shape[0]):
        image = Image.fromarray(data_train[i].reshape(28, 28))
        image.save("{}/{}_{}.bmp".format(train_dir, label_train[i], i))
    print("Saving test images ...")
    for i in six.moves.range(data_test.shape[0]):
        image = Image.fromarray(data_test[i].reshape(28, 28))
        image.save("{}/{}_{}.bmp".format(test_dir, label_test[i], i))
|
[
"musyoku@users.noreply.github.com"
] |
musyoku@users.noreply.github.com
|
eefab490fac85d434563527a62d7c61c21cdedd4
|
501615c82801733e69c7447ab9fd68d3883ed947
|
/hotfix/mz_platform/services/functions/career_course_service.py
|
a590aa114a064a4fa0554f9cf3c981374b55dd5d
|
[] |
no_license
|
az0ne/python
|
b2e1cc1e925d1fcdb269e7dd4c48e24665deeeee
|
aec5d23bb412f7dfca374fb5c5b9988c1b817347
|
refs/heads/master
| 2021-07-18T02:08:46.314972
| 2017-10-27T06:23:36
| 2017-10-27T06:23:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,828
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'changfu'
from mz_platform.services.functions.mz_service import MZService, Orm2Str
from mz_platform.orms.course_models import CareerCourse
from mz_platform.orms.mz_common import CareerObjRelation
from mz_platform.orms.course_models import Course
from mz_platform.orms.mz_user import UserProfile
from mz_platform.orms.mz_article import Article
from mz_platform.orms.mz_common import CareerAd
from mz_platform.orms.course_models import Lesson
class CareerCourseService(MZService):
""" 职业课程service
"""
factory_functions = dict(career_course=CareerCourse)
def __init__(self):
"""
@brief
@return:
"""
super(CareerCourseService, self).__init__()
def get_related_objects(self, career_course_id, obj_type='COURSE', ad_type='COURSE', is_active=True,
what='*', conditions=None, limit=None, order_by=None):
"""
        @brief Fetch the objects related to a career course, such as courses, articles, teachers, videos and ads
        @param career_course_id: id of the career course
        @param obj_type: type of the related objects; one of 'COURSE', 'ARTICLE', 'TEACHER', 'LESSON', 'CAREERAD'; defaults to 'COURSE'
        @param ad_type: type of the ad, i.e. where the ad is placed; only used when obj_type='CAREERAD'; one of 'COURSE', 'ARTICLE'; defaults to 'COURSE'
        @param is_active: True or False; if True, return only active records, if False, return all records; defaults to True
        @param what:
        @param conditions:
        @param limit:
        @param order_by:
        @return: a list of dicts with the course information
        @note example1:
            get_related_objects(career_course_id=5, obj_type='COURSE', is_active=False, order_by='-id',
                                what='id', conditions=['id>10'], limit='9, 14')
            Fetch the `id` field of all sub-courses (is_active=False) of the career course with id 5 whose id
            is greater than 10; take 14 rows starting from row 9 of the result, sorted by id in descending order.
        example2:
            get_related_objects(career_course_id=5, obj_type='CAREERAD', ad_type='ARTICLE', is_active=False, order_by='-id',
                                what='id', limit='9, 14')
            Fetch the `id` field of the ads (is_active=False) shown on the article detail page of the career
            course with id 5; take 14 rows starting from row 9 of the result, sorted by id in descending order.
        """
relation_map = dict(CAREERAD=([(CareerAd, ('career_id', 'type', 'is_actived'), (career_course_id, ad_type, 1 if is_active else 0))],
CareerAd,
None),
COURSE=([(CareerObjRelation, ('career_id', 'is_actived', 'obj_type'), (career_course_id, 1 if is_active else 0, 'COURSE'))],
Course,
[('inner', (CareerObjRelation, Course), ('obj_id', 'id'))]),
ARTICLE=([(CareerObjRelation, ('career_id', 'is_actived', 'obj_type'), (career_course_id, 1 if is_active else 0, 'ARTICLE'))],
Article,
[('inner', (CareerObjRelation, Article), ('obj_id', 'id'))]),
TEACHER=(),
LESSON=())
more_conditions, where, join = relation_map[obj_type.upper()]
data = Orm2Str.orm2str(what=what,
where=where,
join=join,
conditions=conditions,
more_conditions=more_conditions,
limit=limit,
order_by=order_by)
return self.db.select(**data)
|
[
"1461847795@qq.com"
] |
1461847795@qq.com
|
14f82a34f2211b09f1441103c7218d080c5601e0
|
5226b3ed1ecbafae65859049289447a1681215bd
|
/split_every_page.py
|
eaa611af6948c4df9a0837ff62ba9536961a793a
|
[
"Apache-2.0"
] |
permissive
|
ClemensBasler/python-pdf-test
|
35dfca34cabe3a674e092003d0bae43c05a16622
|
02a5e397c5bca05a80cb7de2250ebacd6e889ae8
|
refs/heads/master
| 2020-04-29T17:44:43.224042
| 2019-03-18T14:38:22
| 2019-03-18T14:38:22
| 176,305,192
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
# pdf_splitter.py
import os
from PyPDF2 import PdfFileReader, PdfFileWriter
def pdf_splitter(path):
fname = os.path.splitext(os.path.basename(path))[0]
pdf = PdfFileReader(path)
for page in range(pdf.getNumPages()):
pdf_writer = PdfFileWriter()
pdf_writer.addPage(pdf.getPage(page))
output_filename = '{}_page_{}.pdf'.format(
fname, page+1)
with open(output_filename, 'wb') as out:
pdf_writer.write(out)
print('Created: {}'.format(output_filename))
if __name__ == '__main__':
path = 'input/test.pdf'
pdf_splitter(path)
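# --- Illustrative counterpart (not part of the original file) ---
# Re-assembling the split pages with the same PyPDF2 generation's
# PdfFileMerger; the glob pattern is an assumption about the filenames
# produced by pdf_splitter above.
import glob
from PyPDF2 import PdfFileMerger
def pdf_merger(pattern, output):
    merger = PdfFileMerger()
    for name in sorted(glob.glob(pattern)):
        merger.append(name)
    with open(output, 'wb') as out:
        merger.write(out)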
|
[
"clemens-basler@t-online.de"
] |
clemens-basler@t-online.de
|
7178f8ac1e0922e8cfe3a8096f3a33236d592e8a
|
6be1cdfea8c3b2fe73f9ba28da0f70c906a22ed3
|
/algorithm/_abc/iterable/genetic.py
|
f946a449dd8e4377406af25a855bbb1dc50c3297
|
[] |
no_license
|
chivdan/evoguess
|
63b52c7a376cf374994c87253726ee303ec0a2e7
|
2294dfe29369998786dd0b79b311c3efc385c895
|
refs/heads/master
| 2023-08-02T16:43:49.153184
| 2021-10-08T13:21:46
| 2021-10-08T13:21:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
from .evolution import *
class Genetic(Evolution):
tweak_chunk_size = 2
name = 'Algorithm(Iterable): Genetic'
def __init__(self, crossover, *args, **kwargs):
self.crossover = crossover
super().__init__(*args, **kwargs)
def tweak(self, selected: Population):
raise NotImplementedError
def join(self, parents: Population, children: Population):
raise NotImplementedError
def __info__(self):
return {
**super().__info__(),
'crossover': self.crossover.__info__()
}
__all__ = [
'Genetic',
'Population',
]
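# --- Illustrative sketch (not part of the original module) ---
# A hypothetical concrete subclass; it assumes Population behaves like a
# list of individuals and that the injected `crossover` object exposes a
# pairwise `cross(a, b)` method (an assumption, not the library's API).
class TwoParentGenetic(Genetic):
    def tweak(self, selected):
        children = []
        for i in range(0, len(selected) - 1, self.tweak_chunk_size):
            children.extend(self.crossover.cross(selected[i], selected[i + 1]))
        return children
    def join(self, parents, children):
        return parents + children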
|
[
"art-pavlenko@mail.ru"
] |
art-pavlenko@mail.ru
|
8ddf2cf2991093f5f0b31aa38198e71470511241
|
54a73236923028a91d0427daeacfa44a82ae7e68
|
/pytests/blob1.py
|
581e83ca9370be5ad8af01615aebe2a449af84cf
|
[] |
no_license
|
pbxrshd/transfer
|
85dd3de93be73ad75ea17c52facf57a5fa47c04f
|
15aed0f5087e77e048eb97af1be464a637e5eabc
|
refs/heads/master
| 2016-09-05T15:45:48.175922
| 2015-07-09T13:46:20
| 2015-07-09T13:46:20
| 27,556,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,810
|
py
|
#!/usr/bin/python
import cv
import time
import serial
from scipy import *
from scipy.cluster import vq
import numpy
import sys, os, random, hashlib
from math import *
top = 0
bottom = 1
left = 0
right = 1
def merge_collided_bboxes( bbox_list ):
for this_bbox in bbox_list:
# Collision detect every other bbox:
for other_bbox in bbox_list:
if this_bbox is other_bbox: continue # Skip self
# Assume a collision to start out with:
has_collision = True
            if (this_bbox[right][0]*1.1 < other_bbox[left][0]*0.9): has_collision = False
            if (this_bbox[left][0]*0.9 > other_bbox[right][0]*1.1): has_collision = False
            if (this_bbox[bottom][1]*1.1 < other_bbox[top][1]*0.9): has_collision = False
            if (this_bbox[top][1]*0.9 > other_bbox[bottom][1]*1.1): has_collision = False
if has_collision:
# merge these two bboxes into one, then start over:
top_left_x = min( this_bbox[left][0], other_bbox[left][0] )
top_left_y = min( this_bbox[left][1], other_bbox[left][1] )
bottom_right_x = max( this_bbox[right][0], other_bbox[right][0] )
bottom_right_y = max( this_bbox[right][1], other_bbox[right][1] )
new_bbox = ( (top_left_x, top_left_y), (bottom_right_x, bottom_right_y) )
bbox_list.remove( this_bbox )
bbox_list.remove( other_bbox )
bbox_list.append( new_bbox )
# Start over with the new list:
return merge_collided_bboxes( bbox_list )
# When there are no collions between boxes, return that list:
return bbox_list
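# --- Illustrative check (not part of the original script) ---
# Two overlapping boxes collapse into one covering both:
#   merge_collided_bboxes([((0, 0), (10, 10)), ((5, 5), (15, 15))])
#   -> [((0, 0), (15, 15))]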
def detect_faces( image, haar_cascade, mem_storage ):
faces = []
image_size = cv.GetSize( image )
faces = cv.HaarDetectObjects(image, haar_cascade, mem_storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, ( image_size[0]/10, image_size[1]/10) )
for face in faces:
box = face[0]
cv.Rectangle(image, ( box[0], box[1] ),
( box[0] + box[2], box[1] + box[3]), cv.RGB(255, 0, 0), 1, 8, 0)
class Target:
def __init__(self):
try:
self.ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)
self.have_eye = 1
except:
print " may want to plug something into /dev/ttyUSB0"
self.have_eye = 0
self.joymap = [1,2,3,4] # link between joystick and the servo to move
self.joyreverse = [0,0,0,0,0]
fps=6
is_color = True
self.capture = cv.CaptureFromCAM(0)
cv.SetCaptureProperty( self.capture, cv.CV_CAP_PROP_FRAME_WIDTH, 320 );
cv.SetCaptureProperty( self.capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 240 );
frame = cv.QueryFrame(self.capture)
frame_size = cv.GetSize(frame)
self.writer = None
frame = cv.QueryFrame(self.capture)
cv.NamedWindow("Target", 1)
def ServoMove(self, servo, angle):
servo = self.joymap[servo]
if self.joyreverse[servo]:
angle = 180 - angle
if (0 <= angle <= 180):
self.ser.write(chr(255))
self.ser.write(chr(servo))
self.ser.write(chr(angle))
else:
print "Servo angle must be an integer between 0 and 180.\n"
def run(self):
frame = cv.QueryFrame( self.capture )
frame_size = cv.GetSize( frame )
# Capture the first frame from webcam for image properties
display_image = cv.QueryFrame( self.capture )
# Greyscale image, thresholded to create the motion mask:
grey_image = cv.CreateImage( cv.GetSize(frame), cv.IPL_DEPTH_8U, 1 )
# The RunningAvg() function requires a 32-bit or 64-bit image...
running_average_image = cv.CreateImage( cv.GetSize(frame), cv.IPL_DEPTH_32F, 3 )
# ...but the AbsDiff() function requires matching image depths:
running_average_in_display_color_depth = cv.CloneImage( display_image )
# RAM used by FindContours():
mem_storage = cv.CreateMemStorage(0)
# The difference between the running average and the current frame:
difference = cv.CloneImage( display_image )
target_count = 1
last_target_count = 1
last_target_change_t = 0.0
k_or_guess = 1
codebook=[]
frame_count=0
last_frame_entity_list = []
t0 = time.time()
# For toggling display:
image_list = [ "display", "difference", "threshold", "camera", "faces"]
image_index = 0 # Index into image_list
# Prep for text drawing:
text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1, cv.CV_AA )
text_coord = ( 5, 15 )
text_color = cv.CV_RGB(255,255,255)
haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt.xml' )
max_targets = 3
while True:
camera_image = cv.QueryFrame( self.capture )
frame_count += 1
frame_t0 = time.time()
# Create an image with interactive feedback:
display_image = cv.CloneImage( camera_image )
# Create a working "color image" to modify / blur
color_image = cv.CloneImage( display_image )
# Smooth to get rid of false positives
cv.Smooth( color_image, color_image, cv.CV_GAUSSIAN, 19, 0 )
# Use the Running Average as the static background
# a = 0.020 leaves artifacts lingering way too long.
# a = 0.320 works well at 320x240, 15fps. (1/a is roughly num frames.)
cv.RunningAvg( color_image, running_average_image, 0.420, None )
# Convert the scale of the moving average.
cv.ConvertScale( running_average_image, running_average_in_display_color_depth, 1.0, 0.0 )
# Subtract the current frame from the moving average.
cv.AbsDiff( color_image, running_average_in_display_color_depth, difference )
# Convert the image to greyscale.
cv.CvtColor( difference, grey_image, cv.CV_RGB2GRAY )
# Threshold the image to a black and white motion mask:
cv.Threshold( grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY )
# Smooth and threshold again to eliminate "sparkles"
cv.Smooth( grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0 )
cv.Threshold( grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY )
cv.Dilate(grey_image, grey_image, None, 18)
cv.Erode(grey_image, grey_image, None, 20)
grey_image_as_array = numpy.asarray( cv.GetMat( grey_image ) )
non_black_coords_array = numpy.where( grey_image_as_array > 3 )
# Convert from numpy.where()'s two separate lists to one list of (x, y) tuples:
non_black_coords_array = zip( non_black_coords_array[1], non_black_coords_array[0] )
points = [] # Was using this to hold either pixel coords or polygon coords.
bounding_box_list = []
# Now calculate movements using the white pixels as "motion" data
contour = cv.FindContours( grey_image, mem_storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE )
while contour:
bounding_rect = cv.BoundingRect( list(contour) )
point1 = ( bounding_rect[0], bounding_rect[1] )
point2 = ( bounding_rect[0] + bounding_rect[2], bounding_rect[1] + bounding_rect[3] )
bounding_box_list.append( ( point1, point2 ) )
polygon_points = cv.ApproxPoly( list(contour), mem_storage, cv.CV_POLY_APPROX_DP )
# To track polygon points only (instead of every pixel):
#points += list(polygon_points)
# Draw the contours:
levels = 0
cv.DrawContours(color_image, contour, cv.CV_RGB(255,0,0), cv.CV_RGB(0,255,0), levels, 3, 0, (0,0) )
cv.FillPoly( grey_image, [ list(polygon_points), ], cv.CV_RGB(255,255,255), 0, 0 )
cv.PolyLine( display_image, [ polygon_points, ], 0, cv.CV_RGB(255,255,255), 1, 0, 0 )
#cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)
contour = contour.h_next()
# Find the average size of the bbox (targets), then
# remove any tiny bboxes (which are prolly just noise).
# "Tiny" is defined as any box with 1/10th the area of the average box.
# This reduces false positives on tiny "sparkles" noise.
box_areas = []
for box in bounding_box_list:
            box_width = box[right][0] - box[left][0]
            box_height = box[bottom][1] - box[top][1]
            box_areas.append( box_width * box_height )
#cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)
average_box_area = 0.0
if len(box_areas): average_box_area = float( sum(box_areas) ) / len(box_areas)
trimmed_box_list = []
for box in bounding_box_list:
            box_width = box[right][0] - box[left][0]
            box_height = box[bottom][1] - box[top][1]
# Only keep the box if it's not a tiny noise box:
if (box_width * box_height) > average_box_area*0.1: trimmed_box_list.append( box )
# Draw the trimmed box list:
#for box in trimmed_box_list:
# cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )
bounding_box_list = merge_collided_bboxes( trimmed_box_list )
# Draw the merged box list:
for box in bounding_box_list:
cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 1 )
# Here are our estimate points to track, based on merged & trimmed boxes:
estimated_target_count = len( bounding_box_list )
        if frame_t0 - last_target_change_t < .650: # allow at most 1 change per 0.65 secs
estimated_target_count = last_target_count
else:
if last_target_count - estimated_target_count > 1: estimated_target_count = last_target_count - 1
if estimated_target_count - last_target_count > 1: estimated_target_count = last_target_count + 1
last_target_change_t = frame_t0
# Clip to the user-supplied maximum:
estimated_target_count = min( estimated_target_count, max_targets )
points = non_black_coords_array
center_points = []
if len(points):
k_or_guess = max( estimated_target_count, 1 ) # Need at least one target to look for.
if len(codebook) == estimated_target_count:
k_or_guess = codebook
#points = vq.whiten(array( points )) # Don't do this! Ruins everything.
codebook, distortion = vq.kmeans( array( points ), k_or_guess )
# Convert to tuples (and draw it to screen)
for center_point in codebook:
center_point = ( int(center_point[0]), int(center_point[1]) )
center_points.append( center_point )
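				# For reference, vq.kmeans returns (codebook, distortion); codebook holds
				# the cluster centers as float coordinates. An assumed example (not from
				# a real run of this script):
				#   vq.kmeans( array([(1, 1), (2, 2), (50, 50)]), 2 )
				#   -> (array([[ 1.5,  1.5], [50., 50.]]), distortion)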
trimmed_center_points = []
removed_center_points = []
for box in bounding_box_list:
# Find the centers within this box:
center_points_in_box = []
for center_point in center_points:
if center_point[0] < box[right][0] and center_point[0] > box[left][0] and \
center_point[1] < box[bottom][1] and center_point[1] > box[top][1] :
# This point is within the box.
center_points_in_box.append( center_point )
# Now see if there are more than one. If so, merge them.
if len( center_points_in_box ) > 1:
# Merge them:
					x_list = []
					y_list = []
for point in center_points_in_box:
x_list.append(point[0])
y_list.append(point[1])
average_x = int( float(sum( x_list )) / len( x_list ) )
average_y = int( float(sum( y_list )) / len( y_list ) )
trimmed_center_points.append( (average_x, average_y) )
# Record that they were removed:
removed_center_points += center_points_in_box
if len( center_points_in_box ) == 1:
trimmed_center_points.append( center_points_in_box[0] ) # Just use it.
# If there are any center_points not within a bbox, just use them.
# (It's probably a cluster comprised of a bunch of small bboxes.)
for center_point in center_points:
if (not center_point in trimmed_center_points) and (not center_point in removed_center_points):
trimmed_center_points.append( center_point )
# Determine if there are any new (or lost) targets:
actual_target_count = len( trimmed_center_points )
last_target_count = actual_target_count
# Now build the list of physical entities (objects)
this_frame_entity_list = []
# An entity is list: [ name, color, last_time_seen, last_known_coords ]
for target in trimmed_center_points:
# Is this a target near a prior entity (same physical entity)?
entity_found = False
entity_distance_dict = {}
for entity in last_frame_entity_list:
entity_coords= entity[3]
delta_x = entity_coords[0] - target[0]
delta_y = entity_coords[1] - target[1]
distance = sqrt( pow(delta_x,2) + pow( delta_y,2) )
entity_distance_dict[ distance ] = entity
# Did we find any non-claimed entities (nearest to furthest):
distance_list = entity_distance_dict.keys()
distance_list.sort()
for distance in distance_list:
# Yes; see if we can claim the nearest one:
nearest_possible_entity = entity_distance_dict[ distance ]
if nearest_possible_entity in this_frame_entity_list:
continue
# Found the nearest entity to claim:
entity_found = True
nearest_possible_entity[2] = frame_t0 # Update last_time_seen
nearest_possible_entity[3] = target # Update the new location
this_frame_entity_list.append( nearest_possible_entity )
break
if entity_found == False:
# It's a new entity.
color = ( random.randint(0,255), random.randint(0,255), random.randint(0,255) )
name = hashlib.md5( str(frame_t0) + str(color) ).hexdigest()[:6]
last_time_seen = frame_t0
new_entity = [ name, color, last_time_seen, target ]
this_frame_entity_list.append( new_entity )
# Now "delete" any not-found entities which have expired:
entity_ttl = 1.0 # 1 sec.
ent_count = 0
for entity in last_frame_entity_list:
last_time_seen = entity[2]
if frame_t0 - last_time_seen > entity_ttl:
pass
else:
# Save it for next time... not expired yet:
this_frame_entity_list.append( entity )
ent_count += 1
# For next frame:
last_frame_entity_list = this_frame_entity_list
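			# Entity record layout, with illustrative (made-up) values:
			#   [ name,     color,          last_time_seen, last_known_coords ]
			#   [ 'a3f9c1', (12, 200, 87),  1589.204,       (161, 122) ]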
		# Draw the primary (first) tracked entity to screen:
count = 0
if ent_count != 0:
entity = this_frame_entity_list[0]
center_point = entity[3]
c = entity[1] # RGB color tuple
# print '%s %d %d %d' % (entity[0], count, center_point[0], center_point[1])
cv.Circle(display_image, center_point, 20, cv.CV_RGB(c[0], c[1], c[2]), 1)
cv.Circle(display_image, center_point, 15, cv.CV_RGB(c[0], c[1], c[2]), 1)
cv.Circle(display_image, center_point, 10, cv.CV_RGB(c[0], c[1], c[2]), 2)
cv.Circle(display_image, center_point, 5, cv.CV_RGB(c[0], c[1], c[2]), 3)
text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1, cv.CV_AA )
text_coord = ( 5, 15 )
text_color = cv.CV_RGB(255,255,255)
x = 50 + (center_point[0] * 80 / 320)
y = 20 + (center_point[1] * 80 / 240)
if self.have_eye:
self.ServoMove(0, int(x))
self.ServoMove(1, int(y))
s = '%3.0d %3.0d' % (x, y)
cv.PutText(display_image, str(s), text_coord, text_font, text_color )
#print "min_size is: " + str(min_size)
# Listen for ESC or ENTER key
c = cv.WaitKey(7) % 0x100
if c == 27 or c == 10:
break
# Toggle which image to show
if chr(c) == 'd':
image_index = ( image_index + 1 ) % len( image_list )
image_name = image_list[ image_index ]
# Display frame to user
if image_name == "display":
image = display_image
# cv.PutText( image, "AABBs and contours", text_coord, text_font, text_color )
elif image_name == "camera":
image = camera_image
cv.PutText( image, "No overlay", text_coord, text_font, text_color )
elif image_name == "difference":
image = difference
cv.PutText( image, "Difference Image", text_coord, text_font, text_color )
elif image_name == "faces":
# Do face detection
detect_faces( camera_image, haar_cascade, mem_storage )
image = camera_image # Re-use camera image here
cv.PutText( image, "Face Detection", text_coord, text_font, text_color )
elif image_name == "threshold":
# Convert the image to color.
cv.CvtColor( grey_image, display_image, cv.CV_GRAY2RGB )
image = display_image # Re-use display image here
cv.PutText( image, "Motion Mask", text_coord, text_font, text_color )
cv.ShowImage( "Target", image )
if self.writer:
				cv.WriteFrame( self.writer, image )
frame_t1 = time.time()
t1 = time.time()
time_delta = t1 - t0
processed_fps = float( frame_count ) / time_delta
print "Got %d frames. %.1f s. %f fps." % ( frame_count, time_delta, processed_fps )
if __name__=="__main__":
t = Target()
t.run()
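# A minimal sketch of the frame-differencing idea used above, in plain numpy
# (hypothetical helper, not part of this script; frame is an 8-bit grayscale
# array and running_avg a float array of the same shape):
#
#   import numpy as np
#   def motion_mask(frame, running_avg, alpha=0.32, thresh=2):
#       running_avg[:] = alpha * frame + (1 - alpha) * running_avg
#       diff = np.abs(frame - running_avg)
#       return (diff > thresh).astype(np.uint8) * 255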
|
[
"rshdpbx@gmail.com"
] |
rshdpbx@gmail.com
|
b57452e38a1139dde3005b8cd198b2ac97024466
|
ca19d3793ac651292a04351aa5328fc6035afe6a
|
/함수버튼연습01.py
|
24076841df4dd2123ba5c7121e2282cef6a58441
|
[] |
no_license
|
lyricalzy/python
|
fd490ab068d290bf94812927ae92aa50ab56f50f
|
cb0e2d712f2a14e34fbe654ba68981623af544dc
|
refs/heads/master
| 2021-01-02T02:10:39.128811
| 2020-02-18T07:29:09
| 2020-02-18T07:29:09
| 239,447,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,103
|
py
|
from tkinter import *
def file_write():
    print("save")
    fruit = ["사과", "바나나", "배"]  # apple, banana, pear
    # Write the list to a file, one item per line
    fruit_file = open("fruit.txt", "w")
    for x in fruit:
        fruit_file.write(x + "\n")
    fruit_file.close()
def file_write2():
    print("save2")
    fruit2 = []
    for i in range(0, 5):
        fruit2.append(input("Enter a fruit: "))
    print(fruit2)
    # Write to file (disabled)
    # fruit_file = open("fruit.txt", "w")
    # for x in fruit2:
    #     fruit_file.write(x + "\n")
    # fruit_file.close()
def file_read():
    print("read")
    file = open("fruit.txt", "r")
    for j in range(0, 3):  # read back the three saved lines
        temp = file.readline()
        data = temp.strip()
        print(data)
    file.close()
w = Tk()  # create the main window
w.geometry("200x150")
button1 = Button(w, text="Save", bg="green", font=("새 굴림", 30), fg="white", command=file_write)
button1.pack()
button2 = Button(w, text="Read", bg="green", font=("새 굴림", 30), fg="white", command=file_read)
button2.pack()
w.mainloop()
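# A sketch of the same read using a context manager (would replace file_read's
# body; iterating the file object avoids hard-coding the line count):
# def file_read():
#     with open("fruit.txt", "r") as f:
#         for line in f:
#             print(line.strip())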
|
[
"noreply@github.com"
] |
lyricalzy.noreply@github.com
|
fe4f0bb60158fead685319319500402ec7300cde
|
9719df2dc131aa1189acef7273bee090290becd6
|
/Chapter 12/E2.py
|
498cf9b3da0bb1ac95ec433c8cbb8d026b8b1f4e
|
[
"MIT"
] |
permissive
|
hanzhi713/thinkcs-python3-solutions
|
df72e9d76779a5ffb9a8f9a9316c672a021feece
|
290b34df7d9c7f45daebd1af6017a03828ec8eb4
|
refs/heads/master
| 2020-03-31T10:03:03.301775
| 2018-10-08T17:41:10
| 2018-10-08T17:41:10
| 152,120,690
| 13
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
import math
help(math)
# (a)
# 44
# (b)
# math.ceil(x)
''' Return the ceiling of x as an Integral
This is the smallest integer >= x.'''
# math.floor(x)
'''Return the floor of x as an Integral.
This is the largest integer <= x.'''
# (c) By using Newton's method (runnable version below)
'''def sqrt(n):
approx = n/2.0
while True:
better = (approx + n/approx)/2.0
if abs(approx - better) < 0.001:
return better
approx = better'''
# (d)
math.pi
math.e
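# A runnable check of the quoted Newton's-method sqrt against math.sqrt
# (a sketch; the 0.001 tolerance comes from the quoted code above):
def newton_sqrt(n):
    approx = n / 2.0
    while True:
        better = (approx + n / approx) / 2.0
        if abs(approx - better) < 0.001:
            return better
        approx = better

print(newton_sqrt(25))   # ~5.0
print(math.sqrt(25))     # 5.0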
|
[
"hanzhi713@163.com"
] |
hanzhi713@163.com
|
f3dd6f0fa00130a093838604fbcd31efa45166f1
|
d5e4953aae14a27fed713bc2fe21ee41df9c2f92
|
/current_omega.py
|
89af93f5344a07989d76afd9a04a72185dd7fbf3
|
[
"BSD-2-Clause"
] |
permissive
|
j-silver/quantum_dots
|
c7714822741f0b0807ec254a475ddcbf7d2322d4
|
54132a3c7dd0e83e27375f6c5f6ec154065a9695
|
refs/heads/master
| 2021-01-22T12:12:05.048203
| 2017-02-13T17:29:14
| 2017-02-13T17:29:14
| 26,187,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
#fig.suptitle('Asymptotic current as a function of the frequency $\Omega$')
plt.xlabel(r'$\Omega/\Delta$')
plt.ylabel(r'$I/I_0$')
plt.xscale('log')
plt.grid(True)
#plt.text(0.005, 0.15, '$\\frac{\kappa_B T}{\hbar\Delta}=1$\n$\\alpha=0.005$', bbox={'facecolor':'white','alpha':0.8})
R_CURRT = (np.loadtxt('RED-STAT-CURR-O.dat')).T
C_CURRT = (np.loadtxt('CP-STAT-CURR-O.dat')).T
rfig = plt.plot(R_CURRT[0], R_CURRT[1], color='red', label='Redfield dynamics')
cfig = plt.plot(C_CURRT[0], C_CURRT[1], color='blue', label='CP dynamics')
plt.legend()  # labels come from the label= kwargs above
plt.show()
|
[
"giuseppe.argentieri@ts.infn.it"
] |
giuseppe.argentieri@ts.infn.it
|
0f253f10f1a06bb12051b106aaf606e8818c0a2f
|
ad11d4b949d480054c3d6259acca0aed6aa809c7
|
/07-string-manipulation.py
|
c157469dfa0010c72f7f45d77b6f771e016c4bd6
|
[] |
no_license
|
kamranajabbar/python3_practice
|
08263e3a0a49131b38f813c6e79593f8b26bb108
|
312b3225d11cb22b1358185d9b91d971ecdf865d
|
refs/heads/master
| 2023-03-06T07:33:55.471101
| 2021-02-03T13:40:51
| 2021-02-03T13:40:51
| 276,912,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,516
|
py
|
#Chapter # 24 String changing case
name = "Kamran"
print("String in Upper Case : ", name.upper())
print("String in Lower Case : ", name.lower())
print("String in Capitale Case : ", name.capitalize())
print("String in Title Case : ", name.title())
str = "hello"
print(str[:2])
string = "my name is x"
for i in string.split():
print(i, end=", ")
print("\n")
x=1
y=2
z=x
x=y
y=z
print(x,y)
A = 16
B = 15
print(A % B // A)
print("-------- Q14 -----")
for i in range(10):
if i == 5:
break
else:
print(i)
else:
print("Here")
print("-------- Q15 -----")
list1 = [1, 3, 2]
print(list1 * 2)
print("-------- Q16 -----")
a = {}
a[1] = 1
a['1'] = 2
a[1] = a[1]+1
count = 0
for i in a:
count += a[i]
print(count)
print("-------- Q17 -----")
numbers = [1,2,3,4]
numbers.append([5,6,7,8])
print(len(numbers))
print("-------- Q18 -----")
names = ['Amir','Bear','Chariton','Daman']
print(names[-1][-1])
print("-------- Q19 -----")
names = [1,2,3,4]
print(names[-3:-2])
print("-------- Q20 -----")
abc = {"KJ":78}
xyz = {"AJ":156}
print(abc == xyz)
print("-------- Q22 -----")
x = 1 / 2.0 + 3//3 + 4 ** 1
print(x)
print("-------- Q23 -----")
a = [11,2,23]
b = [11,2,2]
print(a < b)
print("-------- Q24 -----")
a = {1:5,2:3,3:4}
a.pop(3)
print(a)
print("-------- Q27 -----")
abc = {"KJ":40, "AJ":45}
print("KJ" in abc)
print("-------- Q29 -----")
s1 = [3,4]
s2 = [1,2]
s3 = list()
for i in s1:
    for j in s2:
        s3.append((i, j))
print(s3)
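print("-------- extra -----")
# The nested loops above build the cartesian product of s1 and s2;
# itertools.product does the same in one call (a sketch):
import itertools
print(list(itertools.product(s1, s2)))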
|
[
"kamranajabbar@gmail.com"
] |
kamranajabbar@gmail.com
|
0528d2db08b2589fe1bc93ac70b28626a3b8a954
|
a6d1b9bccdf8b06c7623c786c67d04ca15d3a69b
|
/CCNP1.py
|
72b524e619673b03e67cb555737ddc556b56fbd8
|
[] |
no_license
|
amrkhailr/cbtnuggets
|
f6a584aa4f05dc53af213acfc202fc5caf51bac8
|
d43a52ae72e3725e2eb6b8776ba609b161dca34a
|
refs/heads/master
| 2022-09-16T02:45:34.564816
| 2020-05-30T04:38:48
| 2020-05-30T04:38:48
| 267,768,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
#my_list = [1,2,3,4]
#my_list[2] = 45
#A = my_list *3
#print(A)
#my_list = [1024, 3, True, 6.5]
#my_list.append(False)
#print(my_list)
import datetime
def greet():
dt = datetime.datetime.now()
if dt.hour <= 11 :
greeting = 'morning'
elif dt.hour <= 17:
greeting = 'afternoon'
else:
greeting = 'night'
print("Hello, good",greeting)
greet()
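# A testable variant (a sketch; the helper name is ours): pass the hour in
# instead of reading the clock, so each branch can be checked directly.
def greeting_for(hour):
    if hour <= 11:
        return 'morning'
    elif hour <= 17:
        return 'afternoon'
    return 'night'

assert greeting_for(9) == 'morning'
assert greeting_for(13) == 'afternoon'
assert greeting_for(22) == 'night'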
|
[
"romal.amarkhail@gmail.com"
] |
romal.amarkhail@gmail.com
|
67a6acbe758dcaaf23938c3fc0d98b472cb4158a
|
bbc972524d9f0ccf95fbac30de2f7a7c0a1badf2
|
/tests/unit2/test_physics_engine2.py
|
3471b8c35fa6908f0a23c110a0d89d792ff38987
|
[
"MIT"
] |
permissive
|
JFincher42/arcade
|
e0049b84b0317a0fd9eee3702e4e6e843cc2ebac
|
f9eebfc4c6989e0e99d7b6dfe0409f248bfd5a44
|
refs/heads/master
| 2021-06-29T20:41:31.850895
| 2021-03-18T14:43:01
| 2021-03-18T14:43:01
| 226,012,595
| 0
| 0
|
NOASSERTION
| 2021-03-18T14:45:09
| 2019-12-05T04:00:26
|
Python
|
UTF-8
|
Python
| false
| false
| 10,576
|
py
|
""" Physics engine tests. """
import arcade
OUT_OF_THE_WAY = (250, 250)
def basic_tests(moving_sprite, wall_list, physics_engine):
""" Run basic tests that can be done by both engines. """
wall_sprite_1 = wall_list[0]
wall_sprite_2 = wall_list[1]
wall_sprite_2.position = OUT_OF_THE_WAY
# --- Collisions between a moving sprite and one wall block
# Two non-moving sprites side-by-side
wall_sprite_1.position = (10, 0)
wall_sprite_1.angle = 0
moving_sprite.position = (0, 0)
moving_sprite.angle = 0
moving_sprite.change_x = 0
moving_sprite.change_y = 0
moving_sprite.change_angle = 0
collisions = physics_engine.update()
assert moving_sprite.position == (0, 0)
assert len(collisions) == 0
# Move up to wall
wall_sprite_1.position = (11, 0)
moving_sprite.position = (0, 0)
moving_sprite.change_x = 1
moving_sprite.change_y = 0
collisions = physics_engine.update()
assert moving_sprite.position == (1, 0)
assert len(collisions) == 0
# Move into wall going left to right
for speed in range(2, 10):
wall_sprite_1.position = (11, 0)
moving_sprite.position = (0, 0)
moving_sprite.change_x = speed
moving_sprite.change_y = 0
collisions = physics_engine.update()
        assert moving_sprite.position == (1, 0)
assert len(collisions) == 1
assert collisions[0] == wall_sprite_1
# Move into wall going right to left
for speed in range(2, 10):
wall_sprite_1.position = (-11, 0)
moving_sprite.position = (0, 0)
moving_sprite.change_x = -speed
moving_sprite.change_y = 0
collisions = physics_engine.update()
        assert moving_sprite.position == (-1, 0)
assert len(collisions) == 1
assert collisions[0] == wall_sprite_1
# Move into wall going downwards
for speed in range(2, 10):
wall_sprite_1.position = (0, -11)
moving_sprite.position = (0, 0)
moving_sprite.change_x = 0
moving_sprite.change_y = -speed
collisions = physics_engine.update()
        assert moving_sprite.position == (0, -1)
assert len(collisions) == 1
assert collisions[0] == wall_sprite_1
# Move into wall going up
for speed in range(2, 10, 1):
wall_sprite_1.position = (0, 11)
moving_sprite.position = (0, 0)
moving_sprite.change_x = 0
moving_sprite.change_y = speed
collisions = physics_engine.update()
        assert moving_sprite.position == (0, 1)
assert len(collisions) == 1
assert collisions[0] == wall_sprite_1
# --- Check rotating collision
# - Rotate, with block to the right
# Check rotation one degree
wall_sprite_1.position = (10, 0)
moving_sprite.position = (0, 0)
moving_sprite.angle = 0
moving_sprite.change_x = 0
moving_sprite.change_y = 0
moving_sprite.change_angle = 1
collisions = physics_engine.update()
assert len(collisions) == 1
assert collisions[0] == wall_sprite_1
assert moving_sprite.position == (-1, 0)
# Check rotation 45 degrees
wall_sprite_1.position = (10, 0)
moving_sprite.position = (0, 0)
moving_sprite.angle = 0
moving_sprite.change_x = 0
moving_sprite.change_y = 0
moving_sprite.change_angle = 45
collisions = physics_engine.update()
assert len(collisions) == 1
assert collisions[0] == wall_sprite_1
assert moving_sprite.position == (-4, 0)
# - Rotate, with block to the left
# Check rotation one degree
wall_sprite_1.position = (-10, 0)
moving_sprite.position = (0, 0)
moving_sprite.angle = 0
moving_sprite.change_x = 0
moving_sprite.change_y = 0
moving_sprite.change_angle = 1
collisions = physics_engine.update()
assert len(collisions) == 1
assert collisions[0] == wall_sprite_1
assert moving_sprite.position == (1, 0)
# Check rotation 45 degrees
wall_sprite_1.position = (-10, 0)
moving_sprite.position = (0, 0)
moving_sprite.angle = 0
moving_sprite.change_x = 0
moving_sprite.change_y = 0
moving_sprite.change_angle = 45
collisions = physics_engine.update()
assert len(collisions) == 1
assert collisions[0] == wall_sprite_1
assert moving_sprite.position == (4, 0)
# - Rotate, with block above
# Check rotation one degree
wall_sprite_1.position = (0, 10)
moving_sprite.position = (0, 0)
moving_sprite.angle = 0
moving_sprite.change_x = 0
moving_sprite.change_y = 0
moving_sprite.change_angle = 1
collisions = physics_engine.update()
assert len(collisions) == 1
assert collisions[0] == wall_sprite_1
assert moving_sprite.position == (0, -1)
# Check rotation 45 degrees
wall_sprite_1.position = (0, 10)
moving_sprite.position = (0, 0)
moving_sprite.angle = 0
moving_sprite.change_x = 0
moving_sprite.change_y = 0
moving_sprite.change_angle = 45
collisions = physics_engine.update()
assert len(collisions) == 1
assert collisions[0] == wall_sprite_1
assert moving_sprite.position == (0, -4)
# - Rotate, between two blocks
# Check rotation one degree
wall_sprite_1.position = (10, 0)
wall_sprite_2.position = (-10, 0)
moving_sprite.position = (0, 0)
moving_sprite.angle = 0
moving_sprite.change_x = 0
moving_sprite.change_y = 0
moving_sprite.change_angle = 1
collisions = physics_engine.update()
assert len(collisions) == 2
assert wall_sprite_1 in collisions
assert wall_sprite_2 in collisions
assert moving_sprite.position == (0, 0)
# --- Check pre-existing collision
wall_sprite_1.position = (9, 0)
wall_sprite_2.position = OUT_OF_THE_WAY
moving_sprite.position = (0, 0)
moving_sprite.angle = 0
moving_sprite.change_x = 0
moving_sprite.change_y = 0
moving_sprite.change_angle = 0
collisions = physics_engine.update()
assert moving_sprite.position == (-1, 0)
assert len(collisions) == 0
def simple_engine_tests(moving_sprite, wall_list, physics_engine):
wall_sprite_1 = wall_list[0]
wall_sprite_2 = wall_list[1]
wall_sprite_2.position = OUT_OF_THE_WAY
# --- Collide on angle
wall_sprite_1.position = (15, -5)
wall_sprite_1.angle = 45
for speed in range(2, 10):
moving_sprite.position = (0, 0)
moving_sprite.angle = 0
moving_sprite.change_x = speed
moving_sprite.change_y = 0
moving_sprite.change_angle = 0
collisions = physics_engine.update()
assert moving_sprite.position == (2, 0)
if speed == 2:
assert len(collisions) == 0
else:
assert len(collisions) == 1
wall_sprite_1.position = (-15, -5)
wall_sprite_1.angle = 45
for speed in range(2, 10):
moving_sprite.position = (0, 0)
moving_sprite.angle = 0
moving_sprite.change_x = -speed
moving_sprite.change_y = 0
moving_sprite.change_angle = 0
collisions = physics_engine.update()
assert moving_sprite.position == (-2, 0)
if speed == 2:
assert len(collisions) == 0
else:
assert len(collisions) == 1
def platformer_tests(moving_sprite, wall_list, physics_engine):
wall_sprite_1 = wall_list[0]
wall_sprite_2 = wall_list[1]
wall_sprite_2.position = OUT_OF_THE_WAY
wall_sprite_1.position = (15, -5)
wall_sprite_1.angle = 45
moving_sprite.position = (3, 1)
moving_sprite.angle = 0
collisions = arcade.check_for_collision_with_list(moving_sprite, wall_list)
print(f"\n **** {len(collisions)}")
print("")
# --- Collide on angle
wall_sprite_1.position = (15, -5)
wall_sprite_1.angle = 45
for speed in range(2, 7):
moving_sprite.position = (0, 0)
moving_sprite.angle = 0
moving_sprite.change_x = speed
moving_sprite.change_y = 0
moving_sprite.change_angle = 0
collisions = physics_engine.update()
if speed == 2:
assert moving_sprite.position == (2, 0)
elif speed == 3:
assert moving_sprite.position == (3, 1)
elif speed == 4:
assert moving_sprite.position == (4, 2)
elif speed == 5:
assert moving_sprite.position == (5, 3)
elif speed == 6:
assert moving_sprite.position == (6, 4)
wall_sprite_1.position = (-15, -5)
wall_sprite_1.angle = 45
for speed in range(2, 7):
moving_sprite.position = (0, 0)
moving_sprite.angle = 0
moving_sprite.change_x = -speed
moving_sprite.change_y = 0
moving_sprite.change_angle = 0
collisions = physics_engine.update()
if speed == 2:
assert moving_sprite.position == (-2, 0)
elif speed == 3:
assert moving_sprite.position == (-3, 1)
elif speed == 4:
assert moving_sprite.position == (-4, 2)
elif speed == 5:
assert moving_sprite.position == (-5, 3)
elif speed == 6:
assert moving_sprite.position == (-6, 4)
# Move up to wall
wall_sprite_1.position = OUT_OF_THE_WAY
physics_engine.gravity_constant = 1
moving_sprite.position = (0, 0)
moving_sprite.change_x = 1
moving_sprite.change_y = 0
collisions = physics_engine.update()
assert moving_sprite.position == (1, -1)
collisions = physics_engine.update()
assert moving_sprite.position == (2, -3)
collisions = physics_engine.update()
assert moving_sprite.position == (3, -6)
def test_main(twm):
if twm:
assert True
else:
character_list = arcade.SpriteList()
wall_list = arcade.SpriteList()
moving_sprite = arcade.SpriteSolidColor(10, 10, arcade.color.RED)
character_list.append(moving_sprite)
wall_sprite = arcade.SpriteSolidColor(10, 10, arcade.color.BLUE)
wall_list.append(wall_sprite)
wall_sprite = arcade.SpriteSolidColor(10, 10, arcade.color.BLUE)
wall_sprite.position = OUT_OF_THE_WAY
wall_list.append(wall_sprite)
physics_engine = arcade.PhysicsEngineSimple(moving_sprite, wall_list)
basic_tests(moving_sprite, wall_list, physics_engine)
simple_engine_tests(moving_sprite, wall_list, physics_engine)
physics_engine = arcade.PhysicsEnginePlatformer(moving_sprite, wall_list, gravity_constant=0.0)
basic_tests(moving_sprite, wall_list, physics_engine)
platformer_tests(moving_sprite, wall_list, physics_engine)
|
[
"jfincher42@gmail.com"
] |
jfincher42@gmail.com
|
0410e802c2b0556f2ca3888c0be9ecdc86e6ebad
|
bb64ff9d2a6f65afaa44f3be70c42c897f635552
|
/lingtools/util/__init__.py
|
e062e4b70aba09208003b4352fa6809bd430629b
|
[
"Apache-2.0"
] |
permissive
|
lingtools/lingtools
|
c173bfcfa1bcf1c92c94eb424f89754e9e9388aa
|
cd5b44a5552fdc79ec99c61dfe526face8df522b
|
refs/heads/master
| 2021-01-21T04:25:07.160936
| 2015-01-17T19:44:21
| 2015-01-17T19:44:21
| 16,148,868
| 7
| 2
| null | 2016-09-20T14:50:05
| 2014-01-22T18:52:49
|
Python
|
UTF-8
|
Python
| false
| false
| 26
|
py
|
"""
Utility modules.
"""
|
[
"lignos@seas.upenn.edu"
] |
lignos@seas.upenn.edu
|
dc28c4cec7ece6aebe88802c998a185fdf40a5c3
|
28c971e96bc3236bf6988feab8d7f0b41b9d031e
|
/2018_Informatik_l/Ex_03/task_1.py
|
e844534f63ddf8195c7fc33e6ceb49518b6ecf61
|
[] |
no_license
|
Olstra/UZH
|
98cc2ba3e9db0dbe4fafc0b6ac7266f2b4bf7722
|
038d6a2b0b97a7ee22988fd47f4025a70473929c
|
refs/heads/master
| 2020-04-23T12:35:15.064379
| 2019-08-27T20:26:49
| 2019-08-27T20:26:49
| 171,174,293
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
# Please do not modify this part of the code!
grade = '-'
# Your code goes here
score = int(input("Type in your achieved score: "))
if score in range(0, 101):
if score >= 90:
grade = 'A'
elif score >= 80:
grade = 'B'
elif score >= 70:
grade = 'C'
elif score >= 60:
grade = 'D'
    else:
        grade = 'F'
    print("Your grade is:", grade)
else:
print("\nERROR: Please enter a number from 0-100.")
|
[
"noreply@github.com"
] |
Olstra.noreply@github.com
|
c6c4755f951b1c39f607d3b3cc37c99b18d92b88
|
425b8bb7ea01bfd9e59706f461e9fb7a2550a5fb
|
/manage.py
|
c5f42ea6e4c995c1c2f89f39910985906fd057aa
|
[] |
no_license
|
pwbryant/superno3
|
c8798b9f5caeca47ab1d9ab888832704fe7c5a8e
|
96c2a14ce219b9360e38d98851d907abbd073b28
|
refs/heads/master
| 2021-01-01T03:47:51.129801
| 2016-04-28T21:26:17
| 2016-04-28T21:26:17
| 57,334,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "superno3.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"paulwilliambryant@gmail.com"
] |
paulwilliambryant@gmail.com
|
a6252bfecc3583b46f38cb2e68d103cb8493e682
|
f6fd11d996bee6455e8e6cf35a2ad0b615c51665
|
/saengsorn-6230403955-lab2/dict_strings.py.py
|
2d75c075690291ab2e1ced056c49dddc24a1b72a
|
[] |
no_license
|
Saengsorn-Wongpriew/6230403955-oop-labs
|
f4c80679542b44fb36cca62ebbd68038c10fbdb5
|
735dbe1c57e826bce5617884a4ada529025b112b
|
refs/heads/master
| 2023-01-16T02:38:10.830465
| 2020-11-20T09:51:30
| 2020-11-20T09:51:30
| 284,725,036
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
dictionary = {
"manee": "1234",
"mana": "4567",
"chujai": "6789"
}
t = list(dictionary.items())
print(t)
v = list(dictionary.values())
print(v)
k = list(dictionary.keys())
print(k)
word = "antidisestablishmentarianism"
word = sorted(word)
print(''.join(word))
sentence = "the quick brown fox jumped over the lazy dog"
w = sentence.split()
print(w)
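# The sorted-letters trick above is the standard anagram test (a sketch):
def is_anagram(a, b):
    return sorted(a) == sorted(b)

print(is_anagram("listen", "silent"))   # True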
|
[
"noreply@github.com"
] |
Saengsorn-Wongpriew.noreply@github.com
|
85267b1060a3e2d33902bc2af850b6da6644a4ed
|
09c96fa897f9a9818c656548d1e234c40648a350
|
/ls/joyous/tests/test_recurrence.py
|
8150de7c95c398674e2384bad33e08d7a7727cf2
|
[
"BSD-3-Clause"
] |
permissive
|
linuxsoftware/ls.joyous
|
45bfbe376c154cdbfea6ae1fa6d5e6cf9a7222b1
|
7c408d52ab935140c69f6c12379655e39e8b5ca5
|
refs/heads/master
| 2023-08-30T02:14:33.518824
| 2022-07-01T03:11:34
| 2022-07-01T03:11:34
| 120,979,220
| 85
| 38
|
BSD-3-Clause
| 2023-08-07T10:43:25
| 2018-02-10T03:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 11,790
|
py
|
# ------------------------------------------------------------------------------
import sys
import datetime as dt
from dateutil.rrule import rrule
from django.test import TestCase
from ls.joyous.utils.recurrence import Recurrence, Weekday
from ls.joyous.utils.recurrence import MO, TU, WE, TH, FR, SA, SU
from ls.joyous.utils.recurrence import YEARLY, MONTHLY, WEEKLY, DAILY
from .testutils import datetimetz
# ------------------------------------------------------------------------------
class TestWeekday(TestCase):
def testStr(self):
self.assertEqual(str(Weekday(0)), "Monday")
self.assertEqual(str(Weekday(4,1)), "first Friday")
self.assertEqual(str(Weekday(4,-1)), "last Friday")
self.assertEqual(str(SA), "Saturday")
self.assertEqual(str(FR(3)), "third Friday")
def testGetWhen(self):
self.assertEqual(Weekday(0)._getWhen(0), "Monday")
self.assertEqual(FR(1)._getWhen(0), "first Friday")
self.assertEqual(SU._getWhen(1), "Monday")
self.assertEqual(WE._getWhen(-2), "Monday")
self.assertEqual(FR(1)._getWhen(-1), "Thursday before the first Friday")
self.assertEqual(SU(1)._getWhen(2), "Tuesday after the first Sunday")
def testRepr(self):
self.assertEqual(repr(Weekday(0)), "MO")
self.assertEqual(repr(Weekday(4,2)), "+2FR")
self.assertEqual(repr(SA), "SA")
self.assertEqual(repr(FR(3)), "+3FR")
self.assertEqual(repr(WE(-2)), "-2WE")
# ------------------------------------------------------------------------------
class TestRecurrence(TestCase):
def testInitStr(self):
with self.assertRaises(ValueError):
Recurrence("DTSTART:19970902T090000\n"
"RRULE:FREQ=DAILY;INTERVAL=3\n"
"RRULE:FREQ=DAILY;INTERVAL=4")
def testInitRecurrence(self):
rr1 = Recurrence(dtstart=dt.date(2009, 1, 1),
freq=WEEKLY,
byweekday=[MO,TU,WE,TH,FR])
rr2 = Recurrence(rr1)
self.assertEqual(rr2.freq, WEEKLY)
def testInitRrule(self):
rr1 = rrule(dtstart=dt.date(2009, 1, 1),
freq=WEEKLY,
byweekday=[MO,TU,WE,TH,FR])
rr2 = Recurrence(rr1)
self.assertEqual(rr2.freq, WEEKLY)
def testEq(self):
rr1 = rrule(dtstart=dt.datetime(2009, 1, 1, 0, 0, 1),
freq=WEEKLY,
byweekday=[MO,TU,WE,TH,FR])
rr2 = Recurrence(dtstart=dt.date(2009, 1, 1),
freq=WEEKLY,
byweekday=[MO,TU,WE,TH,FR])
rr3 = Recurrence("DTSTART:20090101\n"
"RRULE:FREQ=WEEKLY;WKST=SU;BYDAY=MO,TU,WE,TH,FR")
rr4 = rrule(dtstart=dt.date(2009, 1, 1),
freq=WEEKLY,
byweekday=[MO,TU,WE,TH,FR],
until=dt.date(2009, 1, 10))
self.assertEqual(Recurrence(rr1), rr2)
self.assertEqual(rr2, rr1)
self.assertEqual(rr1, rr2)
self.assertEqual(rr2, rr2)
self.assertEqual(rr2, rr3)
self.assertNotEqual(rr2, 99)
self.assertNotEqual(rr2, rr4)
def testRepr(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
freq=WEEKLY,
count=9,
byweekday=[MO,TU,WE,TH,FR])
self.assertEqual(repr(rr),
"DTSTART:20090101\n"
"RRULE:FREQ=WEEKLY;WKST=SU;COUNT=9;BYDAY=MO,TU,WE,TH,FR")
self.assertEqual(rr.count, rr.getCount())
rr = Recurrence(dtstart=dt.date(2011, 1, 1),
freq=DAILY,
interval=2,
until=dt.date(2011,1,11))
self.assertEqual(repr(rr),
"DTSTART:20110101\n"
"RRULE:FREQ=DAILY;INTERVAL=2;WKST=SU;UNTIL=20110111")
rr = Recurrence(dtstart=dt.date(2012, 1, 1),
freq=YEARLY,
bymonth=[1,2],
byweekday=range(7),
until=dt.date(2012,1,31))
self.assertEqual(repr(rr),
"DTSTART:20120101\n"
"RRULE:FREQ=YEARLY;WKST=SU;UNTIL=20120131;"
"BYDAY=MO,TU,WE,TH,FR,SA,SU;BYMONTH=1,2")
rr = Recurrence(dtstart=dt.date(2015, 10, 1),
freq=MONTHLY,
bymonth=range(1,12),
byweekday=[(SU(-1))])
self.assertEqual(repr(rr),
"DTSTART:20151001\n"
"RRULE:FREQ=MONTHLY;WKST=SU;BYDAY=-1SU;BYMONTH=1,2,3,4,5,6,7,8,9,10,11")
def testParse(self):
rr = Recurrence("DTSTART:20090101\n"
"RRULE:FREQ=WEEKLY;WKST=SU;BYDAY=MO,TU,WE,TH,FR;COUNT=9")
self.assertEqual(rr.dtstart, dt.date(2009, 1, 1))
self.assertEqual(rr.count, 9)
self.assertCountEqual(rr.byweekday, [MO,TU,WE,TH,FR])
def testParseNoDtstart(self):
rr = Recurrence("RRULE:FREQ=DAILY;WKST=SU")
self.assertEqual(rr.freq, DAILY)
def testRoundtrip(self):
rrStr = "DTSTART:20151001\n" \
"RRULE:FREQ=MONTHLY;WKST=SU;BYDAY=-1SU;BYMONTH=1,2,3,4,5,6,7,8,9,10,11"
self.assertEqual(repr(Recurrence(rrStr)), rrStr)
rrStr = "DTSTART:20141001\n" \
"RRULE:FREQ=MONTHLY;WKST=SU;UNTIL=20141001;BYMONTHDAY=1,-1" # first&last
self.assertEqual(repr(Recurrence(rrStr)), rrStr)
def testFrequency(self):
rr = Recurrence(freq=10)
self.assertEqual(rr.frequency, "unsupported_frequency_10")
def testGetRrule(self):
rr = Recurrence(dtstart=dt.date(2011, 1, 1),
freq=DAILY,
interval=2,
until=dt.date(2011,1,11))
self.assertEqual(rr._getRrule(),
"FREQ=DAILY;INTERVAL=2;WKST=SU;UNTIL=20110111")
with self.assertRaises(TypeError):
rr._getRrule(untilDt=dt.datetime(2011,1,11))
self.assertEqual(rr._getRrule(untilDt=dt.datetime(2011,1,11,23,59,59,
tzinfo=dt.timezone.utc)),
"FREQ=DAILY;INTERVAL=2;WKST=SU;UNTIL=20110111T235959Z")
# ------------------------------------------------------------------------------
class TestGetWhen(TestCase):
def testDaily(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1), freq=DAILY)
self.assertEqual(rr._getWhen(2), "Daily")
def testEvery2Days(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
interval=2,
freq=DAILY)
self.assertEqual(rr._getWhen(3), "Every 2 days")
def testMonEveryFortnight(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
interval=2,
freq=WEEKLY,
byweekday=MO)
self.assertEqual(rr._getWhen(0), "Fortnightly on Mondays")
def testMonEvery6Weeks(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
interval=6,
freq=WEEKLY,
byweekday=MO)
self.assertEqual(rr._getWhen(0), "Every 6 weeks on Mondays")
def testEveryday(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
freq=MONTHLY,
byweekday=[MO,TU,WE,TH,FR,SA,SU])
self.assertEqual(rr._getWhen(0), "Everyday")
def testFirstMonMonthly(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
freq=MONTHLY,
byweekday=MO(1))
self.assertEqual(rr._getWhen(0), "The first Monday of the month")
def testMonEvery2Months(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
interval=2,
freq=MONTHLY,
byweekday=MO)
self.assertEqual(rr._getWhen(0), "Every Monday, every 2 months")
def testLastSatSeptEvery2Years(self):
rr = Recurrence(dtstart=dt.date(2018, 9, 29),
interval=2,
freq=YEARLY,
byweekday=SA(-1),
bymonth=9)
self.assertEqual(rr._getWhen(0, numDays=5), "The last Saturday of September, every 2 years for 5 days")
def test1st(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
freq=MONTHLY,
bymonthday=1)
self.assertEqual(rr._getWhen(0), "The first day of the month")
def test22ndOffsetNeg1(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
freq=YEARLY,
bymonthday=22,
bymonth=5)
self.assertEqual(rr._getWhen(-1), "The 21st day of May")
def test30thOffset1(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
freq=MONTHLY,
bymonthday=30)
self.assertEqual(rr._getWhen(1), "The day after the 30th day of the month")
def testMonWedFriOffset1(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
freq=WEEKLY,
count=9,
byweekday=[MO,WE,FR])
self.assertEqual(rr._getWhen(1), "Tuesdays, Thursdays and Saturdays")
def test2ndAnd4thFriOffsetNeg1(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
freq=MONTHLY,
byweekday=[FR(2),FR(4)])
self.assertEqual(rr._getWhen(-1), "The Thursday before the second Friday and "
"Thursday before the fourth Friday of the month")
def test1stOffsetNeg1(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
freq=MONTHLY,
bymonthday=1,
until=dt.date(2010,5,1))
self.assertEqual(rr._getWhen(-1), "The last day of the month (until 30 April 2010)")
def test3rdOffset2(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
freq=MONTHLY,
bymonthday=3)
self.assertEqual(rr._getWhen(2), "The fifth day of the month")
def test1stJanAprMayOffsetNeg1(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
freq=YEARLY,
bymonthday=1,
bymonth=[1,4,5])
self.assertEqual(rr._getWhen(-1), "The last day of December, March and April")
def testLastJulAugSepDecOffset1(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
freq=YEARLY,
bymonthday=-1,
bymonth=[7,8,9,12])
self.assertEqual(rr._getWhen(1),
"The first day of August, September, October and January")
def test1stAndLast(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
freq=MONTHLY,
bymonthday=[1,-1])
self.assertEqual(rr._getWhen(0), "The first and the last day of the month")
def test1stAndLastOffsetNeg1(self):
rr = Recurrence(dtstart=dt.date(2009, 1, 1),
freq=MONTHLY,
bymonthday=[1,-1])
self.assertEqual(rr._getWhen(-1), "The day before the first and the last day of the month")
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
|
[
"david@linuxsoftware.co.nz"
] |
david@linuxsoftware.co.nz
|
a80034d2228bbfe9990d1fb8127411bc8c05716d
|
709724e7afcd3fb3247e62bcd3542f50a4addbbc
|
/DataDisplay/migrations/0011_auto_20180328_1054.py
|
703262fe4757f91a1547243beade1c1d54f1f3f8
|
[] |
no_license
|
fuckgitb/Design-and-Implementation-of-Multi-source-and-Multi-category-Graphic-Data-Monitoring-Platform
|
16a4e9de61d5f00cba88cc6b46c12416599457ca
|
1af69f652f79cde986d241572f94eb0c2945445c
|
refs/heads/master
| 2021-09-17T18:34:28.947608
| 2018-07-04T12:22:57
| 2018-07-04T12:22:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
# Generated by Django 2.0.2 on 2018-03-28 02:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('DataDisplay', '0010_auto_20180328_1043'),
]
operations = [
migrations.AlterField(
model_name='headline_images',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
[
"781137149@qq.com"
] |
781137149@qq.com
|
e9a5e3f9fdecab88a9a87cac35f2bb503c83b4a9
|
fd696d4ee723adf8a6ff2ab8cfdd4491bdb6cb9e
|
/usuarios/views.py
|
3fe47cc28a75849bb8b73512783b4f0c7253b0a4
|
[] |
no_license
|
gonzalinismo/wertu
|
b2fb9752c4995ad330c6df7448f2d34adecacb43
|
dfcdafd489a02cdaf3bfad048064e282055c17d3
|
refs/heads/master
| 2020-05-31T13:27:53.403210
| 2019-06-05T14:00:56
| 2019-06-05T14:00:56
| 190,304,668
| 0
| 0
| null | 2019-06-05T14:00:57
| 2019-06-05T01:25:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,881
|
py
|
from django.shortcuts import render
from .forms import UserForm
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.contrib.auth.decorators import login_required
@login_required
def special(request):
return HttpResponse("You are logged in !")
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect(reverse('index'))
def register(request):
registered = False
if request.method == 'POST':
user_form = UserForm(data=request.POST)
if user_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
registered = True
else:
print(user_form.errors)
else:
user_form = UserForm()
return render(request,'registration.html',
{'user_form':user_form,
'registered':registered})
def user_login(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request,user)
return HttpResponseRedirect('../../misTiendas')
            else:
                return HttpResponse("Your account was inactive.")
else:
print("Someone tried to login and failed.")
print("They used username: {} and password: {}".format(username,password))
return HttpResponse("Invalid login details given")
else:
return render(request, 'login.html', {})
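# Why user.set_password() above matters (illustrative use of standard Django
# auth APIs): it stores a salted hash rather than the raw text, e.g.
#   u = User(username='demo'); u.set_password('s3cret')
#   u.password                  -> 'pbkdf2_sha256$...'
#   u.check_password('s3cret')  -> True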
|
[
"noreply@github.com"
] |
gonzalinismo.noreply@github.com
|
23134786c8fbf5948cdc7ff5c1655121e2bc1b2a
|
292d6c07588d14385f6875d75adea517f0df4f23
|
/Centralized_Learning/Centralized_Learning.py
|
d5633144153179b671209e3a47d8815afaaa53a4
|
[] |
no_license
|
wangyingwwyy/Privacy-Preserving-Federated-Learning-I
|
17c236fcdda3632beb31a703893e6ff5741e1b93
|
c2f22f3310eaa8dc78492b836e4cf30956eca61a
|
refs/heads/master
| 2023-04-19T13:29:46.580799
| 2021-05-10T13:18:21
| 2021-05-10T13:18:21
| 352,018,276
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,738
|
py
|
import pandas as pd
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.metrics import confusion_matrix
import os
from os import path
from datetime import datetime
import csv
def build_model():
input_dim = 115
model = tf.keras.Sequential()
model.add(layers.Input(shape=(input_dim,)))
model.add(layers.Dense(int(0.75 * input_dim), activation='relu'))
model.add(layers.Dense(int(0.5 * input_dim), activation='relu'))
model.add(layers.Dense(int(0.33 * input_dim), activation='relu'))
model.add(layers.Dense(int(0.25 * input_dim), activation='relu'))
model.add(layers.Dense(int(0.33 * input_dim), activation='relu'))
model.add(layers.Dense(int(0.5 * input_dim), activation='relu'))
model.add(layers.Dense(int(0.75 * input_dim), activation='relu'))
model.add(layers.Dense(input_dim))
return model
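# The layer widths above form a symmetric "hourglass" autoencoder:
# 115 -> 86 -> 57 -> 37 -> 28 -> 37 -> 57 -> 86 -> 115, so the final Dense
# reconstructs the input. A quick shape check (a sketch):
#   m = build_model()
#   m.summary()   # last layer: (None, 115)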
def train_model(epochs):
train_data = pd.read_csv('../train_validation_test_minmax/train_centralized.csv')
train_data = train_data.sample(frac = 1)
print('reading train_data')
print(train_data)
validation_data = pd.read_csv('../train_validation_test_minmax/validation_centralized.csv')
validation_data = validation_data.sample(frac=1)
print('reading validation_data')
print(validation_data)
model = build_model()
model.load_weights('../initial_weight/weight.h5')
print('initial weight')
print(model.get_weights())
Optimizer = keras.optimizers.SGD()
model.compile(optimizer=Optimizer, loss="mean_squared_error", metrics=['accuracy'])
print('data for training')
print(train_data.iloc[:, :-2])
print('data for validation')
print(validation_data.iloc[:, :-2])
    #os.mkdir('centralized_model')
    # 'period' was deprecated in later Keras releases in favour of save_freq;
    # kept here to match the TF version this script targets.
    checkpoint = ModelCheckpoint(filepath='centralized_model/model_{epoch:04d}.h5', period = 1)
history = model.fit(train_data.iloc[:, :-2], train_data.iloc[:, :-2],
batch_size=1024,
epochs=epochs,
validation_data=(validation_data.iloc[:, :-2], validation_data.iloc[:, :-2]),
callbacks=[checkpoint]
)
np.save('history_epoch_{}_batchsize_1024_minmax.npy'.format(epochs), history.history)
def threshold_calculation(Path):
validation_data = pd.read_csv('../train_validation_test_minmax/validation_centralized.csv')
validation_x = validation_data.iloc[:, :-2]
model = keras.models.load_model(Path)
X_predict = model.predict(validation_x)
mse = np.mean(np.power(validation_x - X_predict, 2), axis=1)
print('power of np')
print(np.power(validation_x - X_predict, 2))
# calculate threshold
tr = mse.mean() + mse.std()
print('threshold = ' + str(tr))
def evaluate_model(virus, Path, threshold):
model = keras.models.load_model(Path)
#tr = 0.019348176634629
tr = threshold
if not path.exists('logbook_centralized.csv'):
        with open('logbook_centralized.csv', 'w', newline='') as logbook:
            writer = csv.writer(logbook)
            writer.writerow(['Device Name', 'TN', 'FP', 'FN', 'TP', 'Accuracy', 'Precision', 'Recall', 'Threshold'])
if virus == 'BASHLITE':
devices = ['Ecobee_Thermostat', 'Provision_PT_737E_Security_Camera', 'Philips_B120N10_Baby_Monitor',
'Provision_PT_838_Security_Camera', 'SimpleHome_XCS7_1002_WHT_Security_Camera',
'Danmini_Doorbell',
'SimpleHome_XCS7_1003_WHT_Security_Camera',
'Samsung_SNH_1011_N_Webcam', 'Ennio_Doorbell']
test_data = pd.read_csv('../train_validation_test_minmax/test_BASHLITE.csv')
elif virus == 'mirai':
devices = ['Ecobee_Thermostat', 'Provision_PT_737E_Security_Camera', 'Philips_B120N10_Baby_Monitor',
'Provision_PT_838_Security_Camera', 'SimpleHome_XCS7_1002_WHT_Security_Camera',
'Danmini_Doorbell',
'SimpleHome_XCS7_1003_WHT_Security_Camera']
test_data = pd.read_csv('../train_validation_test_minmax/test_mirai.csv')
test_data = test_data.sample(frac = 1)
test_result = pd.DataFrame()
mse_store = pd.DataFrame()
log = []
for device in devices:
test_data_single_device = test_data[test_data['device'] == device]
mse_store['label'] = test_data_single_device['label']
mse_store['device'] = test_data_single_device['device']
test_predict = model.predict(test_data_single_device.iloc[:, :-2])
mse_test = np.mean(np.power(test_data_single_device.iloc[:, :-2] - test_predict, 2), axis=1)
mse_store['mse'] = mse_test
predictions = (mse_test > tr).astype(int)
        print('prediction results: ')
print(predictions)
        tn, fp, fn, tp = confusion_matrix(test_data_single_device['label'], predictions,
                                          labels=[0, 1]).ravel()
        accuracy = (tp + tn) / (tn + fp + fn + tp)
        precision = tp / (tp + fp)  # NB: fails if nothing was predicted positive
        recall = tp / (tp + fn)     # NB: fails if there are no positive labels
        temp = [device, tn, fp, fn, tp, accuracy, precision, recall, tr]
test_result = pd.concat([test_result, mse_store])
        mse_store = pd.DataFrame()
log.append(temp)
    with open('logbook_centralized.csv', 'a', newline='') as logbook:
        writer = csv.writer(logbook)
        writer.writerow(['[' + str(datetime.now()) + ']' + 'testing result of ' + virus])
        for i in range(len(log)):
            writer.writerow(log[i])
train_model(100)
|
[
"wangyingwwyy96@hotmail.com"
] |
wangyingwwyy96@hotmail.com
|
bc6205660bf6d926fd613856f972bfe2f182e762
|
8dde10652d0f30f94f5991f2949511bd22085d7d
|
/social_media_project/groups/templates/groups/blog_project/blog/blogapp/models.py
|
7f6e8d0b6590753a83151cd754120b70b4003739
|
[] |
no_license
|
Tambiebarango/social-media-clone
|
43346563bb420888c8f9b84939f6edeafdbed6b7
|
dafc07798fd1fd4da9918df57f5df93097675378
|
refs/heads/master
| 2020-05-18T10:17:03.203980
| 2019-05-01T00:40:04
| 2019-05-01T00:40:04
| 184,349,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,586
|
py
|
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from django.core.urlresolvers import reverse
# Create your models here.
class UserProfileInfo(models.Model):
user = models.OneToOneField(User) #to add more attributes to the user
#additional attributes
# portfolio_site = models.URLField(blank=True)
# profile_pic = models.ImageField(upload_to='profile_pics', blank=True)
def __str__(self):
return self.user.username
class Post(models.Model):
author = models.ForeignKey('auth.User')
title = models.CharField(max_length=200)
text = models.TextField()
create_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def approved_comments(self):
return self.comments.filter(approved_comment=True)
def get_absolute_url(self):
return reverse("post_detail",kwargs={'pk':self.pk})
def __str__(self):
return self.title
class Comment(models.Model):
post = models.ForeignKey(Post, related_name='comments')
author = models.CharField(max_length=200)
text = models.TextField()
create_date = models.DateTimeField(default=timezone.now)
approved_comment = models.BooleanField(default=False)
def approve(self):
self.approved_comment = True
self.save()
def get_absolute_url(self):
return reverse('post_list')
def __str__(self):
return self.text
|
[
"theodorebarango@Theodores-MacBook-Pro.local"
] |
theodorebarango@Theodores-MacBook-Pro.local
|
d88fce8f8224563faf71fb2b52c67a0816d81051
|
98f96f3efb2a585805756f27483d9e91a75eecf3
|
/GitHub_test_2.py
|
9697ecdbcd25d73e5bf07355117d5f737bada294
|
[] |
no_license
|
Aiden-yang-qq/GitHub_test
|
795f042e295da532c6295f06ba44255aeb2a59be
|
d2cf3a6a900c033ff79e7ef2987f733355d6da46
|
refs/heads/master
| 2022-04-28T07:41:02.633326
| 2020-04-30T07:42:11
| 2020-04-30T07:42:11
| 260,118,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
# Trying out uploading a file from PyCharm to GitHub
def main():
    print('github_2 test succeeded!')

if __name__ == '__main__':
    main()
|
[
"jayskateboy@outlook.com"
] |
jayskateboy@outlook.com
|
ab8c1ccb0b80f94e1bbe462ae249c17cde0ff235
|
ea6004b96cb045d0174e037bbf2be6261f26eff3
|
/qubit/io/celery/config.py
|
347d5e123d08e0cbb6104dc4abd008fccc74f9e6
|
[] |
no_license
|
RyanKung/qubit
|
5dfd63eaf8874b00dfc00f004f2f54374b4f3622
|
e16efece0753b693868a17cccf0633844d5d1ee0
|
refs/heads/master
| 2020-12-24T06:39:03.559675
| 2017-04-19T09:56:53
| 2017-04-19T09:56:53
| 73,465,795
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 933
|
py
|
from qubit.config import MQ_BROKER, REDIS_BACKEND
TIMEZONE = 'Europe/London'
ENABLE_UTC = True
BROKER_URL = MQ_BROKER
CELERY_RESULT_BACKEND = REDIS_BACKEND
CELERY_ACCEPT_CONTENT = ['application/json', 'application/x-python-serialize']
CELERY_TASK_RESULT_EXPIRES = 18000 # 5 hours.
CELERY_ALWAYS_EAGER = False
CELERY_DEFAULT_QUEUE = 'qubit.tasks.default'
CELERY_DEFAULT_EXCHANGE = 'qubit.tasks.default'
CELERY_DEFAULT_ROUTING_KEY = 'default'
# These settings are used to fix the `celeryev.xxx queue huge length` problem:
# http://stackoverflow.com/questions/30227266/what-is-the-celeryev-queue-for
# http://stackoverflow.com/questions/17778715/celeryev-queue-in-rabbitmq-becomes-very-large
# DOC:
# http://celery.readthedocs.io/en/latest/configuration.html#celery-event-queue-ttl
CELERY_SEND_EVENTS = True
CELERY_EVENT_QUEUE_TTL = 60
CELERY_EVENT_QUEUE_EXPIRES = 60 # Will delete all celeryev. queues without consumers after 1 minute.
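# How a Celery app would typically consume this module (a sketch; the exact
# module path is assumed from this file's location):
#   from celery import Celery
#   app = Celery('qubit')
#   app.config_from_object('qubit.io.celery.config')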
|
[
"ryankung@ieee.org"
] |
ryankung@ieee.org
|
5cc99ebec398364037bbd43f6cbea2dbcaec88f0
|
76795255ca4395c0868ad29443c1afe289061acb
|
/colonizer/wish/scripts/run_test.py
|
5b103ebe359d94d5af93c3b00b2e59bfa70e8608
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
vmenajr/mongo
|
337f3c779f2e2469711b051c0063feca76b2500c
|
77cfdb1dfc658c592430d86b1359c6c45e031cf9
|
refs/heads/master
| 2021-04-03T10:04:54.571211
| 2021-03-24T15:15:00
| 2021-03-24T15:15:00
| 125,252,199
| 7
| 7
| null | 2019-11-22T22:59:20
| 2018-03-14T17:50:53
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 38,406
|
py
|
import json
from bson.json_util import dumps
import paramiko
import argparse
import yaml
import pymongo
import concurrent.futures
import os
import datetime
from pymongo import MongoClient
import time
import subprocess
import multiprocessing
from datetime import timedelta
os_user = 'colonizer'
# for testing
#os_user = 'ubuntu'
home_dir = '/home/' + os_user + '/'
ssh_dir = home_dir + '.ssh/'
ycsb_dir = home_dir + 'ycsb-0.12.0/'
#ycsb_dir = home_dir + 'YCSB-master/ycsb-mongodb/'
test_result_dir = 'test_results/'
dbpath = '/data'
servers = {}
servers_list = []
servers_file = 'cluster.json'
test_file = 'hosts_sharded_cluster.json'
workload_template = 'workload_template'
hosts_per_shard = 3
device_shard = '/dev/nvme0n1'
device_other = '/dev/xvdg'
all_mongodb_roles = ['configs', 'shards', 'mongos', 'replica_set', 'standalone']
all_mongodb_roles_reverse = ['standalone', 'replica_set', 'mongos', 'shards', 'configs']
all_roles = all_mongodb_roles + ['clients']
logpath = '/data/logs/'
other_client = 'false'
num_of_clients = ["4", "8", "16", "32"]
#num_of_clients = ["2", "4"]
def get_hosts():
servers_list = []
mongos_list = []
with open(servers_file) as data_file:
servers = json.load(data_file)
with open(test_file) as tests:
hosts = json.load(tests)
if 'client' in servers['inventory']['value']:
hosts['clients'] = []
for index, mv in enumerate(servers['inventory']['value']['client'][0]['private']):
hosts['clients'].append({'hostname' : mv})
if 'configs' in servers['inventory']['value']:
hosts['configs'] = []
for index, mv in enumerate(servers['inventory']['value']['configs'][0]['private']):
if mv not in servers_list:
servers_list.append(mv)
config = { 'hostname' : mv, 'port': '27017', 'dbpath': '/data', 'logpath': '/data/mongo.log', 'role': 'config' }
hosts['configs'].append(config)
if 'mongos' in servers['inventory']['value']:
hosts['mongos'] = []
for index, mv in enumerate(servers['inventory']['value']['mongos'][0]['private']):
if mv not in servers_list:
servers_list.append(mv)
mongos = { 'hostname' : mv, 'port': '27017', 'dbpath': '/data', 'logpath': '/data/mongo.log', 'role': 'mongos' }
hosts['mongos'].append(mongos)
mongos_list.append(mv + ':' + '27017')
#hosts['mongodbUrl' + str(index+1)] = 'mongodb://' + ','.join(mongos_list) + hosts['url_options'];
# Adjust the test to run with 6 mongos in the connection string
#if index == 5:
# hosts['mongodbUrl' + str(index+1)] = 'mongodb://' + ','.join(mongos_list) + hosts['url_options'];
conn_str = 'mongodb://' + ','.join(mongos_list)
hosts['mongodbUrl'] = conn_str + hosts['url_options'];
if 'shards' in servers['inventory']['value']:
hosts['shards'] = []
shards = []
for index, mv in enumerate(servers['inventory']['value']['shards'][0]['private']):
            shard_number = index / hosts_per_shard  # integer division (Python 2)
shard = { 'hostname' : mv, 'port': '27017', 'dbpath': '/data', 'logpath': '/data/mongo.log', 'role': 'secondary', 'priority': 1 }
if (index % hosts_per_shard == 0):
shard['priority'] = 5
shard['role'] = 'primary'
                # append the (still empty) accumulator as this shard's member
                # list, then rebind it so the next shard gets a fresh list
                hosts['shards'].append(shards)
                shards = []
hosts['shards'][shard_number].append(shard)
if mv not in servers_list:
servers_list.append(mv)
hosts['servers_list'] = servers_list
with open(args.test_file, 'w') as data_file:
json.dump(hosts, data_file, indent=4, sort_keys=True)
def setup_mongodb(hosts):
config_list = []
shards = []
config_str = ""
if 'configs' in hosts:
repl_name = hosts['cluster_name'] + '_Cfg'
for index, mv in enumerate(hosts['configs']):
dbpath = hosts['dbpath'] + '/' +repl_name + '_' + str(index) + '/'
mv['dbpath'] = dbpath
if (hosts['config_server_type'] == 'CSRS'):
update_mongod_config(mv, dbpath, repl_name, 'configsvr', 'wiredTiger')
ssh_exe(mv['hostname'], 'sudo blockdev --setra 0 ' + device_other)
else:
update_mongod_config(mv, dbpath, '', 'configsvr', hosts['storage_engine'])
if hosts['storage_engine'].lower() == 'wiredtiger':
ssh_exe(mv['hostname'], 'sudo blockdev --setra 0 ' + device_other)
else:
ssh_exe(mv['hostname'], 'sudo blockdev --setra 32 ' + device_other)
config_file_no_auth = dbpath + mv['hostname'] + '.' + mv['port'] + '.no_auth.mongod.conf'
config_file_auth = dbpath + mv['hostname'] + '.' + mv['port'] + '.auth.mongod.conf'
if 'user' in hosts:
mv['config_file'] = config_file_auth
else:
mv['config_file'] = config_file_no_auth
ssh_exe(mv['hostname'], 'mongod -f ' + config_file_no_auth)
config_list.append(mv['hostname'] + ':' + mv['port'])
if (hosts['config_server_type'] == 'CSRS'):
print('Wait for 10 seconds before initialing CSRS')
time.sleep(10)
init_repl(repl_name, hosts['configs'])
config_str = repl_name + '/' + ','.join(config_list)
else:
config_str = ','.join(config_list)
if 'shards' in hosts:
for num, mv in enumerate(hosts['shards']):
repl_name = hosts['cluster_name'] + '_Shard_' + str(num)
shard_list = []
for index, nv in enumerate(hosts['shards'][num]):
dbpath = hosts['dbpath'] + '/' + repl_name + '_' + str(index) + '/'
nv['dbpath'] = dbpath
update_mongod_config(nv, dbpath, repl_name, 'shardsvr', hosts['storage_engine'])
if hosts['storage_engine'].lower() == 'wiredtiger':
ssh_exe(nv['hostname'], 'sudo blockdev --setra 0 ' + device_shard)
else:
ssh_exe(nv['hostname'], 'sudo blockdev --setra 32 ' + device_shard)
config_file_no_auth = dbpath + nv['hostname'] + '.' + nv['port'] + '.no_auth.mongod.conf'
config_file_auth = dbpath + nv['hostname'] + '.' + nv['port'] + '.auth.mongod.conf'
if 'user' in hosts:
nv['config_file'] = config_file_auth
else:
nv['config_file'] = config_file_no_auth
ssh_exe(nv['hostname'], 'mongod -f ' + config_file_no_auth)
shard_list.append(nv['hostname'] + ':' + nv['port'])
print('Wait for 10 seconds before initialing shards')
time.sleep(10)
init_repl(repl_name, mv)
shard = repl_name + '/' + ','.join(shard_list)
shards.append(shard)
if 'user' in hosts:
conn_str = 'mongodb://' + ','.join(shard_list) + '/replicaSet=' + repl_name
add_user_mongo(conn_str, hosts['user'], hosts['password'])
if 'mongos' in hosts:
mongos_list = []
for index, mv in enumerate(hosts['mongos']):
dbpath = hosts['dbpath'] + '/' + hosts['cluster_name'] + '_' + str(index) + '/'
mv['dbpath'] = dbpath
update_mongos_config(mv, dbpath, config_str)
config_file_no_auth = dbpath + mv['hostname'] + '.' + mv['port'] + '.no_auth.mongos.conf'
config_file_auth = dbpath + mv['hostname'] + '.' + mv['port'] + '.auth.mongos.conf'
if 'user' in hosts:
mv['config_file'] = config_file_auth
else:
mv['config_file'] = config_file_no_auth
ssh_exe(mv['hostname'], 'mongos -f ' + config_file_no_auth)
if index == 0:
print('Wait for 10 seconds before adding shards')
time.sleep(10)
init_cluster(mv['hostname'], mv['port'], shards)
mongos_list.append(mv['hostname'] + ':' + mv['port'])
#hosts['mongodbUrl' + str(index+1)] = 'mongodb://' + ','.join(mongos_list) + hosts['url_options'];
#if 'user' in hosts:
# hosts['mongodbUrl' + str(index+1)] = 'mongodb://' + hosts['user'] + ':' + hosts['password'] + '@' + ','.join(mongos_list) + hosts['url_options'];
conn_str = 'mongodb://' + ','.join(mongos_list) + hosts['url_options'];
if 'user' in hosts:
add_user_mongo(conn_str, hosts['user'], hosts['password'])
conn_str = 'mongodb://' + hosts['user'] + ':' + hosts['password'] + '@' + ','.join(mongos_list) + hosts['url_options'];
hosts['mongodbUrl'] = conn_str
if 'replica_set' in hosts:
member_list = []
for index, mv in enumerate(hosts['replica_set']):
dbpath = hosts['dbpath'] + '/' + hosts['replica_set_name'] + '_' + str(index) + '/'
mv['dbpath'] = dbpath
update_mongod_config(mv, dbpath, hosts['replica_set_name'], '', hosts['storage_engine'])
config_file_no_auth = dbpath + mv['hostname'] + '.' + mv['port'] + '.no_auth.mongod.conf'
config_file_auth = dbpath + mv['hostname'] + '.' + mv['port'] + '.auth.mongod.conf'
if 'user' in hosts:
mv['config_file'] = config_file_auth
else:
mv['config_file'] = config_file_no_auth
ssh_exe(mv['hostname'], 'mongod -f ' + config_file_no_auth)
member_list.append(mv['hostname'] + ':' + mv['port'])
init_repl(hosts['replica_set_name'], hosts['replica_set'])
conn_str = 'mongodb://' + ','.join(member_list) + '/replicaSet=' + hosts['replica_set_name']
if 'user' in hosts:
add_user_mongo(conn_str, hosts['user'], hosts['password'])
conn_str = 'mongodb://' + hosts['user'] + ':' + hosts['password'] + '@' + ','.join(member_list) + '/replicaSet=' + hosts['replica_set_name']
hosts['mongodbUrl'] = conn_str
if 'standalone' in hosts:
dbpath = hosts['dbpath'] + '/'
hosts['standalone']['dbpath'] = dbpath
update_mongod_config(hosts['standalone'], dbpath, '', '', hosts['storage_engine'])
config_file_no_auth = dbpath + hosts['standalone']['hostname'] + '.' + hosts['standalone']['port'] + '.no_auth.mongod.conf'
config_file_auth = dbpath + hosts['standalone']['hostname'] + '.' + hosts['standalone']['port'] + '.auth.mongod.conf'
if 'user' in hosts:
hosts['standalone']['config_file'] = config_file_auth
else:
hosts['standalone']['config_file'] = config_file_no_auth
ssh_exe(hosts['standalone']['hostname'], 'mongod -f ' + config_file_no_auth)
conn_str = 'mongodb://' + hosts['standalone']['hostname'] + ':' + hosts['standalone']['port']
if 'user' in hosts:
add_user_mongo(conn_str, hosts['user'], hosts['password'])
conn_str = 'mongodb://' + hosts['user'] + ':' + hosts['password'] + '@' + hosts['standalone']['hostname'] + ':' + hosts['standalone']['port']
hosts['mongodbUrl'] = conn_str
with open(test_file, 'w') as data_file:
json.dump(hosts, data_file, indent=4, sort_keys=True)
if 'user' in hosts:
populate_keyFile(hosts)
shutdown_mongodb_all(hosts)
start_mongodb_all(hosts)
    generate_test_file(hosts, test_file)
def generate_test_file(hosts, test_file):
cluster_name = hosts['cluster_name']
for index, mv in enumerate(hosts['clients']):
        hosts['mongodbUrl1'] = 'mongodb://' + hosts['mongos'][index]['hostname'] + ':' + hosts['mongos'][index]['port'] + hosts['url_options']
        if 'user' in hosts:
            hosts['mongodbUrl1'] = 'mongodb://' + hosts['user'] + ':' + hosts['password'] + '@' + hosts['mongos'][index]['hostname'] + ':' + hosts['mongos'][index]['port'] + hosts['url_options']
with open('/tmp/'+ test_file, 'w') as data_file:
json.dump(hosts, data_file, indent=4, sort_keys=True)
scp_file(mv['hostname'], '/tmp/' + test_file, home_dir + test_file)
for index2, nv in enumerate(num_of_clients):
hosts['cluster_name'] = nv + 'clients_' + cluster_name
filename = nv + 'clients_' + test_file
with open('/tmp/'+ filename, 'w') as data_file:
json.dump(hosts, data_file, indent=4, sort_keys=True)
scp_file(mv['hostname'], '/tmp/' + filename, home_dir + filename)
def populate_keyFile(hosts):
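    # Internal auth: every node must share the same keyFile, readable only by
    # its owner (chmod 400), so generate it once here and scp it everywhere.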
os.system('openssl rand -base64 756 > ' + 'keyFile')
os.system('chmod 400 ' + 'keyFile')
for index, mv in enumerate(hosts['servers_list']):
scp_file(mv, 'keyFile', home_dir + 'keyFile')
run_cmd_all(hosts, 'chmod 400 ' + home_dir + 'keyFile', False)
def clean_dbpath(host):
ssh_exe(host['hostname'], 'rm -rf ' + host['dbpath'])
def clean_dbpath_all(hosts):
func_by_roles(hosts, all_mongodb_roles, clean_dbpath)
def shutdown_mongodb(host, user, password):
if user == "":
cmd = 'mongo --port ' + host['port'] + ' admin --eval "db.shutdownServer({force: true});"'
else:
cmd = 'mongo --port ' + host['port'] + ' --username ' + user + ' --password ' + password + ' admin --eval "db.shutdownServer({force: true});"'
ssh_exe(host['hostname'], cmd)
def shutdown_mongodb_all(hosts):
user = password = ''
if 'user' in hosts:
user = hosts['user']
password = hosts['password']
func_by_roles(hosts, all_mongodb_roles_reverse, shutdown_mongodb, user, password)
def start_mongodb(host, options):
if host['role'] == 'mongos':
ssh_exe(host['hostname'], 'mongos -f ' + host['config_file'] + ' ' + options)
else:
ssh_exe(host['hostname'], 'mongod -f ' + host['config_file'] + ' ' + options)
def start_mongodb_all(hosts):
func_by_roles(hosts, all_mongodb_roles, start_mongodb, '')
def scp_file(hostname, source, target):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname, username=os_user)
sftp = ssh.open_sftp()
try:
sftp.put(source, target, callback=None)
except IOError:
pass
ssh.close()
def ssh_exe(hostname, command):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
print('host: ' + hostname)
print('command: ' + command)
ssh.connect(hostname, username=os_user)
stdin, stdout, stderr = ssh.exec_command(command)
print stdout.readlines()
ssh.close()
def update_mongod_config(host, dbpath, repl_name, role, engine):
with open("mongod.conf", 'r') as conf_file:
data = yaml.load(conf_file)
try:
data['net']['port'] = host['port']
if (repl_name != ''):
data['replication'] = {}
data['replication']['replSetName'] = repl_name
if (role != ''):
data['sharding'] = {}
data['sharding']['clusterRole'] = role
data['storage']['dbPath'] = dbpath
if (engine == "mmapv1"):
data['storage']['engine'] = "mmapv1"
else:
data['storage']['engine'] = "wiredTiger"
# set the cache size for testing
#data['storage']['wiredTiger'] = { 'engineConfig' : { 'cacheSizeGB' : 1 } }
cmd = 'mkdir -p ' + dbpath
ssh_exe(host['hostname'], cmd)
cmd = 'mkdir -p ' + logpath
ssh_exe(host['hostname'], cmd)
data['systemLog']['path'] = logpath + 'mongodb.log'
except yaml.YAMLError as exc:
print(exc)
mongod_conf_file_no_auth = host['hostname'] + '.' + host['port'] + '.no_auth.mongod.conf'
with open('/tmp/' + mongod_conf_file_no_auth, 'w') as yaml_file:
yaml_file.write( yaml.safe_dump(data, default_flow_style=False))
target = dbpath + mongod_conf_file_no_auth
scp_file(host['hostname'], '/tmp/' + mongod_conf_file_no_auth, target)
data['security'] = {'authorization': 'enabled', 'keyFile': home_dir + 'keyFile' }
mongod_conf_file_auth = host['hostname'] + '.' + host['port'] + '.auth.mongod.conf'
with open('/tmp/' + mongod_conf_file_auth, 'w') as yaml_file:
yaml_file.write( yaml.safe_dump(data, default_flow_style=False))
target = dbpath + mongod_conf_file_auth
scp_file(host['hostname'], '/tmp/' + mongod_conf_file_auth, target)
def init_repl(repl_name, repl_hosts):
print ('initialize replica set')
client = MongoClient(repl_hosts[0]['hostname'], int(repl_hosts[0]['port']))
config = {'_id': repl_name, 'members': [] }
for index, mv in enumerate(repl_hosts):
member = {'_id': index, 'host': mv['hostname'] + ':' + mv['port']}
if 'priority' in mv:
member['priority'] = mv['priority']
config['members'].append(member)
try:
client.admin.command("replSetInitiate", config)
except Exception, e:
print(e)
client.close()
def update_mongos_config(host, dbpath, config_str):
with open("mongos.conf", 'r') as conf_file:
data = yaml.load(conf_file)
try:
cmd = 'mkdir -p ' + dbpath
ssh_exe(host['hostname'], cmd)
cmd = 'mkdir -p ' + logpath
ssh_exe(host['hostname'], cmd)
data['net']['port'] = host['port']
data['systemLog']['path'] = logpath + 'mongodb.log'
data['sharding']['configDB'] = config_str
except yaml.YAMLError as exc:
print(exc)
mongos_conf_file_no_auth = host['hostname'] + '.' + host['port'] + '.no_auth.mongos.conf'
with open('/tmp/' + mongos_conf_file_no_auth, 'w') as yaml_file:
yaml_file.write( yaml.safe_dump(data, default_flow_style=False))
target = dbpath + mongos_conf_file_no_auth
scp_file(host['hostname'], '/tmp/' + mongos_conf_file_no_auth, target)
data['security'] = {'keyFile': home_dir + 'keyFile' }
mongos_conf_file_auth = host['hostname'] + '.' + host['port'] + '.auth.mongos.conf'
with open('/tmp/' + mongos_conf_file_auth, 'w') as yaml_file:
yaml_file.write( yaml.safe_dump(data, default_flow_style=False))
target = dbpath + mongos_conf_file_auth
scp_file(host['hostname'], '/tmp/' + mongos_conf_file_auth, target)
def init_cluster(hostname, port, shards):
client = MongoClient(hostname, int(port))
for index, mv in enumerate(shards):
client.admin.command("addShard", mv)
print ('adding shard: ' + mv)
client.close()
def add_user_mongo(conn_str, user, password):
client = MongoClient(conn_str)
client.admin.add_user(user, password, roles=['root'])
client.close()
def parse_workload_file(workload_file):
workloads = {}
with open(workload_file) as data_file:
for line in data_file:
key, value = line.partition("=")[::2]
if key.strip() != '':
workloads[key.strip()] = value.strip()
return workloads
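# e.g. a workload file line "recordcount=1000000" yields
# workloads['recordcount'] = '1000000' (values are kept as strings)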
def write_workload_file(file_name, workloads):
with open(file_name, 'w') as workload_file:
for key, value in sorted(workloads.iteritems()):
if key != '':
workload_file.write(key + '=' + str(value) + '\n')
def load_data(hosts):
workloads = parse_workload_file(workload_template)
del workloads['maxexecutiontime']
# for testing
#workloads['recordcount'] = '10000'
now = datetime.datetime.utcnow().isoformat()
test_run_dir = test_result_dir + hosts['cluster_name'] + '_' + now + '/'
print(test_run_dir)
os.system('mkdir -p ' + test_run_dir + '/workloads')
write_workload_file(test_run_dir + '/workloads/workload_load', workloads)
client = MongoClient(hosts['mongodbUrl'])
try:
client.admin.command({ 'enableSharding' : 'ycsb' })
except Exception, e:
print(e)
try:
client.admin.command({ 'shardCollection' : 'ycsb.usertable', 'key': {'_id':'hashed'}})
except Exception, e:
print(e)
client.config.settings.update( { '_id': 'balancer' }, { '$set' : { 'stopped': 'true' } }, upsert=True )
client.close()
cmd = ycsb_dir + '/bin/ycsb load mongodb -P ' + test_run_dir + 'workloads/workload_load -p mongodb.url=' + hosts['mongodbUrl1']
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()
with open (test_run_dir + '/load.ycsb.stdout', 'w') as stdout:
stdout.write(out)
with open (test_run_dir + '/load.ycsb.stderr', 'w') as stderr:
stderr.write(err)
print("data load completed")
def run_cmd_all(hosts, cmd, client):
if 'servers_list' not in hosts:
        servers_list = []  # a list (not a dict), so the .append calls below work
if 'mongos' in hosts:
for index, mv in enumerate(hosts['mongos']):
if mv['hostname'] not in servers_list:
servers_list.append(mv['hostname'])
if 'configs' in hosts:
for index, mv in enumerate(hosts['configs']):
if mv['hostname'] not in servers_list:
servers_list.append(mv['hostname'])
if 'shards' in hosts:
for num, mv in enumerate(hosts['shards']):
for index, nv in enumerate(hosts['shards'][num]):
if nv['hostname'] not in servers_list:
servers_list.append(nv['hostname'])
if 'replica_set' in hosts:
for index, mv in enumerate(hosts['replica_set']):
if mv['hostname'] not in servers_list:
servers_list.append(mv['hostname'])
if 'standalone' in hosts:
if hosts['standalone']['hostname'] not in servers_list:
                servers_list.append(hosts['standalone']['hostname'])
        hosts['servers_list'] = servers_list  # store the built list so the loop below can find it
    for index, mv in enumerate(hosts['servers_list']):
ssh_cmd = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -f ' + mv + ' ' + cmd
print(ssh_cmd)
process = subprocess.Popen(ssh_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if (client == True):
for index, mv in enumerate(hosts['clients']):
ssh_cmd = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -f ' + mv['hostname'] + ' ' + cmd
print(ssh_cmd)
process = subprocess.Popen(ssh_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def rotate_mongodb_logs(host, user, password):
host_port = host['hostname'] + ':' + host['port']
conn_str = 'mongodb://' + host_port
    if user != '':
        conn_str = 'mongodb://' + user + ':' + password + '@' + host_port
client = MongoClient(conn_str)
client.admin.command("logRotate")
client.close()
def rotate_mongodb_logs_all(hosts):
user = password = ''
if 'user' in hosts:
user = hosts['user']
password = hosts['password']
func_by_roles(hosts, all_mongodb_roles, rotate_mongodb_logs, user, password)
def clean_mongodb_logs(host):
ssh_exe(host['hostname'], 'rm -rf ' + logpath + '/mongodb.log.*')
def clean_mongodb_logs_all(hosts):
func_by_roles(hosts, all_mongodb_roles, clean_mongodb_logs)
def set_slowms(host, user, password):
host_port = host['hostname'] + ':' + host['port']
conn_str = 'mongodb://' + host_port
    if user != '':
        conn_str = 'mongodb://' + user + ':' + password + '@' + host_port
client = MongoClient(conn_str)
client.admin.command('profile', 0, slowms=-1)
client.close()
def launchRemoteSadc(hosts, maxSeconds):
run_cmd_all(hosts, '/usr/bin/pkill -u ' + os_user + ' sadc', True)
run_cmd_all(hosts, '/bin/rm -f /tmp/sadc.out', True)
run_cmd_all(hosts, '/usr/lib/sysstat/sadc -S XDISK 1 ' + str(maxSeconds) + ' /tmp/sadc.out', True)
def killCaptureRemoteSadc(hosts, test_run_dir):
run_cmd_all(hosts, '/usr/bin/pkill -u ' + os_user + ' sadc', True)
for index, mv in enumerate(hosts['servers_list']):
os.system('mkdir -p ' + test_run_dir + '/' + mv)
scp_get_file(mv, '/tmp/sadc.out', test_run_dir + '/' + mv + '/sadc.out')
for index, mv in enumerate(hosts['clients']):
os.system('mkdir -p ' + test_run_dir + '/' + mv['hostname'])
scp_get_file(mv['hostname'], '/tmp/sadc.out', test_run_dir + '/' + mv['hostname'] + '/sadc.out')
#run_cmd_all(hosts, '/bin/rm -f /tmp/sadc.out', True)
def scp_get_file(hostname, remotepath, localpath):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname, username=os_user)
sftp = ssh.open_sftp()
try:
sftp.get(remotepath, localpath, callback=None)
except IOError:
pass
ssh.close()
def scp_mongodb_logs(host, test_run_dir):
local_log_dir = test_run_dir + '/' + host['role'] + '_' + host['hostname'] + '_' + host['port']
os.system('mkdir -p ' + local_log_dir)
ssh_exe(host['hostname'], 'tar -cvzf ' + '/tmp/mongodblogs.tar.gz ' + logpath)
os.system('scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -q ' + host['hostname'] + ':/tmp/mongodblogs.tar.gz ' + local_log_dir)
#os.system('scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -q ' + host['hostname'] + ':' + logpath + '/mongodb.log* ' + local_log_dir)
if host['role'] != 'mongos':
os.system('mkdir -p ' + local_log_dir + '/diagnostic.data')
os.system('scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -q ' + host['hostname'] + ':' + host['dbpath'] + '/diagnostic.data/* ' + local_log_dir + '/diagnostic.data/')
def scp_mongodb_logs_all(hosts, test_run_dir):
func_by_roles(hosts, all_mongodb_roles, scp_mongodb_logs, test_run_dir)
def check_replication_lags(hosts):
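    # Poll replSetGetStatus on every shard and block until the worst
    # secondary optime lag is at most 1 second.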
if 'shards' in hosts:
while True:
max_lag = 0
max_lag_host = ''
for num, mv in enumerate(hosts['shards']):
hosts_ports = mv[0]['hostname'] + ':' + mv[0]['port'] + ',' + mv[1]['hostname'] + ':' + mv[1]['port']
conn_str = 'mongodb://' + hosts_ports
if 'user' in hosts:
conn_str = 'mongodb://' + hosts['user'] + ':' + hosts['password'] + '@' + hosts_ports
client = MongoClient(conn_str)
rsStatus = client.admin.command("replSetGetStatus")
client.close()
                secondary_optimes = []
                primary_optime = 0
for index, nv in enumerate(rsStatus['members']):
if (nv['stateStr'] == "PRIMARY"):
primary_optime = nv['optimeDate']
elif ( nv['stateStr'] == "SECONDARY" ):
secondaryStat = {}
secondaryStat['name'] = nv['name']
secondaryStat['optime'] = nv['optimeDate']
secondary_optimes.append(secondaryStat)
for index, nv in enumerate(secondary_optimes):
lag = timedelta.total_seconds(primary_optime - nv['optime'])
if (lag > max_lag):
max_lag = lag
max_lag_host = nv['name']
if (max_lag > 1):
print( max_lag_host + " is lagging " + str(max_lag) + " seconds, waiting for 5 seconds")
time.sleep(5)
else:
break
def collect_conn_pool_stats(host, test_run_dir, testx, user, password):
local_log_dir = test_run_dir + '/' + host['role'] + '_' + host['hostname'] + '_' + host['port']
host_port = host['hostname'] + ':' + host['port']
conn_str = 'mongodb://' + host_port
    if user != '':
conn_str = 'mongodb://' + user + ':' + password + '@' + host_port
client = MongoClient(conn_str)
conn_pool_stats = client.admin.command('connPoolStats')
with open (local_log_dir + '/' + testx + '_conn_pool_stats.json', 'a') as stats:
stats.write('\n=== ' + datetime.datetime.utcnow().isoformat() + '\n')
stats.write(dumps(conn_pool_stats, indent=4))
shard_conn_pool_stats = client.admin.command('shardConnPoolStats')
with open (local_log_dir + '/' + testx + '_shard_conn_pool_stats.json', 'a') as shard_stats:
shard_stats.write('\n=== ' + datetime.datetime.utcnow().isoformat() + '\n')
shard_stats.write(dumps(shard_conn_pool_stats, indent=4))
client.close()
def collect_conn_pool_stats_all(hosts, test_run_dir, testx):
user = password = ''
if 'user' in hosts:
user = hosts['user']
password = hosts['password']
while True:
func_by_roles(hosts, ['mongos', 'shards'], collect_conn_pool_stats, test_run_dir, testx, user, password)
time.sleep(30)
def func_by_roles(hosts, roles, func, *args):
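    # Generic dispatcher: apply func(host, *args) to every host holding one of
    # the given roles; 'shards' is a list of replica sets, hence the nested loop.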
for role in roles:
if role in hosts:
if role == 'shards':
for num, mv in enumerate(hosts[role]):
for index, nv in enumerate(hosts[role][num]):
func(nv, *args)
else:
for index, mv in enumerate(hosts[role]):
func(mv, *args)
def create_log_dir(host, test_run_dir):
print(host)
local_log_dir = test_run_dir + '/' + host['role'] + '_' + host['hostname'] + '_' + host['port']
os.system('mkdir -p ' + local_log_dir)
def create_log_dir_all(hosts, test_run_dir):
func_by_roles(hosts, all_mongodb_roles, create_log_dir, test_run_dir)
def run_workloads(hosts, test_run_dir):
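    # For every mongodbUrl<i> in the test file, run each workload at each
    # configured thread count, scaling threadcount by the number of shards.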
num_shard = 1
workload_dir = test_run_dir + 'workloads/'
os.system('mkdir -p ' + workload_dir)
stats_dir = test_run_dir + 'stats/'
os.system('mkdir -p ' + stats_dir)
if 'shards' in hosts:
num_shard = len(hosts['shards'])
i = 1
# Adjust this number to run the tests with 6 mongos in the connection string
#i = 6
while True:
if 'mongodbUrl' + str(i) in hosts:
if i != 1:
# Adjust this number to run the tests with 6 mongos in the connection string
#if i != 6:
time.sleep(120)
print('mongodbUrl: ' + hosts['mongodbUrl' + str(i)])
for index1, mv in enumerate(hosts['workloads']):
workloads = parse_workload_file(workload_template)
for key, value in mv.iteritems():
workloads[key] = value
for index2, nv in enumerate(hosts['threads']):
check_replication_lags(hosts)
time.sleep(30)
if other_client != "true":
rotate_mongodb_logs_all(hosts)
workloads['threadcount'] = nv * num_shard
workload_file = 'workload_' + str(index1) + '_' + str(index2) + '_' + str(nv)
write_workload_file(workload_dir + workload_file, workloads)
with open (stats_dir + '/' + str(i) + '_mongos_' + workload_file + '.ycsb.stats', 'a') as stats:
stats.write('Test started at: ' + datetime.datetime.utcnow().isoformat() + '\n')
print('Running workload:' + str(index1) + ' threads: ' + str(nv))
if other_client != "true":
p = multiprocessing.Process(target=collect_conn_pool_stats_all, args=(hosts, test_run_dir, str(i) + '_mongos_' + workload_file))
p.start()
cmd = ycsb_dir + '/bin/ycsb run mongodb -P ' + workload_dir + workload_file + ' -p mongodb.url="' + hosts['mongodbUrl' + str(i)] + '"'
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    out, err = process.communicate()
with open (stats_dir + '/' + str(i) + '_mongos_' + workload_file + '.ycsb.stdout', 'w') as stdout:
stdout.write(out)
with open (stats_dir + '/' + str(i) + '_mongos_' + workload_file + '.ycsb.stderr', 'w') as stderr:
stderr.write(err)
with open (stats_dir + '/' + str(i) + '_mongos_' + workload_file + '.ycsb.stats', 'a') as stats:
stats.write('Test completed at: ' + datetime.datetime.utcnow().isoformat() + '\n')
if other_client != "true":
p.terminate()
time.sleep(30)
i += 1
else:
break
def scp_ycsb_stats(hosts, test_run_dir):
all_ycsb_stats_dir = test_run_dir + '/all_ycsb_stats/'
os.system('mkdir -p ' + all_ycsb_stats_dir)
for index, mv in enumerate(hosts['clients']):
stats_dir = all_ycsb_stats_dir + mv['hostname']
os.system('mkdir -p ' + stats_dir)
os.system('scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -q ' + mv['hostname'] + ':' + test_run_dir + '/stats/* ' + stats_dir)
def run_tests(hosts):
now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H-%M-%S-%f")
test_run_dir = test_result_dir + hosts['cluster_name'] + '/'
os.system('mkdir -p ' + test_run_dir)
if other_client != "true":
rotate_mongodb_logs_all(hosts)
clean_mongodb_logs_all(hosts)
launchRemoteSadc(hosts, 600000)
create_log_dir_all(hosts, test_run_dir)
try:
check = raw_input('Press Enter to start the tests')
except EOFError:
print ("Error: EOF or empty input!")
check = ""
print check
run_workloads(hosts, test_run_dir)
if other_client != "true":
        killCaptureRemoteSadc(hosts, test_run_dir)
scp_mongodb_logs_all(hosts, test_run_dir)
if (len(hosts['clients']) > 1):
scp_ycsb_stats(hosts, test_run_dir)
print('!!!!!!!!!!!!!!!!!!!!!')
    print('Test results are in ' + hosts['clients'][0]['hostname'] + ':' + home_dir + test_run_dir + ', please copy them to a safe place, otherwise they will be lost when the client machine is destroyed.')
print('!!!!!!!!!!!!!!!!!!!!!')
def get_logs(hosts, test_run_dir):
os.system('mkdir -p ' + test_run_dir)
    killCaptureRemoteSadc(hosts, test_run_dir)
scp_mongodb_logs_all(hosts, test_run_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--servers_file', help='servers json file, generated by "terraform output -json"')
parser.add_argument('-t', '--test_file', help='test json file. It includes the information for the MongoDB deployment, like storage engine, authentication, config server type, and the information related to YCSB testing, like workloads, threads')
parser.add_argument('-a', '--actions', help='the actions: it can be get_hosts, clean, start, stop, setup_mongodb, load, run')
parser.add_argument('-e', '--storage_engine', help='storage engine. It can be mmapv1 or wiredTiger')
parser.add_argument('-u', '--user', help='user name for the MongoDB deployment. If specified, it will create the deployment with authentication enabled')
parser.add_argument('-p', '--password', help='password for the MongoDB deployment. If user name is specified, but password is not specified, it will use the user name as the password')
parser.add_argument('-c', '--config_server_type', help='the type of the config server. It can be SCCC or CSRS')
parser.add_argument('-n', '--cluster_name', help='name for the cluster. It will be used as part of the dbpath, test result path')
parser.add_argument('-w', '--workload_template', help='workload file')
parser.add_argument('-o', '--other_client', help='the flag to set whether this is not the main client. If so, we will not collect stats from mongod')
args = parser.parse_args()
if args.servers_file:
servers_file = args.servers_file
if args.test_file:
test_file = args.test_file
with open(test_file) as tests:
hosts = json.load(tests)
if args.storage_engine:
if args.storage_engine.lower() == "mmap" or args.storage_engine.lower() == "mmapv1":
hosts['storage_engine'] = 'mmapv1'
if args.storage_engine.lower() == "wt" or args.storage_engine.lower() == "wiredtiger":
hosts['storage_engine'] = 'wiredTiger'
if args.user:
hosts['user'] = args.user
if args.password:
hosts['password'] = args.password
else:
hosts['password'] = args.user
if args.cluster_name:
hosts['cluster_name'] = args.cluster_name
if args.config_server_type:
if args.config_server_type.lower() == 'sccc':
hosts['config_server_type'] = 'SCCC'
if args.config_server_type.lower() == 'csrs':
hosts['config_server_type'] = 'CSRS'
if args.workload_template:
workload_template = args.workload_template
if args.other_client:
other_client = args.other_client
if args.actions:
actions = args.actions.split(',')
for index, mv in enumerate(actions):
if mv == "all":
get_hosts(hosts)
setup_mongodb(hosts)
load_data(hosts)
run_tests(hosts)
if mv == "get_hosts":
                get_hosts(hosts)
if mv == "setup_mongodb":
setup_mongodb(hosts)
if mv == "start":
start_mongodb_all(hosts)
if mv == "shutdown":
shutdown_mongodb_all(hosts)
if mv == "clean":
shutdown_mongodb_all(hosts)
clean_dbpath_all(hosts)
if mv == "load":
load_data(hosts)
if mv == "run":
run_tests(hosts)
if mv == "restart_mongos_no_auto_split":
user = password = ''
if 'user' in hosts:
user = hosts['user']
password = hosts['password']
func_by_roles(hosts, ['mongos'], shutdown_mongodb, user, password)
func_by_roles(hosts, ['mongos'], start_mongodb, '--noAutoSplit')
if mv == "get_logs":
get_logs(hosts)
if mv == "set_slowms":
user = password = ''
if 'user' in hosts:
user = hosts['user']
password = hosts['password']
func_by_roles(hosts, ['shards'], set_slowms, user, password)
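# Example invocation (a sketch -- the script name, test json and workload file
# are placeholders; adapt them to your deployment):
#   python run_ycsb_tests.py -t test.json -e wiredTiger -n perf_cluster \
#       -a setup_mongodb,load,run -w workload_template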
|
[
"vickmena@gmail.com"
] |
vickmena@gmail.com
|
509df8769c60d81ad8721b75fff1f40f28ba7b29
|
c78614f196777624efb0663342ec1de7ebd44655
|
/grundlaggande_programvareutveckling/Week1Exercises/Week1Exercises/week1/src/samples/ClassObjects.py
|
066722a1d04109070750f8e44a36684e6b7105f0
|
[] |
no_license
|
Dunkaburk/gruprog_1
|
4cb99b07bd2ba4b96d5f819f02906a9f9982dd8b
|
088e918843c0c3e14690169eb09034a2f3d818c6
|
refs/heads/main
| 2023-08-31T20:50:01.540508
| 2021-10-13T13:35:55
| 2021-10-13T13:35:55
| 407,120,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,060
|
py
|
# package samples
# To package data of different types we use class objects (remember: arrays may only
# use same type of data). The data should in some sense "belong together".
# I.e. an object is a package of variables of possibly different types
# describing some concept (a car, a dog, a hero, ... any noun ...)
#
# To create class objects we first must declare a class describing the variables packaged (the
# instance variables). I.e. a class is a blue print of the objects to be created.
#
# When the class is declared we may create objects by calling the class like a
# function (Python has no new-operator; the call invokes the initializer).
#
# When declaring a class we also automatically define a new type (so we can
# annotate a variable with the class type; annotations are optional in Python).
#
#
def class_objects_program():
    # A class also introduces a new type (Dog). Use the type to annotate the variable
d1: Dog = Dog() # Must instantiate, i.e. create a dog object named d1 (using class initializer/constructor)
print(d1.age) # Get value of contained variable using '.' "dot"-notation and variable name.
print(f"{d1.name} is {d1.age} years old") # prints default values
d1.name = "Fido" # Assign values to variables in dog object, use "dot"-notation
d1.age = 3
d1.age += 1 # Getting older ...
print(f"{d1.name} is {d1.age} years old")
d2: Dog = Dog() # Create another dog. Same class used (class is a blue print)!
d2.name = "Lassie"
d2.age = 14
if d1.age > d2.age:
print(f"{d1.name} is older")
else:
print(f"{d2.name} is older")
# --- A class -----
# Class declaration specifies a name and instance variables.
# This class captures the concept of a dog
class Dog:
# Two instance variables, with default values
# NOTE: This is not how you normally do it - instead you
# use class initializers / constructors. More on this later.
name = "Sprocket" # A Dog has a name and... (default value null)
age = 0 # ... and age (default value 0)
if __name__ == "__main__":
class_objects_program()
|
[
"jonte8000@hotmail.com"
] |
jonte8000@hotmail.com
|
8cc063e3aace2a9b498abba1696ccf45d3457f7e
|
d01b06289196407adb2eebd37310b3e307965e77
|
/tworaymodel.py
|
d8bba479234fdf8bb696b227b9b200be5b778d67
|
[
"MIT"
] |
permissive
|
Dhanesh-raj/Two-ray-ground-reflection-model
|
fceb2a9d042df9ebb84e34acee827f8ed13efef2
|
7465a100822950d4fdd62fdcf2f50e2b6a1f60b0
|
refs/heads/master
| 2023-03-17T11:12:36.811484
| 2020-03-08T08:00:16
| 2020-03-08T08:00:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,007
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import os
# 4 set of bands to simulate (4 subplots)
b1 = ['900']
b2 = ['800', '900', '2100']
b3 = ['800', '900', '1800', '2100']
b4 = ['800', '900', '1800', '2100', '2600']
# u value used in the simplified two-ray model, assuming the two rays always
# combine coherently with a reflection factor |Γ|, so the sum is multiplied by
# u = 1 + Γ. (FSPL is the u = 1 case: only one ray.)
u = 1.6
R = -0.9  # R (or Γ): reflection factor used in the two-ray ground-reflection
          # model (in general it depends on the angle of incidence)
ht = 6 # transmitter height
hr = 4 # receiver height
maxd = 25 # Maximum distance from transmitter (m)
d = np.linspace(1, maxd, 2000)
d_ref = np.sqrt((ht + hr) ** 2 + d ** 2)
d_los = np.sqrt((ht - hr) ** 2 + d ** 2)
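# Path geometry: the reflected ray travels the hypotenuse over (ht + hr, d),
# the LOS ray over (ht - hr, d). E.g. at d = 10 m with ht = 6, hr = 4:
# d_los = sqrt(2**2 + 10**2) ~ 10.20 m, d_ref = sqrt(10**2 + 10**2) ~ 14.14 m.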
G_los = 1
G_gr = 1
bandslist = [b1, b2, b3, b4]
fig, ax = plt.subplots(2, 2, figsize=(20,15))
fig.suptitle(f'Models comparison for: ht={ht}m, hr={hr}m, Γ={R} and u={u} simplified model')
ax = ax.ravel()
for i, bands in enumerate(bandslist):
tworayloss = 0
freespaceloss = 0
freq = np.asarray(bands).astype(int)
lam = 3 * 10 ** 2 / freq
    for wl in lam:
        phi = 2 * np.pi * (d_ref - d_los) / wl
        loscoef = np.sqrt(G_los) / d_los
        reflcoef = R * np.sqrt(G_gr) * np.exp(-1j * phi) / d_ref
        rs = wl * (loscoef + reflcoef) / (4 * np.pi)
        tworayloss += 10*np.log10((abs(rs))**2)
        freespaceloss += 20*np.log10(wl / (4 * np.pi * d_los))
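    # u-model: FSPL plus 20*log10(u) per band (about +4.1 dB per band for u = 1.6)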
freespace_u = 10*len(freq)*np.log10(u**2) + freespaceloss
norm = max(tworayloss[0], freespace_u[0])
tworayloss = tworayloss - norm
freespaceloss = freespaceloss - norm
freespace_u = freespace_u-norm
ax[i].semilogx(d, tworayloss, d, freespace_u, d, freespaceloss)
p = (1 - np.sum(tworayloss>freespace_u)/len(d))*100
ax[i].text(1, 0.90*min(tworayloss), f'{p:0.2f}% of values (u={u} model)\ngreater than analytical model')
ax[i].text(1, min(tworayloss), f'Mean diff. [(u={u}), analytical two-ray]:='
f'{np.mean(freespace_u-tworayloss):+.1f}dB\n'
f'Mean diff. [FSPL, analytical two-ray]:='
f'{np.mean(freespaceloss-tworayloss):+.1f}dB)')
ax[i].legend((f'Two-Ray ground-reflection\n analytical model (Γ={R})', f'u={u}', 'Free Space (FSPL)'), loc=6)
bandsstring = ', '.join(freq.astype(str))
bandsstring = '(' + bandsstring + ')'
fname = f'ht = {ht}m, hr={hr}m, Γ={R}, u={u}'
title = f'Bands={bandsstring}MHz'
ax[i].set_title(title)
ax[i].set_xlabel('Distance (m)')
ax[i].set_ylabel('Normalized Path Loss (dB)')
xticks = np.append(1, np.linspace(5, maxd, int(maxd/5)).astype(int))
ax[i].set_xticks(xticks)
ax[i].set_xticklabels(xticks.astype(str))
plt.savefig(os.getcwd()+'\\Figures\\'+fname+'.png')
# plt.show()
|
[
"noreply@github.com"
] |
Dhanesh-raj.noreply@github.com
|
e283276355eca71bd1e71714aca213139b8934d2
|
7b11794013b6c186dad756f5682d038e4eb527d4
|
/fuber/models/taxi.py
|
e234ef47bdbd170d6a9e90b6bda04ef5ebe90fab
|
[] |
no_license
|
himani93/Fuber-Taxi
|
6e9af17d2ecb3877307504fad187d530ba9b8ac8
|
ba3a0e80e17f830a83ae1df51c483250788d471a
|
refs/heads/master
| 2020-03-09T21:59:34.509058
| 2018-04-09T06:42:51
| 2018-04-09T06:42:51
| 129,024,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,942
|
py
|
import helper
from exceptions import (
InvalidTaxiLicenseNumberException,
InvalidTaxiColorException,
InvalidLocationException,
)
from location import Location
class Taxi(object):
def __init__(self, license_no, color="yellow", location=None):
if not license_no:
raise InvalidTaxiLicenseNumberException("{} is not valid".format(license_no))
self._license_no = license_no
if not color:
raise InvalidTaxiColorException("{} is not valid".format(color))
self._color = color
if color == "pink":
self._category = "pink"
else:
self._category = "default"
self.available = True
self.location = location
self._id = helper.get_id()
def __repr__(self):
return "Taxi({} - {} - {})".format(self.license_no, self.color, self.available)
def __str__(self):
return "Taxi({} - {} - {})".format(self.license_no, self.color, self.available)
def to_dict(self):
return {
"id": self.id,
"license_no": self.license_no,
"color": self.color,
"available": self.available,
"category": self.category,
"location": self.location.to_dict() if self.location else self.location,
}
@property
def license_no(self):
return self._license_no
@property
def color(self):
return self._color
@property
def category(self):
return self._category
@property
def id(self):
return self._id
@property
def location(self):
return self._location
@location.setter
def location(self, loc):
if loc is not None and type(loc) is not Location:
raise InvalidLocationException("{} is not of Location type".format(loc))
self._location = loc
def is_pink(self):
return True if self.category == "pink" else False
|
[
"himani93@gmail.com"
] |
himani93@gmail.com
|
965ff358445f18342fd71501800fa948bcb92a9c
|
f7f0df40f586014cc17961cb3f12cdfa8f87df56
|
/src/Test1/test6.py
|
e723e5e445c659e7cf90e6dc3f64cffbf1570fa9
|
[] |
no_license
|
muxuehen/pythonStuty
|
5274a1a8c97bf67de19f07e07357d9710dc4967b
|
c99f8722218c4a08aefdcbe1987fa567e07cf10c
|
refs/heads/master
| 2020-03-29T17:17:42.790847
| 2014-12-09T09:09:57
| 2014-12-09T09:09:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
#coding=utf-8
'''
Created on 2014年12月4日
@author:
'''
i = 9
print i
|
[
"zhangxuli@boco.com"
] |
zhangxuli@boco.com
|
93e15d15e8a81c03ff3105f917050a361ae6a3d9
|
1bd53b731c7b2fce066204ab6e545020c6213468
|
/code/neural_network.py
|
9a4390e19fe96e6f4b9d8f6b7abbd250c4ae0cfa
|
[] |
no_license
|
dougbrion/machine_learning_cw
|
2624fd794a415323e4d9391653738e2230f83245
|
d1e24df477c66c1b78416bf96c17d77e873abdee
|
refs/heads/master
| 2020-04-19T14:03:22.503598
| 2019-01-29T22:00:55
| 2019-01-29T22:00:55
| 168,232,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,186
|
py
|
import tensorflow as tf
import helpers as hp
import numpy as np
def softmax_fn(_X, _inputs, _units):
W = tf.Variable(tf.random_normal([_inputs, _units]), name='weight')
b = tf.Variable(tf.random_normal([_units]), name='bias')
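    # note: with a single output unit (as this net uses below), softmax
    # normalizes over one value and therefore always returns 1.0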
y = tf.nn.softmax(tf.matmul(_X, W) + b)
return y, W, b
def selu_fn(_X, _inputs, _units):
W = tf.Variable(tf.random_normal([_inputs, _units]), name='weight')
b = tf.Variable(tf.random_normal([_units]), name='bias')
y = tf.nn.selu(tf.add(tf.matmul(_X, W) , b))
return y, W, b
def relu_fn(_X, _inputs, _units):
W = tf.Variable(tf.random_normal([_inputs, _units]), name='weight')
b = tf.Variable(tf.random_normal([_units]), name='bias')
y = tf.nn.relu(tf.add(tf.matmul(_X, W) , b))
return y, W, b
def sigmoid_fn(_X, _inputs, _units):
W = tf.Variable(tf.random_normal([_inputs, _units]), name='weight')
b = tf.Variable(tf.random_normal([_units]), name='bias')
y = tf.nn.sigmoid(tf.add(tf.matmul(_X, W) , b))
return y, W, b
def tanh_fn(_X, _inputs, _units):
W = tf.Variable(tf.random_normal([_inputs, _units]), name='weight')
b = tf.Variable(tf.random_normal([_units]), name='bias')
y = tf.nn.tanh(tf.add(tf.matmul(_X, W) , b))
return y, W, b
def calc_error_L1(_y, _pred):
print("Loss Function L1")
cost = tf.reduce_mean(tf.abs(_y - _pred))
return cost
def huber_error(_y, _pred, _delta=1.0):
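    # Huber loss: quadratic for |y - pred| < delta, linear beyond it, so
    # outliers are penalized less severely than under plain MSE.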
residual = tf.abs(_y - _pred)
cond = tf.less(residual, _delta)
small_res = 0.5 * tf.square(residual)
large_res = _delta * residual - 0.5 * tf.square(_delta)
cost = tf.reduce_mean(tf.where(cond, small_res, large_res))
return cost
def cost_function(_y, _pred):
cost = tf.reduce_mean(tf.square(_y - _pred))
return cost
def layers(_X, _y, _output_layer=0):
inputs = int(hp.num_features(_X))
hidden_layer_nodes = int((inputs + 1) / 2)
hidden_layer, hidden_weight, hidden_bias = relu_fn(_X, inputs, hidden_layer_nodes)
    if _output_layer == 0:
        print("Output Layer is ReLU")
        pred, weight, bias = relu_fn(hidden_layer, hidden_layer_nodes, 1)
    elif _output_layer == 1:
        print("Output Layer is SeLU")
        pred, weight, bias = selu_fn(hidden_layer, hidden_layer_nodes, 1)
    elif _output_layer == 2:
        print("Output Layer is Softmax")
        pred, weight, bias = softmax_fn(hidden_layer, hidden_layer_nodes, 1)
    elif _output_layer == 3:
        print("Output Layer is TanH")
        pred, weight, bias = tanh_fn(hidden_layer, hidden_layer_nodes, 1)
    elif _output_layer == 4:
        print("Output Layer is Sigmoid")
        pred, weight, bias = sigmoid_fn(hidden_layer, hidden_layer_nodes, 1)
    else:
        print("Output Layer is ReLU")
        pred, weight, bias = relu_fn(hidden_layer, hidden_layer_nodes, 1)
cost = cost_function(_y, pred)
W = [hidden_weight, weight]
b = [hidden_bias, bias]
return pred, cost, W, b
def neural_network(_train_X, _train_y, _test_X, _test_y, _epochs, _rate, _regularisation, _cross_val, _output_layer=0):
reg_type, reg_scale = _regularisation
X = tf.placeholder(tf.float32, [None, hp.num_features(_train_X)], name="input")
y = tf.placeholder(tf.float32, name="output")
pred, cost, W, b = layers(X, y, _output_layer)
lad = calc_error_L1(y, pred)
huber_loss = huber_error(y, pred)
print("Regularisation: ", _regularisation)
if reg_type == 1:
L1 = tf.contrib.layers.l1_regularizer(scale=reg_scale)
reg_cost = tf.contrib.layers.apply_regularization(L1, W)
elif reg_type == 2:
L2 = tf.contrib.layers.l2_regularizer(scale=reg_scale)
reg_cost = tf.contrib.layers.apply_regularization(L2, W)
else:
reg_cost = 0
cost += reg_cost
optimizer = tf.train.GradientDescentOptimizer(_rate).minimize(cost)
XyWb = [X, y, W, b]
with tf.Session() as sess:
if _cross_val == True:
return hp.cross_validation(sess, XyWb, _train_X, _train_y, _test_X, _test_y, optimizer, cost, huber_loss, _epochs, "nn")
else:
return hp.run(sess, XyWb, _train_X, _train_y, _test_X, _test_y, optimizer, cost, huber_loss, _epochs, "nn")
|
[
"db1415@ic.ac.uk"
] |
db1415@ic.ac.uk
|
84e879737e214d539d0b8ef455b15232c549954d
|
bdd738e4190ec53532a7278a7896a8053706d713
|
/Contents/Code/siteLubed.py
|
c48ec08070a44ca4cfd13ad450d1ea658dcdc7b1
|
[] |
no_license
|
mutluerol/PhoenixAdult.bundle
|
c7709a2572631bb4033634a3cb21f1960f1e3929
|
6305158702d6e14fd5fe85e5bac9d8d3a96ba604
|
refs/heads/master
| 2020-04-15T07:49:09.118583
| 2019-01-07T23:02:01
| 2019-01-07T23:02:01
| 164,502,693
| 0
| 1
| null | 2019-01-07T21:55:18
| 2019-01-07T21:55:18
| null |
UTF-8
|
Python
| false
| false
| 3,529
|
py
|
import PAsearchSites
import PAgenres
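# Note: Log, HTML, HTTP, Proxy and MetadataSearchResult are not imported here;
# they are globals injected by the Plex plug-in framework at runtime.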
def search(results,encodedTitle,title,searchTitle,siteNum,lang,searchByDateActor,searchDate,searchAll,searchSiteID):
url = 'http://lubed.com/video/' + searchTitle.lower().replace(" ","-")
searchResults = HTML.ElementFromURL(url)
searchResult = searchResults.xpath('//div[@class="details col-sm-6 col-md-3 order-md-2 mb-2"]')[0]
titleNoFormatting = searchResult.xpath('.//div[@class="row"]//div[@class="col-6 col-md-12"]//h1')[0].text_content()
Log("Result Title: " + titleNoFormatting)
cur = "/video/" + searchTitle.lower().replace(" ","-")
curID = cur.replace('/','_')
Log("ID: " + curID)
releasedDate = searchResult.xpath('.//div[@class="row"]//div[@class="col-6 col-md-12"]//p')[0].text_content()
girlName = searchResult.xpath('.//div[@class="row"]//div[@class="col-6 col-md-12"]//a')[0].text_content()
Log("CurID" + str(curID))
lowerResultTitle = str(titleNoFormatting).lower()
titleNoFormatting = girlName + " - " + titleNoFormatting + " [Lubed, " + releasedDate +"]"
score = 100
results.Append(MetadataSearchResult(id = curID + "|" + str(siteNum), name = titleNoFormatting, score = score, lang = lang))
return results
def update(metadata,siteID,movieGenres,movieActors):
temp = str(metadata.id).split("|")[0].replace('_', '/')
url = PAsearchSites.getSearchBaseURL(siteID) + temp
Log('url :' + url)
detailsPageElements = HTML.ElementFromURL(url)
metadata.studio = "Lubed"
# Summary
# paragraph = detailsPageElements.xpath('//p[@class="desc"]')[0].text_content()
# paragraph = paragraph.replace('&13;', '').strip(' \t\n\r"').replace('\n', '').replace(' ', '') + "\n\n"
# metadata.summary = paragraph[:-10]
tagline = "Lubed"
metadata.collections.clear()
metadata.tagline = tagline
metadata.collections.add(tagline)
metadata.title = detailsPageElements.xpath('//div[@class="details col-sm-6 col-md-3 order-md-2 mb-2"]//div[@class="row"]//div[@class="col-6 col-md-12"]//h1')[0].text_content()
# Genres
movieGenres.clearGenres()
    for genreName in ['60FPS', 'Lube', 'Raw', 'Wet', 'Sex', 'Ass', 'Pussy', 'Cumshot']:
movieGenres.addGenre(genreName)
# Actors
movieActors.clearActors()
titleActors = ""
actors = detailsPageElements.xpath('//div[@class="details col-sm-6 col-md-3 order-md-2 mb-2"]//div[@class="row"]//div[@class="col-6 col-md-12"]//a')
if len(actors) > 0:
for actorLink in actors:
actorPageURL = 'http://lubed.com' + actorLink.get("href")
actorPage = HTML.ElementFromURL(actorPageURL)
actorName = actorPage.xpath('//div[@class="col-md-3 order-md-2 mb-2 details"]//h1')[0].text_content()
titleActors = titleActors + actorName + " & "
actorPhotoURL = "http:" + actorPage.xpath('//div[@class="col-md-6 order-md-1 mb-4 image"]//a//img')[0].get("src")
movieActors.addActor(actorName,actorPhotoURL)
        titleActors = titleActors[:-3]
    # titleActors is assembled above but is not currently folded into metadata.title
# Posters
background = "http:" + detailsPageElements.xpath('//video[@id="player"]')[0].get('poster')
Log("BG DL: " + background)
metadata.art[background] = Proxy.Preview(HTTP.Request(background, headers={'Referer': 'http://www.google.com'}).content, sort_order = 1)
metadata.posters[background] = Proxy.Preview(HTTP.Request(background, headers={'Referer': 'http://www.google.com'}).content, sort_order = 1)
return metadata
|
[
"pahelper@sahasrahla.com"
] |
pahelper@sahasrahla.com
|
92988e5abacf6500ac71f7c3cc407f34fa9882ab
|
3b5f28ed1505c68f94ec1df496fe061d110294ce
|
/lixian.py
|
cd8a2380685cb4b548c5c15174ac20dd5d8358d0
|
[
"MIT"
] |
permissive
|
yuanlizbyy/xunlei-lixian
|
089d388fbf4023bfae217906268c19dde43528e1
|
fe96ee19c1af8a268dc39818a5e8d33ff71e50ee
|
refs/heads/master
| 2021-01-17T21:48:13.932068
| 2012-12-10T05:35:43
| 2012-12-10T05:35:43
| 7,854,959
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,808
|
py
|
__all__ = ['XunleiClient']
import urllib
import urllib2
import cookielib
import re
import time
import os.path
import json
from ast import literal_eval
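# Retry decorator: re-invokes f on any exception, sleeping per retry_sleeps
# (1 s ramping up to 60 s, ~68 attempts in total) before finally re-raising.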
def retry(f):
#retry_sleeps = [1, 1, 1]
retry_sleeps = [1, 2, 3, 5, 10, 20, 30, 60] + [60] * 60
def withretry(*args, **kwargs):
for second in retry_sleeps:
try:
return f(*args, **kwargs)
except:
import traceback
import sys
print "Exception in user code:"
traceback.print_exc(file=sys.stdout)
time.sleep(second)
raise
return withretry
class XunleiClient:
page_size = 100
bt_page_size = 9999
def __init__(self, username=None, password=None, cookie_path=None, login=True):
self.cookie_path = cookie_path
if cookie_path:
self.cookiejar = cookielib.LWPCookieJar()
if os.path.exists(cookie_path):
self.load_cookies()
else:
self.cookiejar = cookielib.CookieJar()
self.set_page_size(self.page_size)
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar))
if login:
if not self.has_logged_in():
if not username and self.has_cookie('.xunlei.com', 'usernewno'):
username = self.get_username()
if not username:
import lixian_config
username = lixian_config.get_config('username')
# if not username:
# raise NotImplementedError('user is not logged in')
if not password:
raise NotImplementedError('user is not logged in')
self.login(username, password)
else:
self.id = self.get_userid()
@retry
def urlopen(self, url, **args):
#print url
if 'data' in args and type(args['data']) == dict:
args['data'] = urlencode(args['data'])
return self.opener.open(urllib2.Request(url, **args), timeout=60)
def urlread(self, url, **args):
args.setdefault('headers', {})
headers = args['headers']
headers.setdefault('Accept-Encoding', 'gzip, deflate')
# headers.setdefault('Referer', 'http://lixian.vip.xunlei.com/task.html')
# headers.setdefault('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:11.0) Gecko/20100101 Firefox/11.0')
# headers.setdefault('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
# headers.setdefault('Accept-Language', 'zh-cn,zh;q=0.7,en-us;q=0.3')
response = self.urlopen(url, **args)
data = response.read()
if response.info().get('Content-Encoding') == 'gzip':
data = ungzip(data)
elif response.info().get('Content-Encoding') == 'deflate':
data = undeflate(data)
return data
def load_cookies(self):
self.cookiejar.load(self.cookie_path, ignore_discard=True, ignore_expires=True)
def save_cookies(self):
if self.cookie_path:
self.cookiejar.save(self.cookie_path, ignore_discard=True)
def get_cookie(self, domain, k):
if self.has_cookie(domain, k):
return self.cookiejar._cookies[domain]['/'][k].value
def has_cookie(self, domain, k):
return domain in self.cookiejar._cookies and k in self.cookiejar._cookies[domain]['/']
def get_userid(self):
if self.has_cookie('.xunlei.com', 'userid'):
return self.get_cookie('.xunlei.com', 'userid')
else:
raise Exception('Probably login failed')
def get_userid_or_none(self):
return self.get_cookie('.xunlei.com', 'userid')
def get_username(self):
return self.get_cookie('.xunlei.com', 'usernewno')
def get_gdriveid(self):
return self.get_cookie('.vip.xunlei.com', 'gdriveid')
def has_gdriveid(self):
return self.has_cookie('.vip.xunlei.com', 'gdriveid')
def get_referer(self):
return 'http://dynamic.cloud.vip.xunlei.com/user_task?userid=%s' % self.id
def set_cookie(self, domain, k, v):
c = cookielib.Cookie(version=0, name=k, value=v, port=None, port_specified=False, domain=domain, domain_specified=True, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={}, rfc2109=False)
self.cookiejar.set_cookie(c)
def set_gdriveid(self, id):
self.set_cookie('.vip.xunlei.com', 'gdriveid', id)
def set_page_size(self, n):
self.set_cookie('.vip.xunlei.com', 'pagenum', str(n))
def get_cookie_header(self):
def domain_header(domain):
root = self.cookiejar._cookies[domain]['/']
return '; '.join(k+'='+root[k].value for k in root)
return domain_header('.xunlei.com') + '; ' + domain_header('.vip.xunlei.com')
def is_login_ok(self, html):
return len(html) > 512
def has_logged_in(self):
id = self.get_userid_or_none()
if not id:
return False
#print self.urlopen('http://dynamic.cloud.vip.xunlei.com/user_task?userid=%s&st=0' % id).read().decode('utf-8')
self.set_page_size(1)
url = 'http://dynamic.cloud.vip.xunlei.com/user_task?userid=%s&st=0' % id
#url = 'http://dynamic.lixian.vip.xunlei.com/login?cachetime=%d' % current_timestamp()
r = self.is_login_ok(self.urlread(url))
self.set_page_size(self.page_size)
return r
def login(self, username, password):
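        # Xunlei login handshake: hit /check to obtain the verifycode cookie,
        # then post md5(encrypted_password + verifycode) to /sec2login/.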
cachetime = current_timestamp()
check_url = 'http://login.xunlei.com/check?u=%s&cachetime=%d' % (username, cachetime)
login_page = self.urlopen(check_url).read()
verifycode = self.get_cookie('.xunlei.com', 'check_result')[2:].upper()
password = encypt_password(password)
password = md5(password+verifycode)
login_page = self.urlopen('http://login.xunlei.com/sec2login/', data={'u': username, 'p': password, 'verifycode': verifycode})
self.id = self.get_userid()
self.set_page_size(1)
login_page = self.urlopen('http://dynamic.lixian.vip.xunlei.com/login?cachetime=%d&from=0'%current_timestamp()).read()
self.set_page_size(self.page_size)
assert self.is_login_ok(login_page), 'login failed'
self.save_cookies()
def logout(self):
#session_id = self.get_cookie('.xunlei.com', 'sessionid')
#timestamp = current_timestamp()
#url = 'http://login.xunlei.com/unregister?sessionid=%s&cachetime=%s&noCacheIE=%s' % (session_id, timestamp, timestamp)
#self.urlopen(url).read()
#self.urlopen('http://dynamic.vip.xunlei.com/login/indexlogin_contr/logout/').read()
ckeys = ["vip_isvip","lx_sessionid","vip_level","lx_login","dl_enable","in_xl","ucid","lixian_section"]
ckeys1 = ["sessionid","usrname","nickname","usernewno","userid"]
for k in ckeys:
self.set_cookie('.vip.xunlei.com', k, '')
for k in ckeys1:
self.set_cookie('.xunlei.com', k, '')
self.save_cookies()
def read_task_page_url(self, url):
page = self.urlread(url).decode('utf-8', 'ignore')
if not self.has_gdriveid():
gdriveid = re.search(r'id="cok" value="([^"]+)"', page).group(1)
self.set_gdriveid(gdriveid)
self.save_cookies()
tasks = parse_tasks(page)
for t in tasks:
t['client'] = self
pginfo = re.search(r'<div class="pginfo">.*?</div>', page)
match_next_page = re.search(r'<li class="next"><a href="([^"]+)">[^<>]*</a></li>', page)
return tasks, match_next_page and 'http://dynamic.cloud.vip.xunlei.com'+match_next_page.group(1)
def read_task_page(self, st, pg=None):
if pg is None:
url = 'http://dynamic.cloud.vip.xunlei.com/user_task?userid=%s&st=%d' % (self.id, st)
else:
url = 'http://dynamic.cloud.vip.xunlei.com/user_task?userid=%s&st=%d&p=%d' % (self.id, st, pg)
return self.read_task_page_url(url)
def read_tasks(self, st=0):
'''read one page'''
tasks = self.read_task_page(st)[0]
for i, task in enumerate(tasks):
task['#'] = i
return tasks
def read_all_tasks(self, st=0):
'''read all pages'''
all_tasks = []
tasks, next_link = self.read_task_page(st)
all_tasks.extend(tasks)
while next_link:
tasks, next_link = self.read_task_page_url(next_link)
all_tasks.extend(tasks)
for i, task in enumerate(all_tasks):
task['#'] = i
return all_tasks
def read_completed(self):
'''read first page of completed tasks'''
return self.read_tasks(2)
def read_all_completed(self):
'''read all pages of completed tasks'''
return self.read_all_tasks(2)
def read_history_page_url(self, url):
self.set_cookie('.vip.xunlei.com', 'lx_nf_all', urllib.quote('page_check_all=history&fltask_all_guoqi=1&class_check=0&page_check=task&fl_page_id=0&class_check_new=0&set_tab_status=11'))
page = self.urlread(url).decode('utf-8', 'ignore')
if not self.has_gdriveid():
gdriveid = re.search(r'id="cok" value="([^"]+)"', page).group(1)
self.set_gdriveid(gdriveid)
self.save_cookies()
tasks = parse_history(page)
for t in tasks:
t['client'] = self
pginfo = re.search(r'<div class="pginfo">.*?</div>', page)
match_next_page = re.search(r'<li class="next"><a href="([^"]+)">[^<>]*</a></li>', page)
return tasks, match_next_page and 'http://dynamic.cloud.vip.xunlei.com'+match_next_page.group(1)
def read_history_page(self, type=0, pg=None):
if pg is None:
url = 'http://dynamic.cloud.vip.xunlei.com/user_history?userid=%s&type=%d' % (self.id, type)
else:
url = 'http://dynamic.cloud.vip.xunlei.com/user_history?userid=%s&p=%d&type=%d' % (self.id, pg, type)
return self.read_history_page_url(url)
def read_history(self, type=0):
'''read one page'''
tasks = self.read_history_page(type)[0]
for i, task in enumerate(tasks):
task['#'] = i
return tasks
def read_all_history(self, type=0):
'''read all pages of deleted/expired tasks'''
all_tasks = []
tasks, next_link = self.read_history_page(type)
all_tasks.extend(tasks)
while next_link:
tasks, next_link = self.read_history_page_url(next_link)
all_tasks.extend(tasks)
for i, task in enumerate(all_tasks):
task['#'] = i
return all_tasks
def read_deleted(self):
return self.read_history()
def read_all_deleted(self):
return self.read_all_history()
def read_expired(self):
return self.read_history(1)
def read_all_expired(self):
return self.read_all_history(1)
def list_bt(self, task):
assert task['type'] == 'bt'
url = 'http://dynamic.cloud.vip.xunlei.com/interface/fill_bt_list?callback=fill_bt_list&tid=%s&infoid=%s&g_net=1&p=1&uid=%s&noCacheIE=%s' % (task['id'], task['bt_hash'], self.id, current_timestamp())
self.set_page_size(self.bt_page_size)
html = remove_bom(self.urlread(url)).decode('utf-8')
self.set_page_size(self.page_size)
sub_tasks = parse_bt_list(html)
for t in sub_tasks:
t['date'] = task['date']
return sub_tasks
def get_torrent_file_by_info_hash(self, info_hash):
url = 'http://dynamic.cloud.vip.xunlei.com/interface/get_torrent?userid=%s&infoid=%s' % (self.id, info_hash.upper())
response = self.urlopen(url)
torrent = response.read()
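        # the byte string below is Xunlei's UTF-8 alert page saying, roughly,
        # "Sorry, no matching torrent file was found!"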
if torrent == "<meta http-equiv='Content-Type' content='text/html; charset=utf-8' /><script>alert('\xe5\xaf\xb9\xe4\xb8\x8d\xe8\xb5\xb7\xef\xbc\x8c\xe6\xb2\xa1\xe6\x9c\x89\xe6\x89\xbe\xe5\x88\xb0\xe5\xaf\xb9\xe5\xba\x94\xe7\x9a\x84\xe7\xa7\x8d\xe5\xad\x90\xe6\x96\x87\xe4\xbb\xb6!');</script>":
raise Exception('Torrent file not found on xunlei cloud: '+info_hash)
assert response.headers['content-type'] == 'application/octet-stream'
return torrent
def get_torrent_file(self, task):
return self.get_torrent_file_by_info_hash(task['bt_hash'])
def add_task(self, url):
protocol = parse_url_protocol(url)
        assert protocol in ('ed2k', 'http', 'ftp', 'thunder', 'Flashget', 'qqdl', 'bt', 'magnet'), 'protocol "%s" is not supported' % protocol
from lixian_url import url_unmask
url = url_unmask(url)
protocol = parse_url_protocol(url)
        assert protocol in ('ed2k', 'http', 'ftp', 'bt', 'magnet'), 'protocol "%s" is not supported' % protocol
if protocol == 'bt':
return self.add_torrent_task_by_info_hash(url[5:])
elif protocol == 'magnet':
return self.add_magnet_task(url)
random = current_random()
check_url = 'http://dynamic.cloud.vip.xunlei.com/interface/task_check?callback=queryCid&url=%s&random=%s&tcache=%s' % (urllib.quote(url), random, current_timestamp())
js = self.urlopen(check_url).read().decode('utf-8')
qcid = re.match(r'^queryCid(\(.+\))\s*$', js).group(1)
qcid = literal_eval(qcid)
if len(qcid) == 8:
cid, gcid, size_required, filename, goldbean_need, silverbean_need, is_full, random = qcid
elif len(qcid) == 9:
cid, gcid, size_required, filename, goldbean_need, silverbean_need, is_full, random, ext = qcid
elif len(qcid) == 10:
cid, gcid, size_required, some_key, filename, goldbean_need, silverbean_need, is_full, random, ext = qcid
else:
raise NotImplementedError(qcid)
assert goldbean_need == 0
assert silverbean_need == 0
if url.startswith('http://') or url.startswith('ftp://'):
task_type = 0
elif url.startswith('ed2k://'):
task_type = 2
else:
raise NotImplementedError()
task_url = 'http://dynamic.cloud.vip.xunlei.com/interface/task_commit?'+urlencode(
{'callback': 'ret_task',
'uid': self.id,
'cid': cid,
'gcid': gcid,
'size': size_required,
'goldbean': goldbean_need,
'silverbean': silverbean_need,
't': filename,
'url': url,
'type': task_type,
'o_page': 'task',
'o_taskid': '0',
})
response = self.urlopen(task_url).read()
assert response == 'ret_task(Array)', response
def add_batch_tasks(self, urls, old_task_ids=None):
assert urls
urls = list(urls)
for url in urls:
if parse_url_protocol(url) not in ('http', 'ftp', 'ed2k', 'bt', 'thunder', 'magnet'):
raise NotImplementedError('Unsupported: '+url)
urls = filter(lambda u: parse_url_protocol(u) in ('http', 'ftp', 'ed2k', 'thunder'), urls)
if not urls:
return
#self.urlopen('http://dynamic.cloud.vip.xunlei.com/interface/batch_task_check', data={'url':'\r\n'.join(urls), 'random':current_random()})
jsonp = 'jsonp%s' % current_timestamp()
url = 'http://dynamic.cloud.vip.xunlei.com/interface/batch_task_commit?callback=%s' % jsonp
if old_task_ids:
batch_old_taskid = ','.join(old_task_ids)
else:
batch_old_taskid = '0' + ',' * (len(urls) - 1) # XXX: what is it?
data = {}
for i in range(len(urls)):
data['cid[%d]' % i] = ''
data['url[%d]' % i] = urllib.quote(to_utf_8(urls[i])) # fix per request #98
data['batch_old_taskid'] = batch_old_taskid
response = self.urlopen(url, data=data).read()
assert_response(response, jsonp, len(urls))
def add_torrent_task_by_content(self, content, path='attachment.torrent'):
assert re.match(r'd\d+:', content), 'Probably not a valid content file [%s...]' % repr(content[:17])
upload_url = 'http://dynamic.cloud.vip.xunlei.com/interface/torrent_upload'
jsonp = 'jsonp%s' % current_timestamp()
commit_url = 'http://dynamic.cloud.vip.xunlei.com/interface/bt_task_commit?callback=%s' % jsonp
content_type, body = encode_multipart_formdata([], [('filepath', path, content)])
response = self.urlopen(upload_url, data=body, headers={'Content-Type': content_type}).read().decode('utf-8')
upload_success = re.search(r'<script>document\.domain="xunlei\.com";var btResult =(\{.*\});</script>', response, flags=re.S)
if upload_success:
bt = json.loads(upload_success.group(1))
bt_hash = bt['infoid']
bt_name = bt['ftitle']
bt_size = bt['btsize']
data = {'uid':self.id, 'btname':bt_name, 'cid':bt_hash, 'tsize':bt_size,
'findex':''.join(f['id']+'_' for f in bt['filelist']),
'size':''.join(f['subsize']+'_' for f in bt['filelist']),
'from':'0'}
response = self.urlopen(commit_url, data=data).read()
#assert_response(response, jsonp)
assert re.match(r'%s\({"id":"\d+","progress":1}\)' % jsonp, response), repr(response)
return bt_hash
already_exists = re.search(r"parent\.edit_bt_list\((\{.*\}),''\)", response, flags=re.S)
if already_exists:
bt = json.loads(already_exists.group(1))
bt_hash = bt['infoid']
return bt_hash
raise NotImplementedError()
def add_torrent_task_by_info_hash(self, sha1):
return self.add_torrent_task_by_content(self.get_torrent_file_by_info_hash(sha1), sha1.upper()+'.torrent')
def add_torrent_task(self, path):
with open(path, 'rb') as x:
return self.add_torrent_task_by_content(x.read(), os.path.basename(path))
def add_torrent_task_by_info_hash2(self, sha1, old_task_id=None):
'''similar to add_torrent_task_by_info_hash, but faster. I may delete current add_torrent_task_by_info_hash completely in future'''
link = 'http://dynamic.cloud.vip.xunlei.com/interface/get_torrent?userid=%s&infoid=%s' % (self.id, sha1)
return self.add_torrent_task_by_link(link, old_task_id=old_task_id)
def add_magnet_task(self, link):
return self.add_torrent_task_by_link(link)
def add_torrent_task_by_link(self, link, old_task_id=None):
url = 'http://dynamic.cloud.vip.xunlei.com/interface/url_query?callback=queryUrl&u=%s&random=%s' % (urllib.quote(link), current_timestamp())
response = self.urlopen(url).read()
success = re.search(r'queryUrl(\(1,.*\))\s*$', response, flags=re.S)
if not success:
already_exists = re.search(r"queryUrl\(-1,'([^']{40})", response, flags=re.S)
if already_exists:
return already_exists.group(1)
raise NotImplementedError(repr(response))
args = success.group(1).decode('utf-8')
args = literal_eval(args.replace('new Array', ''))
_, cid, tsize, btname, _, names, sizes_, sizes, _, types, findexes, timestamp = args
def toList(x):
if type(x) in (list, tuple):
return x
else:
return [x]
data = {'uid':self.id, 'btname':btname, 'cid':cid, 'tsize':tsize,
'findex':''.join(x+'_' for x in toList(findexes)),
'size':''.join(x+'_' for x in toList(sizes)),
'from':'0'}
if old_task_id:
data['o_taskid'] = old_task_id
data['o_page'] = 'history'
jsonp = 'jsonp%s' % current_timestamp()
commit_url = 'http://dynamic.cloud.vip.xunlei.com/interface/bt_task_commit?callback=%s' % jsonp
response = self.urlopen(commit_url, data=data).read()
#assert_response(response, jsonp)
assert re.match(r'%s\({"id":"\d+","progress":1}\)' % jsonp, response), repr(response)
return cid
def readd_all_expired_tasks(self):
url = 'http://dynamic.cloud.vip.xunlei.com/interface/delay_once?callback=anything'
response = self.urlopen(url).read()
def delete_tasks_by_id(self, ids):
jsonp = 'jsonp%s' % current_timestamp()
data = {'taskids': ','.join(ids)+',', 'databases': '0,'}
url = 'http://dynamic.cloud.vip.xunlei.com/interface/task_delete?callback=%s&type=%s&noCacheIE=%s' % (jsonp, 2, current_timestamp()) # XXX: what is 'type'?
response = self.urlopen(url, data=data).read()
response = remove_bom(response)
assert_response(response, jsonp, '{"result":1,"type":2}')
def delete_task_by_id(self, id):
self.delete_tasks_by_id([id])
def delete_task(self, task):
self.delete_task_by_id(task['id'])
def delete_tasks(self, tasks):
self.delete_tasks_by_id([t['id'] for t in tasks])
def pause_tasks_by_id(self, ids):
url = 'http://dynamic.cloud.vip.xunlei.com/interface/task_pause?tid=%s&uid=%s&noCacheIE=%s' % (','.join(ids)+',', self.id, current_timestamp())
assert self.urlopen(url).read() == 'pause_task_resp()'
def pause_task_by_id(self, id):
self.pause_tasks_by_id([id])
def pause_task(self, task):
self.pause_task_by_id(task['id'])
def pause_tasks(self, tasks):
self.pause_tasks_by_id(t['id'] for t in tasks)
def restart_tasks(self, tasks):
jsonp = 'jsonp%s' % current_timestamp()
url = 'http://dynamic.cloud.vip.xunlei.com/interface/redownload?callback=%s' % jsonp
form = []
for task in tasks:
assert task['type'] in ('ed2k', 'http', 'ftp', 'https', 'bt'), "'%s' is not tested" % task['type']
data = {'id[]': task['id'],
'cid[]': '', # XXX: should I set this?
'url[]': task['original_url'],
'download_status[]': task['status']}
if task['type'] == 'ed2k':
data['taskname[]'] = task['name'].encode('utf-8') # XXX: shouldn't I set this for other task types?
form.append(urlencode(data))
form.append(urlencode({'type':1}))
data = '&'.join(form)
response = self.urlopen(url, data=data).read()
assert_response(response, jsonp)
def rename_task(self, task, new_name):
assert type(new_name) == unicode
url = 'http://dynamic.cloud.vip.xunlei.com/interface/rename'
taskid = task['id']
bt = '1' if task['type'] == 'bt' else '0'
url = url+'?'+urlencode({'taskid':taskid, 'bt':bt, 'filename':new_name.encode('utf-8')})
response = self.urlopen(url).read()
assert '"result":0' in response, response
def restart_task(self, task):
self.restart_tasks([task])
def get_task_by_id(self, id):
tasks = self.read_all_tasks(0)
for x in tasks:
if x['id'] == id:
return x
        raise Exception('No task found for id '+id)
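# A minimal usage sketch (not part of the original module): the task methods
# above belong to the client class defined earlier in this file; `client`
# below stands for an authenticated instance of that class.
#
#     client.add_magnet_task('magnet:?xt=urn:btih:...')
#     for task in client.read_all_tasks(0):
#         print task['name'], task['status_text']
#     client.delete_tasks_by_id([t['id'] for t in client.read_all_tasks(0)])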
def current_timestamp():
return int(time.time()*1000)
def current_random():
from random import randint
return '%s%06d.%s' % (current_timestamp(), randint(0, 999999), randint(100000000, 9999999999))
def parse_task(html):
inputs = re.findall(r'<input[^<>]+/>', html)
def parse_attrs(html):
return dict((k, v1 or v2) for k, v1, v2 in re.findall(r'''\b(\w+)=(?:'([^']*)'|"([^"]*)")''', html))
info = dict((x['id'], unescape_html(x['value'])) for x in map(parse_attrs, inputs))
mini_info = {}
mini_map = {}
#mini_info = dict((re.sub(r'\d+$', '', k), info[k]) for k in info)
for k in info:
mini_key = re.sub(r'\d+$', '', k)
mini_info[mini_key] = info[k]
mini_map[mini_key] = k
taskid = mini_map['taskname'][8:]
url = mini_info['f_url']
task_type = re.match(r'[^:]+', url).group()
task = {'id': taskid,
'type': task_type,
'name': mini_info['taskname'],
'status': int(mini_info['d_status']),
'status_text': {'0':'waiting', '1':'downloading', '2':'completed', '3':'failed', '5':'pending'}[mini_info['d_status']],
'size': int(mini_info.get('ysfilesize', 0)),
'original_url': mini_info['f_url'],
'xunlei_url': mini_info.get('dl_url', None),
'bt_hash': mini_info['dcid'],
'dcid': mini_info['dcid'],
'gcid': parse_gcid(mini_info.get('dl_url', None)),
}
m = re.search(r'<em class="loadnum"[^<>]*>([^<>]*)</em>', html)
task['progress'] = m and m.group(1) or ''
m = re.search(r'<em [^<>]*id="speed\d+">([^<>]*)</em>', html)
task['speed'] = m and m.group(1).replace(' ', '') or ''
m = re.search(r'<span class="c_addtime">([^<>]*)</span>', html)
task['date'] = m and m.group(1) or ''
return task
def parse_tasks(html):
rwbox = re.search(r'<div class="rwbox".*<!--rwbox-->', html, re.S).group()
rw_lists = re.findall(r'<div class="rw_list".*?<!-- rw_list -->', rwbox, re.S)
return map(parse_task, rw_lists)
def parse_history(html):
rwbox = re.search(r'<div class="rwbox" id="rowbox_list".*?<!--rwbox-->', html, re.S).group()
rw_lists = re.findall(r'<div class="rw_list".*?<input id="d_tasktype\d+"[^<>]*/>', rwbox, re.S)
return map(parse_task, rw_lists)
def parse_bt_list(js):
result = json.loads(re.match(r'^fill_bt_list\((.+)\)\s*$', js).group(1))['Result']
files = []
for record in result['Record']:
files.append({
'id': int(record['taskid']),
'index': record['id'],
'type': 'bt',
'name': record['title'], # TODO: support folder
'status': int(record['download_status']),
'status_text': {'0':'waiting', '1':'downloading', '2':'completed', '3':'failed'}[record['download_status']],
'size': int(record['filesize']),
'original_url': record['url'],
'xunlei_url': record['downurl'],
'dcid': record['cid'],
'gcid': parse_gcid(record['downurl']),
'speed': '',
'progress': '%s%%' % record['percent'],
'date': '',
})
return files
def parse_gcid(url):
if not url:
return
m = re.search(r'&g=([A-F0-9]{40})&', url)
if not m:
return
return m.group(1)
def urlencode(x):
def unif8(u):
if type(u) == unicode:
u = u.encode('utf-8')
return u
return urllib.urlencode([(unif8(k), unif8(v)) for k, v in x.items()])
def encode_multipart_formdata(fields, files):
#http://code.activestate.com/recipes/146306/
"""
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
L.append('Content-Type: %s' % get_content_type(filename))
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def get_content_type(filename):
import mimetypes
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
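# A minimal sketch (not part of the original module) showing how the two
# helpers above combine into a multipart request; the field and file names
# here are made up for illustration.
def example_multipart_body():
    content_type, body = encode_multipart_formdata(
        fields=[('uid', '12345')],
        files=[('filepath', 'example.torrent', 'raw torrent bytes')])
    headers = {'Content-Type': content_type,
               'Content-Length': str(len(body))}
    return headers, body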
def assert_default_page(response, id):
#assert response == "<script>top.location='http://dynamic.cloud.vip.xunlei.com/user_task?userid=%s&st=0'</script>" % id
assert re.match(r"^<script>top\.location='http://dynamic\.cloud\.vip\.xunlei\.com/user_task\?userid=%s&st=0(&cache=\d+)?'</script>$" % id, response), response
def remove_bom(response):
if response.startswith('\xef\xbb\xbf'):
response = response[3:]
return response
def assert_response(response, jsonp, value=1):
response = remove_bom(response)
assert response == '%s(%s)' % (jsonp, value), repr(response)
def parse_url_protocol(url):
m = re.match(r'([^:]+)://', url)
if m:
return m.group(1)
elif url.startswith('magnet:'):
return 'magnet'
else:
return url
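# Expected behaviour of parse_url_protocol, for reference:
#     parse_url_protocol('ed2k://|file|...')        -> 'ed2k'
#     parse_url_protocol('magnet:?xt=urn:btih:...') -> 'magnet'
#     parse_url_protocol('plain-string')            -> 'plain-string'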
def unescape_html(html):
import xml.sax.saxutils
return xml.sax.saxutils.unescape(html)
def to_utf_8(s):
if type(s) == unicode:
return s.encode('utf-8')
else:
return s
def md5(s):
import hashlib
return hashlib.md5(s).hexdigest().lower()
def encypt_password(password):
if not re.match(r'^[0-9a-f]{32}$', password):
password = md5(md5(password))
return password
def ungzip(s):
from StringIO import StringIO
import gzip
buffer = StringIO(s)
f = gzip.GzipFile(fileobj=buffer)
return f.read()
def undeflate(s):
import zlib
return zlib.decompress(s, -zlib.MAX_WBITS)
|
[
"iambus@gmail.com"
] |
iambus@gmail.com
|
a034cf49cfa642beaac238623dc1c694bb4c2ef7
|
e94c3050f391380848b3ab82992e9b1bb3803b49
|
/node_modules/node-sass/build/config.gypi
|
d2df84ecde9a71790827afc1e98440e4362ba0ba
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
JaneGav/Template_email
|
cf6a8e0dd490697a763d28ee1067c8280ead0fe4
|
5b4880eabe822ca40a704d18ffcc79dc567c8b70
|
refs/heads/master
| 2020-04-19T17:05:38.243447
| 2019-01-30T10:33:57
| 2019-01-30T10:33:57
| 168,323,958
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,614
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt62l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "62",
"llvm_version": "0",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 64,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_large_pages": "false",
"node_use_openssl": "true",
"node_use_pch": "false",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "64.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_typed_array_max_size_in_heap": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/dzhein/.node-gyp/10.14.2",
"standalone_static_library": 1,
"libsass_ext": "",
"libsass_cflags": "",
"libsass_ldflags": "",
"libsass_library": "",
"save_dev": "",
"legacy_bundling": "",
"dry_run": "",
"viewer": "man",
"only": "",
"commit_hooks": "true",
"browser": "",
"also": "",
"sign_git_commit": "",
"rollback": "true",
"usage": "",
"audit": "true",
"globalignorefile": "/Users/dzhein/.npm-global/etc/npmignore",
"shell": "/bin/bash",
"maxsockets": "50",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"metrics_registry": "https://registry.npmjs.org/",
"timing": "",
"init_license": "ISC",
"if_present": "",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "Infinity",
"preid": "",
"long": "",
"local_address": "",
"git_tag_version": "true",
"cert": "",
"registry": "https://registry.npmjs.org/",
"noproxy": "",
"fetch_retries": "2",
"versions": "",
"message": "%s",
"key": "",
"globalconfig": "/Users/dzhein/.npm-global/etc/npmrc",
"prefer_online": "",
"logs_max": "10",
"always_auth": "",
"global_style": "",
"cache_lock_retries": "10",
"update_notifier": "true",
"heading": "npm",
"audit_level": "low",
"searchlimit": "20",
"read_only": "",
"offline": "",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"allow_same_version": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/dzhein/.npmrc",
"init_module": "/Users/dzhein/.npm-init.js",
"cidr": "",
"user": "501",
"node_version": "10.14.2",
"save": "true",
"ignore_prepublish": "",
"editor": "vi",
"auth_type": "legacy",
"tag": "latest",
"script_shell": "",
"progress": "true",
"global": "",
"searchstaleness": "900",
"optional": "true",
"ham_it_up": "",
"save_prod": "",
"force": "",
"bin_links": "true",
"searchopts": "",
"node_gyp": "/Users/dzhein/.npm-global/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"depth": "Infinity",
"sso_poll_frequency": "500",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"tag_version_prefix": "v",
"strict_ssl": "true",
"sso_type": "oauth",
"scripts_prepend_node_path": "warn-only",
"save_prefix": "^",
"ca": "",
"save_exact": "",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"prefer_offline": "",
"cache_lock_stale": "60000",
"otp": "",
"cache_min": "10",
"searchexclude": "",
"cache": "/Users/dzhein/.npm",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/6.7.0 node/v10.14.2 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"send_metrics": "",
"save_bundle": "",
"umask": "0022",
"node_options": "",
"init_version": "1.0.0",
"init_author_name": "",
"git": "git",
"scope": "",
"unsafe_perm": "true",
"tmp": "/var/folders/s1/h9zjrylj1_b1bpwfrv8brhjr0000gn/T",
"onload_script": "",
"link": "",
"prefix": "/Users/dzhein/.npm-global"
}
}
|
[
"eugeniavad@gmail.com"
] |
eugeniavad@gmail.com
|
d57d6288a18c44ff8f681c65beb55e60255251a6
|
73868ce5fe6fd1e0c7c88f0779f2dfacab22d6e3
|
/skiplist.py
|
51d525fe284669e71824695a6dee9dab2531d58f
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
vincentvbh/pqm3
|
f73185b878e6a6c5bb40d39a1031fda4ea201058
|
138ea1b3fee78814fac680da596ecb31ea11fdc3
|
refs/heads/master
| 2023-08-29T20:15:14.497159
| 2021-10-22T02:12:57
| 2021-10-22T02:12:57
| 395,266,784
| 0
| 0
| null | 2021-08-12T09:35:06
| 2021-08-12T09:35:05
| null |
UTF-8
|
Python
| false
| false
| 9,968
|
py
|
skip_list = [
{'scheme': 'kyber1024', 'implementation': 'm3', 'estmemory': 12288},
{'scheme': 'kyber512', 'implementation': 'm3', 'estmemory': 7168},
{'scheme': 'kyber768', 'implementation': 'm3', 'estmemory': 9216},
{'scheme': 'saber', 'implementation': 'm3', 'estmemory': 22528},
{'scheme': 'sikep434', 'implementation': 'opt', 'estmemory': 10240},
{'scheme': 'sikep503', 'implementation': 'opt', 'estmemory': 10240},
{'scheme': 'sikep610', 'implementation': 'opt', 'estmemory': 14336},
{'scheme': 'sikep751', 'implementation': 'opt', 'estmemory': 16384},
{'scheme': 'bikel1', 'implementation': 'opt', 'estmemory': 90112},
{'scheme': 'bikel3', 'implementation': 'opt', 'estmemory': 175104},
{'scheme': 'firesaber', 'implementation': 'clean', 'estmemory': 28672},
{'scheme': 'frodokem1344aes', 'implementation': 'clean', 'estmemory': 3853312},
{'scheme': 'frodokem1344aes', 'implementation': 'opt', 'estmemory': 305152},
{'scheme': 'frodokem1344shake', 'implementation': 'clean', 'estmemory': 3852288},
{'scheme': 'frodokem1344shake', 'implementation': 'opt', 'estmemory': 252928},
{'scheme': 'frodokem640aes', 'implementation': 'clean', 'estmemory': 932864},
{'scheme': 'frodokem640aes', 'implementation': 'opt', 'estmemory': 144384},
{'scheme': 'frodokem640shake', 'implementation': 'clean', 'estmemory': 932864},
{'scheme': 'frodokem640shake', 'implementation': 'opt', 'estmemory': 119808},
{'scheme': 'frodokem976aes', 'implementation': 'clean', 'estmemory': 2080768},
{'scheme': 'frodokem976aes', 'implementation': 'opt', 'estmemory': 222208},
{'scheme': 'frodokem976shake', 'implementation': 'clean', 'estmemory': 2079744},
{'scheme': 'frodokem976shake', 'implementation': 'opt', 'estmemory': 185344},
{'scheme': 'kyber1024-90s', 'implementation': 'clean', 'estmemory': 28672},
{'scheme': 'kyber1024', 'implementation': 'clean', 'estmemory': 28672},
{'scheme': 'kyber512-90s', 'implementation': 'clean', 'estmemory': 15360},
{'scheme': 'kyber512', 'implementation': 'clean', 'estmemory': 14336},
{'scheme': 'kyber768', 'implementation': 'clean', 'estmemory': 21504},
{'scheme': 'lightsaber', 'implementation': 'clean', 'estmemory': 15360},
{'scheme': 'kyber768-90s', 'implementation': 'clean', 'estmemory': 21504},
{'scheme': 'mceliece348864', 'implementation': 'clean', 'estmemory': 833536},
{'scheme': 'mceliece348864f', 'implementation': 'clean', 'estmemory': 833536},
{'scheme': 'mceliece460896', 'implementation': 'clean', 'estmemory': 4733952},
{'scheme': 'mceliece460896f', 'implementation': 'clean', 'estmemory': 4733952},
{'scheme': 'mceliece6688128', 'implementation': 'clean', 'estmemory': 5255168},
{'scheme': 'mceliece6688128f', 'implementation': 'clean', 'estmemory': 5255168},
{'scheme': 'mceliece6960119', 'implementation': 'clean', 'estmemory': 5257216},
{'scheme': 'mceliece6960119f', 'implementation': 'clean', 'estmemory': 5257216},
{'scheme': 'mceliece8192128', 'implementation': 'clean', 'estmemory': 5568512},
{'scheme': 'mceliece8192128f', 'implementation': 'clean', 'estmemory': 5568512},
{'scheme': 'ntruhps2048509', 'implementation': 'clean', 'estmemory': 29696},
{'scheme': 'ntruhps2048677', 'implementation': 'clean', 'estmemory': 38912},
{'scheme': 'ntruhps4096821', 'implementation': 'clean', 'estmemory': 47104},
{'scheme': 'ntruhrss701', 'implementation': 'clean', 'estmemory': 38912},
{'scheme': 'saber', 'implementation': 'clean', 'estmemory': 20480},
{'scheme': 'hqc-rmrs-128', 'implementation': 'clean', 'estmemory': 81920},
{'scheme': 'hqc-rmrs-192', 'implementation': 'clean', 'estmemory': 161792},
{'scheme': 'hqc-rmrs-256', 'implementation': 'clean', 'estmemory': 257024},
{'scheme': 'ntrulpr653', 'implementation': 'clean', 'estmemory': 18432},
{'scheme': 'ntrulpr761', 'implementation': 'clean', 'estmemory': 19456},
{'scheme': 'ntrulpr857', 'implementation': 'clean', 'estmemory': 23552},
{'scheme': 'sntrup653', 'implementation': 'clean', 'estmemory': 15360},
{'scheme': 'sntrup761', 'implementation': 'clean', 'estmemory': 18432},
{'scheme': 'sntrup857', 'implementation': 'clean', 'estmemory': 20480},
{'scheme': 'dilithium2', 'implementation': 'clean', 'estmemory': 60416},
{'scheme': 'dilithium3', 'implementation': 'clean', 'estmemory': 91136},
{'scheme': 'falcon-1024', 'implementation': 'clean', 'estmemory': 90112},
{'scheme': 'falcon-512', 'implementation': 'clean', 'estmemory': 47104},
{'scheme': 'sphincs-haraka-128f-robust', 'implementation': 'clean', 'estmemory': 23552},
{'scheme': 'sphincs-haraka-128f-simple', 'implementation': 'clean', 'estmemory': 23552},
{'scheme': 'sphincs-haraka-128s-robust', 'implementation': 'clean', 'estmemory': 13312},
{'scheme': 'sphincs-haraka-128s-simple', 'implementation': 'clean', 'estmemory': 13312},
{'scheme': 'sphincs-haraka-192f-robust', 'implementation': 'clean', 'estmemory': 43008},
{'scheme': 'sphincs-haraka-192f-simple', 'implementation': 'clean', 'estmemory': 43008},
{'scheme': 'sphincs-haraka-192s-robust', 'implementation': 'clean', 'estmemory': 23552},
{'scheme': 'sphincs-haraka-192s-simple', 'implementation': 'clean', 'estmemory': 23552},
{'scheme': 'sphincs-haraka-256f-robust', 'implementation': 'clean', 'estmemory': 59392},
{'scheme': 'sphincs-haraka-256f-simple', 'implementation': 'clean', 'estmemory': 59392},
{'scheme': 'sphincs-haraka-256s-robust', 'implementation': 'clean', 'estmemory': 38912},
{'scheme': 'sphincs-haraka-256s-simple', 'implementation': 'clean', 'estmemory': 38912},
{'scheme': 'sphincs-sha256-128f-robust', 'implementation': 'clean', 'estmemory': 21504},
{'scheme': 'sphincs-sha256-128f-simple', 'implementation': 'clean', 'estmemory': 21504},
{'scheme': 'sphincs-sha256-128s-robust', 'implementation': 'clean', 'estmemory': 12288},
{'scheme': 'sphincs-sha256-128s-simple', 'implementation': 'clean', 'estmemory': 12288},
{'scheme': 'sphincs-sha256-192f-robust', 'implementation': 'clean', 'estmemory': 41984},
{'scheme': 'sphincs-sha256-192f-simple', 'implementation': 'clean', 'estmemory': 41984},
{'scheme': 'sphincs-sha256-192s-robust', 'implementation': 'clean', 'estmemory': 22528},
{'scheme': 'sphincs-sha256-192s-simple', 'implementation': 'clean', 'estmemory': 22528},
{'scheme': 'sphincs-sha256-256f-robust', 'implementation': 'clean', 'estmemory': 57344},
{'scheme': 'sphincs-sha256-256f-simple', 'implementation': 'clean', 'estmemory': 57344},
{'scheme': 'sphincs-sha256-256s-robust', 'implementation': 'clean', 'estmemory': 37888},
{'scheme': 'sphincs-sha256-256s-simple', 'implementation': 'clean', 'estmemory': 37888},
{'scheme': 'sphincs-shake256-128f-robust', 'implementation': 'clean', 'estmemory': 21504},
{'scheme': 'sphincs-shake256-128f-simple', 'implementation': 'clean', 'estmemory': 21504},
{'scheme': 'sphincs-shake256-128s-robust', 'implementation': 'clean', 'estmemory': 12288},
{'scheme': 'sphincs-shake256-128s-simple', 'implementation': 'clean', 'estmemory': 12288},
{'scheme': 'sphincs-shake256-192f-robust', 'implementation': 'clean', 'estmemory': 41984},
{'scheme': 'sphincs-shake256-192f-simple', 'implementation': 'clean', 'estmemory': 41984},
{'scheme': 'sphincs-shake256-192s-robust', 'implementation': 'clean', 'estmemory': 22528},
{'scheme': 'sphincs-shake256-192s-simple', 'implementation': 'clean', 'estmemory': 22528},
{'scheme': 'sphincs-shake256-256f-robust', 'implementation': 'clean', 'estmemory': 57344},
{'scheme': 'sphincs-shake256-256f-simple', 'implementation': 'clean', 'estmemory': 57344},
{'scheme': 'sphincs-shake256-256s-robust', 'implementation': 'clean', 'estmemory': 37888},
{'scheme': 'sphincs-shake256-256s-simple', 'implementation': 'clean', 'estmemory': 37888},
{'scheme': 'dilithium2aes', 'implementation': 'clean', 'estmemory': 61440},
{'scheme': 'dilithium3aes', 'implementation': 'clean', 'estmemory': 92160},
{'scheme': 'dilithium5', 'implementation': 'clean', 'estmemory': 136192},
{'scheme': 'dilithium5aes', 'implementation': 'clean', 'estmemory': 138240},
{'scheme': 'rainbowI-circumzenithal', 'implementation': 'clean', 'estmemory': 490496},
{'scheme': 'rainbowI-classic', 'implementation': 'clean', 'estmemory': 445440},
{'scheme': 'rainbowI-compressed', 'implementation': 'clean', 'estmemory': 387072},
{'scheme': 'rainbowIII-circumzenithal', 'implementation': 'clean', 'estmemory': 5087232},
{'scheme': 'rainbowIII-classic', 'implementation': 'clean', 'estmemory': 5704704},
{'scheme': 'rainbowIII-compressed', 'implementation': 'clean', 'estmemory': 4460544},
{'scheme': 'rainbowV-circumzenithal', 'implementation': 'clean', 'estmemory': 6140928},
{'scheme': 'rainbowV-classic', 'implementation': 'clean', 'estmemory': 7535616},
{'scheme': 'rainbowV-compressed', 'implementation': 'clean', 'estmemory': 4732928},
{'scheme': 'falcon-1024-tree', 'implementation': 'opt-ct', 'estmemory': 186368},
{'scheme': 'falcon-1024-tree', 'implementation': 'opt-leaktime', 'estmemory': 186368},
{'scheme': 'falcon-1024', 'implementation': 'opt-ct', 'estmemory': 90112},
{'scheme': 'falcon-1024', 'implementation': 'opt-leaktime', 'estmemory': 90112},
{'scheme': 'falcon-512-tree', 'implementation': 'opt-ct', 'estmemory': 91136},
{'scheme': 'falcon-512-tree', 'implementation': 'opt-leaktime', 'estmemory': 91136},
{'scheme': 'falcon-512', 'implementation': 'opt-ct', 'estmemory': 47104},
{'scheme': 'falcon-512', 'implementation': 'opt-leaktime', 'estmemory': 47104},
{'scheme': 'dilithium2', 'implementation': 'm3', 'estmemory': 46080},
{'scheme': 'dilithium3', 'implementation': 'm3', 'estmemory': 62464},
{'scheme': 'dilithium4', 'implementation': 'm3', 'estmemory': 79872},
]
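# A minimal sketch (not part of the original file) of how a benchmark harness
# might consult this list; the helper name and the default memory budget are
# assumptions for illustration only.
def should_skip(scheme, implementation, available_memory=65536):
    """Return True if the scheme/implementation pair is estimated to need
    more memory (in bytes) than the given budget and should be skipped."""
    for entry in skip_list:
        if (entry['scheme'] == scheme
                and entry['implementation'] == implementation
                and entry['estmemory'] > available_memory):
            return True
    return False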
|
[
"git@rpls.de"
] |
git@rpls.de
|
7e106c21d729df1b192289fad2ebdb0b3a916922
|
d49d0428835d9def6a6eb8f255b4f352b2f913bd
|
/GF_company.py
|
ed6ab043da835db4e393c1c6326fd8fd4253bddd
|
[] |
no_license
|
UCanCallMeJia/wechat_GF
|
2e6b88dc1e4cb8c3c0ad7a94349666af27e46940
|
831c6f85c37b82e39b4ff23301d201a12978cc5c
|
refs/heads/master
| 2020-08-30T03:30:21.878953
| 2020-07-01T02:08:49
| 2020-07-01T02:08:49
| 218,248,883
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,721
|
py
|
from __future__ import unicode_literals
from wxpy import *
from requests import get
from requests import post
from platform import system
from os import chdir
from random import choice
from threading import Thread
import configparser
import time
import sys
# Fetch the daily motivational sentence
def get_message():
    r = get("http://open.iciba.com/dsapi/")
    note = r.json()['note']
    content = r.json()['content']
    return note,content
# Send a message to her
def send_message(your_message):
    try:
        # Her WeChat display name
        my_friend = bot.friends().search('咸鱼不会翻身吗')[0]
        # Send the message to her
        my_friend.send(your_message)
    except:
        # If something goes wrong, send a note to the file transfer assistant
        bot.file_helper.send(u"The girlfriend-guarding bot ran into a problem, go take a look~")
# Carry out the caring routine at the scheduled times
def start_care():
    # Message to be sent; initially empty
    message = ""
    # An endless loop, so she is looked after around the clock
    while(True):
        # Progress output
        print("Guarding, time: %s" % time.ctime())
        # Scheduled daily greetings: wake-up, lunch, dinner and bedtime
        # Take only the hour and minute, i.e. positions -13 to -8 of the time string
        now_time = time.ctime()[-13:-8]
        if (now_time == say_good_morning):
            # Pick a random greeting
            message = choice(str_list_good_morning)
            # Optionally append a random emoticon
            if(flag_wx_emoj):
                message = message + choice(str_list_emoj)
            send_message(message)
            print("Reminded girlfriend to get up: %s" % time.ctime())
        elif (now_time == say_good_lunch):
            message = choice(str_list_good_lunch)
            # Optionally append a random emoticon
            if(flag_wx_emoj):
                message = message + choice(str_list_emoj)
            send_message('Smart-boyfriend usage: start a message with "_", e.g. "_are you there"')
            send_message(message)
            print("Reminded girlfriend to have lunch: %s" % time.ctime())
        elif (now_time == say_good_dinner):
            message = choice(str_list_good_dinner)
            # Optionally append a random emoticon
            if(flag_wx_emoj):
                message = message + choice(str_list_emoj)
            send_message(message)
            print("Reminded girlfriend to have dinner: %s" % time.ctime())
        elif (now_time == say_good_dream):
            # Optionally append the daily English-learning sentence
            if(flag_learn_english):
                note, content = get_message()
                message = choice(str_list_good_dream) + "\n\n" + "And let's learn a bit of English together:\n" + "Original: " + content + "\n\nTranslation: " + note
            else:
                message = choice(str_list_good_dream)
            # Optionally append a random emoticon
            if(flag_wx_emoj):
                message = message + choice(str_list_emoj)
            send_message(message)
            print("Reminded girlfriend to go to sleep: %s" % time.ctime())
        # Holiday greetings
        festival_month = time.strftime('%m', time.localtime())
        festival_day = time.strftime('%d', time.localtime())
        if(festival_month == '02' and festival_day == '14' and now_time == "08:00"):
            send_message(str_Valentine)
            print("Sent Valentine's Day greeting: %s" % time.ctime())
        elif(festival_month == '03' and festival_day == '08' and now_time == "08:00"):
            send_message(str_Women)
            print("Sent International Women's Day greeting: %s" % time.ctime())
        elif(festival_month == '12' and festival_day == '24' and now_time == "00:00"):
            send_message(str_Christmas_Eve)
            print("Sent Christmas Eve greeting: %s" % time.ctime())
        elif(festival_month == '12' and festival_day == '25' and now_time == "00:00"):
            send_message(str_Christmas)
            print("Sent Christmas greeting: %s" % time.ctime())
        # Birthday greeting
        if(festival_month == birthday_month and festival_day == birthday_day and now_time == "00:00"):
            send_message(str_birthday)
            print("Sent birthday greeting: %s" % time.ctime())
        # Check once every 60 seconds
        time.sleep(60)
if __name__ == "__main__":
    # If reading the config file fails, uncomment the line below (usually only needed when running under PyCharm)
    # Set the directory containing this file as the working directory
    # chdir(sys.path[0])
    # Start the WeChat bot; the call differs per operating system
    # On Windows or macOS Sierra use bot = Bot()
    # On Linux or in a macOS terminal use bot = Bot(console_qr=2)
    if('Windows' in system()):
        # Windows
        bot = Bot()
    elif('Darwin' in system()):
        # MacOSX
        bot = Bot()
    elif('Linux' in system()):
        # Linux
        bot = Bot(console_qr=2,cache_path=True)
    else:
        # Decide for yourself
        print("Could not recognize your operating system, please configure this yourself")
    # Read the config file
    cf = configparser.ConfigParser()
    cf.read(r"D:\python_ws\GirlFriend\config.ini",encoding='UTF-8')
    # Your girlfriend's WeChat display name; note: not her WeChat ID and not the remark name
    my_lady_wechat_name = cf.get("configuration", "my_lady_wechat_name")
    # Times for the wake-up, lunch, dinner and bedtime greetings
    say_good_morning = cf.get("configuration", "say_good_morning")
    say_good_lunch = cf.get("configuration", "say_good_lunch")
    say_good_dinner = cf.get("configuration", "say_good_dinner")
    say_good_dream = cf.get("configuration", "say_good_dream")
    # Girlfriend's birthday
    # Month, zero-padded to two digits, e.g. June must be written as 06
    birthday_month = cf.get("configuration", "birthday_month")
    # Day, zero-padded to two digits, e.g. the 6th must be written as 06
    birthday_day = cf.get("configuration", "birthday_day")
    # Load the random greetings for wake-up, lunch, dinner and bedtime
    # Normally this code does not need changing; to add greetings, edit the corresponding text files
    # Wake-up greetings, sourced from Sina Weibo
    str_list_good_morning = ''
    with open(r"D:\python_ws\GirlFriend\sentence_good_morning.txt", "r",encoding='UTF-8') as f:
        str_list_good_morning = f.readlines()
    print(str_list_good_morning)
    # Lunch greetings, sourced from Sina Weibo
    str_list_good_lunch = ''
    with open(r"D:\python_ws\GirlFriend\sentence_good_lunch.txt", "r",encoding='UTF-8') as f:
        str_list_good_lunch = f.readlines()
    print(str_list_good_lunch)
    # Dinner greetings, sourced from Sina Weibo
    str_list_good_dinner = ''
    with open(r"D:\python_ws\GirlFriend\sentence_good_dinner.txt", "r",encoding='UTF-8') as f:
        str_list_good_dinner = f.readlines()
    print(str_list_good_dinner)
    # Bedtime greetings, sourced from Sina Weibo
    str_list_good_dream = ''
    with open(r"D:\python_ws\GirlFriend\sentence_good_dream.txt", "r",encoding='UTF-8') as f:
        str_list_good_dream = f.readlines()
    print(str_list_good_dream)
    # Whether the bedtime greeting should also include the daily English sentence
    # False means no, True means yes
    if((cf.get("configuration", "flag_learn_english")) == '1'):
        flag_learn_english = True
    else:
        flag_learn_english = False
    print(flag_learn_english)
    # Whether every greeting should end with a random emoticon
    # False means no, True means yes
str_emoj = "(•‾̑⌣‾̑•)✧˖°----(๑´ڡ`๑)----(๑¯ิε ¯ิ๑)----(๑•́ ₃ •̀๑)----( ∙̆ .̯ ∙̆ )----(๑˘ ˘๑)----(●′ω`●)----(●・̆⍛・̆●)----ಥ_ಥ----_(:qゝ∠)----(´;ω;`)----( `)3')----Σ((( つ•̀ω•́)つ----╰(*´︶`*)╯----( ´´ิ∀´ิ` )----(´∩`。)----( ื▿ ื)----(。ŏ_ŏ)----( •ิ _ •ิ )----ヽ(*΄◞ิ౪◟ิ‵ *)----( ˘ ³˘)----(; ´_ゝ`)----(*ˉ﹃ˉ)----(◍'౪`◍)ノ゙----(。◝‿◜。)----(ಠ .̫.̫ ಠ)----(´◞⊖◟`)----(。≖ˇェˇ≖。)----(◕ܫ◕)----(`◕‸◕´+)----(▼ _ ▼)----( ◉ืൠ◉ื)----ㄟ(◑‿◐ )ㄏ----(●'◡'●)ノ♥----(。◕ˇ∀ˇ◕)----( ◔ ڼ ◔ )----( ´◔ ‸◔`)----(☍﹏⁰)----(♥◠‿◠)----ლ(╹◡╹ლ )----(๑꒪◞౪◟꒪๑)"
str_list_emoj = str_emoj.split('----')
if ((cf.get("configuration", "flag_wx_emoj")) == '1'):
flag_wx_emoj = True
else:
flag_wx_emoj = False
print(str_list_emoj)
    # Holiday greeting texts
    # Valentine's Day greeting
    str_Valentine = cf.get("configuration", "str_Valentine")
    print(str_Valentine)
    # International Women's Day greeting
    str_Women = cf.get("configuration", "str_Women")
    print(str_Women)
    # Christmas Eve greeting
    str_Christmas_Eve = cf.get("configuration", "str_Christmas_Eve")
    print(str_Christmas_Eve)
    # Christmas greeting
    str_Christmas = cf.get("configuration", "str_Christmas")
    print(str_Christmas)
    # Greeting for her birthday
    str_birthday = cf.get("configuration", "str_birthday")
    print(str_birthday)
    # Start guarding the girlfriend
    t = Thread(target=start_care, name='start_care')
    t.start()
    # Listener for messages coming from the girlfriend
    # Girlfriend's WeChat display name
    my_girl_friend = bot.friends().search(my_lady_wechat_name)[0]
    @bot.register(chats=my_girl_friend, except_self=False)
    def print_others(msg):
        # Print the chat message
        print(msg.text)
        # snownlp or jieba could do segmentation and sentiment analysis, but the
        # packaged binary would be too large, so that approach is not used here;
        # instead a web API is called directly
        # to run a very rough sentiment analysis
        # The result is only indicative; do not trust it completely
        postData = {'data':msg.text}
        response = post('https://bosonnlp.com/analysis/sentiment?analysisType=',data=postData)
        data = response.text
        # Sentiment score (the closer to 1, the better the mood; the closer to 0, the worse)
        now_mod_rank = (data.split(',')[0]).replace('[[','')
        print("Message from girlfriend: %s\nCurrent sentiment score: %s\nCloser to 1 means a better mood, closer to 0 a worse one; this result is only indicative, do not trust it completely!\n\n" % (msg.text, now_mod_rank))
        # Forward the information to the file transfer assistant
        mood_message = u"Message from girlfriend: " + msg.text + "\nCurrent sentiment score: " + now_mod_rank + "\nCloser to 1 means a better mood, closer to 0 a worse one; this result is only indicative, do not trust it completely!\n\n"
        bot.file_helper.send(mood_message)
        text = msg.text[1:]
        if msg.text[0] == '_':
            print(text)
            api_url = 'http://www.tuling123.com/openapi/api' # Tuling chatbot endpoint
            data = {
                'key': '453b2da4ec4f4bec947fda36f6e1eedf', # if this apiKey stops working, register a new one
                'info': text, # the message received from the friend, forwarded to the Tuling bot
                'userid': 'wechat-robot', # this can be set to anything
            }
            r = post(api_url, data=data).json() # send the data
            print(r.get('text')) # the bot's reply to the friend
            return r['text']
|
[
"noreply@github.com"
] |
UCanCallMeJia.noreply@github.com
|
d8e4ec2df7ef5c4033d1ea233f4c92691b53493b
|
d6235a8215b0b57105336039c49ccb78400d5376
|
/src/utils/inference.py
|
4f9182b0b8879b2fa3e8c8abcb5910b063501c3a
|
[] |
no_license
|
Ibrahimkhawaja/Facial_Emotion_Recognision
|
61d40da83e9015af805ac6479f647caea9f33219
|
9fab1117b9e95a40a50813bcc28cc6778238f423
|
refs/heads/master
| 2022-12-01T11:43:26.440616
| 2020-02-05T17:09:31
| 2020-02-05T17:09:31
| 238,501,241
| 0
| 0
| null | 2022-11-21T21:16:18
| 2020-02-05T16:52:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
import cv2
import matplotlib.pyplot as plt
import numpy as np
from keras.preprocessing import image
def load_image(image_path, grayscale=False, color_mode="rgb", target_size=None):
pil_image = image.load_img(image_path, grayscale, color_mode, target_size)
return image.img_to_array(pil_image)
def load_detection_model(model_path):
detection_model = cv2.CascadeClassifier(model_path)
return detection_model
def detect_faces(detection_model, gray_image_array):
return detection_model.detectMultiScale(gray_image_array, 1.3, 5)
def draw_bounding_box(face_coordinates, image_array, color):
x, y, w, h = face_coordinates
cv2.rectangle(image_array, (x, y), (x + w, y + h), color, 2)
def apply_offsets(face_coordinates, offsets):
x, y, width, height = face_coordinates
x_off, y_off = offsets
return (x - x_off, x + width + x_off, y - y_off, y + height + y_off)
def draw_text(coordinates, image_array, text, color, x_offset=0, y_offset=0,
font_scale=2, thickness=2):
x, y = coordinates[:2]
cv2.putText(image_array, text, (x + x_offset, y + y_offset),
cv2.FONT_HERSHEY_SIMPLEX,
font_scale, color, thickness, cv2.LINE_AA)
def get_colors(num_classes):
colors = plt.cm.hsv(np.linspace(0, 1, num_classes)).tolist()
colors = np.asarray(colors) * 255
return colors
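# A minimal end-to-end sketch (not part of the original file) combining the
# helpers above; the cascade file and image path are placeholders.
if __name__ == '__main__':
    detector = load_detection_model('haarcascade_frontalface_default.xml')
    bgr = cv2.imread('example.jpg')
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
    for face in detect_faces(detector, gray):
        draw_bounding_box(face, bgr, (0, 255, 0))
    cv2.imwrite('example_faces.jpg', bgr)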
|
[
"khawajaibrahim2011@hotmail.com"
] |
khawajaibrahim2011@hotmail.com
|
339e40ac12cbd66cb7e200816179b447721ff4d6
|
2af6a5c2d33e2046a1d25ae9dd66d349d3833940
|
/res_bw/scripts/common/lib/curses/has_key.py
|
7260fcff66f2171899806164e7f08e769cb9a4e2
|
[] |
no_license
|
webiumsk/WOT-0.9.12-CT
|
e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2
|
2506e34bd6634ad500b6501f4ed4f04af3f43fa0
|
refs/heads/master
| 2021-01-10T01:38:38.080814
| 2015-11-11T00:08:04
| 2015-11-11T00:08:04
| 45,803,240
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 5,057
|
py
|
# 2015.11.10 21:35:00 Central Europe (standard time)
# Embedded file name: scripts/common/Lib/curses/has_key.py
import _curses
_capability_names = {_curses.KEY_A1: 'ka1',
_curses.KEY_A3: 'ka3',
_curses.KEY_B2: 'kb2',
_curses.KEY_BACKSPACE: 'kbs',
_curses.KEY_BEG: 'kbeg',
_curses.KEY_BTAB: 'kcbt',
_curses.KEY_C1: 'kc1',
_curses.KEY_C3: 'kc3',
_curses.KEY_CANCEL: 'kcan',
_curses.KEY_CATAB: 'ktbc',
_curses.KEY_CLEAR: 'kclr',
_curses.KEY_CLOSE: 'kclo',
_curses.KEY_COMMAND: 'kcmd',
_curses.KEY_COPY: 'kcpy',
_curses.KEY_CREATE: 'kcrt',
_curses.KEY_CTAB: 'kctab',
_curses.KEY_DC: 'kdch1',
_curses.KEY_DL: 'kdl1',
_curses.KEY_DOWN: 'kcud1',
_curses.KEY_EIC: 'krmir',
_curses.KEY_END: 'kend',
_curses.KEY_ENTER: 'kent',
_curses.KEY_EOL: 'kel',
_curses.KEY_EOS: 'ked',
_curses.KEY_EXIT: 'kext',
_curses.KEY_F0: 'kf0',
_curses.KEY_F1: 'kf1',
_curses.KEY_F10: 'kf10',
_curses.KEY_F11: 'kf11',
_curses.KEY_F12: 'kf12',
_curses.KEY_F13: 'kf13',
_curses.KEY_F14: 'kf14',
_curses.KEY_F15: 'kf15',
_curses.KEY_F16: 'kf16',
_curses.KEY_F17: 'kf17',
_curses.KEY_F18: 'kf18',
_curses.KEY_F19: 'kf19',
_curses.KEY_F2: 'kf2',
_curses.KEY_F20: 'kf20',
_curses.KEY_F21: 'kf21',
_curses.KEY_F22: 'kf22',
_curses.KEY_F23: 'kf23',
_curses.KEY_F24: 'kf24',
_curses.KEY_F25: 'kf25',
_curses.KEY_F26: 'kf26',
_curses.KEY_F27: 'kf27',
_curses.KEY_F28: 'kf28',
_curses.KEY_F29: 'kf29',
_curses.KEY_F3: 'kf3',
_curses.KEY_F30: 'kf30',
_curses.KEY_F31: 'kf31',
_curses.KEY_F32: 'kf32',
_curses.KEY_F33: 'kf33',
_curses.KEY_F34: 'kf34',
_curses.KEY_F35: 'kf35',
_curses.KEY_F36: 'kf36',
_curses.KEY_F37: 'kf37',
_curses.KEY_F38: 'kf38',
_curses.KEY_F39: 'kf39',
_curses.KEY_F4: 'kf4',
_curses.KEY_F40: 'kf40',
_curses.KEY_F41: 'kf41',
_curses.KEY_F42: 'kf42',
_curses.KEY_F43: 'kf43',
_curses.KEY_F44: 'kf44',
_curses.KEY_F45: 'kf45',
_curses.KEY_F46: 'kf46',
_curses.KEY_F47: 'kf47',
_curses.KEY_F48: 'kf48',
_curses.KEY_F49: 'kf49',
_curses.KEY_F5: 'kf5',
_curses.KEY_F50: 'kf50',
_curses.KEY_F51: 'kf51',
_curses.KEY_F52: 'kf52',
_curses.KEY_F53: 'kf53',
_curses.KEY_F54: 'kf54',
_curses.KEY_F55: 'kf55',
_curses.KEY_F56: 'kf56',
_curses.KEY_F57: 'kf57',
_curses.KEY_F58: 'kf58',
_curses.KEY_F59: 'kf59',
_curses.KEY_F6: 'kf6',
_curses.KEY_F60: 'kf60',
_curses.KEY_F61: 'kf61',
_curses.KEY_F62: 'kf62',
_curses.KEY_F63: 'kf63',
_curses.KEY_F7: 'kf7',
_curses.KEY_F8: 'kf8',
_curses.KEY_F9: 'kf9',
_curses.KEY_FIND: 'kfnd',
_curses.KEY_HELP: 'khlp',
_curses.KEY_HOME: 'khome',
_curses.KEY_IC: 'kich1',
_curses.KEY_IL: 'kil1',
_curses.KEY_LEFT: 'kcub1',
_curses.KEY_LL: 'kll',
_curses.KEY_MARK: 'kmrk',
_curses.KEY_MESSAGE: 'kmsg',
_curses.KEY_MOVE: 'kmov',
_curses.KEY_NEXT: 'knxt',
_curses.KEY_NPAGE: 'knp',
_curses.KEY_OPEN: 'kopn',
_curses.KEY_OPTIONS: 'kopt',
_curses.KEY_PPAGE: 'kpp',
_curses.KEY_PREVIOUS: 'kprv',
_curses.KEY_PRINT: 'kprt',
_curses.KEY_REDO: 'krdo',
_curses.KEY_REFERENCE: 'kref',
_curses.KEY_REFRESH: 'krfr',
_curses.KEY_REPLACE: 'krpl',
_curses.KEY_RESTART: 'krst',
_curses.KEY_RESUME: 'kres',
_curses.KEY_RIGHT: 'kcuf1',
_curses.KEY_SAVE: 'ksav',
_curses.KEY_SBEG: 'kBEG',
_curses.KEY_SCANCEL: 'kCAN',
_curses.KEY_SCOMMAND: 'kCMD',
_curses.KEY_SCOPY: 'kCPY',
_curses.KEY_SCREATE: 'kCRT',
_curses.KEY_SDC: 'kDC',
_curses.KEY_SDL: 'kDL',
_curses.KEY_SELECT: 'kslt',
_curses.KEY_SEND: 'kEND',
_curses.KEY_SEOL: 'kEOL',
_curses.KEY_SEXIT: 'kEXT',
_curses.KEY_SF: 'kind',
_curses.KEY_SFIND: 'kFND',
_curses.KEY_SHELP: 'kHLP',
_curses.KEY_SHOME: 'kHOM',
_curses.KEY_SIC: 'kIC',
_curses.KEY_SLEFT: 'kLFT',
_curses.KEY_SMESSAGE: 'kMSG',
_curses.KEY_SMOVE: 'kMOV',
_curses.KEY_SNEXT: 'kNXT',
_curses.KEY_SOPTIONS: 'kOPT',
_curses.KEY_SPREVIOUS: 'kPRV',
_curses.KEY_SPRINT: 'kPRT',
_curses.KEY_SR: 'kri',
_curses.KEY_SREDO: 'kRDO',
_curses.KEY_SREPLACE: 'kRPL',
_curses.KEY_SRIGHT: 'kRIT',
_curses.KEY_SRSUME: 'kRES',
_curses.KEY_SSAVE: 'kSAV',
_curses.KEY_SSUSPEND: 'kSPD',
_curses.KEY_STAB: 'khts',
_curses.KEY_SUNDO: 'kUND',
_curses.KEY_SUSPEND: 'kspd',
_curses.KEY_UNDO: 'kund',
_curses.KEY_UP: 'kcuu1'}
def has_key(ch):
if isinstance(ch, str):
ch = ord(ch)
capability_name = _capability_names.get(ch)
if capability_name is None:
return False
elif _curses.tigetstr(capability_name):
return True
else:
return False
if __name__ == '__main__':
try:
L = []
_curses.initscr()
for key in _capability_names.keys():
            system = _curses.has_key(key)  # restored from CPython's curses/has_key.py; the decompiled "key in _curses" was invalid
python = has_key(key)
if system != python:
L.append('Mismatch for key %s, system=%i, Python=%i' % (_curses.keyname(key), system, python))
finally:
_curses.endwin()
for i in L:
print i
# okay decompiling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\curses\has_key.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:35:00 Central Europe (standard time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
93758cce46271f9984e7c318b94f42934b31a49c
|
6c412b6f6c112b40e80587a17db537c4683d9dd0
|
/angle/predict.py
|
9a57c713468df723c5f4a5de33866fba64bfc78d
|
[] |
no_license
|
laugha/Chinese-OCR-3
|
50df8904e654413b8816c78866e554579de85678
|
5bc9f201bacf95af38b4eadf471a231f6c93aa1b
|
refs/heads/master
| 2021-01-26T11:03:11.677400
| 2020-02-11T04:28:15
| 2020-02-11T04:28:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,066
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# _Author_: xiaofeng
# Date: 2018-04-22 18:13:46
# Last Modified by: xiaofeng
# Last Modified time: 2018-04-22 18:13:46
'''
Analyse the orientation of the text in a given image
'''
# from keras.models import load_model
import numpy as np
from PIL import Image
from keras.applications.vgg16 import preprocess_input, VGG16
from keras.layers import Dense
from keras.models import Model
# Compile the model and train with a small learning rate
from keras.optimizers import SGD
def load():
vgg = VGG16(weights=None, input_shape=(224, 224, 3))
    # Replace the output layer with a 4-class head
x = vgg.layers[-2].output
predictions_class = Dense(
4, activation='softmax', name='predictions_class')(x)
prediction = [predictions_class]
model = Model(inputs=vgg.input, outputs=prediction)
sgd = SGD(lr=0.00001, momentum=0.9)
model.compile(
optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
model.load_weights(
r'/home/rayjue/MyWayOnOCR/frist/chinese_ocr/angle/modelAngle.h5')
return model
# The model is loaded lazily on first prediction
model = None
def predict(path=None, img=None):
global model
if model is None:
model = load()
"""
图片文字方向预测
"""
ROTATE = [0, 90, 180, 270]
if path is not None:
im = Image.open(path).convert('RGB')
elif img is not None:
im = Image.fromarray(img).convert('RGB')
w, h = im.size
    # Crop the image:
    # top-left corner (int(0.1 * w), int(0.1 * h))
    # bottom-right corner (w - int(0.1 * w), h - int(0.1 * h))
xmin, ymin, xmax, ymax = int(0.1 * w), int(
0.1 * h), w - int(0.1 * w), h - int(0.1 * h)
    im = im.crop((xmin, ymin, xmax, ymax))  # trim the image edges to remove border noise
    # After cropping, resize to (224, 224)
im = im.resize((224, 224))
    # Convert the image to an array
img = np.array(im)
img = preprocess_input(img.astype(np.float32))
pred = model.predict(np.array([img]))
index = np.argmax(pred, axis=1)[0]
return ROTATE[index]
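# A minimal usage sketch (not part of the original file); the image path is a
# placeholder.
if __name__ == '__main__':
    angle = predict(path='example.png')
    print('detected text orientation: %d degrees' % angle)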
|
[
"noreply@github.com"
] |
laugha.noreply@github.com
|
0b6a1ba7dbe457aa088b69afbd6d89460cdb29b7
|
fbc756ec0ee27e0a262f3bcb30954c1f7e10b6cd
|
/eb-cli/lib/python2.7/site-packages/ebcli/controllers/console.py
|
581f8ecc714ad6aac1919cc8f32bb97f28a16f14
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
workivate/step-elastic-beanstalk-deploy
|
a541e2521a77f049c7a03e6e391a73cc1c45c06f
|
5b0bcfac4e8557ce1dc84c0254e2264708da396c
|
refs/heads/master
| 2020-02-26T17:18:00.125354
| 2015-09-17T10:26:25
| 2015-09-17T10:26:25
| 24,529,988
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ..core.abstractcontroller import AbstractBaseController
from ..resources.strings import strings
from ..core import fileoperations, operations, io
class ConsoleController(AbstractBaseController):
class Meta:
label = 'console'
description = strings['console.info']
usage = AbstractBaseController.Meta.usage.replace('{cmd}', label)
def do_command(self):
app_name = self.get_app_name()
region = self.get_region()
env_name = self.get_env_name(noerror=True)
operations.open_console(app_name, env_name, region)
|
[
"teengenerate@gmail.com"
] |
teengenerate@gmail.com
|
32d77a5dfc12bcdd2213bbf1a7edc386cfbd04fa
|
3b74ecbc020ba5d5db436e248200cab14a0d13f4
|
/spider/spiders/OLD/spiderGenerico.py
|
3ddccf28cd18f83a6247f7b8f5c3c51e3ffbad06
|
[] |
no_license
|
paologastaldi/spider
|
ec37577435c6138f60c2514ee36429bfcf6b4bae
|
eae624dd7ad56646d51458b274aada91dd537390
|
refs/heads/master
| 2021-01-21T13:34:03.270087
| 2016-05-14T08:43:48
| 2016-05-14T08:43:48
| 55,077,930
| 1
| 1
| null | 2016-05-14T08:43:49
| 2016-03-30T16:12:46
|
HTML
|
UTF-8
|
Python
| false
| false
| 788
|
py
|
import scrapy
from scrapy.selector import Selector
from spider.items import ElementoDaEsaminare
class DmozSpider(scrapy.Spider):
name = ""
allowed_domains = []
start_urls = []
def __init__(self, name, allowed_domains, start_urls):
self.name = name
self.allowed_domains = allowed_domains
self.start_urls = start_urls
def parse(self, response):
analizzatorePagina = Selector(response)
elencoPagineSito = analizzatorePagina.xpath('//body')
elencoElementiDaEsaminare = []
        for paginaSito in elencoPagineSito:
            elementoDaEsaminare = ElementoDaEsaminare()
            # extract the text of the current node ("site" was undefined in the original)
            pagineSito = paginaSito.xpath('text()').extract()
            elencoElementiDaEsaminare.append(elementoDaEsaminare)
        return elencoElementiDaEsaminare
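# A minimal sketch (not part of the original file) of how this spider could be
# launched programmatically from within the project; the domain and URL are
# placeholders.
if __name__ == '__main__':
    from scrapy.crawler import CrawlerProcess
    process = CrawlerProcess()
    process.crawl(DmozSpider,
                  name='esempio',
                  allowed_domains=['example.com'],
                  start_urls=['http://example.com/'])
    process.start()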
|
[
"paolo97.g@gmail.com"
] |
paolo97.g@gmail.com
|
f2fdb6bcb053fac10b934794a703af5abc5ba306
|
30acccad28c4e353ab6e38eef209c1752c18719f
|
/lambda_functions.py
|
de83ae4d75cc08575013cae4b816330fb8652eec
|
[] |
no_license
|
phumidea/dropbox_app_mockup
|
ede58674f176b635a873641c08b54edadba1bdda
|
1395a33032926df0cb95a80f79831cd0f3ec41d8
|
refs/heads/main
| 2023-04-06T13:35:34.748967
| 2021-04-02T10:32:09
| 2021-04-02T10:32:09
| 353,984,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,029
|
py
|
import json
import boto3
from datetime import datetime, timedelta
BUCKET_NAME = "phum2021" # Bucket name keep user data
s3 = boto3.client('s3') # Connect with S3
dynamodb = boto3.resource('dynamodb') # Connect with DynamoDB
table = dynamodb.Table('myDropboxUsers') # Connect table
def lambda_handler(event, context):
    command = str(event["queryStringParameters"]["command"]) # Determine the requested action from the "command" query parameter
#############################################################################################
if command == "newuser": # Create new user
username = str(event["queryStringParameters"]["username"]) # Get username from request
password = str(event["queryStringParameters"]["password"]) # Get password from request
try:
response = table.get_item(Key={'username': username})["Item"]
return {
'statusCode': 200,
'body': json.dumps("This username alredy existing.")
}
except:
response = table.put_item(Item = {'username':username,'password':password})
return {
'statusCode': 200,
'body': json.dumps("Create newuser finish.")
}
#############################################################################################
elif command == "login": # Login to the application
username = str(event["queryStringParameters"]["username"]) # Get username from request
password = str(event["queryStringParameters"]["password"]) # Get password from request
try:
response = table.get_item(Key={'username': username})["Item"]
if (response["username"] == username) and (response["password"] == password):
return {
'statusCode': 200,
'body': json.dumps("Login successfull")
}
else:
return {
'statusCode': 200,
'body': json.dumps("Wrong password! Please try again")
}
except:
return {
'statusCode': 200,
'body': json.dumps("No username in database.")
}
#############################################################################################
elif command == "put": # Upload file to S3
fileName = str(event["queryStringParameters"]["fileName"]) # Get filename from request
content = str(event["queryStringParameters"]["content"]) # Get file content from request
path = "/tmp/" + fileName # Prepare path to save new file
with open(path, 'w+') as file: # Write content in new file
file.write(content)
s3.upload_file(path, BUCKET_NAME, fileName) # Upload file to S3
return {
'statusCode': 200,
'body': json.dumps("Put finish")
}
#############################################################################################
elif command == "view": # List all file in bucket
username = str(event["queryStringParameters"]["username"])
all_file = [] # Create empty list for containing object information
for obj in s3.list_objects(Bucket=BUCKET_NAME)["Contents"]: # Check loop for each object in bucket
key = str(obj["Key"])
key_list = key.split("+")
if username in key_list[1:]:
file = dict()
file["Key"] = key_list[0]
file["LastModified"] = (obj["LastModified"] + timedelta(hours=7)).strftime("%Y-%m-%d %H:%M:%S")
file["Size"] = obj["Size"]
all_file.append(file)
return {
'statusCode': 200,
'body': json.dumps(all_file)
}
#############################################################################################
elif command == "get": # Download file from S3
fileName = str(event["queryStringParameters"]["fileName"]) # get file name from request
username = str(event["queryStringParameters"]["username"])
for obj in s3.list_objects(Bucket=BUCKET_NAME)["Contents"]: # check each object
key_str = str(obj["Key"])
key_list = key_str.split("+")
if (key_list[0] == fileName) and (username in key_list[1:]): # Match obj with file name
path = "/tmp/"+key_str # create tmp path
                s3.download_file(BUCKET_NAME, key_str, path) # download the file from S3
file = open(path,"r") # open and read content inside
content = file.read()
return {
'statusCode': 200,
'body': content
}
return {
'statusCode': 200,
'body': json.dumps("Type wrong filename")
}
#############################################################################################
elif command == "share":
fileName = str(event["queryStringParameters"]["fileName"]) # get file name from request
username = str(event["queryStringParameters"]["username"])
share_user = str(event["queryStringParameters"]["share_user"])
for obj in s3.list_objects(Bucket=BUCKET_NAME)["Contents"]:
key = str(obj["Key"])
key_list = key.split("+")
if (key_list[0] == fileName) and (key_list[1] == username):
copy_source = {"Bucket":BUCKET_NAME, "Key":key}
s3.copy(copy_source,BUCKET_NAME,key+"+"+share_user)
s3.delete_object(Bucket=BUCKET_NAME, Key=key)
return {
'statusCode': 200,
'body': "Finish"
}
#############################################################################################
else:
return {
'statusCode': 500,
'body': json.dumps("AWS Service broken")
}
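# A minimal local test sketch (not part of the original file); the event shape
# mirrors what API Gateway delivers, with placeholder values, and running it
# requires AWS credentials to be configured for the boto3 clients above.
if __name__ == '__main__':
    fake_event = {"queryStringParameters": {"command": "login",
                                            "username": "alice",
                                            "password": "secret"}}
    print(lambda_handler(fake_event, None))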
|
[
"noreply@github.com"
] |
phumidea.noreply@github.com
|
26a12bd802e8a93364e938b58d788e87da974974
|
5d095521eb9a4e3a58c98938775da390b988098e
|
/Python/BatterUp.py
|
54369fef4c202b26c5a31429b0750a7430bf7421
|
[] |
no_license
|
houstonwalley/Kattis
|
c50932ccba12f7a7777c01caff1383d11d90d698
|
c825e5f612f2c9f11de1fad610f6b830a4be45af
|
refs/heads/master
| 2021-07-21T10:38:11.580292
| 2020-09-01T19:37:38
| 2020-09-01T19:37:38
| 210,641,358
| 0
| 0
| null | 2020-04-28T03:34:39
| 2019-09-24T15:51:00
|
Python
|
UTF-8
|
Python
| false
| false
| 115
|
py
|
n = int(input())
s = map(int, input().split())
v = []
for m in s:
if m >= 0:
v.append(m)
print(sum(v)/len(v))
|
[
"noreply@github.com"
] |
houstonwalley.noreply@github.com
|
6861c429f30b05eaab663c0de04d4e13e8a6785c
|
a7bf172fc3ece3fd103a4d295c0c68636a99236f
|
/vDictEspn.py
|
670d05a708a30373c64bc4a0487d3366efe8aeb7
|
[] |
no_license
|
thedaynos/fantasyDraftHighlights
|
e781e098bf63bed31f28a2ce911bb0208c82b3d8
|
3cf0b3e63896183bbf03ea7cbcaff16ccf161024
|
refs/heads/master
| 2023-06-07T11:52:06.066918
| 2021-07-20T22:25:45
| 2021-07-20T22:25:45
| 197,640,436
| 21
| 7
| null | 2023-05-30T18:58:19
| 2019-07-18T18:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 17,396
|
py
|
vDictEspn={
"RBNYGSaquonBarkley":"",
"RBDalEzekielElliott":"",
"RBCarChristianMcCaffrey":"",
"RBNOAlvinKamara":"",
"RBNYJLeVeonBell":"",
"WRHouDeAndreHopkins":"",
"RBLARToddGurleyII":"",
"WRGBDavanteAdams":"",
"RBLACMelvinGordon":"",
"WRNOMichaelThomas":"",
"WRAtlJulioJones":"",
"RBAriDavidJohnson":"",
"TEKCTravisKelce":"",
"WRKCTyreekHill":"",
"WRCleOdellBeckhamJr":"",
"RBPitJamesConner":"",
"WROakAntonioBrown":"",
"QBKCPatrickMahomes":"",
"WRPitJuJuSmithSchuster":"",
"RBCinJoeMixon":"",
"WRLACKeenanAllen":"",
"RBMinDalvinCook":"",
"WRTBMikeEvans":"",
"RBJaxLeonardFournette":"",
"TEPhiZachErtz":"",
"TESFGeorgeKittle":"",
"WRIndTYHilton":"",
"WRDalAmariCooper":"",
"RBCleNickChubb":"",
"WRMinAdamThielen":"",
"RBAtlDevontaFreeman":"",
"WRNEJulianEdelman":"",
"RBTenDerrickHenry":"",
"WRLARBrandinCooks":"",
"RBOakJoshJacobs":"",
"WRMinStefonDiggs":"",
"WRCinAJGreen":"",
"QBHouDeshaunWatson":"",
"RBIndMarlonMack":"",
"RBDetKerryonJohnson":"",
"QBGBAaronRodgers":"",
"RBGBAaronJones":"",
"RBKCDamienWilliams":"",
"WRLARRobertWoods":"",
"TENYGEvanEngram":"",
"RBNESonyMichel":"",
"RBDenPhillipLindsay":"",
"WRLARCooperKupp":"",
"WRPhiAlshonJeffery":"",
"RBNEJamesWhite":"",
"WRDetKennyGolladay":"",
"RBBalMarkIngramII":"",
"QBIndAndrewLuck":"",
"RBSeaChrisCarson":"",
"DSTChiBearsDST":"M3_q67ABSSw",
"QBAtlMattRyan":"",
"WRSeaTylerLockett":"",
"TETBOJHoward":"",
"QBCleBakerMayfield":"",
"WRCleJarvisLandry":"",
"TELACHunterHenry":"",
"WRTBChrisGodwin":"",
"WRAtlCalvinRidley":"",
"WRCinTylerBoyd":"",
"WRKCSammyWatkins":"",
"TENOJaredCook":"",
"QBNODrewBrees":"",
"WRCarDJMoore":"",
"RBMiaKenyanDrake":"",
"RBChiTarikCohen":"",
"RBChiDavidMontgomery":"",
"WRLACMikeWilliams":"",
"RBWshDerriusGuice":"",
"WRChiAllenRobinson":"",
"QBCarCamNewton":"",
"DSTLARRamsDST":"2QujiGePXUk",
"RBHouLamarMiller":"",
"KLARGregZuerlein":"",
"WRNYJRobbyAnderson":"",
"RBSFTevinColeman":"",
"TEIndEricEbron":"",
"RBPhiMilesSanders":"",
"QBPhiCarsonWentz":"",
"WRHouWillFullerV":"",
"WRNYGSterlingShepard":"",
"TECleDavidNjoku":"",
"RBBufLeSeanMcCoy":"",
"WRDetMarvinJonesJr":"",
"DSTJaxJaguarsDST":"ozYX_neURXs",
"KBalJustinTucker":"",
"WRJaxDedeWestbrook":"",
"WRAriChristianKirk":"",
"TETenDelanieWalker":"",
"TEPitVanceMcDonald":"",
"DSTBufBillsDST":"yIarX0CYrec",
"RBLACAustinEkeler":"",
"WRSFDantePettis":"",
"WRAriLarryFitzgerald":"",
"QBSeaRussellWilson":"",
"QBNETomBrady":"",
"RBPhiJordanHoward":"",
"QBPitBenRoethlisberger":"",
"RBNOLataviusMurray":"",
"WRTenCoreyDavis":"",
"RBLARDarrellHenderson":"",
"QBDalDakPrescott":"",
"KKCHarrisonButker":"",
"WRNYGGoldenTate":"",
"QBAriKylerMurray":"",
"RBSeaRashaadPenny":"",
"DSTMinVikingsDST":"CeT1zNWs5k",
"QBLARJaredGoff":"",
"KNEStephenGostkowski":"",
"DSTNOSaintsDST":"DMz24zixNMQ",
"WRNENKealHarry":"",
"KNOWilLutz":"",
"RBTBPeytonBarber":"",
"RBIndNyheimHines":"",
"WRDenEmmanuelSanders":"",
"WRPhiDeSeanJackson":"",
"DSTLACChargersDST":"jmhi-gUoRwI",
"WRDenCourtlandSutton":"",
"KHouKaimiFairbairn":"",
"RBTBRonaldJones":"",
"RBSFJerickMcKinnon":"",
"WRHouKekeCoutee":"",
"TEChiTreyBurton":"",
"RBCleKareemHunt":"",
"WRCarCurtisSamuel":"",
"DSTNEPatriotsDST":"Mj7AfBA6VZQ",
"TECarGregOlsen":"",
"WRDalMichaelGallup":"",
"KSFRobbieGould":"",
"WRPitDonteMoncrief":"",
"WRGBGeronimoAllison":"",
"TEAtlAustinHooper":"",
"RBHouDOntaForeman":"",
"RBDenRoyceFreeman":"",
"WRSeaDKMetcalf":"",
"RBKCCarlosHyde":"",
"DSTIndColtsDST":"k3-H03xkRX4",
"QBLACPhilipRivers":"",
"QBChiMitchellTrubisky":"",
"RBSFMattBreida":"",
"TEMinKyleRudolph":"",
"DSTDenBroncosDST":"YmOKim16x4s",
"KLACMikeBadgley":"",
"KIndAdamVinatieri":"",
"TEGBJimmyGraham":"",
"WRPitJamesWashington":"",
"DSTHouTexansDST":"mWWB8Y1gDx8",
"DSTPhiEaglesDST":"4PRndm6u38M",
"WROakTyrellWilliams":"",
"DSTCleBrownsDST":"W2pGqDeNYu4",
"WRGBMarquezValdesScantling":"",
"QBBalLamarJackson":"",
"WRIndDevinFunchess":"",
"RBNEDamienHarris":"",
"TEIndJackDoyle":"",
"KDalBrettMaher":"",
"DSTBalRavensDST":"hb6EV_FnFwE",
"TEWshJordanReed":"",
"WRBalMarquiseBrown":"",
"WRDenDaeSeanHamilton":"",
"QBTBJameisWinston":"",
"DSTDalCowboysDST":"OnOTXPmaFl8",
"RBTenDionLewis":"",
"WRNYJJamisonCrowder":"",
"RBMiaKalenBallage":"",
"QBMinKirkCousins":"",
"TEDetTJHockenson":"",
"RBWshAdrianPeterson":"",
"WRTenAdamHumphries":"",
"WRIndParrisCampbell":"",
"KPhiJakeElliott":"",
"QBSFJimmyGaroppolo":"",
"WRChiAnthonyMiller":"",
"DSTPitSteelersDST":"s2MJWK5zxv4",
"WRAtlMohamedSanu":"",
"KDetMattPrater":"",
"TEDenNoahFant":"",
"DSTKCChiefsDST":"NotMr7-z6tQ",
"WRMiaKennyStills":"",
"TEDalJasonWitten":"",
"KGBMasonCrosby":"",
"DSTTenTitansDST":"57cjzFe9qhM",
"RBDetCJAnderson":"",
"RBPitJaylenSamuels":"",
"QBOakDerekCarr":"",
"QBBufJoshAllen":"",
"KNYGAldrickRosas":"",
"WRDalRandallCobb":"",
"RBOakJalenRichard":"",
"TEBalMarkAndrews":"",
"QBNYJSamDarnold":"",
"DSTCarPanthersDST":"tfAA7MJvXYU",
"QBDetMatthewStafford":"",
"RBGBJamaalWilliams":"",
"WRKCMecoleHardman":"",
"QBJaxNickFoles":"",
"RBAtlItoSmith":"",
"RBCleDukeJohnsonJr":"",
"QBWshDwayneHaskins":"",
"TECinTylerEifert":"",
"DSTNYJJetsDST":"1PdNL8Br3uM",
"DSTGBPackersDST":"BJ-7mCLfZBQ",
"TENERobGronkowski":"",
"RBCinGiovaniBernard":"",
"DSTSeaSeahawksDST":"BfwnvukHwBM",
"RBBufDevinSingletary":"",
"WRNOTedGinnJr":"",
"WRMiaAlbertWilson":"",
"RBWshChrisThompson":"",
"DSTSFersDST":"Glh2C1nUkP0",
"KCarGrahamGano":"",
"KMinDanBailey":"",
"KSeaJasonMyers":"",
"RBLACJustinJackson":"",
"DSTDetLionsDST":"L35Exibkvfg",
"KPitChrisBoswell":"",
"TEPhiDallasGoedert":"",
"TENYJChrisHerndon":"",
"DSTAtlFalconsDST":"Fr7PdwOzgn8",
"RBMinAlexanderMattison":"",
"QBTenMarcusMariota":"",
"DSTOakRaidersDST":"jnsv58g-lRQ",
"WRSFDeeboSamuel":"",
"WRNEDemaryiusThomas":"",
"TENEBenjaminWatson":"",
"DSTWshRedskinsDST":"AjtdTXK8_EA",
"WRTenAJBrown":"",
"WRBufColeBeasley":"",
"RBPitBennySnellJr":"",
"TEMiaMikeGesicki":"",
"QBNYGEliManning":"",
"WRAriAndyIsabella":"",
"QBOakNathanPeterman":"",
"WRNEJoshGordon":"",
"WRSFMarquiseGoodwin":"",
"KTenRyanSuccop":"",
"DSTNYGGiantsDST":"axSSb-HoT8Y",
"WRBufJohnBrown":"",
"KCleAustinSeibert":"",
"KAtlGiorgioTavecchio":"",
"WRPhiNelsonAgholor":"",
"KDenBrandonMcManus":"",
"QBDenJoeFlacco":"",
"RBDalTonyPollard":"",
"RBKCDarwinThompson":"",
"WRNOTreQuanSmith":"",
"KWshDustinHopkins":"",
"WRCleAntonioCallaway":"",
"KBufStephenHauschka":"",
"WRTBAnthonyJohnson":"",
"QBNYGDanielJones":"",
"KTBMattGay":"",
"RBPhiDarrenSproles":"",
"DSTAriCardinalsDST":"cY4Os6h2p1k",
"TEMinIrvSmithJr":"",
"KLACNickNovak":"",
"RBBufFrankGore":"",
"DSTTBBuccaneersDST":"6Qvo_yVtIE8",
"KSeaSebastianJanikowski":"",
"RBOakMarshawnLynch":"",
"KAtlMattBryant":"",
"KChiCodyParkey":"",
"RBDalAlfredMorris":"",
"RBNERexBurkhead":"",
"DSTMiaDolphinsDST":"59y91tJ-XfA",
"KJaxJoshLambo":"",
"QBWshMarkSanchez":"",
"QBFAJayCutler":"",
"QBNOTaysomHill":"",
"TESeaWillDissly":"",
"KLACYounghoeKoo":"",
"QBAriSamBradford":"",
"RBBalGusEdwards":"",
"WRChiTaylorGabriel":"",
"TEOakDarrenWaller":"",
"WRNOLilJordanHumphrey":"",
"KChiEddyPineiro":"",
"RBPhiJayAjayi":"",
"WRNODezBryant":"",
"KOakMikeNugent":"",
"WRNOBrandonMarshall":"",
"TENYGCJConrad":"",
"TELACAntonioGates":"",
"RBBalJusticeHill":"",
"RBKCCJSpiller":"",
"WRChiRileyRidley":"",
"KAriZaneGonzalez":"",
"WRDetDannyAmendola":"",
"KOakDanielCarlson":"",
"QBCinAndyDalton":"",
"WROakJordyNelson":"",
"WRMiaDeVanteParker":"",
"TEDenJakeButt":"",
"KSeaBlairWalsh":"",
"WRSeaDougBaldwin":"",
"KMiaJasonSanders":"",
"WRCleRashardHiggins":"",
"WRCinJohnRoss":"",
"QBBalRobertGriffinIII":"",
"WRDalTavonAustin":"",
"WRBalMichaelCrabtree":"",
"RBNEGlennGronkowski":"",
"RBBufTJYeldon":"",
"RBOakIsaiahCrowell":"",
"DSTCinBengalsDST":"aSpB5PkgcyQ",
"WRCarChrisHogan":"",
"QBLACKellenClemens":"",
"RBKCAkeemHunt":"",
"WRAriHakeemButler":"",
"KChiConnorBarth":"",
"WRChiCordarrellePatterson":"",
"TEMiaAnthonyFasano":"",
"QBFACarsonPalmer":"",
"TENOJoshHill":"",
"TECleDemetriusHarris":"",
"TEFAMartellusBennett":"",
"RBDetLeGarretteBlount":"",
"RBChiMikeDavis":"",
"KNYJChandlerCatanzaro":"",
"RBKCDarrelWilliams":"",
"QBMiaRyanFitzpatrick":"",
"TEChiZachMiller":"",
"RBNOJohnKuhn":"",
"KCinRandyBullock":"",
"WRNOBrandonTate":"",
"TENEMattLaCosse":"",
"WRPitRyanSwitzer":"",
"RBMinCJHam":"",
"TEDetJesseJames":"",
"RBNEJamesDevelin":"",
"KTBNickFolk":"",
"WRPhiJJArcegaWhiteside":"",
"WRMinChadBeebe":"",
"RBKCTremonSmith":"",
"TEKCBlakeBell":"",
"RBDetTyJohnson":"",
"WRBufZayJones":"",
"WROakJJNelson":"",
"TEDalBlakeJarwin":"",
"RBDalMikeWeber":"",
"TENOMichaelHoomanawanui":"",
"KClePhilDawson":"",
"RBFAMattForte":"",
"RBKCAnthonySherman":"",
"WRKCKelvinBenjamin":"",
"RBDetZachZenner":"",
"KLACTravisCoons":"",
"KCleGregJoseph":"",
"KTBCairoSantos":"",
"TELARGeraldEverett":"",
"QBPitJoshuaDobbs":"",
"WROakHunterRenfrow":"",
"RBDalDariusJackson":"",
"QBNEJarrettStidham":"",
"TEKCNealSterling":"",
"WRJaxTyreBrady":"",
"TENODanArnold":"",
"WRSFLouisMurphy":"",
"WRKCDemarcusRobinson":"",
"RBLACDerekWatt":"",
"WRPhiMikeWallace":"",
"TENELanceKendricks":"",
"WRNEBraxtonBerrios":"",
"KGBSamFicken":"",
"WROakBrandonLaFell":"",
"WRDalNoahBrown":"",
"WRPitDiontaeJohnson":"",
"WRLARMikeThomas":"",
"QBLACTyrodTaylor":"",
"WRNOKeithKirkwood":"",
"KLACRobertoAguayo":"",
"RBPhiWendellSmallwood":"",
"WRFAKennyBritt":"",
"QBLARBlakeBortles":"",
"TEAriRickySealsJones":"",
"WRKCGehrigDieter":"",
"QBChiChaseDaniel":"",
"WRGBJakeKumerow":"",
"RBTenAkrumWadley":"",
"RBKCAlgernonBrown":"",
"QBJaxGardnerMinshew":"",
"RBAtlRickyOrtiz":"",
"QBGBTimBoyle":"",
"TEPitXavierGrimble":"",
"RBCleDontrellHilliard":"",
"RBTenChrisJohnson":"",
"QBHouBrandonWeeden":"",
"WRSFMaxMcCaffrey":"",
"TEFACobyFleener":"",
"RBFADeMarcoMurray":"",
"WRIndMarcusJohnson":"",
"TEGBJaceSternberger":"",
"WRNYGRussellShepard":"",
"WRPhiCarltonAgudosi":"",
"TELACVirgilGreen":"",
"WRNOAustinCarr":"",
"WRHouDeAndreCarter":"",
"RBMinAmeerAbdullah":"",
"WRMinLaquonTreadwell":"",
"WRNEDontrelleInman":"",
"RBHouJoshFerguson":"",
"WRAriKevinWhite":"",
"WRCinJoshMalone":"",
"WRBalMilesBoykin":"",
"WRAtlJustinHardy":"",
"TELARTylerHigbee":"",
"WRAtlRussellGage":"",
"QBMiaBrockOsweiler":"",
"QBFATJYates":"",
"QBTBRyanGriffin":"",
"RBPitRooseveltNix":"",
"RBBufSamRogers":"",
"TEAtlLoganPaulsen":"",
"RBSFJoeWilliams":"",
"RBFAEddieLacy":"",
"WRChiMarvinHall":"",
"WRTenHarryDouglas":"",
"QBKCChadHenne":"",
"RBJaxTommyBohanon":"",
"TENYGRhettEllison":"",
"KTBPatrickMurray":"",
"RBAriTJLogan":"",
"KJaxKaiForbath":"",
"TETenJonnuSmith":"",
"WRDenRiverCracraft":"",
"WRBufAndreRoberts":"",
"TEHouJordanAkins":"",
"QBLACCardaleJones":"",
"TESeaEdDickson":"",
"WRCleDamionRatley":"",
"RBNYGPaulPerkins":"",
"WRDenAaronBurbridge":"",
"WRKCMarcusKemp":"",
"WRTenJalenTolliver":"",
"RBBufMikeTolbert":"",
"KLACNickRose":"",
"RBCarElijahHood":"",
"WRNORishardMatthews":"",
"TESeaTyroneSwoopes":"",
"RBNODevineOzigbo":"",
"RBNYGRodSmith":"",
"QBPhiCodyKessler":"",
"RBNYGJonathanStewart":"",
"WRBufRayRayMcCloudIII":"",
"RBChiFreddieStevenson":"",
"RBPhiMattJones":"",
"TEChiAdamShaheen":"",
"RBGBDonJackson":"",
"QBPitMasonRudolph":"",
"WRWshRobertDavis":"",
"RBSFKyleJuszczyk":"",
"RBHouBuddyHowell":"",
"WRFAEricDecker":"",
"RBPitStevanRidley":"",
"QBDetTomSavage":"",
"RBDenKhalfaniMuhammad":"",
"WRMinBrandonZylstra":"",
"RBJaxJamaalCharles":"",
"WRCarTorreySmith":"",
"TECinCJUzomah":"",
"KMiaAndrewFranks":"",
"QBSeaPaxtonLynch":"",
"QBDalMikeWhite":"",
"TENOAlizeMack":"",
"WRKCByronPringle":"",
"WRNEMatthewSlater":"",
"QBCleGarrettGilbert":"",
"QBNYJTrevorSiemian":"",
"WRWshCamSims":"",
"WRPitDarriusHeywardBey":"",
"TEJaxJamesOShaughnessy":"",
"QBWshJoshJohnson":"",
"QBNYJJoshMcCown":"",
"QBSeaGenoSmith":"",
"QBBufMattBarkley":"",
"QBWshAlexSmith":"",
"RBFADannyWoodhead":"",
"WRDetChrisLacy":"",
"RBCinTrayveonWilliams":"",
"WRWshJehuChesson":"",
"TEMiaNickOLeary":"",
"RBHouTaiwanJones":"",
"KLACCalebSturgis":"",
"RBFAOrleansDarkwa":"",
"WRMinJordanTaylor":"",
"TEPhiBrentCelek":"",
"RBNOZachLine":"",
"QBSFCJBeathard":"",
"WRMiaIsaiahFord":"",
"RBNYGShaneSmith":"",
"RBAriDJFoster":"",
"TEDalDaltonSchultz":"",
"TEMiaDwayneAllen":"",
"TEWshVernonDavis":"",
"TENEStephenAnderson":"",
"TEPitZachGentry":"",
"WRFAJeremyMaclin":"",
"WRSeaAmaraDarboh":"",
"WRDetJaceBillingsley":"",
"QBSeaTrevoneBoykin":"",
"QBAtlMattSchaub":"",
"WRHouVyncintSmith":"",
"QBCinJeffDriskel":"",
"TEBufTylerKroft":"",
"WRLACDylanCantrell":"",
"QBChiTylerBray":"",
"TEBufLeeSmith":"",
"WRLARJoJoNatson":"",
"TESeaNickVannett":"",
"QBMiaJakeRudock":"",
"WRPhiCharlesJohnson":"",
"RBPhiJoshAdams":"",
"WRSeaMalikTurner":"",
"WRSFPierreGarcon":"",
"WRMiaAllenHurns":"",
"WRHouJoeWebbIII":"",
"WRTenCameronBatson":"",
"RBCarAlexArmah":"",
"QBHouAJMcCarron":"",
"RBNEBrandonBolden":"",
"QBCleDrewStanton":"",
"TEMinColeHikutini":"",
"WRNEMauriceHarris":"",
"WRChiJavonWims":"",
"WRDalDevinSmith":"",
"WRAriKeeSeanJohnson":"",
"WRBalQuincyAdeboyejo":"",
"WRDalCedrickWilson":"",
"RBDenAndyJanovich":"",
"QBLARBrandonAllen":"",
"QBMiaJoshRosen":"",
"TEMinDavidMorgan":"",
"WRKCSammieCoatesJr":"",
"RBSeaBoScarbrough":"",
"RBCarDevonJohnson":"",
"WRNYJQuadreeHenderson":"",
"TENYJTrevonWesco":"",
"RBJaxThomasRawls":"",
"QBNOTeddyBridgewater":"",
"WRDenBrendanLangley":"",
"TESFLevineToilolo":"",
"RBKCAaronRipkowski":"",
"TENYGGarrettDickerson":"",
"WRNYJQuincyEnunwa":"",
"RBNYJTyMontgomery":"",
"WRCinAudenTate":"",
"QBSFNickMullens":"",
"TENERyanIzzo":"",
"QBTBNickFitzgerald":"",
"WRNEDannyEtling":"",
"WRLARMarquezNorth":"",
"TETenPhillipSupernaw":"",
"TELACJeffCumberland":"",
"WRSeaDamoreeaStringfellow":"",
"RBPhiDonnelPumphrey":"",
"TEMinTylerConklin":"",
"TEGBMarcedesLewis":"",
"WRNECameronMeredith":"",
"TELARHenryKriegerCoble":"",
"WRCinAlexErickson":"",
"WROakDwayneHarris":"",
"RBDenDavidWilliams":"",
"TEWshJeremySprinkle":"",
"TEDenJeffHeuerman":"",
"TELACBraedonBowman":"",
"WRNYJLuckyWhitehead":"",
"RBSeaCJProsise":"",
"TEKCAlexEllis":"",
"TENYGScottSimonson":"",
"RBAtlJeremyLangford":"",
"KChiChrisBlewitt":"",
"RBGBDannyVitale":"",
"WRPitJohnnyHolton":"",
"RBChiKerrithWhyteJr":"",
"WRPhiMackHollins":"",
"WRIndDauriceFountain":"",
"TECleSethDeValve":"",
"RBCleKelvinTaylor":"",
"WRCarKeyarrisGarrett":"",
"WRChiTannerGentry":"",
"WRTBBoboWilson":"",
"WRSFJalenHurd":"",
"WRAriDamiereByrd":"",
"TELACAustinRoberts":"",
"QBPhiChristianHackenberg":"",
"QBLACEastonStick":"",
"QBFAMattMoore":"",
"TEAriIfeanyiMomah":"",
"WRIndZachPascal":"",
"WRGBJawillDavis":"",
"TENYJEricTomlinson":"",
"TEJaxDonnieErnsberger":"",
"RBCinQuintonFlowers":"",
"TEOakDerekCarrier":"",
"TEAtlEricSaubert":"",
"TECinDrewSample":"",
}
|
[
"noreply@github.com"
] |
thedaynos.noreply@github.com
|
8939b092b0f7a5321485af541808b9e5b0c6e4e9
|
30f88ef95fc374335574835342f84b75a722d0b5
|
/tools/vcoco/train_net_pd.py
|
dbc8fffa66de8933c3229e661372dd1bb971c82e
|
[] |
no_license
|
MuchHair/PD-Net-Extended-Version
|
8f4c1a69d97e1d95167555dcf1af638aad3f8c14
|
84814817efdd0700570ddcdbbddbb25cdefbb8c6
|
refs/heads/master
| 2023-04-22T05:30:48.832382
| 2021-04-25T12:27:48
| 2021-04-25T12:27:48
| 292,491,878
| 10
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,131
|
py
|
import sys
sys.path.insert(0, "lib")
from dataset.dataset import Features_PD_VCOCO
from net.model import PD_Net
import os
import itertools
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
tqdm.monitor_interval = 0  # disable tqdm's background monitor thread
import torch.optim as optim
from torch.autograd import Variable  # pre-0.4 PyTorch API (Variable / loss.data[0]) used throughout
from tensorboard_logger import configure, log_value
from torch.utils.data.sampler import RandomSampler
import utils.io as io
def train_model(model, dataset_train, dataset_val, lr, num_epochs, model_dir, exp_name, parm_need_train=None):
if parm_need_train is None:
params = itertools.chain(
model.parameters())
optimizer = optim.Adam(params, lr=lr)
else:
optimizer = optim.Adam(parm_need_train, lr=lr)
criterion = nn.BCELoss()
model.train()
step = 0
optimizer.zero_grad()
for epoch in range(0, num_epochs):
sampler = RandomSampler(dataset_train)
for i, sample_id in enumerate(sampler):
data = dataset_train[sample_id]
feats = {
'human_feats': Variable(torch.cuda.FloatTensor(data['human_feat'])),
'object_feats': Variable(torch.cuda.FloatTensor(data['object_feat'])),
'union_feats': Variable(torch.cuda.FloatTensor(data['union_features'])),
'box': Variable(torch.cuda.FloatTensor(data['box_feat'])),
'absolute_pose': Variable(torch.cuda.FloatTensor(data['absolute_pose'])),
'relative_pose': Variable(torch.cuda.FloatTensor(data['relative_pose'])),
'human_prob_vec': Variable(torch.cuda.FloatTensor(data['human_prob_vec'])),
'object_prob_vec': Variable(torch.cuda.FloatTensor(data['object_prob_vec'])),
'object_one_hot': Variable(torch.cuda.FloatTensor(data['object_one_hot'])),
'prob_mask': Variable(torch.cuda.FloatTensor(data['prob_mask'])),
"human_prob": Variable(torch.cuda.FloatTensor(data['human_prob'])),
"object_prob": Variable(torch.cuda.FloatTensor(data['object_prob'])),
"verb_object_vec": Variable(torch.cuda.FloatTensor(data["verb_obj_vec"])),
"hoi_label": Variable(torch.cuda.FloatTensor(data['hoi_label']))
}
verb_scores, hoi_scores = model(feats)
hoi_labels = Variable(torch.cuda.FloatTensor(data['hoi_label_vec']))
loss1 = criterion(verb_scores, hoi_labels)
loss2 = criterion(hoi_scores, hoi_labels)
loss = loss1 + loss2
loss.backward()
            if step % 1 == 0:  # always true here; kept as a hook for gradient accumulation
optimizer.step()
optimizer.zero_grad()
max_prob = hoi_scores.max().data[0]
max_prob_tp = torch.max(hoi_scores * hoi_labels).data[0]
if step % 20 == 0 and step != 0:
num_tp = np.sum(data['hoi_label'])
num_fp = data['hoi_label'].shape[0] - num_tp
log_str = \
'Epoch: {} | Iter: {} | Step: {} | ' + \
'Train Loss: {:.8f} | TPs: {} | FPs: {} | ' + \
'Max TP Prob: {:.8f} | Max Prob: {:.8f} | lr:{}'
log_str = log_str.format(
epoch,
i,
step,
loss.data[0],
num_tp,
num_fp,
max_prob_tp,
max_prob,
optimizer.param_groups[0]["lr"])
print(log_str)
if step % 100 == 0:
log_value('train_loss', loss.data[0], step)
log_value('max_prob', max_prob, step)
log_value('max_prob_tp', max_prob_tp, step)
print(exp_name)
            if step % 1000 == 0 and step > 9000:
                val_loss, val_loss_1, val_loss_2 = eval_model(model, dataset_val)
                log_value('val_loss', val_loss, step)
                log_value('val_loss_1', val_loss_1, step)
                log_value('val_loss_2', val_loss_2, step)
                print(exp_name)
                model.train()  # eval_model() leaves the model in eval mode; restore training mode
if step == 10 or (step % 1000 == 0 and step > 9000):
hoi_classifier_pth = os.path.join(
model_dir, "model",
f'hoi_classifier_{step}')
torch.save(
model.state_dict(),
hoi_classifier_pth)
step += 1
def eval_model(model, dataset):
model.eval()
criterion = nn.BCELoss()
step = 0
val_loss = 0
val_loss1 = 0
val_loss2 = 0
count = 0
sampler = RandomSampler(dataset)
torch.manual_seed(0)
for sample_id in tqdm(sampler):
data = dataset[sample_id]
feats = {
'human_feats': Variable(torch.cuda.FloatTensor(data['human_feat'])),
'union_feats': Variable(torch.cuda.FloatTensor(data['union_features'])),
'object_feats': Variable(torch.cuda.FloatTensor(data['object_feat'])),
'box': Variable(torch.cuda.FloatTensor(data['box_feat'])),
'absolute_pose': Variable(torch.cuda.FloatTensor(data['absolute_pose'])),
'relative_pose': Variable(torch.cuda.FloatTensor(data['relative_pose'])),
'human_prob_vec': Variable(torch.cuda.FloatTensor(data['human_prob_vec'])),
'object_prob_vec': Variable(torch.cuda.FloatTensor(data['object_prob_vec'])),
'object_one_hot': Variable(torch.cuda.FloatTensor(data['object_one_hot'])),
'prob_mask': Variable(torch.cuda.FloatTensor(data['prob_mask'])),
"human_prob": Variable(torch.cuda.FloatTensor(data['human_prob'])),
"object_prob": Variable(torch.cuda.FloatTensor(data['object_prob'])),
"verb_object_vec": Variable(torch.cuda.FloatTensor(data["verb_obj_vec"])),
}
verb_scores, hoi_scores = model(feats)
hoi_labels = Variable(torch.cuda.FloatTensor(data['hoi_label_vec']))
loss1 = criterion(verb_scores, hoi_labels)
loss2 = criterion(hoi_scores, hoi_labels)
loss = loss1 + loss2
batch_size = verb_scores.size(0)
val_loss1 += (batch_size * loss1.data[0])
val_loss2 += (batch_size * loss2.data[0])
val_loss += (batch_size * loss.data[0])
count += batch_size
step += 1
val_loss = val_loss / float(count)
val_loss1 = val_loss1 / float(count)
val_loss2 = val_loss2 / float(count)
return val_loss, val_loss1, val_loss2
def main_PD_net():
model = PD_Net(True, 4).cuda()
lr = 1e-4
num_epochs = 10
model_dir = "output/vcoco/PD"
io.mkdir_if_not_exists(model_dir, recursive=True)
io.mkdir_if_not_exists(os.path.join(model_dir, "log"))
io.mkdir_if_not_exists(os.path.join(model_dir, "model"))
configure(os.path.join(model_dir, "log"))
dataset_train = Features_PD_VCOCO(subset="trainval", fp_to_tp_ratio=1000)
dataset_val = Features_PD_VCOCO(subset="test")
print(model)
train_model(model, dataset_train, dataset_val, lr, num_epochs, model_dir, model_dir)
if __name__ == "__main__":
main_PD_net()
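# Hypothetical usage sketch (not part of the original script): evaluating a
# saved checkpoint with the same constructor arguments used in main_PD_net();
# the checkpoint filename below is an assumption based on the save pattern above.
# model = PD_Net(True, 4).cuda()
# model.load_state_dict(torch.load("output/vcoco/PD/model/hoi_classifier_10000"))
# val_loss, val_loss_1, val_loss_2 = eval_model(model, Features_PD_VCOCO(subset="test"))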
|
[
"eexubin@mail.scut.edu.cn"
] |
eexubin@mail.scut.edu.cn
|
69ad3dd86512c379e348acbd6e38a601c72bb95e
|
3da29484b8d6d598c75dc5446be89c96c8209e3e
|
/data/BK_MC_2012_27163003_Beam4000GeV-2012-MagDown-Nu2.5-Pythia8_Sim08a_Digi13_Trig0x409f0045_Reco14a_Stripping20NoPrescalingFlagged_ALLSTREAMS.DST.py
|
f2e099a204b7805f9a7737402bc9f7313e7a5e16
|
[] |
no_license
|
betatim/glowing-wookie
|
71560dd14cd23466e629dadc6de270c04d848016
|
fb327714cd066c7c58864d6aec8424271d147f06
|
refs/heads/master
| 2016-09-05T19:39:30.174535
| 2014-08-08T16:25:21
| 2014-08-08T16:25:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,844
|
py
|
#-- GAUDI jobOptions generated on Thu Jul 18 16:26:43 2013
#-- Contains event types :
#-- 27163003 - 206 files - 3513735 events - 736.90 GBytes
#-- Extra information about the data processing phases:
#-- Processing Pass Step-124620
#-- StepId : 124620
#-- StepName : Digi13 with G4 dE/dx
#-- ApplicationName : Boole
#-- ApplicationVersion : v26r3
#-- OptionFiles : $APPCONFIGOPTS/Boole/Default.py;$APPCONFIGOPTS/Boole/DataType-2012.py;$APPCONFIGOPTS/Boole/Boole-SiG4EnergyDeposit.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124632
#-- StepId : 124632
#-- StepName : TCK-0x409f0045 Flagged for Sim08 2012
#-- ApplicationName : Moore
#-- ApplicationVersion : v14r8p1
#-- OptionFiles : $APPCONFIGOPTS/Moore/MooreSimProductionWithL0Emulation.py;$APPCONFIGOPTS/Conditions/TCK-0x409f0045.py;$APPCONFIGOPTS/Moore/DataType-2012.py;$APPCONFIGOPTS/L0/L0TCK-0x0045.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124630
#-- StepId : 124630
#-- StepName : Stripping20-NoPrescalingFlagged for Sim08
#-- ApplicationName : DaVinci
#-- ApplicationVersion : v32r2p1
#-- OptionFiles : $APPCONFIGOPTS/DaVinci/DV-Stripping20-Stripping-MC-NoPrescaling.py;$APPCONFIGOPTS/DaVinci/DataType-2012.py;$APPCONFIGOPTS/DaVinci/InputType-DST.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124834
#-- StepId : 124834
#-- StepName : Reco14a for MC
#-- ApplicationName : Brunel
#-- ApplicationVersion : v43r2p7
#-- OptionFiles : $APPCONFIGOPTS/Brunel/DataType-2012.py;$APPCONFIGOPTS/Brunel/MC-WithTruth.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-125079
#-- StepId : 125079
#-- StepName : Sim08a - 2012 - MD - Pythia8
#-- ApplicationName : Gauss
#-- ApplicationVersion : v45r3
#-- OptionFiles : $APPCONFIGOPTS/Gauss/Sim08-Beam4000GeV-md100-2012-nu2.5.py;$DECFILESROOT/options/@{eventType}.py;$LBPYTHIA8ROOT/options/Pythia8.py;$APPCONFIGOPTS/Gauss/G4PL_FTFP_BERT_EmNoCuts.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : Sim08-20130503-1
#-- CONDDB : Sim08-20130503-1-vc-md100
#-- ExtraPackages : AppConfig.v3r171;DecFiles.v27r6
#-- Visible : Y
#-- Processing Pass Step-125336
#-- StepId : 125336
#-- StepName : Sim08a - 2012 - MD - Pythia8
#-- ApplicationName : Gauss
#-- ApplicationVersion : v45r3
#-- OptionFiles : $APPCONFIGOPTS/Gauss/Sim08-Beam4000GeV-md100-2012-nu2.5.py;$DECFILESROOT/options/@{eventType}.py;$LBPYTHIA8ROOT/options/Pythia8.py;$APPCONFIGOPTS/Gauss/G4PL_FTFP_BERT_EmNoCuts.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : Sim08-20130503-1
#-- CONDDB : Sim08-20130503-1-vc-md100
#-- ExtraPackages : AppConfig.v3r171;DecFiles.v27r8
#-- Visible : Y
from Gaudi.Configuration import *
from GaudiConf import IOHelper
IOHelper('ROOT').inputFiles(['LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000001_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000002_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000003_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000004_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000005_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000006_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000007_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000008_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000009_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000010_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000011_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000012_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000013_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000014_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000015_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000016_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000017_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000018_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000019_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000020_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000021_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000022_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000023_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000024_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000025_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000026_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000027_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000029_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000030_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000031_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000032_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000033_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000034_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000035_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000036_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000037_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000038_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000039_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000040_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000041_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000042_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000043_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000044_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000045_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000046_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000047_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000048_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000049_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000050_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000051_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000052_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000053_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000054_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000056_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000057_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000058_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000059_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000060_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000061_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000062_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000063_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000064_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000065_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000066_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000067_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000068_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000069_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000070_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000071_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000072_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000073_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000074_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000075_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000076_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000077_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000078_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000079_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000080_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000081_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000082_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000083_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000084_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000085_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000086_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000087_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000088_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000089_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000090_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000091_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000092_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000093_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000094_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000095_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000096_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000097_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000098_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000099_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000100_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000101_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000102_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000103_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000104_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000105_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000106_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000107_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000108_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000109_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000110_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000111_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000112_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000113_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000114_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000115_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000116_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000117_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000118_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000119_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000120_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000121_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000122_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000123_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000124_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000125_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000126_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000127_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000128_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000129_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000130_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000131_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000132_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000133_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000134_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000135_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000136_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000137_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000138_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000139_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000140_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000141_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000142_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000143_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000144_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000147_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000148_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000149_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000150_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025316/0000/00025316_00000151_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000001_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000002_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000003_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000004_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000005_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000006_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000007_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000008_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000009_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000010_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000011_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000012_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000013_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000014_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000015_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000016_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000017_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000018_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000019_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000020_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000021_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000022_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000023_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000024_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000025_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000027_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000028_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000029_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000030_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000031_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000032_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000033_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000034_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000035_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000036_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000037_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000038_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000040_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000041_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000042_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000043_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000044_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000045_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000046_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000048_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000050_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000051_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000052_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000054_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000055_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000056_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000057_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000058_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000059_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000060_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000061_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000063_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000064_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00025931/0000/00025931_00000065_1.allstreams.dst'
], clear=True)
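# Hypothetical usage note (not part of the generated options): bookkeeping
# files like this are normally passed to a Gaudi application alongside the
# user's own options, e.g. `gaudirun.py my_davinci_options.py this_file.py`,
# or attached to a ganga job as extra options.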
|
[
"thomas.bird@cern.ch"
] |
thomas.bird@cern.ch
|
f88bc082dba510ad310965033149a2cd4b5b9749
|
c3d0011d7842db09a57d185fb38a54e7da2db698
|
/app/tests/conftest.py
|
07bfbf51f34fc971d201b0291afd1a69596004fc
|
[] |
no_license
|
ChiaYinChen/fastapi-practice
|
eebc988cd59ffa37e8c54f0cdf39094bfc4ead8f
|
61d07502945d6a3b71c842b9fd47062ee4a412cd
|
refs/heads/master
| 2023-09-02T03:30:39.634621
| 2021-11-10T03:25:56
| 2021-11-10T03:25:56
| 410,732,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,597
|
py
|
"""Pytest's conftest.py."""
import logging
from typing import Dict, Generator
import pytest
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session
from app import color_formatter
from app.core.config import settings
from app.db.session import SessionLocal
from app.main import app
from .utils.user import (authentication_token_from_username,
user_authentication_headers)
@pytest.fixture(scope="session")
def db() -> Generator:
"""Postgres test session."""
yield SessionLocal()
@pytest.fixture(scope="module")
def client() -> Generator:
"""Mock client."""
with TestClient(app) as c:
yield c
@pytest.fixture(scope="module")
def superuser_token_headers(
client: TestClient,
db: Session
) -> Dict[str, str]:
"""Token headers for superuser."""
return user_authentication_headers(
client=client,
username=settings.FIRST_SUPERUSER,
password=settings.FIRST_SUPERUSER_PASSWORD
)
@pytest.fixture(scope="module")
def normal_user_token_headers(
client: TestClient,
db: Session
) -> Dict[str, str]:
"""Token headers for normal user."""
return authentication_token_from_username(
client=client,
username=settings.TEST_USER_USERNAME,
db=db
)
@pytest.fixture(scope='session', autouse=True)
def setup_logging():
"""Set custom logging handler for pytest."""
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
_handler = logging.StreamHandler()
_handler.setFormatter(color_formatter)
logger.addHandler(_handler)
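# Hypothetical example (not part of the original conftest): a test module
# consuming these fixtures might look like the sketch below; the "/users/me"
# route is an assumption, not taken from this repository.
# def test_read_current_user(client, normal_user_token_headers):
#     resp = client.get("/users/me", headers=normal_user_token_headers)
#     assert resp.status_code == 200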
|
[
"awdrg1210@gmail.com"
] |
awdrg1210@gmail.com
|
39998b9a9178ac4d0bdf68c6631a1fd94392ee68
|
517756b136e1a2f7fb1929adab09cd6db900f9bf
|
/web/pages/migrations/0003_wikipage_wikipagerelatedlink_delete_freeformpage.py
|
c698b953c06790f4e36ad180d61f7f98939e0f5e
|
[] |
no_license
|
andreiavram/scoutfile
|
6b67d07693a9b5d5f4d78247f2ec88eacc52dcc5
|
999ee76d82590af4b7c9f067eb949a48ffda5500
|
refs/heads/develop
| 2023-07-22T21:16:17.099526
| 2023-03-01T09:24:38
| 2023-03-01T09:24:38
| 21,454,635
| 0
| 0
| null | 2019-02-18T00:51:20
| 2014-07-03T08:07:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,280
|
py
|
# Generated by Django 4.1.7 on 2023-02-24 17:05
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.blocks
import wagtail.contrib.table_block.blocks
import wagtail.documents.blocks
import wagtail.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0083_workflowcontenttype'),
('wagtailimages', '0025_alter_image_file_alter_rendition_file'),
('pages', '0002_freeformpage'),
]
operations = [
migrations.CreateModel(
name='WikiPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')),
('body', wagtail.fields.StreamField([('heading', wagtail.blocks.CharBlock(form_classname='title')), ('paragraph', wagtail.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('document', wagtail.documents.blocks.DocumentChooserBlock()), ('table', wagtail.contrib.table_block.blocks.TableBlock())], use_json_field=True)),
('cover_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='WikiPageRelatedLink',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('name', models.CharField(max_length=255)),
('url', models.URLField()),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='pages.wikipage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.DeleteModel(
name='FreeFormPage',
),
]
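# Hypothetical usage note (not part of the generated migration): it would be
# applied with the standard command, e.g. `python manage.py migrate pages`.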
|
[
"andrei.avram@gmail.com"
] |
andrei.avram@gmail.com
|
2746759beec23a93c8483f12f67e4e24dfdbd05c
|
2a256bce43ae0dcdd0699cb89744d7dfefda53e7
|
/genres.py
|
1e0de8859bf36a1d096c3f79047544cdfcb88c23
|
[] |
no_license
|
mainul123/Project-for-Compsci
|
addb0534d9ff17d19f31ae7450b7c4a3296cf54c
|
137fb65da794bb1ac762627f065b8d619d9778ef
|
refs/heads/master
| 2021-08-30T19:25:01.428367
| 2017-12-19T05:41:31
| 2017-12-19T05:41:31
| 110,990,830
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,655
|
py
|
genres = [{"rock": ["symphonic rock", "jazz-rock", "heartland rock", "rap rock", "garage rock", "folk-rock", "roots rock", "adult alternative pop rock", "rock roll", "punk rock", "arena rock", "pop-rock", "glam rock", "southern rock", "indie rock", "funk rock", "country rock", "piano rock", "art rock", "rockabilly", "acoustic rock", "progressive rock", "folk rock", "psychedelic rock", "rock & roll", "blues rock", "alternative rock", "rock and roll", "soft rock", "rock and indie", "hard rock", "pop/rock", "pop rock", "rock", "classic pop and rock", "psychedelic", "british psychedelia", "punk", "metal", "heavy metal"]},
{"alternative/indie": ["adult alternative pop rock", "alternative rock", "alternative metal", "alternative", "lo-fi indie", "indie", "indie folk", "indietronica", "indie pop", "indie rock", "rock and indie"]},
{"electronic/dance": ["dance and electronica", "electro house", "electronic", "electropop", "progressive house", "hip house", "house", "eurodance", "dancehall", "dance", "trap"]},
{"soul": ["psychedelic soul", "deep soul", "neo-soul", "neo soul", "southern soul", "smooth soul", "blue-eyed soul", "soul and reggae", "soul"]},
{"classical": ["classical", "orchestral", "film soundtrack", "composer"]},
{"pop": ["country-pop", "latin pop", "classical pop", "pop-metal", "orchestral pop", "instrumental pop", "indie pop", "sophisti-pop", "pop punk", "pop reggae", "britpop", "traditional pop", "power pop", "sunshine pop", "baroque pop", "synthpop", "art pop", "teen pop", "psychedelic pop", "folk pop", "country pop", "pop rap", "pop soul", "pop and chart", "dance-pop", "pop", "top 40"]},
{"hip-hop/rnb": ["conscious hip hop", "east coast hip hop", "hardcore hip hop", "west coast hip hop", "hiphop", "southern hip hop", "hip-hop", "hip hop", "hip hop rnb and dance hall", "contemporary r b", "gangsta rap", "rapper", "rap", "rhythm and blues", "contemporary rnb", "contemporary r&b", "rnb", "rhythm & blues","r&b", "blues"]},
{"disco": ["disco"]},
{"swing": ["swing"]},
{"folk": ["contemporary folk", "folk"]},
{"country": ["country rock", "country-pop", "country pop", "contemporary country", "country"]},
{"jazz": ["vocal jazz", "jazz", "jazz-rock"]},
{"religious": ["christian", "christmas music", "gospel"]},
{"blues": ["delta blues", "rock blues", "urban blues", "electric blues", "acoustic blues", "soul blues", "country blues", "jump blues", "classic rock. blues rock", "jazz and blues", "piano blues", "british blues", "british rhythm & blues", "rhythm and blues", "blues", "blues rock", "rhythm & blues"]},
{"reggae": ["reggae fusion", "roots reggae", "reggaeton", "pop reggae", "reggae", "soul and reggae"]}]
|
[
"you@example.com"
] |
you@example.com
|
db77a8b668dc1cbb0fb88e903388300504290f7c
|
41c1417ff294878ab3b46d5d3db1cc9d63ba07e5
|
/UI_data_Alert.py
|
264a0798b5936c11506ff806ed669889c9f8d34f
|
[] |
no_license
|
eong93/QGIS_DataAlertPlugin
|
006404b14d174f3d552d32cd17ca1b2d8af82c95
|
5aec2a5ad44e95f8fcf177c8cc7825ffa64d1258
|
refs/heads/master
| 2021-01-10T12:37:40.688394
| 2015-10-01T19:09:42
| 2015-10-01T19:09:42
| 43,501,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,890
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'data_Alert_dockwidget_base.ui'
#
# Created: Tue Sep 29 12:29:50 2015
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_dataAlertDockWidgetBase(object):
def setupUi(self, dataAlertDockWidgetBase):
dataAlertDockWidgetBase.setObjectName(_fromUtf8("dataAlertDockWidgetBase"))
dataAlertDockWidgetBase.resize(350, 548)
self.dockWidgetContents = QtGui.QWidget()
self.dockWidgetContents.setObjectName(_fromUtf8("dockWidgetContents"))
self.endButton = QtGui.QPushButton(self.dockWidgetContents)
self.endButton.setGeometry(QtCore.QRect(240, 480, 75, 23))
self.endButton.setObjectName(_fromUtf8("endButton"))
self.startButton = QtGui.QPushButton(self.dockWidgetContents)
self.startButton.setGeometry(QtCore.QRect(30, 480, 75, 23))
self.startButton.setObjectName(_fromUtf8("startButton"))
self.textBrowser = QtGui.QTextBrowser(self.dockWidgetContents)
self.textBrowser.setGeometry(QtCore.QRect(15, 110, 321, 351))
self.textBrowser.setObjectName(_fromUtf8("textBrowser"))
self.upper = QtGui.QLineEdit(self.dockWidgetContents)
self.upper.setGeometry(QtCore.QRect(120, 10, 113, 20))
self.upper.setObjectName(_fromUtf8("upper"))
self.left = QtGui.QLineEdit(self.dockWidgetContents)
self.left.setGeometry(QtCore.QRect(20, 40, 113, 20))
self.left.setObjectName(_fromUtf8("left"))
self.right = QtGui.QLineEdit(self.dockWidgetContents)
self.right.setGeometry(QtCore.QRect(220, 40, 113, 20))
self.right.setObjectName(_fromUtf8("right"))
self.lower = QtGui.QLineEdit(self.dockWidgetContents)
self.lower.setGeometry(QtCore.QRect(120, 70, 113, 20))
self.lower.setObjectName(_fromUtf8("lower"))
dataAlertDockWidgetBase.setWidget(self.dockWidgetContents)
self.retranslateUi(dataAlertDockWidgetBase)
QtCore.QMetaObject.connectSlotsByName(dataAlertDockWidgetBase)
def retranslateUi(self, dataAlertDockWidgetBase):
dataAlertDockWidgetBase.setWindowTitle(_translate("dataAlertDockWidgetBase", "UVI Alert", None))
self.endButton.setText(_translate("dataAlertDockWidgetBase", "End", None))
self.startButton.setText(_translate("dataAlertDockWidgetBase", "Start", None))
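# Hypothetical usage sketch (not part of the generated file): QGIS plugin code
# would normally mix this class into a QDockWidget subclass, e.g.:
# class DataAlertDockWidget(QtGui.QDockWidget, Ui_dataAlertDockWidgetBase):
#     def __init__(self, parent=None):
#         QtGui.QDockWidget.__init__(self, parent)
#         self.setupUi(self)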
|
[
"eric.ong@digitalglobe.com"
] |
eric.ong@digitalglobe.com
|
c9135529a7f8dec9f1c3f1914cd91e165f7eab43
|
b252d1f8ec5f68bf5f935c000e0bb011718ea691
|
/virtualenvs/ninetyseven/src/savoy/core/template_pages/middleware.py
|
117c03647f31b1ef61820c131b21d2f9d5190506
|
[] |
no_license
|
syncopated/97bottles
|
2ceace7ed6a852bef61796733a08eb878b045152
|
08f4210e3de77c4564fcc8c1a2e9b47a0088249f
|
refs/heads/master
| 2016-08-05T07:48:51.109089
| 2012-12-02T17:38:35
| 2012-12-02T17:38:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 719
|
py
|
from django.http import Http404
from django.conf import settings
from savoy.core.template_pages.views import templatepage
class TemplatepageFallbackMiddleware(object):
def process_response(self, request, response):
if response.status_code != 404:
      return response # No need to check for a template page for non-404 responses.
try:
return templatepage(request, request.path)
# Return the original response if any errors happened. Because this
# is a middleware, we can't assume the errors will be caught elsewhere.
except Http404:
return response
except:
if settings.DEBUG:
raise
return response
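# Hypothetical configuration note (not part of the original file): in the
# old-style Django settings this project targets, the fallback would be
# registered roughly as
# MIDDLEWARE_CLASSES += ('savoy.core.template_pages.middleware.TemplatepageFallbackMiddleware',)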
|
[
"keith@dkeithrobinson.com"
] |
keith@dkeithrobinson.com
|
e634b0b58082403c727066d3ea4845f10e9599f7
|
ad31c7890508030c41699f4d98a47aa4b2cd1765
|
/models/ndpm/priors.py
|
473d31285df93dd7ffb72100aa3e08b3526c3111
|
[] |
no_license
|
Chandan-IITI/online-continual-learning
|
cd821f7f189251b5b183b75c2db50087930f731a
|
1050d1b716c51edc83799e2ecee38da66a169931
|
refs/heads/main
| 2023-03-05T18:10:08.085657
| 2021-02-10T19:42:27
| 2021-02-10T19:42:27
| 340,447,633
| 2
| 1
| null | 2021-02-19T17:52:42
| 2021-02-19T17:52:42
| null |
UTF-8
|
Python
| false
| false
| 1,610
|
py
|
from abc import ABC, abstractmethod
import torch
from utils.utils import maybe_cuda
class Prior(ABC):
def __init__(self, params):
self.params = params
@abstractmethod
def add_expert(self):
pass
@abstractmethod
def record_usage(self, usage, index=None):
pass
@abstractmethod
def nl_prior(self, normalize=False):
pass
class CumulativePrior(Prior):
def __init__(self, params):
super().__init__(params)
self.log_counts = maybe_cuda(torch.tensor(
params.log_alpha
)).float().unsqueeze(0)
def add_expert(self):
self.log_counts = torch.cat(
[self.log_counts, maybe_cuda(torch.zeros(1))],
dim=0
)
def record_usage(self, usage, index=None):
"""Record expert usage
Args:
usage: Tensor of shape [K+1] if index is None else scalar
index: expert index
"""
if index is None:
self.log_counts = torch.logsumexp(torch.stack([
self.log_counts,
usage.log()
], dim=1), dim=1)
else:
self.log_counts[index] = torch.logsumexp(torch.stack([
self.log_counts[index],
maybe_cuda(torch.tensor(usage)).float().log()
], dim=0), dim=0)
def nl_prior(self, normalize=False):
nl_prior = -self.log_counts
if normalize:
nl_prior += torch.logsumexp(self.log_counts, dim=0)
return nl_prior
@property
def counts(self):
return self.log_counts.exp()
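# Hypothetical usage sketch (not part of the original file); `params` is
# assumed to be any object exposing a numeric `log_alpha` attribute.
# prior = CumulativePrior(params)
# prior.add_expert()                  # now one expert besides the alpha slot
# prior.record_usage(10.0, index=1)   # expert 1 observed 10 more times
# nl = prior.nl_prior(normalize=True) # negative log prior over [alpha, expert 1]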
|
[
"zhedamai0126@gmail.com"
] |
zhedamai0126@gmail.com
|
0bfb21e8e2ff9add1ccd3f4e5d69776fe7878c58
|
712a7acca58d9f4b5dc2a107f92354aaa5caec65
|
/app/new_feature.py
|
3e10145111e40f254ebe8966e05ba5cb4286bc34
|
[] |
no_license
|
myd10/testing-123-2.0
|
2332ad5f529765be30509f4b35cb60371569a892
|
59de9dbee3e506b9769dd7c94d2e5f179ecd2fc0
|
refs/heads/master
| 2022-04-23T07:02:31.742780
| 2020-04-19T21:21:06
| 2020-04-19T21:21:06
| 255,473,162
| 0
| 0
| null | 2020-04-19T21:21:08
| 2020-04-14T00:39:04
|
Python
|
UTF-8
|
Python
| false
| false
| 79
|
py
|
#new feature on my-new-feature branch
def announce():
return "Hello World"
|
[
"myd10@georgetown.edu"
] |
myd10@georgetown.edu
|
dd37637d40b6aa1f04a8fc1d4cec182d38ff2386
|
f9ac779ee4de9f66da3c9d9585785fa95ca0e7a2
|
/h5pyd/_apps/hscopy.py
|
022cf90d09e0337cf4bb865de2f851d3b82c043a
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
t-sommer/h5pyd
|
e4cb145e7b523972062de7ce86d500fe3473003a
|
a99860928f5845079800f761480e51ca9be0d759
|
refs/heads/master
| 2020-04-20T06:09:58.717543
| 2019-01-31T07:15:09
| 2019-01-31T07:15:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,691
|
py
|
##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from help@hdfgroup.org. #
##############################################################################
import sys
import logging
try:
import h5pyd
except ImportError as e:
sys.stderr.write("ERROR : %s : install it to use this utility...\n" % str(e))
sys.exit(1)
try:
    import pycurl as PYCURL
except ImportError as e:
    PYCURL = None
if __name__ == "__main__":
from config import Config
from utillib import load_file
else:
from .config import Config
from .utillib import load_file
cfg = Config()
#----------------------------------------------------------------------------------
def usage():
print("Usage:\n")
print((" {} [ OPTIONS ] source destination".format(cfg["cmd"])))
print("")
print("Description:")
print(" Copy domain")
print(" source: domain to be copied ")
print(" destination: target domain")
print("")
print("Options:")
print(" -v | --verbose :: verbose output")
print(" -e | --endpoint <domain> :: The HDF Server endpoint, e.g. http://hsdshdflab.hdfgroup.org")
print(" -u | --user <username> :: User name credential")
print(" -p | --password <password> :: Password credential")
print(" -c | --conf <file.cnf> :: A credential and config file")
print(" -z[n] :: apply compression filter to any non-compressed datasets, n: [0-9]")
print(" --cnf-eg :: Print a config file and then exit")
print(" --logfile <logfile> :: logfile path")
print(" --loglevel debug|info|warning|error :: Change log level")
print(" --nodata :: Do not upload dataset data")
print(" -h | --help :: This message.")
print("")
#end print_usage
#----------------------------------------------------------------------------------
def print_config_example():
print("# default")
print("hs_username = <username>")
print("hs_password = <passwd>")
print("hs_endpoint = http://hsdshdflab.hdfgroup.org")
#print_config_example
#----------------------------------------------------------------------------------
def main():
loglevel = logging.ERROR
verbose = False
nodata = False
deflate = None
cfg["cmd"] = sys.argv[0].split('/')[-1]
if cfg["cmd"].endswith(".py"):
cfg["cmd"] = "python " + cfg["cmd"]
cfg["logfname"] = None
logfname=None
src_files = []
argn = 1
while argn < len(sys.argv):
arg = sys.argv[argn]
val = None
if arg[0] == '-' and len(src_files) > 0:
# options must be placed before filenames
print("options must precead source files")
usage()
sys.exit(-1)
if len(sys.argv) > argn + 1:
val = sys.argv[argn+1]
if arg in ("-v", "--verbose"):
verbose = True
argn += 1
elif arg == "--nodata":
nodata = True
argn += 1
elif arg == "--loglevel":
if val == "debug":
loglevel = logging.DEBUG
elif val == "info":
loglevel = logging.INFO
elif val == "warning":
loglevel = logging.WARNING
elif val == "error":
loglevel = logging.ERROR
else:
print("unknown loglevel")
usage()
sys.exit(-1)
argn += 2
elif arg == '--logfile':
logfname = val
argn += 2
elif arg in ("-h", "--help"):
usage()
sys.exit(0)
elif arg in ("-e", "--endpoint"):
cfg["hs_endpoint"] = val
argn += 2
elif arg in ("-u", "--username"):
cfg["hs_username"] = val
argn += 2
elif arg in ("-p", "--password"):
cfg["hs_password"] = val
argn += 2
elif arg == '--cnf-eg':
print_config_example()
sys.exit(0)
elif arg.startswith("-z"):
compressLevel = 4
if len(arg) > 2:
try:
compressLevel = int(arg[2:])
except ValueError:
print("Compression Level must be int between 0 and 9")
sys.exit(-1)
deflate = compressLevel
argn += 1
elif arg[0] == '-':
usage()
sys.exit(-1)
else:
src_files.append(arg)
argn += 1
# setup logging
logging.basicConfig(filename=logfname, format='%(asctime)s %(filename)s:%(lineno)d %(message)s', level=loglevel)
logging.debug("set log_level to {}".format(loglevel))
# end arg parsing
logging.info("username: {}".format(cfg["hs_username"]))
logging.info("password: {}".format(cfg["hs_password"]))
logging.info("endpoint: {}".format(cfg["hs_password"]))
logging.info("verbose: {}".format(verbose))
if len(src_files) < 2:
# need at least a src and destination
usage()
sys.exit(-1)
src_domain = src_files[0]
des_domain = src_files[1]
logging.info("source domain: {}".format(src_domain))
logging.info("target domain: {}".format(des_domain))
if src_domain[0] != '/' or src_domain[-1] == '/':
print("source domain must be an absolute path, non-folder domain")
usage()
sys.exit(-1)
if des_domain[0] != '/' or des_domain[-1] == '/':
print("source domain must be an absolute path, non-folder domain")
usage()
sys.exit(-1)
if cfg["hs_endpoint"] is None:
logging.error('No endpoint given, try -h for help\n')
sys.exit(1)
logging.info("endpoint: {}".format(cfg["hs_endpoint"]))
try:
# get a handle to input file
try:
fin = h5pyd.File(src_domain, mode='r')
except IOError as ioe:
logging.error("Error opening file {}: {}".format(src_domain, ioe))
sys.exit(1)
# create the output domain
try:
username = cfg["hs_username"]
password = cfg["hs_password"]
endpoint = cfg["hs_endpoint"]
fout = h5pyd.File(des_domain, 'x', endpoint=endpoint, username=username, password=password)
except IOError as ioe:
if ioe.errno == 403:
logging.error("No write access to domain: {}".format(des_domain))
else:
logging.error("Error creating file {}: {}".format(des_domain, ioe))
sys.exit(1)
# do the actual load
load_file(fin, fout, verbose=verbose, nodata=nodata, deflate=deflate)
msg = "File {} uploaded to domain: {}".format(src_domain, des_domain)
logging.info(msg)
if verbose:
print(msg)
except KeyboardInterrupt:
logging.error('Aborted by user via keyboard interrupt.')
sys.exit(1)
# __main__
if __name__ == "__main__":
main()
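# Hypothetical invocation sketch (not part of the original file); domain names
# and credentials are placeholders:
#   python hscopy.py -e http://hsdshdflab.hdfgroup.org -u myuser -p mypass \
#       /home/myuser/source_domain /home/myuser/copy_of_source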
|
[
"jreadey@hdfgroup.org"
] |
jreadey@hdfgroup.org
|
1dca4c5c4032b6d7106218048591403bb413149b
|
2cafc4981f85e9a25cceb18af1e936e19268e0ee
|
/scapy_tcp_ACK_discovery.py
|
ffc67cf055318e0203ce98a8c94e2e35b256b78e
|
[] |
no_license
|
lapinrepository/ethicalhacking
|
fdd0647bffeb87544ede182eb62544ee922579fd
|
14fac0bee8ca5f58c5499e4e91323e005a5e6c25
|
refs/heads/master
| 2021-10-09T15:14:29.976534
| 2018-12-30T09:30:19
| 2018-12-30T09:30:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,287
|
py
|
#!/usr/bin/python
from scapy.all import *
import sys  # used below for sys.argv / sys.exit
import logging
import subprocess
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
import threading
screenlock = threading.Semaphore(value=1)
def tcpackscan(prefix, addr):
try:
answer = sr1(IP(dst=prefix+str(addr))/TCP(dport=80, flags = 'A'), timeout=1, verbose=0)
screenlock.acquire()
if int(answer[TCP].flags) == 4:
print("[+] Host " + prefix + str(addr) + " is alive")
else:
pass
except:
pass
finally:
screenlock.release()
if len(sys.argv) != 2:
print("Usage scapy_tcp_ACK_discovery.py [interface]")
print("Example: scapy_tcp_ACK_discovery.py eth0")
sys.exit()
interface = str(sys.argv[1])
ip = subprocess.check_output("ifconfig " + interface + " | grep 'inet' | cut -d ' ' -f 1 | cut -d 'n' -f 2 | cut -d ' ' -f 2", shell=True).strip()
prefix = ip.split('.')[0] + '.' + ip.split('.')[1] + '.' + ip.split('.')[2] + '.'
reply_ip = list()
for addr in range(100,254):
t = threading.Thread(target=tcpackscan, args=(prefix, addr))
t.start()
#for addr in range(100,110):
# answer = sr1(IP(dst=prefix + str(addr)) / TCP(dport=80, flags = 'A'), timeout=1, verbose=0)
# try:
# if int(answer[TCP].flags) == 4:
# reply_ip.append(prefix + str(addr))
# except:
# pass
#for elt in reply_ip:
# print(elt)
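# Hypothetical usage note (not part of the original script): crafting raw TCP
# packets with scapy needs raw-socket privileges, so this would typically be
# run as root, e.g. `sudo python scapy_tcp_ACK_discovery.py eth0`.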
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
b8e40021cad5f7464ecf970db88946677e9f121d
|
3bf1480a1a00209bc8ef8a66e1995549987ae70e
|
/utils/scripts/OOOlevelGen/src/levels/level_2_1.py
|
b1a083b100c10f780210b20af5f57d1be6d5f578
|
[
"MIT"
] |
permissive
|
fullscreennl/bullettime
|
284a8ea320fb4adabc07c3639731a80fc4db5634
|
8967449cdf926aaed6bb7ec217d92e0689fb0c3c
|
refs/heads/master
| 2020-03-29T01:56:26.627283
| 2018-10-11T19:09:48
| 2018-10-11T19:09:48
| 149,414,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,698
|
py
|
import LevelBuilder
from sprites import *
def render(name,bg):
lb = LevelBuilder.LevelBuilder(name+".plist",background=bg)
lb.addObject(Bullet.BulletSprite(x=0, y=0,width=10,height=10,angle='0',restitution=0.5,static='false',friction=0.5,density=3,spawnEvent='onShoot'))
lb.addObject(Hero.HeroSprite(x=34, y=48,width=42,height=74))
lb.addObject(Teleporter.TeleporterSprite( level_id='leveldata/level_2_2'))
lb.addObject(ZoomTrigger.ZoomTriggerSprite(x=20,y=250,width=100,height=500,zoom_fact=1.0))
lb.addObject(ZoomTrigger.ZoomTriggerSprite(x=185,y=320-60,width=128,height=100,zoom_fact=0.1666))
lb.addObject(ZoomTrigger.ZoomTriggerSprite(x=350,y=250,width=100,height=500,zoom_fact=1.0))
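    # the three zoom triggers above presumably keep full zoom on either side of
    # the watchtower and zoom far out (factor 0.1666) while the player is near it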
lb.addObject(WatchtowerVisual.WatchtowerVisualSprite(x=185, y=92,width=128,height=235-50,angle='0',restitution=0.2,static='true',friction=0.5,density=20,firstframe='watchtower.png' ))
lb.addObject(Crate.CrateSprite(x=2343,y=53,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=2305,y=53,width=32, height=32, static='false',angle=0))
lb.addObject(Beam.BeamSprite(x=1642, y=103,width=160,height=36,angle='0' ,restitution=0.2,static='true',friction=0.5,density=20 ,classname='Destructable').setName('dBeam'))
lb.addObject(Beam.BeamSprite(x=1156, y=220,width=160,height=36,angle='0' ,restitution=0.2,static='true',friction=0.5,density=20 ,classname='Destructable').setName('dBeam'))
lb.addObject(Beam.BeamSprite(x=2180, y=220,width=160,height=36,angle='0' ,restitution=0.2,static='true',friction=0.5,density=20 ,classname='Destructable').setName('dBeam'))
lb.addObject(Pickup.PickupSprite(x=2224,y=257,width=32, height=32, static='false',angle=0))
lb.addObject(Pickup.PickupSprite(x=2140,y=257,width=32, height=32, static='false',angle=0))
lb.addObject(Pickup.PickupSprite(x=1685,y=140,width=32, height=32, static='false',angle=0))
lb.addObject(Pickup.PickupSprite(x=1607,y=140,width=32, height=32, static='false',angle=0))
lb.addObject(Pickup.PickupSprite(x=1201,y=257,width=32, height=32, static='false',angle=0))
lb.addObject(Pickup.PickupSprite(x=1117,y=257,width=32, height=32, static='false',angle=0))
lb.addObject(Enemy.EnemySprite(x=1395, y=19,width=33,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 , classname='BlobSprite',firstframe='monsterblob.png'))
lb.addObject(Enemy.EnemySprite(x=1353, y=19,width=33,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 , classname='BlobSprite',firstframe='monsterblob.png'))
lb.addObject(Enemy.EnemySprite(x=736, y=19,width=33,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 , classname='BlobSprite',firstframe='monsterblob.png'))
lb.addObject(Enemy.EnemySprite(x=1261, y=19,width=33,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 , classname='BlobSprite',firstframe='monsterblob.png'))
lb.addObject(Enemy.EnemySprite(x=1219, y=19,width=33,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 , classname='BlobSprite',firstframe='monsterblob.png'))
lb.addObject(Enemy.EnemySprite(x=1158, y=255,width=33,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 , classname='BlobSprite',firstframe='monsterblob.png'))
lb.addObject(Crate.CrateSprite(x=2343,y=15,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=2305,y=15,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=1645,y=176,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=1607,y=176,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=2343,y=89,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=2305,y=89,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=2383,y=53,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=2383,y=15,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=1685,y=176,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=2383,y=89,width=32, height=32, static='false',angle=0))
lb.addObject(Beam.BeamSprite(x=406, y=38,width=72,height=76,angle='90' ,restitution=0.2,static='true',friction=0.5,density=20 ,classname='Destructable').setName('dBeam'))
lb.addObject(Beam.BeamSprite(x=406, y=108,width=72,height=76,angle='90' ,restitution=0.2,static='true',friction=0.5,density=20 ,classname='Destructable').setName('dBeam'))
lb.addObject(Beam.BeamSprite(x=406, y=179,width=72,height=76,angle='90' ,restitution=0.2,static='true',friction=0.5,density=20 ,classname='Destructable').setName('dBeam'))
lb.addObject(Beam.BeamSprite(x=406, y=249,width=72,height=76,angle='90' ,restitution=0.2,static='true',friction=0.5,density=20 ,classname='Destructable').setName('dBeam'))
lb.addObject(Beam.BeamSprite(x=406, y=320,width=72,height=76,angle='90' ,restitution=0.2,static='true',friction=0.5,density=20 ,classname='Destructable').setName('dBeam'))
lb.addObject(Beam.BeamSprite(x=406, y=390,width=72,height=76,angle='90' ,restitution=0.2,static='true',friction=0.5,density=20 ,classname='Destructable').setName('dBeam'))
lb.addObject(Beam.BeamSprite(x=406, y=461,width=72,height=76,angle='90' ,restitution=0.2,static='true',friction=0.5,density=20 ,classname='Destructable').setName('dBeam'))
lb.addObject(Enemy.EnemySprite(x=1983, y=116,width=225,height=225,angle='0',restitution=0.2,static='false',friction=0.5,density=5 , classname='BlobSprite',firstframe='monsterblob.png'))
lb.render()
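# A sketch of how the level build pipeline presumably drives this module (the
# background asset name is an assumption):
#   render('level_2_1', 'background.png')  # writes level_2_1.plist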
|
[
"github@fotoboer.nl"
] |
github@fotoboer.nl
|
a2ef8e111e897f9bbacafce69f64708f2fef7967
|
e41d21d3f2db1e3f3bf3f34da357a6fa70670e9f
|
/03/2.py
|
535fd215c07861bc21932f1998f88a3e52566918
|
[] |
no_license
|
reynoldscem/aoc2016
|
98536a220461365e004ce370db41e540277c513a
|
432415d1c72f7eac6b386627a3f235e2233964cb
|
refs/heads/master
| 2020-06-13T22:07:48.734760
| 2016-12-21T09:11:30
| 2016-12-21T09:11:30
| 75,547,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 915
|
py
|
from itertools import permutations
import numpy as np
import argparse
import os
def build_parser():
parser = argparse.ArgumentParser()
parser.add_argument('filename')
return parser
def is_valid(triangle):
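    # a triangle is valid iff every two sides together exceed the third;
    # iterating all orderings covers the one binding case (the longest side)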
for permutation in permutations(triangle):
if np.sum(permutation[0:2]) <= permutation[2]:
return False
return True
def main(args):
with open(args.filename) as fd:
data = fd.read().split()
data = np.array(
list(map(int, data))
).reshape(-1, 3).transpose().reshape(-1, 3)
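    # the transpose + reshape regroups the numbers column-wise, so each triple
    # read down a column becomes one triangle (AoC 2016 day 3, part 2)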
triangles = [
tuple(entry)
for entry in data
]
valid_count = 0
for triangle in triangles:
if is_valid(triangle):
valid_count += 1
print(valid_count)
if __name__ == '__main__':
args = build_parser().parse_args()
assert os.path.isfile(args.filename), 'Must provide a valid filename'
main(args)
|
[
"reynoldscem@gmail.com"
] |
reynoldscem@gmail.com
|