| Column | Dtype | Values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
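A minimal sketch of reading rows with this schema through the Hugging Face `datasets` library, in streaming mode so the full corpus never has to be downloaded. The dataset identifier below is a placeholder, not the actual repository behind this table.

```python
from itertools import islice
from datasets import load_dataset

# Placeholder identifier; substitute the real dataset repository for this table.
ds = load_dataset("org/python-code-corpus", split="train", streaming=True)

# Peek at a few rows and the metadata columns described above.
for row in islice(ds, 3):
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
```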
91ac240c83a14f13f239bfa42d189a7ab771a61f
|
e71fa62123b2b8f7c1a22acb1babeb6631a4549b
|
/xlsxwriter/test/comparison/test_chart_pie02.py
|
da43f3a95439dfc445a57e81a063b68c6b55a26e
|
[
"BSD-2-Clause"
] |
permissive
|
timgates42/XlsxWriter
|
40480b6b834f28c4a7b6fc490657e558b0a466e5
|
7ad2541c5f12b70be471b447ab709c451618ab59
|
refs/heads/main
| 2023-03-16T14:31:08.915121
| 2022-07-13T23:43:45
| 2022-07-13T23:43:45
| 242,121,381
| 0
| 0
|
NOASSERTION
| 2020-02-21T11:14:55
| 2020-02-21T11:14:55
| null |
UTF-8
|
Python
| false
| false
| 1,229
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_pie02.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'pie'})
data = [
[2, 4, 6],
[60, 30, 10],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
chart.add_series({
'categories': '=Sheet1!$A$1:$A$3',
'values': '=Sheet1!$B$1:$B$3',
})
chart.set_legend({'font': {'bold': 1, 'italic': 1, 'baseline': -1}})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
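The test above exercises the public XlsxWriter chart API. A standalone sketch of the same calls, outside the comparison-test harness, would look roughly like this (same workbook, worksheet, and chart methods as in the row above).

```python
import xlsxwriter

# Same pie-chart construction as in the test, written as a plain script.
workbook = xlsxwriter.Workbook('pie.xlsx')
worksheet = workbook.add_worksheet()

chart = workbook.add_chart({'type': 'pie'})
worksheet.write_column('A1', [2, 4, 6])     # categories
worksheet.write_column('B1', [60, 30, 10])  # values

chart.add_series({
    'categories': '=Sheet1!$A$1:$A$3',
    'values': '=Sheet1!$B$1:$B$3',
})
worksheet.insert_chart('E9', chart)
workbook.close()
```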
8915d6ab351b062c940bd3ec98322f3559a95024
|
1546f4ebbb639fddf05d4aebea263931681eaeeb
|
/code/app/simulation/action/base.py
|
f63f1c57de9de7e666e7fd5ee0f69e77221c0e48
|
[] |
no_license
|
ferdn4ndo/the-train-app
|
34e9c885a658f51e42ec6184ca8058872b266244
|
4650433f7f860df3de1f7502cb052891c410618d
|
refs/heads/master
| 2023-01-21T07:15:02.984308
| 2020-11-22T23:57:28
| 2020-11-22T23:57:28
| 315,156,946
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,143
|
py
|
from app.common.logger import generate_logger, LoggerFolders
from app.simulation.exception.error import ConflictConditionError
class BaseAction:
name = "none"
abbrev = "---"
def __init__(self):
"""Class constructor"""
self.moving_towards_section = ""
self.lookup_train_prefix = ""
self.executed = False
@staticmethod
def is_applicable(dispatcher, train):
"""Define the criteria for the action application (should be overridden by children)"""
return True
def was_executed(self, train):
"""Define the criteria for the action to be considered executed (could be overridden by children)"""
return self.executed
def serialize(self):
return {
"name": self.name,
"abbrev": self.abbrev,
"executed": self.executed,
"description": self.describe()
}
def describe(self):
"""Define the message to describe the action (should be overridden by children)"""
return "No action (idle)"
def execute(self, dispatcher, train):
"""Define the action execution method (should be overridden by children)"""
self.executed = True
def move_to(self, dispatcher, train, next_section=None):
"""Helper function to be used by functions that moves a train from a section to another"""
self.moving_towards_section = next_section.name if next_section is not None else ''
if not train.is_at_section_end():
train.go_at_maximum_speed()
return
train.stop()
# if reached section end and there's no next straight section to move, raise error
if next_section is None:
raise ConflictConditionError("Tried to move into a non-existing section")
# if section is not occupied, move the train to it
if not dispatcher.is_section_occupied(next_section, train.is_reversed):
dispatcher.move_train_to_section(train, next_section)
# in any case (being moved to the new section or not due to its occupancy), mark the action as executed
self.executed = True
|
[
"const.fernando@gmail.com"
] |
const.fernando@gmail.com
|
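The docstrings in BaseAction describe an override pattern (is_applicable, describe, execute). The subclass below is a hypothetical illustration of that pattern, not part of the project; it assumes BaseAction above is importable and only calls train methods the base class itself already uses.

```python
class StopAtSectionEndAction(BaseAction):
    """Hypothetical child action illustrating the documented override points."""
    name = "stop_at_section_end"
    abbrev = "STP"

    @staticmethod
    def is_applicable(dispatcher, train):
        # Applicable once the train has reached the end of its current section.
        return train.is_at_section_end()

    def describe(self):
        return "Stop the train at the end of the current section"

    def execute(self, dispatcher, train):
        train.stop()
        self.executed = True
```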
e0d03d82a89f95990dd13ba64cb7019fc71d2dd3
|
3b786d3854e830a4b46ee55851ca186becbfa650
|
/SystemTesting/pylib/vmware/nsx/manager/bridge_endpoint/api/nsx70_crud_impl.py
|
008596b40abee0735845510a687241e799f38991
|
[] |
no_license
|
Cloudxtreme/MyProject
|
d81f8d38684333c22084b88141b712c78b140777
|
5b55817c050b637e2747084290f6206d2e622938
|
refs/heads/master
| 2021-05-31T10:26:42.951835
| 2015-12-10T09:57:04
| 2015-12-10T09:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
import vmware.nsx_api.manager.bridgeendpoint.bridgeendpoint\
as bridgeendpoint
import vmware.nsx_api.manager.bridgeendpoint.schema.bridgeendpoint_schema\
as bridgeendpoint_schema
import vmware.nsx.manager.api.base_crud_impl as base_crud_impl
class NSX70CRUDImpl(base_crud_impl.BaseCRUDImpl):
_attribute_map = {
'id_': 'id',
'name': 'display_name',
'summary': 'description',
'guest_vlan': 'guest_vlan_tag',
'node_id': 'bridge_cluster_id',
'vlan_id': 'vlan',
'ha': 'ha_enable'
}
_client_class = bridgeendpoint.BridgeEndpoint
_schema_class = bridgeendpoint_schema.BridgeEndpointSchema
|
[
"bpei@vmware.com"
] |
bpei@vmware.com
|
a4129e72e7a4b57f2a59c688c107e9ab48afe0a9
|
57845ff6759377884092d8d1c5fe82244e30b108
|
/code/era5_heat_comp/bias_correction.py
|
df5ba6a607bd21e394286839459eee3bba4e7534
|
[
"CC-BY-4.0"
] |
permissive
|
l5d1l5/project10
|
c11424c7329cdc264f6fedd974a8f887fe6c8cf8
|
ded1ef7ce04573d669c2f0352b03317f64f1f967
|
refs/heads/main
| 2023-06-10T17:17:01.748249
| 2021-06-09T13:20:35
| 2021-06-09T13:20:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,728
|
py
|
#!/usr/bin/env python
"""
Bias correction for the UTCI dataset
Both the climate model data and the ERA5-HEAT data have been regridded to 1x1 degree and uploaded to JASMIN. Here we use the ERA5-HEAT dataset from 1985 to 2014 and compare this to the derived UTCI from each climate model.
Therefore, instead of bias-correcting temperature or any other variables, we bias correct the derived UTCI.
We therefore assume that ERA5-HEAT is "Truth"! To be fair, I would probably bias correct the individual variables against their ERA5 counterparts. Additionally, for all except temperature this becomes a little tricky and subjective.
"""
import iris
from iris.experimental.equalise_cubes import equalise_attributes
from iris.util import unify_time_units
import iris.analysis.cartography
import iris.coord_categorisation
import matplotlib.pyplot as pl
from climateforcing.utils import mkdir_p
import numpy as np
#import pickle
import scipy.stats as st
from tqdm import tqdm
# ## Obtain historical "training" distributions
era5heatdir = '/gws/pw/j05/cop26_hackathons/bristol/project10/era5-heat_1deg/'
modeldir = '/gws/pw/j05/cop26_hackathons/bristol/project10/utci_projections_1deg/HadGEM3-GC31-LL/historical/r1i1p1f3/'
## just 30 years for now
## load up the regridded annual chunks and concatenate
cube_era5 = iris.load(era5heatdir + 'ECMWF_utci_*_v1.0_con.nc')
equalise_attributes(cube_era5)
unify_time_units(cube_era5)
for cu in cube_era5:
cu.coord('time').points = cu.coord('time').points.astype(int)
cube_era5 = cube_era5.concatenate_cube()
## also 30 years of HadGEM3 historical
cube_model = iris.load(modeldir + 'utci_3hr_HadGEM3-GC31-LL_historical_r1i1p1f3_gn_*.nc')
cube_model = cube_model.concatenate_cube()
# generalise this
leeds_model = cube_model[:,143,178]
leeds_era5 = cube_era5[:,143,178]
model_params = {}
model_params['a'] = np.zeros((cube_model.shape[1:3]))
model_params['loc'] = np.zeros((cube_model.shape[1:3]))
model_params['scale'] = np.zeros((cube_model.shape[1:3]))
model_params['lat'] = cube_model.coord('latitude').points
model_params['lon'] = cube_model.coord('longitude').points
era5_params = {}
era5_params['a'] = np.zeros((cube_era5.shape[1:3]))
era5_params['loc'] = np.zeros((cube_era5.shape[1:3]))
era5_params['scale'] = np.zeros((cube_era5.shape[1:3]))
era5_params['lat'] = cube_era5.coord('latitude').points
era5_params['lon'] = cube_era5.coord('longitude').points
model_params['a'][143,178], model_params['loc'][143,178], model_params['scale'][143,178] = st.skewnorm.fit(leeds_model.data)
era5_params['a'][143,178], era5_params['loc'][143,178], era5_params['scale'][143,178] = st.skewnorm.fit(leeds_era5.data)
# ## How to bias correct
#
# $\hat{x}_{m,p}(t) = F^{-1}_{o,h} ( F_{m,h} (x_{m,p}(t)) )$
#
# - $x_{m,p}$ is the future predicted variable, i.e. the SSP value from the climate model
# - $F_{m,h}$ is the CDF of the historical period in the climate model
# - $F_{o,h}$ is the CDF of the historical period in the observations (or in this case, ERA5)
# F_{m,h}
# In: st.skewnorm.cdf(290, model_params['a'][143,178], model_params['loc'][143,178], model_params['scale'][143,178])
# Out: 0.4921534798137802 # percentile of 290 K in HadGEM3 climate
# F^{-1}_{o,h}
# In: st.skewnorm.ppf(0.4921534798137802, era5_params['a'][143,178], era5_params['loc'][143,178], era5_params['scale'][143,178])
# Out: 290.57999427509816 # UTCI in ERA5 corresponding to this percentile.
# transfer function
def bias_correct(x, model_params, obs_params, ilat, ilon):
cdf = st.skewnorm.cdf(x, model_params['a'][ilat, ilon], model_params['loc'][ilat, ilon], model_params['scale'][ilat, ilon])
x_hat = st.skewnorm.ppf(cdf, obs_params['a'][ilat, ilon], obs_params['loc'][ilat, ilon], obs_params['scale'][ilat, ilon])
return x_hat
# ## Bias correct future simulations
#
# For now, just use 2100
modelfuturedir = '/gws/pw/j05/cop26_hackathons/bristol/project10/utci_projections_1deg/HadGEM3-GC31-LL/ssp585/r1i1p1f3/'
cube_model_future = iris.load(modelfuturedir + 'utci_3hr_HadGEM3-GC31-LL_ssp585_r1i1p1f3_gn_210001010300-210101010000.nc')
cube_model_future = cube_model_future.concatenate_cube()
leeds_model_future = cube_model_future[:,143,178]
model_future_params = {}
model_future_params['a'] = np.zeros((cube_model_future.shape[1:3]))
model_future_params['loc'] = np.zeros((cube_model_future.shape[1:3]))
model_future_params['scale'] = np.zeros((cube_model_future.shape[1:3]))
model_future_params['lat'] = cube_model_future.coord('latitude').points
model_future_params['lon'] = cube_model_future.coord('longitude').points
model_future_params['a'][143,178], model_future_params['loc'][143,178], model_future_params['scale'][143,178] = st.skewnorm.fit(leeds_model_future.data)
#pl.hist(leeds_model.data, density=True, label='HadGEM3-GC31-LL 1985', alpha=0.3, bins=50)
#pl.hist(leeds_era5.data, density=True, label='ERA5-HEAT', alpha=0.3, bins=50)
#pl.hist(leeds_model_future.data, density=True, label='HadGEM3-GC31-LL 2100', alpha=0.3, bins=50)
#pl.plot(np.arange(240, 320), st.skewnorm.pdf(np.arange(240, 320), model_params['a'][143,178], model_params['loc'][143,178], model_params['scale'][143,178]), color='tab:blue')
#pl.plot(np.arange(240, 320), st.skewnorm.pdf(np.arange(240, 320), era5_params['a'][143,178], era5_params['loc'][143,178], era5_params['scale'][143,178]), color='tab:orange')
#pl.plot(np.arange(240, 320), st.skewnorm.pdf(np.arange(240, 320), model_future_params['a'][143,178], model_future_params['loc'][143,178], model_future_params['scale'][143,178]), color='tab:green')
#pl.legend()
#pl.title('Leeds grid cell')
#pl.show()
# bias correct the Leeds 2100 projections
leeds_model_future_biascorrected = bias_correct(leeds_model_future.data, model_params, era5_params, 143, 178)
pl.hist(leeds_model.data, density=True, label='HadGEM3-GC31-LL 1985', alpha=0.3, bins=50)
pl.hist(leeds_era5.data, density=True, label='ERA5-HEAT', alpha=0.3, bins=50)
pl.hist(leeds_model_future.data, density=True, label='HadGEM3-GC31-LL 2100', alpha=0.3, bins=50)
pl.hist(leeds_model_future_biascorrected, density=True, label='Bias-corrected 2100', alpha=0.3, bins=50)
pl.plot(np.arange(240, 320), st.skewnorm.pdf(np.arange(240, 320), model_params['a'][143,178], model_params['loc'][143,178], model_params['scale'][143,178]), color='tab:blue')
pl.plot(np.arange(240, 320), st.skewnorm.pdf(np.arange(240, 320), era5_params['a'][143,178], era5_params['loc'][143,178], era5_params['scale'][143,178]), color='tab:orange')
pl.plot(np.arange(240, 320), st.skewnorm.pdf(np.arange(240, 320), model_future_params['a'][143,178], model_future_params['loc'][143,178], model_future_params['scale'][143,178]), color='tab:green')
pl.legend()
pl.title('Leeds grid cell')
pl.show()
|
[
"chrisroadmap@gmail.com"
] |
chrisroadmap@gmail.com
|
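The quantile-mapping step in bias_correction.py, x_hat = F_inv_obs(F_model(x)) with skew-normal CDFs, can be checked in isolation. The following is a self-contained sketch on synthetic data, assuming only numpy and scipy; the numbers are illustrative, not from the JASMIN datasets.

```python
import numpy as np
import scipy.stats as st

rng = np.random.default_rng(0)
model_hist = rng.normal(301.0, 4.0, 5000)  # synthetic "model historical" UTCI (K)
obs_hist = rng.normal(299.5, 3.5, 5000)    # synthetic "observed" (ERA5-HEAT-like) UTCI (K)

# Fit skew-normal distributions to both historical samples, as in the script above.
model_fit = st.skewnorm.fit(model_hist)
obs_fit = st.skewnorm.fit(obs_hist)

def quantile_map(x, model_fit, obs_fit):
    # Percentile of x in the model climate, mapped back through the observed climate.
    cdf = st.skewnorm.cdf(x, *model_fit)
    return st.skewnorm.ppf(cdf, *obs_fit)

# A warm model value expressed on the observational scale.
print(quantile_map(305.0, model_fit, obs_fit))
```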
951acdaacbf96a5af43073fe36dba77c68a2eb14
|
71c7683331a9037fda7254b3a7b1ffddd6a4c4c8
|
/Phys/BsJPsiKst/python/BsJPsiKst/GetTristanWeights_paramAc.py
|
de47f925bc07c284ed7678d0bb08603b73f108b6
|
[] |
no_license
|
pseyfert-cern-gitlab-backup/Urania
|
edc58ba4271089e55900f8bb4a5909e9e9c12d35
|
1b1c353ed5f1b45b3605990f60f49881b9785efd
|
refs/heads/master
| 2021-05-18T13:33:22.732970
| 2017-12-15T14:42:04
| 2017-12-15T14:42:04
| 251,259,622
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,884
|
py
|
from ROOT import *
from math import *
from array import *
from Urania import PDG
from Urania.Helicity import *
from Urania import RooInterfaces as D
from Urania import *
AccessPackage("Bs2MuMu")
from smartpyROOT import *
from OurSites import *
from sympy.utilities.lambdify import lambdify
from parameters import KpiBins4 as Kpibins
#neim = sys.argv[1]
#neim = "2011p_826_861"
spins = [0,1]
## ### Generate the pdf using the tools in Urania.Helicity
A = doB2VX(spins, helicities = [1,-1], transAmp = 1)#0)
### masage a bit the expression to make it more suitable for fitting
pdf_split = DecomposeAmplitudes(A,TransAmplitudes.values())#H.values())
phys = 0
TristanIntegral = 0
TristanWeights = {}
#BREAK
x = Symbol("helcosthetaK",real = True)
y = Symbol("helcosthetaL", real = True)
z = Symbol("helphi", real = True)
CThL = Cos(ThetaL)
CThK = Cos(ThetaK)
def changeFreeVars(function):
function = function.subs( Sin(2*ThetaK), 2*Sin(ThetaK)*Cos(ThetaK))
function = function.subs( Cos(2*ThetaK), 2*Cos(ThetaK)**2 - 1)
function = function.subs( Sin(ThetaK), Sqrt(1-Cos(ThetaK)**2))
function = function.subs( Sin(ThetaL), Sqrt(1-Cos(ThetaL)**2))
function = function.subs([(CThK,x),(CThL,y), (Phi,-z)])
return function
lam_pdf_split = {}
for key in pdf_split:
pdf_split[key] = changeFreeVars(pdf_split[key])
phys += StrongPhases(key)*pdf_split[key]
if pdf_split[key]:
lam_pdf_split[key] = lambdify((x,y,z), pdf_split[key], ("numpy")) ### Lambdify it to make it faster.
TristanWeights[key] = 0# Symbol("w_" + str(list(key.atoms())[0]) + str(list(key.atoms())[1]), positive = True)
#TristanIntegral += StrongPhases(key) * TristanWeights[key]
T = TransAmpModuli
P = TransAmpPhases
##c1_psi = Symbol("c1_psi",real = True)
##c2_psi = Symbol("c2_psi",real = True)
##c3_psi = Symbol("c3_psi",real = True)
##c4_psi = Symbol("c4_psi",real = True)
##y_acc = Symbol("y_acc", positive = True)
##c2_theta = Symbol("c2_theta", real = True)
##c5_psi = -1-c1_psi - c2_psi - c3_psi - c4_psi + y_acc
##acc = (1. + c1_psi*x + c2_psi*x*x + c3_psi*x*x*x + c4_psi*x*x*x*x + c5_psi*x*x*x*x*x)*(1. + c2_theta*y*y)
##acc = acc.subs([( c1_psi, -5.20101e-01),(c2_psi, -7.33299e-01), (c3_psi, -2.90606e-01), (c4_psi, 2.69475e-01), (c2_theta, 2.76201e-01), (y_acc,0)])
def CalculateWeights(acc):
out = {}
for key in TristanWeights.keys():
TristanWeights[key] = iter_integrate(acc*pdf_split[key],(z,-Pi,Pi),(x,-1,1), (y, -1,1)).n()
if "Abs" in str(key): out[str(key).replace("Abs(A_","").replace(")**2","")+str(key).replace("Abs(A_","").replace(")**2","")]=TristanWeights[key]
else: out[str(key).replace("re(","").replace("im(","").replace("A_","").replace("*conjugate(","").replace("))","")]=TristanWeights[key]
den = out['00']
for key in out.keys(): out[key] = out[key]/den
return out
|
[
"liblhcb@cern.ch"
] |
liblhcb@cern.ch
|
a5ab4535550b8fb055e694138a48dab497767a30
|
d22f8cd1a834f706d2c0cd77a814414cb4650265
|
/login/login/middleware/checksum.py
|
2a6998d66cfd6febc69caf3468ff43ac487ed4c2
|
[
"MIT"
] |
permissive
|
teris1994/L2py
|
9e7535935f58d729453f39bee998f21240b85e8b
|
07cc5d7c5d52ac4179378b29ef4873b11f6daa0c
|
refs/heads/master
| 2023-09-01T06:21:10.625029
| 2021-10-24T12:48:18
| 2021-10-24T13:21:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
from common import exceptions
from common.middleware.middleware import Middleware
from login.packets.init import Init
class ChecksumMiddleware(Middleware):
@staticmethod
def verify_checksum(data):
if len(data) % 4 != 0:
return False
checksum = Int32(0)
for i in range(0, len(data) - 4, 4):
check = Int32(data[i]) & 0xFF
check |= Int32(data[i + 1]) << 8 & 0xFF00
check |= Int32(data[i + 2]) << 0x10 & 0xFF0000
check |= Int32(data[i + 3]) << 0x18 & 0xFF000000
checksum ^= check
check = Int32(data[-4:])
return check == checksum
@staticmethod
def add_checksum(response_data):
"""Adds checksum to response."""
checksum = Int32(0)
for i in range(0, len(response_data) - 4, 4):
check = Int32(response_data[i]) & 0xFF
check |= Int32(response_data[i + 1]) << 8 & 0xFF00
check |= Int32(response_data[i + 2]) << 0x10 & 0xFF0000
check |= Int32(response_data[i + 3]) << 0x18 & 0xFF000000
checksum ^= check
response_data[-4:] = checksum
@classmethod
def before(cls, session, request):
"""Checks that requests checksum match."""
if not cls.verify_checksum(request.data):
raise exceptions.ChecksumMismatch()
@classmethod
def after(cls, client, response):
"""Adds checksum to response data."""
if not isinstance(response.packet, Init):
cls.add_checksum(response.data)
|
[
"yurzs@icloud.com"
] |
yurzs@icloud.com
|
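The middleware above XORs the packet as 32-bit little-endian words and compares the result with the trailing word. Int32 comes from the project's own helpers; a rough standard-library equivalent of the verification step, shown only as a sketch, is below.

```python
import struct

def verify_checksum(data: bytes) -> bool:
    """XOR all 32-bit little-endian words except the last one and compare to it."""
    if len(data) % 4 != 0 or not data:
        return False
    words = struct.unpack('<%dI' % (len(data) // 4), data)
    checksum = 0
    for word in words[:-1]:
        checksum ^= word
    return checksum == words[-1]

# Two payload words followed by their XOR as the checksum word.
payload = struct.pack('<3I', 0x11111111, 0x22222222, 0x11111111 ^ 0x22222222)
print(verify_checksum(payload))  # True
```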
7854b547da510307bdd078230c768958d415bf82
|
17e31331b7bf66ce11b77ff26a4ddbeb8355c53b
|
/2M1207ANALYSIS/plotTinyTimResult.py~
|
14780eb0b19495045732afd7925ceec7476793c6
|
[] |
no_license
|
YifZhou/Exoplanet-Patchy-Clouds
|
06e314b941055b2a758c081d5b169f5b909b416c
|
31c52938b22187182475872fd1550e3b9d384bf2
|
refs/heads/master
| 2020-12-24T16:43:02.882067
| 2015-12-11T22:26:30
| 2015-12-11T22:26:30
| 25,367,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
#! /usr/bin/env python
from __future__ import print_function
import matplotlib.pyplot as plt
import pandas as pd
plt.style.use('ggplot')
fn = '2M1207B_flt_F125W_fileInfo.csv'
df = pd.read_csv(fn, parse_dates = {'datetime':['obs date', 'obs time']}, index_col = 'datetime')
plt.plot(df.index, df['fluxA'], 's', label = '2M1207 A')
plt.plot(df.index, df['fluxB'], 'o', label = '2M1207 B')
plt.gcf().autofmt_xdate()
plt.legend(loc = 'best')
plt.xlabel('UT')
plt.ylabel('Normalized flux')
plt.show()
|
[
"zhouyifan1012@gmail.com"
] |
zhouyifan1012@gmail.com
|
|
8a3a42371a8d7d3f73a4cbf063670af54642286d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03805/s696591698.py
|
98b40232bec4242d8b740de8f5e409457aaeddf4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
# input
N,M = map(int,input().split())
graph = [ ]
for _ in range(N+1):
graph.append([])
for _ in range(M):
a,b = map(int,input().split())
graph[a].append(b)
graph[b].append(a)
visited = []
for _ in range(N+1):
visited.append(False)
def dfs(dep,cur):
global N,visited,graph
if dep == N:
return 1
ans = 0
for dist in graph[cur]:
if visited[dist] == False:
visited[dist] = True
ans += dfs(dep + 1,dist)
visited[dist] = False
return ans
visited[1] = True
print(dfs(1,1))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
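The solution above counts simple paths from vertex 1 that visit every vertex, using backtracking DFS. As an illustrative cross-check (not part of the submission), the same count can be obtained by brute force over permutations on a tiny graph.

```python
from itertools import permutations

def count_paths_bruteforce(n, edges, start=1):
    """Count paths from `start` that visit all n vertices, by trying every ordering."""
    adj = {v: set() for v in range(1, n + 1)}
    for a, b in edges:
        adj[a].add(b)
        adj[b].add(a)
    count = 0
    for perm in permutations([v for v in range(1, n + 1) if v != start]):
        order = (start,) + perm
        if all(order[i + 1] in adj[order[i]] for i in range(n - 1)):
            count += 1
    return count

# Complete graph on 3 vertices: the two paths are 1-2-3 and 1-3-2.
assert count_paths_bruteforce(3, [(1, 2), (1, 3), (2, 3)]) == 2
```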
df5293357794e1ccbcb5ff320b79d5c8e310eda0
|
2063a4c153b380c1d6d25f0ece6fb514389f9b1d
|
/mock/buildbot_secret.py
|
f9a64aec05765a936f22e9ece4058f6782435778
|
[
"MIT"
] |
permissive
|
musm/julia-buildbot
|
1d097f709d3f75c0becd46a9075fa52638f745d7
|
653746334ba7106cde197c910f4125a3f2930fc0
|
refs/heads/master
| 2021-06-24T08:15:38.373394
| 2020-07-15T06:10:41
| 2020-07-15T06:10:41
| 211,406,721
| 0
| 0
|
MIT
| 2019-09-27T21:48:59
| 2019-09-27T21:48:59
| null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
GITHUB_WEBHOOK_SECRET="nothing to see here"
GITHUB_OAUTH_CLIENT_ID="nothing to see here"
GITHUB_OAUTH_CLIENT_SECRET="nothing to see here"
GITHUB_STATUS_OAUTH_TOKEN="nothing to see here"
COVERALLS_REPO_TOKEN="nothing to see here"
CODECOV_REPO_TOKEN="nothing to see here"
FREEBSDCI_OAUTH_TOKEN="nothing to see here"
FQDN="buildog.julialang.org"
BUILDBOT_BRANCH="master"
db_user="nothing to see here"
db_password="nothing to see here"
DOCUMENTER_KEY="nothing to see here"
MACOS_CODESIGN_IDENTITY="nothing to see here"
|
[
"staticfloat@gmail.com"
] |
staticfloat@gmail.com
|
b1d83eb193e2280bb822881484407fa574b2b1dd
|
03ec2daac0989f9b6936b1e87d8ca1b0d99f1bce
|
/optfn/local_attention.py
|
cd07243576469d8196efc9d4b5eacfb79f74b7cb
|
[] |
no_license
|
SSS135/optfn
|
f7364dce8c1857baa90d2d6564316762c574a9ba
|
48ae4f5439daa89ac54921a7642e612838c724eb
|
refs/heads/master
| 2020-05-29T15:21:38.827291
| 2020-04-29T17:51:09
| 2020-04-29T17:51:09
| 189,217,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,861
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .tile_2d import Tile2d
import math
from torch.utils.checkpoint import checkpoint
class LocalAttention2d(nn.Module):
def __init__(self, in_channels, num_heads, key_size, kernel_size, stride=1, padding=0,
conv_kernel_size=1, conv_stride=1, conv_padding=0):
super().__init__()
self.in_channels = in_channels
self.key_size = key_size
self.num_heads = num_heads
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.out_channels = key_size * num_heads
self.attn_conv = nn.Conv2d(in_channels, 3 * key_size * num_heads, conv_kernel_size, conv_stride, conv_padding)
self.tiler = Tile2d(self.attn_conv.out_channels, kernel_size, stride, padding)
self.norm = nn.GroupNorm(3 * num_heads, 3 * key_size * num_heads)
def run_tiled(self, attn):
# (B, C, K, K, OH, OW)
tiles = self.tiler(attn)
B, C, K, _, OH, OW = tiles.shape
# (B, OH, OW, C, K, K)
tiles = tiles.permute(0, 4, 5, 1, 2, 3)
assert tiles.shape == (B, OH, OW, C, K, K)
# (B * OH * OW, NH, KS + QS + VS, K * K)
VS, KS, NH = self.key_size, self.key_size, self.num_heads
tiles = tiles.contiguous().view(B * OH * OW, NH, KS * 2 + VS, K * K)
# (B * OH * OW, NH, KS, K * K)
key, query, value = tiles.split([KS, KS, VS], dim=2)
# # (B * OH * OW, NH, KS, 1)
# query = query.mean(3, keepdim=True)
# (B * OH * OW, NH, 1, K * K)
saliency = query.transpose(-1, -2) @ key / math.sqrt(KS)
assert saliency.shape == (B * OH * OW, NH, K * K, K * K)
# (B * OH * OW, NH, 1, K * K)
mask = F.softmax(saliency, dim=-1)
# (B * OH * OW, NH, VS, 1)
out = value @ mask.transpose(-1, -2)
assert out.shape == (B * OH * OW, NH, VS, K * K)
# (B, NH, VS, OH, OW)
out = out.mean(-1).view(B, OH, OW, NH, VS).permute(0, 3, 4, 1, 2)
# (B, NH * VS, OH, OW)
out = out.view(B, NH * VS, OH, OW)
return out.contiguous()
def forward(self, input):
# (B, (KS + QS + VS) * NH, H, W)
attn = self.attn_conv(input)
attn = self.norm(attn)
return checkpoint(self.run_tiled, attn) if attn.requires_grad else self.run_tiled(attn)
class AddLocationInfo2d(nn.Module):
def __init__(self, config=((0.5, 0, 0), (1, 0, 0), (2, 0, 0), (4, 0, 0))):
super().__init__()
self.register_buffer('config', None)
self.register_buffer('harr', None)
self.register_buffer('warr', None)
self.config = torch.tensor(config, dtype=torch.float32)
self.harr = None
self.warr = None
def forward(self, input):
with torch.no_grad():
b, _, h, w = input.shape
targs = dict(device=input.device, dtype=input.dtype)
# if self.harr is None or self.harr.shape[2] != h or self.warr.shape[3] != w:
harr = torch.arange(h, **targs).div_(h - 1).view(1, 1, h, 1)
warr = torch.arange(w, **targs).div_(w - 1).view(1, 1, 1, w)
scale, hoffset, woffset = [x.view(1, -1, 1, 1) for x in torch.unbind(self.config, -1)]
harr, warr = [x.repeat(b, len(self.config), 1, 1).mul_(scale) for x in (harr, warr)]
self.harr = harr.add_(hoffset).mul_(2 * math.pi)
self.warr = warr.add_(woffset).mul_(2 * math.pi)
# else:
# harr, warr = self.harr, self.warr
# scale = self.config[:, 0].view(1, -1, 1, 1)
hrand, wrand = torch.empty((b, 2, 1, 1), **targs).uniform_(-1000, 1000).chunk(2, dim=1)
loc = (harr + hrand).sin_() + (warr + wrand).sin_()
loc.mul_(0.5)
return torch.cat([input, loc], 1)
|
[
"sss13594@gmail.com"
] |
sss13594@gmail.com
|
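The core of run_tiled above is a per-tile scaled dot-product attention over key/query/value slices. A minimal sketch of that step on plain tensors, with illustrative shapes and without Tile2d or the projection conv, follows.

```python
import math
import torch
import torch.nn.functional as F

B, NH, KS, VS, L = 2, 4, 8, 8, 9  # batch*tiles, heads, key size, value size, K*K positions
key = torch.randn(B, NH, KS, L)
query = torch.randn(B, NH, KS, L)
value = torch.randn(B, NH, VS, L)

# (B, NH, L, L): similarity of every tile position with every other, scaled by sqrt(KS).
saliency = query.transpose(-1, -2) @ key / math.sqrt(KS)
mask = F.softmax(saliency, dim=-1)

# (B, NH, VS, L): values re-weighted by the attention mask, then pooled over positions.
out = value @ mask.transpose(-1, -2)
out = out.mean(-1)
print(out.shape)  # torch.Size([2, 4, 8])
```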
28e50a72627709735aaa8070033c738ca8ed1c72
|
3cb3702d2f3fb6729f1ea685d8439b7c2ad4f069
|
/work22/q1_sol.py
|
c7c5943f9db4e95834bb8b91f23bdecf41c4985e
|
[] |
no_license
|
ysmintor/MLAlgorithm
|
95d6ceea5b16f94a039a2a5014f78ba3cdbd49d6
|
0ac119eacca336dbc9a1c22ea8a558c1761a08f4
|
refs/heads/master
| 2020-04-02T01:59:12.813910
| 2019-05-31T05:06:15
| 2019-05-31T05:06:15
| 153,885,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,005
|
py
|
"""
Solution to simple exercises to get used to TensorFlow API
You should thoroughly test your code.
TensorFlow's official documentation should be your best friend here
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Created by Chip Huyen (chiphuyen@cs.stanford.edu)
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
sess = tf.InteractiveSession()
###############################################################################
# 1a: Create two random 0-d tensors x and y of any distribution.
# Create a TensorFlow object that returns x + y if x > y, and x - y otherwise.
# Hint: look up tf.cond()
# I do the first problem for you
###############################################################################
x = tf.random_uniform([]) # Empty array as shape creates a scalar.
y = tf.random_uniform([])
out = tf.cond(tf.greater(x, y), lambda: tf.add(x, y), lambda: tf.subtract(x, y))
###############################################################################
# 1b: Create two 0-d tensors x and y randomly selected from the range [-1, 1).
# Return x + y if x < y, x - y if x > y, 0 otherwise.
# Hint: Look up tf.case().
###############################################################################
x = tf.random_uniform([], -1, 1, dtype=tf.float32)
y = tf.random_uniform([], -1, 1, dtype=tf.float32)
out = tf.case({tf.less(x, y): lambda: tf.add(x, y),
tf.greater(x, y): lambda: tf.subtract(x, y)},
default=lambda: tf.constant(0.0), exclusive=True)
###############################################################################
# 1c: Create the tensor x of the value [[0, -2, -1], [0, 1, 2]]
# and y as a tensor of zeros with the same shape as x.
# Return a boolean tensor that yields Trues if x equals y element-wise.
# Hint: Look up tf.equal().
###############################################################################
x = tf.constant([[0, -2, -1], [0, 1, 2]])
y = tf.zeros_like(x)
out = tf.equal(x, y)
###############################################################################
# 1d: Create the tensor x of value
# [29.05088806, 27.61298943, 31.19073486, 29.35532951,
# 30.97266006, 26.67541885, 38.08450317, 20.74983215,
# 34.94445419, 34.45999146, 29.06485367, 36.01657104,
# 27.88236427, 20.56035233, 30.20379066, 29.51215172,
# 33.71149445, 28.59134293, 36.05556488, 28.66994858].
# Get the indices of elements in x whose values are greater than 30.
# Hint: Use tf.where().
# Then extract elements whose values are greater than 30.
# Hint: Use tf.gather().
###############################################################################
x = tf.constant([29.05088806, 27.61298943, 31.19073486, 29.35532951,
30.97266006, 26.67541885, 38.08450317, 20.74983215,
34.94445419, 34.45999146, 29.06485367, 36.01657104,
27.88236427, 20.56035233, 30.20379066, 29.51215172,
33.71149445, 28.59134293, 36.05556488, 28.66994858])
indices = tf.where(x > 30)
out = tf.gather(x, indices)
###############################################################################
# 1e: Create a diagonal 2-d tensor of size 6 x 6 with the diagonal values of 1,
# 2, ..., 6
# Hint: Use tf.range() and tf.diag().
###############################################################################
values = tf.range(1, 7)
out = tf.diag(values)
###############################################################################
# 1f: Create a random 2-d tensor of size 10 x 10 from any distribution.
# Calculate its determinant.
# Hint: Look at tf.matrix_determinant().
###############################################################################
m = tf.random_normal([10, 10], mean=10, stddev=1)
out = tf.matrix_determinant(m)
###############################################################################
# 1g: Create tensor x with value [5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9].
# Return the unique elements in x
# Hint: use tf.unique(). Keep in mind that tf.unique() returns a tuple.
###############################################################################
x = tf.constant([5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9])
unique_values, indices = tf.unique(x)
###############################################################################
# 1h: Create two tensors x and y of shape 300 from any normal distribution,
# as long as they are from the same distribution.
# Use tf.cond() to return:
# - The mean squared error of (x - y) if the average of all elements in (x - y)
# is negative, or
# - The sum of absolute value of all elements in the tensor (x - y) otherwise.
# Hint: see the Huber loss function in the lecture slides 3.
###############################################################################
x = tf.random_normal([300], mean=5, stddev=1)
y = tf.random_normal([300], mean=5, stddev=1)
average = tf.reduce_mean(x - y)
def f1(): return tf.reduce_mean(tf.square(x - y))
def f2(): return tf.reduce_sum(tf.abs(x - y))
out = tf.cond(average < 0, f1, f2)
|
[
"ysmintor@gmail.com"
] |
ysmintor@gmail.com
|
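The exercises above use TensorFlow 1.x symbols (tf.random_uniform, tf.diag, tf.matrix_determinant, InteractiveSession). As a hedged sketch, exercise 1a in TensorFlow 2.x eager style, assuming TF 2.x is installed, looks like this.

```python
import tensorflow as tf  # TensorFlow 2.x, eager execution by default

x = tf.random.uniform([])  # scalar drawn from U[0, 1)
y = tf.random.uniform([])
# x + y if x > y, otherwise x - y, mirroring the tf.cond pattern in the exercise.
out = tf.cond(x > y, lambda: x + y, lambda: x - y)
print(float(out))
```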
3365218943d3ae9ecc58ae8e412764c6fc07228b
|
deca929038a88ced836ede461b3cdd951b02dfd6
|
/file_upload_project/file_upload_project/settings.py
|
22f59090d0178ccac5b3a733a7deb59b9992133e
|
[] |
no_license
|
JeffLawrence1/Python-Django-Beginner
|
cff08ff5ab167ff82987b2c4fb1e33d37b1876a9
|
49c0f270f61ae31cf39562bb63c2facf7a443b8d
|
refs/heads/master
| 2020-03-09T03:36:25.347060
| 2018-04-07T21:16:54
| 2018-04-07T21:16:54
| 128,567,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,163
|
py
|
"""
Django settings for file_upload_project project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y39if6%-3f7(u_bjoxw#%wmt82xdgd%%q2^%y0wedt)$gsc$oc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.file_upload_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'file_upload_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'file_upload_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"jefflaw13@hotmail.com"
] |
jefflaw13@hotmail.com
|
8f4797c22b68ab37a8cf040014770158b6c472ef
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2339/60581/245838.py
|
f70511660b4a3a605ebb9a0482586e53a440cc8f
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,528
|
py
|
import sys
lst = []
for line in sys.stdin:
if line.strip()=="":
break
lst.append(line)
input = []
# parse the input lines
for i in range(0,len(lst)):
theLine = []
j = 0
while j < len(lst[i]):
str = ''
judgeWord = False
judgeNumber = False
if lst[i][j]>='A' and lst[i][j]<='Z':
judgeWord = True
str += lst[i][j]
while judgeWord:
j += 1
if j == len(lst[i]):
theLine.append(str)
break
if lst[i][j]>='A' and lst[i][j]<='Z':
str += lst[i][j]
else:
judgeWord = False
theLine.append(str)
if lst[i][j]>='0' and lst[i][j]<='9':
judgeNumber = True
str += lst[i][j]
while judgeNumber:
j += 1
if j == len(lst[i]):
theLine.append(int(str))
break
if lst[i][j]>='0' and lst[i][j]<='9':
str += lst[i][j]
else:
judgeNumber = False
theLine.append(int(str))
j += 1
input.append(theLine)
testNumber = input[0][0]
start = 1
count = 0
while count < testNumber:
reverseNumber = 0
numbers = input[start][0]
numberList = input[start+1].copy()
for i in range(0,numbers-1):
for j in range(i+1,numbers):
if numberList[i]> numberList[j]:
reverseNumber += 1
print(reverseNumber)
start += 2
count += 1
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
92c1eb4f1f66c9d305670471cc22c3e4c381ebf3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03372/s276854525.py
|
57b897a767dabcd81e8e6b4752abbd0e7252b35d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
N,C = map(int,input().split())
A = [list(map(int,input().split())) for _ in range(N)]
A.insert(0,[0,0])
F = [0 for _ in range(N+1)]
F[N] = A[N][1]-(C-A[N][0])
for i in range(N-1,0,-1):
F[i] = F[i+1]+A[i][1]-(A[i+1][0]-A[i][0])
G = [0 for _ in range(N+1)]
for i in range(1,N+1):
G[i] = G[i-1]+A[i][1]-(A[i][0]-A[i-1][0])
cmax = max(max(F),max(G))
dmax = 0
for i in range(N-1,0,-1):
dmax = max(dmax,F[i+1])
cmax = max(cmax,dmax+G[i]-A[i][0])
emax = 0
for i in range(2,N+1):
emax = max(emax,G[i-1])
cmax = max(cmax,emax+F[i]-(C-A[i][0]))
print(cmax)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
e0d1c815ddb30cf7f7840777d2c216dcf534a24c
|
467be8fc9c975638fcb7a64d098e1526fd1c96f0
|
/dlint/linters/bad_defusedxml_use.py
|
95bc81cf1c4d9b13fbf7c12dd478ebb65246b99c
|
[
"BSD-3-Clause"
] |
permissive
|
dlint-py/dlint
|
ed8d2ca0446914fceded654a2b810b7f8ad0d9d3
|
307b301cd9e280dcd7a7f9d5edfda3d58e4855f5
|
refs/heads/master
| 2023-04-13T08:54:52.987469
| 2023-04-10T19:27:01
| 2023-04-10T19:27:15
| 232,599,661
| 154
| 16
|
BSD-3-Clause
| 2023-03-09T21:21:19
| 2020-01-08T15:53:36
|
Python
|
UTF-8
|
Python
| false
| false
| 5,772
|
py
|
#!/usr/bin/env python
from .helpers import bad_kwarg_use
from .. import tree
class BadDefusedxmlUseLinter(bad_kwarg_use.BadKwargUseLinter):
"""This linter looks for lack of "defusedxml" parsing defenses. The
"defusedxml" library offers "forbid_dtd", "forbid_entities", and
"forbid_external" keyword arguments to prevent various XML attack
vectors[1]. All defenses should be enabled.
[1] https://pypi.org/project/defusedxml/
"""
off_by_default = False
_code = 'DUO135'
_error_tmpl = 'DUO135 enable all "forbid_*" defenses when using "defusedxml" parsing'
@property
def kwargs(self):
return [
{
"module_path": "defusedxml.lxml.fromstring",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.lxml.iterparse",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.lxml.parse",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.lxml.fromstring",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.lxml.iterparse",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.lxml.parse",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.lxml.fromstring",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.lxml.iterparse",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.lxml.parse",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.cElementTree.fromstring",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.cElementTree.iterparse",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.cElementTree.parse",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.cElementTree.fromstring",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.cElementTree.iterparse",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.cElementTree.parse",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.cElementTree.fromstring",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.cElementTree.iterparse",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.cElementTree.parse",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.ElementTree.fromstring",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.ElementTree.iterparse",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.ElementTree.parse",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.ElementTree.fromstring",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.ElementTree.iterparse",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.ElementTree.parse",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.ElementTree.fromstring",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.ElementTree.iterparse",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.ElementTree.parse",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
]
|
[
"schwag09@gmail.com"
] |
schwag09@gmail.com
|
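The linter above flags defusedxml parsing calls that leave any of the forbid_* defenses off. A small usage sketch of the form DUO135 accepts, with all three defenses enabled on defusedxml.ElementTree.fromstring, is below.

```python
import defusedxml.ElementTree as ET

xml_text = "<root><item>ok</item></root>"

# All three defenses enabled, which is what the DUO135 rule expects.
tree = ET.fromstring(
    xml_text,
    forbid_dtd=True,
    forbid_entities=True,
    forbid_external=True,
)
print(tree.tag)  # "root"
```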
32a6600da729c0f3fd1643970f821d26a6615da5
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/allergies/12407861133f488faa80356443c08313.py
|
6ece998ee9ac220a1f267c249ba6d91f9e465777
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
class Allergies(list):
def __init__(self, score):
items = [ 'eggs',\
'peanuts',\
'shellfish',\
'strawberries',\
'tomatoes',\
'chocolate',\
'pollen',\
'cats' ]
self.list = []
for i in range(8):
if (1 << i) & score:
self.list.append(items[i])
def is_allergic_to(self, item):
return item in self.list
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
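The Allergies class decodes a bit-flag score, one bit per allergen in the listed order. A short usage sketch, assuming the class above is importable, shows how a score maps back to allergens.

```python
# score 5 = 0b101 -> bits 0 and 2 -> 'eggs' and 'shellfish'
allergies = Allergies(5)
print(allergies.list)                       # ['eggs', 'shellfish']
print(allergies.is_allergic_to('eggs'))     # True
print(allergies.is_allergic_to('peanuts'))  # False
```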
fba38459a2d51b42d006592e4fd6866557f118f7
|
4f04ce5667f895889cfe54ed5f0dec6f5e7d4e4e
|
/bert_brain/data_sets/word_in_context.py
|
a0e401eb6455ea5649b200a69f747179c6b36a65
|
[] |
no_license
|
danrsc/bert_brain
|
e172859b7ab93b0a05ed7c5b936778fae134eabb
|
eca204f163018270ac6b6687c2f3b6b5b158a89c
|
refs/heads/master
| 2022-11-28T14:32:45.420452
| 2020-08-03T00:14:42
| 2020-08-03T00:14:42
| 167,277,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,590
|
py
|
import os
import json
from dataclasses import dataclass
import numpy as np
from ..common import split_with_indices
from .input_features import RawData, KindData, ResponseKind
from .corpus_base import CorpusBase, CorpusExampleUnifier, path_attribute_field
__all__ = ['WordInContext']
@dataclass(frozen=True)
class WordInContext(CorpusBase):
path: str = path_attribute_field('word_in_context_path')
@staticmethod
def sentence_and_keyword_index(sentence, keyword, character_index):
keyword_index = None
words = list()
for w_index, (c_index, word) in enumerate(split_with_indices(sentence)):
if c_index + len(word) > character_index >= c_index:
keyword_index = w_index
words.append(word)
if keyword_index is None:
raise ValueError('Unable to match keyword index')
return words, keyword_index
@staticmethod
def _read_examples(path, example_manager: CorpusExampleUnifier, labels):
examples = list()
with open(path, 'rt') as f:
for line in f:
fields = json.loads(line.strip('\n'))
words_1, keyword_1 = WordInContext.sentence_and_keyword_index(
fields['sentence1'], fields['word'], fields['start1'])
words_2, keyword_2 = WordInContext.sentence_and_keyword_index(
fields['sentence2'], fields['word'], fields['start2'])
label = fields['label'] if 'label' in fields else 1
data_ids = -1 * np.ones(len(words_1) + len(words_2), dtype=np.int64)
data_ids[keyword_1] = len(labels)
data_ids[keyword_2] = len(labels)
examples.append(example_manager.add_example(
example_key=None,
words=words_1 + words_2,
sentence_ids=[0] * len(words_1) + [1] * len(words_2),
data_key='wic',
data_ids=data_ids,
start=0,
stop=len(words_1),
start_sequence_2=len(words_1),
stop_sequence_2=len(words_1) + len(words_2)))
labels.append(label)
return examples
@classmethod
def response_key(cls) -> str:
return 'wic'
@classmethod
def num_classes(cls) -> int:
return 2
def _load(self, example_manager: CorpusExampleUnifier, use_meta_train: bool):
labels = list()
train = WordInContext._read_examples(
os.path.join(self.path, 'train.jsonl'), example_manager, labels)
meta_train = None
if use_meta_train:
from sklearn.model_selection import train_test_split
idx_train, idx_meta_train = train_test_split(np.arange(len(train)), test_size=0.2)
meta_train = [train[i] for i in idx_meta_train]
train = [train[i] for i in idx_train]
validation = WordInContext._read_examples(
os.path.join(self.path, 'val.jsonl'), example_manager, labels)
test = WordInContext._read_examples(
os.path.join(self.path, 'test.jsonl'), example_manager, labels)
labels = np.array(labels, dtype=np.float64)
labels.setflags(write=False)
return RawData(
input_examples=train,
validation_input_examples=validation,
test_input_examples=test,
meta_train_input_examples=meta_train,
response_data={type(self).response_key(): KindData(ResponseKind.generic, labels)},
is_pre_split=True)
|
[
"daniel.robert.schwartz@gmail.com"
] |
daniel.robert.schwartz@gmail.com
|
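sentence_and_keyword_index above maps a character offset to a word index using the project's split_with_indices helper. The standalone sketch below shows the same mapping with plain single-space splitting; it is illustrative only and not the project's implementation.

```python
def word_index_at(sentence: str, char_index: int) -> int:
    """Return the index of the word containing the given character offset."""
    pos = 0
    for w_i, word in enumerate(sentence.split(' ')):
        if pos <= char_index < pos + len(word):
            return w_i
        pos += len(word) + 1  # skip the separating space
    raise ValueError('character index not inside any word')

assert word_index_at('the quick brown fox', 10) == 2  # offset 10 falls inside 'brown'
```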
2410138a68e12d0198596a040e18476ee91d7569
|
dee143986a25fd602b67aadf82e15d2f7b18f85b
|
/perfect_stranger/game/pages.py
|
7235807b1ada5f95df35ec3eb9200b9fd3776b30
|
[
"MIT"
] |
permissive
|
cesslab/otree-perfect-stranger-matching
|
ea3bace81d2cd810a3197c67648ed6584839bfd9
|
0f5a4fc2beac0176d86f622a23e07511026f77cc
|
refs/heads/master
| 2022-12-12T17:55:44.895739
| 2020-02-08T14:50:46
| 2020-02-08T14:50:46
| 239,089,905
| 0
| 0
|
NOASSERTION
| 2022-12-08T03:35:36
| 2020-02-08T07:55:53
|
Python
|
UTF-8
|
Python
| false
| false
| 657
|
py
|
from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
class MyPage(Page):
def vars_for_template(self):
subsessions = self.session.get_subsessions()
matrices = [subsession.get_group_matrix() for subsession in subsessions]
return {
'other_player': self.player.get_others_in_group()[0],
'matrix': self.subsession.get_group_matrix(),
'group_matrices': matrices
}
# class ResultsWaitPage(WaitPage):
# def after_all_players_arrive(self):
# pass
# class Results(Page):
# pass
page_sequence = [MyPage]
|
[
"anwarruff@gmail.com"
] |
anwarruff@gmail.com
|
7ed601dcf4757b3e86143ca0ec316307eb2303e2
|
a50487ba59c7ce04854f9004b9752a32823b7b2a
|
/src/server.py
|
347e07bc1763ec6c906308c9d56625708ab15516
|
[] |
no_license
|
shonenada-archives/sqlite-sync
|
909062646b01f80cf621b5527f168049e9012e76
|
f1379939893cebfffae701904ef12d6b4e4e18ea
|
refs/heads/master
| 2021-01-01T05:13:56.854536
| 2016-06-04T17:53:22
| 2016-06-04T17:53:22
| 59,564,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,607
|
py
|
# -*- coding: utf-8 -*-
import os
import sys
import socket
import base64
import sqlite3
HOST = '0.0.0.0'
PORT = 23333
MAX_CONNECTIONS = 1
SEGMENT_SZIE = 1024
DB_PATH = './dbs/sync.db'
db = sqlite3.connect(DB_PATH)
cursor = db.cursor()
def invalid_command(params):
return 'Invalid command'
def ping_command(params):
return 'Pong'
def last_command(params):
cursor.execute('SELECT id FROM images ORDER BY ID DESC LIMIT 1')
rs = cursor.fetchone()
if rs:
return str(rs[0])
else:
return None
def sync_command(params):
    id_ = params
    cursor.execute('SELECT id, data FROM images WHERE id > ? ORDER BY ID LIMIT 1', (id_,))
    data = cursor.fetchone()
    # guard against an empty result before touching data[0]/data[1]
    if data is None:
        return None
    img = base64.b64encode(data[1])
    packet = '{} {}'.format(data[0], img)
    return packet
def shutdown(params):
raise IOError()
class Server(object):
commands = {
'PING': ping_command,
'LAST': last_command,
'SYNC': sync_command,
'SHUTDOWN': shutdown,
}
def __init__(self, host, port):
self.host = host
self.port = port
self.server = None
def run(self):
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.bind((self.host, self.port))
self.server.listen(MAX_CONNECTIONS)
print 'listen %s:%s' % (self.host, self.port)
while True:
connection, address = self.server.accept()
print 'Connected from %s' % str(address)
while True:
msg = connection.recv(SEGMENT_SZIE)
if msg is not None:
split_msg = msg.split(' ', 1)
if len(split_msg) > 1:
command, params = split_msg
else:
command = split_msg[0]
params = None
# print command
if command == 'CLOSE':
break
command_handler = self.commands.get(command, invalid_command)
result = command_handler(params)
if result is not None:
connection.send(result + '\r\n\r\n')
connection.close()
def main():
if len(sys.argv) == 1:
host, port = HOST, PORT
elif len(sys.argv) == 2:
host = sys.argv[1]
port = PORT
elif len(sys.argv) == 3:
host = sys.argv[1]
port = sys.argv[2]
server = Server(host, port)
server.run()
if __name__ == '__main__':
main()
|
[
"shonenada@gmail.com"
] |
shonenada@gmail.com
|
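server.py is Python 2 and speaks a tiny line-oriented protocol (PING, LAST, SYNC <id>, CLOSE, SHUTDOWN), with each reply terminated by a blank line. A hypothetical Python 3 client sketch for that protocol, not part of the repository, might look like this.

```python
import socket

def request(command, host='127.0.0.1', port=23333):
    """Send one command (e.g. 'PING', 'LAST', 'SYNC <id>') and read the reply."""
    with socket.create_connection((host, port)) as conn:
        conn.sendall(command.encode())
        reply = b''
        while not reply.endswith(b'\r\n\r\n'):
            chunk = conn.recv(1024)
            if not chunk:
                break
            reply += chunk
        conn.sendall(b'CLOSE')  # tell the server to drop this connection
        return reply.rstrip(b'\r\n').decode()

print(request('PING'))  # expected reply: 'Pong'
```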
d472390654a9ff3499afe1db639ef2ad8891fa67
|
e811a08b8b653da94e516ca147ec49b534f74a62
|
/test/Test_unittest.py
|
770f9e14cf79477dded396b3d430e617b5f53bf2
|
[] |
no_license
|
HoYaStudy/Python_Study
|
0feb4a9ba7e68ebea6b2db15b20a3680f979a4de
|
59c2cc093ae8ae87c8e07365cc432d87ded29ccc
|
refs/heads/master
| 2023-02-07T23:40:16.135565
| 2023-01-24T06:17:58
| 2023-01-24T06:17:58
| 200,445,372
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,493
|
py
|
# Import Module --------------------------------------------------------------#
import unittest
# Class Definition to Test ---------------------------------------------------#
class TestClass1:
pass
class TestClass2:
pass
# Test Suite Class Definition ------------------------------------------------#
class TestSuite(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testEqual(self):
"""
actual == expected
"""
actual = 1
expected = 1
self.assertEqual(actual, expected)
def testNotEqual(self):
"""
actual != expected
"""
actual = 1
expected = 2
self.assertNotEqual(actual, expected)
def testTrue(self):
"""
bool(value) is True
"""
value = True
self.assertTrue(value)
def testFalse(self):
"""
bool(value) is False
"""
value = False
self.assertFalse(value)
def testIs(self):
"""
value1 is value2
"""
value1 = TestClass1()
value2 = value1
self.assertIs(value1, value2)
def testIsNot(self):
"""
value1 is not value2
"""
value1 = TestClass1()
value2 = TestClass2()
self.assertIsNot(value1, value2)
def testIsNone(self):
"""
value is None
"""
value = None
self.assertIsNone(value)
def testIsNotNone(self):
"""
value is not None
"""
value = "test"
self.assertIsNotNone(value)
def testIn(self):
"""
value1 in value2
"""
value1 = 1
value2 = range(6)
self.assertIn(value1, value2)
def testNotIn(self):
"""
value1 not in value2
"""
value1 = 7
value2 = range(6)
self.assertNotIn(value1, value2)
def testIsInstance(self):
"""
isinstance(value1, value2)
"""
value1 = TestClass1()
value2 = TestClass1
self.assertIsInstance(value1, value2)
def testNotIsInstance(self):
"""
not isinstance(value1, value2)
"""
value1 = TestClass1()
value2 = TestClass2
self.assertNotIsInstance(value1, value2)
def testAlmostEqual(self):
"""
round(value1 - value2, 7) == 0
"""
value1 = 1.23456789
value2 = 1.23456788
self.assertAlmostEqual(value1, value2)
def testNotAlmostEqual(self):
"""
round(value1 - value2, 7) != 0
"""
value1 = 3.14
value2 = 3.15
self.assertNotAlmostEqual(value1, value2)
def testGreater(self):
"""
value1 > value2
"""
value1 = 5
value2 = 3
self.assertGreater(value1, value2)
def testGreaterEqual(self):
"""
value1 >= value2
"""
value1 = 5
value2 = 3
self.assertGreaterEqual(value1, value2)
def testLess(self):
"""
value1 < value2
"""
value1 = 2
value2 = 4
self.assertLess(value1, value2)
def testLessEqual(self):
"""
value1 <= value2
"""
value1 = 2
value2 = 4
self.assertLessEqual(value1, value2)
def testRegex(self):
"""
value2.search(value1)
"""
value1 = "test"
value2 = "e"
self.assertRegex(value1, value2)
def testNotRegex(self):
"""
not value2.search(value1)
"""
value1 = "test"
value2 = "a"
self.assertNotRegex(value1, value2)
def testCountEqual(self):
"""
value1 and value2 have the same elements in the same number,
regardless of their order.
"""
value1 = "abcde"
value2 = "ecbda"
self.assertCountEqual(value1, value2)
def testMultiLineEqual(self):
str1 = "T\
E\
S\
T"
str2 = "T\
E\
S\
T"
self.assertMultiLineEqual(str1, str2)
def testSequenceEqual(self):
seq1 = range(6)
seq2 = range(6)
self.assertSequenceEqual(seq1, seq2)
def testListEqual(self):
list1 = [1, 2, 3]
list2 = [1, 2, 3]
self.assertListEqual(list1, list2)
def testTupleEqual(self):
tuple1 = (1, 2, 3)
tuple2 = (1, 2, 3)
self.assertTupleEqual(tuple1, tuple2)
def testSetEqual(self):
set1 = set([1, 2, 3])
set2 = set([3, 2, 1])
self.assertSetEqual(set1, set2)
def testDictEqual(self):
dict1 = {"key1": "value1", "key2": "value2"}
dict2 = {"key2": "value2", "key1": "value1"}
self.assertDictEqual(dict1, dict2)
def testAdd(self):
params = ((3, {"a": 1, "b": 2}), (5, {"a": 2, "b": 3}), (7, {"a": 3, "b": 4}))
for expected, param in params:
with self.subTest(**param):
actual = param["a"] + param["b"]
self.assertEqual(actual, expected)
@unittest.skip("This test will be skipped")
def testSkip(self):
pass
@unittest.skipIf(2 > 1, "This test will be skipped")
def testSkipIf(self):
pass
# Main -----------------------------------------------------------------------#
if __name__ == "__main__":
unittest.main()
|
[
"hoya128@gmail.com"
] |
hoya128@gmail.com
|
b9c7959458aceb44a3dafe8e626e9ad91f88343a
|
02800e659f2088550a402d7d7d8e3902560893e3
|
/merf/internal/configs.py
|
783fbcf0eb66cf46b5722afa9c8dbd741cc86fd4
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
antonpolishko/google-research
|
ab8a445d5401185eadfe757e73dc8bcf34da8dea
|
1b4e7db5f90bcb4f80803383a81d8613ebfdfeec
|
refs/heads/master
| 2023-08-31T06:38:33.963505
| 2023-08-26T16:33:48
| 2023-08-26T16:37:57
| 422,090,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,732
|
py
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for handling configurations."""
# pylint: disable=g-importing-member
import dataclasses
from dataclasses import field
from os import path
from typing import Any, Optional, Tuple
from absl import flags
from flax.core import FrozenDict
import gin
from internal import schedule
from internal import utils
import jax
import jax.numpy as jnp
configurables = {
'jnp': [
jnp.reciprocal,
jnp.log,
jnp.log1p,
jnp.exp,
jnp.sqrt,
jnp.square,
jnp.sum,
jnp.mean,
],
'jax.nn': [jax.nn.relu, jax.nn.softplus, jax.nn.silu],
'jax.nn.initializers.he_normal': [jax.nn.initializers.he_normal()],
'jax.nn.initializers.he_uniform': [jax.nn.initializers.he_uniform()],
'jax.nn.initializers.glorot_normal': [jax.nn.initializers.glorot_normal()],
'jax.nn.initializers.glorot_uniform': [
jax.nn.initializers.glorot_uniform()
],
}
for module, configurables in configurables.items():
for configurable in configurables:
gin.config.external_configurable(configurable, module=module)
@gin.configurable()
@dataclasses.dataclass
class Config:
"""Configuration flags for everything."""
# Paths.
checkpoint_dir: Optional[str] = None # Where to log checkpoints.
data_dir: Optional[str] = None # Input data directory.
# Representation.
triplane_resolution: int = 2048 # Planes will have dimensions (T, T) where
# T = triplane_resolution.
sparse_grid_resolution: int = 512 # Voxel grid will have dimensions (S, S, S)
# where S = sparse_grid_resolution.
num_samples_per_voxel: int = 1 # Only affects rendering from the baked
# representation.
data_block_size: int = 8 # Block size for the block-sparse 3D grid
# (see SNeRG).
range_features: Tuple[float, float] = field(
default_factory=lambda: (-7.0, 7.0)
) # Value range for appearance features.
range_density: Tuple[float, float] = field(
default_factory=lambda: (-14.0, 14.0)
) # Value range for density features.
# Control flow.
max_steps: int = 25000 # Number of optimization steps.
batch_size: int = 65536 # The number of rays/pixels in each batch.
render_chunk_size: int = 65536 # Chunk size for whole-image renderings.
checkpoint_every: int = 5000 # Steps to save a checkpoint.
print_every: int = 100 # Steps between printing losses.
train_render_every: int = 500 # Steps between validation renders
cast_rays_in_train_step: bool = True # If True, compute rays in train step.
gradient_accumulation_steps: int = 8 # Increase this value when running OOM.
stop_after_training: bool = False
stop_after_testing: bool = False
stop_after_compute_alive_voxels: bool = False
render_train_set: bool = False
model_seed: int = 6550634 # This seed is used to initialize model parameters.
# Loss weights.
data_loss_mult: float = 1.0 # Mult for the finest data term in the loss.
charb_padding: float = 0.001 # The padding used for Charbonnier loss.
interlevel_loss_mult: float = 1.0 # Mult. for the loss on the proposal MLP.
distortion_loss_mult: float = 0.01 # Multiplier on the distortion loss.
yu_sparsity_loss_mult: Optional[schedule.Schedule] = schedule.ConstSchedule(
0.01
) # Multiplier for sparsity loss.
num_random_samples: int = 2**17 # For sparsity loss
alpha_threshold: Optional[schedule.Schedule] = schedule.LogLerpSchedule(
start=10000, end=20000, v0=0.0005, v1=0.005, zero_before_start=True
) # Multiplier for alpha-culling-simulation loss.
param_regularizers: FrozenDict[str, Any] = FrozenDict({
'DensityAndFeaturesMLP_0/HashEncoding_0': (0.03, jnp.mean, 2, 1),
'PropMLP_0/PropHashEncoding_0': (0.03, jnp.mean, 2, 1),
}) # Fine-grained parameter regularization strength.
# Optimization.
lr_init: float = 1e-2 # The initial learning rate.
lr_final: float = 1e-3 # The final learning rate.
lr_delay_steps: int = 100 # The number of "warmup" learning steps.
lr_delay_mult: float = 0.01 # How severe the "warmup" should be.
adam_beta1: float = 0.9 # Adam's beta1 hyperparameter.
adam_beta2: float = 0.99 # Adam's beta2 hyperparameter.
adam_eps: float = 1e-15 # Adam's epsilon hyperparameter.
grad_max_norm: float = 0.001 # Gradient clipping magnitude, disabled if == 0.
grad_max_val: float = 0.0 # Gradient clipping value, disabled if == 0.
# Data loading.
dataset_loader: str = 'llff' # The type of dataset loader to use.
batching: str = 'all_images' # Batch composition, [single_image, all_images].
patch_size: int = 1 # Resolution of patches sampled for training batches.
factor: int = 4 # The downsample factor of images, 0 for no downsampling.
# Load images in COLMAP vs alphabetical ordering (affects heldout test set).
load_alphabetical: bool = True
forward_facing: bool = False # Set to True for forward-facing LLFF captures.
llffhold: int = 8 # Use every Nth image for the test set. Used only by LLFF.
# If true, use all input images for training.
llff_load_from_poses_bounds: bool = False # If True, load camera poses of
# LLFF data from poses_bounds.npy.
llff_use_all_images_for_training: bool = False
use_tiffs: bool = False # If True, use 32-bit TIFFs. Used only by Blender.
randomized: bool = True # Use randomized stratified sampling.
near: float = 0.2 # Near plane distance.
far: float = 1e6 # Far plane distance.
vocab_tree_path: Optional[str] = None # Path to vocab tree for COLMAP.
def define_common_flags():
flags.DEFINE_multi_string('gin_bindings', None, 'Gin parameter bindings.')
flags.DEFINE_multi_string('gin_configs', None, 'Gin config files.')
def load_config(save_config=True):
"""Load the config, and optionally checkpoint it."""
gin.parse_config_files_and_bindings(
flags.FLAGS.gin_configs, flags.FLAGS.gin_bindings, skip_unknown=True
)
config = Config()
if save_config and jax.host_id() == 0:
utils.makedirs(config.checkpoint_dir)
with utils.open_file(
path.join(config.checkpoint_dir, 'config.gin'), 'w'
) as f:
f.write(gin.config_str())
return config
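# A minimal, hedged usage sketch of the two helpers above, wrapped in a
# __main__ guard so it does not run on import; in practice this logic would
# live in a separate training script rather than in this module.
if __name__ == '__main__':
    from absl import app
    define_common_flags()
    def _demo_main(unused_argv):
        config = load_config(save_config=False)
        print(config.max_steps, config.batch_size)
    app.run(_demo_main)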
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
75c37893e9690925f4f47ba04135171f0874ad45
|
5cda8f2070c83341fc7218946213e11788ec7543
|
/reg_task/red_winedata.py
|
e387c7214cae7f6d523b5ff85e0d4aa5f47d7b54
|
[] |
no_license
|
masknugget/somethings
|
9b833abb2ee4df27118177f3d8c1523916fe957a
|
a5c0ceaf5e98c68715ce2b2bad89256b76c2118d
|
refs/heads/master
| 2020-09-14T17:18:56.128901
| 2020-03-21T08:29:30
| 2020-03-21T08:29:30
| 223,197,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,692
|
py
|
import numpy as np, pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, \
Ridge, Lasso, ElasticNet, SGDRegressor
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt, seaborn as sns
def get_scores(model, Xtest, ytest):
y_pred = model.predict(Xtest)
return np.sqrt(mean_squared_error(ytest, y_pred)), \
model.__class__.__name__
if __name__ == "__main__":
br = '\n'
d = dict()
X = np.load('data/X_red.npy')
y = np.load('data/y_red.npy')
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=0)
print('rmse (unscaled):')
rfr = RandomForestRegressor(random_state=0, n_estimators=100)
rfr.fit(X_train, y_train)
rmse, rfr_name = get_scores(rfr, X_test, y_test)
d['rfr'] = [rmse]
print(rmse, '(' + rfr_name + ')')
lr = LinearRegression().fit(X_train, y_train)
rmse, lr_name = get_scores(lr, X_test, y_test)
d['lr'] = [rmse]
print(rmse, '(' + lr_name + ')')
ridge = Ridge(random_state=0).fit(X_train, y_train)
rmse, ridge_name = get_scores(ridge, X_test, y_test)
d['ridge'] = [rmse]
print(rmse, '(' + ridge_name + ')')
lasso = Lasso(random_state=0).fit(X_train, y_train)
rmse, lasso_name = get_scores(lasso, X_test, y_test)
d['lasso'] = [rmse]
print(rmse, '(' + lasso_name + ')')
en = ElasticNet(random_state=0).fit(X_train, y_train)
rmse, en_name = get_scores(en, X_test, y_test)
d['en'] = [rmse]
print(rmse, '(' + en_name + ')')
sgdr = SGDRegressor(random_state=0, max_iter=1000, tol=0.001)
sgdr.fit(X_train, y_train)
rmse, sgdr_name = get_scores(sgdr, X_test, y_test)
print(rmse, '(' + sgdr_name + ')', br)
scaler = StandardScaler()
X_train_std = scaler.fit_transform(X_train)
X_test_std = scaler.transform(X_test) # reuse the scaler fitted on the training split
print('rmse scaled:')
lr_std = LinearRegression().fit(X_train_std, y_train)
rmse, lr_std_name = get_scores(lr_std, X_test_std, y_test)
print(rmse, '(' + lr_std_name + ')')
rr_std = Ridge(random_state=0).fit(X_train_std, y_train)
rmse, rr_std_name = get_scores(rr_std, X_test_std, y_test)
print(rmse, '(' + rr_std_name + ')')
lasso_std = Lasso(random_state=0).fit(X_train_std, y_train)
rmse, lasso_std_name = get_scores(lasso_std, X_test_std, y_test)
print(rmse, '(' + lasso_std_name + ')')
en_std = ElasticNet(random_state=0).fit(X_train_std, y_train)
rmse, en_std_name = get_scores(en_std, X_test_std, y_test)
print(rmse, '(' + en_std_name + ')')
sgdr_std = SGDRegressor(random_state=0, max_iter=1000, tol=0.001)
sgdr_std.fit(X_train_std, y_train)
rmse, sgdr_std_name = get_scores(sgdr_std, X_test_std, y_test)
d['sgdr_std'] = [rmse]
print(rmse, '(' + sgdr_std_name + ')', br)
pipe = Pipeline([('poly', PolynomialFeatures(degree=2)),
('linear', LinearRegression())])
model = pipe.fit(X_train, y_train)
rmse, poly_name = get_scores(model, X_test, y_test)
d['poly'] = [rmse]
print(PolynomialFeatures().__class__.__name__, '(rmse):')
print(rmse, '(' + poly_name + ')')
algo, rmse = [], []
for key, value in d.items():
algo.append(key)
rmse.append(value[0])
plt.figure('RMSE')
sns.set(style="whitegrid")
ax = sns.barplot(x=algo, y=rmse)
plt.title('Red Wine Algorithm Comparison')
plt.xlabel('regressor')
plt.ylabel('RMSE')
plt.show()
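# (hedged sketch, reusing the variables above) wrapping the scaler and the
# regressor in a single Pipeline keeps train/test scaling consistent by
# construction, instead of fitting StandardScaler separately on each split.
scaled_sgdr = Pipeline([('scale', StandardScaler()),
                        ('sgdr', SGDRegressor(random_state=0, max_iter=1000, tol=0.001))])
scaled_sgdr.fit(X_train, y_train)
rmse, name = get_scores(scaled_sgdr, X_test, y_test)
print(rmse, '(' + name + ' pipeline)')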
|
[
"946883098@qq.com"
] |
946883098@qq.com
|
a48484aee293a6b54e8aa54af7b99820b5111cd8
|
9cf6a19289e9335f32f1081832dff33e9f1fdc86
|
/examples/flask_ext/flask_ext.py
|
5f8b0aa4267df72ca84477842773615dfba3c469
|
[
"MIT"
] |
permissive
|
SmartManoj/quart
|
3f25e7c27d29d930139bea1d34d375f476c897ac
|
317562ea660edb7159efc20fa57b95223d408ea0
|
refs/heads/master
| 2020-06-06T10:25:45.512773
| 2019-06-09T20:11:02
| 2019-06-09T20:11:02
| 192,714,053
| 1
| 0
|
MIT
| 2019-06-19T10:51:28
| 2019-06-19T10:51:27
| null |
UTF-8
|
Python
| false
| false
| 2,110
|
py
|
import quart.flask_patch
from secrets import compare_digest
import flask_login
from quart import Quart, redirect, request, url_for
app = Quart(__name__)
app.secret_key = 'secret' # Create an actual secret key for production
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
# Rather than storing passwords in plaintext, use something like
# bcrypt or similar to store the password hash (a hedged sketch of this idea
# appears at the end of this file).
users = {'quart': {'password': 'secret'}}
class User(flask_login.UserMixin):
pass
@login_manager.user_loader
def user_loader(username):
if username not in users:
return
user = User()
user.id = username
return user
@login_manager.request_loader
def request_loader(request):
username = request.form.get('username')
password = request.form.get('password', '')
if username not in users:
return
user = User()
user.id = username
user.is_authenticated = compare_digest(password, users[username]['password'])
return user
@app.route('/', methods=['GET', 'POST'])
async def login():
if request.method == 'GET':
return '''
<form method='POST'>
<input type='text' name='username' id='username' placeholder='username'></input>
<input type='password' name='password' id='password' placeholder='password'></input>
<input type='submit' name='submit'></input>
</form>
'''
username = (await request.form)['username']
password = (await request.form)['password']
if username in users and compare_digest(password, users[username]['password']):
user = User()
user.id = username
flask_login.login_user(user)
return redirect(url_for('protected'))
return 'Bad login'
@app.route('/protected')
@flask_login.login_required
async def protected():
return 'Logged in as: ' + flask_login.current_user.id
@app.route('/logout')
async def logout():
flask_login.logout_user()
return 'Logged out'
@login_manager.unauthorized_handler
def unauthorized_handler():
return 'Unauthorized'
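# The comment above the `users` dict suggests hashing passwords instead of
# keeping them in plaintext. A minimal sketch of that idea, assuming werkzeug
# is installed (it is not imported by the original example); the helper below
# is illustrative and is not wired into the routes.
from werkzeug.security import generate_password_hash, check_password_hash
hashed_users = {'quart': {'password': generate_password_hash('secret')}}
def verify_password(username, password):
    record = hashed_users.get(username)
    return record is not None and check_password_hash(record['password'], password)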
|
[
"philip.graham.jones@googlemail.com"
] |
philip.graham.jones@googlemail.com
|
52cae2c239cddc1e62d8442bee69791c62f20002
|
23e55ab51e322a3c0f967976a84f42f70f8ab701
|
/tensorflow/python/ops/nn_loss_scaling_utilities_test.py
|
427972f5ce13f1401a30ba675a043d9c63486d56
|
[
"Apache-2.0"
] |
permissive
|
thangnvit/tensorflow
|
f58e7c2f95690f337361aa2973f2b84ac7e7f947
|
c83887196eb717af66a7b3f008e970b4a226ff8f
|
refs/heads/master
| 2021-02-21T17:51:56.030461
| 2020-03-06T07:55:33
| 2020-03-06T07:58:38
| 245,362,540
| 3
| 0
|
Apache-2.0
| 2020-03-06T08:05:41
| 2020-03-06T08:05:40
| null |
UTF-8
|
Python
| false
| false
| 7,872
|
py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for loss scaling utilities in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.platform import test as test_lib
class LossUtilitiesTest(test_lib.TestCase, parameterized.TestCase):
def setUp(self):
strategy_combinations.set_virtual_cpus_to_at_least(3)
super(LossUtilitiesTest, self).setUp()
def testComputeAverageLossGlobalBatchSize(self):
per_example_loss = [1, 2, 3, 4, 5]
loss = nn_impl.compute_average_loss(per_example_loss, global_batch_size=10)
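# Expected value: sum(per_example_loss) / global_batch_size = (1 + 2 + 3 + 4 + 5) / 10 = 1.5.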
self.assertEqual(self.evaluate(loss), 1.5)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_cpu_1_and_2
],
mode=["graph", "eager"]))
def testComputeAverageLossDefaultGlobalBatchSize(self, distribution):
# Without strategy - num replicas = 1
per_example_loss = constant_op.constant([2.5, 6.2, 5.])
loss = nn_impl.compute_average_loss(per_example_loss)
self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.) / 3)
# With strategy - num replicas = 2
with distribution.scope():
per_replica_losses = distribution.experimental_run_v2(
nn_impl.compute_average_loss, args=(per_example_loss,))
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.) / 3)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_cpu_1_and_2
],
mode=["graph", "eager"]))
def testComputeAverageLossSampleWeights(self, distribution):
with distribution.scope():
# Scalar sample weight
per_replica_losses = distribution.experimental_run_v2(
nn_impl.compute_average_loss,
args=([2., 4., 6.],),
kwargs={"sample_weight": 2})
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(self.evaluate(loss), (2. + 4. + 6.) * 2. / 3)
# Per example sample weight
per_replica_losses = distribution.experimental_run_v2(
nn_impl.compute_average_loss,
args=([2., 4., 6.],),
kwargs={"sample_weight": [0.3, 0.5, 0.2]})
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(
self.evaluate(loss), (2. * 0.3 + 4. * 0.5 + 6. * 0.2) / 3)
# Time-step sample weight
per_replica_losses = distribution.experimental_run_v2(
nn_impl.compute_average_loss,
args=([[2., 0.5], [4., 1.]],),
kwargs={"sample_weight": [[0.3, 0.7], [0.2, 0.8]]})
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(
self.evaluate(loss), (2. * 0.3 + 0.5 * 0.7 + 4. * 0.2 + 1. * 0.8) / 2)
def testComputeAverageLossInvalidSampleWeights(self):
with self.assertRaisesRegexp((ValueError, errors_impl.InvalidArgumentError),
(r"Incompatible shapes: \[3\] vs. \[2\]|"
"Dimensions must be equal")):
nn_impl.compute_average_loss([2.5, 6.2, 5.],
sample_weight=[0.2, 0.8],
global_batch_size=10)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_cpu_1_and_2
],
mode=["graph", "eager"]))
def testComputeAverageLossDtype(self, distribution):
with distribution.scope():
per_example_loss = constant_op.constant([2., 4., 6.],
dtype=dtypes.float64)
per_replica_losses = distribution.experimental_run_v2(
nn_impl.compute_average_loss,
args=(per_example_loss,),
kwargs={"sample_weight": 2})
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertEqual(loss.dtype, dtypes.float64)
def testComputeAverageLossInvalidRank(self):
per_example_loss = constant_op.constant(2)
# Static rank
with self.assertRaisesRegex(
ValueError, "Invalid value passed for `per_example_loss`. "
"Expected a tensor with at least rank 1,"):
nn_impl.compute_average_loss(per_example_loss)
with context.graph_mode():
# Dynamic rank
per_example_loss = array_ops.placeholder(dtype=dtypes.float32)
loss = nn_impl.compute_average_loss(per_example_loss)
with self.cached_session() as sess:
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Invalid value passed for `per_example_loss`. "
"Expected a tensor with at least rank 1."):
sess.run(loss, {per_example_loss: 2})
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_cpu_1_and_2
],
mode=["graph", "eager"]))
def testComputeAverageLossInCrossReplicaContext(self, distribution):
with distribution.scope():
with self.assertRaisesRegex(
RuntimeError,
"You are calling `compute_average_loss` in cross replica context"):
nn_impl.compute_average_loss([2, 3])
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_cpu_1_and_2
],
mode=["graph", "eager"]))
def testScaleRegularizationLoss(self, distribution):
# Without strategy - num replicas = 1
reg_losses = constant_op.constant([2.5, 6.2, 5.])
loss = nn_impl.scale_regularization_loss(reg_losses)
self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.))
# With strategy - num replicas = 2
with distribution.scope():
per_replica_losses = distribution.experimental_run_v2(
nn_impl.scale_regularization_loss, args=(reg_losses,))
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_cpu_1_and_2
],
mode=["graph", "eager"]))
def testScaleRegularizationLossInCrossReplicaContext(self, distribution):
with distribution.scope():
with self.assertRaisesRegex(
RuntimeError, "You are calling `scale_regularization_loss` in "
"cross replica context"):
nn_impl.scale_regularization_loss([2, 3])
if __name__ == "__main__":
test_lib.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
bd5e34f3398b5facd631a6575e61d6dee48981a9
|
b873ea1def0810f67834bf4926901b9a8fead362
|
/exam_preparation_10_21/problem_1 - taxi_express.py
|
214d77abc7ad1bb307fe421628f0f89d08382383
|
[] |
no_license
|
NikiDimov/SoftUni-Python-Advanced
|
20f822614fa0fa7de6ded3956fa8d40d589a4a86
|
d6c1fe886a3c27c82f03e5e4a6c670f0905d54e6
|
refs/heads/main
| 2023-08-23T17:42:32.063057
| 2021-10-25T10:32:03
| 2021-10-25T10:32:03
| 328,750,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 564
|
py
|
from collections import deque
customers = deque(int(el) for el in input().split(', '))
taxis = [int(el) for el in input().split(', ')]
total_time = 0
while customers and taxis:
if customers[0] <= taxis[-1]:
total_time += customers.popleft()
taxis.pop()
else:
taxis.pop()
if not customers:
print(f"All customers were driven to their destinations\nTotal time: {total_time} minutes")
if not taxis and customers:
print(f"Not all customers were driven to their destinations\nCustomers left: {', '.join(map(str,customers))}")
|
[
"niki.dimov86@gmail.com"
] |
niki.dimov86@gmail.com
|
b6df72d27ae81c311084682cded4083282df6a16
|
11c39c48a02d25f2dffab7db76649949a0cca5e5
|
/venv/bin/gunicorn_django
|
3793a5d4203c3914062d0b6f343e9a2816ec6fc6
|
[] |
no_license
|
lssdeveloper/djangoecommerce
|
3a1fb8e9208264e143142b112f7ed93fe3654dfe
|
f93b23dad7c4753cad23cb87f329226aacf1a2f6
|
refs/heads/main
| 2023-01-03T02:48:52.010251
| 2020-11-05T01:17:00
| 2020-11-05T01:17:00
| 310,119,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
#!/home/leandro/djangoecommerce/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.djangoapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"leandro.serra.10@gmail.com"
] |
leandro.serra.10@gmail.com
|
|
8a509f4b1924614dc3b87e2b87e2cb1716aa792c
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog_tags/initial_6208.py
|
93485a216bd02c33878868b948176cbbe6a5050b
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725
| 2016-11-30T16:23:16
| 2016-11-30T16:23:16
| 75,075,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,333
|
py
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog1_Anch" not in marker_sets:
s=new_marker_set('Cog1_Anch')
marker_sets["Cog1_Anch"]=s
s= marker_sets["Cog1_Anch"]
mark=s.place_marker((556, 471, 306), (0, 0, 1), 21.9005)
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((897, 721, 126), (1, 0.5, 0), 21.9005)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((695, 488, 509), (1, 0.5, 0), 21.9005)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((760, 54, 917), (1, 0.5, 0), 21.9005)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((737, 131, 568), (1, 0.87, 0), 21.9005)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((10, 544, 10), (1, 0.87, 0), 21.9005)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((919, 877, 152), (1, 0.87, 0), 21.9005)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((547, 784, 262), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((851, 120, 466), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((949, 188, 84), (0.97, 0.51, 0.75), 21.9005)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((153, 179, 743), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((742, 895, 140), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((664, 562, 878), (0.39, 0.31, 0.14), 21.9005)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((727, 688, 584), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((179, 781, 550), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((564, 808, 971), (0.6, 0.31, 0.64), 21.9005)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((141, 248, 291), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((625, 371, 591), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((411, 435, 483), (0.89, 0.1, 0.1), 21.9005)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((544, 429, 684), (0.3, 0.69, 0.29), 21.9005)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((318, 291, 443), (0.3, 0.69, 0.29), 21.9005)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
[
"batxes@gmail.com"
] |
batxes@gmail.com
|
a58d2d0f98b28216c8228e5c1ea84e9f433c6285
|
45de3aa97525713e3a452c18dcabe61ac9cf0877
|
/src/primaires/combat/commandes/bander/__init__.py
|
df15806a8f441e1a2a4bf8d37a4d5bfa54f3e48e
|
[
"BSD-3-Clause"
] |
permissive
|
stormi/tsunami
|
95a6da188eadea3620c70f7028f32806ee2ec0d1
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
refs/heads/master
| 2020-12-26T04:27:13.578652
| 2015-11-17T21:32:38
| 2015-11-17T21:32:38
| 25,606,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,123
|
py
|
# -*-coding:Utf-8 -*
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'bander'.
"""
from primaires.interpreteur.commande.commande import Commande
from primaires.objet.conteneur import SurPoids
class CmdBander(Commande):
"""Commande 'bander'.
"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "charger", "bend")
self.nom_categorie = "combat"
self.schema = "<jet:nom_objet> (avec/with <projectile:nom_objet>)"
self.aide_courte = "charge une arme de jet"
self.aide_longue = \
"Cette commande permet de charger une arme de jet que " \
"vous équipez. Elle prend en paramètre obligatoire le " \
"nom de l'arme. Si rien n'est précisé ensuite, le système " \
"cherchera le bon projectile dans vos conteneurs équipés " \
"et le placera automatiquement sur l'arme de jet. Sinon, " \
"vous pouvez préciser après le nom de l'arme de jet le " \
"mot-clé |cmd|avec|ff| (ou |cmd|with|ff| en anglais) suivi " \
"du nom du projectile. Vous devez dans tous les cas " \
"posséder le projectile indiqué."
def ajouter(self):
"""Méthode appelée lors de l'ajout de la commande à l'interpréteur"""
arme_de_jet = self.noeud.get_masque("jet")
arme_de_jet.proprietes["conteneurs"] = \
"(personnage.equipement.equipes, )"
projectile = self.noeud.get_masque("projectile")
projectile.proprietes["conteneurs"] = \
"(personnage.equipement.inventaire_simple.iter_objets_qtt(" \
"True), )"
projectile.proprietes["quantite"] = "True"
projectile.proprietes["conteneur"] = "True"
def interpreter(self, personnage, dic_masques):
"""Interprétation de la commande"""
personnage.agir("charger")
arme_de_jet = dic_masques["jet"].objet
if not arme_de_jet.est_de_type("arme de jet"):
personnage << "|err|Ceci n'est pas une arme de jet.|ff|"
return
if dic_masques["projectile"]:
projectiles = list(dic_masques["projectile"].objets_qtt_conteneurs)
projectile, qtt, conteneur = projectiles[0]
if not projectile.est_de_type("projectile"):
personnage << "|err|Ceci n'est pas un projectile.|ff|"
return
else:
projectile = conteneur = None
for objet in personnage.equipement.inventaire:
if objet.est_de_type("projectile") and objet.cle in \
arme_de_jet.projectiles_autorises:
projectile = objet
conteneur = objet.contenu
break
if projectile is None or conteneur is None:
personnage << "|err|Aucun projectile pour cette arme " \
"de jet ne peut être trouvé sur vous.|ff|"
return
if projectile.cle not in arme_de_jet.projectiles_autorises:
personnage << "|err|Vous ne pouvez utiliser {} avec " \
"{}.|ff|".format(arme_de_jet.get_nom(),
projectile.get_nom())
return
personnage << "Vous commencez à recharger {}.".format(
arme_de_jet.get_nom())
personnage.etats.ajouter("charger")
yield 1
if "charger" not in personnage.etats:
return
personnage.etats.retirer("charger")
# If the ranged weapon is already loaded
if arme_de_jet.projectile:
ancien_projectile = arme_de_jet.projectile
try:
personnage.ramasser(objet=ancien_projectile)
except SurPoids:
personnage.salle.objets_sol.ajouter(objet=ancien_projectile)
personnage << "{} glisse à terre.".format(
ancien_projectile.get_nom().capitalize())
else:
personnage << "Vous récupérez {}.".format(
ancien_projectile.get_nom())
arme_de_jet.projectile = None
conteneur.retirer(projectile)
arme_de_jet.script["charge"].executer(personnage=personnage,
arme=arme_de_jet, projectile=projectile)
arme_de_jet.projectile = projectile
personnage << "Vous bandez {} avec {}.".format(
arme_de_jet.get_nom(), projectile.get_nom())
personnage.salle.envoyer("{{}} bande {} avec {}.".format(
arme_de_jet.get_nom(), projectile.get_nom()), personnage)
|
[
"kredh@free.fr"
] |
kredh@free.fr
|
712ffc2d8089cd9cdf5177d269fd592d0b46f7db
|
50658c41dd6de3330d3795b116c0e0e1b38a41d4
|
/lib/daal/storages/__init__.py
|
bcaea9583775754402938acf7f54233f020ca15e
|
[] |
no_license
|
nolar/shortener
|
c01223f4d24f794cd5df3eb76a4beca81419c03a
|
05da766aeef7cac4df7a172aefd1d37d360083ac
|
refs/heads/master
| 2021-01-25T05:27:48.011877
| 2011-08-29T18:24:48
| 2011-08-29T18:24:48
| 2,200,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
# coding: utf-8
from ._base import Storage, StorageID
from ._base import StorageExpectationError, StorageItemAbsentError, StorageUniquenessError
from .wrapped import WrappedStorage
from .sdb import SDBStorage
from .mysql import MysqlStorage
|
[
"nolar@nolar.info"
] |
nolar@nolar.info
|
53acc62eb0a66fd52d0aac9c4d02ff50f8eec581
|
574e7b276c83dc3866c0401f51fba38031e1faf1
|
/setup.py
|
386ff893bd50d64a2733f87a81810a0d7977b6bf
|
[
"BSD-3-Clause"
] |
permissive
|
hiclib/iced
|
251805e411a4126a07f186c67b378d4e03320f16
|
53d7a936d841dba6ae0f8fe8d168e7f4553a62eb
|
refs/heads/master
| 2023-01-27T23:34:12.999341
| 2023-01-20T14:15:16
| 2023-01-20T14:15:16
| 29,135,229
| 30
| 38
|
NOASSERTION
| 2023-01-20T14:15:18
| 2015-01-12T12:50:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,443
|
py
|
import os
import sys
DISTNAME = 'iced'
DESCRIPTION = 'ICE normalization'
MAINTAINER = 'Nelle Varoquaux'
MAINTAINER_EMAIL = 'nelle.varoquaux@gmail.com'
VERSION = "0.6.0a0.dev0"
SCIPY_MIN_VERSION = '0.19.0'
NUMPY_MIN_VERSION = '1.16.0'
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
extras_require={
'alldeps': (
'numpy >= {0}'.format(NUMPY_MIN_VERSION),
'scipy >= {0}'.format(SCIPY_MIN_VERSION),
),
},
)
else:
extra_setuptools_args = dict()
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('iced')
return config
def setup_package():
metadata = dict(
configuration=configuration,
name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
version=VERSION,
scripts=['iced/scripts/ice'],
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'],
**extra_setuptools_args)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info',
'--version',
'clean'))):
# For these actions, NumPy is not required
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
|
[
"nelle.varoquaux@gmail.com"
] |
nelle.varoquaux@gmail.com
|
8060013fa68577e23d342f0850936dfbffc7c906
|
5bfa6d39ca5999f24d5c054cf26d4112156d6842
|
/Practice/Numpy/Polynomials.py
|
e95bad632063bb8b47132979c10f83ea795d01de
|
[] |
no_license
|
CompetitiveCode/hackerrank-python
|
3ad7f70f3d09149242b2ab6b27d0e4ec2a188837
|
898e6bf791791cbdeca9192c78c623a115b4c97b
|
refs/heads/master
| 2022-02-03T23:14:57.866923
| 2019-05-30T11:34:01
| 2019-05-30T11:34:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,304
|
py
|
#Answer to Polynomials
import numpy
a=list(map(float,input().split()))
x=int(input())
print(numpy.polyval(a,x))
"""
poly
The poly tool returns the coefficients of a polynomial with the given sequence of roots.
print(numpy.poly([-1, 1, 1, 10])) #Output : [ 1 -11 9 11 -10]
roots
The roots tool returns the roots of a polynomial with the given coefficients.
print(numpy.roots([1, 0, -1])) #Output : [-1. 1.]
polyint
The polyint tool returns an antiderivative (indefinite integral) of a polynomial.
print(numpy.polyint([1, 1, 1])) #Output : [ 0.33333333 0.5 1. 0. ]
polyder
The polyder tool returns the derivative of the specified order of a polynomial.
print(numpy.polyder([1, 1, 1, 1])) #Output : [3 2 1]
polyval
The polyval tool evaluates the polynomial at specific value.
print(numpy.polyval([1, -2, 0, 2], 4)) #Output : 34
polyfit
The polyfit tool fits a polynomial of a specified order to a set of data using a least-squares approach.
print(numpy.polyfit([0,1,-1, 2, -2], [0,1,1, 4, 4], 2))
#Output : [ 1.00000000e+00 0.00000000e+00 -3.97205465e-16]
The functions polyadd, polysub, polymul, and polydiv also handle proper addition, subtraction, multiplication, and division of polynomial coefficients, respectively.
"""
|
[
"admin@remedcu.com"
] |
admin@remedcu.com
|
4abe6e32b5d50900b94925c86cfa31cd58e03efc
|
09e8c92187ff8d7a726727041e2dd80850dcce3d
|
/interview_cake/CAKE_203_find_rotation_point.py
|
3353430fa0d222707af18761f0fd88a2d67630b8
|
[] |
no_license
|
kakru/puzzles
|
6dd72bd0585f526e75d026f3ba2446b0c14f60e0
|
b91bdf0e68605f7e517446f8a00b1e0f1897c24d
|
refs/heads/master
| 2020-04-09T09:47:31.341475
| 2019-05-03T21:24:41
| 2019-05-03T21:24:41
| 160,246,660
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,210
|
py
|
import unittest
# class Solution: # naive solution with O(n) time
# def findRotationPoint(self, words):
# """
# :type A: List[List[int]]
# :rtype: List[List[int]]
# """
# length = len(words)
# if length == 0:
# return None
# elif length == 1:
# return 0
# index = 1
# prev = now = words[0]
# while index < length:
# prev, now = now, words[index]
# if prev > now:
# return index
# index += 1
class Solution: # with binary search O(logn)
def findRotationPoint(self, words):
"""
:type words: List[str]
:rtype: int
"""
length = len(words)
left, right = 0, length-1
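# The list is sorted apart from one rotation, so the rotation point is the
# only index whose word is smaller than the word before it; each pass
# compares words[middle] with both endpoints and moves whichever endpoint
# keeps that point inside [left, right], halving the search range.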
while True:
middle = (left + right)//2
if words[left] < words[middle] > words[right]:
left = middle
elif words[left] > words[middle] < words[right]:
right = middle
else:
break
if right-left == length-1: # middle index never moved
return 0
else:
return middle+1
class BasicTest(unittest.TestCase):
def test_not_rotated(self):
input_ = [
'asymptote', # <-- rotates here!
'babka',
'banoffee',
'engender',
'karpatka',
'othellolagkage',
'ptolemaic',
'retrograde',
'supplant',
'undulate',
'xenoepist',
]
expected_output = 0
output = Solution().findRotationPoint(input_)
self.assertEqual(output, expected_output)
def test_1(self):
input_ = [
'ptolemaic',
'retrograde',
'supplant',
'undulate',
'xenoepist',
'asymptote', # <-- rotates here!
'babka',
'banoffee',
'engender',
'karpatka',
'othellolagkage',
]
expected_output = 5
output = Solution().findRotationPoint(input_)
self.assertEqual(output, expected_output)
def test_2(self):
input_ = [
'retrograde',
'supplant',
'undulate',
'xenoepist',
'asymptote', # <-- rotates here!
'babka',
'banoffee',
'engender',
'karpatka',
'othellolagkage',
]
expected_output = 4
output = Solution().findRotationPoint(input_)
self.assertEqual(output, expected_output)
def test_3(self):
input_ = [
'ptolemaic',
'retrograde',
'supplant',
'undulate',
'xenoepist',
'zzzzzz',
'asymptote', # <-- rotates here!
'babka',
'banoffee',
'engender',
'karpatka',
'othellolagkage',
]
expected_output = 6
output = Solution().findRotationPoint(input_)
self.assertEqual(output, expected_output)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
[
"karol@kruzelecki.com"
] |
karol@kruzelecki.com
|
a334b1604a06a8cca9a7a309ca067b58b98f81c6
|
b637e53b36ad083575b161eaa8371f0cc11981a2
|
/apps/Articulos_Cientificos/models.py
|
a096dbfd20585fd9a513898401accf4ae8f69bc2
|
[] |
no_license
|
cienciometrico2017/cienciometrico2018v2.0
|
d7d014f858296aa262649696a4d3bfceb0b9afec
|
22e8800c921e8c4890c4f52c9826532364a99a68
|
refs/heads/master
| 2020-03-20T22:04:26.710351
| 2018-07-26T04:28:26
| 2018-07-26T04:28:26
| 137,777,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,516
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from apps.Linea_Investigacion.models import linea_investigacion
from apps.Sub_Lin_Investigacion.models import sub_lin_investigacion
from apps.baseDatos.models import baseDatos
from apps.Revista.models import revista
from apps.palabraClave.models import palabraClave
from apps.ciudad.models import ciudad
from apps.pais.models import pais
from django.contrib.auth.models import User
# Create your models here.
class articulos_cientificos(models.Model):
Estado = (
('Receptado', 'Receptado'),
('En revisión', 'En revisión'),
('Aceptado', 'Aceptado'),
('Publicado', 'Publicado'),
)
Tipo = (
('Científico', 'Científico'),
('De revisión', 'De revisión'),
('Ensayo', 'Ensayo'),
('Reflexión', 'Reflexión'),
)
titulo = models.CharField(max_length=300, null=True, blank=True, unique=True)
estado = models.CharField(max_length=30, blank=True, null=True, choices=Estado)
iSSN = models.CharField(max_length=60, blank=True, null=True)
url = models.CharField(max_length=300, blank=True, null=True)
doi = models.CharField(max_length=300, blank=True, null=True)
fechaPublicacion = models.DateField(blank=True, null=True)
pais = models.ForeignKey(pais, blank=True, null=True)
ciudad = models.ForeignKey(ciudad, blank=True, null=True)
baseDatos = models.ManyToManyField(baseDatos, blank=True)
revista = models.ForeignKey(revista, blank=True)
volumen = models.CharField(max_length=150, blank=True, null=True)
numero = models.CharField(max_length=150, blank=True, null=True)
lineaInves = models.ForeignKey(linea_investigacion, max_length=150, blank=True, null=True)
SubLinea = models.ForeignKey(sub_lin_investigacion, max_length=150, blank=True, null=True)
resumen = models.TextField(blank=True, null=True)
palabraClave = models.ManyToManyField(palabraClave, blank=True)
documento = models.FileField(upload_to='articulo/', blank=True, null=True)
tipoArticulo = models.CharField(max_length=150, blank=True, null=True, choices=Tipo)
aprobado = models.CharField(max_length=150, blank=True, null=True)
comiteEditorial = models.CharField(max_length=150, blank=True, null=True)
estPub = models.BooleanField(blank=True)
desEstado = models.TextField(null=True, blank=True)
class Meta:
permissions = (
("ver_articulo", "ver articulo"),
)
|
[
"danilomoya19@gmail.com"
] |
danilomoya19@gmail.com
|
a2febb4a52872577529393b416dd06c38c3770c7
|
bee9379d610c8e7b524646c5f6fe26608bf54fe1
|
/detectron/ops/generate_proposals.py
|
b804c36f0c3664f92a804f9b1d5cfa10abb0d969
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
PacteraOliver/Detectron
|
8e6eeaa5c17506b8fa73a47d3df7c7dbe93a200a
|
b3ad58fe74b64b7594dac7537ce950f5e4f54235
|
refs/heads/master
| 2020-03-19T02:33:24.927162
| 2018-06-12T18:57:02
| 2018-06-12T18:57:02
| 135,638,922
| 3
| 0
|
Apache-2.0
| 2018-06-12T18:14:33
| 2018-05-31T21:54:07
|
Python
|
UTF-8
|
Python
| false
| false
| 7,996
|
py
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
import numpy as np
from detectron.core.config import cfg
import detectron.utils.boxes as box_utils
class GenerateProposalsOp(object):
"""Output object detection proposals by applying estimated bounding-box
transformations to a set of regular boxes (called "anchors").
"""
def __init__(self, anchors, spatial_scale, train):
self._anchors = anchors
self._num_anchors = self._anchors.shape[0]
self._feat_stride = 1. / spatial_scale
self._train = train
def forward(self, inputs, outputs):
"""See modeling.detector.GenerateProposals for inputs/outputs
documentation.
"""
# 1. for each location i in a (H, W) grid:
# generate A anchor boxes centered on cell i
# apply predicted bbox deltas to each of the A anchors at cell i
# 2. clip predicted boxes to image
# 3. remove predicted boxes with either height or width < threshold
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take the top pre_nms_topN proposals before NMS
# 6. apply NMS with a loose threshold (0.7) to the remaining proposals
# 7. take after_nms_topN proposals after NMS
# 8. return the top proposals
# predicted probability of fg object for each RPN anchor
scores = inputs[0].data
# predicted anchor transformations
bbox_deltas = inputs[1].data
# input image (height, width, scale), in which scale is the scale factor
# applied to the original dataset image to get the network input image
im_info = inputs[2].data
# 1. Generate proposals from bbox deltas and shifted anchors
height, width = scores.shape[-2:]
# Enumerate all shifted positions on the (H, W) grid
shift_x = np.arange(0, width) * self._feat_stride
shift_y = np.arange(0, height) * self._feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y, copy=False)
# Convert to (K, 4), K=H*W, where the columns are (dx, dy, dx, dy)
# shift pointing to each grid location
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# Broadcast anchors over shifts to enumerate all anchors at all positions
# in the (H, W) grid:
# - add A anchors of shape (1, A, 4) to
# - K shifts of shape (K, 1, 4) to get
# - all shifted anchors of shape (K, A, 4)
# - reshape to (K*A, 4) shifted anchors
num_images = inputs[0].shape[0]
A = self._num_anchors
K = shifts.shape[0]
all_anchors = self._anchors[np.newaxis, :, :] + shifts[:, np.newaxis, :]
all_anchors = all_anchors.reshape((K * A, 4))
rois = np.empty((0, 5), dtype=np.float32)
roi_probs = np.empty((0, 1), dtype=np.float32)
for im_i in range(num_images):
im_i_boxes, im_i_probs = self.proposals_for_one_image(
im_info[im_i, :], all_anchors, bbox_deltas[im_i, :, :, :],
scores[im_i, :, :, :]
)
batch_inds = im_i * np.ones(
(im_i_boxes.shape[0], 1), dtype=np.float32
)
im_i_rois = np.hstack((batch_inds, im_i_boxes))
rois = np.append(rois, im_i_rois, axis=0)
roi_probs = np.append(roi_probs, im_i_probs, axis=0)
outputs[0].reshape(rois.shape)
outputs[0].data[...] = rois
if len(outputs) > 1:
outputs[1].reshape(roi_probs.shape)
outputs[1].data[...] = roi_probs
def proposals_for_one_image(
self, im_info, all_anchors, bbox_deltas, scores
):
# Get mode-dependent configuration
cfg_key = 'TRAIN' if self._train else 'TEST'
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
min_size = cfg[cfg_key].RPN_MIN_SIZE
# Transpose and reshape predicted bbox transformations to get them
# into the same order as the anchors:
# - bbox deltas will be (4 * A, H, W) format from conv output
# - transpose to (H, W, 4 * A)
# - reshape to (H * W * A, 4) where rows are ordered by (H, W, A)
# in slowest to fastest order to match the enumerated anchors
bbox_deltas = bbox_deltas.transpose((1, 2, 0)).reshape((-1, 4))
# Same story for the scores:
# - scores are (A, H, W) format from conv output
# - transpose to (H, W, A)
# - reshape to (H * W * A, 1) where rows are ordered by (H, W, A)
# to match the order of anchors and bbox_deltas
scores = scores.transpose((1, 2, 0)).reshape((-1, 1))
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
if pre_nms_topN <= 0 or pre_nms_topN >= len(scores):
order = np.argsort(-scores.squeeze())
else:
# Avoid sorting possibly large arrays; First partition to get top K
# unsorted and then sort just those (~20x faster for 200k scores)
inds = np.argpartition(
-scores.squeeze(), pre_nms_topN
)[:pre_nms_topN]
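# Illustrative trace: for scores = [0.1, 0.9, 0.3, 0.8] and pre_nms_topN = 2,
# inds holds {1, 3} in arbitrary order; the argsort below then yields
# order = [1, 3], i.e. the two highest scores in descending order.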
order = np.argsort(-scores[inds].squeeze())
order = inds[order]
bbox_deltas = bbox_deltas[order, :]
all_anchors = all_anchors[order, :]
scores = scores[order]
# Transform anchors into proposals via bbox transformations
proposals = box_utils.bbox_transform(
all_anchors, bbox_deltas, (1.0, 1.0, 1.0, 1.0))
# 2. clip proposals to image (may result in proposals with zero area
# that will be removed in the next step)
proposals = box_utils.clip_tiled_boxes(proposals, im_info[:2])
# 3. remove predicted boxes with either height or width < min_size
keep = _filter_boxes(proposals, min_size, im_info)
proposals = proposals[keep, :]
scores = scores[keep]
# 6. apply loose nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
if nms_thresh > 0:
keep = box_utils.nms(np.hstack((proposals, scores)), nms_thresh)
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep]
return proposals, scores
def _filter_boxes(boxes, min_size, im_info):
"""Only keep boxes with both sides >= min_size and center within the image.
"""
# Scale min_size to match image scale
min_size *= im_info[2]
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
x_ctr = boxes[:, 0] + ws / 2.
y_ctr = boxes[:, 1] + hs / 2.
keep = np.where(
(ws >= min_size) & (hs >= min_size) &
(x_ctr < im_info[1]) & (y_ctr < im_info[0]))[0]
return keep
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
23f2bea2b14225dad77414ae3db666636d85bc98
|
f89d70fc8bf370ef4e2aa54c7ee0de3b4a053624
|
/examples/EC2InstanceSample.py
|
1de96374251ddaabaa2bd2c493498593a6a8dba3
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
yks0000/troposphere
|
a7622bff01c31f10dcb296d2ca353144e1d7f793
|
9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4
|
refs/heads/main
| 2022-04-28T03:51:42.770881
| 2022-04-15T15:15:01
| 2022-04-15T15:15:01
| 482,753,190
| 1
| 0
|
BSD-2-Clause
| 2022-04-18T07:20:42
| 2022-04-18T07:20:42
| null |
UTF-8
|
Python
| false
| false
| 2,299
|
py
|
# Converted from EC2InstanceSample.template located at:
# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/
import troposphere.ec2 as ec2
from troposphere import Base64, FindInMap, GetAtt, Output, Parameter, Ref, Template
template = Template()
keyname_param = template.add_parameter(
Parameter(
"KeyName",
Description="Name of an existing EC2 KeyPair to enable SSH "
"access to the instance",
Type="String",
)
)
template.add_mapping(
"RegionMap",
{
"us-east-1": {"AMI": "ami-7f418316"},
"us-west-1": {"AMI": "ami-951945d0"},
"us-west-2": {"AMI": "ami-16fd7026"},
"eu-west-1": {"AMI": "ami-24506250"},
"sa-east-1": {"AMI": "ami-3e3be423"},
"ap-southeast-1": {"AMI": "ami-74dda626"},
"ap-northeast-1": {"AMI": "ami-dcfa4edd"},
},
)
ec2_instance = template.add_resource(
ec2.Instance(
"Ec2Instance",
ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
InstanceType="t1.micro",
KeyName=Ref(keyname_param),
SecurityGroups=["default"],
UserData=Base64("80"),
)
)
template.add_output(
[
Output(
"InstanceId",
Description="InstanceId of the newly created EC2 instance",
Value=Ref(ec2_instance),
),
Output(
"AZ",
Description="Availability Zone of the newly created EC2 instance",
Value=GetAtt(ec2_instance, "AvailabilityZone"),
),
Output(
"PublicIP",
Description="Public IP address of the newly created EC2 instance",
Value=GetAtt(ec2_instance, "PublicIp"),
),
Output(
"PrivateIP",
Description="Private IP address of the newly created EC2 instance",
Value=GetAtt(ec2_instance, "PrivateIp"),
),
Output(
"PublicDNS",
Description="Public DNSName of the newly created EC2 instance",
Value=GetAtt(ec2_instance, "PublicDnsName"),
),
Output(
"PrivateDNS",
Description="Private DNSName of the newly created EC2 instance",
Value=GetAtt(ec2_instance, "PrivateDnsName"),
),
]
)
print(template.to_json())
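# (hedged sketch) instead of only printing, the rendered template could be
# written to a file for later use with CloudFormation; the filename below is
# illustrative.
with open("ec2_instance_sample.template.json", "w") as f:
    f.write(template.to_json())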
|
[
"mark@peek.org"
] |
mark@peek.org
|
d5bb51ef5a3cee89328ed10c67a8b8fd3f001861
|
2bdedcda705f6dcf45a1e9a090377f892bcb58bb
|
/src/main/output/business_word_uml_president_state_father/history.py
|
44a4e3fdcc4a16d61ba54303596d20b908eb412b
|
[] |
no_license
|
matkosoric/GenericNameTesting
|
860a22af1098dda9ea9e24a1fc681bb728aa2d69
|
03f4a38229c28bc6d83258e5a84fce4b189d5f00
|
refs/heads/master
| 2021-01-08T22:35:20.022350
| 2020-02-21T11:28:21
| 2020-02-21T11:28:21
| 242,123,053
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
const request = require('request');
const uuidv4 = require('uuid/v4');
/* Checks to see if the subscription key is available
as an environment variable. If you are setting your subscription key as a
string, then comment these lines out.
If you want to set your subscription key as a string, replace the value for
the Ocp-Apim-Subscription-Key header as a string. */
const subscriptionKey="844380b03ac2e822c304c3ffc5f2bb3d";
if (!subscriptionKey) {
throw new Error('Environment variable for your subscription key is not set.')
};
/* If you encounter any issues with the base_url or path, make sure that you are
using the latest endpoint: https://docs.microsoft.com/azure/cognitive-services/translator/reference/v3-0-translate */
function translateText(){
let options = {
method: 'POST',
baseUrl: 'https://api.cognitive.microsofttranslator.com/',
url: 'translate',
qs: {
'api-version': '3.0',
'to': ['']
},
headers: {
'f3714fe8d47433890ba7eaa3d9424e4d': subscriptionKey,
'Content-type': 'application/json',
'X-ClientTraceId': uuidv4().toString()
},
body: [{
'text': 'Hello World!'
}],
json: true,
};
request(options, function(err, res, body){
console.log(JSON.stringify(body, null, 4));
});
};
// Call the function to translate text.
translateText();
|
[
"soric.matko@gmail.com"
] |
soric.matko@gmail.com
|
adcdc60dec982562dd7453dba24bea1c337f66f5
|
86990e89998b2ac9e2aa3e79df4bd72c07173903
|
/server.py
|
5ff1c63e1b6522d95a7645d3b6e9bdd21616e387
|
[] |
no_license
|
StevenAWillis/counter.py
|
ce0dac1983de177101821cbf11b40a5dbf5c6b58
|
c60873e53e069a693d11383902b5215109cc3d78
|
refs/heads/master
| 2020-06-20T18:46:19.194569
| 2019-07-16T14:43:04
| 2019-07-16T14:43:04
| 197,212,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
from flask import Flask, render_template, request, redirect, session
app = Flask(__name__)
app.secret_key = 'keep it secret, keep it safe'
@app.route("/")
def count():
if 'count' not in session:
session['count'] = 0
else:
session['count'] += 1
if 'countv' not in session:
session['countv'] = 0
else:
session['countv'] += 1
return render_template("index.html", count_visits = session['count'],count_visits2 = session['countv'])
@app.route("/by2")
def count_2():
session['count'] += 1
return redirect("/" )
@app.route("/reset")
def count_reset():
session['count'] = 0
return redirect("/" )
@app.route("/manual_count",methods=['POST'])
def manual_count():
# session['manual_counter'] = request.form['number']
# session['count'] += int(session['manual_counter'])-1
session['count'] += int(request.form['number'])-1
return redirect("/" )
app.run(debug=True)
|
[
"you@example.com"
] |
you@example.com
|
37d399628b1fda5d720d6cfa764e556e40257814
|
dea7bc1fe176d090ffa426ffd982f3f8ddb8afa7
|
/Histogram_Equalization.py
|
48c9fb6dd1c7f0db61f7ddb0227333f5d4228d5f
|
[] |
no_license
|
cool229/Ashish-Kumar
|
f03f99a02d6c96ff94931b9026b079b7f6c8ffad
|
72b40c7916b2447c11a6548fbb0d72a25098a6eb
|
refs/heads/master
| 2020-03-29T20:10:41.810904
| 2019-03-10T09:42:47
| 2019-03-10T09:42:47
| 150,299,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,115
|
py
|
import cv2
import matplotlib.pyplot as plt
def main():
imgpath = "D:\\COURSES\\OpenCV\\Action\\standard_test_images\\lena_color_256.tif"
img = cv2.imread(imgpath, 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
R, G, B = cv2.split(img)
output1_R = cv2.equalizeHist(R)
output1_G = cv2.equalizeHist(G)
output1_B = cv2.equalizeHist(B)
output1 = cv2.merge((output1_R, output1_G, output1_B))
# clahe = cv2.createCLAHE()
clahe = cv2.createCLAHE(clipLimit = 2.0, tileGridSize = (8,8))
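# clipLimit caps how strongly any local histogram bin is amplified (limiting
# noise blow-up), while tileGridSize=(8, 8) equalizes each tile of an 8x8 grid
# separately before OpenCV blends the tile borders.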
output2_R = clahe.apply(R)
output2_G = clahe.apply(G)
output2_B = clahe.apply(B)
output2 = cv2.merge((output2_R, output2_G, output2_B))
outputs = [img, output1, output2]
titles = ['Original Image', 'Adjusted Histogram','CLAHE']
# outputs = [img, box, blur, gaussian]
for i in range(3):
plt.subplot(1, 3, i+1)
plt.imshow(outputs[i])
plt.title(titles[i])
plt.xticks([])
plt.yticks([])
plt.show()
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
cool229.noreply@github.com
|
b9248fdad90ce6ea4bd2265ba41473172071b84c
|
55bf9e277f3e222c3c0d5fc571f59c2454eca491
|
/scratch/delicatessen/main.py
|
688db894b2960acab3987a54a2596a102b923ee2
|
[
"MIT"
] |
permissive
|
megbedell/delicatessen
|
92dce685dcb808ddfcf8efb49a2d0dd9c1200a18
|
2f7217413c93a6ac76875724a8cc56b570065e4c
|
refs/heads/master
| 2022-12-14T01:51:06.414952
| 2020-09-08T21:23:38
| 2020-09-08T21:23:38
| 293,909,165
| 0
| 0
|
MIT
| 2020-09-08T19:35:19
| 2020-09-08T19:35:18
| null |
UTF-8
|
Python
| false
| false
| 3,123
|
py
|
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
ColumnDataSource,
Div,
Select,
MultiSelect,
Slider,
TextInput,
)
from bokeh.plotting import figure
from bokeh.models.tools import TapTool
from bokeh.models.callbacks import CustomJS
import os
PATH = os.path.abspath(os.path.dirname(__file__))
# Load an example dataset
data = np.loadtxt(
os.path.join(PATH, "data", "TESS-Gaia-mini.csv"), delimiter=",", skiprows=1
)
ra, dec, par, sid, _, _, ticid, tmag, dist = data.T
data = dict(ra=ra, dec=dec, dist=dist, ticid=ticid)
# Things the user can plot (label: parameter name)
axis_map = {"Right Ascension": "ra", "Declination": "dec", "Distance": "dist"}
# Input controls
x_axis = Select(
title="X Axis",
options=sorted(axis_map.keys()),
value="Right Ascension",
name="x_axis",
)
y_axis = Select(
title="Y Axis", options=sorted(axis_map.keys()), value="Declination"
)
s_axis = Select(
title="Marker Size", options=sorted(axis_map.keys()), value="Distance"
)
controls = [s_axis, x_axis, y_axis]
# Primary plot
source1 = ColumnDataSource(data=dict(x=[], y=[], size=[]))
plot1 = figure(
plot_height=600,
plot_width=700,
title="",
tooltips=[("TIC ID", "@ticid")],
tools="tap",
sizing_mode="scale_both",
)
plot1.circle(
x="x", y="y", source=source1, size="size", line_color=None,
)
taptool = plot1.select(type=TapTool)
# Secondary plot
source2 = ColumnDataSource(data=dict(x=[], y=[]))
plot2 = figure(
plot_height=300, plot_width=700, title="", sizing_mode="scale_both",
)
plot2.circle(
x="x", y="y", source=source2, line_color=None, color="black", alpha=0.1
)
# Events
def callback1(attr, old, new):
"""
Triggered when the user changes what we're plotting on the main plot.
"""
# Get the parameters to plot (x axis, y axis, and marker size)
x_name = axis_map[x_axis.value]
y_name = axis_map[y_axis.value]
s_name = axis_map[s_axis.value]
# Update the labels
plot1.xaxis.axis_label = x_axis.value
plot1.yaxis.axis_label = y_axis.value
# Update the data source
source1.data = dict(
x=data[x_name],
y=data[y_name],
size=data[s_name] / np.min(data[s_name]),
ticid=data["ticid"],
)
def callback2(attr, old, new):
"""
Triggered when the user selects a point on the main plot.
"""
# Get the TIC ID
ticid = source1.data["ticid"][source1.selected.indices[0]]
print("Fetching data for TIC ID {0}".format(ticid))
# TODO: Actually fetch the data from MAST.
# For now just populate with random numbers
source2.data = dict(x=np.arange(10000), y=np.random.randn(10000))
# Register the callbacks
source1.selected.on_change("indices", callback2)
for control in controls:
control.on_change("value", callback1)
# Display things on the page
inputs = column(*controls, width=320)
inputs.sizing_mode = "fixed"
l = column(row(inputs, plot1), plot2)
# Load and display the data
callback1(None, None, None)
# Go!
curdoc().add_root(l)
curdoc().title = "delicatessen"
|
[
"rodluger@gmail.com"
] |
rodluger@gmail.com
|
97f925580a92f0e296fd2c6ab77eaf73efcb812a
|
19102b3cc7a78b4f09d5e5eef3f7a93e33d8b988
|
/backend/api/serializers/model_year_report_vehicle.py
|
7d0463c011bac8eb464d731e893cdbd75be67554
|
[
"Apache-2.0"
] |
permissive
|
emi-hi/zeva
|
196b766096d2353b8ba57347b4946dce43c1b0a7
|
b395efe620a1b82c2ecee2004cca358d8407397e
|
refs/heads/master
| 2023-04-16T15:20:29.240394
| 2023-03-21T21:44:08
| 2023-03-21T21:44:08
| 234,123,338
| 0
| 0
|
Apache-2.0
| 2020-01-15T16:27:38
| 2020-01-15T16:27:37
| null |
UTF-8
|
Python
| false
| false
| 2,295
|
py
|
from rest_framework.serializers import ModelSerializer, \
SlugRelatedField
from api.models.model_year_report_vehicle import ModelYearReportVehicle
from api.serializers.vehicle import VehicleZevTypeSerializer
from api.serializers.vehicle import ModelYearSerializer
from api.serializers.credit_transaction import CreditClassSerializer
from api.models.vehicle_zev_type import ZevType
from api.models.model_year import ModelYear
from api.models.model_year_report import ModelYearReport
from api.models.credit_class import CreditClass
class ModelYearReportVehicleSerializer(ModelSerializer):
zev_class = SlugRelatedField(
slug_field='credit_class',
queryset=CreditClass.objects.all()
)
model_year = SlugRelatedField(
slug_field='name',
queryset=ModelYear.objects.all()
)
vehicle_zev_type = SlugRelatedField(
slug_field='vehicle_zev_code',
queryset=ZevType.objects.all()
)
class Meta:
model = ModelYearReportVehicle
fields = (
'id', 'pending_sales', 'sales_issued', 'make', 'model_name',
'range', 'zev_class', 'model_year', 'vehicle_zev_type', 'update_timestamp',
)
class ModelYearReportVehicleSaveSerializer(ModelSerializer):
"""
Model Year Report Vehicle save serializer
"""
zev_class = SlugRelatedField(
slug_field='credit_class',
queryset=CreditClass.objects.all()
)
model_year = SlugRelatedField(
slug_field='name',
queryset=ModelYear.objects.all()
)
vehicle_zev_type = SlugRelatedField(
slug_field='vehicle_zev_code',
queryset=ZevType.objects.all()
)
def create(self, validated_data):
request = self.context.get('request')
model_id = request.data.get('model_year_report_id')
model_year_report_vehicle = ModelYearReportVehicle.objects.create(
**validated_data,
model_year_report=ModelYearReport.objects.get(id=model_id)
)
return model_year_report_vehicle
class Meta:
model = ModelYearReportVehicle
fields = (
'pending_sales', 'sales_issued', 'make', 'model_name', 'range',
'zev_class', 'model_year', 'vehicle_zev_type',
'model_year_report_id'
)
|
[
"noreply@github.com"
] |
emi-hi.noreply@github.com
|
2ab6c871fdd07dcd31a44b4c2ab1a93718ab24fc
|
4860fc856e6c75cc980c92399a2f673bf6ee06e2
|
/hello/migrations/0003_sitemessage_event_date.py
|
f7fac7459be5640425d79342fedc65881ff9c27e
|
[] |
no_license
|
RobertPastor/music-rungis
|
bcf994bfd515e3cdc220b12b32dd2cdead9a35c6
|
b69153ded934d1d317b828f2a1aa4dbdc5b2caae
|
refs/heads/master
| 2023-03-17T18:08:44.411749
| 2022-11-19T12:16:06
| 2022-11-19T12:16:06
| 62,034,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-04-13 19:35
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hello', '0002_sitemessage'),
]
operations = [
migrations.AddField(
model_name='sitemessage',
name='event_date',
field=models.DateField(default=datetime.datetime(2016, 4, 13, 21, 35, 9, 134000)),
),
]
|
[
"robert.pastor0691@orange.fr"
] |
robert.pastor0691@orange.fr
|
9992ae849702757f553f0c77125a04659fad1629
|
6e885227c59b5b8a5a7359beb938139fca98a16f
|
/contacts/wsgi.py
|
ec24a26034a81360f3da321164adb11883330151
|
[] |
no_license
|
BukhosiMoyo/contact_list_app
|
880ada2703a50014ca30ac6f1f65dac54a8fe49a
|
122df2f328f0fd375f28587112daf14190d50896
|
refs/heads/master
| 2023-02-23T17:49:20.942877
| 2021-01-14T21:18:39
| 2021-01-14T21:18:39
| 331,508,576
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
WSGI config for contacts project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'contacts.settings')
application = get_wsgi_application()
|
[
"bukhosizimcode@gmail.com"
] |
bukhosizimcode@gmail.com
|
723f727f775cbac7453aa196794d2a1531788594
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_1/bncdea001/question2.py
|
0a3ac49bba9175ed2a5208dcfbf1e0a3657c7dbc
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
# Program to check if time is valid
def main():
h=eval(input("Enter the hours: \n"))
m=eval(input("Enter the minutes: \n"))
s=eval(input("Enter the seconds: \n"))
if 0<=h<=23 and 0<=m<=59 and 0<=s<=59:
print("Your time is valid. ")
else:
print("Your time is invalid. ")
main()
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
9dfc3d5217726a4d5b436b42281e13f320ce4a7b
|
e7c6304677326cc40b33d72b7ee079ce3c14af4f
|
/getPredictionsSTS-BAlternatives_c.py
|
6f17588f1041ab7fc8cc466c71703d46b565b595
|
[
"MIT"
] |
permissive
|
m-hahn/fairseq
|
77f86676dd3a0793b616da89e8bc286b3c913da6
|
8508699326640a6a7a83ed4de17ac986e6213bbe
|
refs/heads/master
| 2023-02-24T02:56:20.477873
| 2021-01-29T01:38:59
| 2021-01-29T01:38:59
| 256,415,870
| 0
| 0
|
MIT
| 2020-05-01T02:02:19
| 2020-04-17T06:03:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,270
|
py
|
from fairseq.models.roberta import RobertaModel
roberta = RobertaModel.from_pretrained(
'checkpoints_STS-B/',
checkpoint_file='checkpoint_best.pt',
data_name_or_path='STS-B-bin'
)
import torch
label_fn = lambda label: roberta.task.label_dictionary.string(
torch.LongTensor([label + roberta.task.label_dictionary.nspecial])
)
ncorrect, nsamples = 0, 0
roberta.cuda()
roberta.eval()
evaluatedSoFar = set()
lineNumbers = 0
with open('/u/scr/mhahn/PRETRAINED/GLUE/glue_data/STS-B/dev_alternatives_c.tsv', "r") as fin:
with open('/u/scr/mhahn/PRETRAINED/GLUE/glue_data/STS-B/dev_alternatives_c_predictions_fairseq.tsv', "w") as outFile:
while True:
lineNumbers += 1
try:
line = next(fin).strip()
except UnicodeDecodeError:
print("UnicodeDecodeError", lineNumbers)
continue
if line == "#####":
originalSentences = next(fin) # the original
separation = int(next(fin).strip()) # position of separation
tokenizedSentences = next(fin)
line = next(fin)
#print(line)
subset, sentences = line.strip().split("\t")
sentences = sentences.strip().split(" ")
# print(sentences, separation)
sentences = [sentences[:separation], sentences[separation:]]
# print(sentences)
assert len(sentences[1]) > 1, (line, separation, sentences)
# quit()
for i in range(2):
sentences[i] = ("".join(sentences[i])).replace("▁", " ").replace("</s>", "").strip()
assert len(sentences[1]) > 1, (line, separation, sentences)
assert sentences[0].endswith("."), (line, separation, sentences)
# print(sentences)
if tuple(sentences) in evaluatedSoFar:
continue
evaluatedSoFar.add(tuple(sentences))
if len(evaluatedSoFar) % 100 == 0:
print(len(evaluatedSoFar), sentences)
tokens = roberta.encode(sentences[0], sentences[1])
# https://github.com/pytorch/fairseq/issues/1009
features = roberta.extract_features(tokens)
prediction = float(5.0 * roberta.model.classification_heads['sentence_classification_head'](features))
print("\t".join([sentences[0], sentences[1], str(prediction)]), file=outFile)
|
[
"mhahn29@gmail.com"
] |
mhahn29@gmail.com
|
c2237e88d3b71f74dceb4aafd4548925815cecce
|
d926e5308b5fe794d56e57d02041ea0c2436af6e
|
/dockerfiles/application/sonarqube/build.py
|
1cd7ed2db98961fe9ca7043b4c9550ff6ad80d4c
|
[] |
no_license
|
Amos-x/Blog-scripts
|
fa1db3805551aeff22fc326b302ec433ac595add
|
95427b45cfdd5c2de6b3f8f5b4b68983fe1c5751
|
refs/heads/master
| 2022-04-19T21:19:12.070218
| 2020-03-15T15:15:27
| 2020-03-15T15:15:27
| 208,993,845
| 0
| 0
| null | 2022-03-29T21:56:43
| 2019-09-17T07:59:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,092
|
py
|
# -*- coding:utf-8 -*-
# __author__ = Amos
# Email = 379833553@qq.com
# Create_at = 2018/11/8 3:48 PM
# FileName = build
from config import config as CONFIG
from utils.common import exec_shell,container_is_exist
def build_sonarqube():
if not container_is_exist('sonarqube'):
pull = 'docker pull sonarqube:7.1'
exec_shell(pull)
build = 'docker run -d --name sonarqube \
-p 9000:9000 \
-e SONARQUBE_JDBC_USERNAME={mysql_username} \
-e SONARQUBE_JDBC_PASSWORD={mysql_password} \
-e SONARQUBE_JDBC_URL=jdbc:mysql://{mysql_host}:3306/{soanr_db_name}?useUnicode=true\&characterEncoding=utf8\&rewriteBatchedStatements=true\&useConfigs=maxPerformance \
sonarqube:7.1'.format(mysql_host=CONFIG.MYSQL_HOST,mysql_username=CONFIG.MYSQL_USERNAME,
mysql_password=CONFIG.MYSQL_PASSWORD,soanr_db_name=CONFIG.MYSQL_NAME_SONARQUBE)
exec_shell(build)
exec_shell('docker start sonarqube')
else:
        print('sonarqube container already exists, skipping installation')
|
[
"379833553@qq.com"
] |
379833553@qq.com
|
d26571867c2f8a8198ce2da00ebd7c1366a86ecd
|
d8b1effe86a654d1831b56fdd8d6a9248b29fe01
|
/Week_4/Assignment_3/assignment3.py
|
6f35dc7ef13eaee69ac858ab419489061ddaf63d
|
[] |
no_license
|
Nidhiks2000/eyrc-mooc
|
b273e376b5ae31779469c48443dee4f73ade7c82
|
8e49668569a89700954165136ea29524143ff49f
|
refs/heads/master
| 2023-07-18T05:26:22.311527
| 2021-09-08T16:48:23
| 2021-09-08T16:48:23
| 404,424,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,017
|
py
|
# No other modules apart from 'csv' and 'datetime' need to be imported
# as they aren't required to solve the assignment
# Import required module/s
import csv
from datetime import datetime as dt
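# Sakamoto's algorithm: returns the day of the week for the date d/m/y, with 0 = Sunday.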
def dayofweek(d, m, y):
t = [ 0, 3, 2, 5, 0, 3,
5, 1, 4, 6, 2, 4 ]
y -= m < 3
return (( y + int(y / 4) - int(y / 100)
+ int(y / 400) + t[m - 1] + d) % 7)
def readWorkSheet(file_name):
"""Reads the input CSV file of Work Sheet and creates a mapping of date and office name where he worked.
Parameters
----------
file_name : str
CSV file name of Work Sheet
Returns
-------
dict
Mapping of the date and office name where he worked as { Key : Value } pair
Example
-------
>>> csv_file_name = 'week4_assignment3_sample.csv'
>>> print( readWorkSheet( csv_file_name ) )
{
'2021-03-26': 'A', '2021-04-01': 'B', '2021-04-20': 'B', '2021-04-04': '-', '2021-04-12': 'A', '2021-04-23': 'A',
'2021-04-03': 'B', '2021-03-29': 'A', '2021-03-28': '-', '2021-03-31': 'A', '2021-04-10': 'B', '2021-04-16': 'A',
'2021-04-24': 'B', '2021-04-11': '-', '2021-04-13': 'B'
}
"""
date_office_name_mapping = {}
input_file_obj = open(file_name, 'r')
############## ADD YOUR CODE HERE ##############
reader=csv.DictReader(input_file_obj)
for rows in reader:
now = rows['date']
x = now.split("-")
#print(x)
#print(dt.date(2020,7,24).strftime('%A'))
res = dayofweek(int(x[2]),int(x[1]),int(x[0]))
if(res!=0 and res%2!=0):
date_office_name_mapping[now] = "A"
elif (res!=0 and res%2 == 0):
date_office_name_mapping[now] = "B"
elif (res == 0):
date_office_name_mapping[now] = "-"
##################################################
input_file_obj.close()
return date_office_name_mapping
def calculateOfficeHrs(mapping_dict):
"""Calculate the number of hours worked in office A and B with the given mapping of date and office name.
Parameters
----------
mapping_dict : dict
Mapping of the date and office name where he worked as { Key : Value } pair
Returns
-------
tuple
Number of hours worked in office A and B as pair
Example
-------
>>> date_office_name_mapping = {
'2021-03-26': 'A', '2021-04-01': 'B', '2021-04-20': 'B', '2021-04-04': '-', '2021-04-12': 'A', '2021-04-23': 'A',
'2021-04-03': 'B', '2021-03-29': 'A', '2021-03-28': '-', '2021-03-31': 'A', '2021-04-10': 'B', '2021-04-16': 'A',
'2021-04-24': 'B', '2021-04-11': '-', '2021-04-13': 'B'
}
>>> print( calculateOfficeHrs( date_office_name_mapping ) )
(48, 36)
"""
no_hrs_office_A, no_hrs_office_B = 0, 0
############## ADD YOUR CODE HERE ##############
for key,value in mapping_dict.items():
if (value == "A"):
no_hrs_office_A+=8
elif(value == "B"):
no_hrs_office_B+=6
##################################################
return (no_hrs_office_A, no_hrs_office_B)
def writeOfficeWorkSheet(mapping_dict, out_file_name):
"""Writes a CSV file with date and office name where the person worked on each day.
Parameters
----------
mapping_dict : dict
Mapping of the date and office name where he worked as { Key : Value } pair
out_file_name : str
File name of CSV file for writing the data to
"""
output_file_obj = open(out_file_name, 'w')
############## ADD YOUR CODE HERE ##############
writer = csv.writer(output_file_obj,delimiter=',')
writer.writerow(['date','office_name'])
for key,value in mapping_dict.items():
writer.writerow([key,value])
##################################################
output_file_obj.close()
if __name__ == "__main__":
"""Main function, code begins here.
"""
csv_file_name = 'week4_assignment3_sample.csv'
date_office_name_mapping = readWorkSheet(csv_file_name)
print(date_office_name_mapping)
total_hrs_office_A_B = calculateOfficeHrs(date_office_name_mapping)
print(total_hrs_office_A_B)
out_csv_file_name = 'output_week4_assignment3_sample.csv'
writeOfficeWorkSheet(date_office_name_mapping, out_csv_file_name)
|
[
"Happysunshine.disroot.org"
] |
Happysunshine.disroot.org
|
145447a9d8ca15126c29c3cdd7b0d1c28972fe54
|
d2a181395347b6b7308cdbd9a411c79775a035c8
|
/tests/loggers/test_csv.py
|
dcdb6421c517f5474a910124471f2b301b058c20
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
rephrase-ai/pytorch-lightning
|
d30d552288d1bf6f65a605e5c8893583ecc58862
|
8bd7b1bdd7d3f723822e78908033cf0a6743713a
|
refs/heads/master
| 2023-06-06T11:32:41.765882
| 2021-06-23T12:09:53
| 2021-06-23T12:09:53
| 291,268,679
| 2
| 0
|
Apache-2.0
| 2020-08-29T12:38:33
| 2020-08-29T12:38:32
| null |
UTF-8
|
Python
| false
| false
| 3,466
|
py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import Namespace
import pytest
import torch
from pytorch_lightning.core.saving import load_hparams_from_yaml
from pytorch_lightning.loggers import CSVLogger
from pytorch_lightning.loggers.csv_logs import ExperimentWriter
def test_file_logger_automatic_versioning(tmpdir):
"""Verify that automatic versioning works"""
root_dir = tmpdir.mkdir("exp")
root_dir.mkdir("version_0")
root_dir.mkdir("version_1")
logger = CSVLogger(save_dir=tmpdir, name="exp")
assert logger.version == 2
def test_file_logger_manual_versioning(tmpdir):
"""Verify that manual versioning works"""
root_dir = tmpdir.mkdir("exp")
root_dir.mkdir("version_0")
root_dir.mkdir("version_1")
root_dir.mkdir("version_2")
logger = CSVLogger(save_dir=tmpdir, name="exp", version=1)
assert logger.version == 1
def test_file_logger_named_version(tmpdir):
"""Verify that manual versioning works for string versions, e.g. '2020-02-05-162402' """
exp_name = "exp"
tmpdir.mkdir(exp_name)
expected_version = "2020-02-05-162402"
logger = CSVLogger(save_dir=tmpdir, name=exp_name, version=expected_version)
logger.log_hyperparams({"a": 1, "b": 2})
logger.save()
assert logger.version == expected_version
assert os.listdir(tmpdir / exp_name) == [expected_version]
assert os.listdir(tmpdir / exp_name / expected_version)
@pytest.mark.parametrize("name", ['', None])
def test_file_logger_no_name(tmpdir, name):
"""Verify that None or empty name works"""
logger = CSVLogger(save_dir=tmpdir, name=name)
logger.save()
assert logger.root_dir == tmpdir
assert os.listdir(tmpdir / 'version_0')
@pytest.mark.parametrize("step_idx", [10, None])
def test_file_logger_log_metrics(tmpdir, step_idx):
logger = CSVLogger(tmpdir)
metrics = {
"float": 0.3,
"int": 1,
"FloatTensor": torch.tensor(0.1),
"IntTensor": torch.tensor(1),
}
logger.log_metrics(metrics, step_idx)
logger.save()
path_csv = os.path.join(logger.log_dir, ExperimentWriter.NAME_METRICS_FILE)
with open(path_csv, 'r') as fp:
lines = fp.readlines()
assert len(lines) == 2
assert all([n in lines[0] for n in metrics])
def test_file_logger_log_hyperparams(tmpdir):
logger = CSVLogger(tmpdir)
hparams = {
"float": 0.3,
"int": 1,
"string": "abc",
"bool": True,
"dict": {
'a': {
'b': 'c'
}
},
"list": [1, 2, 3],
"namespace": Namespace(foo=Namespace(bar='buzz')),
"layer": torch.nn.BatchNorm1d
}
logger.log_hyperparams(hparams)
logger.save()
path_yaml = os.path.join(logger.log_dir, ExperimentWriter.NAME_HPARAMS_FILE)
params = load_hparams_from_yaml(path_yaml)
assert all([n in params for n in hparams])
|
[
"noreply@github.com"
] |
rephrase-ai.noreply@github.com
|
d15f4682a12eaabed8dec8aae69d51acc2542be6
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_2/Omsg/PyRunPancakeLarge.py
|
001bb8508f54e1f99cced37f0cb802254ec1da0b
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,657
|
py
|
import numpy as np
# ---- config ---- #
FileInput="dataPancakesLarge.in"
FileOutput="dataPancakesLarge.out"
# ---------------- #
def start(pancakes):
pancakes=pancakes[::-1]
pan=[]
turns=0
for p in pancakes:
pan.append(p)
i=0
for p in pan:
if p=="-":
pan=turn_pancakes(pan,i)
turns=turns+1
i=i+1
return str(turns)
def build_pancakes(pan):
pancakes=""
for p in pan:
pancakes=pancakes+p
return pancakes
def turn_pancakes(pan,start):
i=0
for p in pan:
if i>=start:
if pan[i]=="-":
pan[i]="+"
else:
pan[i]="-"
i=i+1
return pan
def file_load():
check=[]
with open(FileInput) as f:
for line in f:
check.append(line)
return check
def normal_mode():
result = start("+-+")
print "------------------------------------"
print "Result: "+str(result)
print "------------------------------------"
def array_mode():
f = open(FileOutput, 'w')
check = file_load()
print check
for i in range(np.size(check)):
if i>0:
writeString = "Case #"+str(i)+": "+str(start(str(check[i]).replace("\n","")))
f.write(writeString+"\n")
print writeString
print "------------------------------------"
f.close()
if __name__ == "__main__":
print "------------------------------------"
print "Start program"
print "------------------------------------"
array_mode()
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
1504d4dc673f453ce8d6a9116235cb370f0c3b20
|
b74a332412c303921e8e067e83cbae47d64a1c9f
|
/common/libs/food/FoodService.py
|
0337a5785c2dbeb7523e2209c76d2f5e1b7cba13
|
[] |
no_license
|
whisnos/weix
|
aaaf490d8f80b7ea991fa8261a74cabff8c12da1
|
4ff6f5d03e8cc304c8b12c278488b284672210c0
|
refs/heads/master
| 2022-12-11T03:36:26.547935
| 2019-07-13T04:27:22
| 2019-07-13T04:27:22
| 195,082,638
| 0
| 0
| null | 2022-12-08T05:50:35
| 2019-07-03T15:35:17
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 858
|
py
|
# -*- coding: utf-8 -*-
from application import app,db
from common.models.food.FoodStockChangeLog import FoodStockChangeLog
from common.models.food.Food import Food
from common.libs.Helper import geneTime
class FoodService():
@staticmethod
def setStockChangeLog( food_id = 0,quantity = 0,note = '' ):
if food_id < 1:
return False
food_info = Food.query.filter_by( id = food_id ).first()
if not food_info:
return False
model_stock_change = FoodStockChangeLog()
model_stock_change.food_id = food_id
model_stock_change.unit = quantity
model_stock_change.total_stock = food_info.stock
model_stock_change.note = note
model_stock_change.created_time = geneTime()
db.session.add(model_stock_change)
db.session.commit()
return True
|
[
"whisnos@163.com"
] |
whisnos@163.com
|
df62c390162d75f6230cac5e9e41da256e08bfd0
|
f33b30743110532ddae286ba1b34993e61669ab7
|
/352. Data Stream as Disjoint Intervals.py
|
7fcf3a2a59b62c44815f299b37bcf78d3bfd76c8
|
[] |
no_license
|
c940606/leetcode
|
fe9dcee7a5daa4d52999d5f53253dd6dd33c348b
|
631df2ce6892a6fbb3e435f57e90d85f8200d125
|
refs/heads/master
| 2021-07-10T14:01:26.164966
| 2020-08-16T10:46:16
| 2020-08-16T10:46:16
| 186,588,449
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,905
|
py
|
import bisect
from typing import List
class SummaryRanges:
def __init__(self):
"""
Initialize your data structure here.
"""
self.res = []
def addNum(self, val: int) -> None:
loc = bisect.bisect_left(self.res, [val])
if loc < len(self.res):
if self.res[loc][0] == val:
return
if self.res[loc][0] > val:
if loc >= 1:
if self.res[loc - 1][1] >= val :
return
if self.res[loc - 1][1] + 1 == val and self.res[loc][0] - 1 == val:
self.res[loc - 1:loc + 1] = [[self.res[loc - 1][0], self.res[loc][1]]]
elif self.res[loc - 1][1] + 1 == val:
self.res[loc-1:loc] = [[self.res[loc-1][0], val]]
elif self.res[loc][0] - 1 == val:
self.res[loc:loc+1] = [[val, self.res[loc][1]]]
else:
if self.res[loc][0] - 1 == val:
self.res[loc:loc+1] = [[val, self.res[loc][1]]]
else:
self.res.insert(loc, [val, val])
else:
self.res.insert(loc, [val, val])
else:
if self.res[loc - 1][1] >= val:
return
elif self.res[loc - 1][1] + 1 == val:
self.res[loc - 1:loc] = [[self.res[loc - 1][0], val]]
else:
self.res.insert(loc, [val, val])
def getIntervals(self) -> List[List[int]]:
return self.res
# Your SummaryRanges object will be instantiated and called as such:
# obj = SummaryRanges()
# obj.addNum(val)
# param_2 = obj.getIntervals()
a = SummaryRanges()
a.addNum(1)
print(a.res)
a.addNum(3)
print(a.res)
a.addNum(7)
print(a.res)
a.addNum(2)
a.addNum(6)
print(a.getIntervals())
|
[
"762307667@qq.com"
] |
762307667@qq.com
|
79771f0560e4f83582b509058b21e9a77b8696ae
|
8c568d5ba0c4f05b10ac831d4961f34925d3db8e
|
/09_面向对象特性/yc_12_类属性.py
|
bb5951530999d19e6324a3e2a9c8366c183e082d
|
[] |
no_license
|
Yang-yc/Python
|
dbca12bf10f7eb628ab2676e56ea5dc8ebe025af
|
985bafccb45232e3c2e24d14f5a1e0dd1ff67065
|
refs/heads/master
| 2022-12-31T00:47:01.659889
| 2020-09-27T07:11:32
| 2020-09-27T07:11:32
| 285,573,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
class Tool(object):
    # Define a class attribute with an assignment statement to record how many Tool objects exist
count = 0
def __init__(self, name):
self.name = name
        # Increment the class attribute by 1
Tool.count += 1
# 1. Create tool objects
tool1 = Tool("斧头")
tool2 = Tool("榔头")
tool3 = Tool("水桶")
# 2. Print the total number of tool objects
print(Tool.count)
|
[
"ycc20121404@163.com"
] |
ycc20121404@163.com
|
37c6e185d005cfd7fee82133847c02321d343861
|
c3e0792872e6bc34299b64e532f20187ec92dd0b
|
/uvrnmt/imagesfromsentence.py
|
8f999fa08ebe4422ec2aa039219a1c39d6f59364
|
[] |
no_license
|
zubairabid/hybrid-mt
|
fc1fcb8c7b8e4837d1a8b383b9e0f6766cb32073
|
242e0588d2f6b694e5bc9b50b036e5a1c48c7a20
|
refs/heads/master
| 2022-06-10T14:15:46.959808
| 2020-05-06T02:24:28
| 2020-05-06T02:24:28
| 255,111,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,524
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import pickle
# In[2]:
def load_stopwords(path_to_stopwords):
stopwords = []
with open(path_to_stopwords, 'rb') as f:
stopwords = pickle.load(f)
return stopwords
# In[3]:
def load_index_from_word(path_to_en2id):
en2id = {}
with open(path_to_en2id, 'rb') as f:
en2id = pickle.load(f)
return en2id
# In[4]:
def load_lookup_table(path_to_lookup_table):
lookup_table = []
with open(path_to_lookup_table, 'rb') as f:
lookup_table = pickle.load(f)
return lookup_table
# In[5]:
def preprocess(sentences):
processed_sentences = []
for sentence in sentences:
processed_sentences.append(sentence.lower())
return processed_sentences
# In[6]:
def topics_from_dataset(sentences):
print("Generating topics and weights for dataset")
vectorizer = CountVectorizer()
transformer = TfidfTransformer()
tfidf = transformer.fit_transform(vectorizer.fit_transform(sentences))
topics = vectorizer.get_feature_names()
weights = tfidf.toarray()
return topics, weights
# In[7]:
def sentence_remove_stopwords(sentence, stopwords):
filtered_words = []
reduced_sentence = ''
wordlist = sentence.strip().split(' ')
for word in wordlist:
if word not in stopwords:
filtered_words.append(word)
reduced_sentence = ' '.join(filtered_words)
return reduced_sentence
# In[8]:
def topics_from_sentence(sentence_id, sentence, weights, topics):
top_topics = []
sentence_topics = []
weight = weights[sentence_id]
location = np.argsort(-weight)
limit = min(10, len(weight))
for i in range(limit):
if weight[location[i]] > 0.0:
top_topics.append(topics[location[i]])
for word in sentence.split():
if word.lower() in top_topics:
sentence_topics.append(word)
return sentence_topics
# In[9]:
def images_from_topics(sentence_topics, stopwords, en2id, lookup_table):
imagelist = []
for topic in sentence_topics:
if topic in en2id.keys() and not topic in stopwords:
if en2id[topic] in lookup_table:
#print('<', topic, '> is in lookup table')
#print(topic, lookup_table[en2id[topic]])
for image in lookup_table[en2id[topic]]:
if image > 0.0 and not image in imagelist:
imagelist.append(image)
else:
pass
#print('>', topic, '< not in lookup table')
else:
if topic not in en2id.keys():
pass
#print('|', topic, '| not in dictionary')
return imagelist
# In[10]:
def get_features(sentences, cap):
path_to_en2id = 'en2id.pkl'
path_to_stopwords = 'stopwords-en.pkl'
path_to_lookup_table = 'cap2image_en2fr.pickle'
sentences = preprocess(sentences)
images_for_sentence = []
en2id = load_index_from_word(path_to_en2id)
stopwords = load_stopwords(path_to_stopwords)
lookup_table = load_lookup_table(path_to_lookup_table)
topics, weights = topics_from_dataset(sentences)
for sentence_id, sentence in enumerate(sentences):
sentence_topics = topics_from_sentence(sentence_id, sentence, weights, topics)
imagelist = images_from_topics(sentence_topics, stopwords, en2id, lookup_table)
if not imagelist:
imagelist=[0]
images_for_sentence.append(imagelist)
feature_index = np.load('./data/train-resnet50-avgpool.npy')
batch_sentence_features = []
for i, dummy in enumerate(sentences):
sentence = sentences[i]
images = images_for_sentence[i]
sentence_features = []
for image in images:
image_feature = feature_index[image-1]
sentence_features.append(image_feature)
if len(sentence_features) > cap:
sentence_features = sentence_features[:cap]
elif len(sentence_features) < cap:
for j in range(cap-len(sentence_features)):
sentence_features.append(np.zeros((2048,), dtype=float ))
batch_sentence_features.append(sentence_features)
pt = np.array(batch_sentence_features)
return pt
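# --- A minimal usage sketch (assumed, not part of the original file): get_features pads or
# truncates each sentence's image features to `cap` ResNet-50 vectors of length 2048, so the
# result should have shape (num_sentences, cap, 2048); it needs the pickled lookup tables and
# the .npy feature index to be present on disk.
# feats = get_features(["two dogs play in the park", "a man rides a bicycle"], cap=5)
# print(feats.shape)  # expected: (2, 5, 2048)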
|
[
"zubairabid1999+github@gmail.com"
] |
zubairabid1999+github@gmail.com
|
7302e33689ecf7a8f5a508a4ca323c6c352a2fa7
|
69bcc45028038351a7f891025df1f8e7d4b855f1
|
/supervised_learning/0x04-error_analysis/2-precision.py
|
bbbb8199ffd49f17a12649012c3a6b52dc6fe389
|
[] |
no_license
|
linkjavier/holbertonschool-machine_learning
|
6db799844821d450fed2a33a8819cb8df0fef911
|
c7b6ea4c37b7c5dc41e63cdb8142b3cdfb3e1d23
|
refs/heads/main
| 2023-08-17T21:00:24.182003
| 2021-09-09T05:47:06
| 2021-09-09T05:47:06
| 304,503,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
#!/usr/bin/env python3
""" Precision Module """
import numpy as np
def precision(confusion):
""" Function that calculates the sensitivity for each
class in a confussion matrix
"""
classes, _ = confusion.shape
classPrecision = np.zeros(classes)
for classItem in range(classes):
classPrecision[classItem] = np.divide(
confusion[classItem][classItem], np.sum(confusion[:, classItem]))
return classPrecision
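# --- A minimal usage sketch (assumed, not part of the original file): for the 2x2
# confusion matrix [[5, 2], [1, 2]] the column sums are [6, 4], so the per-class
# precision should be [5/6, 2/4] ~= [0.8333, 0.5].
if __name__ == "__main__":
    example = np.array([[5, 2], [1, 2]])
    print(precision(example))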
|
[
"linkjavier@hotmail.com"
] |
linkjavier@hotmail.com
|
838c922568619e9c765ef08d146d4c44efb7f403
|
297497957c531d81ba286bc91253fbbb78b4d8be
|
/testing/web-platform/tests/tools/third_party/pytest/src/_pytest/_io/terminalwriter.py
|
578b4507e30a12c62aeff72ba90fca207718f8ec
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
marco-c/gecko-dev-comments-removed
|
7a9dd34045b07e6b22f0c636c0a836b9e639f9d3
|
61942784fb157763e65608e5a29b3729b0aa66fa
|
refs/heads/master
| 2023-08-09T18:55:25.895853
| 2023-08-01T00:40:39
| 2023-08-01T00:40:39
| 211,297,481
| 0
| 0
|
NOASSERTION
| 2019-09-29T01:27:49
| 2019-09-27T10:44:24
|
C++
|
UTF-8
|
Python
| false
| false
| 6,906
|
py
|
"""Helper functions for writing to terminals and files."""
import os
import shutil
import sys
from typing import Optional
from typing import Sequence
from typing import TextIO
from .wcwidth import wcswidth
from _pytest.compat import final
def get_terminal_width() -> int:
width, _ = shutil.get_terminal_size(fallback=(80, 24))
if width < 40:
width = 80
return width
def should_do_markup(file: TextIO) -> bool:
if os.environ.get("PY_COLORS") == "1":
return True
if os.environ.get("PY_COLORS") == "0":
return False
if "NO_COLOR" in os.environ:
return False
if "FORCE_COLOR" in os.environ:
return True
return (
hasattr(file, "isatty") and file.isatty() and os.environ.get("TERM") != "dumb"
)
@final
class TerminalWriter:
_esctable = dict(
black=30,
red=31,
green=32,
yellow=33,
blue=34,
purple=35,
cyan=36,
white=37,
Black=40,
Red=41,
Green=42,
Yellow=43,
Blue=44,
Purple=45,
Cyan=46,
White=47,
bold=1,
light=2,
blink=5,
invert=7,
)
def __init__(self, file: Optional[TextIO] = None) -> None:
if file is None:
file = sys.stdout
if hasattr(file, "isatty") and file.isatty() and sys.platform == "win32":
try:
import colorama
except ImportError:
pass
else:
file = colorama.AnsiToWin32(file).stream
assert file is not None
self._file = file
self.hasmarkup = should_do_markup(file)
self._current_line = ""
self._terminal_width: Optional[int] = None
self.code_highlight = True
@property
def fullwidth(self) -> int:
if self._terminal_width is not None:
return self._terminal_width
return get_terminal_width()
@fullwidth.setter
def fullwidth(self, value: int) -> None:
self._terminal_width = value
@property
def width_of_current_line(self) -> int:
"""Return an estimate of the width so far in the current line."""
return wcswidth(self._current_line)
def markup(self, text: str, **markup: bool) -> str:
for name in markup:
if name not in self._esctable:
raise ValueError(f"unknown markup: {name!r}")
if self.hasmarkup:
esc = [self._esctable[name] for name, on in markup.items() if on]
if esc:
text = "".join("\x1b[%sm" % cod for cod in esc) + text + "\x1b[0m"
return text
def sep(
self,
sepchar: str,
title: Optional[str] = None,
fullwidth: Optional[int] = None,
**markup: bool,
) -> None:
if fullwidth is None:
fullwidth = self.fullwidth
if sys.platform == "win32":
fullwidth -= 1
if title is not None:
N = max((fullwidth - len(title) - 2) // (2 * len(sepchar)), 1)
fill = sepchar * N
line = f"{fill} {title} {fill}"
else:
line = sepchar * (fullwidth // len(sepchar))
if len(line) + len(sepchar.rstrip()) <= fullwidth:
line += sepchar.rstrip()
self.line(line, **markup)
def write(self, msg: str, *, flush: bool = False, **markup: bool) -> None:
if msg:
current_line = msg.rsplit("\n", 1)[-1]
if "\n" in msg:
self._current_line = current_line
else:
self._current_line += current_line
msg = self.markup(msg, **markup)
try:
self._file.write(msg)
except UnicodeEncodeError:
msg = msg.encode("unicode-escape").decode("ascii")
self._file.write(msg)
if flush:
self.flush()
def line(self, s: str = "", **markup: bool) -> None:
self.write(s, **markup)
self.write("\n")
def flush(self) -> None:
self._file.flush()
def _write_source(self, lines: Sequence[str], indents: Sequence[str] = ()) -> None:
"""Write lines of source code possibly highlighted.
Keeping this private for now because the API is clunky. We should discuss how
to evolve the terminal writer so we can have more precise color support, for example
being able to write part of a line in one color and the rest in another, and so on.
"""
if indents and len(indents) != len(lines):
raise ValueError(
"indents size ({}) should have same size as lines ({})".format(
len(indents), len(lines)
)
)
if not indents:
indents = [""] * len(lines)
source = "\n".join(lines)
new_lines = self._highlight(source).splitlines()
for indent, new_line in zip(indents, new_lines):
self.line(indent + new_line)
def _highlight(self, source: str) -> str:
"""Highlight the given source code if we have markup support."""
from _pytest.config.exceptions import UsageError
if not self.hasmarkup or not self.code_highlight:
return source
try:
from pygments.formatters.terminal import TerminalFormatter
from pygments.lexers.python import PythonLexer
from pygments import highlight
import pygments.util
except ImportError:
return source
else:
try:
highlighted: str = highlight(
source,
PythonLexer(),
TerminalFormatter(
bg=os.getenv("PYTEST_THEME_MODE", "dark"),
style=os.getenv("PYTEST_THEME"),
),
)
return highlighted
except pygments.util.ClassNotFound:
raise UsageError(
"PYTEST_THEME environment variable had an invalid value: '{}'. "
"Only valid pygment styles are allowed.".format(
os.getenv("PYTEST_THEME")
)
)
except pygments.util.OptionError:
raise UsageError(
"PYTEST_THEME_MODE environment variable had an invalid value: '{}'. "
"The only allowed values are 'dark' and 'light'.".format(
os.getenv("PYTEST_THEME_MODE")
)
)
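# --- A minimal usage sketch (assumed, not part of the original module): on a
# colour-capable TTY, markup keyword arguments map to the ANSI codes in _esctable.
# tw = TerminalWriter()
# tw.sep("=", "session starts", bold=True)
# tw.line("collected 3 items", green=True)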
|
[
"mcastelluccio@mozilla.com"
] |
mcastelluccio@mozilla.com
|
a7d0db784cf881f05dba47cc0b12b2e1fbbdb62d
|
d8913c1512146bb42756f61ba0872d73179884eb
|
/listinghouse/serializers.py
|
80ebe2255ddd5aae19eed9132ea1d96f83bea056
|
[
"MIT"
] |
permissive
|
sahin88/Django_Rest_Framework_Redux_React_Estate_App_FullStack
|
2ed305c399edfab05ce3653e8bcaf36f09ae9015
|
10e31c4071bcebc0e4401f42084211d170b2ea56
|
refs/heads/main
| 2023-03-22T17:00:37.102265
| 2021-03-16T17:26:53
| 2021-03-16T17:26:53
| 319,297,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
from rest_framework import serializers
from .models import Listings
class ListingsSerializers(serializers.ModelSerializer):
class Meta:
model = Listings
fields = ('title', 'adress', 'city', 'state', 'price', 'house_type', 'sqft', 'open_house',
'sale_type', 'photo_main', 'bathrooms', 'bedrooms', 'photo_main', 'slug')
class ListingsDetailSerializers(serializers.ModelSerializer):
class Meta:
model = Listings
fields = '__all__'
lookup_field = 'slug'
|
[
"sahinmuratogur@gmail.com"
] |
sahinmuratogur@gmail.com
|
13fc739aab16ab38d7a30ba8b63752d9ac9fbcd2
|
ef9d0d7d305ed829ff3ef1c66869d80517eebfc0
|
/tfx/orchestration/portable/python_executor_operator_test.py
|
388a145199578452084c6774e4b14c14df31c77a
|
[
"Apache-2.0"
] |
permissive
|
Saiprasad16/tfx
|
22ee62ccef1ec4b6fbb4dfa1ece5d7f701918c94
|
c1e0704b2a83232469f55598efcdb7808b6c909f
|
refs/heads/master
| 2023-04-28T09:58:04.522405
| 2021-05-10T09:01:22
| 2021-05-10T09:02:37
| 366,007,194
| 1
| 0
|
Apache-2.0
| 2021-05-10T10:41:53
| 2021-05-10T10:40:57
| null |
UTF-8
|
Python
| false
| false
| 6,801
|
py
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.portable.python_executor_operator."""
import os
from typing import Any, Dict, List, Text
import tensorflow as tf
from tfx import types
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import python_executor_operator
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_result_pb2
from tfx.types import standard_artifacts
from tfx.utils import test_case_utils
from google.protobuf import text_format
class InprocessExecutor(base_executor.BaseExecutor):
"""A Fake in-process executor what returns execution result."""
def Do(
self, input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> execution_result_pb2.ExecutorOutput:
executor_output = execution_result_pb2.ExecutorOutput()
python_executor_operator._populate_output_artifact(executor_output,
output_dict)
return executor_output
class NotInprocessExecutor(base_executor.BaseExecutor):
"""A Fake not-in-process executor what writes execution result to executor_output_uri."""
def Do(self, input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
executor_output = execution_result_pb2.ExecutorOutput()
python_executor_operator._populate_output_artifact(executor_output,
output_dict)
with fileio.open(self._context.executor_output_uri, 'wb') as f:
f.write(executor_output.SerializeToString())
class InplaceUpdateExecutor(base_executor.BaseExecutor):
"""A Fake noop executor."""
def Do(self, input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
model = output_dict['output_key'][0]
model.name = 'my_model'
class PythonExecutorOperatorTest(test_case_utils.TfxTest):
def testRunExecutor_with_InprocessExecutor(self):
executor_sepc = text_format.Parse(
"""
class_path: "tfx.orchestration.portable.python_executor_operator_test.InprocessExecutor"
""", executable_spec_pb2.PythonClassExecutableSpec())
operator = python_executor_operator.PythonExecutorOperator(executor_sepc)
input_dict = {'input_key': [standard_artifacts.Examples()]}
output_dict = {'output_key': [standard_artifacts.Model()]}
exec_properties = {'key': 'value'}
stateful_working_dir = os.path.join(self.tmp_dir, 'stateful_working_dir')
executor_output_uri = os.path.join(self.tmp_dir, 'executor_output')
executor_output = operator.run_executor(
data_types.ExecutionInfo(
execution_id=1,
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
stateful_working_dir=stateful_working_dir,
execution_output_uri=executor_output_uri))
self.assertProtoPartiallyEquals(
"""
output_artifacts {
key: "output_key"
value {
artifacts {
}
}
}""", executor_output)
def testRunExecutor_with_NotInprocessExecutor(self):
executor_sepc = text_format.Parse(
"""
class_path: "tfx.orchestration.portable.python_executor_operator_test.NotInprocessExecutor"
""", executable_spec_pb2.PythonClassExecutableSpec())
operator = python_executor_operator.PythonExecutorOperator(executor_sepc)
input_dict = {'input_key': [standard_artifacts.Examples()]}
output_dict = {'output_key': [standard_artifacts.Model()]}
exec_properties = {'key': 'value'}
stateful_working_dir = os.path.join(self.tmp_dir, 'stateful_working_dir')
executor_output_uri = os.path.join(self.tmp_dir, 'executor_output')
executor_output = operator.run_executor(
data_types.ExecutionInfo(
execution_id=1,
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
stateful_working_dir=stateful_working_dir,
execution_output_uri=executor_output_uri))
self.assertProtoPartiallyEquals(
"""
output_artifacts {
key: "output_key"
value {
artifacts {
}
}
}""", executor_output)
def testRunExecutor_with_InplaceUpdateExecutor(self):
executor_sepc = text_format.Parse(
"""
class_path: "tfx.orchestration.portable.python_executor_operator_test.InplaceUpdateExecutor"
""", executable_spec_pb2.PythonClassExecutableSpec())
operator = python_executor_operator.PythonExecutorOperator(executor_sepc)
input_dict = {'input_key': [standard_artifacts.Examples()]}
output_dict = {'output_key': [standard_artifacts.Model()]}
exec_properties = {
'string': 'value',
'int': 1,
'float': 0.0,
# This should not happen on production and will be
# dropped.
'proto': execution_result_pb2.ExecutorOutput()
}
stateful_working_dir = os.path.join(self.tmp_dir, 'stateful_working_dir')
executor_output_uri = os.path.join(self.tmp_dir, 'executor_output')
executor_output = operator.run_executor(
data_types.ExecutionInfo(
execution_id=1,
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
stateful_working_dir=stateful_working_dir,
execution_output_uri=executor_output_uri))
self.assertProtoPartiallyEquals(
"""
output_artifacts {
key: "output_key"
value {
artifacts {
custom_properties {
key: "name"
value {
string_value: "my_model"
}
}
}
}
}""", executor_output)
if __name__ == '__main__':
tf.test.main()
|
[
"tensorflow-extended-nonhuman@googlegroups.com"
] |
tensorflow-extended-nonhuman@googlegroups.com
|
6c4eddee5209f157275e0c1c486d4a9000dd913a
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/nextGreater_20200626113110.py
|
7ce3a916d9cbe406ef38804518f0fc5d2f6c8ae5
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
# [4,5,2,25]
def nextGreater(arr):
    # For each element, scan the elements to its right and report the first
    # strictly greater one; -1 if no greater element exists.
    for i in range(len(arr)):
        nxt = -1
        for j in range(i + 1, len(arr)):
            if arr[j] > arr[i]:
                nxt = arr[j]
                break
        print(arr[i], '-------->', nxt)
nextGreater([4,5,2,25])
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
488d37a9557bbaa9ff0e1776cd2ed117da779dae
|
0ddcfcbfc3faa81c79e320c34c35a972dab86498
|
/puzzles/paint_house_iii.py
|
051c5ec114bf5ae06f30f68479b4daa2fe5f9c4f
|
[] |
no_license
|
IvanWoo/coding-interview-questions
|
3311da45895ac4f3c394b22530079c79a9215a1c
|
1312305b199b65a11804a000432ebe28d1fba87e
|
refs/heads/master
| 2023-08-09T19:46:28.278111
| 2023-06-21T01:47:07
| 2023-06-21T01:47:07
| 135,307,912
| 0
| 0
| null | 2023-07-20T12:14:38
| 2018-05-29T14:24:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,763
|
py
|
# https://leetcode.com/problems/paint-house-iii/
"""
There is a row of m houses in a small city, each house must be painted with one of the n colors (labeled from 1 to n), some houses that have been painted last summer should not be painted again.
A neighborhood is a maximal group of continuous houses that are painted with the same color.
For example: houses = [1,2,2,3,3,2,1,1] contains 5 neighborhoods [{1}, {2,2}, {3,3}, {2}, {1,1}].
Given an array houses, an m x n matrix cost and an integer target where:
houses[i]: is the color of the house i, and 0 if the house is not painted yet.
cost[i][j]: is the cost of paint the house i with the color j + 1.
Return the minimum cost of painting all the remaining houses in such a way that there are exactly target neighborhoods. If it is not possible, return -1.
Example 1:
Input: houses = [0,0,0,0,0], cost = [[1,10],[10,1],[10,1],[1,10],[5,1]], m = 5, n = 2, target = 3
Output: 9
Explanation: Paint houses of this way [1,2,2,1,1]
This array contains target = 3 neighborhoods, [{1}, {2,2}, {1,1}].
Cost of paint all houses (1 + 1 + 1 + 1 + 5) = 9.
Example 2:
Input: houses = [0,2,1,2,0], cost = [[1,10],[10,1],[10,1],[1,10],[5,1]], m = 5, n = 2, target = 3
Output: 11
Explanation: Some houses are already painted, Paint the houses of this way [2,2,1,2,2]
This array contains target = 3 neighborhoods, [{2,2}, {1}, {2,2}].
Cost of paint the first and last house (10 + 1) = 11.
Example 3:
Input: houses = [3,1,2,3], cost = [[1,1,1],[1,1,1],[1,1,1],[1,1,1]], m = 4, n = 3, target = 3
Output: -1
Explanation: Houses are already painted with a total of 4 neighborhoods [{3},{1},{2},{3}] different of target = 3.
Constraints:
m == houses.length == cost.length
n == cost[i].length
1 <= m <= 100
1 <= n <= 20
1 <= target <= m
0 <= houses[i] <= n
1 <= cost[i][j] <= 10^4
"""
from functools import cache
from math import inf
# bottom up
def min_cost(
houses: list[int], cost: list[list[int]], m: int, n: int, target: int
) -> int:
# dp[k][i][c] := min cost to form k groups with first i houses and last house paint with c
dp = [
[[inf for _ in range(n + 1)] for _ in range(m + 1)] for _ in range(target + 1)
]
# init values: 0 groups with first 0 houses is dummy
for c in range(n + 1):
dp[0][0][c] = 0
for k in range(1, target + 1):
for i in range(k, m + 1):
hi = houses[i - 1]
hj = houses[i - 2] if i >= 2 else 0
si, ei = (hi, hi) if hi else (1, n)
sj, ej = (hj, hj) if hj else (1, n)
for ci in range(si, ei + 1):
v = 0 if ci == hi else cost[i - 1][ci - 1]
for cj in range(sj, ej + 1):
# when ci == cj: same group
# when ci != cj: form new group
dp[k][i][ci] = min(
dp[k][i][ci], dp[k - int(ci != cj)][i - 1][cj] + v
)
ans = min(dp[target][m])
return -1 if ans == inf else ans
# top down
def min_cost(
houses: list[int], cost: list[list[int]], m: int, n: int, target: int
) -> int:
@cache
def dp(i: int, p: int, h: int) -> int:
"""
Args:
i (int): index
p (int): previous color
h (int): neighborhoods
Returns:
int: cost
"""
if (h > target) or (i == m and h != target):
return inf
if i == m:
return 0
if houses[i] != 0:
return dp(i + 1, houses[i], h + int(p != houses[i]))
best = inf
for nxt_c, cst in enumerate(cost[i], start=1):
best = min(best, dp(i + 1, nxt_c, h + int(p != nxt_c)) + cst)
return best
res = dp(0, 0, 0)
return res if res != inf else -1
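# --- A minimal usage sketch (assumed, not part of the original file): Example 1 from the
# problem statement above should give a minimum cost of 9 with the top-down solver.
if __name__ == "__main__":
    houses = [0, 0, 0, 0, 0]
    cost = [[1, 10], [10, 1], [10, 1], [1, 10], [5, 1]]
    print(min_cost(houses, cost, m=5, n=2, target=3))  # expected: 9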
|
[
"tyivanwu@gmail.com"
] |
tyivanwu@gmail.com
|
492e82e716c69c6f83f8eabf883c64187f99db7a
|
118546c7bf7fe3063ed68e1c6270b33ed500c3c9
|
/python8/ex08.py
|
f223d0f2db2d8d7c1744afd39fd84c04f31a3fa4
|
[] |
no_license
|
yoonah95/Python_practice
|
83b1070f1c95d57a9ea81d2ec3898521f98544f4
|
1e8fbded66e789ba77b3af5499520b8e8e01a6a1
|
refs/heads/master
| 2022-06-12T20:55:38.490142
| 2020-05-08T02:20:20
| 2020-05-08T02:20:20
| 256,125,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
import sys
import StringIO
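# Redirect stdout into an in-memory StringIO buffer, print a few lines, then restore the
# real stdout and read back everything that was captured (Python 2 style stdout capture).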
stdout = sys.stdout
sys.stdout = f = StringIO.StringIO()
print('Sample output')
print('good')
print('Good')
sys.stdout = stdout
s = f.getvalue()
print 'Done-------------'
print(s)
|
[
"yoon.a1@hanmail.net"
] |
yoon.a1@hanmail.net
|
9dc8de4d2758350b6b958b69e457c1d86f34e7aa
|
0eb599c3bbfa6e5b31516913b88cc9db3a1311ce
|
/AGC/agc041b.py
|
047f318a49e38b13fc421cfd7491200922169bae
|
[] |
no_license
|
Linus-MK/AtCoder
|
5b84dc88c2d2773d0f97ed18265d303290da7879
|
a587e89a9e0c2ab4d36b09176bcc95e901e14326
|
refs/heads/master
| 2022-11-25T05:37:12.148722
| 2022-11-17T16:04:10
| 2022-11-17T16:04:10
| 169,840,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
# Hmm, not obvious at first.
# Think from the point of view of someone who wants one particular problem selected: it does not need to be at the top.
# The top P problems are used, so tying for P-th place is enough.
# Clearly, sorting the votes does no harm.
# It is easier to reason about lowering counts than about raising them by voting.
# (Instead of raising P problems by 1, think of lowering the other N-P problems by 1; the effect is the same.)
# Sort in descending order.
# Suppose the "particular problem" is the k-th one.
# The top (P-1) problems are fixed. We are fine if problems P through k-1 can be lowered to the same count as problem k.
# The lowering is possible when:
#   * (count of problem P) - (count of problem k) <= number of voters M
#   * total number of decrements needed <= M * (N - V)
# These conditions are clearly necessary, but are they sufficient? Does satisfying both always make the lowering possible?
# It probably does.
# In practice, redoing this check from scratch every time is too slow,
# so examine only the difference and update the running total incrementally (prefix-sum style).
n, m, v, p = list(map(int, input().split()))
vote = list(map(int, input().split()))
vote.sort(reverse=True)
target_score = vote[p-1]  # the P-th problem
ans = p  # the first p problems obviously satisfy the condition
vote_num_to_match = 0
# print(vote)
for i in range(p, n):
vote_num_to_match += (vote[i-1] - vote[i]) * (i-(p-1))
# print(vote_num_to_match)
if target_score - vote[i] <= m and vote_num_to_match <= m * (n-v):
ans += 1
else:
break
print(ans)
|
[
"13600386+Linus-MK@users.noreply.github.com"
] |
13600386+Linus-MK@users.noreply.github.com
|
192d9b5bc4efbf95468b5095c4c8a2857a41666c
|
0910e259a9bd252300f19b2ff22049d640f19b1a
|
/ml/m16_pipeline_RS4_boston.py
|
fddc26054c201ab70e936aa04c527f0903fdb23a
|
[] |
no_license
|
kimtaeuk-AI/Study
|
c7259a0ed1770f249b78f096ad853be7424a1c8e
|
bad5a0ea72a0117035b5e45652819a3f7206c66f
|
refs/heads/master
| 2023-05-05T12:34:52.471831
| 2021-05-22T16:16:12
| 2021-05-22T16:16:12
| 368,745,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,361
|
py
|
# Combine a single preprocessing step with the model
import numpy as np
import tensorflow as tf
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV, RandomizedSearchCV
from sklearn.metrics import accuracy_score
from sklearn.svm import LinearSVC, SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline, make_pipeline
import timeit
start_time = timeit.default_timer()
import warnings
warnings.filterwarnings('ignore')
dataset = load_boston()
x = dataset.data
y = dataset.target
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=44)
# Pipeline handles preprocessing + the model, so the MinMaxScaler block below can be skipped
# from sklearn.preprocessing import MinMaxScaler
# scale = MinMaxScaler()
# scale.fit(x_train)
# x_train = scale.transform(x_train)
# x_test = scale.transform(x_test)
parameters = [
    {"svc__C" :[1,10,100,1000], "svc__kernel":["linear"]},  # C=1 with linear, C=10 with linear, ... 4 combinations
    {"svc__C" :[1,10,100], "svc__kernel":["rbf"], "svc__gamma":[0.001, 0.0001]},  # 3x2 = 6 combinations
    {"svc__C" :[1,10,100,1000], "svc__kernel":["sigmoid"],"svc__gamma":[0.001, 0.0001]}]  # 4x2 = 8 combinations
parameters = [
    {"mal__C" :[1,10,100,1000], "mal__kernel":["linear"]},  # C=1 with linear, C=10 with linear, ... 4 combinations
    {"mal__C" :[1,10,100], "mal__kernel":["rbf"], "mal__gamma":[0.001, 0.0001]},  # 3x2 = 6 combinations
    {"mal__C" :[1,10,100,1000], "mal__kernel":["sigmoid"],"mal__gamma":[0.001, 0.0001]}]  # 4x2 = 8 combinations
# Two underscores are required between the pipeline step name and the parameter name
# 2. Model
Pipe = Pipeline([('scale', MinMaxScaler()), ('mal', SVC())])  # combine MinMaxScaler and the SVC model; mind the brackets
# pipe = make_pipeline(StandardScaler(), SVC())  # the other way to build the same pipeline
# Why use Pipeline: the scaler is fit on the training folds only and re-applied per CV split, which avoids leakage, reduces overfitting, and ties preprocessing to the model
model = GridSearchCV(Pipe, parameters, cv=5)
model.fit(x_train, y_train)
results = model.score(x_test, y_test)
print('results : ', results)
# MinMaxScaler
# results : 0.9666666666666667
# StandardScaler
# results : 0.9666666666666667
|
[
"ki3123.93123@gmail.com"
] |
ki3123.93123@gmail.com
|
942af07212df99cf419268d0a99a758b26bcbd9b
|
262195faec1b59ff67067f2dc7e3eb7db8dba946
|
/src/follow.py
|
9a204856a7170ebd404ce32e362b91c1f6278f99
|
[
"MIT"
] |
permissive
|
sudeep0901/python
|
3a090ae2cd8a61e8e375cebb4722c051d2d766aa
|
7a50af12e72d21ca4cad7f2afa4c6f929552043f
|
refs/heads/master
| 2022-04-21T14:15:25.606241
| 2020-04-13T02:35:56
| 2020-04-13T02:35:56
| 155,167,294
| 0
| 0
|
MIT
| 2020-03-07T06:59:36
| 2018-10-29T07:08:06
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 432
|
py
|
'''
Created on Dec 7, 2018
@author: _patels13
'''
import time
def follow(thefile):
    thefile.seek(0, 2)  # jump to the end of the file, like `tail -f`
while True:
line = thefile.readline()
print(line)
if not line:
time.sleep(0.1)
continue
yield line
# Example use
if __name__ == '__main__':
logfile = open("access-log.log")
print(logfile)
for line in follow(logfile):
print(line)
|
[
"sudeep.tech.patel@gmail.com"
] |
sudeep.tech.patel@gmail.com
|
635df689e111bfcf60068fb814cf2a224fc3dc42
|
893656022f3d70c0fc0cab0e864c2600885125bb
|
/setup.py
|
a8886101c13999b4ad5286f9573fc2235eeb0662
|
[
"MIT"
] |
permissive
|
iaxyzHpi/froide
|
85fdd421db71afcf3ca83e2b0760e8328cd1d4b1
|
fa159d352e77960f5ee696a1271509ced31785d1
|
refs/heads/master
| 2020-04-19T19:24:43.468542
| 2019-01-30T16:32:10
| 2019-01-30T16:32:10
| 168,387,397
| 0
| 0
| null | 2019-01-30T17:46:42
| 2019-01-30T17:46:42
| null |
UTF-8
|
Python
| false
| false
| 2,016
|
py
|
#!/usr/bin/env python
import codecs
import re
import os
from setuptools import setup, find_packages
def read(*parts):
filename = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name="froide",
version=find_version("froide", "__init__.py"),
url='https://github.com/okfde/froide',
license='MIT',
description="German Freedom of Information Portal",
long_description=read('README.md'),
author='Stefan Wehrmeyer',
author_email='mail@stefanwehrmeyer.com',
packages=find_packages(),
scripts=['manage.py'],
install_requires=[
'Django',
'Markdown',
'celery',
'geoip2',
'django-elasticsearch-dsl',
'django-taggit',
'pytz',
'requests',
'python-magic',
'djangorestframework',
'djangorestframework-csv',
'djangorestframework-jsonp',
'python-mimeparse',
'django-configurations',
'django-storages',
'dj-database-url',
'django-cache-url',
'django-filter',
'phonenumbers',
'django-filingcabinet',
'icalendar',
],
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP'
]
)
|
[
"mail@stefanwehrmeyer.com"
] |
mail@stefanwehrmeyer.com
|
62cbd89d4141d9d2c4b797326954958585d031c9
|
4c03def55433d8fa736c59a6a00f8e3b0ab4bbe4
|
/scripts/aws/availability.py
|
54e89598682f7c97efb203d12e1d1599799b7d17
|
[
"MIT"
] |
permissive
|
stanford-futuredata/training_on_a_dime
|
58c9884e9621db8c56c4a2d189b8079d9bf6bc65
|
85f659572ff9da2701e5f309fbad7e828e6be46b
|
refs/heads/master
| 2022-11-22T21:14:09.685491
| 2020-02-21T00:49:45
| 2020-07-27T19:51:18
| 242,011,847
| 5
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,444
|
py
|
import argparse
from datetime import datetime
import signal
import json
import subprocess
import sys
import time
instances = {}
instance_types = {
("v100", 1): "p3.2xlarge",
("v100", 4): "p3.8xlarge",
("v100", 8): "p3.16xlarge",
("k80", 1): "p2.xlarge",
("k80", 8): "p2.8xlarge",
("k80", 16): "p2.16xlarge",
}
def signal_handler(sig, frame):
global instances
# Clean up all instances when program is interrupted.
for (zone, gpu_type, num_gpus) in instances:
[instance_id, _] = instances[(zone, gpu_type, num_gpus)]
if instance_id is not None:
delete_spot_instance(zone, instance_id)
sys.exit(0)
def launch_spot_instance(zone, gpu_type, num_gpus, instance_id):
instance_type = instance_types[(gpu_type, num_gpus)]
with open("specification.json.template", 'r') as f1, open("specification.json", 'w') as f2:
template = f1.read()
specification_file = template % (instance_type, zone)
f2.write(specification_file)
command = """aws ec2 request-spot-instances --instance-count 1 --type one-time --launch-specification file://specification.json"""
try:
spot_instance_request_id = None
print("[%s] Trying to create instance with %d GPU(s) of type %s in zone %s" % (
datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000Z'),
num_gpus, gpu_type, zone), file=sys.stderr)
output = subprocess.check_output(command, shell=True).decode()
return_obj = json.loads(output)
spot_instance_request_id = return_obj["SpotInstanceRequests"][0]["SpotInstanceRequestId"]
command = """aws ec2 describe-spot-instance-requests --spot-instance-request-id %s""" % (
spot_instance_request_id)
time.sleep(30)
output = subprocess.check_output(command, shell=True).decode()
return_obj = json.loads(output)
instance_id = return_obj["SpotInstanceRequests"][0]["InstanceId"]
print("[%s] Created instance %s with %d GPU(s) of type %s in zone %s" % (
datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000Z'),
instance_id, num_gpus, gpu_type, zone))
return [instance_id, True]
except Exception as e:
pass
if spot_instance_request_id is not None:
command = """aws ec2 cancel-spot-instance-requests --spot-instance-request-ids %s""" % (
spot_instance_request_id)
subprocess.check_output(command, shell=True)
print("[%s] Instance with %d GPU(s) of type %s creation failed" % (
datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000Z'), num_gpus, gpu_type))
return [None, False]
def monitor_spot_instance(zone, instance_id):
command = """aws ec2 describe-instances --instance-id %(instance_id)s""" % {
"instance_id": instance_id,
}
try:
output = subprocess.check_output(command, shell=True).decode()
if "running" in output:
print("[%s] Instance %s running in zone %s" % (
datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000Z'),
instance_id, zone))
return True
except Exception as e:
pass
print("[%s] Instance %s not running in zone %s" % (
datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000Z'), instance_id, zone))
# Delete spot instance in case it exists.
delete_spot_instance(zone, instance_id)
return False
def delete_spot_instance(zone, instance_id):
command = """aws ec2 terminate-instances --instance-ids %(instance_id)s""" % {
"instance_id": instance_id,
}
try:
output = subprocess.check_output(command, shell=True)
print("[%s] Successfully deleted instance %s" % (
datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000Z'), instance_id))
except:
return
def main(args):
global instances
for zone in args.zones:
for gpu_type in args.gpu_types:
for num_gpus in args.all_num_gpus:
instances[(zone, gpu_type, num_gpus)] = [None, False]
while True:
# Spin in a loop; try to launch spot instances of particular type if
# not running already. Check on status of instances, and update to
# "not running" as needed.
for (zone, gpu_type, num_gpus) in instances:
[instance_id, running] = instances[(zone, gpu_type, num_gpus)]
if instance_id is not None:
running = \
monitor_spot_instance(zone, instance_id)
if not running:
[instance_id, running] = \
launch_spot_instance(zone, gpu_type, num_gpus, instance_id)
instances[(zone, gpu_type, num_gpus)] = [instance_id, running]
time.sleep(600)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Get AWS spot instance availability')
parser.add_argument('--zones', type=str, nargs='+',
default=["us-east-1b", "us-east-1c"],
help='AWS availability zones')
parser.add_argument('--gpu_types', type=str, nargs='+',
default=["v100", "k80"],
help='GPU types')
parser.add_argument('--all_num_gpus', type=int, nargs='+',
default=[1, 8],
help='Number of GPUs per instance')
args = parser.parse_args()
signal.signal(signal.SIGINT, signal_handler)
main(args)
|
[
"deepakn94@gmail.com"
] |
deepakn94@gmail.com
|
8f08bea2b00b6368d534a49cc1fb79cce05d5036
|
4bc696d97f9fec7e5ce136593556007a8b889d5f
|
/server/apps/reportAdmin/serializers.py
|
64305a4b89bca1e9f3e6cba439f38354d22ba3bd
|
[] |
no_license
|
davidhorst/FirstDjangular
|
37224a72ebd1e487b4b07755b06432a99f572eaf
|
5d18577f8d52e7e276c2c850d33f929de8e77ee6
|
refs/heads/master
| 2021-06-12T09:34:21.103774
| 2016-12-13T14:53:24
| 2016-12-13T14:53:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
from rest_framework.serializers import ModelSerializer

from .models import Report


class ReportSerializer(ModelSerializer):
    class Meta:
        model = Report
|
[
"="
] |
=
|
a6aea563186b15e750ba2fdd61cbf03c3df667ad
|
14e7058adf766352a0b90b66b7dcf887105a481c
|
/portal/disciplines/forms.py
|
39c5085371b163ba74eb94f11e5700a74e0c2746
|
[
"BSD-2-Clause"
] |
permissive
|
brunogamacatao/portalsaladeaula
|
2b7f07f07c2518dd359f043483fbb27417f62aaf
|
9429e485aa37ffea3208339a807032e9230a3c84
|
refs/heads/master
| 2020-12-29T01:42:18.594281
| 2012-06-22T12:24:44
| 2012-06-22T12:24:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
__author__ = 'brunocatao'

from django import forms
from portal.models import Discipline
from django.utils.translation import ugettext as _


class DisciplineForm(forms.ModelForm):
    class Meta:
        model = Discipline
        fields = ('name', 'acronym', 'description', 'feed_url', 'twitter_id',
                  'registration_type', 'access_type', )
|
[
"brunogamacatao@gmail.com"
] |
brunogamacatao@gmail.com
|
f8ce9dd28dcb500d0b0228e76c9a387e5584278c
|
cee11bed1fd868fc87ef113f6062440cd190a40c
|
/detect/model/backbone/MobilenetV2.py
|
95ec1e952dae01010670f264bbf36bfb9f651726
|
[] |
no_license
|
Peiiii/plate_detect_recongnize_tf_py3
|
f1d41270c7e6ed1718cb9d0d46784d8c83701439
|
39a04ef6475cdbaf8b4ff6e6f729e5b28b24daf1
|
refs/heads/master
| 2020-07-10T12:30:17.863818
| 2019-08-25T07:52:44
| 2019-08-25T07:52:44
| 204,263,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,977
|
py
|
# coding: utf-8
import tensorflow as tf

from ...model.layers import *


def MobilenetV2(input_data, training):
    with tf.variable_scope('MobilenetV2'):
        conv = convolutional(name='Conv', input_data=input_data, filters_shape=(3, 3, 3, 32),
                             training=training, downsample=True, activate=True, bn=True)
        conv = inverted_residual(name='expanded_conv', input_data=conv, input_c=32, output_c=16,
                                 training=training, t=1)
        conv = inverted_residual(name='expanded_conv_1', input_data=conv, input_c=16, output_c=24, downsample=True,
                                 training=training)
        conv = inverted_residual(name='expanded_conv_2', input_data=conv, input_c=24, output_c=24, training=training)
        conv = inverted_residual(name='expanded_conv_3', input_data=conv, input_c=24, output_c=32, downsample=True,
                                 training=training)
        conv = inverted_residual(name='expanded_conv_4', input_data=conv, input_c=32, output_c=32, training=training)
        feature_map_s = inverted_residual(name='expanded_conv_5', input_data=conv, input_c=32, output_c=32,
                                          training=training)
        conv = inverted_residual(name='expanded_conv_6', input_data=feature_map_s, input_c=32, output_c=64,
                                 downsample=True, training=training)
        conv = inverted_residual(name='expanded_conv_7', input_data=conv, input_c=64, output_c=64, training=training)
        conv = inverted_residual(name='expanded_conv_8', input_data=conv, input_c=64, output_c=64, training=training)
        conv = inverted_residual(name='expanded_conv_9', input_data=conv, input_c=64, output_c=64, training=training)
        conv = inverted_residual(name='expanded_conv_10', input_data=conv, input_c=64, output_c=96, training=training)
        conv = inverted_residual(name='expanded_conv_11', input_data=conv, input_c=96, output_c=96, training=training)
        feature_map_m = inverted_residual(name='expanded_conv_12', input_data=conv, input_c=96, output_c=96,
                                          training=training)
        conv = inverted_residual(name='expanded_conv_13', input_data=feature_map_m, input_c=96, output_c=160,
                                 downsample=True, training=training)
        conv = inverted_residual(name='expanded_conv_14', input_data=conv, input_c=160, output_c=160, training=training)
        conv = inverted_residual(name='expanded_conv_15', input_data=conv, input_c=160, output_c=160, training=training)
        conv = inverted_residual(name='expanded_conv_16', input_data=conv, input_c=160, output_c=320, training=training)
        feature_map_l = convolutional(name='Conv_1', input_data=conv, filters_shape=(1, 1, 320, 1280),
                                      training=training, downsample=False, activate=True, bn=True)
        return feature_map_s, feature_map_m, feature_map_l
|
[
"1535376447@qq.com"
] |
1535376447@qq.com
|
491a906ed44c2bb3341d88b41d6bb070781fff0d
|
77717d0024c8597fec83600259ea5547abbc183a
|
/mmdet/apis/inference.py
|
2d4a987e2606fa6e324e8245d6eabe1877171244
|
[
"Apache-2.0"
] |
permissive
|
fengyouliang/wheat_detection
|
0a090ef5eda7f2c5463996f4795f9ce06dd04050
|
d056123426a1260c29b486cbb8e44a88a0a3c5bc
|
refs/heads/master
| 2022-11-17T15:09:29.113493
| 2020-07-18T13:47:34
| 2020-07-18T13:47:34
| 276,532,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,421
|
py
|
import warnings
import matplotlib.pyplot as plt
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmdet.core import get_classes
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector
from mmdet.ops import RoIAlign, RoIPool
def init_detector(config, checkpoint=None, device='cuda:0'):
"""Initialize a detector from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
config.model.pretrained = None
model = build_detector(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint)
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
warnings.simplefilter('once')
warnings.warn('Class names are not saved in the checkpoint\'s '
'meta data, use COCO classes by default.')
model.CLASSES = get_classes('coco')
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
class LoadImage(object):
def __call__(self, results):
if isinstance(results['img'], str):
results['filename'] = results['img']
results['ori_filename'] = results['img']
else:
results['filename'] = None
results['ori_filename'] = None
img = mmcv.imread(results['img'])
results['img'] = img
results['img_fields'] = ['img']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
def inference_detector(model, img):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
images.
Returns:
If imgs is a str, a generator will be returned, otherwise return the
detection results directly.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = dict(img=img)
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
# Use torchvision ops for CPU mode instead
for m in model.modules():
if isinstance(m, (RoIPool, RoIAlign)):
if not m.aligned:
# aligned=False is not implemented on CPU
# set use_torchvision on-the-fly
m.use_torchvision = True
warnings.warn('We set use_torchvision=True in CPU mode.')
# just get the actual data from DataContainer
data['img_metas'] = data['img_metas'][0].data
# forward the model
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
return result
async def async_inference_detector(model, img):
"""Async inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
images.
Returns:
Awaitable detection results.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = dict(img=img)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# We don't restore `torch.is_grad_enabled()` value during concurrent
# inference since execution can overlap
torch.set_grad_enabled(False)
result = await model.aforward_test(rescale=True, **data)
return result
def show_result_pyplot(model, img, result, score_thr=0.3, fig_size=(15, 10)):
"""Visualize the detection results on the image.
Args:
model (nn.Module): The loaded detector.
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
score_thr (float): The threshold to visualize the bboxes and masks.
fig_size (tuple): Figure size of the pyplot figure.
"""
if hasattr(model, 'module'):
model = model.module
img = model.show_result(img, result, score_thr=score_thr, show=False)
plt.figure(figsize=fig_size)
plt.imshow(mmcv.bgr2rgb(img))
plt.show()
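# --- Hedged usage sketch (not part of the original module) ---
# Shows how the three helpers above are typically chained together; the config
# and checkpoint paths are placeholders, not files shipped with this repo.
#
#   from mmdet.apis import init_detector, inference_detector, show_result_pyplot
#
#   config_file = 'configs/some_detector_config.py'    # assumed path
#   checkpoint_file = 'checkpoints/some_detector.pth'  # assumed path
#   model = init_detector(config_file, checkpoint_file, device='cuda:0')
#   result = inference_detector(model, 'demo.jpg')
#   show_result_pyplot(model, 'demo.jpg', result, score_thr=0.3)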
|
[
"1654388696@qq.com"
] |
1654388696@qq.com
|
bfd6a2a39dc4018db6176fae203a4a0bded8c670
|
1a04e02811c844ecf53cc041b104667e5c987a09
|
/vgrabber/datalayer/serializer/test.py
|
4fe5c4679330e91f2240bb6902bee4213cce9703
|
[] |
no_license
|
janjanech/vzdelavanieGui
|
dff17add6e6946063597d4c1eba5d6d76b6f5374
|
b2015f41f7cb1be1ecccf1c4778a91f43f8fba12
|
refs/heads/master
| 2021-10-24T16:21:24.911817
| 2019-01-15T17:03:49
| 2019-01-15T17:03:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
from lxml.etree import Element

from vgrabber.model import Test


class TestSerializer:
    __test: Test

    def __init__(self, test):
        self.__test = test

    def serialize(self):
        test_element = Element(
            'test',
            id=str(self.__test.id),
            name=self.__test.name,
            moodleid=str(self.__test.moodle_id)
        )

        return test_element
|
[
"janik@janik.ws"
] |
janik@janik.ws
|
0be06179167ae3177f62d6f0f00b960ebd3eacda
|
1a6919459bd4619bfef7527bc9c49ced3901e483
|
/tests/test_permissions_sql.py
|
a146e330aa370d7398c2e2786d0be3d5641e7cb2
|
[
"Apache-2.0"
] |
permissive
|
simonw/datasette-permissions-sql
|
870b1129b13377b812353183ba64e0bb69fa7339
|
e0103ea1c13389391a3e40241485df45739aa638
|
refs/heads/master
| 2022-10-09T19:47:28.383910
| 2020-06-12T07:03:35
| 2020-06-12T07:03:35
| 271,408,895
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,570
|
py
|
from datasette.app import Datasette
import httpx
import sqlite_utils
import pytest
def create_tables(conn):
db = sqlite_utils.Database(conn)
db["table_access"].insert_all(
[
{"user_id": 1, "database": "test", "table": "dogs"},
{"user_id": 2, "database": "test", "table": "dogs"},
{"user_id": 1, "database": "test", "table": "cats"},
]
)
db["cats"].insert({"name": "Casper"})
db["dogs"].insert({"name": "Cleo"})
db["other"].insert({"name": "Other"})
# user_id = 3 is banned from 'sqlite_master'
db["banned"].insert({"table": "other", "user_id": 3})
@pytest.fixture
async def ds(tmpdir):
filepath = tmpdir / "test.db"
ds = Datasette(
[filepath],
metadata={
"plugins": {
"datasette-permissions-sql": [
{
"action": "view-query",
"fallback": True,
"resource": ["test", "sqlite_master"],
"sql": """
SELECT
-1
FROM
banned
WHERE
user_id = :actor_id
""",
},
{
"action": "view-table",
"sql": """
SELECT
*
FROM
table_access
WHERE
user_id = :actor_id
AND "database" = :resource_1
AND "table" = :resource_2
""",
},
]
},
"databases": {
"test": {
"allow_sql": {},
"queries": {"sqlite_master": "select * from sqlite_master"},
}
},
},
)
await ds.get_database().execute_write_fn(create_tables, block=True)
return ds
@pytest.mark.asyncio
async def test_ds_fixture(ds):
assert {"table_access", "cats", "dogs", "banned", "other"} == set(
await ds.get_database().table_names()
)
@pytest.mark.parametrize(
"actor,table,expected_status",
[
(None, "dogs", 403),
(None, "cats", 403),
({"id": 1}, "dogs", 200),
({"id": 2}, "dogs", 200),
({"id": 1}, "cats", 200),
({"id": 2}, "cats", 403),
],
)
@pytest.mark.asyncio
async def test_permissions_sql(ds, actor, table, expected_status):
async with httpx.AsyncClient(app=ds.app()) as client:
cookies = {}
if actor:
cookies = {"ds_actor": ds.sign({"a": actor}, "actor")}
response = await client.get(
"http://localhost/test/{}".format(table), cookies=cookies
)
assert expected_status == response.status_code
@pytest.mark.parametrize(
"actor,expected_status", [(None, 200), ({"id": 1}, 200), ({"id": 3}, 403),],
)
@pytest.mark.asyncio
async def test_fallback(ds, actor, expected_status):
async with httpx.AsyncClient(app=ds.app()) as client:
cookies = {}
if actor:
cookies = {"ds_actor": ds.sign({"a": actor}, "actor")}
response = await client.get(
"http://localhost/test/sqlite_master", cookies=cookies
)
assert expected_status == response.status_code
|
[
"swillison@gmail.com"
] |
swillison@gmail.com
|
c9be03f3e886d8a684bee9b8789e37ca03bdd523
|
4720b2f296b21b60836510d1fe997d58026ff573
|
/remo/remozilla/admin.py
|
dc3cf6f8678f1beed6df00e8c775b45a04ceca17
|
[] |
no_license
|
seocam/remo
|
9bc9b9e52bfdbef87a5c333e4f4f2be14630ccba
|
879cbbb0132f12dff64dfbd4ed118d0f5169615f
|
refs/heads/master
| 2021-01-15T13:06:39.844096
| 2014-05-13T10:23:06
| 2014-05-13T10:23:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
from django.contrib import admin

from remo.remozilla.models import Bug, Status


class BugAdmin(admin.ModelAdmin):
    """Bug Admin."""
    list_display = ('__unicode__', 'summary', 'status', 'resolution')
    list_filter = ('status', 'resolution', 'council_vote_requested')
    search_fields = ('bug_id', )


admin.site.register(Bug, BugAdmin)
admin.site.register(Status)
|
[
"giorgos@mozilla.com"
] |
giorgos@mozilla.com
|
ce2abc1434b7dcd4474ff498d805bd178c9cf4cc
|
72ea8dbdbd68813156b76c077edb5a3806bf42ab
|
/synapse/lib/scrape.py
|
61310192069525156af418567b158186df1f3042
|
[
"Apache-2.0"
] |
permissive
|
williballenthin/synapse
|
5c6f197f5a3cb3566c48dc444770592e89d4152a
|
799854da814b79d6631e5cc2796c347bf4a80ce7
|
refs/heads/master
| 2020-12-24T14:19:12.530026
| 2017-03-16T20:30:38
| 2017-03-16T20:30:38
| 41,521,212
| 2
| 0
| null | 2015-08-28T02:01:50
| 2015-08-28T02:01:50
| null |
UTF-8
|
Python
| false
| false
| 2,356
|
py
|
import re
import synapse.data as s_data
import synapse.cortex as s_cortex
import synapse.lib.datfile as s_datfile
from synapse.common import *
tldlist = list(s_data.get('iana.tlds'))
tldlist.sort(key=lambda x: len(x))
tldlist.reverse()
tldcat = '|'.join(tldlist)
fqdn_re = r'((?:[a-z0-9_-]{1,63}\.){1,10}(?:%s))' % tldcat
scrape_types = [
('hash:md5', r'(?=(?:[^A-Za-z0-9]|^)([A-Fa-f0-9]{32})(?:[^A-Za-z0-9]|$))',{}),
('hash:sha1', r'(?=(?:[^A-Za-z0-9]|^)([A-Fa-f0-9]{40})(?:[^A-Za-z0-9]|$))',{}),
('hash:sha256', r'(?=(?:[^A-Za-z0-9]|^)([A-Fa-f0-9]{64})(?:[^A-Za-z0-9]|$))',{}),
('inet:url', r'\w+://[^ \'"\t\n\r\f\v]+',{}),
('inet:ipv4', r'(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)',{}),
('inet:tcp4', r'((?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?):[0-9]{1,5})',{}),
('inet:fqdn', r'(?:[^a-z0-9_.-]|^)((?:[a-z0-9_-]{1,63}\.){1,10}(?:%s))(?:[^a-z0-9_.-]|$)' % tldcat, {}),
('inet:email', r'(?:[^a-z0-9_.+-]|^)([a-z0-9_\.\-+]{1,256}@(?:[a-z0-9_-]{1,63}\.){1,10}(?:%s))(?:[^a-z0-9_.-]|$)' % tldcat, {} ),
]
regexes = { name:re.compile(rule,re.IGNORECASE) for (name,rule,opts) in scrape_types }
def scrape(text, data=None):
'''
Scrape types from a blob of text and return an ingest compatible dict.
'''
if data == None:
data = {}
for ptype,rule,info in scrape_types:
regx = regexes.get(ptype)
for valu in regx.findall(text):
yield (ptype,valu)
def getsync(text, tags=()):
ret = []
core = s_cortex.openurl('ram://')
with s_cortex.openurl('ram://'):
core.setConfOpt('enforce',1)
core.on('core:sync', ret.append)
for form,valu in scrape(text):
tufo = core.formTufoByFrob(form,valu)
for tag in tags:
core.addTufoTag(tufo,tag)
return ret
if __name__ == '__main__':
import sys
data = {}
for path in sys.argv[1:]:
byts = reqbytes(path)
text = byts.decode('utf8')
data = scrape(text,data=data)
#FIXME options for taging all / tagging forms / form props
print( json.dumps( {'format':'syn','data':data} ) )
#
#print( repr( data ) )
#def scanForEmailAddresses(txt):
#return [ m[0] for m in email_regex.findall(txt) ]
|
[
"invisigoth.kenshoto@gmail.com"
] |
invisigoth.kenshoto@gmail.com
|
cd852578580f51734828c8130405eaf66f147395
|
697af415566ba649502bd18751a6521ac526892c
|
/get_er2_mvis.py
|
27bdde56be929ded3f2d22aebef00f4cf2ef64c6
|
[] |
no_license
|
srbrodzik/impacts-scripts
|
df44c8f34746499b8397b5b1a4ad09859b4cc8d4
|
263c7545bbb912bbcea563a21d0619e5112b1788
|
refs/heads/master
| 2023-05-31T05:01:09.558641
| 2023-05-22T23:24:52
| 2023-05-22T23:24:52
| 215,638,568
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,561
|
py
|
#!/usr/bin/python3
# Inconsistent naming of daily subdirectories after unzip. Sometimes HH, other times HHMM
import os
import sys
import shutil
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from ftplib import FTP
from zipfile import ZipFile
def listFD(url, ext=''):
page = requests.get(url).text
#print page
soup = BeautifulSoup(page, 'html.parser')
return [url + '/' + node.get('href') for node in soup.find_all('a') if node.get('href').endswith(ext)]
def getImageHHMM(path):
flist = os.listdir(path)
hhmmList = []
for file in flist:
(base,ext) = os.path.splitext(file)
# assumes base is YYYYMMDDhhmmss
hhmm = base[8:12]
if hhmm not in hhmmList:
hhmmList.append(hhmm)
return hhmmList
if len(sys.argv) != 2:
print('Usage: sys.argv[0] [YYYY-MM-DD]')
sys.exit()
else:
date = sys.argv[1]
# User inputs
debug = 1
file_ext = 'zip'
#url = 'https://asp-archive.arc.nasa.gov/IMPACTS/N809NA/video_2022/'+date+'/MVIS'
url = 'https://asp-archive.arc.nasa.gov/IMPACTS/N809NA/still-images_2022/'+date+'/MVIS'
tempDir = "/tmp"
targetDirBase = "/home/disk/bob/impacts/images/MVIS"
catPrefix = 'aircraft.NASA_ER2'
catSuffix = 'MVIS'
ftpCatalogServer = 'catalog.eol.ucar.edu'
ftpCatalogUser = 'anonymous'
catalogDestDir = '/pub/incoming/catalog/impacts'
# Create image directory, if needed
targetDir = targetDirBase+'/'+date.replace('-','')
if not os.path.exists(targetDir):
os.makedirs(targetDir)
# Get filelist from url
urlFlist = listFD(url, file_ext)
# Save first file every minute
os.chdir(targetDir)
for file in urlFlist:
command = 'wget '+file
os.system(command)
# naming convention is:
# IMPACTS-MVIS_ER2_2022010815_R0_still-images-jpeg.zip
fname = os.path.basename(file)
(proj,plane,dateHour,junk,suffix) = fname.split('_')
# ONE OR THE OTHER - DUE TO INCONSISTENT DIRECTORY NAMING CONVENTIONS
#time = dateHour[-2:]+'00'
time = dateHour[-2:]
try:
with ZipFile(fname, 'r') as zip:
zip.extractall()
os.remove(fname)
if os.path.exists('__MACOSX'):
shutil.rmtree('__MACOSX')
os.chdir(targetDir+'/'+time)
for imgFile in os.listdir():
print(imgFile)
if '_' in imgFile or os.path.getsize(imgFile) == 0:
print(' {} removed'.format(imgFile))
os.remove(targetDir+'/'+time+'/'+imgFile)
else:
(base,ext) = os.path.splitext(imgFile)
hhmm = base[8:12]
if hhmm not in getImageHHMM(targetDir):
shutil.move(targetDir+'/'+time+'/'+imgFile,
targetDir+'/'+imgFile)
else:
os.remove(targetDir+'/'+time+'/'+imgFile)
os.chdir(targetDir)
os.rmdir(time)
except:
print('Unable to unzip {}'.format(fname))
"""
# Open ftp connection
catalogFTP = FTP(ftpCatalogServer,ftpCatalogUser)
catalogFTP.cwd(catalogDestDir)
# Rename jpg files & upload to catalog
for file in os.listdir(targetDir):
print(file)
(imageTime,ext) = os.path.splitext(file)
imageTime = imageTime[:-2]
catName = catPrefix+'.'+imageTime+'.'+catSuffix+ext
shutil.copy(targetDir+'/'+file,
tempDir+'/'+catName)
ftpFile = open(tempDir+'/'+catName,'rb')
catalogFTP.storbinary('STOR '+catName,ftpFile)
ftpFile.close()
os.remove(tempDir+'/'+catName)
# Close ftp connection
catalogFTP.quit()
"""
|
[
"brodzik@uw.edu"
] |
brodzik@uw.edu
|
e6371302f8591fb2a405866ff5192a52cc735e72
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_quadricepses.py
|
05b399424fc9c4dc2fe68ec222b1600fea37c16f
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
from xai.brain.wordbase.nouns._quadriceps import _QUADRICEPS


# class header
class _QUADRICEPSES(_QUADRICEPS, ):
    def __init__(self,):
        _QUADRICEPS.__init__(self)
        self.name = "QUADRICEPSES"
        self.specie = 'nouns'
        self.basic = "quadriceps"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
4dd9fda777a418611e522466a3fcad13b7b456bf
|
080a6b7be74dc2d2fac61e0bb60a5402533294de
|
/week7/bc-ints-avg-float.py
|
3104214222f46aab04b7ba40ece6c87de394fb3c
|
[] |
no_license
|
rosmoke/DCU-Projects
|
cfec4c59ba00beb68d174cf869952b7a88e5c1dc
|
1478f476e1d81756d00a206b8f5bfcd0a1094649
|
refs/heads/master
| 2021-01-20T17:03:59.642966
| 2016-06-23T15:06:46
| 2016-06-23T15:06:46
| 61,814,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
i = 0
integer = raw_input()
total = 0.0
while integer != "end":
    total = total + int(integer)
    integer = raw_input()
    i = i + 1
if i > 1:
    print total / i
else:
    print total
|
[
"danielasofiei@yahoo.ie"
] |
danielasofiei@yahoo.ie
|
6f7318386169ac167772e18f4034f3b8da28d5a7
|
f93fde3ad0c7f96710f8f8f8495adfa14484763b
|
/ld12/gene.py
|
d2678af23a96a69bc93a9d4e8569b51c75c8e227
|
[
"MIT"
] |
permissive
|
xapple/ld12
|
0f80b0b4fc353327779e3189d7152f110cc0cf78
|
e2dfc98beaec8d6dcecaec86fb7854ea5bb6f333
|
refs/heads/master
| 2021-01-10T01:52:50.282298
| 2016-04-04T19:19:34
| 2016-04-04T19:19:34
| 53,348,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
# Built-in modules #
import re

# First party modules #

# Third party modules #

###############################################################################
class Gene(object):
    """A DNA sequence with an ID associated and belonging to a genome."""

    def __repr__(self): return '<%s object %s>' % (self.__class__.__name__, self.name)
    def __str__(self): return str(self.seq.seq)
    def __len__(self): return len(self.seq)

    def __init__(self, seq, genome):
        self.seq = seq
        self.name = seq.id
        self.genome = genome
        self.annotation = None  # Filled in by the __init__.py
        self.raw_hits = []      # Filled in by the duplications.py
        self.best_tax = None    # Filled in by the duplications.py

    @property
    def long_name(self):
        """A more descriptive name"""
        return self.name + " (from " + self.genome.long_name + ")"

    @property
    def ribo_group(self):
        """If it is a ribosomal protein, what group is it part of ?"""
        results = re.findall("ribosomal protein ([LS][1-9]+)", self.annotation)
        if not results: return False
        else: return results[0]
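# --- Hedged example (not in the original source) ---
# Illustrates what ribo_group evaluates to, assuming annotation strings follow
# the "ribosomal protein L<number>/S<number>" wording the regex above expects:
#
#   re.findall("ribosomal protein ([LS][1-9]+)", "50S ribosomal protein L3")  # -> ['L3']
#   re.findall("ribosomal protein ([LS][1-9]+)", "DNA gyrase subunit A")      # -> []
#
# so ribo_group would return 'L3' for the first annotation and False for the second.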
|
[
"lucas.sinclair@me.com"
] |
lucas.sinclair@me.com
|
06e0ddc21cdd990cd36cfa9d2d2fcbe3eddc2d2e
|
10d89b6e07a7c72c385eb1d1c60a3e0ed9f9fc3c
|
/boss/report/views/phone_fee.py
|
ead5501cffbbe306fc0cb441b004269ec0037dac
|
[] |
no_license
|
cash2one/pt
|
2a4998a6627cf1604fb64ea8ac62ff1c227f0296
|
8a8c12375610182747099e5e60e15f1a9bb3f953
|
refs/heads/master
| 2021-01-20T00:36:43.779028
| 2016-11-07T03:27:18
| 2016-11-07T03:27:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,983
|
py
|
#coding: utf-8
"""
服务质量追踪-充话费
"""
from report_pub import *
@login_required
@permission_required(u'man.%s' % ReportConst.PHONE_FEE, raise_exception=True)
@add_common_var
def phone_fee(request, template_name):
app = request.GET.get("app")
report_check_app(request, app)
vers = get_app_versions(app)
channels = get_app_channels(app)
operators = get_report_filters("gy_fee_prod_isptype")
provinces = get_report_filters("gy_fee_prod_province")
faces = get_report_filters("gy_fee_prod_content")
faces.sort(key=lambda a: int(a))
product_type = get_product_type(ReportConst.PHONE_FEE)
cps = get_cp_info(product_type)
return report_render(request, template_name,{
"currentdate": get_datestr(1, "%Y-%m-%d"),
"operators": operators,
"provinces": provinces,
"faces": faces,
"cps": cps,
"vers": vers,
"channels": channels
})
@login_required
@permission_required(u'man.%s' % ReportConst.PHONE_FEE, raise_exception=True)
def phone_fee_ajax(request):
start_date = request.POST.get("start_date")
end_date = request.POST.get("end_date")
app = request.POST.get("app")
report_check_app(request, app)
ver = request.POST.get("ver")
channel = request.POST.get("channel")
operator = request.POST.get("operator")
province = request.POST.get("province")
face = request.POST.get("face")
cp = request.POST.get("cp")
result = get_service_quality_data(start_date, end_date, app, ver, channel, operator, province, face, cp, ReportConst.PHONE_FEE)
return HttpResponse(json.dumps(result))
@login_required
@permission_required(u'man.%s' % ReportConst.PHONE_FEE, raise_exception=True)
def phone_fee_csv(request):
start_date = request.GET.get("start_date")
end_date = request.GET.get("end_date")
app = request.GET.get("app")
report_check_app(request, app)
ver = request.GET.get("ver")
channel = request.GET.get("channel")
operator = request.GET.get("operator")
province = request.GET.get("province")
face = request.GET.get("face")
cp = request.GET.get("cp")
filename = '%s-质量追踪(%s-%s-%s).csv' % (ReportConst.PHONE_FEE, str(get_app_name(app)), str(start_date), str(end_date))
csv_data = [["日期",
"总单数",
"成功数",
"失败数",
"失败率",
"1分钟到账数",
"1分钟到账率",
"3分钟到账数",
"3分钟到账率",
"10分钟到账数",
"10分钟到账率",
"30分钟到账数",
"30分钟到账率",
"30分钟以上到账数",
"30分钟以上到账率"]]
csv_data.extend(get_service_quality_data(start_date, end_date, app, ver, channel, operator, province, face, cp, ReportConst.PHONE_FEE))
return get_csv_response(filename, csv_data)
|
[
"xl@putao.cn"
] |
xl@putao.cn
|
2032fcdbc5f7bfd3980087825cefef8a1b0f3e7e
|
9b9a02657812ea0cb47db0ae411196f0e81c5152
|
/repoData/arneb-django-export/allPythonContent.py
|
c1f0cbe7c45766156c8d3fdd4513c94e9d1ed073
|
[] |
no_license
|
aCoffeeYin/pyreco
|
cb42db94a3a5fc134356c9a2a738a063d0898572
|
0ac6653219c2701c13c508c5c4fc9bc3437eea06
|
refs/heads/master
| 2020-12-14T14:10:05.763693
| 2016-06-27T05:15:15
| 2016-06-27T05:15:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,667
|
py
|
__FILENAME__ = models
########NEW FILE########
__FILENAME__ = urls
from django.conf.urls.defaults import *
urlpatterns = patterns('export.views',
url(r'^database/$', 'export_database', {}, name="export_database"),
url(r'^database_s3/$', 'export_to_s3', {}, name="export_database_s3"),
url(r'^media/$', 'export_media', {}, name="export_mediaroot"),
url(r'^list_s3/$', 'list_s3', {}, name="export_list_s3"),
url(r'^$', 'export_index', {}, name="export_index"),
)
########NEW FILE########
__FILENAME__ = views
import os, time
from datetime import date
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from django.views.generic.simple import direct_to_template
from django.contrib.admin.views.decorators import staff_member_required
try:
import S3
except ImportError:
S3 = None
# default dump commands, you can overwrite these in your settings.
MYSQLDUMP_CMD = getattr(settings, 'MYSQLDUMP_CMD', '/usr/bin/mysqldump -h %s --opt --compact --skip-add-locks -u %s -p%s %s | bzip2 -c')
SQLITE3DUMP_CMD = getattr(settings, 'SQLITE3DUMP_CMD', 'echo ".dump" | /usr/bin/sqlite3 %s | bzip2 -c')
DISABLE_STREAMING = getattr(settings, 'DISABLE_STREAMING', False)
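# --- Hedged example (not part of the original app) ---
# As the comment above notes, the dump commands can be overridden from Django
# settings; a project-level settings.py could therefore contain something like:
#
#   MYSQLDUMP_CMD = '/usr/local/bin/mysqldump -h %s --single-transaction -u %s -p%s %s | bzip2 -c'
#   DISABLE_STREAMING = True
#
# (the mysqldump path and flags here are illustrative, not required values).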
@staff_member_required
def export_database(request):
"""
Dump the database directly to the browser
"""
if request.method == 'POST':
if settings.DATABASE_ENGINE == 'mysql':
cmd = MYSQLDUMP_CMD % (settings.DATABASE_HOST, settings.DATABASE_USER, settings.DATABASE_PASSWORD, settings.DATABASE_NAME)
elif settings.DATABASE_ENGINE == 'sqlite3':
cmd = SQLITE3DUMP_CMD % settings.DATABASE_NAME
else:
raise ImproperlyConfigured, "Sorry, django-export only supports mysql and sqlite3 database backends."
stdin, stdout = os.popen2(cmd)
stdin.close()
if DISABLE_STREAMING:
stdout = stdout.read()
response = HttpResponse(stdout, mimetype="application/octet-stream")
response['Content-Disposition'] = 'attachment; filename=%s' % date.today().__str__()+'_db.sql.bz2'
return response
return direct_to_template(request, 'export/export.html', {'what': _(u'Export Database')})
@staff_member_required
def export_media(request):
"""
Tar the MEDIA_ROOT and send it directly to the browser
"""
if request.method == 'POST':
stdin, stdout = os.popen2('tar -cf - %s' % settings.MEDIA_ROOT)
stdin.close()
if DISABLE_STREAMING:
stdout = stdout.read()
response = HttpResponse(stdout, mimetype="application/octet-stream")
response['Content-Disposition'] = 'attachment; filename=%s' % date.today().__str__()+'_media.tar'
return response
return direct_to_template(request, 'export/export.html', {'what': _(u'Export Media Root')})
@staff_member_required
def export_to_s3(request):
"""
Dump the database and upload the dump to Amazon S3
"""
if request.method == 'POST':
if settings.DATABASE_ENGINE == 'mysql':
cmd = MYSQLDUMP_CMD % (settings.DATABASE_HOST, settings.DATABASE_USER, settings.DATABASE_PASSWORD, settings.DATABASE_NAME)
elif settings.DATABASE_ENGINE == 'sqlite3':
cmd = SQLITE3DUMP_CMD % settings.DATABASE_NAME
else:
raise ImproperlyConfigured, "Sorry, django-export only supports mysql and sqlite3 database backends."
stdin, stdout = os.popen2(cmd)
stdin.close()
file_name = 'dump_%s.sql.bz2' % time.strftime('%Y%m%d-%H%M')
conn = S3.AWSAuthConnection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
res = conn.put(settings.AWS_BUCKET_NAME, file_name, S3.S3Object(stdout.read()), {'Content-Type': 'application/x-bzip2',})
if res.http_response.status == 200:
request.user.message_set.create(message="%s" % _(u"%(filename)s saved on Amazon S3") % {'filename': file_name})
else:
request.user.message_set.create(message="%s" % _(u"Upload failed with %(status)s") % {'status': res.http_response.status})
stdout.close()
return HttpResponseRedirect('/admin/')
return direct_to_template(request, 'export/export.html', {'what': _(u'Export Database to S3'), 's3support': (S3 is not None), 's3': True})
@staff_member_required
def list_s3(request):
"""
List Amazon S3 bucket contents
"""
if S3 is not None:
conn = S3.AWSAuthConnection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
generator = S3.QueryStringAuthGenerator(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY, calling_format=S3.CallingFormat.VANITY)
generator.set_expires_in(300)
bucket_entries = conn.list_bucket(settings.AWS_BUCKET_NAME).entries
entries = []
for entry in bucket_entries:
entry.s3url = generator.get(settings.AWS_BUCKET_NAME, entry.key)
entries.append(entry)
return direct_to_template(request, 'export/list_s3.html', {'object_list': entries, 's3support': True})
else:
return direct_to_template(request, 'export/list_s3.html', {'object_list': [], 's3support': False})
@staff_member_required
def export_index(request):
"""
List all available export views.
"""
return direct_to_template(request, 'export/index.html', {'s3support': (S3 is not None),})
########NEW FILE########
|
[
"dyangUCI@github.com"
] |
dyangUCI@github.com
|
c6fa6a2edb99f5bcef6bebbe9f0f17b78178e9aa
|
dc8a337ea1d8a285577d33e5cfd4dbbe846ee1a0
|
/src/main/scala/mock/25092020/ShortestPathInGridWithObstaclesElimination.py
|
d006e85a23d9c40ffbbddf03061da34dabd8a5b3
|
[] |
no_license
|
joestalker1/leetcode
|
8a5cdda17abd33c3eef859732f75d7bec77a9d0e
|
ae392ddbc7eb56cb814b9e9715043c98a89a6314
|
refs/heads/master
| 2023-04-13T22:09:54.407864
| 2023-04-09T19:22:54
| 2023-04-09T19:22:54
| 131,803,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,824
|
py
|
from heapq import heappop, heappush


class Solution:
    def shortestPath(self, grid, k):
        if not grid or not grid[0]:
            return -1
        n = len(grid)
        m = len(grid[0])
        q = [[0, 0, 0, 0]]  # len, row, col, eliminated obstacles (kept <= k)
        seen = set()
        seen.add((0, 0, 0))
        while q:
            d, r, c, elim = q.pop(0)
            if r == n - 1 and c == m - 1:
                return d
            for dr, dc in [[0, 1], [0, -1], [1, 0], [-1, 0]]:
                nr = r + dr
                nc = c + dc
                if 0 <= nr < n and 0 <= nc < m:
                    if grid[nr][nc] == 0 or grid[nr][nc] == 1 and elim < k:
                        nelim = elim + 1 if grid[nr][nc] == 1 else elim
                        # Track visited (row, col, eliminations) states so the BFS terminates.
                        if (nr, nc, nelim) in seen:
                            continue
                        seen.add((nr, nc, nelim))
                        q.append([d + 1, nr, nc, nelim])
        return -1


sol = Solution()
# print(sol.shortestPath([[0, 0, 0],
#                         [1, 1, 0],
#                         [0, 0, 0],
#                         [0, 1, 1],
#                         [0, 0, 0]], 1))
print(sol.shortestPath([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 1, 1, 1, 1, 1, 1, 1],
                        [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 1, 1, 1, 1, 1, 1, 1],
                        [0, 1, 0, 1, 1, 1, 1, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0, 1, 0],
                        [0, 1, 1, 1, 1, 1, 1, 0, 1, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 1, 0]],
                       1))
|
[
"denys@dasera.com"
] |
denys@dasera.com
|
47f4c767c2a7b9c7065929c3f93e659bcb8be2b3
|
fd6525073b8bd0f9387ccd14b801fdb6bfecd374
|
/nur/path.py
|
260f0d7957dea351b7c4bc2b1ca3abc02dcda391
|
[
"MIT"
] |
permissive
|
demyanrogozhin/NUR
|
5a92757c52d28ff0bbe8684b4bf25fc8998bfc43
|
a7746bf35b2fda77e2cb7a3a1f22db3e4d21f399
|
refs/heads/master
| 2020-12-12T17:40:47.783164
| 2020-01-26T21:10:26
| 2020-01-26T21:38:16
| 234,187,682
| 1
| 0
|
MIT
| 2020-01-15T22:40:27
| 2020-01-15T22:40:27
| null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
import os
import subprocess
from pathlib import Path

from .error import NurError


def _is_repo(path: Path) -> bool:
    return path.joinpath("lib/evalRepo.nix").exists()


def _find_root() -> Path:
    source_root = Path(__file__).parent.parent.resolve()
    if _is_repo(source_root):
        # if it was not build with release.nix
        return source_root
    else:
        root = Path(os.getcwd()).resolve()
        while True:
            if _is_repo(root):
                return root
            new_root = root.parent.resolve()
            if new_root == root:
                if _is_repo(new_root):
                    return new_root
                else:
                    raise NurError("NUR repository not found in current directory")
            # Walk up one directory per iteration until the repo root or the
            # filesystem root is reached.
            root = new_root


ROOT = _find_root()
LOCK_PATH = ROOT.joinpath("repos.json.lock")
MANIFEST_PATH = ROOT.joinpath("repos.json")
EVALREPO_PATH = ROOT.joinpath("lib/evalRepo.nix")

_NIXPKGS_PATH = None


def nixpkgs_path() -> str:
    global _NIXPKGS_PATH
    if _NIXPKGS_PATH is not None:
        return _NIXPKGS_PATH
    cmd = ["nix-instantiate", "--find-file", "nixpkgs"]
    path = subprocess.check_output(cmd).decode("utf-8").strip()
    _NIXPKGS_PATH = str(Path(path).resolve())
    return _NIXPKGS_PATH
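# --- Hedged usage note (not part of the original module) ---
# Minimal sketch of how these helpers are typically consumed elsewhere in NUR,
# assuming this module is importable as `nur.path`:
#
#   from nur.path import MANIFEST_PATH, nixpkgs_path
#
#   manifest_text = MANIFEST_PATH.read_text()  # repos.json at the repository root
#   pkgs = nixpkgs_path()                      # resolved <nixpkgs> path, cached after the first call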
|
[
"joerg@thalheim.io"
] |
joerg@thalheim.io
|
1c50b1cb0dad2f0bc854e38e79cd4a34774cb970
|
28dbe47aba287ed94ef7bba734203736bcc06249
|
/.history/dmac_20200715002741.py
|
56378030f386fb56196f8f2ccc62c0c32ea76ac5
|
[] |
no_license
|
ntung88/Trading_Algorithms
|
242fd816b19df95e02e9fcd8c5c91c862d2ede40
|
d96488b1754e3751f739d9c3f094a8f8dc54a0a9
|
refs/heads/master
| 2022-11-19T16:04:07.800344
| 2020-07-17T21:14:10
| 2020-07-17T21:14:10
| 276,239,640
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,471
|
py
|
import yfinance as yf
import numpy as np
import pandas as pd
from pandasgui import show
from scipy.optimize import minimize
import matplotlib.pyplot as plt
'''
A library for running Dual Moving Average Crossover trading strategy, with backtesting,
period optimization, and vizualization tools.
'''
#Period of time (in years) that we look back when optimizing in return calculation
HINDSIGHT = 2
def clean_data(data):
'''
Removes row (days) with no data from dataframe or series
'''
incomplete_idxs = False
if isinstance(data, pd.DataFrame):
for col in data.columns:
incomplete_idxs |= np.isnan(data[col])
else:
incomplete_idxs |= np.isnan(data)
return data[~incomplete_idxs]
def calc_crossovers(sma, lma):
'''
Returns a dataframe containing only the rows where a crossover of the sma and lma
is detected. 1 indicates a buy point (sma moving above lma), -1 a sell point
'''
num_points = len(clean_data(lma))
high = (sma > lma)[-num_points:]
crossovers = high.astype(int).diff()[1:]
trimmed = crossovers[crossovers != 0]
return trimmed
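# --- Hedged worked example (not in the original file) ---
# Toy illustration of the crossover encoding described in the docstring above,
# assuming plain pandas Series indexed by day:
#
#   sma = pd.Series([1, 3, 5, 4, 2])
#   lma = pd.Series([2, 2, 3, 3, 3])
#   calc_crossovers(sma, lma)
#   # day 1: sma moves above lma -> 1 (buy point); day 4: sma drops below -> -1 (sell point)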
def profit(data, crossovers):
'''
Calculates profit assuming data covers a continuous time period with the given crossovers
'''
if len(crossovers) == 0:
return 0
total = 0
# If first crossover is a sell point assume implicit buy point at very start of data
print(crossovers.iloc[0])
if crossovers.iloc[0] == -1:
total += data.loc[crossovers.index[0]] - data.iloc[0]
# Add the difference between value at sell points and value at buy points to our profit
for i in range(1,len(crossovers)):
left_bound = crossovers.index[i-1]
if crossovers.loc[left_bound] == 1:
right_bound = crossovers.index[i]
total += data.loc[right_bound] - data.loc[left_bound]
# If last crossover is a buy point assume implicit sell point at end of data (include
# profit we have made on current holding)
if crossovers.iloc[-1] == 1:
total += data.iloc[-1] - data.loc[crossovers.index[-1]]
return total
def optimize(data):
'''
Uses scipy's convex minimization library to find optimal short period and long period
for moving averages. Because the profit certainly isn't a truly convex function I use a
wide range of seeds as initial guesses in hopes of detecting all the local minimums
and comparing them to get a good guess of the global min
'''
cons = ({'type': 'ineq', 'fun': lambda x: x[1] - x[0]},
{'type': 'ineq', 'fun': lambda x: x[0] - 5})
# Ranges of initial guesses for short and long periods
#30 and 40 step size for max accuracy, larger for faster runtime
short_seeds = range(5, 300, 50)
long_seeds = range(20, 800, 70)
# short_seeds = [100]
# long_seeds = [750]
minimum = float('inf')
best_short = 0
best_long = 0
for short_seed in short_seeds:
for long_seed in long_seeds:
# Use all combinations of ranges where long_seed > short_seed as initial guesses
if long_seed > short_seed:
res = minimize(run_analysis, [short_seed, long_seed], args=(data,), method='COBYLA', constraints=cons, options={'rhobeg': 10.0, 'catol': 0.0})
if res.fun < minimum:
best_short = res.x[0]
best_long = res.x[1]
minimum = res.fun
return (int(round(best_short)), int(round(best_long)), minimum)
def run_analysis(periods, data):
'''
Objective function for minimization, runs profit calculation with given periods and data
Returns negative profit for minimization (maximization of profit)
'''
short_period = int(round(periods[0]))
long_period = int(round(periods[1]))
sma = data.rolling(short_period).mean()
lma = data.rolling(long_period).mean()
print('sma')
print(sma)
print('lma')
print(lma)
crossovers = calc_crossovers(sma, lma)
return -1 * profit(data, crossovers)
def visualize(data, short_period, long_period):
'''
Useful for visualizing the algorithm's decisions. Plots the stock price with colored
vertical bars at buy and sell points
'''
sma = data.rolling(short_period).mean()
lma = data.rolling(long_period).mean()
crossovers = calc_crossovers(sma, lma)
buys = pd.DataFrame(crossovers[crossovers == 1.0])
sells = pd.DataFrame(crossovers[crossovers == -1.0])
data.plot(color='black')
for buy in buys.index:
plt.axvline(buy, color="green")
for sell in sells.index:
plt.axvline(sell, color="red")
plt.show()
def split_year(data):
'''
Split dataframe into a list of dataframes, each corresponding to the data for each year
'''
years = np.unique(data.index.year)
split = []
for year in years:
split.append(data[data.index.year == year])
return split
def calc_returns(split_data):
'''
Calculate annual returns for periods optimized over slices (of size HINDSIGHT) of past data. Gives an idea of what kind of results to realistically expect
'''
annual_returns = []
max_return = float('-inf')
min_return = float('inf')
for i in range(2, len(split_data)):
test_year = split_data[i]
optimize_period = pd.DataFrame(np.concatenate(split_data[i-HINDSIGHT:i]))
print('optimize period:')
print(optimize_period)
periods = optimize(optimize_period)
print('periods:')
print(periods)
profit = run_analysis(periods, test_year)
annual_returns.append(profit)
if profit > max_return: max_return = profit
if profit < min_return: min_return = profit
return annual_returns, max_return, min_return
def main():
'''
Main's current functionality: Find optimal windows for TSLA and print them, along with profit since 6/29/2010
'''
ticker = yf.Ticker('MRNA')
# data = yf.download(tickers, period='max', group_by='ticker')
data = ticker.history(period="max")[:-4]
dirty = pd.DataFrame(data)
#Currently using only closing prices
frame = clean_data(dirty)['Close']
periods = optimize(frame)
# periods = calc_returns(split_year(frame))
print(periods)
# visualize(frame, periods[0], periods[1])
if __name__ == "__main__":
main()
'''
how to quantify number of shares you want to buy (steepness of trend, volatility, top 20 stocks?)
'''
|
[
"nathantung@Nathans-MacBook-Pro.local"
] |
nathantung@Nathans-MacBook-Pro.local
|
e56fe457c611b069400b8d96e73af45fe2389bdb
|
4474fb478f27f9caa5e4c9c465369230daf2c3ac
|
/project 2/task2.py
|
856b3e4454d95c47243c65920022d9319d6751e3
|
[] |
no_license
|
hariprasath95/computer_vision_image_processing
|
0a6a8f107028c498ba47de23d0e744eb9f9b34a4
|
3dbc2a82911afec1238206495507447997a63a23
|
refs/heads/master
| 2020-03-30T05:09:23.556754
| 2018-12-03T09:56:32
| 2018-12-03T09:56:32
| 150,783,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,233
|
py
|
UBIT = 'hparthas'
import cv2
import numpy as np
from matplotlib import pyplot as plt
import random
np.random.seed(sum([ord(c) for c in UBIT]))
# read image 1 and convert to BW
m1_clr = cv2.imread('data/tsucuba_left.png')
image1_bw= cv2.cvtColor(m1_clr,cv2.COLOR_BGR2GRAY)
# read image 2 and convert to BW
m2_clr = cv2.imread('data/tsucuba_right.png')
image2_bw = cv2.cvtColor(m2_clr,cv2.COLOR_BGR2GRAY)
# Extract Sift features and compute Descriptors for image 1 and image 2
sift = cv2.xfeatures2d.SIFT_create()
keypoints_mountain1 ,m1_des= sift.detectAndCompute(image1_bw,None)
image1_withkp = cv2.drawKeypoints(m1_clr,keypoints_mountain1,None)
cv2.imwrite('output/task2/task2_sift1.jpg',image1_withkp)
keypoints_mountain2,m2_des = sift.detectAndCompute(image2_bw,None)
image2_withkp = cv2.drawKeypoints(m2_clr,keypoints_mountain2,None)
cv2.imwrite('output/task2/task2_sift2.jpg',image2_withkp)
def drawlines(img1,img2,lines,pts1,pts2,color):
r,c = (cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)).shape
i = 0
for r,pt1,pt2 in zip(lines,pts1,pts2):
x0,y0 = map(int, [0, -r[2]/r[1] ])
x1,y1 = map(int, [c, -(r[2]+r[0]*c)/r[1] ])
img1 = cv2.line(img1, (x0,y0), (x1,y1), color[i],1)
img1 = cv2.circle(img2,tuple(pt1),5,color[i],-1)
i = i+1
return img1
pts1 = []
pts2 = []
bf = cv2.BFMatcher()
matches = bf.knnMatch(m1_des,m2_des, k=2)
for i,(m,n) in enumerate(matches):
pts2.append(keypoints_mountain2[m.trainIdx].pt)
pts1.append(keypoints_mountain1[m.queryIdx].pt)
fundamentalmat, mask = cv2.findFundamentalMat(np.array(pts1),np.array(pts2),cv2.FM_RANSAC)
print(fundamentalmat)
pts1 = np.array(pts1)[mask.ravel() == 1]
pts2 = np.array(pts2)[mask.ravel() == 1]
random_points = np.random.randint(0, len(pts1), 10)
selected_point1,selected_point2 = list(), list()
for i, (p1, p2) in enumerate(zip(pts1, pts2)):
if i in random_points:
selected_point1.append(p1)
selected_point2.append(p2)
selected_point1 = np.float32(selected_point1)
selected_point2 = np.float32(selected_point2)
colors = []
for i in range(0,10):
colors.append(tuple(np.random.randint(0,255,3).tolist()))
img1_lines = cv2.computeCorrespondEpilines(selected_point1.reshape(-1, 1, 2), 2, fundamentalmat)
img1_lines = img1_lines.reshape(-1, 3)
img1_lines1 = drawlines(m1_clr,m2_clr,img1_lines,selected_point1,selected_point2,colors)
img2_lines = cv2.computeCorrespondEpilines(selected_point2.reshape(-1, 1, 2), 2, fundamentalmat)
img2_lines = img2_lines.reshape(-1, 3)
img2_lines1 = drawlines(m2_clr,m1_clr,img2_lines,selected_point2,selected_point1,colors)
stereo = cv2.StereoBM_create(96, blockSize=17)
stereo.setMinDisparity(16)
stereo.setDisp12MaxDiff(0)
stereo.setUniquenessRatio(10)
stereo.setSpeckleRange(32)
stereo.setSpeckleWindowSize(100)
disparity_map = stereo.compute(image1_bw, image2_bw).astype(np.float32) / 16.0
disp_map = (disparity_map - 16)/96
# printing out all the output
plt.imsave('output/task2/task2_disparity.jpg', disp_map, cmap=plt.cm.gray)
cv2.imwrite('output/task2/task2_epi_right.jpg', img2_lines1)
cv2.imwrite('output/task2/task2_epi_left.jpg', img1_lines1)
cv2.imwrite("output/task2/merged.jpg", np.hstack([img2_lines1, img1_lines1]))
|
[
"-"
] |
-
|
569d9b2f6f80fca2a538781490709f78f5bb87c9
|
b580fd482147e54b1ca4f58b647fab016efa3855
|
/host_im/mount/malware-classification-master/samples/not/sample_good810.py
|
2d825235e439c825c7db724e4c12e9a012a6fdd9
|
[] |
no_license
|
Barnsa/Dissertation
|
1079c8d8d2c660253543452d4c32799b6081cfc5
|
b7df70abb3f38dfd446795a0a40cf5426e27130e
|
refs/heads/master
| 2022-05-28T12:35:28.406674
| 2020-05-05T08:37:16
| 2020-05-05T08:37:16
| 138,386,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
import difflib
import textwrap
import random
import readline
import datetime

nterms = 422
n1, n2 = 0, 1
if nterms <= 0:
    print("Please provide a positive integer.")
elif nterms == 1:
    print("Fibonacci sequence upto", nterms, ":")
    print(n1)
else:
    print("Fibonacci sequence:")
    count = 0
    while count < nterms:
        print(n1)
        nth = n1 + n2
        n1 = n2
        n2 = nth
        count = count + 1
|
[
"barnsa@uni.coventry.ac.uk"
] |
barnsa@uni.coventry.ac.uk
|
a02f9b72024d40ecfced70d6044ca509b2e7e823
|
effce116340b7d937bd285e43b49e1ef83d56156
|
/data_files/673 Number of Longest Increasing Subsequence.py
|
324b988bc2e0f1fbbc1e5247543d6f56b9c932ca
|
[] |
no_license
|
DL2021Spring/CourseProject
|
a7c7ef57d69bc1b21e3303e737abb27bee3bd585
|
108cdd906e705e9d4d05640af32d34bfc8b124da
|
refs/heads/master
| 2023-04-11T18:52:30.562103
| 2021-05-18T09:59:59
| 2021-05-18T09:59:59
| 365,733,976
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,099
|
py
|
from typing import List


class LenCnt:
    def __init__(self, l, c):
        self.l = l
        self.c = c

    def __repr__(self):
        return repr((self.l, self.c))


class Solution:
    def findNumberOfLIS(self, A: List[int]) -> int:
        if not A:
            return 0
        n = len(A)
        F = [LenCnt(l=1, c=1) for _ in A]
        mx = LenCnt(l=1, c=1)
        for i in range(1, n):
            for j in range(i):
                if A[i] > A[j]:
                    if F[i].l < F[j].l + 1:
                        F[i].l = F[j].l + 1
                        F[i].c = F[j].c
                    elif F[i].l == F[j].l + 1:
                        F[i].c += F[j].c
            if F[i].l > mx.l:
                mx.l = F[i].l
                mx.c = F[i].c
            elif F[i].l == mx.l:
                mx.c += F[i].c
        return mx.c


if __name__ == "__main__":
    assert Solution().findNumberOfLIS([1, 1, 1, 2, 2, 2, 3, 3, 3]) == 27
    assert Solution().findNumberOfLIS([1, 3, 5, 4, 7]) == 2
    assert Solution().findNumberOfLIS([2, 2, 2, 2, 2]) == 5
|
[
"1042448815@qq.com"
] |
1042448815@qq.com
|
c71847414fb17baa6000c236edad0ccc41ceef33
|
3fdad7e4cf4725e90354a674eddea4ec34f2344c
|
/myia/operations/macro_dtype.py
|
3a8c6fd0f5874ab38ee713d174cf5ba6a72c6c5f
|
[
"MIT"
] |
permissive
|
zangmunger/myia
|
1f2e9045af62da5a5d832eed0436de7c5813cd99
|
0aa38aa3c43648ee408dc031352ba442f6bed59f
|
refs/heads/master
| 2020-12-13T04:10:28.154027
| 2020-01-15T20:33:05
| 2020-01-15T20:33:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
"""Implementation of the 'dtype' macro."""
from ..lib import AbstractArray, Constant, macro
@macro
async def dtype(info, arr: AbstractArray):
"""Macro implementation for 'dtype'."""
return Constant((await arr.get()).element)
__operation_defaults__ = {
'name': 'dtype',
'registered_name': 'dtype',
'mapping': dtype,
'python_implementation': None,
}
|
[
"abergeron@gmail.com"
] |
abergeron@gmail.com
|
6674b616ab97613bb411dc42120f9e008a75d530
|
23ed45d816476bc2c73a0e0b0d6bf96c713c979d
|
/bearsong.py
|
45b321d8c2af89b38a3356ca648e9b1100f6e379
|
[] |
no_license
|
jason12360/zixue
|
e5fd0c6cd0ba8d4c72420360697ad23a5479615d
|
acc696732bb770f6fc0f0a8d4d076305ae39a9a6
|
refs/heads/master
| 2020-03-17T20:22:02.306897
| 2018-06-01T09:46:13
| 2018-06-01T09:46:13
| 133,906,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
word = 'bottles'
for beer_num in range(99, 0, -1):
    print(beer_num, word, 'of beer on the wall.')
    print(beer_num, word, 'of beer.')
    print('Take it down.')
    print('Pass it around.')
    if beer_num == 1:
        print('No more bottle of beer on the wall')
    else:
        new_num = beer_num - 1
        if new_num == 1:
            word = 'bottle'
        print(new_num, word, 'of beer on the wall.')
|
[
"370828117@qq.com"
] |
370828117@qq.com
|
0c1bef08943f239f67d9037534080ced61668cfd
|
f6e83bc298b24bfec278683341b2629388b22e6c
|
/scripts/check_db_integrity.py
|
3a994897b46425ba6bb41e00d88a664abd1712c1
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
noaOrMlnx/sonic-utilities
|
8d8ee86a9c258b4a5f37af69359ce100c29ad99c
|
9881f3edaa136233456408190367a09e53386376
|
refs/heads/master
| 2022-08-17T23:15:57.577454
| 2022-05-18T21:49:32
| 2022-05-18T21:49:32
| 225,886,772
| 1
| 0
|
NOASSERTION
| 2022-07-19T08:49:40
| 2019-12-04T14:31:32
|
Python
|
UTF-8
|
Python
| false
| false
| 3,007
|
py
|
#!/usr/bin/env python3
"""
This is to verify if Database has critical tables present before warmboot can proceed.
If warmboot is allowed with missing critical tables, it can lead to issues in going
down path or during the recovery path. This test detects such issues before proceeding.
The verification procedure here uses JSON schemas to verify the DB entities.
In future, to verify new tables or their content, just the schema modification is needed.
No modification may be needed to the integrity check logic.
"""
import os, sys
import json, jsonschema
import syslog
import subprocess
import traceback
DB_SCHEMA = {
"COUNTERS_DB":
{
"$schema": "http://json-schema.org/draft-06/schema",
"type": "object",
"title": "Schema for COUNTERS DB's entities",
"required": ["COUNTERS_PORT_NAME_MAP"],
"properties": {
"COUNTERS_PORT_NAME_MAP": {"$id": "#/properties/COUNTERS_PORT_NAME_MAP", "type": "object"}
}
}
}
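# --- Hedged illustration (not part of the original script) ---
# The docstring notes that covering more tables only requires a schema change.
# Requiring a hypothetical EXAMPLE_TABLE in CONFIG_DB could look like this:
#
#   DB_SCHEMA["CONFIG_DB"] = {
#       "$schema": "http://json-schema.org/draft-06/schema",
#       "type": "object",
#       "required": ["EXAMPLE_TABLE"],
#       "properties": {"EXAMPLE_TABLE": {"type": "object"}},
#   }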
def main():
if not DB_SCHEMA:
return 0
for db_name, schema in DB_SCHEMA.items():
db_dump_file = "/tmp/{}.json".format(db_name)
dump_db_cmd = "sonic-db-dump -n 'COUNTERS_DB' -y > {}".format(db_dump_file)
p = subprocess.Popen(dump_db_cmd, shell=True, text=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(_, err) = p.communicate()
rc = p.wait()
if rc != 0:
print("Failed to dump db {}. Return code: {} with err: {}".format(db_name, rc, err))
try:
with open(db_dump_file) as fp:
db_dump_data = json.load(fp)
except ValueError as err:
syslog.syslog(syslog.LOG_DEBUG, "DB json file is not a valid json file. " +\
"Error: {}".format(str(err)))
return 1
# What: Validate if critical tables and entries are present in DB.
# Why: This is needed to avoid warmbooting with a bad DB; which can
# potentially trigger failures in the reboot recovery path.
# How: Validate DB against a schema which defines required tables.
try:
jsonschema.validate(instance=db_dump_data, schema=schema)
except jsonschema.exceptions.ValidationError as err:
syslog.syslog(syslog.LOG_ERR, "Database is missing tables/entries needed for reboot procedure. " +\
"DB integrity check failed with:\n{}".format(str(err.message)))
return 1
syslog.syslog(syslog.LOG_DEBUG, "Database integrity checks passed.")
return 0
if __name__ == '__main__':
res = 0
try:
res = main()
except KeyboardInterrupt:
syslog.syslog(syslog.LOG_NOTICE, "SIGINT received. Quitting")
res = 1
except Exception as e:
syslog.syslog(syslog.LOG_ERR, "Got an exception %s: Traceback: %s" % (str(e), traceback.format_exc()))
res = 2
finally:
syslog.closelog()
try:
sys.exit(res)
except SystemExit:
os._exit(res)
|
[
"noreply@github.com"
] |
noaOrMlnx.noreply@github.com
|
b93802b90b96587bbc604e99f8afd807224026b6
|
d752046f2a056ca1b26323d431dc0a02153fe071
|
/corphub_app/views.py
|
81be1004dd1aa6fa1e8611c3b38e9714b8818496
|
[] |
no_license
|
calixo888/corphub
|
2cecac4a116ce49df64428da2f602cc00c7ed2d6
|
bc8e811b0edef18a906595e93c3ef8abf2198fca
|
refs/heads/master
| 2020-07-06T07:41:49.775105
| 2019-08-18T00:24:22
| 2019-08-18T00:24:22
| 202,943,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,735
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from corphub_app import forms
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
import re
# Create your views here.
links = []
agent = UserAgent()
header = {'user-agent': agent.chrome}
query = ""
def index(request):
global links
global query
if request.method == "POST":
form = forms.SearchForm(request.POST)
if form.is_valid():
query = form.cleaned_data['search']
links = []
queries = []
queries.append(query)
queries.append("\"{}\"".format(query))
for new_query in queries:
links = search_web(links, new_query, False)
links = search_web(links, new_query, True)
else:
form = forms.SearchForm()
query = ""
midpoint = len(links) // 2
return render(request, "corphub_app/index.html", context={"form": form, "links1": links[:20], "links2": links[20:40]})
def search_web(links, query, news):
    if news:
        page = requests.get("https://news.google.com/search?q=" + query + "&hl=en-US&gl=US&ceid=US%3Aen", headers=header)
        soup = BeautifulSoup(page.content, "html.parser")
        for i in soup.find_all('a', href=True):
            if str(i['href']).startswith("./articles/"):
                link = "https://news.google.com" + i['href'][1:]
                links.append(link)
    else:
        # Use the actual search query instead of a hard-coded term
        page = requests.get("https://www.google.dz/search?q=" + query, headers=header)
        soup = BeautifulSoup(page.content, "html.parser")
        for link in soup.find_all("a", href=re.compile(r"(?<=/url\?q=)(htt.*://.*)")):
            new_link = re.split(":(?=http)", link["href"].replace("/url?q=", ""))
            links.append(new_link[0])
    return list(set(links))
def viewall(request):
global query
links = []
queries = []
queries.append(query)
# queries.append(query + " news")
# queries.append(query + " speculations")
# queries.append(query + " stock")
# queries.append(query + " startup")
# queries.append(query + " development")
# queries.append(query + " founder")
# queries.append(query + " funding")
# queries.append(query + " products")
# queries.append(query + " market")
# queries.append(query + " evaluation")
# queries.append(query + " launches")
# queries.append("\"{}\"".format(query))
# queries.append("\"{} CEO\"".format(query))
for new_query in queries:
links = search_web(links, new_query, False)
links = search_web(links, new_query, True)
midpoint = len(links) // 2
return render(request, "corphub_app/viewall.html", context={"links1": links[:midpoint], "links2": links[midpoint:-1]})
|
[
"calix.huang1@gmail.com"
] |
calix.huang1@gmail.com
|
1767d3910fac27679191cb881124e7753f02b9dc
|
d29a5ce285083043a37cb0da2abb5a3045e05551
|
/reviewboard/admin/tests/test_related_user_widget.py
|
81f98b7213a9063a1f659f54219a18d85eaa4dc9
|
[
"MIT"
] |
permissive
|
wen501271303/reviewboard
|
83fa35123b851a5b42e3c2a3eb44f477a3da6198
|
a3b548437deb703792b805cf80f80313c7dd7f8a
|
refs/heads/master
| 2020-06-12T06:16:28.241753
| 2019-06-24T22:25:09
| 2019-06-24T22:25:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,736
|
py
|
"""Unit tests for reviewboard.admin.form_widgets.RelatedUserWidget."""
from __future__ import unicode_literals
from django import forms
from django.contrib.auth.models import User
from reviewboard.admin.form_widgets import RelatedUserWidget
from reviewboard.testing.testcase import TestCase
class TestForm(forms.Form):
"""A Test Form with a field that contains a RelatedUserWidget."""
my_multiselect_field = forms.ModelMultipleChoiceField(
queryset=User.objects.filter(is_active=True),
label=('Default users'),
required=False,
widget=RelatedUserWidget())
class LocalSiteTestForm(forms.Form):
"""A Test Form with a field that contains a RelatedUserWidget.
The RelatedUserWidget is defined to have a local_site_name.
"""
my_multiselect_field = forms.ModelMultipleChoiceField(
queryset=User.objects.filter(is_active=True),
label=('Default users'),
required=False,
widget=RelatedUserWidget(local_site_name='supertest'))
class SingleValueTestForm(forms.Form):
"""A Test Form with a field that contains a RelatedUserWidget.
The RelatedUserWidget is defined as setting multivalued to False.
"""
my_select_field = forms.ModelMultipleChoiceField(
queryset=User.objects.filter(is_active=True),
label=('Default users'),
required=False,
widget=RelatedUserWidget(multivalued=False))
class RelatedUserWidgetTests(TestCase):
"""Unit tests for RelatedUserWidget."""
fixtures = ['test_users']
def test_render_empty(self):
"""Testing RelatedUserWidget.render with no initial data"""
my_form = TestForm()
html = my_form.fields['my_multiselect_field'].widget.render(
'Default users',
[],
{'id': 'default-users'})
self.assertHTMLEqual(
"""<input id="default-users" name="Default users" type="hidden" />
<script>
$(function() {
var view = new RB.RelatedUserSelectorView({
$input: $('#default\\u002Dusers'),
initialOptions: [],
useAvatars: true,
multivalued: true
}).render();
});
</script>""",
html)
def test_render_with_data(self):
"""Testing RelatedUserWidget.render with initial data"""
my_form = TestForm()
html = my_form.fields['my_multiselect_field'].widget.render(
'Default users',
[1, 2, 3],
{'id': 'default-users'})
self.assertHTMLEqual(
"""<input id="default-users" name="Default users"
type="hidden" value="1,2,3" />
<script>
$(function() {
var view = new RB.RelatedUserSelectorView({
$input: $('#default\\u002Dusers'),
initialOptions: [{"avatarURL": "https://secure.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=40\\u0026d=mm",
"fullname": "Admin User",
"id": 1,
"username": "admin"},
{"avatarURL": "https://secure.gravatar.com/avatar/b0f1ae4342591db2695fb11313114b3e?s=40\\u0026d=mm",
"fullname": "Doc Dwarf",
"id": 2,
"username": "doc"},
{"avatarURL": "https://secure.gravatar.com/avatar/1a0098e6600792ea4f714aa205bf3f2b?s=40\\u0026d=mm",
"fullname": "Dopey Dwarf",
"id": 3,
"username": "dopey"}],
useAvatars: true,
multivalued: true
}).render();
});
</script>""",
html)
def test_render_with_local_site(self):
"""Testing RelatedUserWidget.render with a local site defined"""
my_form = LocalSiteTestForm()
html = my_form.fields['my_multiselect_field'].widget.render(
'Default users',
[],
{'id': 'default-users'})
self.assertIn("localSitePrefix: 's/supertest/',", html)
def test_value_from_datadict(self):
"""Testing RelatedUserWidget.value_from_datadict"""
my_form = TestForm()
value = (
my_form.fields['my_multiselect_field']
.widget
.value_from_datadict(
{'people': ['1', '2']},
{},
'people'))
self.assertEqual(value, ['1', '2'])
def test_value_from_datadict_single_value(self):
"""Testing RelatedUserWidget.value_from_datadict with a single value"""
my_form = SingleValueTestForm()
value = (
my_form.fields['my_select_field']
.widget
.value_from_datadict(
{'people': ['1']},
{},
'people'))
self.assertEqual(value, ['1'])
def test_value_from_datadict_with_no_data(self):
"""Testing RelatedUserWidget.value_from_datadict with no data"""
my_form = TestForm()
value = (
my_form.fields['my_multiselect_field']
.widget
.value_from_datadict(
{'people': []},
{},
'people'))
self.assertEqual(value, [])
def test_value_from_datadict_with_missing_data(self):
"""Testing RelatedUserWidget.value_from_datadict with missing data"""
my_form = TestForm()
value = (
my_form.fields['my_multiselect_field']
.widget
.value_from_datadict(
{},
{},
'people'))
self.assertIsNone(value)
|
[
"christian@beanbaginc.com"
] |
christian@beanbaginc.com
|
62ae74f067f9e799dea7e452ee0644d0e64f3f79
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_091/ch25_2020_09_30_19_23_04_594122.py
|
56f82a63d1ac642611703c6c1b15d4481a550f7c
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
import math
v = int(input('Enter the launch speed of the jackfruit: '))
m = float(input('Enter the launch angle in degrees: '))
# Projectile range: d = v**2 * sin(2*theta) / g, with the angle converted to radians
d = ((v**2) * math.sin(math.radians(2 * m))) / 9.8
if d < 98:
    print('Too close')
elif d >= 98 and d <= 102:
    print('Hit')
elif d > 102:
    print('Too far')
|
[
"you@example.com"
] |
you@example.com
|
7595dee2388e7d0424519ce001bd1a177e831d2c
|
de707c94c91f554d549e604737b72e6c86eb0755
|
/supervised_learning/0x12-transformer_apps/1-dataset.py
|
ecbc295eff78c8f60161c3dbab97b078db9527a8
|
[] |
no_license
|
ejonakodra/holbertonschool-machine_learning-1
|
885cf89c1737573228071e4dc8e26304f393bc30
|
8834b201ca84937365e4dcc0fac978656cdf5293
|
refs/heads/main
| 2023-07-10T09:11:01.298863
| 2021-08-11T03:43:59
| 2021-08-11T03:43:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,979
|
py
|
#!/usr/bin/env python3
"""
Defines class Dataset that loads and preps a dataset for machine translation
"""
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
class Dataset:
"""
Loads and preps a dataset for machine translation
class constructor:
def __init__(self)
public instance attributes:
data_train:
contains the ted_hrlr_translate/pt_to_en
tf.data.Dataset train split, loaded as_supervided
data_valid:
contains the ted_hrlr_translate/pt_to_en
tf.data.Dataset validate split, loaded as_supervided
tokenizer_pt:
the Portuguese tokenizer created from the training set
tokenizer_en:
the English tokenizer created from the training set
instance method:
def tokenize_dataset(self, data):
that creates sub-word tokenizers for our dataset
def encode(self, pt, en):
that encodes a translation into tokens
"""
def __init__(self):
"""
Class constructor
Sets the public instance attributes:
data_train:
contains the ted_hrlr_translate/pt_to_en
tf.data.Dataset train split, loaded as_supervided
data_valid:
contains the ted_hrlr_translate/pt_to_en
tf.data.Dataset validate split, loaded as_supervided
tokenizer_pt:
the Portuguese tokenizer created from the training set
tokenizer_en:
the English tokenizer created from the training set
"""
self.data_train = tfds.load("ted_hrlr_translate/pt_to_en",
split="train",
as_supervised=True)
self.data_valid = tfds.load("ted_hrlr_translate/pt_to_en",
split="validation",
as_supervised=True)
self.tokenizer_pt, self.tokenizer_en = self.tokenize_dataset(
self.data_train)
def tokenize_dataset(self, data):
"""
Creates sub_word tokenizers for our dataset
parameters:
data [tf.data.Dataset]:
dataset to use whose examples are formatted as tuple (pt, en)
pt [tf.Tensor]:
contains the Portuguese sentence
en [tf.Tensor]:
contains the corresponding English sentence
returns:
tokenizer_pt, tokenizer_en:
tokenizer_pt: the Portuguese tokenizer
tokenizer_en: the English tokenizer
"""
SubwordTextEncoder = tfds.deprecated.text.SubwordTextEncoder
tokenizer_pt = SubwordTextEncoder.build_from_corpus(
(pt.numpy() for pt, en in data),
target_vocab_size=(2 ** 15))
tokenizer_en = SubwordTextEncoder.build_from_corpus(
(en.numpy() for pt, en in data),
target_vocab_size=(2 ** 15))
return tokenizer_pt, tokenizer_en
def encode(self, pt, en):
"""
Encodes a translation into tokens
parameters:
pt [tf.Tensor]:
contains the Portuguese sentence
en [tf.Tensor]:
contains the corresponding English sentence
returns:
pt_tokens, en_tokens:
pt_tokens [np.ndarray]: the Portuguese tokens
en_tokens [np.ndarray]: the English tokens
"""
pt_start_index = self.tokenizer_pt.vocab_size
pt_end_index = pt_start_index + 1
en_start_index = self.tokenizer_en.vocab_size
en_end_index = en_start_index + 1
pt_tokens = [pt_start_index] + self.tokenizer_pt.encode(
pt.numpy()) + [pt_end_index]
en_tokens = [en_start_index] + self.tokenizer_en.encode(
en.numpy()) + [en_end_index]
return pt_tokens, en_tokens
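# A minimal usage sketch (illustrative only; assumes tensorflow_datasets can download
# the ted_hrlr_translate/pt_to_en dataset in this environment):
#
#   data = Dataset()
#   for pt, en in data.data_train.take(1):
#       pt_tokens, en_tokens = data.encode(pt, en)
#       # each token list is framed by the tokenizer's start/end indices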
|
[
"eislek02@gmail.com"
] |
eislek02@gmail.com
|
349a7c36c49b2122284b4e8861c63a455bcb3fb4
|
e77b92df446f0afed18a923846944b5fd3596bf9
|
/Inflearn_algo/section5_StackQueueHash/pro_8.py
|
4b75091911002a18320692cf54624df1ef6ac406
|
[] |
no_license
|
sds1vrk/Algo_Study
|
e40ca8eb348d1fc6f88d883b26195b9ee6f35b2e
|
fbbc21bb06bb5dc08927b899ddc20e6cde9f0319
|
refs/heads/main
| 2023-06-27T05:49:15.351644
| 2021-08-01T12:43:06
| 2021-08-01T12:43:06
| 356,512,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 560
|
py
|
# Find the word
# Find the word that is not used in the poem
import sys
# sys.stdin=open("3190.txt","r")
n = int(input())
node = []
content = []
for i in range(n):
    node.append(input())
for j in range(n-1):
    content.append(input())
# Sort both lists in ascending order ==> there is always exactly one answer
node.sort()
content.sort()
# print(node)
# print(content)
result = ""
for i in range(len(content)):
    if content[i] != node[i]:
        # print(content[i])
        result = node[i]
        break
# If every poem word matched, the unused word is the last one in the sorted word list
if result == "":
    result = node[-1]
print(result)
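# Illustrative run (hypothetical input, not part of the original submission):
# with n=3, words = ["banana", "apple", "cherry"] and poem lines = ["banana", "cherry"],
# the lists sort to ["apple", "banana", "cherry"] and ["banana", "cherry"]; the first
# mismatch is at index 0, so the unused word printed is "apple".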
|
[
"51287886+sds1vrk@users.noreply.github.com"
] |
51287886+sds1vrk@users.noreply.github.com
|
98ff009947702d0d5bfca3498b6c22479e18e62e
|
e07ba3eeea2d9a3ce44bdc6fb26386db5b72d8fc
|
/ijvine_ebay/ijvine_ebay_base/wizard/imports/__init__.py
|
c8511c6ba816f058973a66f4a88db0a32fef9ec2
|
[] |
no_license
|
tosink/ab
|
8e4b931214eb333e141fd5c6512ba956e5cde3d4
|
1c410562edce9be367ad6cab7ac3370353e395c8
|
refs/heads/master
| 2023-06-14T10:34:21.120996
| 2021-07-15T20:09:14
| 2021-07-15T20:09:14
| 275,185,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
# Copyright (c) 2021-Present IjVine Corporation (<https://ijvine.com/>)
##############################################################################
from . import import_operation
from . import import_attribute
from . import import_attribute_value
from . import import_category
from . import import_order
from . import import_partner
from . import import_template
from . import import_product
|
[
"komolafetosin@gmail.com"
] |
komolafetosin@gmail.com
|
16433d05f74a4f2012471c58b28b8c8e80c34dbd
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/nlp/MT5_ID4146_for_PyTorch/transformers/src/transformers/models/poolformer/__init__.py
|
904dd02ac05522a70491dedf6e4862494d96cd6c
|
[
"Apache-2.0",
"GPL-1.0-or-later",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,990
|
py
|
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...file_utils import _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_poolformer": ["POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PoolFormerConfig"],
}
if is_vision_available():
_import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
if is_torch_available():
_import_structure["modeling_poolformer"] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig
if is_vision_available():
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
if is_torch_available():
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
23297565cf5f63a35de1cce9992af035d53c2a35
|
21e177a4d828f4e0a003e9424c4952dbc0b47d29
|
/testlints/test_lint_ext_ian_space_dns_name.py
|
f149b7266693b0ec79697e0bc1d1d61d1bd555a3
|
[] |
no_license
|
846468230/Plint
|
1071277a55144bb3185347a58dd9787562fc0538
|
c7e7ca27e5d04bbaa4e7ad71d8e86ec5c9388987
|
refs/heads/master
| 2020-05-15T12:11:22.358000
| 2019-04-19T11:46:05
| 2019-04-19T11:46:05
| 182,255,941
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,132
|
py
|
import sys
sys.path.append("..")
from lints import base
from lints import lint_ext_ian_space_dns_name
import unittest
import os
from cryptography import x509
from cryptography.hazmat.backends import default_backend
class TestIANEmptyDNS(unittest.TestCase):
'''test lint_ext_ian_space_dns_name.py'''
def test_IANEmptyDNS(self):
certPath ='..\\testCerts\\IANEmptyDNS.pem'
lint_ext_ian_space_dns_name.init()
with open(certPath, "rb") as f:
cert = x509.load_pem_x509_certificate(f.read(), default_backend())
out = base.Lints["e_ext_ian_space_dns_name"].Execute(cert)
self.assertEqual(base.LintStatus.Error,out.Status)
def test_IANNotEmptyDNS(self):
certPath ='..\\testCerts\\SANNoEntries.pem'
lint_ext_ian_space_dns_name.init()
with open(certPath, "rb") as f:
cert = x509.load_pem_x509_certificate(f.read(), default_backend())
out = base.Lints["e_ext_ian_space_dns_name"].Execute(cert)
self.assertEqual(base.LintStatus.Pass,out.Status)
if __name__=="__main__":
unittest.main(verbosity=2)
|
[
"846468230@qq.com"
] |
846468230@qq.com
|
f3a4f9d862a4e6d05ac0f4a9d2af4620e88d4183
|
a9cd70686c362d946f40ed4314f6cf871a0149aa
|
/appsflyer_processor.py
|
04ad2789d92c543bdc2c728fa6e1e744b0b7c473
|
[
"MIT"
] |
permissive
|
lxzero/bot_appsflyer
|
73ec3f33784f4fadd2d60416fddf28098a8dea26
|
e0e7c0439e7448e5645c262151c7d35fd7295886
|
refs/heads/main
| 2023-03-19T16:01:47.367603
| 2020-10-21T17:45:50
| 2020-10-21T17:45:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,161
|
py
|
import pandas as pd
import numpy as np
from datetime import date
from pathlib import Path
from typing import Optional, Dict
class AppsFlyerProcessor:
source_directory_path: Path
platform_directory_map: Dict[str, str]
processed_data: Optional[pd.DataFrame]=None
def __init__(self, source_directory_path: Path, platform_directory_map: Dict[str, str]):
self.source_directory_path = source_directory_path
self.platform_directory_map = platform_directory_map
def process(self):
processed_data = pd.DataFrame()
for platform, app_id in self.platform_directory_map.items():
files_path = self.source_directory_path / app_id
for path in files_path.glob('*.csv'):
file_name = path.absolute()
df = pd.read_csv(file_name)
day_list = [
x
for x in df.columns
if x not in ('Cohort Day', 'Media Source', 'Ltv Country', 'Campaign Id', 'Users',
'Cost', 'Average eCPI', 'Users')
]
df_final = pd.DataFrame()
for i in day_list:
event_day = i.split(' ')[-1]
if event_day == 'partial':
event_day = i.split(' ')[-3]
df_temp = df[['Cohort Day', 'Media Source', 'Ltv Country', 'Campaign Id']]
# Ensure Campaign Id can be read as a string
df_temp['Campaign Id'] = df_temp['Campaign Id'].astype(str)
df_temp['Campaign Id'] = '"' + df_temp['Campaign Id'] + '"'
df_temp['event_day'] = event_day
df_temp['cohort_revenue'] = df[[i]]
df_temp.cohort_revenue = df_temp.cohort_revenue.apply(lambda s: float(s.split('/')[0]) / float(s.split('/')[1]) if isinstance(s, str) and '/' in s else s)
df_temp['platform'] = platform
df_temp['install'] = df[['Users']]
df_final = df_temp.append(df_final, sort=True)
processed_data = processed_data.append(df_final, sort=True)
self.processed_data = processed_data
def process_old(self):
today = date.today()
file_name = input('Please enter file name: ')
platform = ''
if file_name.find('ios') != -1: platform = 'ios'
elif file_name.find('android') != -1: platform = 'android'
else: platform = 'error'
df = pd.read_csv('{}.csv'.format(file_name))
day_list = [x for x in df.columns if x not in ('Cohort Day', 'Media Source', 'Ltv Country', 'Campaign Id', 'Users',
'Cost', 'Average eCPI','Users')]
df_final = pd.DataFrame()
for i in day_list:
event_day = i.split(' ')[-1]
df_temp = df[['Cohort Day', 'Media Source', 'Ltv Country', 'Campaign Id']]
# Ensure Campaign Id can be read as a string
df_temp['Campaign Id'] = df_temp['Campaign Id'].astype(str)
df_temp['Campaign Id'] = '"' + df_temp['Campaign Id'] + '"'
df_temp['event_day'] = event_day
df_temp['cohort_revenue'] = df[[i]]
df_temp['platform'] = platform
df_temp['install'] = df[['Users']]
df_final = df_temp.append(df_final, sort = True)
df_final.to_csv('AF Total Revenue Data Lot - {}.csv'.format(today), index=False)
print('Exported CSV')
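# Hypothetical usage sketch (directory name and app ids are made up; this is an
# editorial addition, not part of the original module):
#
#   processor = AppsFlyerProcessor(
#       source_directory_path=Path("af_exports"),
#       platform_directory_map={"ios": "id0000000000", "android": "com.example.app"},
#   )
#   processor.process()
#   processor.processed_data.to_csv("af_cohort_revenue.csv", index=False)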
|
[
"leif@leifmeyer.io"
] |
leif@leifmeyer.io
|
0a82b035a5f5e69b90154c800f8c8daa9dde3af8
|
6371acdb640e62e4e6addac2ba1aa70002a8c1b1
|
/Algorithms/pySINDy/env/lib/python3.6/site-packages/matplotlib/backends/_gtk3_compat.py
|
e0ac33c8d3433fbe37af268ab3bbd11ec424236c
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
M-Vause/SEED
|
263307152ebac1e4f49cd81dcd5207ecbdf51139
|
cda94a02a5ef47a1e9a885d330eef2821301ebed
|
refs/heads/master
| 2022-12-13T20:11:58.893994
| 2020-04-27T16:10:09
| 2020-04-27T16:10:09
| 252,790,026
| 3
| 3
|
MIT
| 2022-12-08T01:52:05
| 2020-04-03T16:55:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,458
|
py
|
"""
GObject compatibility loader; supports ``gi`` and ``pgi``.
The binding selection rules are as follows:
- if ``gi`` has already been imported, use it; else
- if ``pgi`` has already been imported, use it; else
- if ``gi`` can be imported, use it; else
- if ``pgi`` can be imported, use it; else
- error out.
Thus, to force usage of PGI when both bindings are installed, import it first.
"""
import importlib
import sys
if "gi" in sys.modules:
import gi
elif "pgi" in sys.modules:
import pgi as gi
else:
try:
import gi
except ImportError:
try:
import pgi as gi
except ImportError:
raise ImportError("The GTK3 backends require PyGObject or pgi")
from .backend_cairo import cairo # noqa
# The following combinations are allowed:
# gi + pycairo
# gi + cairocffi
# pgi + cairocffi
# (pgi doesn't work with pycairo)
# We always try to import cairocffi first so if a check below fails it means
# that cairocffi was unavailable to start with.
if gi.__name__ == "pgi" and cairo.__name__ == "cairo":
raise ImportError("pgi and pycairo are not compatible")
if gi.__name__ == "pgi" and gi.version_info < (0, 0, 11, 2):
raise ImportError("The GTK3 backends are incompatible with pgi<0.0.11.2")
gi.require_version("Gtk", "3.0")
globals().update(
{name:
importlib.import_module("{}.repository.{}".format(gi.__name__, name))
for name in ["GLib", "GObject", "Gtk", "Gdk"]})
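# Per the selection rules in the module docstring, pgi can be forced even when gi is
# installed by importing it before this module (illustrative sketch only):
#
#   import pgi                                    # already in sys.modules, so it wins the tie
#   from matplotlib.backends import _gtk3_compat  # the loader above now binds to pgi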
|
[
"58262117+M-Vause@users.noreply.github.com"
] |
58262117+M-Vause@users.noreply.github.com
|
27b11c1cfa45069236e8505d414d2d41fd14cbba
|
00946ddaec6fc10781a5cd4c6242c4674e599c90
|
/TwoPointers/986. Interval List Intersections.py
|
cdbf19243dd41d55282757efc2ebb683634791d9
|
[] |
no_license
|
XihangJ/leetcode
|
618f15c1fb57a57499924145afaa93be0dfebc4c
|
f7d215ef4780d88b91d2478b75ae09aed0e257f1
|
refs/heads/main
| 2023-08-22T00:59:55.239744
| 2021-10-26T05:21:58
| 2021-10-26T05:21:58
| 375,885,476
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
'''
You are given two lists of closed intervals, firstList and secondList, where firstList[i] = [starti, endi] and secondList[j] = [startj, endj]. Each list of intervals is pairwise disjoint and in sorted order.
Return the intersection of these two interval lists.
A closed interval [a, b] (with a <= b) denotes the set of real numbers x with a <= x <= b.
The intersection of two closed intervals is a set of real numbers that are either empty or represented as a closed interval. For example, the intersection of [1, 3] and [2, 4] is [2, 3].
'''
from typing import List
class Solution:
# method 1. 2 pointers. O(m + n), S(1)
def intervalIntersection(self, firstList: List[List[int]], secondList: List[List[int]]) -> List[List[int]]:
if not firstList or not secondList: return []
res = []
i1 = 0
i2 = 0
while i1 < len(firstList) and i2 < len(secondList):
first = firstList[i1]
second = secondList[i2]
left, right = max(first[0], second[0]), min(first[1], second[1])
if left <= right: res.append([left, right])
if first[1] < second[1]:
i1 += 1
elif first[1] > second[1]:
i2 += 1
else:
i1 += 1
i2 += 1
return res
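# A quick check against the example in the problem statement above ([1, 3] and [2, 4]
# intersect in [2, 3]); this demo block is an editorial addition, not part of the
# original solution file.
if __name__ == "__main__":
    print(Solution().intervalIntersection([[1, 3]], [[2, 4]]))  # expected: [[2, 3]]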
|
[
"noreply@github.com"
] |
XihangJ.noreply@github.com
|
10cc915e429d025238de2714b821afd172faa197
|
4a8c1f7d9935609b780aff95c886ef7781967be0
|
/atcoder/ABC/A/065_a.py
|
9045ea98ef9fe34b57a5544b42250b279866012e
|
[] |
no_license
|
recuraki/PythonJunkTest
|
d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a
|
2556c973d468a6988d307ce85c5f2f8ab15e759a
|
refs/heads/master
| 2023-08-09T17:42:21.875768
| 2023-07-18T23:06:31
| 2023-07-18T23:06:31
| 13,790,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = """4 3 6"""
output = """safe"""
self.assertIO(input, output)
def test_入力例_2(self):
input = """6 5 1"""
output = """delicious"""
self.assertIO(input, output)
def test_入力例_3(self):
input = """3 7 12"""
output = """dangerous"""
self.assertIO(input, output)
def resolve():
    # resolve() must be defined before unittest.main() runs, otherwise the tests
    # raise NameError when the module is executed as a script
    x, a, b = map(int, input().split())
    if b <= a:
        print("delicious")
    elif b <= (a + x):
        print("safe")
    else:
        print("dangerous")
if __name__ == "__main__":
    unittest.main()
|
[
"kanai@wide.ad.jp"
] |
kanai@wide.ad.jp
|
849d6da1c93aa71164cd222145fb72163c6366c0
|
66358f0897dd92882344a9ec87adff2003c9bc76
|
/leetcode/501~600/501. Find Mode in Binary Search Tree.py
|
32f12787db36506d9437248100b324015ec7da2e
|
[] |
no_license
|
Parkyes90/algo
|
973c5f84ed1cae41bb963a5838b835473c8dc984
|
86490aad1774631ad947bdf12818e9ddba8a8ed0
|
refs/heads/master
| 2023-04-06T23:17:08.372040
| 2023-03-30T10:18:11
| 2023-03-30T10:18:11
| 244,273,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
# Definition for a binary tree node.
from typing import List
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def findMode(self, root: TreeNode) -> List[int]:
if not root:
return []
values = []
values_map = {}
nodes = [root]
ret = []
while nodes:
node = nodes.pop(0)
if node.left:
nodes.append(node.left)
if node.right:
nodes.append(node.right)
if isinstance(node.val, int):
values.append(node.val)
if not values:
return []
for value in values:
values_map[value] = values_map.get(value, 0) + 1
maximum = max(values_map.values())
for key, value in values_map.items():
if maximum == value:
ret.append(key)
return ret
if __name__ == "__main__":
r = TreeNode(0)
s = Solution()
answer = s.findMode(r)
print(answer)
|
[
"parkyes90@gmail.com"
] |
parkyes90@gmail.com
|