blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b90cc6344c7868d92b0bac983f668cc7aad38c4c
|
56e469a1bfd29004fa258a54668dfbbc4459663d
|
/python3-pandas-tutorial/src/lesson6.py
|
2f6229fcc0269b0b8e6b2bdd47c4b9d40ff8c905
|
[] |
no_license
|
wind86/learning
|
bfce4a6795b58b27d0148b878299cacfe96aa26f
|
4449ba0eed0a8f803a2bb9fbd663faf43148f03a
|
refs/heads/master
| 2020-04-05T23:28:40.082439
| 2017-11-04T11:36:40
| 2017-11-04T11:36:40
| 83,236,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,261
|
py
|
'''
Created on May 15, 2017
Joining and Merging Dataframes - p.6 Data Analysis with Python and Pandas Tutorial
based on https://www.youtube.com/watch?v=XMjSGGej9y8&index=6&list=PLQVvvaa0QuDc-3szzjeP6N6b0aDrrKyL-
@author: ubuntu
'''
import pandas as pd

# Earlier tutorial experiments (pd.merge on 'HPI', merge on multiple keys,
# join after set_index, and the how='left'/'right'/'outer'/'inner' merge
# variants) were tried here and have been trimmed for brevity.

# Two frames that share only the 'Year' column; their other columns are
# disjoint, which makes join behaviour easy to see.
df1 = pd.DataFrame({
    'Int_rate': [2, 3, 2, 2],
    'US_GDP_Thousands': [50, 55, 65, 55],
    'Year': [2001, 2002, 2003, 2004],
})

df3 = pd.DataFrame({
    'Unemployment': [7, 8, 9, 6],
    'Low_tier_HPI': [50, 52, 50, 53],
    'Year': [2001, 2003, 2004, 2005]})

# Index both frames by year, then outer-join: the result keeps the union of
# the years and fills missing cells with NaN.
for frame in (df1, df3):
    frame.set_index('Year', inplace=True)

joined = df1.join(df3, how="outer")
print(joined)
|
[
"wind86@meta.ua"
] |
wind86@meta.ua
|
c582ec9b4a70e9179e20a14245487ba928f81ead
|
ba0cea0d05962e8a84016a80e3b9c9c358aa64c8
|
/trading/TradingModel.py
|
1458d04546c0e16e201695d145ce969e96281167
|
[] |
no_license
|
acjones212/tudorials
|
48b7445dc832127021495bd5061b5e79588dc1bb
|
7998da771e4623697b1b73c4a43c3e84fe67322f
|
refs/heads/master
| 2020-06-30T06:13:03.055008
| 2019-02-17T04:00:43
| 2019-02-17T04:00:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,700
|
py
|
import pandas as pd
import requests
import json
import plotly.graph_objs as go
from plotly.offline import plot
from pyti.smoothed_moving_average import smoothed_moving_average as sma
class TradingModel:
    """Fetches hourly candlestick data for one Binance symbol, computes two
    smoothed moving averages, and plots buy/sell signals with plotly.
    """

    def __init__(self, symbol):
        # Trading pair symbol, e.g. "BTCUSDT".
        self.symbol = symbol
        # Candles are downloaded eagerly: network I/O happens at construction.
        self.df = self.getData()

    def getData(self):
        """Download 1-hour klines from Binance and return a DataFrame with
        float columns time/open/high/low/close/volume plus fast_sma/slow_sma
        (10- and 30-period smoothed moving averages of the close).
        """
        # define URL
        base = 'https://api.binance.com'
        endpoint = '/api/v1/klines'
        params = '?&symbol='+self.symbol+'&interval=1h'
        url = base + endpoint + params

        # download data
        data = requests.get(url)
        dictionary = json.loads(data.text)

        # put in dataframe and clean-up: drop columns 6-11
        # (presumably the extra kline fields after volume — confirm against
        # the Binance kline payload layout)
        df = pd.DataFrame.from_dict(dictionary)
        df = df.drop(range(6, 12), axis=1)

        # rename the remaining columns
        col_names = ['time', 'open', 'high', 'low', 'close', 'volume']
        df.columns = col_names

        # API returns numbers as strings; convert to floats
        for col in col_names:
            df[col] = df[col].astype(float)

        # add the moving averages (pyti smoothed moving average)
        df['fast_sma'] = sma(df['close'].tolist(), 10)
        df['slow_sma'] = sma(df['close'].tolist(), 30)

        return df

    def strategy(self):
        '''If Price is 3% below Slow Moving Average, then Buy
        Put selling order for 2% above buying price'''
        df = self.df
        buy_signals = []

        # Record (time, low) wherever the low dips more than 3% below the
        # slow SMA.  Starts at index 1, skipping the first candle.
        for i in range(1, len(df['close'])):
            if df['slow_sma'][i] > df['low'][i] and (df['slow_sma'][i] - df['low'][i]) > 0.03 * df['low'][i]:
                buy_signals.append([df['time'][i], df['low'][i]])

        self.plotData(buy_signals = buy_signals)

    def plotData(self, buy_signals = False):
        """Render candles + SMAs (and optional buy/sell markers) to an HTML
        file named after the symbol.

        :param buy_signals: list of [time, price] pairs, or falsy to skip.
        """
        df = self.df

        # plot candlestick chart
        candle = go.Candlestick(
            x = df['time'],
            open = df['open'],
            close = df['close'],
            high = df['high'],
            low = df['low'],
            name = "Candlesticks")

        # plot the two moving averages
        fsma = go.Scatter(
            x = df['time'],
            y = df['fast_sma'],
            name = "Fast SMA",
            line = dict(color = ('rgba(102, 207, 255, 50)')))

        ssma = go.Scatter(
            x = df['time'],
            y = df['slow_sma'],
            name = "Slow SMA",
            line = dict(color = ('rgba(255, 207, 102, 50)')))

        data = [candle, ssma, fsma]

        if buy_signals:
            buys = go.Scatter(
                x = [item[0] for item in buy_signals],
                y = [item[1] for item in buy_signals],
                name = "Buy Signals",
                mode = "markers",
            )

            # sell targets are placed 2% above the matching buy price
            sells = go.Scatter(
                x = [item[0] for item in buy_signals],
                y = [item[1]*1.02 for item in buy_signals],
                name = "Sell Signals",
                mode = "markers",
            )

            data = [candle, ssma, fsma, buys, sells]

        # style and display
        layout = go.Layout(title = self.symbol)
        fig = go.Figure(data = data, layout = layout)

        plot(fig, filename=self.symbol)
def Main():
    """Entry point: run the 3%-below-SMA buy strategy for BTCUSDT."""
    model = TradingModel("BTCUSDT")
    model.strategy()


if __name__ == '__main__':
    Main()
|
[
"tudor.barbu7@gmail.com"
] |
tudor.barbu7@gmail.com
|
d5aecc30265afc5225221cf01160890cd3124607
|
e31c41ee342d0c07cb134f545a89cdc36b4577d2
|
/tests.py
|
6afd8736483e506604c6dd3f671e386c6f5e91f9
|
[] |
no_license
|
peiyanz/lab23_test-py-2
|
cc3072e301d2e405e438a2e958da87e32ec2f431
|
8bb5fc3ae37e713e6c46b7b2d6e8e9f66ac1c69a
|
refs/heads/master
| 2021-06-10T23:15:11.659163
| 2017-02-03T20:52:31
| 2017-02-03T20:52:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,660
|
py
|
import unittest
from party import app
from model import db, example_data, connect_to_db
class PartyTests(unittest.TestCase):
    """Tests for my party site (no database access)."""

    def setUp(self):
        # Fresh Flask test client per test; TESTING improves error reporting.
        self.client = app.test_client()
        app.config['TESTING'] = True

    def tearDown(self):
        """Do at end of every test."""
        # Session/database cleanup is intentionally disabled for this class;
        # kept as reference for the database-backed tests below.
        # with self.client as c:
        #     with c.session_transaction() as sess:
        #         # sess['email'] = ''
        #         # sess['name'] = ''
        #         del sess['RSVP']
        #         # db.session.close()
        # db.drop_all()

    def test_homepage(self):
        # The homepage should mention the party activities.
        # NOTE(review): comparing str against result.data implies Python 2
        # (result.data is bytes on Python 3) — confirm interpreter version.
        result = self.client.get("/")
        self.assertIn("board games, rainbows, and ice cream sundaes", result.data)

    def test_no_rsvp_yet(self):
        # Without an RSVP in the session we should see the invitation text
        # but NOT the party address.
        result = self.client.get("/", follow_redirects=True)
        self.assertIn("I'm having an after-party! I'd love for you to come!", result.data)
        self.assertNotIn("123 Magic Unicorn Way", result.data)

    def test_rsvp(self):
        # After posting an RSVP the address appears and the RSVP form is gone.
        result = self.client.post("/rsvp",
                                  data={"name": "Jane",
                                        "email": "jane@jane.com"},
                                  follow_redirects=True)
        self.assertNotIn("Please RSVP", result.data)
        self.assertIn("123 Magic Unicorn Way", result.data)
class PartyTestsDatabase(unittest.TestCase):
    """Flask tests that use the database."""

    def setUp(self):
        """Stuff to do before every test."""
        self.client = app.test_client()
        app.config['TESTING'] = True

        # Pre-seed the session so views behave as if the user already RSVPed.
        with self.client.session_transaction() as sess:
            sess['RSVP'] = True
        # self.client.session_transaction()['RSVP'] = True

        # Connect to test database (uncomment when testing database)
        connect_to_db(app, "postgresql:///testdb")

        # Create tables and add sample data (uncomment when testing database)
        db.create_all()
        example_data()

    def tearDown(self):
        """Do at end of every test."""
        # Drop everything so each test starts from a clean schema.
        # (uncomment when testing database)
        db.session.close()
        db.drop_all()

    def test_games(self):
        # The games page should show the game seeded by example_data()
        # and nothing unexpected.
        result = self.client.get("/games", follow_redirects=True)
        self.assertIn("new game", result.data)
        self.assertNotIn("whatever", result.data)
# Allow running this module directly: python tests.py
if __name__ == "__main__":
    unittest.main()
|
[
"no-reply@hackbrightacademy.com"
] |
no-reply@hackbrightacademy.com
|
01d887cb06b9f0c3e3d6f268aad5bd2cabab5e3b
|
3b604fe8f03f25859991cdab37804bcda51a4f18
|
/dublyou/apps/competitions/migrations/0019_auto_20170208_1052.py
|
0943f1c9608da03e38b8e3377f5dc03b26507e4e
|
[] |
no_license
|
dublyou/matchup-games
|
e6238cbca7c30c6d4b4ddd161b84dfd5cc1bbacd
|
07b2db2e7d52ac6590ab55a1a05e6076d8c9d680
|
refs/heads/master
| 2020-03-11T11:10:10.506719
| 2018-04-17T20:41:30
| 2018-04-17T20:41:30
| 129,956,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,036
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-02-08 16:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.10.3, 2017-02-08): adds provenance fields to
    BoxStat, a placing field to MatchupCompetitor, widens two StatMember
    choice sets, and tightens uniqueness constraints.

    Generated migrations are normally left unedited.
    """

    dependencies = [
        ('profile', '0012_auto_20170130_1013'),
        ('competitions', '0018_auto_20170206_2245'),
    ]

    operations = [
        # Track who entered each BoxStat.  default=1 only backfills existing
        # rows; preserve_default=False drops the default afterwards.
        migrations.AddField(
            model_name='boxstat',
            name='added_by',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='profile.PlayerProfile'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='boxstat',
            name='verified',
            field=models.BooleanField(default=False),
        ),
        # Finishing place; nullable because it is unknown until resolution.
        migrations.AddField(
            model_name='matchupcompetitor',
            name='place',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='matchupcompetitor',
            name='matchup',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mc_set', to='competitions.Matchup'),
        ),
        migrations.AlterField(
            model_name='statmember',
            name='child_type',
            field=models.IntegerField(choices=[(0, 'Competitor'), (1, 'PlayerProfile'), (2, 'Team'), (3, 'LeaguePlayer')]),
        ),
        migrations.AlterField(
            model_name='statmember',
            name='parent_type',
            field=models.IntegerField(choices=[(0, 'Competition'), (1, 'Matchup'), (2, 'Series'), (3, 'Tournament'), (4, 'Season'), (5, 'Olympics'), (6, 'MatchupCompetitor')]),
        ),
        # One stat per (member, name, reporter); one row per matchup competitor.
        migrations.AlterUniqueTogether(
            name='boxstat',
            unique_together=set([('stat_member', 'stat_name', 'added_by')]),
        ),
        migrations.AlterUniqueTogether(
            name='matchupcompetitor',
            unique_together=set([('matchup', 'competitor')]),
        ),
    ]
|
[
"jgriff@Calebs-MacBook-Pro.local"
] |
jgriff@Calebs-MacBook-Pro.local
|
13e77167020da45609f0cd75bedd565b03cd3509
|
426b80c3c966d3b3734d2929bf9a9854311d4867
|
/arl-python/arl/imaging/facets.py
|
4ed2aee58e31c050ea54c6b6917712f9230bf269
|
[
"Apache-2.0"
] |
permissive
|
Song655/arlo
|
7dcbfd5ff304a525a749fc8f22726a568e92459b
|
cee1613d4a2b2e1263da9d5b4b9930eef569509c
|
refs/heads/master
| 2020-04-19T21:42:53.614248
| 2019-02-19T04:00:53
| 2019-02-19T04:00:53
| 168,449,766
| 0
| 0
|
Apache-2.0
| 2019-01-31T02:31:29
| 2019-01-31T02:31:29
| null |
UTF-8
|
Python
| false
| false
| 2,353
|
py
|
""" The wide field imaging equation can be approximated by partitioning the image plane into small regions, treating each separately and then glueing the resulting partitions into one image. We call this image plane partitioning image plane faceting.
.. math::
V(u,v,w) = \\sum_{i,j} \\frac{1}{\\sqrt{1- l_{i,j}^2- m_{i,j}^2}} e^{-2 \\pi j (ul_{i,j}+um_{i,j} + w(\\sqrt{
1-l_{i,j}^2-m_{i,j}^2}-1))} \\int I(\\Delta l, \\Delta m) e^{-2 \\pi j (u\\Delta l_{i,j}+u \\Delta m_{i,j})} dl dm
"""
import numpy
from arl.data.data_models import Visibility, Image
from arl.imaging.base import predict_2d_base, invert_2d_base
from arl.image.iterators import image_raster_iter
from arl.imaging.iterated import predict_with_raster_iterator, invert_with_raster_iterator
import logging
log = logging.getLogger(__name__)
def predict_facets(vis: Visibility, model: Image, predict_function=predict_2d_base, **kwargs) -> Visibility:
    """Facet-based prediction: iterate over image rasters and apply
    *predict_function* to each facet.

    :param vis: Visibility to be predicted (works in place)
    :param model: model image
    :param predict_function: prediction function applied per facet
        (default predict_2d_base; nesting is allowed)
    :return: the predicted visibility
    """
    log.info("predict_facets: Predicting by image facets")
    result = predict_with_raster_iterator(
        vis, model,
        image_iterator=image_raster_iter,
        predict_function=predict_function,
        **kwargs)
    return result
def invert_facets(vis: Visibility, im: Image, dopsf=False, normalize=True, invert_function=invert_2d_base, **kwargs) \
        -> (Image, numpy.ndarray):
    """Facet-based inversion: iterate over image rasters and apply
    *invert_function* to each facet.

    :param vis: Visibility to be inverted
    :param im: image template (not changed)
    :param dopsf: make the PSF instead of the dirty image
    :param normalize: normalize by the sum of weights (True)
    :param invert_function: inversion function applied per facet
        (default invert_2d_base; nesting is allowed)
    :return: resulting image[nchan, npol, ny, nx], sum of weights[nchan, npol]
    """
    log.info("invert_facets: Inverting by image facets")
    result = invert_with_raster_iterator(
        vis, im,
        normalize=normalize,
        image_iterator=image_raster_iter,
        dopsf=dopsf,
        invert_function=invert_function,
        **kwargs)
    return result
|
[
"chrisyoung_yrk@163.com"
] |
chrisyoung_yrk@163.com
|
e798a6e4262bf870dc3d3512db94aba356ee540c
|
d786fe979eef20b0538f3e9fecedc8d021b55a17
|
/cubeRoot-example/cubeRoot.py
|
e7462b2ba7866a205a468f891cff86d86463e48c
|
[] |
no_license
|
Waylonryan/mis3640
|
962ae36080de08d3ada9971c4529808cb718c072
|
5182a8fbe12a2e0116f61d9e541311537909ed07
|
refs/heads/master
| 2020-04-04T06:57:10.342733
| 2018-10-30T21:19:56
| 2018-10-30T21:19:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
def cube_root(x):
    """Return the integer cube root of *x* if x is a perfect cube, else None.

    Handles negative integers too (resolves the old to-do): the cube root of
    a negative perfect cube is negative, e.g. cube_root(-27) == -3.
    """
    negative = x < 0
    target = -x if negative else x
    ans = 0
    # Linear search upward until ans**3 reaches |x|.
    while ans ** 3 < target:
        ans = ans + 1
    if ans ** 3 != target:
        return None
    return -ans if negative else ans


def main():
    """Prompt for an integer and report its cube root, if any."""
    x = int(input('Enter an integer: '))
    root = cube_root(x)
    if root is None:
        print(str(x) + ' is not a perfect cube')
    else:
        print('Cube root of ' + str(x) + ' is ' + str(root))


# Guarded so importing this module does not block on input().
if __name__ == '__main__':
    main()
|
[
"zli@babson.edu"
] |
zli@babson.edu
|
cdfbedf06247d69bc7d9bd3e665f2d5b323587d5
|
a8f3204139d7f68c23bd8411b8594899ba792e79
|
/sequana/scripts/vcf_filter.py
|
55781d1ce859c85b9ef780f5ca4131234c5cb118
|
[
"BSD-3-Clause"
] |
permissive
|
switt4/sequana
|
874189c869ccc07a592c0a6a3c77999adcabe025
|
7bd4f32607d62bebfd709628abc25bfda504761b
|
refs/heads/master
| 2023-02-13T13:06:26.021426
| 2020-12-01T14:49:02
| 2020-12-01T14:49:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,617
|
py
|
# -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Extract head of a zipped or unzipped FastQ file"""
from sequana.vcf_filter import VCF
import sys
import argparse
from sequana.scripts.tools import SequanaOptions
from sequana import logger
from easydev.console import purple
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
                      argparse.RawDescriptionHelpFormatter):
    # Combine default-value display with raw (pre-wrapped) description text
    # in --help output.
    pass
# Help epilog shown after the argument list; purple() colours it on ANSI
# terminals.
epilog = purple("""
----
AUTHORS: Thomas Cokelaer
Documentation: http://sequana.readthedocs.io
Issues: http://github.com/sequana/sequana
""")
class Options(argparse.ArgumentParser, SequanaOptions):
    """Argument parser for sequana_vcf_filter.

    Combines argparse.ArgumentParser with the SequanaOptions mixin (which
    contributes add_version).
    """

    def __init__(self, prog="sequana_vcf_filter"):
        # The usage text doubles as user documentation of the --filter
        # mini-language (comparison operators, ranges, list indexing).
        usage = """%s Only for VCF using mpileup version 4.1 for now\n""" % prog
        usage += """usage2: %s vcf_filter""" % prog
        usage += """Examples:
sequana_vcf_filter --input test.vcf --quality 40
--filter "AF1>0.95&AF1<0.05"
--filter "MQ<30"
You should look into the VCF file to figure out the valid TAGs. Then, you
can apply various filters.
A filter should be interpreted as :
''filter out variants that agree with the filter''
For example::
--filter "DP<30"
means ''remove SNPs with DP below 30''. We accept those types of comparison:
DP<30
DP<=30
DP>30
DP>=30
For some tags, you want to keep values within or outside a range of
values. You can then use the & and | characters::
DP<30|>60 # to keep only values in the ranges [0-30] and [60-infinite]
or
DP>30&<60 # to keep only values in the range [30-60]
Some tags stores a list of values. For instance DP4 contains 4 values.
To filter the value at position 1, use e.g.::
DP4[0]<0.5
you can use the same convention for the range as above::
DP4[0]>0.05&<0.95
you may also need something like:
sum(DP4[2]+DP4[3]) <2
Note that you must use quotes to surround the filter values.
"""
        super(Options, self).__init__(usage=usage, prog=prog,
                                      epilog=epilog,
                                      formatter_class=CustomFormatter)

        # Input file.
        # NOTE(review): the help text says "fastq" but this script filters a
        # VCF — looks like a copy/paste from another script; confirm.
        self.add_argument("--input", dest='input_filename', type=str,
                          required=True, help="input fastq gzipped or not")
        # Global quality threshold; -1 disables it.
        self.add_argument("--quality", dest="quality",
                          type=int, default=-1, help="filter sites below this quality")
        # Optional built-in filters.
        self.add_argument("--apply-indel-filter", dest="apply_indel_filter",
                          action="store_true", help="remove INDELs")
        self.add_argument("--apply-dp4-filter", dest="apply_dp4_filter",
                          action="store_true",
                          help="apply DP4 filters. see online doc, vcf_filter section")
        self.add_argument("--apply-af1-filter", dest="apply_af1_filter",
                          action="store_true",
                          help="apply AF1 filters. see online doc, vcf_filter section")
        # Thresholds used by the AF1/DP4 filters above.
        self.add_argument("--minimum-af1", dest="minimum_af1",
                          type=float, default=0.95, help="default to 0.95")
        self.add_argument("--minimum-ratio", dest="minimum_ratio",
                          type=float, default=0.75, help="default to 0.75")
        self.add_argument("--minimum-depth", dest="minimum_depth",
                          type=float, default=4, help="default to 4")
        self.add_argument("--minimum_depth-strand", dest="minimum_depth_strand",
                          type=float, default=2, help="default to 2")
        # Free-form filters; nargs=1 means each value arrives as a 1-element
        # list (main() unwraps them).
        self.add_argument("--filter", dest="filter", action="append",
                          nargs=1, type=str, default=[],
                          help="Provide as many filters as you want. See example above ")
        # Output files for kept and rejected variants.
        self.add_argument("--output", dest="output_filename",
                          default="remaining.vcf", type=str)
        self.add_argument("--output-filtered", dest="output_filtered_filename",
                          default="filtered.vcf", type=str)
        # Logging verbosity.
        self.add_argument('--level', dest="level",
                          default="INFO", choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"])
        self.add_version(self)
def main(args=None):
    """Entry point for the ``sequana_vcf_filter`` script.

    Parses command-line options, configures the VCF filter accordingly,
    runs it and returns the filtering result.

    :param args: full argv-style list (defaults to sys.argv)
    :return: result of ``filter_vcf``
    """
    if args is None:
        args = sys.argv[:]

    print("Welcome to sequana_vcf_filter")
    user_options = Options(prog="sequana_vcf_filter")

    if "--version" in args:
        import sequana
        print(sequana.version)
        sys.exit()
    elif len(args) == 1 or "--help" in args:
        user_options.parse_args(["prog", "--help"])
    elif len(args) == 2:
        # A single positional argument is treated as the input filename.
        # Route it through the real parser so every other option receives its
        # default; the previous ad-hoc options object had only
        # input_filename set and crashed on the first attribute read below.
        options = user_options.parse_args(["--input", args[1]])
    else:
        options = user_options.parse_args(args[1:])

    # set the logging level requested on the command line
    logger.level = options.level

    vcf = VCF(options.input_filename)
    try:
        vcf.vcf.filter_dict['QUAL'] = options.quality
    except (AttributeError, TypeError):
        # This VCF flavour has no usable filter_dict yet; create one.
        # (Narrowed from a bare except that also swallowed KeyboardInterrupt.)
        vcf.vcf.filter_dict = {}
        vcf.vcf.filter_dict['QUAL'] = options.quality

    vcf.vcf.apply_indel_filter = options.apply_indel_filter
    vcf.vcf.apply_dp4_filter = options.apply_dp4_filter
    vcf.vcf.apply_af1_filter = options.apply_af1_filter
    vcf.vcf.dp4_minimum_depth = options.minimum_depth
    vcf.vcf.dp4_minimum_depth_strand = options.minimum_depth_strand
    vcf.vcf.dp4_minimum_ratio = options.minimum_ratio
    vcf.vcf.minimum_af1 = options.minimum_af1

    vcf.vcf.filter_dict['INFO'] = {}
    vcf.vcf.filter_dict['QUAL'] = options.quality

    # Each --filter value arrives as a one-element list (nargs=1).  Split
    # "TAG<op>VALUE" on the first comparison character found; the remainder
    # of a compound operator such as ">=" survives inside the value part.
    for this in options.filter:
        this = this[0]
        signs = [">", "<", ">=", "<="]
        for sign in signs:
            if sign in this:
                key, value = this.split(sign, 1)
                key = key.strip()
                value = sign.strip() + value.strip()
                vcf.vcf.filter_dict['INFO'][key] = value
                break

    logger.info(vcf.vcf.filter_dict)

    res = vcf.vcf.filter_vcf(options.output_filename,
                             output_filtered=options.output_filtered_filename)
    print()
    return res
|
[
"cokelaer@gmail.com"
] |
cokelaer@gmail.com
|
bb7deba4e4d085d42507355673c51bbf2f4a36de
|
c1bd12405d244c5924a4b069286cd9baf2c63895
|
/azure-mgmt-batchai/azure/mgmt/batchai/models/jobs_list_by_resource_group_options_py3.py
|
befaa301bc381da6dd25ca329eb37d1a97dd8402
|
[
"MIT"
] |
permissive
|
lmazuel/azure-sdk-for-python
|
972708ad5902778004680b142874582a284a8a7c
|
b40e0e36cc00a82b7f8ca2fa599b1928240c98b5
|
refs/heads/master
| 2022-08-16T02:32:14.070707
| 2018-03-29T17:16:15
| 2018-03-29T17:16:15
| 21,287,134
| 1
| 3
|
MIT
| 2019-10-25T15:56:00
| 2014-06-27T19:40:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,527
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobsListByResourceGroupOptions(Model):
    """Additional parameters for list_by_resource_group operation.

    :param filter: An OData $filter clause. Used to filter results that are
     returned in the GET response.
    :type filter: str
    :param select: An OData $select clause. Used to select the properties to
     be returned in the GET response.
    :type select: str
    :param max_results: The maximum number of items to return in the response.
     A maximum of 1000 files can be returned. Default value: 1000 .
    :type max_results: int
    """

    # Empty 'key' entries: these values are sent as query options rather than
    # serialized into a request body.
    _attribute_map = {
        'filter': {'key': '', 'type': 'str'},
        'select': {'key': '', 'type': 'str'},
        'max_results': {'key': '', 'type': 'int'},
    }

    def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, **kwargs) -> None:
        super(JobsListByResourceGroupOptions, self).__init__(**kwargs)
        self.filter = filter
        self.select = select
        self.max_results = max_results
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
71244379010bf6cc5b3ea22e05ebe42ed3e01507
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Apps/GUI Calculator/App.py
|
567e8dad37e33764bda12b5b45f2a6eb961c6b50
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:4c907f593448d1cbfbc11948da94ab550ae20556e795a8b0fe1fb45fc9d68ac7
size 2810
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
74de3618e035512437ee47362e4808c6dc173b16
|
929a816fc299959d0f8eb0dd51d064be2abd6b78
|
/LeetCode/easy - Binary Tree/226. Invert Binary Tree/.ipynb_checkpoints/recursion-checkpoint.py
|
b1490441905a8905dc5b84ba3b028b65a00c088d
|
[
"MIT"
] |
permissive
|
vincent507cpu/Comprehensive-Algorithm-Solution
|
27940da7bc0343921930a2eafbd649da93a5395d
|
04e01e49622457f09af2e1133954f043c0c92cb9
|
refs/heads/master
| 2023-07-20T07:12:15.590313
| 2021-08-23T23:42:17
| 2021-08-23T23:42:17
| 258,644,691
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def invertTree(self, root: TreeNode) -> TreeNode:
        """Mirror the tree in place and return its root.

        Iterative version: every node reachable from *root* gets its left
        and right children swapped.
        """
        if not root:
            return root
        pending = [root]
        while pending:
            node = pending.pop()
            node.left, node.right = node.right, node.left
            if node.left:
                pending.append(node.left)
            if node.right:
                pending.append(node.right)
        return root
|
[
"vincent507cpu@gmail.com"
] |
vincent507cpu@gmail.com
|
7873de2b4692d3ff9b1e48c09365bc144c04070e
|
53ba0b6f172abcade631ae1f52852c400302559e
|
/python_developer_tools/cv/bases/attentions/SimAM-master/mmdetection/configs/faster_rcnn/faster_rcnn_r101simam_fpn_1x_coco.py
|
4f303a3883c2031e6b27de79add6502d76aa9e6a
|
[
"Apache-2.0"
] |
permissive
|
sssssshf/python_developer_tools
|
f97c64ee0aa0a7e9d31d173192805771c83abb7f
|
44d2e67a2e2495a12d6b32da12c76cf0010ac7ea
|
refs/heads/main
| 2023-08-19T02:44:53.536200
| 2021-10-13T02:10:19
| 2021-10-13T02:10:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
# mmdetection config: derive from the ResNet-50 SimAM Faster R-CNN baseline,
# swapping in a ResNet-101 backbone and its SimAM-pretrained checkpoint.
_base_ = './faster_rcnn_r50simam_fpn_1x_coco.py'
model = dict(pretrained='checkpoints/simam-net/resnet101.pth.tar', backbone=dict(depth=101))
|
[
"zengxh@chint.com"
] |
zengxh@chint.com
|
0e4a97c2bc6c7e6c2cfcd660c9c7457f9b9f3029
|
59fcc364f9a07aa7047824c66645ee406ea8aed4
|
/tagger.py
|
ae7402a3284fee0e57f476bcedde92de0efcffb7
|
[] |
no_license
|
vaastav/VA102
|
b8f1db3802320c9fd1c83788c0d0b027072439bf
|
78137946a0325155f7a7cc26b894e79f66c97a00
|
refs/heads/master
| 2016-08-08T23:47:33.815551
| 2016-03-27T02:22:00
| 2016-03-28T01:36:00
| 47,371,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,208
|
py
|
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag, map_tag
import nltk
import csv
# Universal POS tags that mark a token as a potential keyword.
keywordTags = ['ADJ','ADV','NOUN','NUM','VERB']

def isKeywordTag(tag):
    """Return True when *tag* is one of the keyword-bearing POS categories."""
    return any(tag == keyword for keyword in keywordTags)
def find_keywords(review):
    """Tokenise *review*, POS-tag it, and return the words whose simplified
    (universal) tag is a keyword category (adjective/adverb/noun/num/verb).

    NOTE(review): review.decode("utf8") implies *review* is a Python 2 byte
    string — confirm interpreter version before porting.
    """
    text = word_tokenize(review.decode("utf8"))
    tagged_review = pos_tag(text)
    # Map fine-grained Penn Treebank tags down to the coarse universal set.
    simplified_tagged_review = [(word,map_tag('en-ptb','universal',tag)) for word, tag in tagged_review]
    keywords = []
    for word,tag in simplified_tagged_review:
        if isKeywordTag(tag):
            keywords += [word]
    return keywords
def readfile(filename):
    """Read reviews from *filename* and write 'keyworded_items.csv': a copy
    of the input with a trailing "Keywords" column holding the space-joined
    keywords of each row's Cleaned_Review.
    """
    # First pass: collect one keyword list per review.
    inf = open(filename,'rU')
    filereader = csv.DictReader(inf,delimiter=',')
    reviews = [row["Cleaned_Review"] for row in filereader]
    keywords = []
    for review in reviews:
        lok = [word.encode("utf8") for word in find_keywords(review)]
        keywords += [lok]
    inf.close()
    # Second pass: re-read the raw rows and append the keyword column.
    # NOTE(review): 'rU'/'rb'/'wb' file modes with the csv module are
    # Python 2 idioms; this function needs newline=''/text mode on Python 3.
    with open(filename,'rb') as fin, open('keyworded_items.csv','wb') as fout:
        reader = csv.reader(fin,lineterminator='\n')
        writer = csv.writer(fout,lineterminator='\n')
        # Copy the header and add the new column name.
        writer.writerow(next(reader) + ["Keywords"])
        for row,val in zip(reader,keywords):
            writer.writerow(row + [' '.join(val)])
def main():
    """Extract keywords from the cleaned review file."""
    source = "cleaned_items.csv"
    readfile(source)


if __name__ == '__main__':
    main()
|
[
"vaastav.anand05@gmail.com"
] |
vaastav.anand05@gmail.com
|
94f8efdcedc3b47468b0d18a9b274757ad8ba2c1
|
e638e9fda0e672fa9a414515d0c05a24ab55ad38
|
/GroupAnagrams.py
|
233b7f261dff5493b1a06428b1859c3ffd2a8664
|
[] |
no_license
|
zjuzpz/Algorithms
|
8d1c7d50429aa5540eb817dc5495a20fc3f11125
|
2df1a58aa9474f2ecec2ee7c45ebf12466181391
|
refs/heads/master
| 2021-01-21T05:55:48.768728
| 2020-08-04T22:44:08
| 2020-08-04T22:44:08
| 44,586,024
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
"""
49. Group Anagrams
Given an array of strings, group anagrams together.
For example, given: ["eat", "tea", "tan", "ate", "nat", "bat"],
Return:
[
["ate", "eat","tea"],
["nat","tan"],
["bat"]
]
Note:
For the return value, each inner list's elements must follow the lexicographic order.
All inputs will be in lower-case.
"""
# O(nlogm) m is the max size of groups
# O(n)
class Solution(object):
    def groupAnagrams(self, strs):
        """
        :type strs: List[str]
        :rtype: List[List[str]]

        Group words that are anagrams of each other (same sorted-letter
        signature).  Each group is returned sorted lexicographically; empty
        strings form a single group appended last.
        """
        empties = []
        groups = {}
        for word in strs:
            if word == "":
                empties.append("")
                continue
            signature = "".join(sorted(word))
            groups.setdefault(signature, []).append(word)
        # dict preserves insertion order, matching first-seen group order.
        result = [sorted(members) for members in groups.values()]
        if empties:
            result.append(empties)
        return result
# Quick manual check covering duplicates and empty strings.
if __name__ == "__main__":
    strs = ["ac", "", "", "ca"]
    print(Solution().groupAnagrams(strs))
|
[
"zjuzpz@gmail.com"
] |
zjuzpz@gmail.com
|
37f5c70ea8454936494a75f8f31143cebca074ab
|
b9e295b2ee933d134d0526445eac9ac022e39dde
|
/contentcuration/contentcuration/utils/sentry.py
|
aaa3dc10a3b7863e4b023b7159f5b0e8a9509249
|
[
"MIT"
] |
permissive
|
ivanistheone/studio
|
b40fb9ab70e8f23e87013985470a8e44df30fe61
|
47605c51a0b806e580013b2b733f5bf68688aa59
|
refs/heads/develop
| 2021-06-04T01:35:05.589507
| 2020-12-09T00:03:40
| 2020-12-09T00:03:40
| 88,676,886
| 0
| 2
|
MIT
| 2019-04-10T04:43:10
| 2017-04-18T22:44:48
|
Python
|
UTF-8
|
Python
| false
| false
| 210
|
py
|
from django.conf import settings
def report_exception(exception=None):
    """Forward *exception* to Sentry when SENTRY_ACTIVE is truthy in Django
    settings; otherwise do nothing.
    """
    if not getattr(settings, "SENTRY_ACTIVE", False):
        return
    # Imported lazily so sentry_sdk is only required when reporting is on.
    from sentry_sdk import capture_exception
    capture_exception(exception)
|
[
"richard@learningequality.org"
] |
richard@learningequality.org
|
12b14afbdcc78dddb3e1859f8bb22e7a1a08fb43
|
2d4af29250dca8c72b74e190e74d92f1467120a0
|
/TaobaoSdk/Response/TradeContactGetResponse.py
|
fc57445cfc9a3544b5dffda3880e500cfa7b950f
|
[] |
no_license
|
maimiaolmc/TaobaoOpenPythonSDK
|
2c671be93c40cf487c0d7d644479ba7e1043004c
|
d349aa8ed6229ce6d76a09f279a0896a0f8075b3
|
refs/heads/master
| 2020-04-06T03:52:46.585927
| 2014-06-09T08:58:27
| 2014-06-09T08:58:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,132
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:

## @brief Get the contact information of a single order by trade number.
# @author wuliang@maimiaotech.com
# @date 2013-09-22 16:52:43
# @version: 0.0.0

from datetime import datetime
import os
import sys
import time

# Pick the first available JSON encoder: demjson, then simplejson, then the
# stdlib json module.  Raises only if none of the three can be imported.
_jsonEnode = None
try:
    import demjson
    _jsonEnode = demjson.encode
except Exception:
    try:
        import simplejson
    except Exception:
        try:
            import json
        except Exception:
            raise Exception("Can not import any json library")
        else:
            _jsonEnode = json.dumps
    else:
        _jsonEnode = simplejson.dumps


def __getCurrentPath():
    # Directory containing this file, resolved through symlinks.
    return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))

# Make the SDK root (parent directory) importable so Domain.* resolves.
__parentPath = os.path.normpath(os.path.join(__getCurrentPath(), os.path.pardir))
if __parentPath not in sys.path:
    sys.path.insert(0, __parentPath)

from Domain.TradeContact import TradeContact
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">Response: 通过交易号获取单笔订单的联系信息</SPAN>
# <UL>
# </UL>
## @brief Response: contact information of a single trade, fetched by trade number.
class TradeContactGetResponse(object):
    def __init__(self, kargs=None):
        """Build a response object from a decoded JSON payload.

        :param kargs: response payload dict; defaults to an empty dict.
        """
        # Fix: name the class explicitly -- the original used
        # ``super(self.__class__, self)``, which recurses infinitely if this
        # class is ever subclassed.
        super(TradeContactGetResponse, self).__init__()
        ## @brief Return information of the request (status etc.).  Type: dict
        self.responseStatus = None
        ## @brief Raw response body.  Type: str
        self.responseBody = None
        # Gateway-level error code/message and business-level sub code/message.
        self.code = None
        self.msg = None
        self.sub_code = None
        self.sub_msg = None
        ## @brief Contact information of the trade.  Type: TradeContact, Level: Object
        self.contact = None
        # Fix: avoid a mutable default argument; treat None as "empty payload".
        self.__init(kargs if kargs is not None else {})

    def isSuccess(self):
        """Return True when neither an error code nor a sub error code is set."""
        return self.code is None and self.sub_code is None

    def _newInstance(self, name, value):
        """Convert raw JSON *value* into the declared type of field *name*."""
        types = self._getPropertyType(name)
        propertyType = types[0]
        isArray = types[1]
        if propertyType == bool:
            if isArray:
                if not value:
                    return []
                # Array payloads are wrapped in a single-key dict.
                return [x for x in value[value.keys()[0]]]
            else:
                return value
        elif propertyType == datetime:
            # Renamed from ``format`` so the builtin is not shadowed.
            time_format = "%Y-%m-%d %H:%M:%S"
            if isArray:
                if not value:
                    return []
                return [datetime.strptime(x, time_format) for x in value[value.keys()[0]]]
            else:
                return datetime.strptime(value, time_format)
        elif propertyType == str:
            if isArray:
                if not value:
                    return []
                return [x for x in value[value.keys()[0]]]
            else:
                # like taobao.simba.rpt.adgroupbase.get,
                # response.rpt_adgroup_base_list is a json string, but will be
                # decoded into a list via the python json lib
                if not isinstance(value, basestring):
                    # the value should be a json string
                    return _jsonEnode(value)
                return value
        else:
            if isArray:
                if not value:
                    return []
                return [propertyType(x) for x in value[value.keys()[0]]]
            else:
                return propertyType(value)

    def _getPropertyType(self, name):
        """Return ``(python_type, is_array)`` for the field called *name*."""
        properties = {
            "contact": "TradeContact",
        }
        levels = {
            "contact": "Object",
        }
        nameType = properties[name]
        pythonType = None
        if nameType == "Number":
            pythonType = int
        elif nameType == "String":
            pythonType = str
        elif nameType == 'Boolean':
            pythonType = bool
        elif nameType == "Date":
            pythonType = datetime
        elif nameType == 'Field List':
            # Fix: the original wrote ``pythonType == str`` (a no-op
            # comparison), leaving pythonType as None for 'Field List'.
            pythonType = str
        elif nameType == 'Price':
            pythonType = float
        elif nameType == 'byte[]':
            pythonType = str
        else:
            # Fall back to a domain model class, e.g. Domain.TradeContact.
            pythonType = getattr(sys.modules["Domain.%s" % nameType], nameType)
        # Single value vs. array of values.
        level = levels[name]
        if "Array" in level:
            return (pythonType, True)
        else:
            return (pythonType, False)

    def __init(self, kargs):
        # ``in`` instead of the Python-2-only dict.has_key().
        if "contact" in kargs:
            self.contact = self._newInstance("contact", kargs["contact"])
        if "code" in kargs:
            self.code = kargs["code"]
        if "msg" in kargs:
            self.msg = kargs["msg"]
        if "sub_code" in kargs:
            self.sub_code = kargs["sub_code"]
        if "sub_msg" in kargs:
            self.sub_msg = kargs["sub_msg"]
|
[
"chenke@maimiaotech.com"
] |
chenke@maimiaotech.com
|
bc2651b0aa33406897aec6872ca646ab3015a056
|
ebe6a5bf993c80c9d3d55e275c1b8a23c456c83b
|
/abcclassroom/notebook.py
|
aad12aa764d5faec3f06dfca2f379ae0eb6d50f2
|
[
"BSD-3-Clause"
] |
permissive
|
betatim/grading-workflow-experiments
|
fd5d2ec7ea23e460d26644c242034ff11c78a59a
|
b38742547a43f376724fee1e04e540688b1f5019
|
refs/heads/master
| 2020-03-23T22:15:47.125374
| 2018-12-20T23:04:37
| 2018-12-20T23:04:37
| 142,164,781
| 0
| 0
| null | 2018-07-24T13:47:45
| 2018-07-24T13:47:44
| null |
UTF-8
|
Python
| false
| false
| 3,333
|
py
|
import ast
import os
import nbformat
import papermill as pm
from nbclean import NotebookCleaner
from .utils import chdir
try:
from IPython.core.inputsplitter import IPythonInputSplitter
except ImportError:
raise ImportError('IPython needs to be installed for notebook grading')
def split_notebook(notebook, student_path, autograder_path):
    """Split a master notebook into student and autograder notebooks"""
    print('Processing', notebook)

    nb_name = os.path.split(notebook)[1]
    base_name = os.path.splitext(nb_name)[0]

    # Student-facing copy: private tests go to the autograder directory,
    # public tests to the student directory, then the solution blocks are
    # blanked out before the notebook itself is saved for students.
    cleaner = NotebookCleaner(notebook)
    cleaner.create_tests(tag='private',
                         oktest_path=base_name,
                         base_dir=autograder_path)
    cleaner.create_tests(tag='public',
                         oktest_path=base_name,
                         base_dir=student_path)
    cleaner.replace_text('### BEGIN SOLUTION', '### END SOLUTION')
    cleaner.save(os.path.join(student_path, nb_name))

    # Autograder copy: both private and public test files live together.
    cleaner = NotebookCleaner(notebook)
    cleaner.create_tests(tag='private',
                         oktest_path=base_name,
                         base_dir=autograder_path)
    cleaner.create_tests(tag='public',
                         oktest_path=base_name,
                         base_dir=autograder_path)
def find_check_definition(tree):
    """Walk an AST and check for definitions of a function called `check`

    Return True if one is found, False otherwise.
    """
    return any(
        isinstance(node, ast.FunctionDef) and node.name == 'check'
        for node in ast.walk(tree)
    )
def find_check_assignment(tree):
    """Walk an AST and check for assignments to a variable called `check`

    Return True if one is found, False otherwise.
    """
    for stmt in ast.walk(tree):
        if not isinstance(stmt, ast.Assign):
            continue
        target_names = []
        for target in stmt.targets:
            # Fix: unpacking targets are ast.Tuple/ast.List nodes, never
            # Python tuples, so the original isinstance(target, tuple)
            # branch was dead and `target.id` raised AttributeError on
            # `a, check = ...` (and on attribute/subscript targets).
            if isinstance(target, (ast.Tuple, ast.List)):
                target_names += [elt.id for elt in target.elts
                                 if isinstance(elt, ast.Name)]
            elif isinstance(target, ast.Name):
                target_names.append(target.id)
            # Attribute/subscript targets (obj.check = ...) cannot shadow
            # the bare name `check`, so they are deliberately ignored.
        if 'check' in target_names:
            return True
    return False
def execute_notebook(nb_path):
    """Execute a notebook under grading conditions"""
    graded_nb_path = os.path.splitext(nb_path)[0] + '-graded.ipynb'
    nb_directory = os.path.split(nb_path)[0]

    # Concatenate the transformed source of every code cell so the whole
    # notebook can be scanned at once for tampering with `check`.
    nb = nbformat.read(nb_path, as_version=4)
    source = ""
    for cell in nb.cells:
        if cell.cell_type != "code":
            continue
        splitter = IPythonInputSplitter(line_input_checker=False)
        source += splitter.transform_cell(cell.source)

    tree = ast.parse(source)
    # no points for you if you try and cheat
    # XXX add a check for people importing a function called `check`
    if find_check_assignment(tree) or find_check_definition(tree):
        return

    # run the notebook
    with chdir(nb_directory):
        pm.execute_notebook(nb_path, graded_nb_path)

    return nbformat.read(graded_nb_path, as_version=4)
|
[
"betatim@gmail.com"
] |
betatim@gmail.com
|
8c4d5fd485c84b3512ba5be24c00b3af11158b39
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/costmanagement/azure-mgmt-costmanagement/generated_samples/generate_cost_details_report_by_subscription_and_time_period.py
|
fa959fac431751278f479b18f052ba906ad83b60
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,723
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.costmanagement import CostManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-costmanagement
# USAGE
python generate_cost_details_report_by_subscription_and_time_period.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    # DefaultAzureCredential picks up the AZURE_* environment variables (or a
    # managed identity) documented in the module docstring above.
    client = CostManagementClient(
        credential=DefaultAzureCredential(),
    )

    # Long-running operation: generate a cost details report for a fixed
    # subscription scope and a two-week window, then block until it finishes.
    response = client.generate_cost_details_report.begin_create_operation(
        scope="subscriptions/00000000-0000-0000-0000-000000000000",
        parameters={"metric": "ActualCost", "timePeriod": {"end": "2020-03-15", "start": "2020-03-01"}},
    ).result()
    print(response)


# x-ms-original-file: specification/cost-management/resource-manager/Microsoft.CostManagement/stable/2022-10-01/examples/GenerateCostDetailsReportBySubscriptionAndTimePeriod.json
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
c664c730762bad69858a72e5f797d0714ba2e6da
|
f52997ac7e1b41f34018c3a0028ced8638072b2b
|
/src/extended_search/backends/backend.py
|
aed89d730ad7c0b675eb42d9352b4874e2ed0828
|
[
"MIT"
] |
permissive
|
uktrade/digital-workspace-v2
|
49fae1fca819b625c6f6949fb5ce51b89fbcab96
|
7e328d0d55c9aa73be61f476823a743d96e792d0
|
refs/heads/main
| 2023-09-03T12:03:47.016608
| 2023-09-01T12:07:55
| 2023-09-01T12:07:55
| 232,302,840
| 6
| 0
|
MIT
| 2023-09-13T15:50:24
| 2020-01-07T10:41:18
|
Python
|
UTF-8
|
Python
| false
| false
| 7,967
|
py
|
from wagtail.search.backends.elasticsearch7 import (
Elasticsearch7SearchBackend,
Elasticsearch7SearchQueryCompiler,
)
from wagtail.search.index import SearchField
from wagtail.search.query import MATCH_NONE, Fuzzy, MatchAll, Phrase, PlainText
from extended_search.backends.query import Nested, OnlyFields
from extended_search.index import RelatedFields
class ExtendedSearchQueryCompiler(Elasticsearch7SearchQueryCompiler):
    """
    Acting as a placeholder for upstream merges to Wagtail in a PR; this class
    doesn't change any behaviour but instead assigns responsibility for
    particular aspects to smaller methods to make it easier to override. In the
    PR maybe worth referencing https://github.com/wagtail/wagtail/issues/5422
    """

    # def __init__(self, *args, **kwargs):
    #     """
    #     This override doesn't do anything, it's just here as a reminder to
    #     modify the underlying class in this way when creating the upstream PR
    #     """
    #     super().__init__(*args, **kwargs)
    #     self.mapping = self.mapping_class(self.queryset.model)
    #     self.remapped_fields = self._remap_fields(self.fields)

    def _remap_fields(self, fields):
        """
        Convert field names into index column names
        """
        if fields is None:
            return None

        remapped_fields = []
        # Only SearchField and RelatedFields entries on the model can be
        # remapped to index columns.
        searchable_fields = {
            f.field_name: f
            for f in self.queryset.model.search_fields
            if isinstance(f, SearchField) or isinstance(f, RelatedFields)
        }
        for field_name in fields:
            if field_name in searchable_fields:
                field_name = self.mapping.get_field_column_name(
                    searchable_fields[field_name]
                )
            else:
                # Dotted names like "related.sub": remap the RelatedFields
                # prefix to its column name and keep the sub-field suffix.
                # Names matching neither case pass through unchanged.
                field_name_parts = field_name.split(".")
                if (
                    len(field_name_parts) == 2
                    and field_name_parts[0] in searchable_fields
                ):
                    field_name = self.mapping.get_field_column_name(
                        searchable_fields[field_name_parts[0]]
                    )
                    field_name = f"{field_name}.{field_name_parts[1]}"

            remapped_fields.append(field_name)
        return remapped_fields

    def _join_and_compile_queries(self, query, fields, boost=1.0):
        """
        Handle a generalised situation of one or more queries that need
        compilation and potentially joining as siblings. If more than one field
        then compile a query for each field then combine with disjunction
        max (or operator which takes the max score out of each of the
        field queries)
        """
        if len(fields) == 1:
            return self._compile_query(query, fields[0], boost)
        else:
            # Compile a query for each field then combine with dismax
            field_queries = []
            for field in fields:
                field_queries.append(self._compile_query(query, field, boost))
            return {"dis_max": {"queries": field_queries}}

    def get_inner_query(self):
        """
        This is a brittle override of the Elasticsearch7SearchQueryCompiler.
        get_inner_query, acting as a standin for getting these changes merged
        upstream. It exists in order to break out the _join_and_compile_queries
        method
        """
        if self.remapped_fields:
            fields = self.remapped_fields
        else:
            # No explicit field list: search the "_all_text" proxy field.
            fields = [self.mapping.all_field_name]

        if len(fields) == 0:
            # No fields. Return a query that'll match nothing
            # NOTE(review): "mustNot" mirrors the upstream Wagtail compiler;
            # the Elasticsearch DSL spells it "must_not" — confirm upstream.
            return {"bool": {"mustNot": {"match_all": {}}}}

        # Handle MatchAll and PlainText separately as they were supported
        # before "search query classes" was implemented and we'd like to
        # keep the query the same as before
        if isinstance(self.query, MatchAll):
            return {"match_all": {}}

        elif isinstance(self.query, PlainText):
            return self._compile_plaintext_query(self.query, fields)

        elif isinstance(self.query, Phrase):
            return self._compile_phrase_query(self.query, fields)

        elif isinstance(self.query, Fuzzy):
            return self._compile_fuzzy_query(self.query, fields)

        else:
            return self._join_and_compile_queries(self.query, fields)
class OnlyFieldSearchQueryCompiler(ExtendedSearchQueryCompiler):
    """
    Acting as a placeholder for upstream merges to Wagtail in a separate PR to
    the ExtendedSearchQueryCompiler; this exists to support the new OnlyFields
    SearchQuery
    """

    def _compile_query(self, query, field, boost=1.0):
        """
        Override the parent method to handle specifics of the OnlyFields
        SearchQuery, and allow boosting of Fuzzy and Phrase queries
        """
        if not isinstance(query, (Fuzzy, Phrase, OnlyFields, Nested)):
            # Anything else is handled by the stock compiler.
            return super()._compile_query(query, field, boost)

        # Overrides the existing functionality only to be able to pass Boost
        # values to Fuzzy and Phrase types as well as PlainText
        if isinstance(query, Fuzzy):
            return self._compile_fuzzy_query(query, [field], boost)
        elif isinstance(query, Phrase):
            return self._compile_phrase_query(query, [field], boost)

        # Handle Nested fields for RelatedFields on models
        elif isinstance(query, Nested):
            return self._compile_nested_query(query, [field], boost)

        # Handle OnlyFields
        remapped_fields = self._remap_fields(query.fields)

        # Handle RelatedFields passing a list at this point
        if isinstance(field, list) and len(field) == 1:
            field = field[0]

        if field == self.mapping.all_field_name:
            # We are using the "_all_text" field proxy (i.e. the search()
            # method was called without the fields kwarg), but now we want to
            # limit the downstream fields compiled to those explicitly defined
            # in the OnlyFields query
            return self._join_and_compile_queries(
                query.subquery, remapped_fields, boost
            )

        elif field in remapped_fields:
            # Fields were defined explicitly upstream, and we are dealing with
            # one that's in the OnlyFields filter
            return self._compile_query(query.subquery, field, boost)

        else:
            # Exclude this field from any further downstream compilation: it
            # was defined in the search() method but has been excluded from
            # this part of the tree with an OnlyFields filter
            return self._compile_query(MATCH_NONE, field, boost)

    def _compile_fuzzy_query(self, query, fields, boost=1.0):
        """
        Support boosting
        """
        # NOTE(review): boosting assumes the parent produced a single-field
        # {"match": ...} query — confirm for multi-field fuzzy queries.
        match_query = super()._compile_fuzzy_query(query, fields)
        if boost != 1.0:
            match_query["match"][fields[0]]["boost"] = boost
        return match_query

    def _compile_phrase_query(self, query, fields, boost=1.0):
        """
        Support boosting
        """
        match_query = super()._compile_phrase_query(query, fields)
        if boost != 1.0:
            # Multi-field phrase queries compile to "multi_match"; the
            # single-field form nests the boost next to the query text.
            if "multi_match" in match_query:
                match_query["multi_match"]["boost"] = boost
            else:
                match_query["match_phrase"][fields[0]] = {
                    "query": match_query["match_phrase"][fields[0]],
                    "boost": boost,
                }
        return match_query

    def _compile_nested_query(self, query, fields, boost=1.0):
        """
        Add OS DSL elements to support Nested fields
        """
        return {
            "nested": {
                "path": query.path,
                "query": self._compile_query(query.subquery, fields, boost),
            }
        }
class CustomSearchBackend(Elasticsearch7SearchBackend):
    # Swap in the compiler that understands OnlyFields/Nested queries.
    query_compiler_class = OnlyFieldSearchQueryCompiler


# Wagtail discovers the backend via the module-level ``SearchBackend`` name.
SearchBackend = CustomSearchBackend
|
[
"noreply@github.com"
] |
uktrade.noreply@github.com
|
b6e028147abb5f242c4fbbc9df615e138999ea5a
|
bba618de189d579c1cc6026a94e0734dc8b89330
|
/data/data_cluster.py
|
13df72e75b0e45451decf75fcde1cc7b5f5b0011
|
[] |
no_license
|
Tetuwo181/KMeans
|
9f9ab5b04a6d1d730f8db5a40ab99284e82ec444
|
7fbf62665af79e58df5e8d5bc882ded59f39938a
|
refs/heads/master
| 2020-03-28T17:22:03.748501
| 2018-09-14T12:02:43
| 2018-09-14T12:02:43
| 148,782,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,581
|
py
|
import numpy as np
from typing import Union
from typing import Tuple
from typing import Optional
from typing import Callable
from typing import List
Position = Union[np.ndarray, Tuple]


class AbsData(object):
    """Base class for objects that carry a coordinate vector."""

    def __init__(self, position: Position):
        """
        :param position: coordinate of the data point
        """
        self.__position = np.array(position)

    @property
    def position(self):
        """Read-only coordinate array."""
        return self.__position
class Data(AbsData):
    """A data point together with its cluster assignments."""

    def __init__(self, position: Position, cluster: Optional[int] = None, cluster_real: Optional[int] = None):
        """
        :param position: coordinate of the data point
        :param cluster: cluster the point was assigned to (mutable)
        :param cluster_real: ground-truth cluster from the original data
        """
        self.cluster = cluster
        self.__cluster_real = cluster_real
        # Fix: the original called ``super.__init__(position)`` (missing call
        # parentheses), which raises TypeError instead of initialising the
        # base class.
        super().__init__(position)

    @property
    def cluster_real(self):
        return self.__cluster_real
class ClusterCentral(AbsData):
    """The centre point of a cluster."""

    def __init__(self, position: Position, cluster: int):
        """
        :param position: coordinates of the cluster centre
        :param cluster: cluster identifier
        """
        self.__cluster = cluster
        # Fix: the original called ``super.__init__(position)`` (missing call
        # parentheses), which raises TypeError instead of initialising the
        # base class.
        super().__init__(position)

    @property
    def cluster(self):
        return self.__cluster
# Inputs may be either a domain object carrying a ``position`` attribute or a
# bare numpy coordinate array.
DataInput = Union[AbsData, np.ndarray]
# Signature of a distance metric over two coordinate arrays.
FuncDistance = Callable[[DataInput, DataInput], float]
def convert_data(raw_data: "DataInput") -> np.ndarray:
    """Return the coordinate array behind *raw_data*.

    Accepts either a bare ``np.ndarray`` (returned unchanged) or any object
    exposing a ``position`` attribute (e.g. ``AbsData``).
    """
    # Fix: isinstance instead of ``type(...) is np.ndarray`` so ndarray
    # subclasses are passed through as well.
    if isinstance(raw_data, np.ndarray):
        return raw_data
    return raw_data.position
def get_distance(calc_distance: FuncDistance,
                 raw_data1: DataInput,
                 raw_data2: DataInput)->float:
    """Compute the distance between two data points or raw coordinates.

    :param calc_distance: distance function over coordinate arrays
    :param raw_data1: first data point (or coordinate array)
    :param raw_data2: second data point (or coordinate array)
    :return: distance between the two points
    """
    return calc_distance(convert_data(raw_data1), convert_data(raw_data2))
def init_calc_distance(calc_distance: FuncDistance)->FuncDistance:
    """Bind a distance metric ahead of time.

    :param calc_distance: distance function over coordinate arrays
    :return: function mapping two data points/coordinates to their distance
    """
    def _distance(data1, data2):
        return get_distance(calc_distance, data1, data2)
    return _distance
|
[
"hamakaze181and189amarube@gmail.com"
] |
hamakaze181and189amarube@gmail.com
|
d0202a7b6d83f2e2b9581177ccc335588b29f1e4
|
7d1fec7f929c0d80707a8f46a5379abecb513ad8
|
/crab_horizon_movement.py
|
a3e02b8fbcc5d67816798a218ddb17d6fbd70beb
|
[] |
no_license
|
francescofilippini6/Sky_Coverage
|
4cacdc8f2963d16c61c315772f311e6cc1f12e63
|
963d0fe18332eb6d0093c36402371b5f74655e82
|
refs/heads/main
| 2023-03-11T19:25:49.347341
| 2021-02-26T08:40:43
| 2021-02-26T08:40:43
| 326,218,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
import math as m
import numpy as np
import csv
import matplotlib.pyplot as plt
import pandas as pd
import astropy.coordinates as coord
import astropy.units as u
from astropy.io import ascii
from astropy.coordinates import SkyCoord
from astropy.coordinates import SkyCoord, EarthLocation
from astropy import coordinates as coord
from astropy.coordinates.tests.utils import randomly_sample_sphere
from astropy.time import Time
from astropy import units as u
import numpy as np
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
# Sampling times: a ±5 hour window around "now", 300 steps.
times = Time.now() + np.linspace(-5, 5, 300)*u.hour
# Observer site from geocentric coordinates in metres.
lapalma = EarthLocation.from_geocentric(5327448.9957829, -1718665.73869569, 3051566.90295403, unit='m')
# NOTE(review): aa_frame (and the commented-out aa_coos) are never used below.
aa_frame = coord.AltAz(obstime=times[:, np.newaxis], location=lapalma)
#aa_coos = coos.transform_to(aa_frame)
# A 6-hour observation window from 2010-01-01T20:00, sampled 10000 times.
obstime = Time('2010-01-01T20:00') + np.linspace(0, 6, 10000) * u.hour
location = EarthLocation(lon=-17.89 * u.deg, lat=28.76 * u.deg, height=2200 * u.m)
frame = AltAz(obstime=obstime, location=location)
# Crab nebula (M1) equatorial coordinates.
crab = SkyCoord(ra='05h34m31.94s', dec='22d00m52.2s')
# Transform to horizontal (alt/az) coordinates for the observer/times above.
crab_altaz = crab.transform_to(frame)
print(len(crab_altaz))
# Plot the alt/az track in an Aitoff projection and save it to disk.
plt.subplot(111, projection='aitoff')
plt.grid(True)
plt.scatter(crab_altaz.az.wrap_at('180d').radian, crab_altaz.alt.radian)
plt.savefig('crab_movement.png')
plt.show()
|
[
"francesco.filippini3@gmail.com"
] |
francesco.filippini3@gmail.com
|
6a342b5c99bf52551cdf9a41ee3935805787d9f3
|
c1a9da6e041c406edaf419338b46d497071a21f2
|
/todo/migrations/0003_alter_todo_is_completed.py
|
58f741a4001e33e16bf101448afed67c4baa85fc
|
[] |
no_license
|
alex1the1great/learn-todo
|
a0016b89cebb8c35aaac2e9872be7cee33f99bb8
|
e2faa1edaac7d450999c7aacb0f335bf40665e89
|
refs/heads/master
| 2023-04-20T21:47:23.012887
| 2021-05-05T13:08:51
| 2021-05-05T13:08:51
| 364,278,364
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
# Generated by Django 3.2.1 on 2021-05-05 06:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2.1; avoid manual edits.

    dependencies = [
        ('todo', '0002_alter_todo_is_completed'),
    ]

    operations = [
        # Re-declare Todo.is_completed as a BooleanField defaulting to False.
        migrations.AlterField(
            model_name='todo',
            name='is_completed',
            field=models.BooleanField(default=False),
        ),
    ]
|
[
"asimshrestha608@gmail.com"
] |
asimshrestha608@gmail.com
|
ef06ee1a907c60dea3b5c90465285f9435c942f6
|
2f5d5149c064dfd18c0456db4326eca8b3cdb843
|
/pqmf.py
|
871405affaee1a846645216bf7716d9611fd1e26
|
[
"MIT"
] |
permissive
|
BridgetteSong/multiband-hifigan
|
a201cebc29d8d7b0d166303e824aca9651134cff
|
718a8117211ceb735c8916cfae8662a68dad8876
|
refs/heads/master
| 2023-03-11T14:59:26.101235
| 2021-03-02T11:44:52
| 2021-03-02T11:44:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,832
|
py
|
import numpy as np
import torch
import torch.nn.functional as F
from scipy import signal as sig
# adapted from
# https://github.com/kan-bayashi/ParallelWaveGAN/tree/master/parallel_wavegan
class PQMF(torch.nn.Module):
    """Pseudo-QMF analysis/synthesis filter bank with N sub-bands.

    The prototype lowpass filter is a Kaiser-windowed FIR design; the
    analysis (H) and synthesis (G) banks are cosine-modulated copies of it.
    """

    def __init__(self, N=4, taps=62, cutoff=0.15, beta=9.0):
        """
        :param N: number of sub-bands
        :param taps: prototype filter order (filter length is taps + 1)
        :param cutoff: normalized cutoff frequency of the prototype filter
        :param beta: Kaiser window shape parameter
        """
        super(PQMF, self).__init__()

        self.N = N
        self.taps = taps
        self.cutoff = cutoff
        self.beta = beta

        QMF = sig.firwin(taps + 1, cutoff, window=('kaiser', beta))
        H = np.zeros((N, len(QMF)))
        G = np.zeros((N, len(QMF)))
        for k in range(N):
            constant_factor = (2 * k + 1) * (np.pi /
                                             (2 * N)) * (np.arange(taps + 1) -
                                                         ((taps - 1) / 2))  # TODO: (taps - 1) -> taps
            phase = (-1)**k * np.pi / 4
            H[k] = 2 * QMF * np.cos(constant_factor + phase)
            G[k] = 2 * QMF * np.cos(constant_factor - phase)
        H = torch.from_numpy(H[:, None, :]).float()
        G = torch.from_numpy(G[None, :, :]).float()

        self.register_buffer("H", H)
        self.register_buffer("G", G)

        # One-hot kernel used by conv_transpose1d to upsample by N
        # (zero insertion) during synthesis.
        updown_filter = torch.zeros((N, N, N)).float()
        for k in range(N):
            updown_filter[k, k, 0] = 1.0
        self.register_buffer("updown_filter", updown_filter)
        # Fix: dropped a redundant second ``self.N = N`` assignment here.
        # NOTE(review): pad_fn is defined but unused within this class —
        # presumably kept for external callers; confirm before removing.
        self.pad_fn = torch.nn.ConstantPad1d(taps // 2, 0.0)

    def forward(self, x):
        """Alias for :meth:`analysis`."""
        return self.analysis(x)

    def analysis(self, x):
        """Split (B, 1, T) audio into (B, N, T // N) sub-band signals."""
        return F.conv1d(x, self.H, padding=self.taps // 2, stride=self.N)

    def synthesis(self, x):
        """Reconstruct (B, 1, T) audio from (B, N, T // N) sub-band signals."""
        x = F.conv_transpose1d(x,
                               self.updown_filter * self.N,
                               stride=self.N)
        x = F.conv1d(x, self.G, padding=self.taps // 2)
        return x
|
[
"rishikksh20@gmail.com"
] |
rishikksh20@gmail.com
|
7a48086a1a8ce564a8b688393160f9619cc2d920
|
bfb6ccbcb2707bca5eb44f2b64c0084aa6561b5a
|
/docs/examples/textbook/stellar_minimal.py
|
0eb338ca801befefe4cf1b76f358451e7f1a3879
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
cgroeneveld/amuse
|
79c8ece558f484df4494609e95274cffd5c37c60
|
9684fd22ce8293b837d2c78f56948e3ec3d04032
|
refs/heads/master
| 2020-08-16T16:44:46.702465
| 2019-10-14T19:16:04
| 2019-10-14T19:16:04
| 215,526,071
| 0
| 0
|
Apache-2.0
| 2019-10-16T10:57:34
| 2019-10-16T10:57:34
| null |
UTF-8
|
Python
| false
| false
| 1,591
|
py
|
"""
Minimal routine for running a stellar evolution code
"""
###BOOKLISTSTART###
from amuse.lab import *
def main(m, z, model_time):
    # Evolve a single star of mass *m* and metallicity *z* with the MESA
    # stellar evolution code until *model_time*, printing luminosity, mass
    # and radius after every 1 Myr step.  (Python 2 print syntax — matches
    # the rest of this textbook example.)
    stellar = MESA()
    stellar.parameters.metallicity = z
    stellar.particles.add_particle(Particle(mass=m))
    # Luminosity at t=0, kept for comparison in the progress printout.
    initial_luminosity = stellar.particles.luminosity
    dt = 1 | units.Myr
    while stellar.model_time < model_time:
        stellar.evolve_model(stellar.model_time+dt)

        print "at T=", stellar.model_time.in_(units.Myr), \
            "L(t=0)=", initial_luminosity, \
            ", L (t=", stellar.particles.age.in_(units.Myr), \
            ")=", stellar.particles.luminosity.in_(units.LSun), \
            ", m=", stellar.particles.mass.in_(units.MSun), \
            ", R=", stellar.particles.radius.in_(units.RSun)

    # Release the worker process started by MESA().
    stellar.stop()
###BOOKLISTSTOP###
###BOOKLISTSTART2###
def new_option_parser():
    # Build the unit-aware option parser for this script: stellar mass (-m),
    # end time (-t) and metallicity (-z).
    from amuse.units.optparse import OptionParser
    result = OptionParser()
    result.add_option("-m", unit=units.MSun,
                      dest="m", type="float", default=1.0|units.MSun,
                      help="stellar mass [%default]")
    result.add_option("-t", unit=units.Myr,
                      dest="model_time", type="float",
                      default=4700.0|units.Myr,
                      help="end time of the simulation [%default]")
    result.add_option("-z", dest="z", type="float",
                      default=0.02, help="metallicity [%default]")
    return result


# '__plot__' is the name used when the textbook build runs this example to
# generate figures.
if __name__ in ('__main__', '__plot__'):
    o, arguments = new_option_parser().parse_args()
    main(**o.__dict__)
###BOOKLISTSTOP2###
|
[
"steven@rieder.nl"
] |
steven@rieder.nl
|
52fc3c13de1c7ab9d2f4226fda0f15568c18f21f
|
b7d01719eb6d5c504747674227df3410b22f75fa
|
/scripts/ibm/get_activation.py
|
aa337733f12853b6f6b01afe8d12ac631e6c42f4
|
[] |
no_license
|
lee212/FaaS-Evaluation
|
c16c0d5b16ae8ac5d45d6cf6638ba60e712094b1
|
38876242553d0bf36e262e91da244fd1bdd55121
|
refs/heads/master
| 2021-09-23T02:36:41.500656
| 2018-09-20T01:41:18
| 2018-09-20T01:41:18
| 103,572,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
import sys
import json
import requests
import os
import argparse
# Base REST endpoint for OpenWhisk activation records (activation id appended).
url = 'https://openwhisk.ng.bluemix.net/api/v1/namespaces/_/activations/'
# NOTE(review): raises KeyError at import time when the variable is unset —
# presumably intentional fail-fast; confirm before softening to .get().
auth_string = os.environ['IBM_OPENWHISK_AUTH_STRING']
def argument_parser():
    """Parse the command line: one positional JSON dump file name."""
    parser = argparse.ArgumentParser("IBM OpenWhisk Activation Results")
    parser.add_argument("fname", help="filename to obtain activation ids")
    return parser.parse_args()
def collect_activation_ids(fname):
    """Collect OpenWhisk activation ids from a JSON result dump.

    Two dump layouts are supported:
    * a dict keyed by activation id -> returns the keys;
    * a list of records, each with a ``result`` dict keyed by activation
      id -> returns all those keys, in order.

    Fixes the original flow, which on a dict input fell through to the
    list loop and indexed the string keys with ``['result']``
    (TypeError), and on Python 3 concatenated ``dict_keys`` with ``+``.
    """
    with open(fname) as f:
        r = json.load(f)
    if isinstance(r, dict):
        return list(r.keys())
    actlist = []
    for record in r:
        actlist.extend(record['result'].keys())
    return actlist
def read_activation_through_rest(actlist):
    """Fetch each activation record via the OpenWhisk REST API.

    Equivalent curl call:
    curl -H 'Authorization: Basic <base64 credentials>' -L '<url><activation id>'
    """
    headers = {'Authorization': auth_string}
    actdict = {}
    for act_id in actlist:
        actdict[act_id] = requests.get(url + act_id, headers=headers).json()
    return actdict
def to_file(fname, data):
    """Write *data* to *fname* as indented JSON."""
    with open(fname, "w") as handle:
        json.dump(data, handle, indent=4)
if __name__ == "__main__":
    args = argument_parser()
    # Resolve activation ids from the dump, fetch each full record over
    # REST, and save everything next to the input as "<fname>.activation".
    actids = collect_activation_ids(args.fname)
    actdict = read_activation_through_rest(actids)
    to_file("{}.activation".format(args.fname), actdict)
|
[
"hroe.lee@gmail.com"
] |
hroe.lee@gmail.com
|
3da489896f1d4b8481f999e4ab87ffaca902aa69
|
dfc2c18053b8e7576f88e7b2524d7ca3a8f47282
|
/ch09/session4/21.py
|
22f613217b0cbb380b591a0d5e7f7acd6804e7a8
|
[] |
no_license
|
Xoozi/tchomework
|
a6eed3bbf697ff12af8d42249ec58a139aed0c4c
|
627c98b0b652ef20fd93025a17341bba76fbfce6
|
refs/heads/master
| 2021-01-23T21:18:15.793703
| 2018-10-21T11:05:55
| 2018-10-21T11:05:55
| 57,583,655
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
# Burning arrow.
# The flame holder in Example 3 is 12 ft in diameter.  Using Equation 5 and
# Example 3c, find how long the burning arrow needs to cross the horizontal
# distance between the rims, and how high it is above the rims.
#
# From Example 3c: v0 cos(alpha) = 90*32/sqrt(68*64).
# Half the crossing time is t/2; the vertical drop over it is -g*(t/2)^2.
from math import sqrt  # fix: sqrt was used without being imported (NameError)

v0h = 90.0*32.0/sqrt(68.0*64)
t = 12.0/v0h
g = 32.15
h = 74-g*(t/2.0)**2
# Parenthesized so the script runs on both Python 2 and Python 3.
print('t:%f, h:%f' % (t, h))
# xoozi: the book's answer of 1.9 s for the crossing time looks wrong;
# this result appears reasonable.
|
[
"wwfxtt@gmail.com"
] |
wwfxtt@gmail.com
|
8809f892e839ecb6296a61b91988e02177d6fd1e
|
ef90992dc00640f42ec615075a9b030b771f81e4
|
/python-machine-learning/ch04/ch04-6/mushroom-download.py
|
cc4bf516b5258e4e5b4c8be4642807012331b96c
|
[] |
no_license
|
korea7030/pythonwork
|
88f5e67b33e9143eb40f6c10311a29e08317b77e
|
70741acb0477c9348ad3f1ea07a183dda82a5402
|
refs/heads/master
| 2023-01-08T01:47:15.141471
| 2020-09-09T13:28:20
| 2020-09-09T13:28:20
| 54,378,053
| 0
| 0
| null | 2022-12-26T20:25:43
| 2016-03-21T10:00:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 523
|
py
|
# -*- coding: utf-8 -*-
import urllib.request as req
# Download the UCI mushroom dataset (agaricus-lepiota) to a local CSV file.
local = "mushroom.csv"
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/mushroom/agaricus-lepiota.data"
req.urlretrieve(url, local)
print("ok")
# Column legend (Korean): 1st column = poisonous (p) / edible (e) flag;
# 2nd column = cap shape; 4th column = cap color.
"""
첫번째 열 : 독의 유무(독: p / 식용 : e)
두번째 열 : 버섯머리모양(벨 : b / 혹 : k / 오목 : s/ 평평한 : f)
네번째 열 : 머리색(갈색 : n / 황갈색 : b / 연한 갈색 : c/ 회색 : g/ 녹색 : r/분홍색 : p/보라색 : u/ 붉은색 : c / 흰색 : w/노란색 : y)
"""
|
[
"korea7030@naver.com"
] |
korea7030@naver.com
|
17fec2fc6f5b0c5fd1cd55adb95430ecf92baa2a
|
9f6ca792f8ef4fac137ddab6b5af3ae4629759d8
|
/realize_bj_2941.py
|
18991a5526a58b5a875b70f7f4f0e667d86f1cdf
|
[] |
no_license
|
pjhq2/Baekjoon
|
d01374eca0fc0b4d68d2209fc57d83db3349c89c
|
921822f0cdaca1456f167d271c9efe84ddee2bd4
|
refs/heads/main
| 2023-08-11T11:48:30.364066
| 2021-09-22T12:43:07
| 2021-09-22T12:43:07
| 386,900,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
# BOJ 2941: count "letters" of a Croatian word where each multi-character
# sequence in ``cro`` counts as one letter.
word = input()
cro = ['c=', 'c-', 'dz=', 'd-', 'lj', 'nj', 's=', 'z=']
# Start from the raw character count, then subtract one per occurrence of a
# special sequence.  'dz=' is reduced twice — once via 'dz=' and once via the
# embedded 'z=' — taking its 3 characters down to a single letter.
result = len(word)
for c in cro:
    result -= word.count(c)
print(result)
|
[
"pkonu7@gmail.com"
] |
pkonu7@gmail.com
|
c9aa4f532ea1ce1fd202c31db44912e2f536d887
|
b3e147ac438246d60644725fa93c16c9bae7fa7e
|
/Django Social Authentication/msg/communities/migrations/0001_initial.py
|
deadef4e706daa8a0473ca511f741db8917598ed
|
[] |
no_license
|
Ehsan-Molavi/teamtreehouse
|
c55180b3d8eac8c18c03f335056fae1088c769e4
|
cbe90b2eff0708e4c95a6909d7edec494ddd9615
|
refs/heads/master
| 2020-09-09T05:54:49.828487
| 2017-04-25T16:09:19
| 2017-04-25T16:09:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,824
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-18 21:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.9; avoid manual edits.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Community',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True)),
                ('slug', models.SlugField(allow_unicode=True, unique=True)),
                ('description', models.TextField(blank=True, default='')),
            ],
            options={
                'verbose_name_plural': 'communities',
                'ordering': ['name'],
            },
        ),
        # Through-model linking users to communities with a role level.
        migrations.CreateModel(
            name='CommunityMember',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('role', models.IntegerField(choices=[(0, 'banned'), (1, 'member'), (2, 'moderator'), (3, 'admin')], default=1)),
                ('community', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='members', to='communities.Community')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='communities', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # A user can belong to a given community at most once.
        migrations.AlterUniqueTogether(
            name='communitymember',
            unique_together=set([('community', 'user')]),
        ),
    ]
|
[
"brian@ubuntu.ubuntu-domain"
] |
brian@ubuntu.ubuntu-domain
|
ad16a47079c9d48d939186dd69f791eb8776e562
|
eba3e4a3935d6422d1ed85aaf69337f5ba15fc74
|
/tg-build/WebApp/bbwi/bbwi/commands.py
|
bc1be16efe88e2d957500026b277c85035637373
|
[] |
no_license
|
arianepaola/tg2jython
|
2ae74250ca43b021323ef0951a9763712c2eb3d6
|
971b9c3eb8ca941d1797bb4b458f275bdca5a2cb
|
refs/heads/master
| 2021-01-21T12:07:48.815690
| 2009-03-27T02:38:11
| 2009-03-27T02:38:11
| 160,242
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,737
|
py
|
# -*- coding: utf-8 -*-
"""This module contains functions called from console script entry points."""
import sys
from os import getcwd
from os.path import dirname, exists, join
import pkg_resources
pkg_resources.require("TurboGears>=1.0.5")
pkg_resources.require("SQLObject>=0.7.1")
import cherrypy
import turbogears
cherrypy.lowercase_api = True
class ConfigurationError(Exception):
    """Raised when no usable configuration file can be located."""
    pass
def start():
    """Start the CherryPy application server.

    Configuration file resolution order: command-line argument, then
    ``dev.cfg`` in a source checkout, then ``prod.cfg`` in the current
    directory, then the ``default.cfg`` packaged in the egg.
    """
    setupdir = dirname(dirname(__file__))
    curdir = getcwd()

    # First look on the command line for a desired config file,
    # if it's not on the command line, then look for 'setup.py'
    # in the current directory. If there, load configuration
    # from a file called 'dev.cfg'. If it's not there, the project
    # is probably installed and we'll look first for a file called
    # 'prod.cfg' in the current directory and then for a default
    # config file called 'default.cfg' packaged in the egg.
    if len(sys.argv) > 1:
        configfile = sys.argv[1]
    elif exists(join(setupdir, "setup.py")):
        # Source checkout: development configuration.
        configfile = join(setupdir, "dev.cfg")
    elif exists(join(curdir, "prod.cfg")):
        configfile = join(curdir, "prod.cfg")
    else:
        try:
            # Installed egg: fall back to the packaged default config.
            configfile = pkg_resources.resource_filename(
                pkg_resources.Requirement.parse("bbwi"),
                "config/default.cfg")
        except pkg_resources.DistributionNotFound:
            raise ConfigurationError("Could not find default configuration.")

    turbogears.update_config(configfile=configfile,
                             modulename="bbwi.config")

    # Imported late so configuration is applied before controllers load.
    from bbwi.controllers import Root

    turbogears.start_server(Root())
|
[
"arianepaola@gmail.com"
] |
arianepaola@gmail.com
|
7026e3a545655b602456d212833fb1d8827ea6eb
|
df7f13ec34591fe1ce2d9aeebd5fd183e012711a
|
/hata/discord/message/message/tests/test__validate_reactions.py
|
9cefa7864a1e54b584a1deb4686712a2d416e78a
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
HuyaneMatsu/hata
|
63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e
|
53f24fdb38459dc5a4fd04f11bdbfee8295b76a4
|
refs/heads/master
| 2023-08-20T15:58:09.343044
| 2023-08-20T13:09:03
| 2023-08-20T13:09:03
| 163,677,173
| 3
| 3
|
Apache-2.0
| 2019-12-18T03:46:12
| 2018-12-31T14:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
import vampytest
from ....core import BUILTIN_EMOJIS
from ....emoji import ReactionMapping
from ....user import User
from ..fields import validate_reactions
def test__validate_reactions__0():
"""
Tests whether ``validate_reactions`` works as intended.
Case: passing.
"""
reactions = ReactionMapping()
for input_value, expected_output in (
(None, None),
(reactions, reactions),
):
output = validate_reactions(input_value)
vampytest.assert_is(output, expected_output)
def test__validate_reactions__1():
"""
Tests whether ``validate_reactions`` works as intended.
Case: `TypeError`.
"""
for input_value in (
12.6,
):
with vampytest.assert_raises(TypeError):
validate_reactions(input_value)
def test__validate_reactions__2():
"""
Tests whether ``validate_reactions`` works as intended.
Case: Successful conversion.
"""
emoji_1 = BUILTIN_EMOJIS['heart']
emoji_2 = BUILTIN_EMOJIS['x']
user_id_0 = 202305010021
user_id_1 = 202305010022
user_0 = User.precreate(user_id_0)
user_1 = User.precreate(user_id_1)
input_value = {
emoji_1: [user_0, user_1],
emoji_2: [user_1]
}
expected_output = ReactionMapping(input_value)
output = validate_reactions(input_value)
vampytest.assert_eq(output, expected_output)
|
[
"re.ism.tm@gmail.com"
] |
re.ism.tm@gmail.com
|
eca9d2184d08829e57c6e53dc200435a62c27dca
|
077c91b9d5cb1a6a724da47067483c622ce64be6
|
/fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs_with_max_replays_5/interreplay_131_l_4/replay_config.py
|
81dd578b4a0ae58eed76c481e6a8fe40e4d236b9
|
[] |
no_license
|
Spencerx/experiments
|
0edd16398725f6fd9365ddbb1b773942e4878369
|
aaa98b0f67b0d0c0c826b8a1565916bf97ae3179
|
refs/heads/master
| 2020-04-03T10:11:40.671606
| 2014-06-11T23:55:11
| 2014-06-11T23:55:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,226
|
py
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import Replayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pyretic.py -m p0 pyretic.examples.firewall_for_sts_no_close', label='c1', address='127.0.0.1', cwd='../pyretic', kill_cmd='ps aux | grep -e pox -e pyretic | grep -v simulator | cut -c 9-15 | xargs kill -9')],
topology_class=MeshTopology,
topology_params="num_switches=3",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=False,
kill_controllers_on_exit=True)
control_flow = Replayer(simulation_config, "experiments/fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs/interreplay_131_l_4/events.trace",
input_logger=InputLogger(),
wait_on_deterministic_values=False,
allow_unexpected_messages=False,
delay_flow_mods=False,
pass_through_whitelisted_messages=True)
# Invariant check: 'None'
|
[
"cs@cs.berkeley.edu"
] |
cs@cs.berkeley.edu
|
13690714d2c28a995bb0fdc95c5e71261ffd971b
|
091a301b966f3141fc6020c754916ca1828216f9
|
/exercises/05_basic_scripts/task_5_1c.py
|
0f175b39c372c5c8c949a367a202937c462f389a
|
[] |
no_license
|
netproglogic/pyneng-examples-exercises-en
|
f9d6a9b04873fc79ef6d3362844fb6464715cd3d
|
299676a575f1d97922d8e60e8773ad9ea0301ce5
|
refs/heads/main
| 2023-06-28T23:12:02.153433
| 2021-07-27T02:38:51
| 2021-07-27T02:38:51
| 389,825,380
| 0
| 0
| null | 2021-07-27T02:30:05
| 2021-07-27T02:30:04
| null |
UTF-8
|
Python
| false
| false
| 1,413
|
py
|
# -*- coding: utf-8 -*-
"""
Task 5.1c
Copy and modify the script from task 5.1b so that when you request a parameter
that is not in the device dictionary, the message 'There is no such parameter' is displayed.
The assignment applies only to the parameters of the devices, not to the devices themselves.
> Try typing a non-existent parameter, to see what the result will be. And then complete the task.
If an existing parameter is selected, print information about the corresponding parameter.
An example of script execution:
$ python task_5_1c.py
Enter device name: r1
Enter parameter name (ios, model, vendor, location, ip): ips
There is no such parameter
Restriction: You cannot modify the london_co dictionary.
All tasks must be completed using only the topics covered. That is, this task can be
solved without using the if condition.
"""
london_co = {
"r1": {
"location": "21 New Globe Walk",
"vendor": "Cisco",
"model": "4451",
"ios": "15.4",
"ip": "10.255.0.1",
},
"r2": {
"location": "21 New Globe Walk",
"vendor": "Cisco",
"model": "4451",
"ios": "15.4",
"ip": "10.255.0.2",
},
"sw1": {
"location": "21 New Globe Walk",
"vendor": "Cisco",
"model": "3850",
"ios": "3.6.XE",
"ip": "10.255.0.101",
"vlans": "10,20,30",
"routing": True,
},
}
|
[
"nataliya.samoylenko@gmail.com"
] |
nataliya.samoylenko@gmail.com
|
35a312ce72be62ae3e48ff98179b21d0e9debdf3
|
425db5a849281d333e68c26a26678e7c8ce11b66
|
/LeetCodeSolutions/LeetCode_0159.py
|
84026e92728dd9d32811d5f23f0d556a9e0d1ec8
|
[
"MIT"
] |
permissive
|
lih627/python-algorithm-templates
|
e8092b327a02506086414df41bbfb2af5d6b06dc
|
a61fd583e33a769b44ab758990625d3381793768
|
refs/heads/master
| 2021-07-23T17:10:43.814639
| 2021-01-21T17:14:55
| 2021-01-21T17:14:55
| 238,456,498
| 29
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
class Solution:
    def lengthOfLongestSubstringTwoDistinct(self, s: str) -> int:
        """Return the length of the longest substring of ``s`` containing
        at most two distinct characters.

        Sliding-window scan: extend the right edge one character at a
        time; whenever a third distinct character enters the window,
        advance the left edge until only two remain.
        """
        best = 0
        left = 0
        window = {}  # char -> occurrences inside s[left:right+1]
        for right, ch in enumerate(s):
            window[ch] = window.get(ch, 0) + 1
            # Too many distinct characters: shrink from the left until
            # one character's count drops to zero.
            while len(window) > 2:
                evicted = s[left]
                window[evicted] -= 1
                if window[evicted] == 0:
                    del window[evicted]
                left += 1
            best = max(best, right - left + 1)
        return best
|
[
"lih627@outlook.com"
] |
lih627@outlook.com
|
bebca619b51381ed8388bff56f2aa99999713b2d
|
1515e55e6695bf6e385da86e489fddbbe64a667f
|
/Tree Based/617. Merge Two Binary Trees.py
|
f66b02c3bde3a1c74686589f056d8aa1764c7a0d
|
[] |
no_license
|
Stella2019/leetcode_stella
|
f9d9789ef6815c05feb04587718fb528d1c0331d
|
253a5cc51394d3c15c64d398af5442ccc65ae7aa
|
refs/heads/master
| 2022-12-11T18:18:47.685881
| 2020-09-07T00:24:23
| 2020-09-07T00:24:23
| 293,383,967
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,362
|
py
|
"""
把两个树重叠,重叠部分求和,不重叠部分是两个树不空的节点
"""
"""
题目大意
将两个二叉树进行merge操作。操作方式是把两个树进行重叠,如果重叠部分都有值,那么这个新节点是他们的值的和;如果重叠部分没有值,那么新的节点就是他们两个当中不为空的节点。
解题方法
递归
如果两个树都有节点的话就把两个相加,左右孩子为两者的左右孩子。
否则选不是空的节点当做子节点。
时间复杂度是O(N1+N2),空间复杂度O(N)。N = t1 的 t2交集。
"""
class Solution:
    def mergeTrees(self, t1, t2):
        """
        Merge two binary trees by overlaying them: overlapping nodes sum
        their values; where only one tree has a node, that node is reused.

        :type t1: TreeNode
        :type t2: TreeNode
        :rtype: TreeNode
        """
        # Both nodes exist: new node carries the sum, children are merged
        # recursively. Runs in O(N) over the overlap of the two trees.
        if t1 and t2:
            newT = TreeNode(t1.val + t2.val)
            newT.left = self.mergeTrees(t1.left, t2.left)
            newT.right = self.mergeTrees(t1.right, t2.right)
            return newT
        else:
            # At most one side exists: reuse whichever node is non-None
            # (returns None when both are missing).
            return t1 or t2
# The same merge can also be written with guard clauses; there is no behavioral difference:
class Solution:
    def mergeTrees(self, t1, t2):
        """Merge two binary trees by summing overlapping nodes (guard-clause form)."""
        # If either tree is missing, the merged result is simply the other
        # tree (possibly None).
        if not t2:
            return t1
        if not t1:
            return t2
        # Both present: sum the values and merge the children recursively.
        newT = TreeNode(t1.val + t2.val)
        newT.left = self.mergeTrees(t1.left, t2.left)
        newT.right = self.mergeTrees(t1.right, t2.right)
        return newT
|
[
"noreply@github.com"
] |
Stella2019.noreply@github.com
|
8875cb041f81b6df9792c3edb27badd2bb779332
|
23ec6adce704bff40d04cd6fc0ba446375405b68
|
/firstBadVersion.py
|
f20f5f8d9fe87eb060c05515325ef8b18688ed84
|
[] |
no_license
|
amoghrajesh/Coding
|
1845be9ea8df2d13d2a21ebef9ee6de750c8831d
|
a7dc41a4963f97dfb62ee4b1cab5ed80043cfdef
|
refs/heads/master
| 2023-08-31T10:10:48.948129
| 2023-08-30T15:04:02
| 2023-08-30T15:04:02
| 267,779,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
def bs(a, l, r, key):
    """Lower-bound binary search over ``a[l..r]`` (inclusive bounds).

    Assuming ``a`` is partitioned so that every element equal to ``key``
    follows every other element (the "first bad version" layout), returns
    ``(index, a[index])`` for the first position holding ``key``. If no
    element matches, the search converges on ``r`` and returns that slot.
    """
    lo, hi = l, r
    while lo < hi:
        mid = (lo + hi) // 2
        if a[mid] == key:
            hi = mid        # mid could be the first match; keep it in range
        else:
            lo = mid + 1    # any match must lie to the right of mid
    return lo, a[lo]
a=[True,True,True,True,True,True,True,True,True,True,False,False,False,False,False,False]
n=len(a)
print(bs(a,0,n-1,False))
|
[
"amoghrajesh1999@gmail.com"
] |
amoghrajesh1999@gmail.com
|
3732d7628f4b49ef7cd4f940283873df9defe2b4
|
f0b741f24ccf8bfe9bd1950425d83b6291d21b10
|
/components/aws/sagemaker/workteam/src/sagemaker_workteam_spec.py
|
5b53cf8749125007f9bca1eb2cf8f0c92c1fadd4
|
[
"PSF-2.0",
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-proprietary-license",
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft",
"Unlicense",
"Python-2.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-protobuf"
] |
permissive
|
kubeflow/pipelines
|
e678342b8a325559dec0a6e1e484c525fdcc8ce8
|
3fb199658f68e7debf4906d9ce32a9a307e39243
|
refs/heads/master
| 2023-09-04T11:54:56.449867
| 2023-09-01T19:07:33
| 2023-09-01T19:12:27
| 133,100,880
| 3,434
| 1,675
|
Apache-2.0
| 2023-09-14T20:19:06
| 2018-05-12T00:31:47
|
Python
|
UTF-8
|
Python
| false
| false
| 3,485
|
py
|
"""Specification for the SageMaker workteam component."""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List
from common.sagemaker_component_spec import (
SageMakerComponentSpec,
SageMakerComponentBaseOutputs,
)
from common.spec_input_parsers import SpecInputParsers
from common.common_inputs import (
COMMON_INPUTS,
SageMakerComponentCommonInputs,
SageMakerComponentInput as Input,
SageMakerComponentOutput as Output,
SageMakerComponentInputValidator as InputValidator,
SageMakerComponentOutputValidator as OutputValidator,
)
@dataclass(frozen=True)
class SageMakerWorkteamInputs(SageMakerComponentCommonInputs):
"""Defines the set of inputs for the workteam component."""
team_name: Input
description: Input
user_pool: Input
user_groups: Input
client_id: Input
sns_topic: Input
@dataclass
class SageMakerWorkteamOutputs(SageMakerComponentBaseOutputs):
"""Defines the set of outputs for the workteam component."""
workteam_arn: Output
class SageMakerWorkteamSpec(
SageMakerComponentSpec[SageMakerWorkteamInputs, SageMakerWorkteamOutputs]
):
INPUTS: SageMakerWorkteamInputs = SageMakerWorkteamInputs(
team_name=InputValidator(
input_type=str, required=True, description="The name of your work team."
),
description=InputValidator(
input_type=str, required=True, description="A description of the work team."
),
user_pool=InputValidator(
input_type=str,
required=False,
description="An identifier for a user pool. The user pool must be in the same region as the service that you are calling.",
),
user_groups=InputValidator(
input_type=str,
required=False,
description="A list of identifiers for user groups separated by commas.",
default="",
),
client_id=InputValidator(
input_type=str,
required=False,
description="An identifier for an application client. You must create the app client ID using Amazon Cognito.",
),
sns_topic=InputValidator(
input_type=str,
required=False,
description="The ARN for the SNS topic to which notifications should be published.",
default="",
),
**vars(COMMON_INPUTS),
)
OUTPUTS = SageMakerWorkteamOutputs(
workteam_arn=OutputValidator(description="The ARN of the workteam."),
)
def __init__(self, arguments: List[str]):
super().__init__(arguments, SageMakerWorkteamInputs, SageMakerWorkteamOutputs)
@property
def inputs(self) -> SageMakerWorkteamInputs:
return self._inputs
@property
def outputs(self) -> SageMakerWorkteamOutputs:
return self._outputs
@property
def output_paths(self) -> SageMakerWorkteamOutputs:
return self._output_paths
|
[
"noreply@github.com"
] |
kubeflow.noreply@github.com
|
1f73e465808ce7005cd026a05ec7c9f9142b53dc
|
4d1039cb53135c002dbee160c13e6a89bf57e57d
|
/main/migrations/0010_auto_20200901_1742.py
|
faacead814c375600d0625c3082f60b71c5ad347
|
[] |
no_license
|
nova-sangeeth/drix-url
|
235c59c3692c294f84d94bb4bcd633bf20172aaf
|
1fe32b45397f853e406a3641a23bdd5bb128d346
|
refs/heads/master
| 2023-08-06T07:40:55.387540
| 2021-09-24T04:48:01
| 2021-09-24T04:48:01
| 275,612,983
| 0
| 0
| null | 2021-09-24T04:48:02
| 2020-06-28T15:25:46
|
HTML
|
UTF-8
|
Python
| false
| false
| 762
|
py
|
# Generated by Django 3.0.7 on 2020-09-01 17:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a creation timestamp and an owning-user FK to the short_urls model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('main', '0009_user_created_url'),
    ]

    operations = [
        # editable=False: the timestamp is set in code, not via forms/admin.
        # null=True keeps pre-existing rows valid.
        migrations.AddField(
            model_name='short_urls',
            name='created_time',
            field=models.DateTimeField(editable=False, null=True),
        ),
        # Nullable FK so rows created before this migration remain valid;
        # deleting the user cascades to their URLs.
        migrations.AddField(
            model_name='short_urls',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"novasangeeth@outlook.com"
] |
novasangeeth@outlook.com
|
e17ffa077e98c9701b914e8fd82a3c615792b1f2
|
1e17d31c1e8e6db8e42b9c1358b1cbd7d58b1eb9
|
/examples/readme.py
|
d559b18c0a0c60d197f7baf168e4fce4fa9656c0
|
[
"MIT"
] |
permissive
|
pedrogclp/MerossIot
|
69970526734b7b4762da888d1bcf09567ca8fd3f
|
5913704375a4a20e7626519dd05d20143001adaf
|
refs/heads/0.4.X.X
| 2023-05-27T22:56:48.291245
| 2021-03-27T10:16:21
| 2021-03-27T10:16:21
| 371,523,608
| 0
| 0
|
MIT
| 2021-05-27T22:58:37
| 2021-05-27T22:58:36
| null |
UTF-8
|
Python
| false
| false
| 1,214
|
py
|
import asyncio
import os
from meross_iot.http_api import MerossHttpClient
from meross_iot.manager import MerossManager
EMAIL = os.environ.get('MEROSS_EMAIL') or "YOUR_MEROSS_CLOUD_EMAIL"
PASSWORD = os.environ.get('MEROSS_PASSWORD') or "YOUR_MEROSS_CLOUD_PASSWORD"
async def main():
    """Log in to the Meross cloud, discover devices, print them, then clean up."""
    # Setup the HTTP client API from user-password
    http_api_client = await MerossHttpClient.async_from_user_password(email=EMAIL, password=PASSWORD)
    # Setup and start the device manager
    manager = MerossManager(http_client=http_api_client)
    await manager.async_init()
    # Discover devices; find_devices() then reads the registry populated above.
    await manager.async_device_discovery()
    meross_devices = manager.find_devices()
    # Print them
    print("I've found the following devices:")
    for dev in meross_devices:
        print(f"- {dev.name} ({dev.type}): {dev.online_status}")
    # Close the manager and logout from http_api.
    # NOTE(review): close() is invoked synchronously while logout is awaited —
    # confirm manager.close() is not a coroutine in this library version.
    manager.close()
    await http_api_client.async_logout()
if __name__ == '__main__':
# On Windows + Python 3.8, you should uncomment the following
# asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
|
[
"albertogeniola@gmail.com"
] |
albertogeniola@gmail.com
|
2a9d0b5364557a152ab30182bfae2f7f02da9b62
|
9bbb00c09aaaa19565d3fb8091af568decb5820f
|
/3_Intro_To_Importing_Data_Python/1_Introduction_To_Flat_Files/7_Importing_with_Mixed_DataTypes.py
|
eb846cc9f26170ad1f3cd993191f6acb801e650f
|
[] |
no_license
|
PeterL64/UCDDataAnalytics
|
4417fdeda9c64c2f350a5ba53b2a01b4bdc36fc7
|
d6ff568e966caf954323ecf641769b7c79ccb83a
|
refs/heads/master
| 2023-06-14T04:10:41.575025
| 2021-07-07T15:23:50
| 2021-07-07T15:23:50
| 349,780,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
# Importing with Mixed Data Types 2
# Similar to the np.genfromtxt() function there is another function, np.recfromcsv() for CSV files.
# np.recfromcsv has some defaults, so you do not need to enter them. dtype=None, delimiter=',' and names=True

# Fix: the snippet used `np` without importing it; import NumPy so the
# script runs standalone.
import numpy as np

# Assign the filename: file
file = 'titanic.csv'

# Import file using np.recfromcsv and assign it the variable: d
# NOTE(review): np.recfromcsv is deprecated/removed in recent NumPy releases;
# np.genfromtxt(file, dtype=None, delimiter=',', names=True) is the
# forward-compatible equivalent.
d = np.recfromcsv(file)

# Print out first three entries of d
print(d[:3])
|
[
"peterlyonscbar@gmail.com"
] |
peterlyonscbar@gmail.com
|
e04dffd49d4e6b770fadf40d765cf2c417a9b93a
|
6f255449d5790a1124ca56bec0e3dc457c1b3958
|
/quzzi/quiz-5/z5103095@unsw.edu.au.files/quiz_5.py
|
3e5b2471c2d36032c333902037e21a3206dfc3af
|
[] |
no_license
|
tomtang110/comp9021
|
ac8995f3f558ffdfff7af76a08c67e208fe26aa4
|
6c9e6404f515a72bc94a185c1c98d5aba49266c8
|
refs/heads/master
| 2020-03-23T18:56:41.177586
| 2018-07-24T04:56:23
| 2018-07-24T04:56:23
| 141,943,053
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,211
|
py
|
# Randomly fills a grid of size 10 x 10 with 0s and 1s and computes:
# - the size of the largest homogenous region starting from the top left corner,
# so the largest region consisting of connected cells all filled with 1s or
# all filled with 0s, depending on the value stored in the top left corner;
# - the size of the largest area with a checkers pattern.
#
# Written by *** and Eric Martin for COMP9021
import sys
from random import seed, randint
dim = 10
grid = [[None] * dim for _ in range(dim)]
def display_grid():
for i in range(dim):
print(' ', ' '.join(str(int(grid[i][j] != 0)) for j in range(dim)))
# Possibly define other functions
try:
arg_for_seed, density = input('Enter two nonnegative integers: ').split()
except ValueError:
print('Incorrect input, giving up.')
sys.exit()
try:
arg_for_seed, density = int(arg_for_seed), int(density)
if arg_for_seed < 0 or density < 0:
raise ValueError
except ValueError:
print('Incorrect input, giving up.')
sys.exit()
seed(arg_for_seed)
# We fill the grid with randomly generated 0s and 1s,
# with for every cell, a probability of 1/(density + 1) to generate a 0.
for i in range(dim):
for j in range(dim):
grid[i][j] = int(randint(0, density) != 0)
print('Here is the grid that has been generated:')
display_grid()
size_of_largest_homogenous_region_from_top_left_corner = 0
# Replace this comment with your code
from copy import deepcopy
grid1 = deepcopy(grid)
def count_1(i,j,R):
global grid
if grid[i][j] == R:
grid[i][j] = '*'
if i:
count_1(i-1,j,R)
if i<dim -1:
count_1(i+1,j,R)
if j:
count_1(i,j-1,R)
if j < dim-1:
count_1(i,j+1,R)
#question1
if grid[0][0] == 1:
count_1(0,0,1)
elif grid[0][0] == 0:
count_1(0,0,0)
size_top_lef=sum(i.count('*') for i in grid)
size_of_largest_homogenous_region_from_top_left_corner += size_top_lef
print('The size_of the largest homogenous region from the top left corner is '
f'{size_of_largest_homogenous_region_from_top_left_corner}.'
)
max_size_of_region_with_checkers_structure = 0
# Replace this comment with your code
def count_2(i,j,grid,grid1,emp_list):
ab=(i,j)
if ab not in emp_list:
emp_list.append(ab)
grid[i][j] = '*'
if i:
if grid1[i][j] != grid1[i-1][j]:
count_2(i-1,j,grid,grid1,emp_list)
if i<dim - 1:
if grid1[i][j] != grid1[i+1][j]:
count_2(i+1,j,grid,grid1,emp_list)
if j:
if grid1[i][j] != grid1[i][j-1]:
count_2(i,j-1,grid,grid1,emp_list)
if j<dim - 1:
if grid1[i][j] != grid1[i][j+1]:
count_2(i,j+1,grid,grid1,emp_list)
q2=[]
for i in range(len(grid1)):
for j in range(len(grid1)):
grid=deepcopy(grid1)
count_2(i,j,grid,grid1,[])
answer = sum(k.count('*') for k in grid)
q2.append(answer)
max_size_of_region_with_checkers_structure += max(q2)
print('The size of the largest area with a checkers structure is '
f'{max_size_of_region_with_checkers_structure}.'
)
|
[
"tomtang110@outlook.com"
] |
tomtang110@outlook.com
|
38a5a0665f4b5838c1c02d622affa06df9ded96a
|
4191b25485148f003193d556a34b8d3cca2f2e27
|
/code_testing/visualization.py
|
893b8df97219af3de336af1efe0f42d194b0f4cf
|
[] |
no_license
|
anilkunwar/FEA-Net
|
429b7077d3d4c3a5e8a5edde52c049f2a985b5e7
|
858d3e3aed8f851082ac6f95756f382118e97908
|
refs/heads/master
| 2022-03-17T03:01:59.483998
| 2019-12-01T17:42:11
| 2019-12-01T17:42:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 818
|
py
|
import matplotlib.pyplot as plt
import numpy as np
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
def fmg_net_complexity():
    """Plot the number of LU blocks in FMG-NET against problem DOF (log-x axis)."""
    # Grid side lengths and the corresponding LU-block counts.
    # NOTE(review): values appear to be empirical measurements — confirm source.
    x=np.asarray([64,128,256,512,1024,2048,4096,8192])
    y=np.asarray([16,27,36,49,84,101,120,141])
    plt.semilogx(x*x,y,'o-')  # DOF = (side length)**2, hence the x*x
    plt.xlabel('DOF')
    plt.ylabel('# of LU block in FMG-NET')
|
[
"hope-yao@asu.edu"
] |
hope-yao@asu.edu
|
a77a1d75e259fc62a385e27de488bc5836d93512
|
bc7d019d3c88cfb637fdcc15ec800ed6e7db565c
|
/voyage_prevu/migrations/0035_aeroport_port.py
|
ef2ac653179f3370811641731034e92a65b4d838
|
[] |
no_license
|
Zaenma/paiement-django
|
dec9fca85b2cad0c62b7ec3fa416b34420dea31f
|
7dfb777425320daf5a165dcc36ec7c0df898aa34
|
refs/heads/master
| 2023-03-19T07:31:53.409214
| 2020-11-09T14:58:51
| 2020-11-09T14:58:51
| 311,372,216
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 905
|
py
|
# Generated by Django 3.1.2 on 2020-11-06 18:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Aeroport and Port lookup tables (each stores a city name)."""

    dependencies = [
        ('voyage_prevu', '0034_auto_20201105_2023'),
    ]

    operations = [
        migrations.CreateModel(
            name='Aeroport',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): field name 'non' looks like a typo for 'nom';
                # kept as-is — renaming requires a follow-up migration.
                ('non', models.CharField(max_length=50, verbose_name='Les villes où se situent les aéroports')),
            ],
        ),
        migrations.CreateModel(
            name='Port',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('non', models.CharField(max_length=50, verbose_name='Les villes où se situent les ports')),
            ],
        ),
    ]
|
[
"zaenma.halidisalim@gmail.com"
] |
zaenma.halidisalim@gmail.com
|
784027b7fe592ac2a4ca726e8c9251121601027f
|
07306da5b68336715383a705f85f90aefeccfe96
|
/tests/test_stream_protocol.py
|
7f9a7d0ce1b20d3e43cd386f75a3f12224c7dcc2
|
[
"Apache-2.0"
] |
permissive
|
datarobot/aiohttp
|
ebbb9e3c2af3c1b8a7fddc888195c09ad7ab55f9
|
19d95a5ad84cf1453b8091b9bd8640317831e15f
|
refs/heads/master
| 2021-05-13T17:29:51.844981
| 2018-06-12T15:24:36
| 2018-06-12T15:24:36
| 116,824,901
| 0
| 1
|
Apache-2.0
| 2018-06-12T15:32:02
| 2018-01-09T14:15:15
|
Python
|
UTF-8
|
Python
| false
| false
| 961
|
py
|
from unittest import mock
from aiohttp import parsers
def test_connection_made(loop):
    """connection_made stores the supplied transport on the protocol."""
    tr = mock.Mock()
    proto = parsers.StreamProtocol(loop=loop)
    assert proto.transport is None
    proto.connection_made(tr)
    assert proto.transport is tr
def test_connection_lost(loop):
    """A clean connection_lost(None) clears transport/writer and EOFs the reader."""
    proto = parsers.StreamProtocol(loop=loop)
    proto.connection_made(mock.Mock())
    proto.connection_lost(None)
    assert proto.transport is None
    assert proto.writer is None
    assert proto.reader._eof
def test_connection_lost_exc(loop):
    """connection_lost(exc) forwards the exception to the reader."""
    proto = parsers.StreamProtocol(loop=loop)
    proto.connection_made(mock.Mock())
    exc = ValueError()
    proto.connection_lost(exc)
    assert proto.reader.exception() is exc
def test_data_received(loop):
    """data_received feeds incoming bytes to the protocol's reader."""
    proto = parsers.StreamProtocol(loop=loop)
    proto.connection_made(mock.Mock())
    proto.reader = mock.Mock()
    proto.data_received(b'data')
    proto.reader.feed_data.assert_called_with(b'data')
|
[
"andrew.svetlov@gmail.com"
] |
andrew.svetlov@gmail.com
|
03fef4d8423aa08251d98b642634ce53a2e4542a
|
607dc8df19fc5248f6289cdda97857b5d58ca16f
|
/smac/runner/__init__.py
|
4bda4cb5ebae36ef239b127e1fa675d9cb31b1b9
|
[
"BSD-3-Clause",
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
automl/SMAC3
|
7dce243a33023c52d6819deff966f7b502e90ed0
|
541ee7e0383b491b86d1a23dcff669f2efad616d
|
refs/heads/main
| 2023-08-31T17:36:06.067579
| 2023-08-01T13:02:51
| 2023-08-01T13:02:51
| 65,900,469
| 943
| 259
|
NOASSERTION
| 2023-09-11T02:36:57
| 2016-08-17T10:58:05
|
Python
|
UTF-8
|
Python
| false
| false
| 472
|
py
|
from smac.runner.abstract_runner import AbstractRunner
from smac.runner.dask_runner import DaskParallelRunner
from smac.runner.exceptions import (
FirstRunCrashedException,
TargetAlgorithmAbortException,
)
from smac.runner.target_function_runner import TargetFunctionRunner
__all__ = [
# Runner
"AbstractRunner",
"TargetFunctionRunner",
"DaskParallelRunner",
# Exceptions
"TargetAlgorithmAbortException",
"FirstRunCrashedException",
]
|
[
"noreply@github.com"
] |
automl.noreply@github.com
|
beb4ebfba4d36ed804bf6dbd3b0fe7eb3512b978
|
1d61bf0b287533c9eb89bf71e217ead8cffb7811
|
/System/Tester/tester.py
|
1463da9e5ee33340fd14192bc1502171fc16b93f
|
[] |
no_license
|
chukotka12/PP4E-GitHub
|
2f6bf5e431a211beb9e1b6aa56b495770f07e6e4
|
c9347ffa20f598b8c469082788a964549cd5df2b
|
refs/heads/master
| 2020-05-30T04:43:30.570872
| 2019-09-10T14:33:14
| 2019-09-10T14:33:14
| 189,545,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,056
|
py
|
"""
##############################################################################
Тестирует сценарии Python в каталоге, передает им аргументы командной строки,
выполняет перенаправление stdin, перехватывает stdout, stderr и код завершения,
чтобы определить наличие ошибок и отклонений от предыдущих результатов
выполнения. Запуск сценариев и управление потоками ввода-вывода производится
с помощью переносимого модуля subprocess (как это делает функция os.popen3
в Python 2.X). Потоки ввода-вывода всегда интерпретируются модулем subprocess
как двоичные. Стандартный ввод, аргументы, стандартный вывод и стандартный вывод
ошибок отображаются в файлы, находящиеся в подкаталогах.
Этот сценарий командной строки позволяет указать имя тестируемого каталога
и флаг принудительной генерации выходного файла. Этот программный код можно было
бы упаковать в функцию, однако то обстоятельство, что результатами сценария
являются сообщения и выходные файлы, снижает практическую пользу модели вызов/
возвращаемое значение.
Дополнительные возможные расширения: можно было бы реализовать по несколько
наборов аргументов командной строки и/или входных файлов для каждого
тестируемого сценария и запускать их по несколько раз (использовать функцию glob
для выборки нескольких файлов “.in*” в каталоге Inputs).
Возможно, было бы проще хранить все файлы, необходимые для проведения тестов,
в одном и том же каталоге, но с различными расширениями, однако с течением
времени их объем мог бы оказаться слишком большим.
В случае ошибок можно было бы сохранять содержимое потоков вывода stderr
и stdout в подкаталоге Errors, но я предпочитаю иметь ожидаемый/фактический
вывод в подкаталоге Outputs.
##############################################################################
"""
import os, sys, glob, time
from subprocess import Popen, PIPE
# конфигурационные аргументы
testdir = sys.argv[1] if len(sys.argv) > 1 else os.curdir
forcegen = len(sys.argv) > 2
print('Start tester:', time.asctime())
print('in', os.path.abspath(testdir))
def verbose(*args):
print('-' * 80)
for arg in args: print(arg)
def quiet(*args): pass
trace = quiet
# trace = verbose
# отбор сценариев для тестирования
testpatt = os.path.join(testdir, 'Scripts', '*.py')
testfiles = glob.glob(testpatt)
testfiles.sort()
trace(os.getcwd(), *testfiles)
numfail = 0
for testpath in testfiles:
testname = os.path.basename(testpath)
# получить входной файл и аргументы для тестируемого сценария
infile = testname.replace('.py', '.in')
inpath = os.path.join(testdir, 'Inputs', infile)
indata = open(inpath, 'rb').read() if os.path.exists(inpath) else b''
argfile = testname.replace('.py', '.args')
argpath = os.path.join(testdir, 'Args', argfile)
argdata = open(argpath).read() if os.path.exists(argpath) else ''
# местоположение файлов для сохранения stdout и stderr,
# очистить предыдущие результаты
outfile = testname.replace('.py', '.out')
outpath = os.path.join(testdir, 'Outputs', outfile)
outpathbad = outpath + '.bad'
if os.path.exists(outpathbad): os.remove(outpathbad)
errfile = testname.replace('.py', '.err')
errpath = os.path.join(testdir, 'Errors', errfile)
if os.path.exists(errpath): os.remove(errpath)
# запустить тестируемый сценарий, перенаправив потоки ввода-вывода
pypath = sys.executable
command = '%s %s %s' % (pypath, testpath, argdata)
trace(command, indata)
process = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
process.stdin.write(indata)
process.stdin.close()
outdata = process.stdout.read()
errdata = process.stderr.read()
exitstatus = process.wait()
trace(outdata, errdata, exitstatus)
# проанализировать результаты
if exitstatus != 0:
print('ERROR status:', testname, exitstatus)
if errdata:
print('ERROR stream:', testname, errpath)
open(errpath, 'wb').write(errdata)
if exitstatus or errdata:
numfail += 1
open(outpathbad, 'wb').write(outdata)
elif not os.path.exists(outpath) or forcegen:
print('generating:', outpath)
open(outpath, 'wb').write(outdata)
else:
priorout = open(outpath, 'rb').read()
if priorout == outdata:
print('passed:', testname)
else:
numfail += 1
print('FAILED output:', testname, outpathbad)
open(outpathbad, 'wb').write(outdata)
print('Finished:', time.asctime())
print('%s tests were run, %s tests failed.' % (len(testfiles), numfail))
|
[
"chukreev.g@gmail.com"
] |
chukreev.g@gmail.com
|
bab250c929a45a4303cf4458404b5564c91d3e7e
|
7374204324f6326663d12b3dd1fecc5bebb6854e
|
/offer/21.py
|
92c2b908ffa610098c7214b6d91323a1b69d4e39
|
[] |
no_license
|
KevinChen1994/leetcode-algorithm
|
c18b58df398027078b0c0f468c4c873e9419433b
|
1bcf3206cd3acc428ec690cb883c612aaf708aac
|
refs/heads/master
| 2023-02-07T11:35:47.747207
| 2023-01-29T11:08:49
| 2023-01-29T11:08:49
| 230,386,123
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
# !usr/bin/env python
# -*- coding:utf-8 _*-
# author:chenmeng
# datetime:2020/8/27 22:54
class Solution:
    def exchange(self, nums):
        """Reorder ``nums`` in place so every odd number precedes every even one.

        Odd numbers keep their relative order; the evens may be reshuffled
        by the swaps. Returns the same list object that was passed in.
        """
        write = 0  # index of the next slot that should hold an odd number
        for idx, value in enumerate(nums):
            if value % 2:
                nums[idx], nums[write] = nums[write], nums[idx]
                write += 1
        return nums
if __name__ == '__main__':
solution = Solution()
nums = [1, 3, 8, 4, 2, 2, 3, 5, 90, 2, 1]
print(solution.exchange(nums))
|
[
"346521888@qq.com"
] |
346521888@qq.com
|
72edb51cbaee18ff1b5ed216dfe4334d588619c4
|
7bf617f77a55d8ec23fa8156c1380b563a5ac7f6
|
/CG/SciPy/7900_08_04.py
|
347dafa320f3570a8b98232c11f7348e03708ca2
|
[] |
no_license
|
anyatran/school
|
c06da0e08b148e3d93aec0e76329579bddaa85d5
|
24bcfd75f4a6fe9595d790808f8fca4f9bf6c7ec
|
refs/heads/master
| 2021-06-17T10:45:47.648361
| 2017-05-26T12:57:23
| 2017-05-26T12:57:23
| 92,509,148
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 725
|
py
|
#!/usr/bin/python
# select a non-GUI backend
import matplotlib
matplotlib.use('Agg')
# import plotting module
import matplotlib.pyplot as plt
# used to 'fake' file writing
from cStringIO import StringIO
# used to generate the graph
import numpy as np
# function called by mod_python upon request on this file
def index(req):
    """mod_python request handler: render sin(x)**3 + 0.5*cos(x) as a PNG response."""
    # clean the axes — pyplot figure state is global and survives between requests
    plt.cla()
    # generate the graph
    x = np.arange(0, 6, .01)
    plt.plot(x, np.sin(x)**3 + 0.5*np.cos(x))
    # instantiate a StringIO object to 'fake' a file for savefig
    s = StringIO()
    # and save the plot on it
    plt.savefig(s)
    # set the content-type for the response
    req.content_type = "image/png"
    # and write the content of the StringIO object as the response body
    req.write(s.getvalue())
|
[
"panhtran249@gmail.com"
] |
panhtran249@gmail.com
|
a60335fd3c0161a7831115494dd5523f809e1519
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_8/ndxkee009/question3.py
|
5bbb84a08292c8d8d7b0d5a8882340aedd384aa1
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
"""Keegan Naidoo
NDXKEE009
4 May 2014"""
text=input("Enter a message:\n")
def Encrypt(text):
    """Recursively Caesar-shift lowercase letters forward by one.

    'z' wraps to 'a'; spaces, uppercase letters, digits and punctuation
    are passed through unchanged.
    """
    if not text:
        return ""
    head, tail = text[0], text[1:]
    if head == " ":
        return " " + Encrypt(tail)
    if ord(head) < 97:
        # Everything below 'a' (uppercase, digits, most punctuation) is kept.
        return head + Encrypt(tail)
    if head == 'z':
        # Wrap around the end of the alphabet.
        return "a" + Encrypt(tail)
    if not head.isalpha():
        return head + Encrypt(tail)
    return chr(ord(head) + 1) + Encrypt(tail)
x=Encrypt(text)
print("Encrypted message:\n"+str(x))
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
2ebf12f787421a9fff6f50523fcb9c583a664292
|
680185d233bdc0a1b2f404923d69e1d2e5b94d9d
|
/rambler/net/compareExpressions.py
|
6dc3058383d1cf10c04989605ccfdad5836882f0
|
[] |
no_license
|
pombredanne/rambler.net
|
e0c5d36d3495d85fa07edbaa2c52c6ce69c2ae70
|
065d5ec4d1eee086b0b37910e3a6887ae748d83e
|
refs/heads/master
| 2020-12-31T02:49:31.666102
| 2012-04-12T18:24:58
| 2012-04-12T18:24:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,909
|
py
|
import cStringIO
REGULAR_EXPRESSION_CHARS='.*+?'
IGNORE_CHARS='()'
def compareExpressions(exp1, exp2):
    """Compare how precise two regular-expression strings are.

    Returns:
        -1 if exp1 is more precise than exp2
         0 if exp1 is as precise as exp2
         1 if exp1 is less precise than exp2

    This is useful when testing a string against a series of regular
    expressions where more precise expressions should get a chance to
    match first.  Consider:

    >>> expressions = ['.*', 'foo.*']

    If we evaluated a string against each expression and stopped on the
    first match like this code

    >>> import re
    >>> for expr in expressions:
    ...     if re.match(expr, 'foo'):
    ...         break

    the second expression 'foo.*' would never be tested because '.*'
    matches everything.

    >>> expr == '.*'
    True

    Therefore we want the more specific match to run first, which means
    we need to sort the list.

    >>> from functools import cmp_to_key
    >>> expressions.sort(key=cmp_to_key(compareExpressions))
    >>> expressions
    ['foo.*', '.*']
    """
    # Higher-precision expressions must sort first, so the comparison is
    # deliberately reversed.  The expression below is the portable
    # equivalent of the Python-2-only cmp(p2, p1).
    p1 = expressionPrecision(exp1)
    p2 = expressionPrecision(exp2)
    return (p2 > p1) - (p2 < p1)
def expressionPrecision(expStr):
    """ Return the precision of an expression. The precision is simply the
    number of literal (non regular expression) characters from the beginning
    of the string before reaching the first character that is part of a
    regular expression.

    For example
    >>> expressionPrecision('blah')
    4

    Because 'blah' contains no regular expression characters

    This next example the precision is 2 because the expression can
    match either "blah", "bloh", "blue herring"
    >>> expressionPrecision('bl(.*)h')
    2

    Now in this example the precision is three because the grouping
    character has no impact on the precision of the expression.
    >>> expressionPrecision('bl(a.*)h')
    3

    Escaped regular expression characters should count as normal characters
    >>> expressionPrecision('blah\.')
    5
    """
    # Iterate the characters directly; no need for the Python-2-only
    # cStringIO stream the original used for simple character traversal.
    chars = iter(expStr)
    precision = 0
    for char in chars:
        if char == '\\':
            # Skip the escaped character; the pair counts as one literal.
            next(chars, None)
            precision += 1
        elif char in IGNORE_CHARS:
            # Grouping characters have no impact on precision.
            pass
        elif char not in REGULAR_EXPRESSION_CHARS:
            precision += 1
        else:
            # First metacharacter found -- the precision is settled.
            break
    return precision
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import sys, doctest
    mod = sys.modules[__name__]
    doctest.testmod(mod)
|
[
"srobertson@codeit.com"
] |
srobertson@codeit.com
|
7b764c9a795922647c4f3acaf33d0114c1f2c22c
|
4cdc9ba739f90f6ac4bcd6f916ba194ada77d68c
|
/剑指offer/第四遍/19.正则表达式匹配.py
|
e839a9c0ddfc157700ff3f37508a06178b532c62
|
[] |
no_license
|
leilalu/algorithm
|
bee68690daf836cc5807c3112c2c9e6f63bc0a76
|
746d77e9bfbcb3877fefae9a915004b3bfbcc612
|
refs/heads/master
| 2020-09-30T15:56:28.224945
| 2020-05-30T03:28:39
| 2020-05-30T03:28:39
| 227,313,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,457
|
py
|
"""
请实现一个函数用来匹配包含'. '和'*'的正则表达式。模式中的字符'.'表示任意一个字符,而'*'表示它前面的字符可以出现任意次(含0次)。在本题中,匹配是指字符串的所有字符匹配整个模式。例如,字符串"aaa"与模式"a.a"和"ab*ac*a"匹配,但与"aa.a"和"ab*a"均不匹配。
示例 1:
输入:
s = "aa"
p = "a"
输出: false
解释: "a" 无法匹配 "aa" 整个字符串。
示例 2:
输入:
s = "aa"
p = "a*"
输出: true
解释: 因为 '*' 代表可以匹配零个或多个前面的那一个元素, 在这里前面的元素就是 'a'。因此,字符串 "aa" 可被视为 'a' 重复了一次。
示例 3:
输入:
s = "ab"
p = ".*"
输出: true
解释: ".*" 表示可匹配零个或多个('*')任意字符('.')。
示例 4:
输入:
s = "aab"
p = "c*a*b"
输出: true
解释: 因为 '*' 表示零个或多个,这里 'c' 为 0 个, 'a' 被重复一次。因此可以匹配字符串 "aab"。
示例 5:
输入:
s = "mississippi"
p = "mis*is*p*."
输出: false
s 可能为空,且只包含从 a-z 的小写字母。
p 可能为空,且只包含从 a-z 的小写字母,以及字符 . 和 *。
"""
class Solution:
    def isMatch(self, s, p):
        """Full-string regex match of s against pattern p, where '.' matches
        any single character and '*' means zero or more of the preceding
        element."""
        # '#' sentinels shift everything to 1-based indexing.
        text, patt = '#' + s, '#' + p
        rows, cols = len(text), len(patt)
        # matched[i][j]: do the first i chars of s match the first j chars of p?
        matched = [[False] * cols for _ in range(rows)]
        # Empty string matches empty pattern.
        matched[0][0] = True
        for i in range(rows):
            for j in range(1, cols):
                if i == 0:
                    # Empty text matches only prefixes that erase themselves
                    # via '*' (zero occurrences of the preceding element).
                    matched[i][j] = j > 1 and patt[j] == '*' and matched[i][j - 2]
                elif patt[j] == text[i] or patt[j] == '.':
                    # Current characters line up; inherit the diagonal result.
                    matched[i][j] = matched[i - 1][j - 1]
                elif patt[j] == '*':
                    # Either drop "x*" entirely, or let it consume one more
                    # character of the text.
                    dropped = j > 1 and matched[i][j - 2]
                    consumed = (patt[j - 1] == text[i] or patt[j - 1] == '.') and matched[i - 1][j]
                    matched[i][j] = dropped or consumed
                else:
                    matched[i][j] = False
        return matched[-1][-1]
return dp[-1][-1]
if __name__ == '__main__':
    # Worst-case-style probe: a long run of 'a' against many "a*" groups.
    s = "aaaaaaaaaaaaab"
    p = "a*a*a*a*a*a*a*a*a*a*a*a*b"
    res = Solution().isMatch(s, p)
    print(res)
|
[
"244492644@qq.com"
] |
244492644@qq.com
|
4291138f901a7718cffecda75a832431563fc9a5
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_2453486_0/Python/yakirsudry/solution.py
|
ea43b0bf5caa42220edc99eeee8ce197f20cdefd
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,058
|
py
|
import os
input = r"A-small-attempt0.in"
output = r"testoutput.txt"
def get_solution_str(table):
    """Classify a 4x4 tic-tac-toe board (Code Jam "Tic-Tac-Toe-Tomek").

    table: 4x4 grid of 'X', 'O', 'T' (joker counting for both players)
    or '.' (empty cell).

    Returns one of "X won", "O won", "Draw", "Game has not completed".
    """
    o_wins_string = "O won"
    x_wins_string = "X won"
    draw_string = "Draw"
    not_finished_string = "Game has not completed"
    o_good_chars = ["O", "T"]
    x_good_chars = ["X", "T"]

    def line_won(cells, good_chars):
        # A line is won when all four cells hold the player's mark or 'T'.
        return all(cell in good_chars for cell in cells)

    # Same check order as the original expanded code: for each index i,
    # row i for O, row i for X, then column i for O and column i for X.
    for i in range(4):
        row = [table[i][c] for c in range(4)]
        col = [table[r][i] for r in range(4)]
        if line_won(row, o_good_chars):
            return o_wins_string
        if line_won(row, x_good_chars):
            return x_wins_string
        if line_won(col, o_good_chars):
            return o_wins_string
        if line_won(col, x_good_chars):
            return x_wins_string
    # Both diagonals: main diagonal first, O checked before X.
    for diag in ([table[k][k] for k in range(4)],
                 [table[k][3 - k] for k in range(4)]):
        if line_won(diag, o_good_chars):
            return o_wins_string
        if line_won(diag, x_good_chars):
            return x_wins_string
    # No winner: the game is either still running or a draw.
    for i in range(4):
        if "." in table[i]:
            return not_finished_string
    return draw_string
def decode(line):
    """Split a raw board line into a list of single-character cells."""
    return list(line)
# Driver (Python 2): read the Code Jam input file and emit one verdict per case.
lines = open(input, "r").readlines()
num_cases = int(lines[0])
out = open(output, "w")
cur_line = 1
table = [""] * 4
cur_case = 1
for i in xrange(num_cases):
    # Each case is 4 board lines followed by one blank separator line.
    # NOTE(review): the inner loop reuses the name `i`, shadowing the case index.
    for i in xrange(4):
        table[i] = decode(lines[cur_line][:-1])
        cur_line += 1
    cur_line += 1
    out.write("Case #%d: %s\n" %(cur_case, get_solution_str(table)))
    cur_case += 1
    print table
# NOTE(review): `out` is never closed/flushed explicitly; relies on interpreter exit.
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
b6ee2ce9b062a51c42f418699bc88c5290f2d92e
|
8d3835e39cbc2c74d8535b809686d6ab3033c0d0
|
/ecommerce/carts/migrations/0001_initial.py
|
ae9854cb961e0bd4a352d399116cca10ab49b218
|
[] |
no_license
|
gayatribasude/GayatrisWorld
|
125698955cd8b98a5aa2377331293587a57f2911
|
552ea2ef946e95f5bccc4e51d4030484ab0bc438
|
refs/heads/master
| 2023-06-25T19:45:03.232059
| 2021-08-02T16:43:47
| 2021-08-02T16:43:47
| 384,343,617
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
# Generated by Django 2.1.3 on 2018-12-06 16:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the carts app: creates the Cart model."""
    initial = True
    dependencies = [
        # Cart.user references the project's configured (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        # Cart.products is an M2M to products.Product.
        ('products', '0003_auto_20181202_0848'),
    ]
    operations = [
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('total', models.DecimalField(decimal_places=2, default=0.0, max_digits=30)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('products', models.ManyToManyField(blank=True, to='products.Product')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"gayatribasude"
] |
gayatribasude
|
6b38251500d3171ba1a90aee8bf9ea0e04f55017
|
faf45ce5687f45b1c1a9aad272dcec6c2543db24
|
/imaginet/evaluate.py
|
2c82c70e8b09db334924e7f825b1d587f5660e18
|
[
"MIT"
] |
permissive
|
gchrupala/reimaginet
|
b8b7ee73bfdbf5d4d293bd5704e554fb9800ac1e
|
f583b62877a62d8c06e2dcd1e39363f4cc4976f9
|
refs/heads/master
| 2020-04-12T06:15:43.473654
| 2017-05-22T15:47:38
| 2017-05-22T15:47:38
| 37,725,869
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,646
|
py
|
# encoding: utf-8
# Copyright (c) 2015 Grzegorz Chrupała
from __future__ import division
import theano
import theano.tensor as T
import numpy
from scipy.spatial.distance import cdist
def paraphrase_ranking(vectors, group):
    """Rank sentences by projection and return evaluation metrics."""
    # Each vector is ranked against all vectors with itself excluded;
    # `group` plays the role of ranking()'s `correct` matrix.
    return ranking(vectors, vectors, group, ns=[4], exclude_self=True)
def ranking(candidates, vectors, correct, ns=[1,5,10], exclude_self=False):
    """Rank `candidates` in order of similarity for each vector and return evaluation metrics.

    `correct[i][j]` indicates whether for vector i the candidate j is correct.
    Returns a dict with the 1-based rank of the first correct candidate per
    vector ('ranks'), plus per-n precision/recall/overlap lists.
    """
    # NOTE(review): the mutable default `ns=[1,5,10]` is never mutated here,
    # so it is harmless, but a tuple default would be safer.
    #distances = cdist(vectors, candidates, metric='cosine')
    # Batched cosine distances via the theano-backed Cdist helper below.
    distances = Cdist(batch_size=2**13)(vectors, candidates)
    result = {'ranks' : [] , 'precision' : {}, 'recall' : {}, 'overlap' : {} }
    for n in ns:
        result['precision'][n] = []
        result['recall'][n] = []
        result['overlap'][n] = []
    for j, row in enumerate(distances):
        # Candidate indices sorted most- to least-similar (smallest distance first).
        ranked = numpy.argsort(row)
        if exclude_self:
            ranked = ranked[ranked!=j]
        # Positions (in ranked order) of the correct candidates.
        id_correct = numpy.where(correct[j][ranked])[0]
        # 1-based rank of the best correct candidate.
        # NOTE(review): raises IndexError if a row of `correct` has no True entry.
        rank1 = id_correct[0] + 1
        topn = {}  # NOTE(review): unused.
        for n in ns:
            id_topn = ranked[:n]
            # Overlap of the top-n candidates with the set of correct candidates.
            overlap = len(set(id_topn).intersection(set(ranked[id_correct])))
            result['precision'][n].append(overlap/n)
            result['recall' ][n].append(overlap/len(id_correct))
            result['overlap' ][n].append(overlap)
        result['ranks'].append(rank1)
    return result
class Cdist():
    """Return cosine distances between two sets of vectors.

    Compiles a theano graph once at construction; __call__ evaluates it,
    optionally splitting the first argument into row batches.
    """
    def __init__(self, batch_size=None):
        # batch_size: max rows of A per theano call (None = one call for all).
        self.batch_size = batch_size
        self.U = T.matrix('U')
        self.V = T.matrix('V')
        # L2-normalise rows so the dot product below is cosine similarity.
        self.U_norm = self.U / self.U.norm(2, axis=1).reshape((self.U.shape[0], 1))
        self.V_norm = self.V / self.V.norm(2, axis=1).reshape((self.V.shape[0], 1))
        self.W = T.dot(self.U_norm, self.V_norm.T)
        self.cosine = theano.function([self.U, self.V], self.W)
    def __call__(self, A, B):
        if self.batch_size is None:
            chunks = [A]
        else:
            # Split A into batch_size-row chunks to bound per-call memory.
            chunks = numpy.split(A, [i for i
                    in range(self.batch_size, A.shape[0], self.batch_size) ])
        cosines = numpy.vstack([self.cosine(chunk, B) for chunk in chunks])
        # Distance = 1 - similarity.
        return 1 - cosines
import json
import imaginet.defn.visual as visual
from imaginet.simple_data import phonemes
from scipy.spatial.distance import cosine
def eval_bestimg(modelpath, testpath, tokenize=phonemes):
    """Accuracy of the visual model at picking the right image for a sentence.

    Each JSON-lines row of `testpath` names a sentence id and candidate
    image ids; the prediction is the candidate whose feature vector is
    closest (cosine) to the model's projection of the sentence.
    """
    rows = [ json.loads(line) for line in open(testpath)]
    model = visual.load(path=modelpath)
    scaler = model.scaler
    batcher = model.batcher
    mapper = batcher.mapper
    img_fs = {}
    sent_ids = {}
    # NOTE(review): `dp` is not imported anywhere in this file -- this call
    # raises NameError as written; presumably a data-provider module.
    prov = dp.getDataProvider('coco', root='/home/gchrupala/repos/reimaginet')
    for split in ['val','test','restval']:
        for img in prov.iterImages(split=split):
            # Scale image features with the model's fitted scaler.
            img_fs[img['cocoid']] = scaler.transform([ img['feat'] ])[0]
            for sent in img['sentences']:
                sent_ids[sent['sentid']]=sent
    def response(row):
        # 1-based index of the candidate image closest to the predicted vector.
        sent = sent_ids[row['meta']['id']]
        inputs = list(mapper.transform([tokenize(sent) ]))
        pred = model.Visual.predict(batcher.batch_inp(inputs))[0]
        return 1+numpy.argmin([ cosine(pred, img_fs[cocoid]) for cocoid in row['meta']['candidates']])
    preds = numpy.array([ response(row) for row in rows ])
    target = numpy.array([ row['meta']['response'] for row in rows])
    return numpy.mean(preds==target)
|
[
"g.chrupala@uvt.nl"
] |
g.chrupala@uvt.nl
|
ec03231406e390198411099c19d5d3824fc7495c
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/GEM/YW_CYBMM_SZSJ_143.py
|
13ea7b42f76855712e5a8114bc47a20da2331362
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,065
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_CYBMM_SZSJ_143(xtp_test_case):
    # Case YW_CYBMM_SZSJ_143: best-5-or-cancel SELL order on ChiNext whose
    # quantity is not a multiple of 100 -- expected to be rejected.
    def test_YW_CYBMM_SZSJ_143(self):
        # Case title (runtime log data -- kept in Chinese).
        title = '交易日五档即成转撤销卖-非最后一次卖为非100的倍数'
        # Expected values for this test case.
        # Expected status is one of: initial, untraded, partially traded,
        # fully traded, partial-cancel reported, partially cancelled,
        # reported awaiting cancel, cancelled, rejected, cancel-rejected,
        # internally cancelled.
        # xtp_ID and cancel_xtpID default to 0 and need not be changed.
        case_goal = {
            '期望状态': '废单',
            'errorID': 11010123,
            'errorMSG': queryOrderErrorMsg(11010123),
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Build the order parameters ------------------------------------------
        # Args: ticker, market, security type, security status, trading
        # status, side ('B' buy / 'S' sell), expected status, Api.
        stkparm = QueryStkPriceQty('300130', '2', '2', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, fail the case outright.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            # Order request: sell 88 shares (not a 100 multiple) at limit-up
            # price with the best-5-or-cancel price type.
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
                'price': stkparm['涨停价'],
                'quantity': 88,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            # Place the order and compare the outcome with the expectation.
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
    # Run this case directly via unittest's CLI runner.
    unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
e94b34f7a4b61d2270b468fc4f32388eb0bd1a4b
|
a8d5cb55b80f4c160323b6c6fbe094c6e6634c75
|
/users/migrations/0002_user_date_validated.py
|
f66d0dab256b23098a3db314f10101d8bcfdba4b
|
[] |
no_license
|
birkoss/mtg-achievements-api
|
00d5d954e3c5bbf919a44ef49bde9feaf3ceee61
|
133806c668f92b4b94d0f731edec5c08041010cc
|
refs/heads/master
| 2023-03-28T18:16:06.077949
| 2021-04-03T13:34:15
| 2021-04-03T13:34:15
| 347,626,595
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
# Generated by Django 3.1.7 on 2021-03-14 13:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the nullable `date_validated` timestamp to the user model."""
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='date_validated',
            # Nullable: users who have not validated simply have no timestamp.
            field=models.DateTimeField(null=True),
        ),
    ]
|
[
"admin@birkoss.com"
] |
admin@birkoss.com
|
c432544fd2dcaf1b9c22fc5905cc20ab2b72813e
|
88bbf27deb0b2a1b96985c0a94ff0b7a3d012820
|
/hq/wsgi.py
|
30b1d7a4018f35aa170b75ea1e1d0f1b1cca4721
|
[] |
no_license
|
Code-Community99/Hiq-django
|
e8efb7d63bd4fc0bc8e2af193fdec9aaab0975b0
|
af62622648ad88f6e8d94e86a8dc5d6660e3bbe2
|
refs/heads/master
| 2022-12-14T01:12:45.218318
| 2020-05-18T23:29:35
| 2020-05-18T23:29:35
| 233,811,384
| 2
| 1
| null | 2022-12-08T03:34:53
| 2020-01-14T10:02:55
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 163
|
py
|
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hq.settings')
# WSGI callable used by application servers (gunicorn/uwsgi/mod_wsgi).
application = get_wsgi_application()
|
[
"duncansantiago18@gmail.com"
] |
duncansantiago18@gmail.com
|
11e3806d74fcbc6ee3de46136854f7d5e113f04a
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_199/2374.py
|
daaadf5cd464dc0f520aa0689946e9fa40c58e1b
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,584
|
py
|
# Number of flips performed by the most recent myfunc() run.  The original
# file had a bare module-level `global flipcounter` statement here, which is
# a no-op and left the name undefined until myfunc() first assigned it, so
# calling flip() on its own crashed with NameError.
flipcounter = 0


def flip(line, start, k):
    """Invert the k cells line[start:start+k] in place and count one flip."""
    global flipcounter
    for offset in range(k):
        pos = start + offset
        line[pos] = '-' if line[pos] == '+' else '+'
    flipcounter = flipcounter + 1


def myfunc(line, k):
    """Greedy pancake flipper (Code Jam "Oversized Pancake Flipper").

    line: list of '+'/'-' characters (mutated in place); k: flipper width.
    Returns None when every pancake ends happy-side up ('+'); the number
    of flips used is left in the module-level `flipcounter`.  Returns the
    string "impossible" otherwise.
    """
    global flipcounter
    flipcounter = 0
    pluscounter = 0
    # Left-to-right greedy pass: flip whenever the leftmost unfixed cell is '-'.
    for i in range(len(line) - k + 1):
        if line[i] == '+':
            pluscounter = pluscounter + 1
            continue
        else:
            flip(line, i, k)
    # All cells were already '+' before any flips.
    if pluscounter == len(line):
        return
    # Recount after the flips in case the greedy pass fixed everything.
    pluscounter = 0
    for i in range(len(line)):
        if line[i] == '+':
            pluscounter = pluscounter + 1
    if pluscounter == len(line):
        return
    # NOTE(review): this reversed pass adds to flipcounter but its success is
    # never re-checked -- behavior preserved as-is from the original logic.
    line = list(reversed(line))
    for i in range(len(line) - k + 1):
        if line[i] == '+':
            pluscounter = pluscounter + 1
            continue
        else:
            flip(line, i, k)
    return "impossible"
# Driver: solve every case in A-large.in and write verdicts to `outputlarge`.
myfile = open("A-large.in", "r")
noofcases = myfile.readline();
outfile = open("outputlarge", 'w')
for i in range(int(noofcases)):
    # Each case line: the pancake string followed by the flipper size k.
    myinput = myfile.readline();
    mylist = myinput.split( );
    returnval = myfunc (list(mylist[0]),int(mylist[1]),)
    if(returnval == "impossible"):
        outfile.write("Case #" + str(i+1) + ": IMPOSSIBLE\n" ) # python will convert \n to os.linesep
    else:
        outfile.write("Case #" + str(i+1) + ": " + str(flipcounter)+"\n" ) # python will convert \n to os.linesep
outfile.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
67f81d788c523b73d542f8546f97f8bef336fb9f
|
13faa0d553ed6c6a57791db3dfdb2a0580a1695b
|
/CodeChef/Long/August 2017/MATDW.py
|
45224dc97400725d10522949e774031fc7ef9126
|
[] |
no_license
|
kautsiitd/Competitive_Programming
|
ba968a4764ba7b5f2531d03fb9c53dc1621c2d44
|
a0d8ae16646d73c346d9ce334e5b5b09bff67f67
|
refs/heads/master
| 2021-01-17T13:29:52.407558
| 2017-10-01T09:58:23
| 2017-10-01T09:58:23
| 59,496,650
| 0
| 0
| null | 2017-05-20T17:27:18
| 2016-05-23T15:56:55
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,639
|
py
|
import sys
def printIt(s):
    # Python 2 print statement; flush immediately so the judge sees each
    # command without buffering delays (this is an interactive problem).
    print s
    sys.stdout.flush()
def bestDisk(diskInfo):
    """Sort key for hard disks: the ratio diskInfo[0] / diskInfo[3] as a float."""
    return diskInfo[0] / float(diskInfo[3])
# Interactive driver (Python 2): buy disks sorted by bestDisk(), then serve
# n rounds of store/increase/erase queries via the printIt protocol.
n,h = map(int,raw_input().split())
# Each disk line gets its original index appended so "p b <index>" can name it.
hardDisksInfo = sorted([map(int,raw_input().split())+[i] for i in range(h)], key=bestDisk)
# NOTE(review): Python 2 input() evals the line -- penalty is read as a literal.
penalty = input()
numberOfHardDisk = 0
currentHardDisk = -1
userMap = {}
hardDiskMap = {}
exceed = False
hardDiskUsed = True
for _ in range(n):
    # Buying hardDisk (always re-buys the cheapest-per-capacity disk type).
    currentCapacity = 0
    if numberOfHardDisk < 1050 and hardDiskUsed:
        printIt("p b "+str(hardDisksInfo[0][-1]))
        hardDiskUsed = False
        numberOfHardDisk += 1
        currentHardDisk += 1
        currentCapacity = hardDisksInfo[0][3]
    else:
        exceed = True
    # Asking query
    printIt("g")
    q = map(int,raw_input().split())
    qType = q[0]
    if qType == 0:
        # Store request: place the user's data on the freshly bought disk.
        userId = q[1]
        userData = q[2]
        if userData < currentCapacity and not(exceed):
            userMap[userId] = currentHardDisk
            hardDiskUsed = True
            hardDiskMap[currentHardDisk] = userId
            hardDiskMap[currentHardDisk-1] = userId
            printIt("p s "+str(currentHardDisk)+" 0")
        else:
            printIt("p s -1 -1")
    elif qType == 1:
        # Increase request: grow the data of a known user on their disk.
        userId = q[1]
        userData = q[2]
        if userId in userMap:
            printIt("p i "+str(userMap[userId])+" "+str(userData))
        else:
            printIt("p i -1 -1")
    else:
        # Erase event: forget the user whose disk was wiped.
        erasedHardDisk = q[1]
        if erasedHardDisk in hardDiskMap:
            erasedUser = hardDiskMap[erasedHardDisk]
            if erasedUser in userMap:
                del userMap[erasedUser]
print "end"
|
[
"kautsiitd@gmail.com"
] |
kautsiitd@gmail.com
|
53cc9c9f7fc41839ec9ba889a34855ff10ac768b
|
e5a044708032b853f1cdf8906da63502716fd410
|
/test/test_payment_tokenization_error_response.py
|
e86d56dc5ffac418b05c1b6b00de8fdc69fafcad
|
[] |
no_license
|
GBSEcom/Python
|
4b93bab80476051fc99f379f018ac9fa109a8a6a
|
5fa37dba8d0c3853686fdc726f863743376060c9
|
refs/heads/master
| 2021-12-04T12:55:29.605843
| 2021-11-19T22:01:03
| 2021-11-19T22:01:03
| 136,058,345
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
# coding: utf-8
"""
Payment Gateway API Specification.
The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501
The version of the OpenAPI document: 21.5.0.20211029.001
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.payment_tokenization_error_response import PaymentTokenizationErrorResponse # noqa: E501
from openapi_client.rest import ApiException
class TestPaymentTokenizationErrorResponse(unittest.TestCase):
    """PaymentTokenizationErrorResponse unit test stubs (OpenAPI-generated)."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testPaymentTokenizationErrorResponse(self):
        """Test PaymentTokenizationErrorResponse"""
        # FIXME: construct object with mandatory attributes with example values
        # model = openapi_client.models.payment_tokenization_error_response.PaymentTokenizationErrorResponse()  # noqa: E501
        pass
if __name__ == '__main__':
    # Run the generated stubs via unittest's CLI runner.
    unittest.main()
|
[
"emargules@bluepay.com"
] |
emargules@bluepay.com
|
2fd7d116a398cf706f0f19d4f65963b0537f9d24
|
0323561eacf19846e7e293d9cbc0b5e0e2de1d91
|
/Step_8/A3C_NETWORK.py
|
0203c47f042512a2d9d7c3992d69d133fb3ece7a
|
[
"Apache-2.0"
] |
permissive
|
LeeDaeil/Process_A3C
|
77cb3760f579fb1d80e191871bf853e27089bff9
|
1876fbe1b928e13b9c8766095b2d13abfda94019
|
refs/heads/master
| 2020-03-31T15:39:23.567327
| 2019-07-10T09:40:25
| 2019-07-10T09:40:25
| 152,345,573
| 1
| 2
|
Apache-2.0
| 2018-12-25T02:34:36
| 2018-10-10T01:39:29
|
Python
|
UTF-8
|
Python
| false
| false
| 4,228
|
py
|
from keras.layers import Dense, Input, LSTM
from keras.models import Model
from keras.layers.wrappers import TimeDistributed
from keras.optimizers import RMSprop
from keras import backend as K
from Step_6.Parameter import PARA
class A3C_net_model:
    """Builds the actor/critic Keras models (LSTM or DNN variant per PARA.Model)."""
    def __init__(self):
        # Input shape depends on the configured architecture.
        if PARA.Model == 'LSTM':
            # self.input_shape = (10, 2) # basic LSTM (1, 2, 3) shape
            self.input_shape = (10, 3) # basic LSTM (1, 2, 3) shape
        elif PARA.Model == 'DNN':
            self.input_shape = (3,) # basic DNN (1, 3) shape
        # Number of discrete actions the policy head outputs.
        self.action_size = 3
    def _make_model(self):
        # Shared hidden layers feeding two heads: a softmax policy (actor)
        # and a linear state-value output (critic).
        in_put = Input(shape=self.input_shape)
        if PARA.Model == 'LSTM':
            hidden_layer = TimeDistributed(Dense(64), input_shape=self.input_shape)(in_put)
            hidden_layer = LSTM(32, return_sequences=True)(hidden_layer)
            hidden_layer = LSTM(16)(hidden_layer)
        elif PARA.Model == 'DNN':
            hidden_layer = Dense(64, activation='relu')(in_put)
            hidden_layer = Dense(32, activation='relu')(hidden_layer)
        policy = Dense(self.action_size, activation='softmax')(hidden_layer)
        critic = Dense(1, activation='linear')(hidden_layer)
        actor = Model(inputs=in_put, outputs=policy)
        cric = Model(inputs=in_put, outputs=critic)
        return actor, cric
class A3C_shared_network:
    """Global (shared) A3C network: actor/critic models plus their optimizers."""
    def __init__(self):
        print('Main_net')
        self.A3C_net_model = A3C_net_model()
        self.actor, self.cric = self._make_actor_critic_network()
        self.optimizer = [self._actor_optimizer(), self._critic_optimizer()]
        # NOTE(review): typo -- presumably "counter"; kept for compatibility.
        self.conter = 0
    def _make_actor_critic_network(self):
        # Build the actor/critic networks as follows.
        actor, cric = self.A3C_net_model._make_model()
        actor._make_predict_function()
        cric._make_predict_function()
        if PARA.show_model:
            actor.summary()
            cric.summary()
        return actor, cric
    def _actor_optimizer(self):
        # Returns a K.function(inputs=[states, actions, advantages]) that
        # performs one RMSprop update of the actor weights.
        action = K.placeholder(shape=[None, self.A3C_net_model.action_size])
        advantage = K.placeholder(shape=[None, ])
        policy = self.actor.output
        # Policy cross-entropy loss, weighted by the advantage.
        action_prob = K.sum(action * policy, axis=1)
        cross_entropy = K.log(action_prob + 1e-10) * advantage
        cross_entropy = -K.sum(cross_entropy)
        # Entropy bonus to keep the policy exploring.
        entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)
        entropy = K.sum(entropy)
        # Final loss: cross-entropy plus the (weighted) entropy term.
        loss = cross_entropy + 0.01 * entropy
        optimizer = RMSprop(lr=2.5e-4, rho=0.99, epsilon=0.01)
        updates = optimizer.get_updates(self.actor.trainable_weights, [], loss)
        train = K.function([self.actor.input, action, advantage], [loss], updates=updates)
        return train
    def _critic_optimizer(self):
        # Returns a K.function(inputs=[states, discounted returns]) that
        # performs one RMSprop update of the critic weights.
        discount_prediction = K.placeholder(shape=(None,))
        value = self.cric.output
        # Loss: squared error between the discounted return and the value.
        loss = K.mean(K.square(discount_prediction - value))
        optimizer = RMSprop(lr=2.5e-4, rho=0.99, epsilon=0.01)
        updates = optimizer.get_updates(self.cric.trainable_weights, [], loss)
        train = K.function([self.cric.input, discount_prediction], [loss], updates=updates)
        return train
class A3C_local_network:
    """Worker-local copy of the actor/critic, initialised from the shared nets."""
    def __init__(self, shared_net_actor, shared_net_cric):
        print('Local_net')
        self.A3C_net_model = A3C_net_model()
        self.local_actor, self.local_cric = self._make_local_actor_critic_network(shared_net_actor, shared_net_cric)
    def _make_local_actor_critic_network(self, shared_net_actor, shared_net_cric):
        # Build fresh models, then copy the shared networks' weights in.
        local_actor, local_cric = self.A3C_net_model._make_model()
        local_actor._make_predict_function()
        local_cric._make_predict_function()
        local_cric.set_weights(shared_net_cric.get_weights())
        local_actor.set_weights(shared_net_actor.get_weights())
        if PARA.show_model:
            local_actor.summary()
            local_cric.summary()
        return local_actor, local_cric
|
[
"dleodlf1004@naver.com"
] |
dleodlf1004@naver.com
|
4a6e24f762958195452038e9fed1de89efdbd298
|
c1bd12405d244c5924a4b069286cd9baf2c63895
|
/azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/connectivity_hop_py3.py
|
1ab3ca4895e26853a60e072edcb9b2dcc90c301e
|
[
"MIT"
] |
permissive
|
lmazuel/azure-sdk-for-python
|
972708ad5902778004680b142874582a284a8a7c
|
b40e0e36cc00a82b7f8ca2fa599b1928240c98b5
|
refs/heads/master
| 2022-08-16T02:32:14.070707
| 2018-03-29T17:16:15
| 2018-03-29T17:16:15
| 21,287,134
| 1
| 3
|
MIT
| 2019-10-25T15:56:00
| 2014-06-27T19:40:56
|
Python
|
UTF-8
|
Python
| false
| false
| 2,122
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectivityHop(Model):
    """Information about a hop between the source and the destination.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar type: The type of the hop.
    :vartype type: str
    :ivar id: The ID of the hop.
    :vartype id: str
    :ivar address: The IP address of the hop.
    :vartype address: str
    :ivar resource_id: The ID of the resource corresponding to this hop.
    :vartype resource_id: str
    :ivar next_hop_ids: List of next hop identifiers.
    :vartype next_hop_ids: list[str]
    :ivar issues: List of issues.
    :vartype issues:
     list[~azure.mgmt.network.v2017_08_01.models.ConnectivityIssue]
    """
    # All fields are server-populated, hence marked readonly.
    _validation = {
        'type': {'readonly': True},
        'id': {'readonly': True},
        'address': {'readonly': True},
        'resource_id': {'readonly': True},
        'next_hop_ids': {'readonly': True},
        'issues': {'readonly': True},
    }
    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'address': {'key': 'address', 'type': 'str'},
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'next_hop_ids': {'key': 'nextHopIds', 'type': '[str]'},
        'issues': {'key': 'issues', 'type': '[ConnectivityIssue]'},
    }
    def __init__(self, **kwargs) -> None:
        super(ConnectivityHop, self).__init__(**kwargs)
        # Read-only fields start as None; the service fills them on response.
        self.type = None
        self.id = None
        self.address = None
        self.resource_id = None
        self.next_hop_ids = None
        self.issues = None
|
[
"noreply@github.com"
] |
lmazuel.noreply@github.com
|
3cd1756d135e7014c2c7cfd9d5f9e2379b777769
|
b44ae8c215c7577616ce94bbddda57d46ff46577
|
/experiments/sparsity/sameK_20repeats_movielens_100K/gaussian_gaussian_univariate.py
|
4a00a17770a6dc4e784f85d4bedd6be94cbc77c7
|
[] |
no_license
|
changchunli/BMF_Priors
|
06a74d89198b11c0c3ba673a1d4869986cd7bc2d
|
15b20537eefd36347ed84617882eeea1c453e162
|
refs/heads/master
| 2020-03-21T07:50:08.081910
| 2018-06-10T10:22:04
| 2018-06-10T10:22:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
'''
Measure sparsity experiment on the MovieLens 100K dataset, with
the All Gaussian model (univariate posterior).
'''
import sys, os
project_location = os.path.dirname(__file__)+"/../../../../"
sys.path.append(project_location)
from BMF_Priors.code.models.bmf_gaussian_gaussian_univariate import BMF_Gaussian_Gaussian_univariate
from BMF_Priors.data.movielens.load_data import load_movielens_100K
from BMF_Priors.experiments.sparsity.sparsity_experiment import sparsity_experiment
import matplotlib.pyplot as plt
''' Run the experiment. '''
R, M = load_movielens_100K()
model_class = BMF_Gaussian_Gaussian_univariate
n_repeats = 20
stratify_rows = False
# Fractions of entries known/unknown to sweep (sparser towards the right).
fractions_known = [0.07, 0.06, 0.05, 0.04, 0.03, 0.02]
fractions_unknown = [1. - v for v in fractions_known]
settings = {
    'R': R,
    'M': M,
    'K': 5,
    'hyperparameters': { 'alpha':1., 'beta':1., 'lamb':0.1 },
    'init': 'random',
    'iterations': 250,
    'burn_in': 200,
    'thinning': 1,
}
fout = './results/performances_gaussian_gaussian_univariate.txt'
average_performances, all_performances = sparsity_experiment(
    n_repeats=n_repeats, fractions_unknown=fractions_unknown, stratify_rows=stratify_rows,
    model_class=model_class, settings=settings, fout=fout)
''' Plot the performance. '''
plt.figure()
plt.title("Sparsity performances")
plt.plot(fractions_unknown, average_performances['MSE'])
plt.ylim(0,4)
# NOTE(review): the figure is never shown or saved (no plt.show()/savefig).
|
[
"tab43@cam.ac.uk"
] |
tab43@cam.ac.uk
|
2cb0388f30962d3fce1d5759446ab5384374b8a0
|
ff4302db00bc503ab86ae02f170b078e1cda2312
|
/daili/tiantian_cart/urls.py
|
b8ce4472c71db93b77dd44ee8ad49601cd8f9f76
|
[] |
no_license
|
pythonIn/dali
|
28f3eab4f747fa0fc885d21b650e3111a0a88da2
|
f37ed9a4d3e52cf1dabe454cb434abf736eb04bf
|
refs/heads/master
| 2020-03-16T15:48:36.869406
| 2018-06-01T18:43:57
| 2018-06-01T18:43:57
| 132,759,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
from django.conf.urls import url, include
import views
# Cart routes; the captured groups are passed positionally to the views
# (presumably a product id and a quantity -- confirm against views.py).
urlpatterns = [
    url(r'^cart$',views.cart),
    url(r"^add(\d+)_(\d+)$", views.add),
    url(r'^mend(\d+)_(\d+)$', views.mend),
    url(r'^cart_del(\d+)$', views.cart_del)
]
|
[
"xwp_fullstack@163.com"
] |
xwp_fullstack@163.com
|
e1156b9394dfbc82e1105de1b18c7c019647151c
|
10920b11a22a20f9a7f63157818327f3c4e41888
|
/jibby_opencv/Object Recognition/image.py
|
51ebc065790ef203d5025db954510b6b13e3513e
|
[] |
no_license
|
dsall/computerv
|
e331b3d025c8cec0119b789107d1fef18d08f02a
|
40671d618c31ad9d9b20fc902a218a8e281098bc
|
refs/heads/master
| 2021-09-15T09:33:08.495580
| 2018-05-29T23:41:42
| 2018-05-29T23:41:42
| 135,363,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,968
|
py
|
import urllib.request
import cv2
import numpy as np
import os
def store_raw_images(start_num=877):
    """Download 'people' images from ImageNet into neg/ as 100x100 grayscale JPEGs.

    Args:
        start_num: first numeric file name to use. The default 877 preserves
            the original hard-coded value (resuming after earlier downloads);
            it is now a parameter so other runs can start elsewhere.

    Network and disk I/O only; failed downloads are logged and skipped.
    """
    neg_images_link = 'http://image-net.org/api/text/imagenet.synset.geturls?wnid=n07942152'
    neg_image_urls = urllib.request.urlopen(neg_images_link).read().decode()
    pic_num = start_num
    if not os.path.exists('neg'):
        os.makedirs('neg')
    for i in neg_image_urls.split('\n'):
        try:
            print(i)
            urllib.request.urlretrieve(i, "neg/"+str(pic_num)+".jpg")
            # Re-read in grayscale and shrink so it can serve as a negative sample.
            img = cv2.imread("neg/"+str(pic_num)+".jpg", cv2.IMREAD_GRAYSCALE)
            # should be larger than samples / pos pic (so we can place our image on it)
            resized_image = cv2.resize(img, (100, 100))
            cv2.imwrite("neg/"+str(pic_num)+".jpg", resized_image)
            pic_num += 1
        except Exception as e:
            # Dead URL, unreadable image, etc. -- log and move on.
            print(str(e))
#store_raw_images()
def find_uglies():
    """Delete images in neg/ that are pixel-identical to any image in uglies/.

    Fix: removed the unused local `match = False` left over from an earlier
    version. Behaviour is otherwise unchanged: unreadable files make
    cv2.imread return None, whose .shape access raises and is logged.
    """
    for file_type in ['neg']:
        for img in os.listdir(file_type):
            for ugly in os.listdir('uglies'):
                try:
                    current_image_path = str(file_type)+'/'+str(img)
                    ugly = cv2.imread('uglies/'+str(ugly))
                    question = cv2.imread(current_image_path)
                    # Same shape and zero XOR difference => exact duplicate.
                    if ugly.shape == question.shape and not(np.bitwise_xor(ugly,question).any()):
                        print('That is one ugly pic! Deleting!')
                        print(current_image_path)
                        os.remove(current_image_path)
                except Exception as e:
                    print(str(e))
#find_uglies()
def create_pos_n_neg():
    """Append every image path under neg/ to bg.txt, one per line."""
    for folder in ['neg']:
        for image_name in os.listdir(folder):
            if folder == 'neg':
                entry = folder + '/' + image_name + '\n'
                with open('bg.txt', 'a') as bg_file:
                    bg_file.write(entry)
# Build the background-image list when the module is executed.
create_pos_n_neg()
|
[
"djiby45@outlook.com"
] |
djiby45@outlook.com
|
a261fb52ce97eb8401acfc01dd3dd7cc0ab4a979
|
a0cbae33d175fdf0299eddc775a1b4b84c0addcf
|
/orquesta/tests/unit/specs/mistral/test_base_spec.py
|
1559a000d680dcef5ba76c42537c582ccfa6a8bf
|
[
"Apache-2.0"
] |
permissive
|
batk0/orquesta
|
240ff95c76c610c52518ee7d2e3eee11b6594a73
|
f03f3f2f3820bf111a9277f4f6c5d6c83a89d004
|
refs/heads/master
| 2020-04-17T10:48:48.016607
| 2019-01-19T15:40:05
| 2019-01-19T15:40:05
| 166,514,957
| 0
| 0
|
Apache-2.0
| 2019-01-19T06:37:39
| 2019-01-19T06:37:39
| null |
UTF-8
|
Python
| false
| false
| 1,958
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from orquesta.specs import loader
from orquesta.specs import mistral
from orquesta.specs.mistral import v2 as mistral_v2
class SpecTest(unittest.TestCase):
def setUp(self):
super(SpecTest, self).setUp()
self.spec_module_name = 'mistral'
def test_get_module(self):
self.assertEqual(
loader.get_spec_module(self.spec_module_name),
mistral
)
def test_get_spec(self):
spec_module = loader.get_spec_module(self.spec_module_name)
self.assertEqual(
spec_module.WorkflowSpec,
mistral.WorkflowSpec
)
def test_spec_catalog(self):
spec_module = loader.get_spec_module(self.spec_module_name)
self.assertEqual(
spec_module.WorkflowSpec.get_catalog(),
self.spec_module_name
)
def test_spec_version(self):
self.assertEqual('2.0', mistral_v2.VERSION)
self.assertEqual('2.0', mistral.VERSION)
def test_workflow_spec_imports(self):
self.assertEqual(
mistral.WorkflowSpec,
mistral_v2.workflows.WorkflowSpec
)
def test_task_spec_imports(self):
self.assertEqual(
mistral.TaskDefaultsSpec,
mistral_v2.tasks.TaskDefaultsSpec
)
self.assertEqual(
mistral.TaskSpec,
mistral_v2.tasks.TaskSpec
)
|
[
"m4d.coder@gmail.com"
] |
m4d.coder@gmail.com
|
df4fecd88d5cd9e582134ca7ea60cbda93a24e83
|
1dc0de033e5f4e2471fb0ecdf55cc955e9287836
|
/lines_bars_and_markers/120-绘制条形图.py
|
2dacb6031c45e979a4e0de31749212506fa89c36
|
[] |
no_license
|
weiyinfu/learnMatplotlib
|
5db2337723751a10d5dc6f077c97bef0fb919c0d
|
3b173161f96d7b419c1b1be65f09e267141fa385
|
refs/heads/master
| 2022-08-29T20:46:26.222042
| 2022-08-02T02:22:33
| 2022-08-02T02:22:33
| 147,894,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 695
|
py
|
# Import the plotting module
import matplotlib.pyplot as plt

# Build the data (GDP figures for the four municipalities)
GDP = [12406.8, 13908.57, 9386.87, 9143.64]
# Handle Chinese-character rendering: use the SimHei font and keep the
# minus sign displayable.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# Draw the bar chart
plt.bar(range(4), GDP, align='center', color='steelblue', alpha=0.8)
# Add the y-axis label
plt.ylabel('GDP')
# Add the title ("GDP showdown of the four municipalities")
plt.title('四个直辖市GDP大比拼')
# Add the tick labels (Beijing, Shanghai, Tianjin, Chongqing)
plt.xticks(range(4), ['北京市', '上海市', '天津市', '重庆市'])
# Set the y-axis range
plt.ylim([5000, 15000])
# Put a numeric label just above each bar
for x, y in enumerate(GDP):
    plt.text(x, y + 100, '%s' % round(y, 1), ha='center')
# Show the figure
plt.show()
|
[
"weiyinfu.weiyinfu@bytedance.com"
] |
weiyinfu.weiyinfu@bytedance.com
|
58c292c8901934720c90bda9409969ed438bc743
|
b1c403ad1211221427dddc80a7f15956da498175
|
/0x03-caching/0-basic_cache.py
|
d6dab650de3d655f0aa8fac8482cca3dc26d9587
|
[] |
no_license
|
oumaymabg/holbertonschool-web_back_end
|
246dd47b9abdb277d6ef539c9bc38a8f0509554a
|
dce7ff683d8bce9ad986d72948c9e75ca7b80d2a
|
refs/heads/master
| 2023-09-05T22:27:28.961336
| 2021-11-15T21:05:53
| 2021-11-15T21:05:53
| 389,440,104
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
#!/usr/bin/python3
""" Basic dictionary """
from base_caching import BaseCaching
class BasicCache(BaseCaching):
    """Unbounded cache built on BaseCaching's ``cache_data`` dictionary."""

    def put(self, key, item):
        """Store ``item`` under ``key``; falsy keys or items are ignored."""
        if not key or not item:
            return
        self.cache_data[key] = item

    def get(self, key):
        """Return the value cached under ``key``, or None when missing."""
        if key is None:
            return None
        return self.cache_data.get(key)
|
[
"oumaymabou257@gmail.com"
] |
oumaymabou257@gmail.com
|
ec5cac4f337e78f90c3b2276496c1e93b970c87c
|
14baf43101ef0d805bfe287a1498106f9b89580f
|
/lol.py
|
77acc4b5075751f88bc3a0b7d258b4fe2d8e00c3
|
[] |
no_license
|
justinembawomye/python-fun
|
e10358e1825697d725dd9d11b94bbcc920965800
|
edabbff26a39fefe36b9800b784fee438fa1b2c8
|
refs/heads/master
| 2023-08-27T13:20:56.258556
| 2021-10-28T20:17:49
| 2021-10-28T20:17:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 152
|
py
|
def main():
    """Entry point: greet the cat three times."""
    for _ in range(3):
        meow()


def meow():
    """Print the greeting line."""
    print('Hey cat!! .....Y.O.L.O')


main()
|
[
"jmbawomye@gmail.com"
] |
jmbawomye@gmail.com
|
c95518e76732e60e2498accc86ec780469776dd4
|
1e7673cf975dbdafd57cf040b3df00bf62da2f2a
|
/final_PhonoCi2.0/makegif.py
|
63954ff3e0d581008d002f990733acea4847b393
|
[] |
no_license
|
pondjames007/RWET
|
9771058767e45392537f20d701b772d4ec776ff2
|
5fbfc46ab3f98336cfa7bd3c1789097a7f8e16b9
|
refs/heads/master
| 2021-05-04T00:53:03.212648
| 2018-05-04T20:42:15
| 2018-05-04T20:42:15
| 120,354,507
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,059
|
py
|
import requests
from bs4 import BeautifulSoup
import os
import sys
import random
from PIL import Image, ImageDraw, ImageFont
import textwrap
def download_file(url, local_filename=None):
    """Stream *url* to disk and return the local file name.

    When *local_filename* is None the last path segment of the URL is used.
    Scheme-relative URLs (``//host/...``) get an ``http:`` prefix.
    """
    if local_filename is None:
        local_filename = url.split('/')[-1]
    # Earlier caching shortcut, kept for reference:
    # if os.path.exists(local_filename):
    #     return local_filename
    if not url.startswith('http'):
        url = 'http:' + url
    # stream=True downloads in chunks instead of loading the body in memory.
    r = requests.get(url, stream=True)
    with open(local_filename, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
    return local_filename
def get_images(query, i):
    """Grab one random Shutterstock thumbnail for *query* into frames/<i>.jpg.

    The saved frame is captioned in place via edit_image(). Returns True on
    success, False when no thumbnail was found or download/editing failed.
    """
    url = "https://www.shutterstock.com/search?searchterm=" + query
    html = requests.get(url).text
    soup = BeautifulSoup(html, 'html.parser')
    # Thumbnail <img> tags sit under elements with class "img-wrap".
    images = soup.select('.img-wrap > img')
    print(len(images))
    if len(images) == 0:
        return False
    else:
        img_url = random.choice(images).get("src")
        savedname = 'frames/' + str(i) + '.jpg'
        try:
            raw_image = download_file(img_url, savedname)
            print(raw_image)
            # Overlay the query text onto the downloaded frame in place.
            edit_image(raw_image, query)
            return True
        except Exception as e:
            print(e)
            return False
def edit_image(imagename, words):
    """Resize *imagename* to 400x400 and draw *words* centred over a
    translucent black band, saving the result back to the same file."""
    image = Image.open(imagename)
    image = image.resize((400,400))
    # print(image.size)
    canvas = ImageDraw.Draw(image, 'RGBA')
    useFont = "/Library/Fonts/Verdana.ttf"  # NOTE(review): macOS-specific font path -- confirm on other hosts
    font = ImageFont.truetype(useFont, 30)
    # Earlier multi-line variant, kept for reference:
    # lines = textwrap.wrap(words, width=15)
    # y_height = 0
    # for line in lines:
    # NOTE(review): ImageDraw.textsize is deprecated in newer Pillow; textbbox
    # is the replacement -- confirm the installed Pillow version.
    w, h = canvas.textsize(words, font=font)
    canvas.rectangle([0, (image.size[1]-h)/2, image.size[0], (image.size[1]+h)/2], fill=(0, 0, 0, 30))
    canvas.text(((image.size[0]-w)/2, (image.size[1]-h)/2) , words, font=font, fill=(255,255,255))
    # y_height += h
    out_image_name = imagename
    image.save(out_image_name)
# edit_image("frames/1.jpg", "lololololol")
|
[
"jameshuang@nyu.edu"
] |
jameshuang@nyu.edu
|
bf76b641e40e5ae38de03980d78eb5ec4c5cea4e
|
1fe8d4133981e53e88abf633046060b56fae883e
|
/venv/lib/python3.8/site-packages/tensorflow/python/data/__init__.py
|
fdef948f260e00288a0fd67087f3b7bd58136b7c
|
[] |
no_license
|
Akira331/flask-cifar10
|
6c49db8485038731ce67d23f0972b9574746c7a7
|
283e7a2867c77d4b6aba7aea9013bf241d35d76c
|
refs/heads/master
| 2023-06-14T16:35:06.384755
| 2021-07-05T14:09:15
| 2021-07-05T14:09:15
| 382,864,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:d2c61947afcd5ecdc7b2c49e834ad9feeeed830ece0335e0f29375f7da626b17
size 1506
|
[
"business030301@gmail.com"
] |
business030301@gmail.com
|
f4167843167e81ed9d67f4244251831321f65ce7
|
d8a9b88f4087ebfe97b462e589071222e2261e47
|
/189. Rotate Array.py
|
e4112871d60ef56e0896ddb710f0ccab9299cc03
|
[] |
no_license
|
rohitpatwa/leetcode
|
a7a4e8a109ace53a38d613b5f898dd81d4771b1b
|
f4826763e8f154cac9134d53b154b8299acd39a8
|
refs/heads/master
| 2021-07-07T12:40:30.424243
| 2021-03-31T00:21:30
| 2021-03-31T00:21:30
| 235,003,084
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
# Reverse the whole array, then un-reverse the first k and the last n-k
# elements -- a classic in-place O(1)-space rotation.
class Solution:
    def reverse(self, nums, start, end):
        """Reverse nums[start..end] (inclusive) in place via slice assignment."""
        nums[start:end + 1] = nums[start:end + 1][::-1]

    def rotate(self, nums: List[int], k: int) -> None:
        """
        Do not return anything, modify nums in-place instead.
        """
        length = len(nums)
        shift = k % length
        self.reverse(nums, 0, length - 1)
        self.reverse(nums, 0, shift - 1)
        self.reverse(nums, shift, length - 1)
|
[
"rohitpatwa@gmail.com"
] |
rohitpatwa@gmail.com
|
909cc6f34cdde8891f13255f83a6b221376d03b9
|
27e18001bd40f6fe5b9f675130e359147ce3519a
|
/20.py
|
e2bcaa0ba31c60aa60680cbbbe23bd056d298725
|
[] |
no_license
|
jsomers/project-euler
|
6934a5d4eb2c116b08face308a010ddb74e0c123
|
61cc4cd7978deeed9d071f678c786f991e05d8a7
|
refs/heads/master
| 2021-01-01T05:39:39.568380
| 2014-08-21T04:05:10
| 2014-08-21T04:05:10
| 10,680,061
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
# Give the sum of the digits in 100!
def mul(x, y):
return x*y
def fact(n):
facts = []
for i in range (1, n+1):
facts.append(i)
a = reduce(mul, facts)
return a
print fact(100)
string = '93326215443944152681699238856266700490715968264381621468592963895217599993229915608941463976156518286253697920827223758251185210916864000000000000000000000000'
numbers = []
for l in string:
numbers.append(int(l))
print sum(numbers)
|
[
"jsomers@gmail.com"
] |
jsomers@gmail.com
|
7a7281413d8c98326a014edb808a6254dad9ba1e
|
919e74f05976d9ea5f28d5dcf0a3e9311a4d22b2
|
/conans/test/functional/toolchains/gnu/autotools/test_apple_toolchain.py
|
c9217eac77c70998abec202599110b89c924298c
|
[
"MIT"
] |
permissive
|
thorsten-klein/conan
|
1801b021a66a89fc7d83e32100a6a44e98d4e567
|
7cf8f384b00ba5842886e39b2039963fc939b00e
|
refs/heads/develop
| 2023-09-01T12:04:28.975538
| 2023-07-26T10:55:02
| 2023-07-26T10:55:02
| 150,574,910
| 0
| 0
|
MIT
| 2023-08-22T14:45:06
| 2018-09-27T11:16:48
|
Python
|
UTF-8
|
Python
| false
| false
| 4,435
|
py
|
import os
import textwrap
import platform
import pytest
from conans.client.tools.apple import to_apple_arch
from conans.test.assets.autotools import gen_makefile
from conans.test.assets.sources import gen_function_h, gen_function_cpp
from conans.test.utils.tools import TestClient
makefile = gen_makefile(apps=["app"], libs=["hello"])
conanfile_py = textwrap.dedent("""
from conans import ConanFile, tools
from conan.tools.gnu import Autotools
class App(ConanFile):
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
generators = "AutotoolsToolchain"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def build(self):
env_build = Autotools(self)
env_build.make()
""")
@pytest.mark.skipif(platform.system() != "Darwin", reason="Only OSX")
@pytest.mark.parametrize("config", [("x86_64", "Macos", "10.14"),
                                    ("armv8", "iOS", "10.0"),
                                    ("armv7", "iOS", "10.0"),
                                    ("x86", "iOS", "10.0"),
                                    ("x86_64", "iOS", "10.0"),
                                    ("armv8", "Macos", "10.14")  # M1
                                    ])
def test_makefile_arch(config):
    """Cross-build the hello lib and app for each (arch, os, version) combo
    and verify via `lipo -info` that both binaries target the requested arch."""
    arch, os_, os_version = config
    # Host profile overrides os/arch; the build profile stays the default.
    profile = textwrap.dedent("""
        include(default)
        [settings]
        os = {os}
        os.version = {os_version}
        arch = {arch}
        """).format(os=os_, arch=arch, os_version=os_version)
    t = TestClient()
    hello_h = gen_function_h(name="hello")
    hello_cpp = gen_function_cpp(name="hello")
    main_cpp = gen_function_cpp(name="main", includes=["hello"], calls=["hello"])
    t.save({"Makefile": makefile,
            "hello.h": hello_h,
            "hello.cpp": hello_cpp,
            "app.cpp": main_cpp,
            "conanfile.py": conanfile_py,
            "profile": profile})
    t.run("install . --profile:host=profile --profile:build=default")
    t.run("build .")
    libhello = os.path.join(t.current_folder, "libhello.a")
    app = os.path.join(t.current_folder, "app")
    assert os.path.isfile(libhello)
    assert os.path.isfile(app)
    # Map conan's arch name to Apple's (e.g. armv8 -> arm64) before checking.
    expected_arch = to_apple_arch(arch)
    t.run_command('lipo -info "%s"' % libhello)
    assert "architecture: %s" % expected_arch in t.out
    t.run_command('lipo -info "%s"' % app)
    assert "architecture: %s" % expected_arch in t.out
@pytest.mark.skipif(platform.system() != "Darwin", reason="Only OSX")
@pytest.mark.parametrize("arch", ["x86_64", "armv8"])
def test_catalyst(arch):
    """Build for Mac Catalyst (os.subsystem = catalyst) and verify the binary
    arch with lipo; on x86_64 also run the app and check its runtime output."""
    profile = textwrap.dedent("""
        include(default)
        [settings]
        os = Macos
        os.version = 13.0
        os.sdk = macosx
        os.subsystem = catalyst
        os.subsystem.ios_version = 13.1
        arch = {arch}
        """).format(arch=arch)
    t = TestClient()
    hello_h = gen_function_h(name="hello")
    hello_cpp = gen_function_cpp(name="hello")
    # The app fails to compile unless TARGET_OS_MACCATALYST is defined,
    # proving the toolchain really targets Catalyst.
    main_cpp = textwrap.dedent("""
        #include "hello.h"
        #include <TargetConditionals.h>
        #include <iostream>
        int main()
        {
        #if TARGET_OS_MACCATALYST
            std::cout << "running catalyst " << __IPHONE_OS_VERSION_MIN_REQUIRED << std::endl;
        #else
            #error "not building for Apple Catalyst"
        #endif
        }
        """)
    t.save({"Makefile": makefile,
            "hello.h": hello_h,
            "hello.cpp": hello_cpp,
            "app.cpp": main_cpp,
            "conanfile.py": conanfile_py,
            "profile": profile})
    t.run("install . --profile:host=profile --profile:build=default")
    t.run("build .")
    libhello = os.path.join(t.current_folder, "libhello.a")
    app = os.path.join(t.current_folder, "app")
    assert os.path.isfile(libhello)
    assert os.path.isfile(app)
    expected_arch = to_apple_arch(arch)
    t.run_command('lipo -info "%s"' % libhello)
    assert "architecture: %s" % expected_arch in t.out
    t.run_command('lipo -info "%s"' % app)
    assert "architecture: %s" % expected_arch in t.out
    if arch == "x86_64":
        # armv8 binaries cannot run on an x86_64 CI host, so only x86_64
        # gets the runtime check.
        t.run_command('"%s"' % app)
        assert "running catalyst 130100" in t.out
|
[
"noreply@github.com"
] |
thorsten-klein.noreply@github.com
|
f149b88f3ad064c0087ca3f578642aea3bc5c4ed
|
c4af67db4c523d20f2d55aef90ba77db1fb53c38
|
/Archetypes/tests/test_construction.py
|
5e11425921baaa08285bc4c4a19308a46ef93964
|
[
"BSD-3-Clause"
] |
permissive
|
dtgit/dtedu
|
e59b16612d7d9ea064026bf80a44657082ef45a3
|
d787885fe7ed0de6f9e40e9b05d852a0e9d60677
|
refs/heads/master
| 2020-04-06T05:22:50.025074
| 2009-04-08T20:13:20
| 2009-04-08T20:13:20
| 171,351
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 779
|
py
|
import unittest
from Products.Archetypes.tests.atsitetestcase import ATSiteTestCase
class FactoryTest(ATSiteTestCase):
    """Exercise invokeFactory-based construction of Archetypes content."""

    def testSimplePortalType(self):
        # A freshly created SimpleType must report its own portal type.
        self.folder.invokeFactory(id="dummy", type_name="SimpleType")
        self.assertEqual(self.folder.dummy.getPtype(), "Simple Type")

    def XXXtestCopiedFTIPortalType(self):
        # Disabled (XXX prefix keeps the test runner from collecting it).
        # A known bug where `default_method` doesn't have the correct
        # portal type available. For a discussion, see
        # https://dev.plone.org/plone/ticket/6734
        self.folder.invokeFactory(id="dummy", type_name="MySimpleType")
        self.assertEqual(self.folder.dummy.getPtype(), "My Simple Type")
def test_suite():
    """Assemble the unittest suite containing FactoryTest."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(FactoryTest))
    return tests
|
[
"ron@domU-12-31-39-02-65-03.compute-1.internal"
] |
ron@domU-12-31-39-02-65-03.compute-1.internal
|
560b1cb677c6f79c4127fb6c7433e86b2f01752a
|
6320fef2ea7376c2b35f97f1a5af004e90f09098
|
/1-2주차 실습(복습)/venv/Lib/site-packages/bleach/utils.py
|
1163bd3e732017cd74fb3402832980ffcaa22fa2
|
[] |
no_license
|
Dplo1514/ploaistudy
|
7aa08d7f71653748a9e32dcc09ee8f6cec0aaed9
|
e35e42b1e5f0c90cc1e2a59993a1ef73d8872d0c
|
refs/heads/master
| 2023-09-03T00:45:55.601651
| 2021-10-24T12:19:38
| 2021-10-24T12:19:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
from collections import OrderedDict
def _attr_key(attr):
"""Returns appropriate key for sorting attribute names
Attribute names are a tuple of ``(namespace, name)`` where namespace can be
``None`` or a string. These can't be compared in Python 3, so we conver the
``None`` to an empty string.
"""
key = (attr[0][0] or ""), attr[0][1]
return key
def alphabetize_attributes(attrs):
    """Return an OrderedDict of *attrs* sorted by attribute name.

    Falsy input (``None`` or an empty dict) is returned unchanged.
    """
    if not attrs:
        return attrs
    ordered_items = sorted(attrs.items(), key=_attr_key)
    return OrderedDict(ordered_items)
|
[
"dladlsgur3334@gmail.com"
] |
dladlsgur3334@gmail.com
|
b3caaba13db1b1269a90e25161e1dace19a05ba5
|
f7d0c32b8d29dcff788d439c2b7051734afbbfc6
|
/meiduo1/apps/user/utils.py
|
3f1156f45390d1d7a949de53e45f59034719129f
|
[
"MIT"
] |
permissive
|
woobrain/nginx-uwsgi-web
|
dcf5159ba3f3332108c2d351ef3dac0cc504ade7
|
5b3ca1fba8205c2c0a2b91d951f812f1c30e12ae
|
refs/heads/master
| 2022-12-22T21:08:00.758841
| 2019-11-13T12:31:20
| 2019-11-13T12:31:20
| 221,447,696
| 0
| 0
|
MIT
| 2022-12-11T19:51:54
| 2019-11-13T11:52:19
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 664
|
py
|
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from meiduo1 import settings
def generic_active_email_url(id, email):
    """Build a time-limited (1 hour) e-mail activation URL for a user."""
    # Sign {id, email} with the project secret; tokens expire after 3600s.
    serializer = Serializer(secret_key=settings.SECRET_KEY, expires_in=3600)
    payload = {
        "id": id,
        "email": email
    }
    token = serializer.dumps(payload)
    return 'http://www.meiduo.site:8000/emailsactive/?token=%s' % token.decode()
def check_active_email_url(token_id):
    """Decode an activation token and return its payload dict, or None when
    the token is invalid or expired.

    Fix: the original used a bare ``except:`` which also traps
    KeyboardInterrupt/SystemExit; narrowed to ``except Exception`` (the
    serializer raises BadSignature/SignatureExpired subclasses of Exception).
    """
    s = Serializer(secret_key=settings.SECRET_KEY, expires_in=3600)
    try:
        token_id = s.loads(token_id)
    except Exception:
        return None
    return token_id
|
[
"xwp_fullstack@163.com"
] |
xwp_fullstack@163.com
|
f8eec1a3c5a02a18416e5676f7d9647bf0fd199e
|
35ff4e124ea73cd2630ddf25dfe019b4b4e3f5d6
|
/200_NumberOfIslands/200_NumberOfIslands_1.py
|
88b82c01ab92a45cbcc835225055f670d848f1c6
|
[] |
no_license
|
H-Cong/LeetCode
|
0a2084a4845b5d7fac67c89bd72a2adf49f90c3d
|
d00993a88c6b34fcd79d0a6580fde5c523a2741d
|
refs/heads/master
| 2023-03-19T15:22:00.971461
| 2021-03-11T00:33:00
| 2021-03-11T00:33:00
| 303,265,129
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,435
|
py
|
class Solution:
    def numIslands(self, grid: List[List[str]]) -> int:
        """Count 4-connected groups of "1" cells via BFS (mutates grid)."""
        rows, cols = len(grid), len(grid[0])
        islands = 0
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] == "1":
                    islands += 1
                    # Sink the whole island so it is not counted again.
                    self.bfs(grid, r, c)
        return islands

    def bfs(self, grid, i, j):
        """Flood from (i, j), flipping every reachable "1" neighbour to "0"."""
        rows, cols = len(grid), len(grid[0])
        frontier = collections.deque([(i, j)])
        while frontier:
            x, y = frontier.pop()
            for dx, dy in ((0, 1), (0, -1), (1, 0), (-1, 0)):
                nx, ny = x + dx, y + dy
                if 0 <= nx < rows and 0 <= ny < cols and grid[nx][ny] == "1":
                    frontier.appendleft((nx, ny))
                    grid[nx][ny] = "0"
# TC: O(m*n)
#   visits every cell of the grid once
# SC: O(min(m, n))
#   min(m, n) bounds the queue length
# It is worth mentioning that when you iterate the matrix from any corner,
# the length of the queue stays at or below min(m, n), since the queue is a
# FIFO data structure. However, if you start from the middle of the matrix,
# the queue can certainly grow longer than min(m, n) -- something like
# min(m, n)*3 + b -- but the dominating factor is still min(m, n).
|
[
"nych1989@gmail.com"
] |
nych1989@gmail.com
|
46737b82df4a556766e1e833e4e748b0474f551c
|
2136701f48ad131084b331039d864f85988cf451
|
/spider/work/media_huadong/somenew/spiders/zhongguojiangxiwang.py
|
a15ce477adf94d9e61ca22cfee9bc5f1b8e94dbf
|
[] |
no_license
|
cuiyulin77/other
|
9d374a47d482f1c3f9ef0f3ac4429487643b04b9
|
c00cafaf7607452966fa523c4d0b04edb7f153e6
|
refs/heads/master
| 2020-05-18T04:24:26.095929
| 2019-04-30T06:37:53
| 2019-04-30T06:37:53
| 184,169,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,443
|
py
|
# -*- coding: utf-8 -*-
import scrapy
from somenew.items import SomenewItem
import hashlib
import datetime
class DezhouxinwenSpider(scrapy.Spider):
    """Spider for jxnews.com.cn ("China Jiangxi Net"): collects article links
    from the front page, then scrapes title/time/body from each article."""
    # Site: China Jiangxi Net (中国江西网)
    name = 'zhongguojiangxiwang'
    allowed_domains = ['jxnews.com.cn']
    start_urls = ['http://www.jxnews.com.cn/']
    # start_urls = ['http://fc.jxnews.com.cn/system/2019/03/14/017417180.shtml']
    custom_settings = {'DOWNLOAD_DELAY': 0.8}

    def parse(self, response):
        """Harvest candidate article URLs from the front-page link lists."""
        # print(response.url)
        # The site uses several page templates, hence the long alternated XPath.
        xp = '//*[@id="Cen_Ri_R"]/div/table/tr/td/a/@href|//*[@id="PageOneRighLine"]/div[16]/ul/table/tr/td/a/@href|/html/body/div[32]/div[4]/div[1]/div/table/tr/td/@href|/html/body/div[32]/div[4]/div[2]/div/ul/li/a/@href|//*[@id="jsbbs"]/div/ul/li/a/@href|//div/div/ul/li/a/@href'
        res = response.xpath(xp).extract()
        print(res)
        for url in res:
            # Skip in-page anchors, regional sub-sites and forum links.
            if '#' not in url and 'jiangxi' not in url and 'wenz' not in url and 'bbs' not in url:
                yield scrapy.Request(url, callback=self.get_detail)

    def get_detail(self, response):
        """Extract one article into a SomenewItem; yield only when both a
        title and body text were found."""
        print(response.url, '响应的url')
        item = SomenewItem()
        # Alternated XPaths cover the different article templates.
        item['title'] = response.xpath("//h1/a/text()|//div[@class=\"biaoti\"]/*/text()|//h1/text()|//div[@class=\"BiaoTi\"]/text()").extract_first()
        item['time'] = response.xpath("/html/body/div[5]/div[1]/div[1]/h5/text()|//*[@id=\"pubtime_baidu\"]/text()|//div[@class=\"xbt\"]/span[1]/text()|//div[@class=\"text1t\"]/h5/text()").extract_first()
        item['content'] = response.xpath('//*[@id="content"]/p/text()|//p/text()').extract()
        item['come_from'] = '中国江西网'
        # Join the paragraphs and strip layout whitespace / control characters.
        item['content'] = ''.join(item['content']).replace('\u3000', u' ').replace(u'\xa0', u' ').\
            replace('\n', '').replace( '\u2002', '').replace( '\r', '').replace( '\r\n', '').strip()
        if item['content'] and item['title']:
            item['url'] = response.url
            # md5 of the URL serves as a stable article id.
            m = hashlib.md5()
            m.update(str(item['url']).encode('utf8'))
            item['article_id'] = m.hexdigest()
            item['media'] = '中国江西网'
            item['create_time'] = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
            # Engagement counters are unknown at crawl time; default to zero.
            item['comm_num'] = "0"
            item['fav_num'] = '0'
            item['read_num'] = '0'
            item['env_num'] = '0'
            item['media_type'] = '网媒'
            item['addr_province'] = '江西'
            print(item)
            yield item
|
[
"494658565@qq.com"
] |
494658565@qq.com
|
121ee74bb7d8df6823b01dac06b94931eb309d1a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03043/s556042691.py
|
4e6f577175da4a7f55616baa41faeb4da62a38fa
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
n,k=map(int, input().split())
ans=0
import math
for i in range(1,n+1):
sikou=math.ceil(math.log((k/i),2))
if sikou<0:
sikou=0
ans+=(1/n)*0.5**sikou
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
afd23e3eaa95e013ad89a9c3b5e91281ab263ef3
|
3147bb3457617842c24210c9c7e5a0d07fc548a6
|
/guardianCrossword.py
|
3e8b9aa7b1dcf1ebf5c095422550004651768d48
|
[] |
no_license
|
rashley2712/tools
|
4378a062276e4c5c52563842d7a077e3a5e84f42
|
e361abce89bfd4cf9e11d0e575c5d12c2c530e13
|
refs/heads/master
| 2023-01-10T12:52:50.744164
| 2022-12-21T14:19:01
| 2022-12-21T14:19:01
| 40,479,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,040
|
py
|
#!/usr/bin/env python3
import argparse
import datetime
import datetime
import sys
import urllib.request
import os
# Google Cloud libraries
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from apiclient.http import MediaFileUpload
def getDriveListing():
    """Print the 10 most recent files visible to the Drive service.

    NOTE(review): relies on a module-global `service` that is never assigned
    anywhere in this file -- calling this as-is raises NameError. Confirm
    whether it was meant to take `service` as a parameter.
    """
    # Call the Drive v3 API
    results = service.files().list(
        pageSize=10, fields="nextPageToken, files(id, name)").execute()
    items = results.get('files', [])
    if not items:
        print('No files found.')
    else:
        print('Recent 10 files:')
        for item in items:
            print(u'{0} ({1})'.format(item['name'], item['id']))
def checkForExistingFile(service, name):
    """Return True if a non-trashed Drive file named *name* exists.

    Prints each matching file; files that are only in the trash do not count.
    Fix: removed the unused local `allTrashed = True` left over from an
    earlier version. NOTE(review): *name* is interpolated into the Drive
    query string unescaped -- a name containing a quote would break the query.
    """
    print("Searching for a file called:", name)
    results = service.files().list(q="name = '" + name + "'", spaces="drive", fields="files(id, name, parents, trashed)").execute()
    if (len(results.get('files', [])) > 0):
        print("file found!")
        for f in results.get('files', []):
            print(f.get('name'), f.get('id'), f.get('parents'), f.get('trashed'))
            # The first match not in the trash settles it.
            if not f.get('trashed'): return True
    return False
def uploadToDrive(crosswordFile):
    """Upload *crosswordFile* (a PDF) to the fixed Drive folder, unless a
    live (non-trashed) copy with the same name already exists there."""
    SCOPES = ['https://www.googleapis.com/auth/drive']
    creds = None
    if os.path.exists(cloudtokenFile):
        creds = Credentials.from_authorized_user_file(cloudtokenFile, SCOPES)
    else:
        # Without stored OAuth credentials the upload cannot proceed.
        print("No token.json file. Exiting")
        return
    service = build('drive', 'v3', credentials=creds)
    # Only the basename becomes the Drive file name.
    name = crosswordFile.split('/')[-1]
    if checkForExistingFile(service, name): return
    # Hard-coded parent id -- presumably the crosswords Drive folder.
    fileMetadata = { 'name': name, "parents" : ["1Kwy3lson-RWAntRkxO67NV-Mo6l8jYzw"]}
    media = MediaFileUpload(crosswordFile, mimetype='application/pdf')
    results = service.files().create(body=fileMetadata, media_body = media).execute()
    print("File Name: %s ID: %s"%(name, results.get('id')))
def getWriteCrossword(fullURL, outputFilename):
    """Download the crossword PDF at *fullURL* and write it to *outputFilename*.

    Exits the process on HTTP/URL errors or when the server does not return
    a PDF. Returns the output file name on success.

    Fix: the original located the Content-Type by slicing the raw header
    text with find()/len(), which breaks if the header carries parameters
    (e.g. "; charset=..."). urllib responses expose the parsed headers as an
    email.message.Message, whose get_content_type() returns just the
    lower-cased media type.
    """
    try:
        response = urllib.request.urlopen(fullURL)
    except urllib.error.HTTPError as e:
        print("We got an error of:", e.code)
        sys.exit()
    except urllib.error.URLError as e:
        print(e.reason)
        sys.exit()
    contentType = response.headers.get_content_type()
    # print("Content-type: " + contentType)
    if contentType != 'application/pdf':
        print("The server did not return a PDF object.")
        sys.exit()
    pdfData = response.read()
    print("Fetched the data ok ... Writing to %s" % outputFilename)
    # `with` guarantees the file is closed even if the write fails.
    with open(outputFilename, 'wb') as outputFile:
        outputFile.write(pdfData)
    print("Written the file to:", outputFilename)
    return outputFilename
if __name__ == "__main__":
    days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
    months = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
    testBaseURL = "http://www.devicecloud.co.uk/crosswords/"
    baseURL = "http://crosswords-static.guim.co.uk/"
    homeDIR = os.getenv("HOME")
    crosswordPath = homeDIR + "/Crosswords/"
    # BUG FIX: the --archive branch referenced an undefined `dropboxPath`
    # (NameError at runtime). Archiving lists crosswordPath, so it clearly
    # operates on the same folder; define it explicitly here.
    dropboxPath = homeDIR + "/Crosswords"
    namePrefix = "gdn.quick."
    nameSuffix = ".pdf"
    cloudtokenFile = homeDIR + "/bin/token.json"

    parser = argparse.ArgumentParser(description='Downloads the Guardian Quick crosswords and saves (and archives) them to a Dropbox folder.')
    parser.add_argument('--date', default = 'today', type=str, help='Date for the crossword (default: today)')
    parser.add_argument('-g', '--get', action='store_true', help='\'Get\' directive. Asks the script to get the crossword.')
    parser.add_argument('--test', action='store_true', help='Use the test URL instead of the real Guardian URL.')
    parser.add_argument('--archive', action = 'store_true', help='Clean up the Drive directory.')
    parser.add_argument('-u', '--upload', action = 'store_true', help='Upload the crossword to a Google Drive folder.')
    arg = parser.parse_args()
    print(arg)
    if arg.test:
        baseURL = testBaseURL

    # Resolve the requested crossword date (defaults to today).
    todaysDate = datetime.datetime.now()
    requestedDate = todaysDate
    if arg.date != 'today':
        try:
            inputDate = datetime.datetime.strptime(arg.date, '%Y-%m-%d')
            requestedDate = inputDate
        except ValueError:
            print("I am not able to understand the date input, please use YYYY-MM-DD")
            sys.exit()

    todayYear = todaysDate.year
    todayMonth = todaysDate.month
    todayDay = todaysDate.day
    todayDayOfWeek = todaysDate.weekday()
    requestedYear = requestedDate.year
    requestedDay = requestedDate.day
    requestedMonth = requestedDate.month
    requestedDayOfWeek = requestedDate.weekday()
    dayDifference = todaysDate - requestedDate
    print("Today is: %d-%02d-%02d %s"%(todayYear, todayMonth, todayDay, days[todayDayOfWeek]))
    print("You have asked for: %d-%02d-%02d %s"%(requestedYear, requestedMonth, requestedDay, days[requestedDayOfWeek]))
    if dayDifference.days < 0:
        print("Your requested date is in the future, no crossword yet.")
        sys.exit()
    if dayDifference.days > 0:
        print('Your date was %d days ago'%dayDifference.days)
    if requestedDayOfWeek == 6:
        # No Guardian quick crossword is published on Sundays.
        print("You are requesting a crossword for a Sunday. Try the Observer.")
        sys.exit()

    dateString = "%d%02d%02d"%(requestedYear, requestedMonth, requestedDay)
    fullURL = baseURL + namePrefix + dateString + nameSuffix
    print("Ready to fetch: ", fullURL)
    outputFilename = crosswordPath + namePrefix + dateString + nameSuffix
    # BUG FIX: crosswordFile was unbound when --upload was given without
    # --get, raising NameError. Initialise it and guard the upload branch.
    crosswordFile = None
    if (arg.get):
        crosswordFile = getWriteCrossword(fullURL, outputFilename)
    else:
        print("You did not specify the 'get' directive, so not really fetching the crossword.")

    if (arg.upload):
        if crosswordFile is None:
            print("Nothing to upload: use --get to fetch the crossword first.")
        else:
            uploadToDrive(crosswordFile)

    if (arg.archive):
        # Move crosswords older than a week into YEAR-Month subfolders.
        files = os.listdir(crosswordPath)
        crosswordFilenames = []
        dates = []
        for f in files:
            if f.find('gdn.quick.') != -1:
                crosswordFilenames.append(f)
                dateString = f[10:18]   # YYYYMMDD embedded in the file name
                dates.append(dateString)
        print("Crosswords found in root folder...")
        print(crosswordFilenames)
        daysOld = []
        for d in dates:
            date = datetime.datetime.strptime(d, '%Y%m%d')
            # BUG FIX: this loop previously assigned to `days`, clobbering
            # the weekday-name list defined at the top of the block.
            age = (todaysDate - date).days
            daysOld.append(age)
        oldCrosswords = []
        oldCrosswordDates = []
        for index, f in enumerate(crosswordFilenames):
            if daysOld[index] > 7:
                oldCrosswords.append(f)
                oldCrosswordDates.append(dates[index])
        print("Crosswords older than 7 days...")
        print(oldCrosswords)
        for index, filename in enumerate(oldCrosswords):
            date = datetime.datetime.strptime(oldCrosswordDates[index], '%Y%m%d')
            print(filename, date)
            month = date.month
            monthString = months[month - 1]
            year = date.year
            print(year, monthString)
            directory = str(year) + "-" + monthString
            fullDirectory = dropboxPath + "/" + directory
            if not os.path.exists(fullDirectory):
                print("Creating the directory: " + fullDirectory)
                os.mkdir(fullDirectory)
            oldFilename = dropboxPath + "/" + filename
            newFilename = dropboxPath + "/" + directory + "/" + filename
            print(oldFilename, newFilename)
            os.rename(oldFilename, newFilename)
    print('Completed successfully')
|
[
"rashley@gmail.com"
] |
rashley@gmail.com
|
2c064571b166c16d2e2162eb550f4acddc9755a2
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_BoxCox/trend_Lag1Trend/cycle_0/ar_12/test_artificial_128_BoxCox_Lag1Trend_0_12_100.py
|
69be51d0ac616d981af16e76ed8ffa0a59aeca1f
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 263
|
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generate and test one synthetic series: 128 daily points, Lag1Trend trend,
# no cycle, BoxCox transform, zero noise, 100 exogenous variables, AR order 12.
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 0, transform = "BoxCox", sigma = 0.0, exog_count = 100, ar_order = 12);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
5cbd9b759b68b1c777c5a8035820707a318f8074
|
dd449ad8388847779b265f49f2339c9681376c60
|
/rl-lapan-book/chap6_dqn_pong/model.py
|
12c593bc41d537299d8fb3ee85ce5990e5c10f0d
|
[] |
no_license
|
whoji/training-ground
|
478d76a8c274050eb910b28729ca1d1cdb47eae9
|
b107cc47c4a04bb8868c410ab207bacab5a86e4c
|
refs/heads/master
| 2020-05-16T16:13:26.788156
| 2019-12-04T01:56:01
| 2019-12-04T01:56:01
| 183,154,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,301
|
py
|
import torch
import torch.nn as nn
import numpy as np
class DQN(nn.Module):
    """Convolutional Q-network mapping a CHW frame stack to per-action Q-values."""

    def __init__(self, input_shape, n_actions):
        """assumes input_shape is of CHW shape (4,84,84)"""
        super(DQN, self).__init__()
        # Feature extractor: three conv layers, each followed by a ReLU.
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels=input_shape[0],
                      out_channels=32,
                      kernel_size=8,
                      stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, 4, 2),
            nn.ReLU(),
            nn.Conv2d(64, 128, 3, 1),
            nn.ReLU(),
        )
        # Probe the conv stack once with a zero image to find the flattened
        # feature width the fully-connected head must accept.
        flat_features = self._get_conv_out(input_shape)
        self.fc = nn.Sequential(
            nn.Linear(flat_features, 512),
            nn.ReLU(),
            nn.Linear(512, n_actions),
        )

    def _get_conv_out(self, shape):
        # Forward a single zero image and count output elements
        # (e.g. 128*7*7 = 6272 for an 84x84 input).
        probe = self.conv(torch.zeros(1, *shape))
        return int(probe.numel())

    def forward(self, x):
        """ assumes x is 4D of shape BCHW, output will be 2D: B*n_actions """
        batch = x.size()[0]
        flattened = self.conv(x).view(batch, -1)
        return self.fc(flattened)
if __name__ == '__main__':
    # Smoke test: build a network for 4x100x100 observations and 5 actions,
    # then print its layer summary.
    m = DQN((4,100,100), 5)
    print(m)
|
[
"minli1985@gmail.com"
] |
minli1985@gmail.com
|
e1ac6ff44e858e2564178ff72f096851718b1ac2
|
7f8b266b1b175f62a5d9608a0028f2265f294233
|
/guild/commands/operations.py
|
0e51d3f6f25d165f93eb42a4dc169732d4f1fe81
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
jukiewiczm/guildai
|
7d1cd4e324e1f2bd70a90156e254e9e4d00c6e4c
|
478cc29cb102a8bd0bed693ce9626fe4949257a2
|
refs/heads/master
| 2020-10-01T22:47:39.595549
| 2019-12-12T15:38:06
| 2019-12-12T15:38:06
| 227,639,644
| 0
| 0
|
Apache-2.0
| 2019-12-12T15:37:39
| 2019-12-12T15:37:38
| null |
UTF-8
|
Python
| false
| false
| 1,875
|
py
|
# Copyright 2017-2019 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import click
from guild import click_util
# NOTE(review): the command name "operations, ops" looks like it encodes an
# alias list in a single string -- presumably a project-specific convention
# consumed by the CLI wiring; confirm before changing it.
@click.command(name="operations, ops")
# Zero or more positional filter terms, matched against operation/model names.
@click.argument("filters", metavar="[FILTER]...", required=False, nargs=-1)
@click.option(
    "-a", "--all", is_flag=True,
    help="Show all operations including those designated as private.")
@click.option(
    "-i", "--installed", is_flag=True,
    help=(
        "Include operations installed from packages when running "
        "command from a project directory."))
@click.option("-v", "--verbose", help="Show operation details.", is_flag=True)
# Converts click's keyword arguments into a single args namespace object.
@click_util.use_args
def operations(args):
    """Show model operations.

    If the current directory is a project directory (i.e. contains a
    Guild file), the command shows operations defined for the
    project. Otherwise, the command shows operations defined in
    installed packages.

    Note that operations defined in packages are always available to
    run, even when running within a project directory. To always show
    installed operations, use the `--installed` option.

    Use one or more `FILTER` arguments to show only operations with
    names or models that match the specified values.

    """
    # Import lazily so the CLI starts fast; the impl module does the work.
    from . import operations_impl
    operations_impl.main(args)
|
[
"g@rre.tt"
] |
g@rre.tt
|
e43043403f1e2a603fa3fac290b06c7c435e8f11
|
fa0ee3daeed8edb10e98b6772fa39923243d53c5
|
/algorithms/hard_all_pairs_that_sum_to_number.py
|
56976bc27cc4888ec8cdbc2a9df5029dab6ab0f5
|
[
"Apache-2.0"
] |
permissive
|
ppinko/python_exercises
|
8a93664b6a8fc75d1088d67cd8e5f5ba3f39364a
|
d4ef2ddb28834ef49ac8060ce16f6b1446b6713e
|
refs/heads/master
| 2022-07-21T13:39:39.694515
| 2022-07-06T18:46:49
| 2022-07-06T18:46:49
| 254,176,710
| 1
| 0
| null | 2020-04-08T19:13:45
| 2020-04-08T19:05:39
|
Python
|
UTF-8
|
Python
| false
| false
| 836
|
py
|
"""
https://edabit.com/challenge/8LZdBwmpBiLJ5Sobt
"""
def all_pairs(lst: list, num: int) -> list:
    """Return every pair [a, b] of distinct elements of lst with a + b == num.

    Pairs come out sorted (smaller element first, pairs in ascending order)
    because processing happens over a sorted view of the input.

    Bug fix: the original sorted ``lst`` in place, mutating the caller's
    list as a side effect; we now sort a copy instead.
    """
    ordered = sorted(lst)  # copy; do not mutate the caller's list
    ans = []
    for i, v in enumerate(ordered[:-1]):
        # Only look at elements after position i so each unordered pair is
        # produced exactly once and no element pairs with itself.
        for w in ordered[i + 1:]:
            if v + w == num:
                ans.append([v, w])
    return ans
# Self-checks covering ordering, duplicates, and no-match cases.
assert all_pairs([2, 4, 5, 3], 7) == [[2, 5], [3, 4]]
assert all_pairs([5, 3, 9, 2, 1], 3) == [[1, 2]]
assert all_pairs([4, 5, 1, 3, 6, 8], 9) == [[1, 8], [3, 6], [4, 5]]
assert all_pairs([5, 2], 14) == []
assert all_pairs([5, 5, 2], 14) == []
assert all_pairs([8, 7, 7, 2, 4, 6], 14) == [[6, 8], [7, 7]]
assert all_pairs([8, 7, 2, 4, 6], 14) == [[6, 8]]
assert all_pairs([1, 3, 5, 4, 0], 4) == [[0, 4], [1, 3]]
assert all_pairs([1, 3, 5, 4, 0, 2], 4) == [[0, 4], [1, 3]]
assert all_pairs([1, 3, 5, 4, 0, 2, 2], 4) == [[0, 4], [1, 3], [2, 2]]
print('Success')
|
[
"p.pinkowicz@gmail.com"
] |
p.pinkowicz@gmail.com
|
699cfc5f42f72a4fd6018d7cf3cb617c55697fac
|
953fc3064e82231d1c5b7fb3be6563113e3c1483
|
/examples/dfa/authentication/create_dfa_client_without_yaml.py
|
3cc547e12209647629f680aa25f4f657827a493f
|
[
"Apache-2.0"
] |
permissive
|
hshore29/googleads-python-lib
|
08a3dc093b76b16c7b84026360e89793a1a5e4c9
|
14de75b8400aa4c1c0920d8edda6bb6e46b858cc
|
refs/heads/master
| 2021-01-15T12:50:43.278667
| 2014-03-20T14:47:01
| 2014-03-20T14:47:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,108
|
py
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initializes a DfaClient without using yaml-cached credentials.
While our LoadFromStorage method provides a useful shortcut to instantiate a
client if you regularly use just one set of credentials, production applications
may need to swap out users. This example shows you how to create an OAuth 2.0
client and a DfaClient without relying on a yaml file.
"""
__author__ = 'Joseph DiLallo'
from googleads import dfa
from googleads import oauth2
# OAuth 2.0 credential information. In a real application, you'd probably be
# pulling these values from a credential storage.
CLIENT_ID = 'INSERT_CLIENT_ID_HERE'
CLIENT_SECRET = 'INSERT_CLIENT_SECRET_HERE'
REFRESH_TOKEN = 'INSERT_REFRESH_TOKEN_HERE'
# DFA API information.
USER_PROFILE_NAME = 'INSERT_USER_PROFILE_NAME_HERE'
APPLICATION_NAME = 'INSERT_APPLICATION_NAME_HERE'
def main(client_id, client_secret, refresh_token, user_profile_name,
         application_name):
  """Fetch and print all campaigns using explicitly supplied OAuth2 creds."""
  # Build the OAuth2 client directly from raw credentials (no yaml cache).
  oauth2_client = oauth2.GoogleRefreshTokenClient(
      client_id, client_secret, refresh_token)

  dfa_client = dfa.DfaClient(user_profile_name, oauth2_client, application_name)

  # Empty criteria dict -> fetch all campaigns visible to this profile.
  results = dfa_client.GetService('CampaignService').getCampaignsByCriteria({})
  if results['records']:
    for campaign in results['records']:
      print ('Campaign with name \'%s\' and ID \'%s\' was found.'
             % (campaign['name'], campaign['id']))
if __name__ == '__main__':
  # Run with the placeholder credentials defined at module level.
  main(CLIENT_ID, CLIENT_SECRET, REFRESH_TOKEN, USER_PROFILE_NAME,
       APPLICATION_NAME)
|
[
"api.jdilallo@gmail.com"
] |
api.jdilallo@gmail.com
|
c02e1231cabab17604214697637c98b264de8add
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_396/ch54_2019_09_23_14_18_10_668876.py
|
7c95795017287e26da8a69a6e6f64707dd17edcc
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
def junta_nome_sobrenome(x, y):
    """Return a list joining each first name in x with the surname at the
    same index in y, separated by a space.

    Bug fix: the original ``while`` loop never incremented ``i``, so any
    non-empty input looped forever.
    """
    lis = []
    # Index over x (as the original did), so a shorter y still raises
    # IndexError exactly like the intended original behavior.
    for i in range(len(x)):
        lis.append(x[i] + " " + y[i])
    return lis
|
[
"you@example.com"
] |
you@example.com
|
0c3fd2bef7b4cd1a6f18d706cdf63c1576257b8d
|
6c8a590bac5d39d95034127f0f47a84e71d298a7
|
/game_element.py
|
e3dcefbe2fc5a99445c3f617772db566b601c6ac
|
[] |
no_license
|
UO-CIS211/FiveTwelve
|
3f57c40d401588f89370676679dd03f5ce35f42e
|
eca5d58e4c09189b873a6ce04fe109b6ac711a2e
|
refs/heads/master
| 2022-01-12T18:58:30.185373
| 2022-01-11T23:11:09
| 2022-01-11T23:11:09
| 117,500,226
| 1
| 13
| null | 2020-10-01T17:16:05
| 2018-01-15T05:16:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,087
|
py
|
"""
Board game element: Relates the model component
of a grid game to the view component.
Neither the game logic (in the model component)
nor the display logic (n the view component) is
defined here; this is the notification logic
for sending events from the model component
to the view component.
The 'model' component will inherit from the
GameListener class and generate EventKind events.
"""
from enum import Enum
class EventKind(Enum):
    """All the kinds of events that we may notify listeners of"""
    # A new tile appeared on the board.
    tile_created = 1
    # An existing tile changed (position or value).
    tile_updated = 2
    # A tile was removed from the board.
    tile_removed = 3
class GameEvent(object):
    """A single model-side occurrence that view components may need to depict."""

    def __init__(self, kind: EventKind, tile: "Tile"):
        # Record what happened and which tile it happened to.
        self.kind = kind
        self.tile = tile

    def __repr__(self):
        return "GameEvent({}, {})".format(self.kind, self.tile)
class GameListener(object):
    """Abstract base class for objects that listen to
    game events in a model-view-controller pattern.

    Each listener must implement a 'notify' method.
    """

    def notify(self, event: GameEvent):
        # Subclasses (view components) must override this hook.
        raise NotImplementedError("Game Listener classes must implement 'notify'")
# -------------------------------------------
class GameElement(object):
    """Base class for game elements, supporting depiction through
    Model-View-Controller: the model broadcasts events, views react.
    """

    def __init__(self):
        # Registered view components; starts empty.
        self._listeners = []

    def add_listener(self, listener: GameListener):
        """Register a view component to receive this element's events."""
        self._listeners.append(listener)

    def notify_all(self, event: GameEvent):
        """Broadcast 'event' to every registered listener.

        Graphics stay out of the model component: each view component
        decides for itself how to adjust its depiction in response.
        """
        for subscriber in self._listeners:
            subscriber.notify(event)
|
[
"michal.young@gmail.com"
] |
michal.young@gmail.com
|
116f54ce54861411c14fc2fd70b69e2b5b4e04c4
|
6d4f60b000d3b00561c439c5faceaa9931e20a9d
|
/fast_arrow/resources/option_order.py
|
de9ab50509c11638544e980be5ecac9f567d2ab9
|
[
"MIT"
] |
permissive
|
Jiacli/fast_arrow
|
25485c6712e3e2f5e8dea7de7cc40e932d372381
|
beb95fc402fca12670a6c39cdb4abe524937d321
|
refs/heads/master
| 2020-03-28T03:12:59.549434
| 2018-09-06T02:45:37
| 2018-09-06T02:45:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
from fast_arrow import util
from fast_arrow.resources.option import Option
class OptionOrder(object):
    """Helpers for fetching and post-processing Robinhood option orders."""

    @classmethod
    def all(cls, client):
        """
        Fetch all option orders, following API pagination.

        :param client: authenticated client exposing a ``get(url)`` method.
        :returns: list of raw option-order dicts.
        """
        url = 'https://api.robinhood.com/options/orders/'
        data = client.get(url)
        results = data["results"]
        # Follow the "next" cursor until the API reports no more pages.
        # Bug fix: the original called the undefined ``get(data["next"], token)``
        # here, raising NameError on any multi-page response.
        while data["next"]:
            data = client.get(data["next"])
            results.extend(data["results"])
        return results

    @classmethod
    def humanize_numbers(cls, option_orders):
        """
        Convert premium fields to signed floats: credit -> positive,
        anything else (debit) -> negative.  Orders are modified in place
        and collected into the returned list; ``None`` premiums are kept
        as-is.
        """
        results = []
        for oo in option_orders:
            keys_to_humanize = ["processed_premium"]
            coef = (1.0 if oo["direction"] == "credit" else -1.0)
            for k in keys_to_humanize:
                if oo[k] is None:  # nothing to convert
                    continue
                oo[k] = float(oo[k]) * coef
            results.append(oo)
        return results
|
[
"westonplatter@gmail.com"
] |
westonplatter@gmail.com
|
aba91413f1139d347b5032c4f60dae2b8e713d84
|
2ac77ba91b0972516aea272d24faad85a53e229a
|
/ProfileReviews.py
|
e31c1ed97473faafbc7c570b420f4a925172d989
|
[
"MIT"
] |
permissive
|
Blue-IT-Marketing/cloud-jobs
|
f98cdb80f61a1b85230025696dab432b423f7c21
|
99f577fe963aeaaaad6a6c6139563648a1fb3b03
|
refs/heads/master
| 2020-03-18T19:28:22.313288
| 2019-02-27T07:28:39
| 2019-02-27T07:28:39
| 135,156,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,548
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Justice Ndou'
__website__ = 'http://jobcloud.freelancing-seo.com/'
__email__ = 'justice@freelancing-seo.com'
# Copyright 2014 Freelancing Solutions.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datatypes import Reference
from ConstantsAndErrorCodes import MyConstants, ErrorCodes, isGoogleServer
from google.appengine.ext import db
import logging
class ProfileReviews(db.Expando, MyConstants, ErrorCodes):
    """Datastore entity for a single review left on a user profile.

    Each property has a read*/write* accessor pair: writers validate and
    store the input, returning True on success and False on validation
    failure; readers return the stored value or ``self.undefined`` when
    empty.  Every accessor returns ``self._generalError`` if an unexpected
    exception occurs (GAE ``db`` model; broad excepts are the established
    style of this codebase).
    """
    # Stringified datastore key of this review, filled in after the first put().
    PReviewRef = db.StringProperty()
    indexReference = db.ReferenceProperty(reference_class=Reference,collection_name='profile_reviews') # to the owner of the Profile
    UReferenceNum = db.StringProperty() # A Reference Number from the Profile Owner Reference Class
    # A Reference Number to the user who placed the Review
    RReferenceNum = db.StringProperty()
    Firstname = db.StringProperty()
    Email = db.EmailProperty()
    Subject = db.StringProperty()
    Rating = db.StringProperty()      # stored as string, validated as int 1..10
    Review = db.StringProperty()
    DateTimeCreated = db.DateTimeProperty(auto_now_add=True)

    def createProfileReview(self, inIndex, inUReference, inRReference,inFirstname,inEmail,inSubject,inRating,inReview):
        # Populate every field through its validating writer; persist only
        # when all writers succeed.  Two puts: the first obtains a datastore
        # key (kept in PReviewRef), the second persists that reference.
        try:
            if self.writeIndexReference(strinput=inIndex) and self.writeUReferenceNum(strinput=inUReference) \
                    and self.writeFirstname(strinput=inFirstname) and self.writeEmail(strinput=inEmail) and \
                    self.writeSubject(strinput=inSubject) and self.writeRating(strinput=inRating) and \
                    self.writeReview(strinput=inReview) and self.writeRReferenceNum(strinput=inRReference):
                logging.info('PROFILE REVIEW INITIAL PASSED')
                self.PReviewRef = str(self.put())
                self.put()
                return True
            else:
                logging.info('SIMPLE FAILURE IN CREATING PROFILE REVIEWS')
                return False
        except:
            logging.info('SERIOUS FAILURE IN CREATING PROFILE REVIEWS')
            return self._generalError

    # --- reviewer reference -------------------------------------------------
    def readRReferenceNum(self):
        try:
            temp = str(self.RReferenceNum)
            if len(temp) > 0:
                return temp
            else:
                return self.undefined
        except:
            return self._generalError

    def writeRReferenceNum(self, strinput):
        try:
            strinput = str(strinput)
            if len(strinput) > 0:
                self.RReferenceNum = strinput
                return True
            else:
                return False
        except:
            return self._generalError

    # --- profile-owner reference (datastore Reference entity) ---------------
    def readIndexReference(self):
        try:
            temp = self.indexReference
            if not(temp == self.undefined):
                return temp
            else:
                return self.undefined
        except:
            return self._generalError

    def writeIndexReference(self, strinput):
        try:
            # Length check is done on the stringified value, but the raw
            # reference object itself is stored.
            temp = str(strinput)
            if len(temp) > 0:
                self.indexReference = strinput
                logging.info('WRITE INDEX FOR PROFILE REVIEW PASSED')
                return True
            else:
                return False
        except:
            return self._generalError

    def readUReferenceNum(self):
        try:
            temp = str(self.UReferenceNum)
            if len(temp) > 0:
                return temp
            else:
                return self.undefined
        except:
            return self._generalError

    def writeUReferenceNum(self, strinput):
        try:
            strinput = str(strinput)
            if len(strinput) > 0:
                self.UReferenceNum = strinput
                logging.info('WRITE UREFERENCE PASSED ON PROFILE REVIEWS')
                return True
            else:
                return False
        except:
            return self._generalError

    # --- reviewer details ----------------------------------------------------
    def readFirstname(self):
        try:
            temp = str(self.Firstname)
            if len(temp) > 0:
                return temp
            else:
                return self.undefined
        except:
            return self._generalError

    def writeFirstname(self, strinput):
        try:
            strinput = str(strinput)
            if len(strinput) > 0:
                self.Firstname = strinput
                logging.info('WRITE FIRST NAME PASSED ON PROFILE REVIEWS')
                return True
            else:
                return False
        except:
            return self._generalError

    def readEmail(self):
        try:
            temp = str(self.Email)
            if len(temp) > 0:
                return temp
            else:
                return self.undefined
        except:
            return self._generalError

    def writeEmail(self, strinput):
        # NOTE(review): only non-emptiness is validated, not email format.
        try:
            strinput = str(strinput)
            if len(strinput) > 0:
                self.Email = strinput
                logging.info('WRITE EMAIL PASSED ON PROFILE REVIEWS')
                return True
            else:
                return False
        except:
            return self._generalError

    # --- review content ------------------------------------------------------
    def readSubject(self):
        try:
            temp = str(self.Subject)
            if len(temp) > 0:
                return temp
            else:
                return self.undefined
        except:
            return self._generalError

    def writeSubject(self, strinput):
        try:
            strinput = str(strinput)
            if len(strinput) > 0:
                self.Subject = strinput
                logging.info('WRITE SUBJECT PASSED ON PROFILE REVIEWS')
                return True
            else:
                return False
        except:
            return self._generalError

    def readRating(self):
        # Returns the rating as an int in 1..10, else self.undefined.
        try:
            temp = int(self.Rating)
            if (temp > 0) and (temp < 11):
                return temp
            else:
                return self.undefined
        except:
            return self._generalError

    def writeRating(self, strinput):
        # Accepts only digit strings representing 1..10 (inclusive).
        try:
            strinput = str(strinput)
            strinput = strinput.strip()
            if strinput.isdigit():
                tRating = int(strinput)
            else:
                tRating = 0
            if (tRating > 0) and (tRating < 11):
                self.Rating = str(tRating)
                logging.info('WRITE RATING PASSED ON PROFILE REVIEWS')
                return True
            else:
                return False
        except:
            return self._generalError

    def readReview(self):
        try:
            temp = str(self.Review)
            if len(temp) > 0:
                return temp
            else:
                return self.undefined
        except:
            return self._generalError

    def writeReview(self, strinput):
        try:
            strinput = str(strinput)
            if len(strinput) > 0:
                self.Review = strinput
                return True
            else:
                return False
        except:
            return self._generalError
|
[
"mobiusndou@gmail.com"
] |
mobiusndou@gmail.com
|
0651a0df3619054f72a838d5ca3e1adf32cbab8d
|
7889f7f0532db6a7f81e6f8630e399c90438b2b9
|
/1.5.0/examples/user_interfaces/embedding_in_qt5.py
|
6f35d7745d3b7dae338f30a786f1cece1338a65e
|
[] |
no_license
|
matplotlib/matplotlib.github.com
|
ef5d23a5bf77cb5af675f1a8273d641e410b2560
|
2a60d39490941a524e5385670d488c86083a032c
|
refs/heads/main
| 2023-08-16T18:46:58.934777
| 2023-08-10T05:07:57
| 2023-08-10T05:08:30
| 1,385,150
| 25
| 59
| null | 2023-08-30T15:59:50
| 2011-02-19T03:27:35
| null |
UTF-8
|
Python
| false
| false
| 4,328
|
py
|
#!/usr/bin/env python
# embedding_in_qt5.py --- Simple Qt5 application embedding matplotlib canvases
#
# Copyright (C) 2005 Florent Rougon
# 2006 Darren Dale
# 2015 Jens H Nielsen
#
# This file is an example program for matplotlib. It may be used and
# modified with no restriction; raw copies as well as modified versions
# may be distributed without limitation.
from __future__ import unicode_literals
import sys
import os
import random
import matplotlib
# Make sure that we are using QT5
matplotlib.use('Qt5Agg')
from PyQt5 import QtGui, QtCore, QtWidgets
from numpy import arange, sin, pi
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
progname = os.path.basename(sys.argv[0])
progversion = "0.1"
class MyMplCanvas(FigureCanvas):
    """Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""

    def __init__(self, parent=None, width=5, height=4, dpi=100):
        # Create the Figure/Axes pair this canvas will render.
        fig = Figure(figsize=(width, height), dpi=dpi)
        self.axes = fig.add_subplot(111)
        # We want the axes cleared every time plot() is called
        # NOTE(review): Axes.hold() was deprecated and later removed in
        # matplotlib; this matches the 1.5-era API this example targets.
        self.axes.hold(False)

        # Subclasses draw their initial content via this hook.
        self.compute_initial_figure()

        #
        FigureCanvas.__init__(self, fig)
        self.setParent(parent)

        # Let the canvas expand to fill its parent layout.
        FigureCanvas.setSizePolicy(self,
                                   QtWidgets.QSizePolicy.Expanding,
                                   QtWidgets.QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)

    def compute_initial_figure(self):
        # Hook for subclasses; the base class draws nothing.
        pass
class MyStaticMplCanvas(MyMplCanvas):
    """Simple canvas with a sine plot."""

    def compute_initial_figure(self):
        # Plot sin(2*pi*t) for t in [0, 3) at 0.01 resolution.
        t = arange(0.0, 3.0, 0.01)
        s = sin(2*pi*t)
        self.axes.plot(t, s)
class MyDynamicMplCanvas(MyMplCanvas):
    """A canvas that updates itself every second with a new plot."""

    def __init__(self, *args, **kwargs):
        MyMplCanvas.__init__(self, *args, **kwargs)
        # Redraw once per second via a Qt timer owned by this widget.
        timer = QtCore.QTimer(self)
        timer.timeout.connect(self.update_figure)
        timer.start(1000)

    def compute_initial_figure(self):
        self.axes.plot([0, 1, 2, 3], [1, 2, 0, 4], 'r')

    def update_figure(self):
        # Build a list of 4 random integers between 0 and 10 (both inclusive)
        l = [random.randint(0, 10) for i in range(4)]
        self.axes.plot([0, 1, 2, 3], l, 'r')
        self.draw()
class ApplicationWindow(QtWidgets.QMainWindow):
    """Main window hosting one static and one dynamic matplotlib canvas."""

    def __init__(self):
        QtWidgets.QMainWindow.__init__(self)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.setWindowTitle("application main window")

        # File menu with a Ctrl+Q quit shortcut.
        self.file_menu = QtWidgets.QMenu('&File', self)
        self.file_menu.addAction('&Quit', self.fileQuit,
                                 QtCore.Qt.CTRL + QtCore.Qt.Key_Q)
        self.menuBar().addMenu(self.file_menu)

        self.help_menu = QtWidgets.QMenu('&Help', self)
        self.menuBar().addSeparator()
        self.menuBar().addMenu(self.help_menu)

        self.help_menu.addAction('&About', self.about)

        # Central widget: the two canvases stacked vertically.
        self.main_widget = QtWidgets.QWidget(self)

        l = QtWidgets.QVBoxLayout(self.main_widget)
        sc = MyStaticMplCanvas(self.main_widget, width=5, height=4, dpi=100)
        dc = MyDynamicMplCanvas(self.main_widget, width=5, height=4, dpi=100)
        l.addWidget(sc)
        l.addWidget(dc)

        self.main_widget.setFocus()
        self.setCentralWidget(self.main_widget)

        self.statusBar().showMessage("All hail matplotlib!", 2000)

    def fileQuit(self):
        self.close()

    def closeEvent(self, ce):
        # Route window-close events through the same quit path as the menu.
        self.fileQuit()

    def about(self):
        # Bug fix: in Qt5/PyQt5 QMessageBox lives in QtWidgets, not QtGui;
        # the original QtGui.QMessageBox raised AttributeError at runtime.
        QtWidgets.QMessageBox.about(self, "About",
                                """embedding_in_qt5.py example
Copyright 2005 Florent Rougon, 2006 Darren Dale, 2015 Jens H Nielsen
This program is a simple example of a Qt5 application embedding matplotlib
canvases.
It may be used and modified with no restriction; raw copies as well as
modified versions may be distributed without limitation.
This is modified from the embedding in qt4 example to show the difference
between qt4 and qt5"""
                                )
# Launch the Qt application and enter its event loop; sys.exit propagates
# the application's exit status back to the shell.
qApp = QtWidgets.QApplication(sys.argv)

aw = ApplicationWindow()
aw.setWindowTitle("%s" % progname)
aw.show()
sys.exit(qApp.exec_())
#qApp.exec_()
|
[
"quantum.analyst@gmail.com"
] |
quantum.analyst@gmail.com
|
6db2f2bffdf1414130a745c38c695d1487cb7613
|
05ea9d119263eca2292e23fa2151b7b4cabd8de6
|
/setup.py
|
188200af449f4008c216f639b97207689b00c9c8
|
[
"MIT"
] |
permissive
|
BugBreaker/smote_variants
|
55f5f650abac163a1dbebf71adb96ce13060e6da
|
dd937a5c827580912ec81d3e209ae96074623301
|
refs/heads/master
| 2020-04-16T21:26:05.344604
| 2019-01-01T16:36:23
| 2019-01-01T16:36:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,240
|
py
|
import os
import codecs

from setuptools import setup


def readme():
    # Long description comes straight from the README; utf-8-sig tolerates
    # a leading BOM.
    with codecs.open('README.rst', encoding='utf-8-sig') as f:
        return f.read()


# Obtain __version__ by executing smote_variants/_version.py so the package
# itself is never imported at build time.  NOTE(review): exec() on a file is
# conventional in setup.py and safe only because the file ships with this
# source tree.
version_file= os.path.join('smote_variants', '_version.py')
with open(version_file) as f:
    exec(f.read())

# ---- distribution metadata ----
DISTNAME= 'smote_variants'
DESCRIPTION= 'Variants of the synthetic minority oversampling technique (SMOTE) for imbalanced learning'
LONG_DESCRIPTION= readme()
LONG_DESCRIPTION_CONTENT_TYPE='text/x-rst'
MAINTAINER= 'Gyorgy Kovacs'
MAINTAINER_EMAIL= 'gyuriofkovacs@gmail.com'
URL= 'https://github.com/gykovacs/smote-variants'
LICENSE= 'MIT'
DOWNLOAD_URL= 'https://github.com/gykovacs/smote-variants'
VERSION= __version__
CLASSIFIERS= [ 'Intended Audience :: Science/Research',
              'Intended Audience :: Developers',
              'Development Status :: 3 - Alpha',
              'License :: OSI Approved :: MIT License',
              'Programming Language :: Python',
              'Topic :: Scientific/Engineering :: Artificial Intelligence',
              'Topic :: Software Development',
              'Operating System :: Microsoft :: Windows',
              'Operating System :: POSIX',
              'Operating System :: Unix',
              'Operating System :: MacOS']
INSTALL_REQUIRES= ['numpy>=1.13.0', 'scipy', 'scikit-learn', 'joblib', 'minisom', 'statistics', 'tensorflow', 'keras']
EXTRAS_REQUIRE= {'tests': ['nose'],
                 'docs': ['sphinx', 'sphinx-gallery', 'sphinx_rtd_theme', 'matplotlib', 'pandas']}
PYTHON_REQUIRES= '>=3.5'
TEST_SUITE='nose.collector'
PACKAGE_DIR= {'smote_variants': 'smote_variants'}

setup(name=DISTNAME,
      maintainer=MAINTAINER,
      maintainer_email=MAINTAINER_EMAIL,
      description=DESCRIPTION,
      license=LICENSE,
      url=URL,
      version=VERSION,
      download_url=DOWNLOAD_URL,
      long_description=LONG_DESCRIPTION,
      long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
      zip_safe=False,
      classifiers=CLASSIFIERS,
      install_requires=INSTALL_REQUIRES,
      extras_require=EXTRAS_REQUIRE,
      python_requires=PYTHON_REQUIRES,
      test_suite=TEST_SUITE,
      package_dir=PACKAGE_DIR)

# (A superseded, fully commented-out duplicate of the setup() call above
# was removed during review.)
|
[
"gyuriofkovacs@gmail.com"
] |
gyuriofkovacs@gmail.com
|
233365653559692108c043142b29b488ee4196ec
|
47b028ec01008ed901f2fd7779abe47f52d458fe
|
/tests/test_api.py
|
279dc71cb898c8d6e493c54c58b52be80ebc4953
|
[
"MIT"
] |
permissive
|
avara1986/gozokia
|
d6a5c2214f96c666283f129b96d22ce78291b32e
|
03da46359c4a97a35b8f94686cccec7fc9b438cd
|
refs/heads/master
| 2020-12-24T06:24:06.031752
| 2016-10-12T10:27:12
| 2016-10-12T10:27:12
| 38,059,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,212
|
py
|
# encoding: utf-8
import unittest
import os
from gozokia import Gozokia
from gozokia.rules import GreetingRaise, GreetingObjetive
os.environ.setdefault("GOZOKIA_SETTINGS_MODULE", "tests.settings_tests")
class ApiTest(unittest.TestCase):
    """End-to-end checks of the Gozokia value-in/value-out API."""

    def test_foo(self):
        # With the default rule set, "foo" triggers a rule twice
        # ("bar", "bar second"), then falls through to the no-rule echo.
        goz = Gozokia()
        goz.initialize()
        goz.set_io(input_type="value", output_type="value")
        value = "foo"
        self.assertEqual(goz.api(value), "bar")
        value = "foo"
        self.assertEqual(goz.api(value), "bar second")
        value = "foo"
        self.assertEqual(goz.api(value), "No rules. you said: foo")

    def test_greetings(self):
        # Register the greeting rule, then walk a greeting conversation:
        # "Hi" raises the rule, the next input supplies the name.
        goz = Gozokia()
        goz.rule(name='greeting', type=goz.RAISE_COND, rank=100)(GreetingRaise)
        goz.initialize()
        goz.set_io(input_type="value", output_type="value")
        value = "foo"
        self.assertEqual(goz.api(value), "bar")
        value = "Bacon"
        self.assertEqual(goz.api(value), "No rules. you said: Bacon")
        value = "Hi"
        self.assertEqual(goz.api(value), "Hi, who are you?")
        value = "i am Alberto"
        self.assertEqual(goz.api(value), "Hi, alberto")
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
[
"a.vara.1986@gmail.com"
] |
a.vara.1986@gmail.com
|
abb4fca4705ef6f801a4ae2c4b73ea567e65411f
|
c7f1c021d88e215dccb0bbb10db02293cd8528b8
|
/plugins/agent2/agent2/__init__.py
|
06fa9186ad1d3376ceff7add1d40b3ac454ed2dd
|
[] |
no_license
|
manuelnaranjo/OpenProximity
|
d11e61418b2249d210f0bbf5adbc6ccc38cd2773
|
f93c8009e40ce60c65090370adf0427e944d375b
|
refs/heads/master
| 2023-08-31T11:17:28.033936
| 2011-11-13T03:27:10
| 2011-11-13T03:27:10
| 1,778,676
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 988
|
py
|
# OpenProximity2.0 is a proximity marketing OpenSource system.
# Copyright (C) 2009,2008 Naranjo Manuel Francisco <manuel@aircable.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# A data collector agent compatible with Global Scanner
__version_info__=('0','0','1')
__version__ = '.'.join(__version_info__)
def statistics_reset(connection):
    # Hook invoked when collected agent statistics are reset; currently it
    # only logs.  (Python 2 print statement; the "statics" typo is runtime
    # output and is deliberately left untouched here.)
    print "agent statics reset"
|
[
"manuel@aircable.net"
] |
manuel@aircable.net
|
1d0bddf3aee9a3f85fd686500670596d63d534b2
|
71a4a5ff8dac94da32769710ed7734b6d93013de
|
/durgajobs/testapp/admin.py
|
22abbfb07dee953ad05bbe9e0aeebfcf61c1e93b
|
[] |
no_license
|
djangoprojects5pm/durga-jobs-project
|
fc6f114ac37276bda1567470831a5ff464432254
|
7cd967a2114a3bdd3038931a1ee15a7fa2367911
|
refs/heads/master
| 2022-11-22T07:39:08.747916
| 2020-07-20T13:44:04
| 2020-07-20T13:44:04
| 281,136,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 835
|
py
|
from django.contrib import admin
from testapp.models import hydjobs,blorejobs,chennaijobs,punejobs
# Register your models here.
# All four city-specific job models expose the same change-list columns, so
# the column list is defined once and shared (was duplicated four times).
_JOB_LIST_DISPLAY = ['date','company','title','eligibility','address','email','phonenumber']


class hydjobsAdmin(admin.ModelAdmin):
    """Admin configuration for Hyderabad job postings."""
    list_display = _JOB_LIST_DISPLAY


class blorejobsAdmin(admin.ModelAdmin):
    """Admin configuration for Bangalore job postings."""
    list_display = _JOB_LIST_DISPLAY


class chennaijobsAdmin(admin.ModelAdmin):
    """Admin configuration for Chennai job postings."""
    list_display = _JOB_LIST_DISPLAY


class punejobsAdmin(admin.ModelAdmin):
    """Admin configuration for Pune job postings."""
    list_display = _JOB_LIST_DISPLAY


# Register each model with its admin class.
admin.site.register(hydjobs, hydjobsAdmin)
admin.site.register(blorejobs, blorejobsAdmin)
admin.site.register(chennaijobs, chennaijobsAdmin)
admin.site.register(punejobs, punejobsAdmin)
|
[
"you@example.com"
] |
you@example.com
|
53dcc50f07e28eea9f33d772461b6d58768e8783
|
409ce560793c070ef4211b99c5a4a5316a258c4f
|
/pylith/topology/JacobianViewer.py
|
b8265fba4e31aecf6a5a07b3d37f9c57c2e909a2
|
[
"MIT"
] |
permissive
|
calum-chamberlain/pylith
|
bb718bfb4305f03b45d42348e5d4fa5ed5f4a918
|
8712c39ade53c1cc5ac0e671e4296cee278c1dcf
|
refs/heads/master
| 2020-12-06T17:15:08.638337
| 2016-05-15T20:30:28
| 2016-05-15T20:30:28
| 46,401,744
| 0
| 0
| null | 2016-05-15T20:30:29
| 2015-11-18T07:09:12
|
C++
|
UTF-8
|
Python
| false
| false
| 3,525
|
py
|
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2015 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file pylith/topology/JacobianViewer.py
##
## @brief Python object for writing system Jacobian to file.
##
## Factory: jacobian_viewer
from pylith.utils.PetscComponent import PetscComponent
# JacobianViewer class
class JacobianViewer(PetscComponent):
  """
  Python object for writing the system Jacobian matrix to a binary file.

  (The previous class docstring was copied verbatim from the PDE
  formulation class; this component only dumps the Jacobian, with a
  time-stamped filename per write.)

  Factory: jacobian_viewer.
  """

  # INVENTORY //////////////////////////////////////////////////////////

  class Inventory(PetscComponent.Inventory):
    """
    Python object for managing JacobianViewer facilities and properties.
    """

    ## @class Inventory
    ## Python object for managing JacobianViewer facilities and properties.
    ##
    ## \b Properties
    ## @li \b filename Filename for Jacobian matrix.
    ## @li \b time_format C style format string for time stamp in filename.
    ## @li \b time_constant Value used to normalize time stamp in filename.
    ##
    ## \b Facilities
    ## @li None

    import pyre.inventory

    filename = pyre.inventory.str("filename", default="jacobian.mat")
    filename.meta['tip'] = "Filename for Jacobian matrix."

    timeFormat = pyre.inventory.str("time_format", default="%f")
    timeFormat.meta['tip'] = "C style format string for time stamp in filename."

    from pyre.units.time import second
    timeConstant = pyre.inventory.dimensional("time_constant",
                                              default=1.0*second,
                                              validator=pyre.inventory.greater(0.0*second))
    timeConstant.meta['tip'] = \
        "Values used to normalize time stamp in filename."

  # PUBLIC METHODS /////////////////////////////////////////////////////

  def __init__(self, name="formulation"):
    """
    Constructor.

    NOTE(review): the default component name "formulation" looks inherited
    from the template this class was copied from -- confirm before changing.
    """
    PetscComponent.__init__(self, name, facility="jacobian_viewer")
    return

  def view(self, jacobian, t, comm):
    """
    Write Jacobian to binary file.

    The filename carries a stamp derived from simulation time t.
    """
    jacobian.write(self._filenameStamp(t), comm)
    return

  # PRIVATE METHODS ////////////////////////////////////////////////////

  def _configure(self):
    """
    Set members based using inventory.
    """
    PetscComponent._configure(self)
    self.filename = self.inventory.filename
    self.timeFormat = self.inventory.timeFormat
    self.timeConstant = self.inventory.timeConstant
    return

  def _filenameStamp(self, t):
    """
    Create filename by extracting basename and adding a time stamp.

    The stamp is timeFormat applied to t normalized by timeConstant; a
    trailing ".mat" suffix is stripped before appending "_t<stamp>.mat".
    """
    timeStamp = self.timeFormat % (t/self.timeConstant.value)
    basename = self.filename
    if basename.endswith(".mat"):
      basename = basename[0:len(basename)-4]
    filename = basename + "_t" + timeStamp + ".mat"
    return filename
# FACTORIES ////////////////////////////////////////////////////////////
def jacobian_viewer():
    """
    Pyre factory function: return a new JacobianViewer component
    for the 'jacobian_viewer' facility.
    """
    return JacobianViewer()
# End of file
|
[
"baagaard@usgs.gov"
] |
baagaard@usgs.gov
|
02026e3cc3bc04d240c802281453151f5a171cb6
|
4a7e359bb329e3df757c2b6c812512c24ec615eb
|
/zxserver/migrations/0013_zxcomments_active.py
|
849c33e5850883aaea0e8f8950c9b6f5450d2b3a
|
[] |
no_license
|
AlexBlueCrow/farmbackend
|
7768e84edafea230de713241db6bb566f7b8407e
|
311b25543d312a6b9d1b68905c15a6785a0b5097
|
refs/heads/master
| 2021-08-20T04:47:28.018997
| 2020-08-12T16:23:49
| 2020-08-12T16:23:49
| 214,458,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
# Generated by Django 2.2.5 on 2020-03-25 06:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a boolean ``active`` flag to zxcomments."""

    # Must be applied after the previous zxserver migration.
    dependencies = [
        ('zxserver', '0012_auto_20200324_0957'),
    ]

    operations = [
        migrations.AddField(
            model_name='zxcomments',
            name='active',
            # default=True backfills existing rows as active.
            field=models.BooleanField(default=True),
        ),
    ]
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
ec9d306935b9d6e779748f27758791152ace2ff5
|
581b8d28255229ce8c08c679ad1f169ec33dccc8
|
/tests/test_adjdictbqm.py
|
bebba45ad99b789da7e46a03dd1e0748b981877f
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
arcondello/dimod
|
6e3381299dd08e8aa01af2b95350ef92a9504ef9
|
9af460dc0f9028a1292cf786063c278cae3c0c80
|
refs/heads/master
| 2023-08-20T18:57:06.730554
| 2020-10-08T02:56:36
| 2020-10-08T02:56:36
| 115,138,524
| 1
| 0
|
Apache-2.0
| 2022-04-21T16:23:24
| 2017-12-22T17:55:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,444
|
py
|
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
import unittest
import numpy as np
from dimod.bqm.adjdictbqm import AdjDictBQM
class TestObjectDtype(unittest.TestCase):
    # AdjDictBQM has an object dtype so it has some special cases that need
    # to be tested

    def test_dtypes_array_like_ints(self):
        # biases built from a plain Python list should stay as python ints
        obj = [[0, 1], [1, 2]]
        bqm = AdjDictBQM(obj, 'BINARY')
        for _, bias in bqm.quadratic.items():
            self.assertIsInstance(bias, int)

    def test_dtypes_ndarray_ints(self):
        # biases built from an int32 ndarray should keep the numpy dtype
        # (the original comment said "python ints", but the assertion below
        # checks np.int32)
        obj = np.asarray([[0, 1], [1, 2]], dtype=np.int32)
        bqm = AdjDictBQM(obj, 'BINARY')
        for _, bias in bqm.quadratic.items():
            self.assertIsInstance(bias, np.int32)
|
[
"arcondello@gmail.com"
] |
arcondello@gmail.com
|
5056b4eb34025036efed2f77739e4022a917041f
|
4ca8382d8e0bc00f4cd440be63d7d64fbbd033c0
|
/test_site/mysite/news/migrations/0006_auto_20210608_1502.py
|
c4c425c07c24e3f981fdecb4725908c322ffb1cd
|
[] |
no_license
|
KarinaYatskevich/python
|
8ea0b8273399a313f161f5b4ce5e62e421109be1
|
b889532fe6dbf7f953b250c923584e910bc70b21
|
refs/heads/master
| 2023-08-28T14:52:27.298276
| 2021-10-22T19:27:29
| 2021-10-22T19:27:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,095
|
py
|
# Generated by Django 3.2.3 on 2021-06-08 12:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration reshaping the ``news`` model.

    Renames ``content`` to ``abstract`` and adds URI, translated-abstract,
    alternate-title, and bibliographic-entry fields.
    """

    dependencies = [
        ('news', '0005_alter_news_category'),
    ]

    operations = [
        # Rename keeps the column data; only the field name changes.
        migrations.RenameField(
            model_name='news',
            old_name='content',
            new_name='abstract',
        ),
        migrations.AddField(
            model_name='news',
            name='URI',
            # The one-off 'default' value backfills existing rows;
            # preserve_default=False then drops it from the field definition.
            field=models.URLField(default='default'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='news',
            name='abstract_in_another_language',
            field=models.TextField(blank=True),
        ),
        migrations.AddField(
            model_name='news',
            name='another_title',
            # NOTE: the one-off default is 'default ' (with a trailing space).
            field=models.CharField(default='default ', max_length=150),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='news',
            name='bibliographic_entry',
            field=models.TextField(blank=True),
        ),
    ]
|
[
"karinav19817@gmail.com"
] |
karinav19817@gmail.com
|
940287a3c1503fd4beefd16acea158f5422f72d3
|
633ab8880dc367feefdb6ef565ed0e70a4094bc1
|
/10001-11000/10989.py
|
dac6c76fc02397c18eca1041d39086789a6943ec
|
[] |
no_license
|
winston1214/baekjoon
|
2e9740ee2824d7777f6e64d50087b5c040baf2c6
|
20125255cd5b359023a6297f3761b2db1057d67d
|
refs/heads/master
| 2023-03-04T09:07:27.688072
| 2021-02-16T13:51:49
| 2021-02-16T13:51:49
| 284,832,623
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
# @Author YoungMinKim
# baekjoon
# Counting sort: read N integers (each in 0..10000), tally them in a
# frequency table, then print the values in non-decreasing order.
import sys

read = sys.stdin.readline

total = int(read())
freq = [0] * 10001
for _ in range(total):
    freq[int(read())] += 1

# Emit each value once per occurrence, in ascending order.
for value, count in enumerate(freq):
    for _ in range(count):
        print(value)
|
[
"winston1214@naver.com"
] |
winston1214@naver.com
|
8807631112c2be71c1b5d45755803ffd6af7db0f
|
8606e128484a4cc1fc4e7b406817a7ea96b55c8b
|
/src/run.py
|
fbb1ff4541fb7f17ac4c7648ff7c86c21ecf6c22
|
[
"CC0-1.0"
] |
permissive
|
ytyaru/Python.PySimpleGuiWeb.SetUp.20210618110027
|
a3eba2e262cbf40a3e26e854c849e604244aeb99
|
9b5163d7eb07ff8efae5de91bd21d02a87455a39
|
refs/heads/master
| 2023-06-07T21:16:56.862107
| 2021-06-18T06:38:15
| 2021-06-18T06:38:15
| 378,022,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
#!/usr/bin/env python3
# coding: utf8
import PySimpleGUIWeb as sg

# Inspect what the module exposes and which themes are available.
print(dir(sg))
print(sg.theme_list())

sg.theme('DarkGreen')

# Simple form: name (text input), age (spinner), hobby (combo), submit button.
layout = [
    [sg.Text('PySimpleGUIWeb テスト')],
    [sg.Text('名前', size=(15, 1)), sg.InputText('山田太郎')],
    [sg.Text('年齢', size=(15, 1)), sg.Spin(None, initial_value=20)],
    [sg.Text('趣味', size=(15, 1)), sg.Combo(['料理', '読書', '映画'])],
    [sg.Submit(button_text='実行')],
]

window = sg.Window('PySimpleGUIWeb テスト', layout)

while True:
    event, values = window.read()
    if event is None:
        # Window was closed by the user.
        print('exit')
        break
    if event == '実行':
        # Unkeyed elements are indexed 0..2 in `values`.
        summary = '名前:' + values[0] + '\n'
        summary += '年齢:' + values[1] + '\n'
        summary += '趣味:' + values[2] + 'が入力されました。'
        print(summary)
        sg.popup(summary)

window.close()
|
[
"yttry0@gmail.com"
] |
yttry0@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.