blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a941aa45b4e10dc6b7401fbe7ba650f95322f544
|
dd6dd68d17b7355e01c4ce86649423974bb69c50
|
/Masters/migrations/0002_serialnumber.py
|
b2266937dd7911f5043e0f92d9384abefe27e6ea
|
[
"MIT"
] |
permissive
|
adithyanps/netprofit-django
|
2b9953296fb016e4a16b30768ba864f91882573f
|
7ba87f054d09a201352635bb6cf8d0112208609e
|
refs/heads/master
| 2020-05-04T16:43:14.547552
| 2019-09-30T13:20:07
| 2019-09-30T13:20:07
| 179,285,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 835
|
py
|
# Generated by Django 2.2.4 on 2019-09-25 08:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the SerialNumber model (auto-generated by Django 2.2.4)."""
    dependencies = [
        ('Masters', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='SerialNumber',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # prefix/suffix presumably wrap the generated running number,
                # with padding as the zero-fill width — confirm against usage.
                ('prefix', models.CharField(max_length=6)),
                ('suffix', models.CharField(max_length=6)),
                ('start_number', models.IntegerField()),
                ('padding', models.IntegerField()),
                # Document type this serial sequence applies to. 'customer_reciept'
                # is misspelled in the stored choice label; kept as-is because
                # changing it would require a new migration.
                ('type', models.CharField(choices=[('CN', 'CreditNote'), ('DN', 'DebitNote'), ('SI', 'Sales'), ('CR', 'customer_reciept')], max_length=10)),
            ],
        ),
    ]
|
[
"adithynps3@gmial.com"
] |
adithynps3@gmial.com
|
4a1e2396753dbd98038ff0f8e6b6c8c9df9d2267
|
1337ccefdeddc753090281d473fa1e08d42b6884
|
/bnpl/plugin_itunes.py
|
f85c544dc2c7004c8fd8d1b161314418b8a5db90
|
[] |
no_license
|
abelsonlive/bnpl
|
8bd97d7bec8933642188814e07a38b544bcb3963
|
8f49a6d257fab75b7659ba2bae502595a164b8ee
|
refs/heads/master
| 2021-01-02T08:46:39.093073
| 2017-01-30T19:09:57
| 2017-01-30T19:09:57
| 76,924,249
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
from pyItunes import Library
from bnpl import Option, OptionSet
from bnpl import Extractor
class ItunesSongs(Extractor):
    """Extract sounds from your Itunes Library."""

    # Single required option: path to the iTunes library XML export.
    options = OptionSet(
        Option('library_xml', type='path', required=True)
    )

    def run(self):
        """Yield every song found in the configured iTunes library XML."""
        library = Library(self.options['library_xml'])
        for song in library.songs.values():
            yield song
|
[
"brianabelson@gmail.com"
] |
brianabelson@gmail.com
|
7fcd687644a4140303be421ead340e6b0a7527f4
|
e27f9f1f8bef8b1f4676df84ee3e753974d21a1c
|
/ignite/contrib/metrics/precision_recall_curve.py
|
5021315904b334d819722a75e9cb9036f4d4d11b
|
[
"BSD-3-Clause"
] |
permissive
|
pytorch/ignite
|
8fb275638e94e702762eec932b21dc8df7a54cb0
|
34a707e53785cf8a524589f33a570a7516fe064e
|
refs/heads/master
| 2023-09-02T00:27:22.485479
| 2023-08-31T15:10:14
| 2023-08-31T15:10:14
| 111,835,796
| 4,613
| 788
|
BSD-3-Clause
| 2023-09-13T07:46:41
| 2017-11-23T17:31:21
|
Python
|
UTF-8
|
Python
| false
| false
| 5,596
|
py
|
from typing import Any, Callable, cast, Tuple, Union
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import EpochMetric
def precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:
    """Run ``sklearn.metrics.precision_recall_curve`` on CPU copies of the tensors.

    Args:
        y_preds: tensor of probability estimates / confidence values.
        y_targets: tensor of ground-truth binary labels (0's and 1's).

    Returns:
        ``(precision, recall, thresholds)`` exactly as returned by scikit-learn.

    Raises:
        ModuleNotFoundError: if scikit-learn is not installed.
    """
    try:
        from sklearn.metrics import precision_recall_curve
    except ImportError as e:
        # Chain the original ImportError so the underlying cause stays visible.
        raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.") from e

    y_true = y_targets.cpu().numpy()
    y_pred = y_preds.cpu().numpy()
    return precision_recall_curve(y_true, y_pred)
class PrecisionRecallCurve(EpochMetric):
    """Compute precision-recall pairs for different probability thresholds for binary classification task
    by accumulating predictions and the ground-truth during an epoch and applying
    `sklearn.metrics.precision_recall_curve <https://scikit-learn.org/stable/modules/generated/
    sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ .

    Args:
        output_transform: a callable that is used to transform the
            :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
            form expected by the metric. This can be useful if, for example, you have a multi-output model and
            you want to compute the metric with respect to one of the outputs.
        check_compute_fn: Default False. If True, `precision_recall_curve
            <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html
            #sklearn.metrics.precision_recall_curve>`_ is run on the first batch of data to ensure there are
            no issues. User will be warned in case there are any issues computing the function.

    Note:
        PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates
        or confidence values. To apply an activation to y_pred, use output_transform as shown below:

        .. code-block:: python

            def sigmoid_output_transform(output):
                y_pred, y = output
                y_pred = torch.sigmoid(y_pred)
                return y_pred, y
            avg_precision = PrecisionRecallCurve(sigmoid_output_transform)

    Examples:
        .. include:: defaults.rst
            :start-after: :orphan:

        .. testcode::

            y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
            y_true = torch.tensor([0, 0, 1, 1])
            prec_recall_curve = PrecisionRecallCurve()
            prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')
            state = default_evaluator.run([[y_pred, y_true]])
            print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])
            print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])
            print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])

        .. testoutput::

            Precision [0.5, 0.6667, 1.0, 1.0, 1.0]
            Recall [1.0, 1.0, 1.0, 0.5, 0.0]
            Thresholds [0.0474, 0.5987, 0.7109, 0.9997]
    """

    def __init__(
        self,
        output_transform: Callable = lambda x: x,
        check_compute_fn: bool = False,
        device: Union[str, torch.device] = torch.device("cpu"),
    ) -> None:
        # Delegate accumulation/reset to EpochMetric; only the compute function
        # (sklearn's precision_recall_curve) is specific to this metric.
        super(PrecisionRecallCurve, self).__init__(
            precision_recall_curve_compute_fn,  # type: ignore[arg-type]
            output_transform=output_transform,
            check_compute_fn=check_compute_fn,
            device=device,
        )

    def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:  # type: ignore[override]
        """Return cached ``(precision, recall, thresholds)``, computing them once per epoch.

        Raises:
            NotComputableError: if no examples have been accumulated yet.
        """
        if len(self._predictions) < 1 or len(self._targets) < 1:
            raise NotComputableError("PrecisionRecallCurve must have at least one example before it can be computed.")
        # self._result caches the curve so repeated compute() calls are cheap.
        if self._result is None:
            _prediction_tensor = torch.cat(self._predictions, dim=0)
            _target_tensor = torch.cat(self._targets, dim=0)
            ws = idist.get_world_size()
            if ws > 1:
                # All gather across all processes
                _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
                _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
            if idist.get_rank() == 0:
                # Run compute_fn on zero rank only
                precision, recall, thresholds = cast(Tuple, self.compute_fn(_prediction_tensor, _target_tensor))
                precision = torch.tensor(precision, device=_prediction_tensor.device)
                recall = torch.tensor(recall, device=_prediction_tensor.device)
                # thresholds can have negative strides, not compatible with torch tensors
                # https://discuss.pytorch.org/t/negative-strides-in-tensor-error/134287/2
                thresholds = torch.tensor(thresholds.copy(), device=_prediction_tensor.device)
            else:
                precision, recall, thresholds = None, None, None
            if ws > 1:
                # broadcast result to all processes
                precision = idist.broadcast(precision, src=0, safe_mode=True)
                recall = idist.broadcast(recall, src=0, safe_mode=True)
                thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)
            self._result = (precision, recall, thresholds)  # type: ignore[assignment]
        return cast(Tuple[torch.Tensor, torch.Tensor, torch.Tensor], self._result)
|
[
"noreply@github.com"
] |
pytorch.noreply@github.com
|
e4fcd91448875458507f16667600fe6a845f31a3
|
403eac0dab1a935bf2ce1833a950a29ea5bc1d0b
|
/sentencepiece的使用.py
|
bd9b013f11aa4224b1c676e5b7c2b8b33899b978
|
[
"MIT"
] |
permissive
|
Le-Code/nlp-tutorial
|
ada91aaa0e50ff72bf4a24d70c40f67b58191f94
|
c8f46f702cc8643bb252a65b0a8cf375c7bd4704
|
refs/heads/master
| 2020-09-25T18:39:17.726661
| 2019-11-05T16:29:09
| 2019-11-05T16:29:09
| 226,065,142
| 1
| 0
| null | 2019-12-05T09:30:57
| 2019-12-05T09:30:56
| null |
UTF-8
|
Python
| false
| false
| 892
|
py
|
'''
Interactive reference for the sentencepiece Python API:

` % python
>>> import sentencepiece as spm
>>> sp = spm.SentencePieceProcessor()
>>> sp.Load("test/test_model.model") True
>>> sp.EncodeAsPieces("This is a test") ['\xe2\x96\x81This', '\xe2\x96\x81is', '\xe2\x96\x81a', '\xe2\x96\x81', 't', 'est']
>>> sp.EncodeAsIds("This is a test") [284, 47, 11, 4, 15, 400]
>>> sp.DecodePieces(['\xe2\x96\x81This', '\xe2\x96\x81is', '\xe2\x96\x81a', '\xe2\x96\x81', 't', 'est']) 'This is a test'
>>> sp.DecodeIds([284, 47, 11, 4, 15, 400]) 'This is a test'
>>> sp.GetPieceSize() 1000
>>> sp.IdToPiece(2) '</s>'
>>> sp.PieceToId('</s>') 2
>>> len(sp) 1000
>>> sp['</s>'] 2 `
'''
# Plan: keep two vocabularies, one English and one Chinese; splitting them
# should speed up the code, and a smaller vocabulary also speeds up convergence.
import sentencepiece as spm
sp = spm.SentencePieceProcessor()
# Dump the full SentencePieceProcessor API to stdout for reference.
help(sp)
'''
用法:https://github.com/zhangbo2008/sentencepiece
'''
|
[
"15122306087@163.com"
] |
15122306087@163.com
|
4ade1604112ef44e3fd921651808a154f424ddc5
|
c7d91529db199322e39e54fe4051a75704ea843e
|
/算法4/字符串/字符串排序.py
|
7194b2ca9bd98f1e8a4ad54cbca048dd9bead5fa
|
[] |
no_license
|
2226171237/Algorithmpractice
|
fc786fd47aced5cd6d96c45f8e728c1e9d1160b7
|
837957ea22aa07ce28a6c23ea0419bd2011e1f88
|
refs/heads/master
| 2020-12-26T07:20:37.226443
| 2020-09-13T13:31:05
| 2020-09-13T13:31:05
| 237,431,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,639
|
py
|
def keyIndexSort(arr, R):
    """Key-indexed counting sort (stable sort of small-integer keys).

    arr: list of (value, key) pairs, sorted in place by key; keys lie in [0, R).
    R:   number of distinct key values.
    """
    out = arr.copy()
    # counts is offset by one so the prefix sum yields each key's start index.
    counts = [0] * (R + 1)
    for _, k in arr:
        counts[k + 1] += 1
    # Turn frequencies into starting positions.
    for idx in range(1, R + 1):
        counts[idx] += counts[idx - 1]
    # Distribute records stably into the output buffer.
    for v, k in arr:
        out[counts[k]] = (v, k)
        counts[k] += 1
    arr[:] = out
class LSD:
    """Least-significant-digit-first radix sort for fixed-width strings."""

    def sort(self, arr, w):
        """Sort `arr` in place; every string must have length `w`.

        :param list[str] arr: strings to sort
        :param int w: common string length
        """
        n = len(arr)
        radix = 256  # extended-ASCII alphabet size
        buf = arr.copy()
        # Counting-sort each character column right-to-left; each pass is
        # stable, which makes the whole sort correct.
        for col in range(w - 1, -1, -1):
            counts = [0] * (radix + 1)
            for s in arr:
                counts[ord(s[col]) + 1] += 1
            for c in range(1, radix + 1):
                counts[c] += counts[c - 1]
            for i in range(n):
                code = ord(arr[i][col])
                buf[counts[code]] = arr[i]
                counts[code] += 1
            arr[:] = buf
class InsertSort:
    """Insertion sort comparing string suffixes arr[i][d:], for small subarrays."""

    def sort(self, arr, low, high, d):
        """Sort arr[low..high] in place, comparing strings from character d onward."""
        for i in range(low + 1, high + 1):
            j = i
            # Sink arr[i] leftward until its suffix is in order.
            while j > low and arr[j][d:] < arr[j - 1][d:]:
                arr[j], arr[j - 1] = arr[j - 1], arr[j]
                j -= 1
class MSD:
    """Most-significant-digit-first (MSD) radix sort for variable-length strings."""
    def __init__(self):
        # R: alphabet size (extended ASCII); H: cutoff below which insertion sort is used.
        self.R=256
        self.H=10
        self.insertsort=InsertSort()
    def sort(self,arr):
        """Sort the list of strings `arr` in place."""
        self.aux=arr.copy()
        self.subsort(arr,0,len(arr)-1,0)
    def key(self,s,d):
        """Return the d-th character code of s, or -1 when d is past the end
        (so shorter strings order before longer ones sharing the same prefix)."""
        if d>=len(s):
            return -1
        return ord(s[d])
    def subsort(self,arr,low,high,d):
        """Recursively sort arr[low..high]; all entries share a common prefix of length d."""
        # Small subarrays: fall back to insertion sort on the remaining suffixes.
        if high<=low+self.H:
            self.insertsort.sort(arr,low,high,d)
            return
        # counts is shifted by 2: counts[key + 2] holds raw frequencies so that
        # after the prefix sum, counts[key + 1] is the start index for that key
        # (key can be -1 for exhausted strings).
        counts=[0 for _ in range(self.R+2)]
        for i in range(low,high+1):
            key=self.key(arr[i],d)
            counts[key+2]+=1
        for r in range(1,self.R+2):
            counts[r]+=counts[r-1]
        # Stable distribution into the shared auxiliary buffer.
        for i in range(low,high+1):
            key=self.key(arr[i],d)
            self.aux[counts[key+1]]=arr[i]
            counts[key+1]+=1
        for i in range(low,high+1):
            arr[i]=self.aux[i-low]
        # Recurse on each character class; the key == -1 class (exhausted
        # strings) is already fully sorted and needs no recursion.
        for r in range(0,self.R):
            self.subsort(arr,low+counts[r],low+counts[r+1]-1,d+1)
class Quick3string:
    """Three-way-partitioning string quicksort (Sedgewick's 3-way radix quicksort).

    Strings are compared one character position at a time; key() maps
    "past the end of the string" to -1 so that shorter strings order before
    longer ones sharing the same prefix.
    """

    def key(self, s, d):
        """Return the character code at position d of s, or -1 when d is past the end."""
        if len(s) <= d:
            return -1
        else:
            return ord(s[d])

    def sort(self, arr):
        """Sort the list of strings `arr` in place."""
        self._subsort(arr, 0, len(arr) - 1, 0)

    def _subsort(self, arr, low, high, d):
        """Sort arr[low..high] in place; all entries share a common prefix of length d."""
        if low >= high:
            return
        v = self.key(arr[low], d)  # pivot: d-th character of the first string
        lt = low        # arr[low..lt-1]  hold keys < pivot
        i = low + 1     # arr[lt..i-1]    hold keys == pivot
        gt = high       # arr[gt+1..high] hold keys > pivot
        while i <= gt:
            t = self.key(arr[i], d)
            if t < v:
                arr[i], arr[lt] = arr[lt], arr[i]
                i += 1
                lt += 1
            elif t == v:
                i += 1
            else:
                arr[i], arr[gt] = arr[gt], arr[i]
                gt -= 1
        self._subsort(arr, low, lt - 1, d)
        # BUG FIX: recurse on the "equal" partition whenever the pivot character
        # exists (v >= 0), not only when v > 0 — the original skipped recursion
        # when the pivot character was NUL ('\x00', code 0), leaving such strings
        # unsorted beyond position d. v == -1 means every string in [lt, gt] is
        # exhausted (all equal), so no recursion is needed.
        if v >= 0:
            self._subsort(arr, lt, gt, d + 1)
        self._subsort(arr, gt + 1, high, d)
if __name__ == '__main__':
    # Demo: key-indexed counting sort on (name, key) records.
    records = [('lijie', 1), ('liuzi', 2), ('zhangliu', 2), ('miwmi', 1),
               ('liuzhao', 3), ('xiaozju', 2), ('liejo', 3), ('liuzhao', 0)]
    keyIndexSort(records, 4)
    print(records)

    # Demo: LSD radix sort on fixed-width (7-character) plate strings.
    plates = ['4PGC938', '2IYE230', '3CI0720', '1ICK750', '10HV845',
              '4JZY524', '1ICK750', '3CI0720', '10HV845']
    lsd = LSD()
    lsd.sort(plates, 7)

    # Demo: MSD radix sort on variable-length words.
    msd = MSD()
    words = ['she', 'by', 'shells', 'the', 'sea', 'are', 'surely', 'seashells']
    msd.sort(words)
    print(words)

    # Demo: three-way string quicksort on the same words.
    quick = Quick3string()
    words = ['she', 'by', 'shells', 'the', 'sea', 'are', 'surely', 'seashells']
    quick.sort(words)
    print(words)
|
[
"2226171237@qq.com"
] |
2226171237@qq.com
|
061399c156296dfd0f08ab0ef22d181d250ea69e
|
856323fc904cd36b947114666186a2bcd0c1e10e
|
/tests/randMove/test_randMoveSIS.py
|
f612f0051507494ef17fd722dc003e328d396a01
|
[
"MIT"
] |
permissive
|
mjacob1002/Eir
|
fd6ee0fa7c2e0af93a34dca66bcd5b07a5c31f05
|
ab9cb4e353796ba3ab79b1673adc251d434717cf
|
refs/heads/master
| 2023-04-15T13:06:14.897503
| 2021-07-04T20:06:15
| 2021-07-04T20:06:15
| 286,567,858
| 39
| 9
|
MIT
| 2021-07-04T20:06:16
| 2020-08-10T20:03:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,222
|
py
|
import numpy as np
import pandas as pd
import unittest
from Eir.DTMC.spatialModel.randomMovement.randMoveSIS import RandMoveSIS
import Eir.exceptions as e
np.random.seed(35235)
class Test_RandMoveSIS(unittest.TestCase):
    """Regression checks for RandMoveSIS; driven directly from __main__ rather
    than unittest discovery (methods deliberately do not start with 'test_')."""

    def __init__(self):
        # FIX: initialize unittest.TestCase internals before using inherited
        # helpers such as assertRaises — the original override never called
        # super().__init__(), leaving the TestCase base state unset.
        super().__init__()
        self.test = RandMoveSIS(999, 2, .3, 25, 3, .3, 1, .25, 31, 1.0, 2.0)
        self.sdetails = self.test.run()

    def generateCSV(self):
        """Regenerate the golden-output CSV from the current simulation run."""
        df = self.test.toDataFrame()
        df.to_csv("randMoveSIS.csv", index=False)

    def checkOutput(self):
        """Compare the current run against the cached golden CSV."""
        df = self.test.toDataFrame()
        df2 = pd.read_csv("randMoveSIS.csv")
        assert df.equals(df2)
        print("Output test passed")

    def checkSimulInputs(self):
        """Simul_Details accessors must reject invalid person ids and days."""
        # checks for invalid person inputs
        self.assertRaises(e.NotIntException, self.sdetails.personHistory, 100.0)
        self.assertRaises(e.PersonNotFound, self.sdetails.personHistory, 1001)
        # checks for exceptions when inputting days
        self.assertRaises(e.DayOutOfRange, self.sdetails.transmissionHistoryOnDay, 65)
        self.assertRaises(e.DayOutOfRange, self.sdetails.transmissionHistoryOnDay, -1)
        self.assertRaises(e.NotIntException, self.sdetails.transmissionHistoryOnDay, 25.0)
        print("Simul_Details input test passed: throws error for invalid inputs")

    def checkInputs(self):
        """Constructor must validate types, signs and probability ranges."""
        # int check
        self.assertRaises(e.NotIntException, RandMoveSIS, 999.0, 1, .3, 25, 3, .3, 1, .25, 31, 1.0, 2.0)
        self.assertRaises(e.NotIntException, RandMoveSIS, 999, 1.0, .3, 25, 3, .3, 1, .25, 31, 1.0, 2.0)
        self.assertRaises(e.NotIntException, RandMoveSIS, 999, 1, .3, 25, 3, .3, 1, .25, 31.0, 1.0, 2.0)
        # float check
        self.assertRaises(e.NotFloatException, RandMoveSIS, 999, 1, '.3', 25, 3, .3, 1, .25, 31, 1.0, 2.0)
        self.assertRaises(e.NotFloatException, RandMoveSIS, 999, 1, .3, "25", 3, .3, 1, .25, 31, 1.0, 2.0)
        self.assertRaises(e.NotFloatException, RandMoveSIS, 999, 1, .3, 25, "3", .3, 1, .25, 31, 1.0, 2.0)
        self.assertRaises(e.NotFloatException, RandMoveSIS, 999, 1, .3, 25, 3, True, 1, .25, 31, 1.0, 2.0)
        self.assertRaises(e.NotFloatException, RandMoveSIS, 999, 1, .3, 25, 3, .3, 1, .25, 31, False, 2.0)
        # negvalue check
        self.assertRaises(e.NegativeValException, RandMoveSIS, -999, 1, .3, 25, 3, .3, 1, .25, 31, 1.0, 2.0)
        self.assertRaises(e.NegativeValException, RandMoveSIS, 999, -1, .3, 25, 3, .3, 1, .25, 31, 1.0, 2.0)
        self.assertRaises(e.NegativeValException, RandMoveSIS, 999, 1, -.3, 25, 3, .3, 1, .25, 31, 1.0, 2.0)
        self.assertRaises(e.NegativeValException, RandMoveSIS, 999, 1, .3, -25, 3, .3, 1, .25, 31, 1.0, 2.0)
        self.assertRaises(e.NegativeValException, RandMoveSIS, 999, 1, .3, 25, -3, .3, 1, .25, 31, 1.0, 2.0)
        # probability check
        self.assertRaises(e.ProbabilityException, RandMoveSIS, 999, 1, .3, 25, 3, .3, 1, .25, 31, 1.01, 2.0)
        self.assertRaises(e.ProbabilityException, RandMoveSIS, 999, 1, 1.3, 25, 3, .3, 1, .25, 31, 1.0, 2.0)
        print("Input Test passed")
if __name__ == '__main__':
    # Build the harness once, then run every check in sequence.
    tester = Test_RandMoveSIS()
    # tester.generateCSV()  # only needed to regenerate the golden CSV
    tester.checkOutput()
    tester.checkSimulInputs()
    tester.checkInputs()
|
[
"mjacob1002@gmail.com"
] |
mjacob1002@gmail.com
|
871f553222511c050f1cdb9731b0b3044efd8503
|
b2cc6507d5260514f63a3f0aa7915308cd20bf28
|
/shelf/migrations/0002_auto_20200426_1245.py
|
906189220f06b19dd541cfceb01b3172ff017dee
|
[] |
no_license
|
japawka/kozaczko
|
4df09523543a15ae59abad24b689730fecca8049
|
20cf9e0e83588b6ecd79ab10889925144688f2b7
|
refs/heads/master
| 2022-06-07T21:37:42.788616
| 2020-04-28T13:39:09
| 2020-04-28T13:39:09
| 259,619,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,426
|
py
|
# Generated by Django 3.0.5 on 2020-04-26 10:45
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Restructure the book schema (auto-generated by Django 3.0.5):
    add BookCategory, BookEdition and BookItem; drop Book.author/isbn/publisher
    in favour of a Book.authors many-to-many, edition-level isbn/publisher,
    and Book.categories."""
    dependencies = [
        ('shelf', '0001_initial'),
    ]
    operations = [
        # New lookup table for book categories.
        migrations.CreateModel(
            name='BookCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        # An edition carries the ISBN and publication date (FKs added below).
        migrations.CreateModel(
            name='BookEdition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('isbn', models.CharField(max_length=17)),
                ('date', models.DateField()),
            ],
        ),
        # Remove single-valued fields from Book; replaced by the relations below.
        migrations.RemoveField(
            model_name='book',
            name='author',
        ),
        migrations.RemoveField(
            model_name='book',
            name='isbn',
        ),
        migrations.RemoveField(
            model_name='book',
            name='publisher',
        ),
        migrations.AddField(
            model_name='book',
            name='authors',
            field=models.ManyToManyField(to='shelf.Author'),
        ),
        # BookItem: a physical copy of an edition, keyed by UUID.
        migrations.CreateModel(
            name='BookItem',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('cat_number', models.CharField(max_length=30)),
                ('cover_type', models.CharField(choices=[('soft', 'Soft'), ('hard', 'Hard')], max_length=4)),
                ('edition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shelf.BookEdition')),
            ],
        ),
        migrations.AddField(
            model_name='bookedition',
            name='book',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shelf.Book'),
        ),
        migrations.AddField(
            model_name='bookedition',
            name='publisher',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shelf.Publisher'),
        ),
        migrations.AddField(
            model_name='book',
            name='categories',
            field=models.ManyToManyField(to='shelf.BookCategory'),
        ),
    ]
|
[
"japawka@poczta.onet.pl"
] |
japawka@poczta.onet.pl
|
a53f4e73d6c753979be2329785696ae68b6dc336
|
438e546e2acf5aa57c34c6481e477f7025b12e21
|
/mocks/skowser_session3_question1.py
|
ddbe00521f78b177bdaa90997f0a5043f2b26f86
|
[] |
no_license
|
SajinKowserSK/algorithms-practice
|
988537ef3537487cb40c78776dd2c9e1130cde4f
|
41bbd55553747492a539b41f6e86bff5504c5842
|
refs/heads/master
| 2022-11-06T18:22:41.329484
| 2022-10-19T23:40:10
| 2022-10-19T23:40:10
| 206,470,288
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 919
|
py
|
class ListNode(object):
    """A singly linked list node: a value plus a reference to the next node."""
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
def has_cycle(head):
    """Return True iff the singly linked list starting at `head` contains a cycle.

    Floyd's tortoise-and-hare: the fast pointer advances two nodes per step and
    the slow pointer one; they can only coincide when a cycle exists.
    O(n) time, O(1) extra space.
    """
    if head is None:
        return False
    slow = head
    fast = head
    while fast is not None and fast.next is not None:
        fast = fast.next.next
        slow = slow.next
        if slow == fast:
            return True
    return False
# Acyclic list 1 -> 2 -> 3 -> 4 -> 5: no cycle expected.
node1, node2, node3, node4, node5 = (ListNode(v) for v in range(1, 6))
node1.next, node2.next, node3.next, node4.next, node5.next = (
    node2, node3, node4, node5, None)
print(has_cycle(node1))  # False

# Same list, but the tail now points back at node2, closing a cycle.
node1, node2, node3, node4, node5 = (ListNode(v) for v in range(1, 6))
node1.next, node2.next, node3.next, node4.next, node5.next = (
    node2, node3, node4, node5, node2)
print(has_cycle(node1))  # True
|
[
"sajinkowser@gmail.com"
] |
sajinkowser@gmail.com
|
0eacc8850c2e0a1b284058bd4cf49418cf285991
|
5a6da40024217c0ca2c0242f031d6d0105775899
|
/utils.py
|
13bdef9709be85f99880404dcbf366620db0ea5f
|
[
"MIT"
] |
permissive
|
tobyma/generative
|
06fa928c8882469a3fe35cb69863c1ae06409021
|
75cb6c1065cd04ce4eaf13b90b037137a8ad7873
|
refs/heads/master
| 2020-04-22T13:53:58.599101
| 2017-03-04T05:01:16
| 2017-03-04T05:01:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,038
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
def data_on_latent_space(encoded, categories, ax=None):
    """Scatter-plot the data in the 2-D latent space.

    encoded: array whose first two columns are the encoded coordinates
    categories: per-point category labels, used to color the points
    ax[=None]: existing matplotlib axis to draw on; when omitted, a new
               6x6-inch figure with a single axis is created

    Returns (figure, axis); figure is None when an axis was supplied.
    """
    if ax:
        fig = None
    else:
        fig = plt.figure(figsize=(6, 6))
        ax = fig.add_subplot(111)
    ax.scatter(encoded[:, 0], encoded[:, 1], c=categories)
    return fig, ax
def manifold_2D(generator, ax=None, n=15, shape=(28,28), latent_space='gaussian', latent_range=(0.05, 0.95)):
    """Display a 2D manifold of decoded digits.

    @params:
        generator: an object exposing a .predict() function
        ax[=None]: axis to draw on; a new 10x10-inch figure is made if omitted
        n[=15]: number of samples per latent dimension (n*n tiles in total)
        shape[=(28,28)]: shape each decoded sample is reshaped to
        latent_space[='gaussian']: only 'gaussian' is implemented
        latent_range[=(0.05,0.95)]: quantile range sampled in each dimension
    @returns:
        (figure, axis) with the manifold image drawn; figure is None when
        an existing axis was supplied.
    """
    size = shape[0]
    canvas = np.zeros((size * n, size * n))
    # Linearly spaced quantiles are pushed through the Gaussian inverse CDF
    # (ppf) so the latent samples z follow the Gaussian prior of the latent
    # space.
    if latent_space == 'gaussian':
        quantiles = np.linspace(latent_range[0], latent_range[1], n)
        grid_x = stats.norm.ppf(quantiles)
        grid_y = stats.norm.ppf(quantiles)
    else:
        raise NotImplementedError('Unknown Latent Space not yet implemented')
    for row, yi in enumerate(grid_x):
        for col, xi in enumerate(grid_y):
            z_sample = np.array([[xi, yi]])
            decoded = generator.predict(z_sample)
            tile = decoded[0].reshape((size, size))
            canvas[row * size:(row + 1) * size,
                   col * size:(col + 1) * size] = tile
    if ax:
        fig = None
    else:
        fig = plt.figure(figsize=(10, 10))
        ax = fig.add_subplot(111)
    ax.imshow(canvas, cmap='Greys_r')
    return fig, ax
|
[
"zafarali.ahmed@gmail.com"
] |
zafarali.ahmed@gmail.com
|
e7c6ef1f37a3c97baf924f1e1b774645219dce68
|
0e78b2df0fb93afc62684dece8ac05b700570248
|
/BOJ/10950.py
|
94b5b24e31f885c29d43c6d8b15d547aa553f987
|
[] |
no_license
|
ajy720/Algorithm
|
f1e2301327db09667ba011bc317c8f380707c25c
|
b141538802e9056f154ab91c816ad29500505f34
|
refs/heads/master
| 2022-05-06T21:37:05.780170
| 2022-04-23T09:25:52
| 2022-04-23T09:25:52
| 200,335,390
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
# BOJ 10950 (A+B): read the number of cases, then two ints per line;
# collect every sum first, then print them all.
case_count = int(input())
sums = []
for _ in range(case_count):
    a, b = map(int, input().split())
    sums.append(a + b)
for total in sums:
    print(total)
|
[
"ajy720@gmail.com"
] |
ajy720@gmail.com
|
9eaaae7e015f7af1d6d99117f0f6bac098baf788
|
5bec846696ea2c198186f492baec4ed6b1de8aae
|
/detect/eval/voc_eval.py
|
cb30858f545329e942e2b60217ca5361465088b4
|
[] |
no_license
|
Peiiii/lpr
|
0e268e1ff71ae37d01a3501c608f4a8024df6dd2
|
90bcbdee4555915b77dd6c6dab2b48ed56c9952d
|
refs/heads/master
| 2022-12-11T23:57:37.075730
| 2019-10-02T15:25:09
| 2019-10-02T15:25:09
| 210,851,442
| 0
| 0
| null | 2022-11-21T21:32:26
| 2019-09-25T13:24:36
|
Python
|
UTF-8
|
Python
| false
| false
| 7,137
|
py
|
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
# --------------------------------------------------------
import xml.etree.ElementTree as ET
import os
import pickle as cPickle
import numpy as np
def parse_rec(filename):
    """Parse a PASCAL VOC annotation XML file.

    Returns a list of dicts, one per <object> element, with keys 'name'
    (hard-coded to 'text' for this detector), 'pose', 'truncated',
    'difficult' and 'bbox' ([xmin, ymin, xmax, ymax] as ints).
    """
    objects = []
    for node in ET.parse(filename).findall('object'):
        box = node.find('bndbox')
        objects.append({
            # Class name is fixed: this evaluator only handles text regions.
            'name': 'text',
            'pose': node.find('pose').text,
            'truncated': int(node.find('truncated').text),
            'difficult': int(node.find('difficult').text),
            'bbox': [int(box.find('xmin').text),
                     int(box.find('ymin').text),
                     int(box.find('xmax').text),
                     int(box.find('ymax').text)],
        })
    return objects
def voc_ap(rec, prec, use_07_metric=False):
    """ap = voc_ap(rec, prec, [use_07_metric])

    Compute VOC average precision given recall and precision arrays.
    If use_07_metric is true, uses the VOC 07 11-point interpolation
    (default: False); otherwise the exact area under the interpolated
    precision-recall curve.
    """
    if use_07_metric:
        # VOC07: average interpolated precision at recall levels 0.0, 0.1, ..., 1.0.
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            p = np.max(prec[rec >= t]) if np.sum(rec >= t) > 0 else 0
            ap = ap + p / 11.
        return ap
    # Exact AP: append sentinel values, take the running-max precision
    # envelope, then integrate precision over the recall steps.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Points where the recall value changes contribute area.
    changed = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[changed + 1] - mrec[changed]) * mpre[changed + 1])
def voc_eval(detpath,
             annopath,
             imagesetfile,
             classname,
             cachedir,
             ovthresh=0.5,
             use_07_metric=False):
    """rec, prec, ap = voc_eval(detpath, annopath, imagesetfile, classname,
                                cachedir, [ovthresh], [use_07_metric])

    Top level function that does the PASCAL VOC evaluation.

    detpath: Path to detections;
        detpath.format(classname) should produce the detection results file.
    annopath: Path to annotations;
        annopath.format(imagename) should be the xml annotations file.
    imagesetfile: Text file containing the list of images, one image per line.
    classname: Category name.
    cachedir: Directory for caching the parsed annotations (pickle).
    [ovthresh]: IoU overlap threshold counting a detection as a true
        positive (default = 0.5).
    [use_07_metric]: Whether to use VOC07's 11 point AP computation
        (default False).

    Returns (recall, precision, ap): per-rank recall/precision arrays over the
    confidence-sorted detections, and the scalar average precision.
    """
    # first load gt
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    cachefile = os.path.join(cachedir, 'annots.pkl')
    # read list of images
    with open(imagesetfile, 'r') as f:
        lines = f.readlines()
    imagenames = [x.strip() for x in lines]
    if not os.path.isfile(cachefile):
        # parse annotations and cache them
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(annopath.format(imagename))
            if i % 100 == 0:
                print('Reading annotation for {:d}/{:d}'.format(
                    i + 1, len(imagenames)))
        print('Saving cached annotations to {:s}'.format(cachefile))
        # BUG FIX: pickle requires binary file modes under Python 3; the
        # original text modes ('w'/'r') raise TypeError on dump/load.
        with open(cachefile, 'wb') as f:
            cPickle.dump(recs, f)
    else:
        # load cached annotations
        with open(cachefile, 'rb') as f:
            recs = cPickle.load(f)
    # extract gt objects for this class
    class_recs = {}
    npos = 0
    for imagename in imagenames:
        R = [obj for obj in recs[imagename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in R])
        # BUG FIX: np.bool was deprecated and removed in NumPy >= 1.24;
        # the builtin bool is the supported equivalent.
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
        det = [False] * len(R)
        # "difficult" ground truths do not count toward recall
        npos = npos + sum(~difficult)
        class_recs[imagename] = {'bbox': bbox,
                                 'difficult': difficult,
                                 'det': det}
    # read dets
    detfile = detpath.format(classname)
    with open(detfile, 'r') as f:
        lines = f.readlines()
    splitlines = [x.strip().split(' ') for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
    # rank detections by descending confidence
    sorted_ind = np.argsort(-confidence)
    BB = BB[sorted_ind, :]
    image_ids = [image_ids[x] for x in sorted_ind]
    # go down dets and mark TPs and FPs
    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        R = class_recs[image_ids[d]]
        bb = BB[d, :].astype(float)
        ovmax = -np.inf
        BBGT = R['bbox'].astype(float)
        if BBGT.size > 0:
            # intersection (VOC convention: inclusive pixel coords, hence +1)
            ixmin = np.maximum(BBGT[:, 0], bb[0])
            iymin = np.maximum(BBGT[:, 1], bb[1])
            ixmax = np.minimum(BBGT[:, 2], bb[2])
            iymax = np.minimum(BBGT[:, 3], bb[3])
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih
            # union
            uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
                   (BBGT[:, 2] - BBGT[:, 0] + 1.) *
                   (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
            overlaps = inters / uni
            ovmax = np.max(overlaps)
            jmax = np.argmax(overlaps)
        if ovmax > ovthresh:
            if not R['difficult'][jmax]:
                if not R['det'][jmax]:
                    # first sufficiently-overlapping match: true positive
                    tp[d] = 1.
                    R['det'][jmax] = 1
                else:
                    # ground truth already claimed: duplicate detection
                    fp[d] = 1.
        else:
            fp[d] = 1.
    # compute precision recall
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, use_07_metric)
    return rec, prec, ap
|
[
"1535376447@qq.com"
] |
1535376447@qq.com
|
c37b3f1f31f82758423901149d2f6c52870759a6
|
2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5
|
/archive/33SearchinRotatedSortedArray.py
|
8947296c9b426239174e8e74045feff557580a62
|
[] |
no_license
|
doraemon1293/Leetcode
|
924b19f840085a80a9e8c0092d340b69aba7a764
|
48ba21799f63225c104f649c3871444a29ab978a
|
refs/heads/master
| 2022-10-01T16:20:07.588092
| 2022-09-08T02:44:56
| 2022-09-08T02:44:56
| 122,086,222
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 1,248
|
py
|
# coding=utf-8
'''
Created on 2016-12-22

@author: Administrator
'''
class Solution(object):
    """LeetCode 33: search a target in a rotated sorted array of distinct values."""

    def search(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: int  -- index of target in nums, or -1 when absent
        """
        def find_min_idx(nums):
            # Binary search for the rotation point (index of the smallest element).
            lo, hi = 0, len(nums) - 1
            while lo < hi:
                if nums[lo] < nums[hi]:
                    # This range is already sorted; its first element is the minimum.
                    return lo
                # BUG FIX: use floor division (//). Under Python 3 the original
                # true division (/) yields a float and crashes on indexing;
                # // is identical for ints under Python 2, so both work.
                mid = (lo + hi) // 2
                if nums[mid] < nums[hi]:
                    hi = mid
                else:
                    lo = mid + 1
            return lo
        min_idx = find_min_idx(nums)

        def search(nums, s, e, target):
            # Plain binary search on the sorted slice nums[s..e]; None when absent.
            while s <= e:
                mid = (s + e) // 2
                if target < nums[mid]:
                    e = mid - 1
                elif target > nums[mid]:
                    s = mid + 1
                else:
                    return mid
            return None

        # The array is two sorted runs around the rotation point: try each.
        ans = search(nums, 0, min_idx - 1, target)
        if ans != None: return ans
        ans = search(nums, min_idx, len(nums) - 1, target)
        if ans != None:
            return ans
        else:
            return -1
# Quick manual check. FIX: print as a function call — `print expr` is a
# Python-2-only statement; print(expr) behaves identically on both 2 and 3.
nums = [1]
target = 1
print(Solution().search(nums, target))
|
[
"yanhuang1293@gmail.com"
] |
yanhuang1293@gmail.com
|
24fd164938cb979b18d12711c9ca1fcc2cadfa53
|
07b4dd9a88f3404c4851ea7cbb57c67035bc9a54
|
/tables.py
|
8583928d290312a85da78b29569f435b41ae38a5
|
[] |
no_license
|
surajgholap/python-Misc
|
9c9d02c42bb37b7378d7336343f8bef7cd802edf
|
4a8ce4bfa5a959692d98663b7b5c0b67a165835f
|
refs/heads/master
| 2021-06-17T19:19:25.021038
| 2021-01-27T20:54:03
| 2021-01-27T20:54:03
| 142,781,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
def print_tables(base, times):
    """Print multiplication tables: row i (for i in 1..base) lists i*1 .. i*times,
    space-separated (including a trailing space), one row per line."""
    for row in range(1, base + 1):
        # join keeps the trailing space the original produced via end=" ".
        print(''.join(f'{row * col} ' for col in range(1, times + 1)))
# Demo: print the classic 12x12 multiplication table.
print_tables(12, 12)
|
[
"surajgholap27@gmail.com"
] |
surajgholap27@gmail.com
|
703f70a906c0d25b1b21f4c05a311f1a735b51eb
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Keras_tensorflow/source/tensorflow/contrib/layers/python/ops/sparse_feature_cross_op.py
|
688315fd12e6f3b07e97cd1fc273c6ed725de5ca
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 5,025
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for sparse cross operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.util import loader
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader
# Load the native shared library that implements the sparse cross kernels.
_sparse_feature_cross_op = loader.load_op_library(
    resource_loader.get_path_to_datafile("_sparse_feature_cross_op.so"))
# Default hash key for the FingerprintCat64.
SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY = 0xDECAFCAFFE
@deprecated_arg_values(
    "2016-11-20",
    "The default behavior of sparse_feature_cross is changing, the default\n"
    "value for hash_key will change to SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY.\n"
    "From that point on sparse_feature_cross will always use FingerprintCat64\n"
    "to concatenate the feature fingerprints. And the underlying\n"
    "_sparse_feature_cross_op.sparse_feature_cross operation will be marked\n"
    "as deprecated.",
    hash_key=None)
def sparse_feature_cross(inputs, hashed_output=False, num_buckets=0,
                         name=None, hash_key=None):
  """Crosses a list of Tensor or SparseTensor objects.
  See sparse_feature_cross_kernel.cc for more details.
  Args:
    inputs: List of `SparseTensor` or `Tensor` to be crossed.
    hashed_output: If true, returns the hash of the cross instead of the string.
      This will allow us avoiding string manipulations.
    num_buckets: It is used if hashed_output is true.
      output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
    name: A name prefix for the returned tensors (optional).
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseFeatureCrossOp.
      The default value is None, but will become
      SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY after 2016-11-20 (optional).
  Returns:
    A `SparseTensor` with the crossed features.
    Return type is string if hashed_output=False, int64 otherwise.
  Raises:
    TypeError: If the inputs aren't either SparseTensor or Tensor.
  """
  # Validate input container and element types before touching tensor attrs.
  if not isinstance(inputs, list):
    raise TypeError("Inputs must be a list")
  if not all(isinstance(i, sparse_tensor.SparseTensor) or
             isinstance(i, ops.Tensor) for i in inputs):
    raise TypeError("All inputs must be SparseTensors")
  # Partition the inputs: sparse tensors are passed as (indices, values,
  # shapes) triples; dense tensors are passed through as-is.
  sparse_inputs = [i for i in inputs
                   if isinstance(i, sparse_tensor.SparseTensor)]
  dense_inputs = [i for i in inputs
                  if not isinstance(i, sparse_tensor.SparseTensor)]
  indices = [sp_input.indices for sp_input in sparse_inputs]
  values = [sp_input.values for sp_input in sparse_inputs]
  shapes = [sp_input.dense_shape for sp_input in sparse_inputs]
  out_type = dtypes.int64 if hashed_output else dtypes.string
  # The kernel only handles string or int64 values; coerce anything else to
  # int64 and remember which internal representation is in play.
  internal_type = dtypes.string
  for i in range(len(values)):
    if values[i].dtype != dtypes.string:
      values[i] = math_ops.to_int64(values[i])
      internal_type = dtypes.int64
  for i in range(len(dense_inputs)):
    if dense_inputs[i].dtype != dtypes.string:
      dense_inputs[i] = math_ops.to_int64(dense_inputs[i])
      internal_type = dtypes.int64
  # NOTE(review): truthiness test means hash_key=0 falls through to the
  # legacy (v1) op below — presumably 0 is meant as "unset"; confirm.
  if hash_key:
    indices_out, values_out, shape_out = (
        _sparse_feature_cross_op.sparse_feature_cross_v2(
            indices,
            values,
            shapes,
            dense_inputs,
            hashed_output,
            num_buckets,
            hash_key=hash_key,
            out_type=out_type,
            internal_type=internal_type,
            name=name))
  else:
    indices_out, values_out, shape_out = (
        _sparse_feature_cross_op.sparse_feature_cross(
            indices,
            values,
            shapes,
            dense_inputs,
            hashed_output,
            num_buckets,
            out_type=out_type,
            internal_type=internal_type,
            name=name))
  return sparse_tensor.SparseTensor(indices_out, values_out, shape_out)
# The cross ops produce hashed/string features; no gradient is defined.
ops.NotDifferentiable("SparseFeatureCross")
ops.NotDifferentiable("SparseFeatureCrossV2")
|
[
"ryfeus@gmail.com"
] |
ryfeus@gmail.com
|
6c7a1cd28299eeeddc802b36c228d41fdab88e8c
|
fcdfe976c9ed60b18def889692a17dc18a8dd6d7
|
/python/basic/class.py
|
f5dbcf0e2b60d201d45824592934ddc0e4c39888
|
[] |
no_license
|
akihikoy/ay_test
|
4907470889c9bda11cdc84e8231ef3156fda8bd7
|
a24dfb720960bfedb94be3b4d147e37616e7f39a
|
refs/heads/master
| 2023-09-02T19:24:47.832392
| 2023-08-27T06:45:20
| 2023-08-27T06:45:20
| 181,903,332
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
#!/usr/bin/python
class TTest:
  """Demo class: class attribute x plus an instance attribute y set in __init__."""
  # Class-level attribute, shared until shadowed on an instance.
  x= 10
  def __init__(self):
    # Instance attribute; always shadows any class-level y.
    self.y= None
  def __repr__(self):
    return 'x=%r y=%r' % (self.x, self.y)
class TTestB:
  """Demo class: both x and y are class-level attributes (no __init__)."""
  x= 10
  y= None
# Demonstrate class vs instance attribute shadowing (Python 2 script).
# Assigning on an instance shadows the class attribute for that instance only.
test1= TTest()
test1.x= 20
test1.y= 200
test2= TTest()
print 'test1=',test1
print 'test2=',test2
# Rebinding on the class affects instances that have not shadowed the name.
TTest.x= 30
TTest.y= 300
test3= TTest()
print 'test3=',test3
print '-----'
# Same experiment with TTestB, where y is also class-level.
test1= TTestB()
test1.x= 20
test1.y= 200
test2= TTestB()
print 'test1=',test1.x,test1.y
print 'test2=',test2.x,test2.y
TTestB.x= 30
TTestB.y= 300
test3= TTestB()
print 'test2=',test3.x,test3.y
|
[
"info@akihikoy.net"
] |
info@akihikoy.net
|
fc98453f489a4a248f7440bdcc7b9b95490ea51e
|
097dda217c3d31b69cb309369dc0357fe0f229ab
|
/app/customadmin/migrations/0005_shopproduct.py
|
38f7c55e8acce58b5eb16fcb194cc2e69f1c26f6
|
[] |
no_license
|
Jaycitrusbug/book-python
|
57a96ee343eee5b63ca5f7ee2461db82426321b5
|
b5a4de74c9114546ee03b8aa5de1381719ddf74e
|
refs/heads/master
| 2023-06-20T01:52:29.484415
| 2021-07-16T13:06:05
| 2021-07-16T13:06:05
| 386,638,204
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,435
|
py
|
# Generated by Django 3.1.4 on 2020-12-09 12:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ShopProduct table (auto-generated; avoid editing applied migrations)."""
    dependencies = [
        ('customadmin', '0004_auto_20201209_1008'),
    ]
    operations = [
        migrations.CreateModel(
            name='ShopProduct',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('active', models.BooleanField(default=True, verbose_name='Active')),
                ('created_at', models.DateTimeField(auto_now_add=True, help_text='Date when created.', null=True, verbose_name='Created At')),
                ('updated_at', models.DateTimeField(auto_now=True, help_text='Date when updated.', null=True, verbose_name='Updated At')),
                ('product_image', models.ImageField(blank=True, null=True, upload_to='products', verbose_name='Product images')),
                ('name', models.CharField(blank=True, default='', max_length=200, null=True)),
                ('price', models.TextField(blank=True, default='', max_length=500, null=True)),
                ('detail', models.CharField(blank=True, default='', max_length=255, null=True)),
            ],
            options={
                'verbose_name': 'Shop Product',
                'verbose_name_plural': 'Shop Products',
                # Newest products first by default.
                'ordering': ['-created_at'],
            },
        ),
    ]
|
[
"jay.citrusbug@gmail.com"
] |
jay.citrusbug@gmail.com
|
b2583d170c8144e89f9ed5ffc15ded383410cb49
|
534570bbb873293bd2646a1567b63d162fbba13c
|
/Python/Data Structure/Linear List/Array/K Sum/259.3-sum-smaller.py
|
d38c68bf8ab2e63a5ee2b79f4cefc4222690339d
|
[] |
no_license
|
XinheLIU/Coding-Interview
|
fa3df0f7167fb1bc6c8831748249ebaa6f164552
|
d6034c567cef252cfafca697aa316c7ad4e7d128
|
refs/heads/master
| 2022-09-17T14:30:54.371370
| 2022-08-19T15:53:35
| 2022-08-19T15:53:35
| 146,382,499
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
class Solution:
    def threeSumSmaller(self, nums: List[int], target: int) -> int:
        """Count index triples (i, j, k), i < j < k, with nums[i]+nums[j]+nums[k] < target.

        Sorts `nums` in place, then for each anchor uses two pointers: when
        the three-sum is below target, every index strictly between the two
        pointers also forms a qualifying triple with the left pointer.
        """
        nums.sort()
        count = 0
        size = len(nums)
        for first in range(size - 2):
            lo, hi = first + 1, size - 1
            while lo < hi:
                if nums[first] + nums[lo] + nums[hi] < target:
                    # All of lo+1 .. hi pair with lo under the target.
                    count += hi - lo
                    lo += 1
                else:
                    hi -= 1
        return count
|
[
"LIUXinhe@outlook.com"
] |
LIUXinhe@outlook.com
|
94df604e74040fe35e0f339fc89e6977d72911ab
|
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
|
/google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/enums/types/budget_campaign_association_status.py
|
a3c9d3bbc4cb3eedea948a2bd7839c61f472bdb3
|
[
"Apache-2.0"
] |
permissive
|
oltoco/googleapis-gen
|
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
|
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
|
refs/heads/master
| 2023-07-17T22:11:47.848185
| 2021-08-29T20:39:47
| 2021-08-29T20:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.enums',
marshal='google.ads.googleads.v6',
manifest={
'BudgetCampaignAssociationStatusEnum',
},
)
class BudgetCampaignAssociationStatusEnum(proto.Message):
    r"""Message describing the status of the association between the
    Budget and the Campaign.
    """
    class BudgetCampaignAssociationStatus(proto.Enum):
        r"""Possible statuses of the association between the Budget and
        the Campaign.
        """
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1      # Value unknown in this API version.
        ENABLED = 2      # Association is active.
        REMOVED = 3      # Association has been removed.
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
79f1cf41b9b75f0519be704b7f70b2277ae2a03c
|
e0ff22c17eaa1e7009089a58575567b0ead63d49
|
/scan/forms.py
|
96def1cf32e59163612c71d895e7389098144f02
|
[] |
no_license
|
slehaff/dblive
|
afcb5297057ad4d78177b886013250d2ed068424
|
7f7bdf38998bd65d00e5ac7df3ef5289b2781e83
|
refs/heads/master
| 2023-05-27T16:38:21.865344
| 2021-06-07T10:09:28
| 2021-06-07T10:09:28
| 303,078,424
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,143
|
py
|
from PIL import Image
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
class PicForm2(forms.Form):
    """Upload form for a device picture plus up to three extra pictures.

    Uses crispy-forms for rendering; submits via the 'Send' button, or
    'Fortryd' (cancel) which posts back to '/' without validation.
    """
    deviceid = forms.CharField(required=False, max_length=32, strip=True)
    cmd = forms.CharField(required=False, label='Cmd', max_length=50, strip=True)
    Picture = forms.ImageField(label="Billed", required=False)
    Pic1 = forms.ImageField(label="Billed1", required=False)
    Pic2 = forms.ImageField(label="Billed2", required=False)
    Pic3 = forms.ImageField(label="Billed3", required=False)
    # 'required' should be changed eventually
    def __init__(self, *args, **kwargs):
        super(PicForm2, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        #self.helper.form_class = 'form-horizontal'
        #self.helper.label_class = 'col-sm-5 col-sm-offset-2' # control-label
        #self.helper.field_class = 'col-sm-4'
        self.helper.form_tag = True
        self.helper.add_input(Submit('submit', 'Send'))
        # Cancel skips client-side validation and posts to the site root.
        self.helper.add_input(Submit('cancel', 'Fortryd', css_class='btn-secondary', formnovalidate='formnovalidate', formaction='/'))
|
[
"samir@lehaff.dk"
] |
samir@lehaff.dk
|
cda6831f28cfe2770d2ced88e722a7cac36e772a
|
468f54cf08a68fd3791f5266996eeb82331db338
|
/pyisy/variables/variable.py
|
ce6f4ad83663bf37bb35e3dc1a63fa58b77a36d3
|
[
"Apache-2.0"
] |
permissive
|
OverloadUT/PyISY
|
79d0b446f8c5358aa31743e8bd633b1d7f4762f2
|
69553057ceac57a6b4300c0070ee4ff163681750
|
refs/heads/master
| 2021-01-20T04:54:43.622920
| 2020-05-16T18:57:50
| 2020-05-16T18:57:50
| 89,751,436
| 0
| 0
| null | 2017-04-28T23:18:44
| 2017-04-28T23:18:44
| null |
UTF-8
|
Python
| false
| false
| 5,988
|
py
|
"""Manage variables from the ISY."""
from ..constants import (
ATTR_INIT,
ATTR_LAST_CHANGED,
ATTR_LAST_UPDATE,
ATTR_SET,
ATTR_STATUS,
ATTR_TS,
PROTO_INT_VAR,
PROTO_STATE_VAR,
TAG_ADDRESS,
URL_VARIABLES,
VAR_INTEGER,
)
from ..helpers import EventEmitter, now
class Variable:
    """
    Object representing a variable on the controller.

    | variables: The variable manager object.
    | vid: List of variable IDs.
    | vtype: List of variable types.
    | init: List of values that variables initialize to when the controller
      starts.
    | val: The current variable value.
    | ts: The timestamp for the last time the variable was edited.

    :ivar init: Watched property that represents the value the variable
      initializes to when the controller boots.
    :ivar lastEdit: Watched property that indicates the last time the variable
      was edited.
    :ivar val: Watched property that represents the value of the variable.
    """
    def __init__(self, variables, vid, vtype, vname, init, status, ts):
        """Initialize a Variable class."""
        super(Variable, self).__init__()
        self._id = vid
        self._init = init
        self._last_edited = ts
        # Both timestamps start at "now"; they diverge as updates/changes occur.
        self._last_update = now()
        self._last_changed = now()
        self._name = vname
        self._status = status
        self._type = vtype
        self._variables = variables
        self.isy = variables.isy
        # Listeners subscribed here are notified on status/init changes.
        self.status_events = EventEmitter()
    def __str__(self):
        """Return a string representation of the variable."""
        return f"Variable(type={self._type}, id={self._id}, value={self.status}, init={self.init})"
    def __repr__(self):
        """Return a string representation of the variable."""
        return str(self)
    @property
    def address(self):
        """Return the formatted Variable Type and ID (e.g. "1.5")."""
        return f"{self._type}.{self._id}"
    @property
    def init(self):
        """Return the initial state."""
        return self._init
    @init.setter
    def init(self, value):
        """Set the initial state and notify listeners."""
        # Only fire events on an actual change.
        if self._init != value:
            self._init = value
            self._last_changed = now()
            self.status_events.notify(self.status_feedback)
        return self._init
    @property
    def last_changed(self):
        """Return the UTC Time of the last status change for this node."""
        return self._last_changed
    @property
    def last_edited(self):
        """Return the last edit time."""
        return self._last_edited
    @last_edited.setter
    def last_edited(self, value):
        """Set the last edited time."""
        if self._last_edited != value:
            self._last_edited = value
        return self._last_edited
    @property
    def last_update(self):
        """Return the UTC Time of the last update for this node."""
        return self._last_update
    @last_update.setter
    def last_update(self, value):
        """Set the last update time."""
        if self._last_update != value:
            self._last_update = value
        return self._last_update
    @property
    def protocol(self):
        """Return the protocol for this entity (integer vs. state variable)."""
        return PROTO_INT_VAR if self._type == VAR_INTEGER else PROTO_STATE_VAR
    @property
    def name(self):
        """Return the Variable Name."""
        return self._name
    @property
    def status(self):
        """Return the current node state."""
        return self._status
    @status.setter
    def status(self, value):
        """Set the current node state and notify listeners."""
        if self._status != value:
            self._status = value
            self._last_changed = now()
            self.status_events.notify(self.status_feedback)
        return self._status
    @property
    def status_feedback(self):
        """Return information for a status change event."""
        return {
            TAG_ADDRESS: self.address,
            ATTR_STATUS: self._status,
            ATTR_INIT: self._init,
            ATTR_TS: self._last_edited,
            ATTR_LAST_CHANGED: self._last_changed,
            ATTR_LAST_UPDATE: self._last_update,
        }
    @property
    def vid(self):
        """Return the Variable ID."""
        return self._id
    def update(self, wait_time=0):
        """
        Update the object with the variable's parameters from the controller.

        | wait_time: Seconds to wait before updating.
        """
        self._last_update = now()
        # Delegates to the manager, which refreshes all variables.
        self._variables.update(wait_time)
    def set_init(self, val):
        """
        Set the initial value for the variable after the controller boots.

        | val: The value to have the variable initialize to.

        Raises ValueError when val is None.
        """
        if val is None:
            raise ValueError("Variable init must be an integer. Got None.")
        self.set_value(val, True)
    def set_value(self, val, init=False):
        """
        Set the value of the variable.

        | val: The value to set the variable to.
        | init: When True, set the boot-time initial value instead.

        Raises ValueError when val is None.
        """
        if val is None:
            raise ValueError("Variable value must be an integer. Got None.")
        # Build the REST URL: /vars/{set|init}/{type}/{id}/{value}
        req_url = self.isy.conn.compile_url(
            [
                URL_VARIABLES,
                ATTR_INIT if init else ATTR_SET,
                str(self._type),
                str(self._id),
                str(val),
            ]
        )
        if not self.isy.conn.request(req_url):
            self.isy.log.warning(
                "ISY could not set variable%s: %s.%s",
                " init value" if init else "",
                str(self._type),
                str(self._id),
            )
            return
        self.isy.log.debug(
            "ISY set variable%s: %s.%s",
            " init value" if init else "",
            str(self._type),
            str(self._id),
        )
        # Without auto-update events, poll the controller for the new value.
        if not self.isy.auto_update:
            self.update()
|
[
"noreply@github.com"
] |
OverloadUT.noreply@github.com
|
ee16236cee149923cbec2f4caed7e7a2a579a99e
|
67c77918ba8bb55b8508a252d7bddbbaa1cd414e
|
/Bioinformatics_Textbook_Track/02-BA1B.py
|
d7b475d32f515903acd3a28076de07f5db946532
|
[] |
no_license
|
anoubhav/Rosalind-Solutions
|
52406c4677d9daa68814141c9bf22d19ad896a8b
|
d7b591fb4850fa2f5c1b146eafabe77945c94d34
|
refs/heads/master
| 2021-08-07T06:25:47.056356
| 2020-04-10T10:52:20
| 2020-04-10T10:52:20
| 144,815,045
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
from collections import defaultdict
def MostFreqKmer(dna, k):
    """Return the most frequent k-mers in a string, space-separated.

    (str, int) -> str. Ties are all included, in order of first appearance.
    Returns '' when len(dna) < k (no k-mer exists) instead of raising
    ValueError from max() on an empty dict.
    """
    kmer_count = defaultdict(int)
    for i in range(len(dna) - k + 1):
        kmer_count[dna[i:i+k]] += 1
    if not kmer_count:
        return ''
    best = max(kmer_count.values())
    # Note: the original comprehension shadowed the parameter `k`; use
    # distinct names for the dict items to keep the parameter readable.
    return ' '.join(kmer for kmer, count in kmer_count.items() if count == best)
if __name__ == '__main__':
    # Read the DNA string and k from stdin, one per line.
    dna = input()
    k = int(input())
    print(MostFreqKmer(dna, k))
|
[
"anoubhav.agarwaal@gmail.com"
] |
anoubhav.agarwaal@gmail.com
|
1e254ef6262f6a2353a21e1b62b99ba344188ff8
|
19a4365d81507587ef09488edc7850c2227e7165
|
/994.py
|
96554ca2d2b849066c85334c4f0f81e9c29e95a0
|
[] |
no_license
|
akauntotesuto888/Leetcode-Lintcode-Python
|
80d8d9870b3d81da7be9c103199dad618ea8739a
|
e2fc7d183d4708061ab9b610b3b7b9e2c3dfae6d
|
refs/heads/master
| 2023-08-07T12:53:43.966641
| 2021-09-17T19:51:09
| 2021-09-17T19:51:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 872
|
py
|
class Solution:
    def orangesRotting(self, grid: List[List[int]]) -> int:
        """Return the minutes until every fresh orange (1) rots, or -1 if some
        fresh orange is unreachable from any rotten orange (2).

        Performs a level-by-level BFS, mutating `grid` in place; each BFS
        level is one minute of rot propagation.
        """
        rows, cols = len(grid), len(grid[0])
        # Seed the frontier with every initially-rotten cell.
        frontier = [(r, c) for r in range(rows) for c in range(cols)
                    if grid[r][c] == 2]
        minutes = -1
        while frontier:
            next_frontier = []
            for r, c in frontier:
                for nr, nc in ((r - 1, c), (r + 1, c), (r, c + 1), (r, c - 1)):
                    if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == 1:
                        grid[nr][nc] = 2
                        next_frontier.append((nr, nc))
            frontier = next_frontier
            minutes += 1
        # Any surviving fresh orange means the rot could never reach it.
        if any(cell == 1 for row in grid for cell in row):
            return -1
        return 0 if minutes == -1 else minutes
|
[
"tiant@qualtrics.com"
] |
tiant@qualtrics.com
|
bf2bae30cb193ff8b262ba23d6ec0d870c3220ac
|
d51cf2fe640f3bd5f4c4247ffaa198a30a58d96a
|
/mongo_db_course/01_data_extraction/16_extract_airports_quiz.py
|
a968065384c5c95507f14c724185a35f039ff185
|
[] |
no_license
|
AlexSkrn/sqlite_bash_scripts
|
44c6dbfc83b0e6be6126ede06e9ffb9d5805bae9
|
6f1d8c6581ace44a6d77c736cee6d5cdd40001b6
|
refs/heads/master
| 2021-07-09T08:43:48.193623
| 2021-04-08T19:44:29
| 2021-04-08T19:44:29
| 238,682,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,002
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Complete the 'extract_airports()' function so that it returns a list of airport
codes, excluding any combinations like "All".
Refer to the 'options.html' file in the tab above for a stripped down version
of what is actually on the website. The test() assertions are based on the
given file.
"""
from bs4 import BeautifulSoup
html_page = "options.html"
def extract_airports(page):
    """Return airport codes from the #AirportList <select> element in *page*.

    page -- path to an HTML file.
    Excludes aggregate pseudo-airports whose values start with "All"
    (e.g. "All", "AllMajors"). The original double-initialized `data`;
    the comprehension below removes the redundancy.
    """
    with open(page, "r") as html:
        soup = BeautifulSoup(html, "lxml")
        airport_list = soup.find(id='AirportList')
        return [option['value']
                for option in airport_list.find_all('option')
                if not option['value'].startswith('All')]
def test():
    """Smoke-test extract_airports against the bundled options.html fixture."""
    data = extract_airports(html_page)
    # The stripped-down fixture contains exactly 15 real airport codes.
    assert len(data) == 15
    assert "ATL" in data
    assert "ABR" in data
if __name__ == "__main__":
    test()
|
[
"askrn123@gmail.com"
] |
askrn123@gmail.com
|
2d5b87286cdfe5d088153553ecc12ce5664f7f4a
|
8ddda8fb6e5853126dcdafa3281c75071ada45c1
|
/vyperlogix/gds/space.py
|
52a4dad46eb96d4deecf17860057fc4ee2adde8b
|
[
"CC0-1.0"
] |
permissive
|
raychorn/chrome_gui
|
a48f3f9d931922a018e894f891ccd952476cd1ee
|
f1fade70b61af12ee43c55c075aa9cfd32caa962
|
refs/heads/master
| 2022-12-19T19:46:04.656032
| 2020-10-08T14:45:14
| 2020-10-08T14:45:14
| 299,167,534
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,226
|
py
|
'''
This module provides a function that constructs a list containing
the sizes of directories under a specified directory.
Copyright (C) 2002 GDS Software
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA
See http://www.gnu.org/licenses/licenses.html for more details.
'''
import os
__version__ = "$Id: space.py,v 1.4 2002/08/21 12:41:49 donp Exp $"
listG = []
def GetTotalFileSize(dummy_param, directory, list_of_files):
    '''Given a list of files and the directory they're in, add the
    total size and directory name to the global list listG.
    Signature matches the os.path.walk() visit-callback protocol
    (arg, dirname, names); dummy_param is the unused walk argument.
    '''
    global listG
    # Work from inside the directory so bare filenames can be stat'ed.
    currdir = os.getcwd()
    os.chdir(directory)
    total_size = 0
    if len(list_of_files) != 0:
        for file in list_of_files:
            if file == ".." or file == ".": continue
            # os.stat(...)[6] is st_size (bytes).
            size = os.stat(file)[6]
            total_size = total_size + size
    listG.append([total_size, directory])
    os.chdir(currdir)
def GetSize(directory):
    '''Returns a list of the form [ [a, b], [c, d], ... ] where
    a, c, ... are the number of total bytes in the directory and
    b, d, ... are the directory names.  The indicated directory
    is recursively descended and the results are sorted by directory
    size with the largest directory at the beginning of the list.
    Results are stored in the module-global listG (Python 2:
    os.path.walk was removed in Python 3).
    '''
    import os
    global listG
    listG = []
    os.path.walk(directory, GetTotalFileSize, "")
    # Largest directories first.
    listG.sort()
    listG.reverse()
def ShowBiggestDirectories(directory):
    '''Print a table of subdirectories of *directory* sorted by size,
    showing percentage of total, megabytes, and path.  Directories
    contributing less than 0.1% are summarized in a count line.
    (Python 2 code: print statements and the `string` module.)
    '''
    import string
    GetSize(directory)
    # Get total number of bytes
    total_size = 0
    for dir in listG:
        total_size = total_size + dir[0]
    if total_size != 0:
        print "For directory '%s': " % directory,
        print "[total space = %.1f MB]" % (total_size / 1e6)
        print "   %    MB   Directory"
        print "------ ----- " + "-" * 50
        not_shown_count = 0
        for dir in listG:
            percent = 100.0 * dir[0] / total_size
            # Normalize Windows-style backslashes for display.
            dir[1] = string.replace(dir[1], "\\\\", "/")
            if percent >= 0.1:
                print "%6.1f %5d  %s" % (percent, int(dir[0]/1e6), dir[1])
            else:
                not_shown_count = not_shown_count + 1
        if not_shown_count > 0:
            if not_shown_count > 1:
                print "  [%d directories not shown]" % not_shown_count
            else:
                print "  [%d directory not shown]" % not_shown_count
if __name__ == '__main__':
    import sys
    # Shift argv so the first remaining argument is the target directory;
    # default to the current directory when none is given.
    name = sys.argv[0]
    sys.argv = sys.argv[1:]
    if len(sys.argv) == 0:
        sys.argv.append(".")
    ShowBiggestDirectories(sys.argv[0])
|
[
"raychorn@gmail.com"
] |
raychorn@gmail.com
|
4c1ee37f5712dd73553e9b461af163e6df479098
|
34b09bc83e5726fccb524a93cf2742f5aeadedef
|
/8. Tree/2_answer.py
|
d5b9861160ea84ed0907fad53f3211d82894046b
|
[] |
no_license
|
mjson1954/WIC
|
57eb20ffe7aaf8695d679c893efacdeede573e72
|
670112209aacd274d09f6e9a89d948120486bfc8
|
refs/heads/master
| 2023-03-20T00:57:19.740025
| 2021-03-05T10:52:51
| 2021-03-05T10:52:51
| 289,925,829
| 0
| 0
| null | 2021-02-21T02:16:11
| 2020-08-24T12:46:58
|
Python
|
UTF-8
|
Python
| false
| false
| 351
|
py
|
def inorder(n, last):
    """Number the nodes of a 1-indexed heap-style complete tree in-order.

    Writes successive values of the module-global counter `cnt` into the
    module-global list `tree`, visiting left subtree, node, right subtree.
    `last` is the largest valid node index.
    """
    global cnt
    if n > last:
        return  # Past the last node: nothing to visit.
    inorder(2 * n, last)
    tree[n] = cnt
    cnt += 1
    inorder(2 * n + 1, last)
# For each test case, read N, in-order number a complete tree of N nodes,
# and print the values stored at the root and at node N // 2.
for test_case in range(int(input())):
    N = int(input())
    tree = [0] * (N + 1)
    cnt=1
    inorder(1, N)
    print('#{} {} {}'.format(test_case+1, tree[1], tree[N // 2]))
|
[
"mjson1954@gmail.com"
] |
mjson1954@gmail.com
|
28bb8c869e9ae1e4c9b795e3350951ba632fa612
|
3a51de9b289a141f23f7ad7feb97e937484ecbcf
|
/lib/player/player.py
|
9260ad3101f8da06695f47ff4a8a9b333a8ab636
|
[] |
no_license
|
AndyDeany/sunni-remake
|
52c40db465db0fa4cd04b9fbcb7f32e58e0fd52d
|
3d6c5f742e41cc8c2c39bfd2b380f63ea012ef0f
|
refs/heads/main
| 2023-05-01T05:47:50.858883
| 2021-05-07T17:35:44
| 2021-05-07T17:35:52
| 363,158,917
| 0
| 0
| null | 2021-05-12T13:32:49
| 2021-04-30T14:03:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,620
|
py
|
from collections import namedtuple
from lib.image import Image
from .moves import Kick, Headbutt, Frostbeam, Heal
from lib.character import Character, NotEnoughManaError
class Player(Character):
    """Class representing the Player (the character controlled by the user)."""
    # Page/state identifiers used by the game's page machine.
    CHARACTER_1 = "character1"
    CHARACTER_2 = "character2"
    CHOOSE_CHARACTER = "choose character"
    CHOOSE_ABILITY = "choose ability"
    DEAD = "player dead"
    INFO_X = 10
    def __init__(self, game, name="Sunni", character=None, *, level=1):
        """Create the player, position them, and set up their move sets."""
        super().__init__(game, name, level=level, display_stat_x=170, display_stat_y_start=360)
        self.calculate_stats()
        self.fully_restore()
        self.x = 150
        self.y = 380
        self.num_idle_frames = 4
        self.idle_fps = 6
        # Setting .character triggers image loading via the property setter.
        self.character = character
        Moves = namedtuple("Moves", "heal kick headbutt frostbeam")
        self.moves = Moves(Heal(160, 170, 350), Kick(), Headbutt(), Frostbeam())
        self.offensive_moves = [self.moves.kick, self.moves.headbutt, self.moves.frostbeam]
        self.defensive_moves = [self.moves.heal]
        self.selected_moves = None
    @property
    def character(self):
        """Return the currently-selected character skin name (or None)."""
        return self._character
    @character.setter
    def character(self, character):
        """Set the character skin and (re)load all of its sprite images."""
        self._character = character
        if character is None:
            return
        self.idle_frames = [Image(f"player/{character}_normal{n}.png") for n in range(self.num_idle_frames)]
        self.character_normal = Image(f"player/{character}_normal1.png")
        self.character_backwards = Image(f"player/{character}_backwards.png")
        self.character_scared = Image(f"player/{character}_scared.png", (self.x, self.y))
        self.character_scared_redflash = Image(f"player/{character}_scared_redflash.png", (self.x, self.y))
        self.character_tilt_left = Image(f"player/{character}_tilt_left.png")
        self.character_tilt_right = Image(f"player/{character}_tilt_right.png")
        self.character_dead = Image(f"player/{character}_dead.png")
        self.character_headbutt_stance = Image(f"player/{character}_headbutt_stance.png")
        self.character_frostbeam_stance = Image(f"player/{character}_frostbeam_stance.png", (self.x, self.y))
    def level_up(self, levels=1.0, restore=True):
        """Level the player up by the given number of levels (default 1).
        Restores the player to full if they pass an integer level and `restore==True` (default).
        """
        old_level = self.level
        self.level += levels
        if int(self.level) > int(old_level):    # i.e. if we actually levelled up
            self.calculate_stats()
            if restore:
                self.fully_restore()
    def calculate_stats(self):
        """Recompute max HP/mana from the (integer part of the) current level."""
        self.max_hp = 90 + 10*int(self.level)
        self.max_mana = 95 + 5*int(self.level)
    def use_move(self, move):
        """Attempt to use `move`; on insufficient mana show a notification instead."""
        try:
            self.change_mana(move)
        except NotEnoughManaError:
            self.game.page.show_mana_notification()
        else:
            self.selected_moves = None
            self.game.page.hide_mana_notification()
            self.game.page.current = move
    def next_move(self):
        """Continues to find out the player's next move."""
        if self.is_dead:
            # Death still grants a quarter level (without restoring stats).
            self.game.page.current = self.DEAD
            self.level_up(0.25, restore=False)
            self.game.save()
            return
        self.game.page.current = self.CHOOSE_ABILITY
    def _idle_display(self):
        """Render the player's idle animation at their position."""
        self.idle_animation(self.x, self.y)
    def _dead_display(self):
        """Render the player's death sprite."""
        self.character_dead.display(150, 480)
|
[
"oneandydean@hotmail.com"
] |
oneandydean@hotmail.com
|
9835945c0bfb18ed417925e0b02705344f7870b7
|
a4e502e9487cf17c53f9f931ec0dbc12168fea52
|
/tests/pyre/weaver/expressions_c.py
|
09c724e94b47be07f1a16ff25188f81bbe895f1b
|
[
"BSD-3-Clause"
] |
permissive
|
bryanvriel/pyre
|
bdc5dd59c46d53ff81f2ece532b9073ac3b65be1
|
179359634a7091979cced427b6133dd0ec4726ea
|
refs/heads/master
| 2021-09-28T00:10:26.454282
| 2018-11-11T16:42:07
| 2018-11-11T16:42:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,708
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Exercise a C expression weaver
"""
def test():
    """Exercise the C expression mill of pyre.weaver and return the weaver."""
    # get the packages
    import pyre.weaver
    import pyre.calc
    # instantiate a weaver
    weaver = pyre.weaver.weaver(name="sanity")
    weaver.language = "c"
    # access its mill
    # NOTE(review): reading .language back appears to yield the mill object
    # for the configured language, not the string — confirm against pyre docs.
    mill = weaver.language
    # build a few nodes
    zero = pyre.calc.var(value=0)
    one = pyre.calc.var(value=1)
    # check expression generation
    # the trivial cases
    assert mill.expression(zero) == '0'
    assert mill.expression(one) == '1'
    # arithmetic
    assert mill.expression(one + zero) == '(1) + (0)'
    assert mill.expression(one - zero) == '(1) - (0)'
    assert mill.expression(one * zero) == '(1) * (0)'
    assert mill.expression(one / zero) == '(1) / (0)'
    # C integer division: true and floor division render identically
    assert mill.expression(one // zero) == '(1) / (0)'
    assert mill.expression(one % zero) == '(1) % (0)'
    assert mill.expression(one ** zero) == 'pow(1,0)'
    assert mill.expression(-one) == '-(1)'
    assert mill.expression(abs(one)) == 'abs(1)'
    # comparisons
    assert mill.expression(one == zero) == '(1) == (0)'
    assert mill.expression(one != zero) == '(1) != (0)'
    assert mill.expression(one <= zero) == '(1) <= (0)'
    assert mill.expression(one >= zero) == '(1) >= (0)'
    assert mill.expression(one < zero) == '(1) < (0)'
    assert mill.expression(one > zero) == '(1) > (0)'
    # boolean
    assert mill.expression(one & zero) == '(1) && (0)'
    assert mill.expression(one | zero) == '(1) || (0)'
    # return the configured weaver
    return weaver
# main
if __name__ == "__main__":
    test()
# end of file
|
[
"michael.aivazis@orthologue.com"
] |
michael.aivazis@orthologue.com
|
da07e2f0dd5b923746161c2fc6fb1063975a5ddf
|
bc441bb06b8948288f110af63feda4e798f30225
|
/container_sdk/model/tuna_service/test_plan_pb2.pyi
|
65a3b9c5324b9494fb47733d0fb869bd301ffe6d
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,626
|
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from container_sdk.model.tuna_service.requirement_instance_pb2 import (
RequirementInstance as container_sdk___model___tuna_service___requirement_instance_pb2___RequirementInstance,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
# NOTE(review): auto-generated protobuf type stub for TestPlan — regenerate
# from the .proto definition rather than editing this class by hand.
class TestPlan(google___protobuf___message___Message):
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    # Scalar fields; names mirror the .proto field names (camelCase).
    instanceId = ... # type: typing___Text
    name = ... # type: typing___Text
    reviewDate = ... # type: typing___Text
    startExcutePlanDate = ... # type: typing___Text
    projectStartDate = ... # type: typing___Text
    projectPlanCompleteDate = ... # type: typing___Text
    projectActualCompleteDate = ... # type: typing___Text
    functionMissCount = ... # type: builtin___int
    backendBugCount = ... # type: builtin___int
    bugPercent = ... # type: typing___Text
    bugTotal = ... # type: builtin___int
    capabilityCount = ... # type: builtin___int
    codingErrCount = ... # type: builtin___int
    delayPercent = ... # type: typing___Text
    environmentCount = ... # type: builtin___int
    frontBugCount = ... # type: builtin___int
    projectScore = ... # type: typing___Text
    requirementBlurryCount = ... # type: builtin___int
    scenarioBugCount = ... # type: builtin___int
    scenarioCount = ... # type: builtin___int
    status = ... # type: typing___Text
    suggestionCount = ... # type: builtin___int
    unableAppearCount = ... # type: builtin___int

    # Repeated message field: the requirement instances attached to this plan.
    @property
    def requirement_instance(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[container_sdk___model___tuna_service___requirement_instance_pb2___RequirementInstance]: ...

    def __init__(self,
        *,
        instanceId : typing___Optional[typing___Text] = None,
        name : typing___Optional[typing___Text] = None,
        reviewDate : typing___Optional[typing___Text] = None,
        startExcutePlanDate : typing___Optional[typing___Text] = None,
        projectStartDate : typing___Optional[typing___Text] = None,
        projectPlanCompleteDate : typing___Optional[typing___Text] = None,
        projectActualCompleteDate : typing___Optional[typing___Text] = None,
        functionMissCount : typing___Optional[builtin___int] = None,
        backendBugCount : typing___Optional[builtin___int] = None,
        bugPercent : typing___Optional[typing___Text] = None,
        bugTotal : typing___Optional[builtin___int] = None,
        capabilityCount : typing___Optional[builtin___int] = None,
        codingErrCount : typing___Optional[builtin___int] = None,
        delayPercent : typing___Optional[typing___Text] = None,
        environmentCount : typing___Optional[builtin___int] = None,
        frontBugCount : typing___Optional[builtin___int] = None,
        projectScore : typing___Optional[typing___Text] = None,
        requirementBlurryCount : typing___Optional[builtin___int] = None,
        scenarioBugCount : typing___Optional[builtin___int] = None,
        scenarioCount : typing___Optional[builtin___int] = None,
        status : typing___Optional[typing___Text] = None,
        suggestionCount : typing___Optional[builtin___int] = None,
        unableAppearCount : typing___Optional[builtin___int] = None,
        requirement_instance : typing___Optional[typing___Iterable[container_sdk___model___tuna_service___requirement_instance_pb2___RequirementInstance]] = None,
        ) -> None: ...
    # Python 2 accepted buffer/unicode payloads; Python 3 only bytes.
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> TestPlan: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> TestPlan: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"backendBugCount",b"backendBugCount",u"bugPercent",b"bugPercent",u"bugTotal",b"bugTotal",u"capabilityCount",b"capabilityCount",u"codingErrCount",b"codingErrCount",u"delayPercent",b"delayPercent",u"environmentCount",b"environmentCount",u"frontBugCount",b"frontBugCount",u"functionMissCount",b"functionMissCount",u"instanceId",b"instanceId",u"name",b"name",u"projectActualCompleteDate",b"projectActualCompleteDate",u"projectPlanCompleteDate",b"projectPlanCompleteDate",u"projectScore",b"projectScore",u"projectStartDate",b"projectStartDate",u"requirementBlurryCount",b"requirementBlurryCount",u"requirement_instance",b"requirement_instance",u"reviewDate",b"reviewDate",u"scenarioBugCount",b"scenarioBugCount",u"scenarioCount",b"scenarioCount",u"startExcutePlanDate",b"startExcutePlanDate",u"status",b"status",u"suggestionCount",b"suggestionCount",u"unableAppearCount",b"unableAppearCount"]) -> None: ...
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
e52b90544d1ceaf93d83e0f1899866350df1397b
|
790589224695a2c7dc3977c718bb0f7cb5f6429e
|
/stats/fan98test.py
|
f321b8ec683a0ee393a9d1f769c96c44158afabb
|
[] |
no_license
|
nikolaims/delayed_nfb
|
58ab51b3a9dd798f18579d9ebfead89c095413ac
|
37a8fbbd6e75a604ff092695fefd6a6421101de4
|
refs/heads/master
| 2022-12-19T04:30:34.252323
| 2020-09-18T13:23:27
| 2020-09-18T13:23:27
| 282,847,322
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,323
|
py
|
import numpy as np
import os
def eval_z_score(data_points1, data_points2):
    """Two-sample z-score per block plus the degrees of freedom.

    Each input is a (subjects x blocks) array; statistics are taken over
    the subject axis.
    """
    mean_a, mean_b = np.mean(data_points1, 0), np.mean(data_points2, 0)
    std_a, std_b = np.std(data_points1, 0), np.std(data_points2, 0)
    n_a, n_b = data_points1.shape[0], data_points2.shape[0]

    # degrees of freedom (see page 1011, left top paragraph)
    dof = n_a + n_b - 2

    # z-score over time (see formula 13)
    pooled_se = np.sqrt(std_a ** 2 / n_a + std_b ** 2 / n_b)
    return (mean_a - mean_b) / pooled_se, dof
def eval_f_stat(data_points):
    """One-way ANOVA F statistic per block.

    data_points is a list of (subjects x blocks) arrays, one per group.
    Returns (list of per-block F statistics, d1, d2) where d1/d2 are the
    between/within degrees of freedom.
    """
    group_sizes = [group.shape[0] for group in data_points]
    total_n = sum(group[:, 0].size for group in data_points)
    k_groups = len(data_points)
    n_blocks = data_points[0].shape[1]
    n_subjects = sum(group_sizes)

    f_stats_list = []
    for block in range(n_blocks):
        columns = [group[:, block] for group in data_points]
        grand_mean = sum(col.sum() for col in columns) / n_subjects
        group_means = [col.mean() for col in columns]
        # Between-group variance (mean squares), d1 = k - 1.
        ms_between = sum(size * (mean - grand_mean) ** 2
                         for size, mean in zip(group_sizes, group_means)) / (k_groups - 1)
        # Within-group variance (mean squares), d2 = N - k.
        ms_within = sum(np.sum((col - mean) ** 2)
                        for col, mean in zip(columns, group_means)) / (total_n - k_groups)
        # Tiny epsilon guards against a zero within-group variance.
        f_stats_list.append(ms_between / (ms_within + 1e-10))

    return f_stats_list, k_groups - 1, total_n - k_groups
def adaptive_neyman_test(z_star, d, return_extra=False):
    """Adaptive Neyman test statistic (formulas 4 and 6 of the referenced paper).

    z_star : 1-d array of (transformed) z-scores.
    d : degrees of freedom of the underlying t distribution, or None for
        standard-normal scores (disables the small-sample correction factor).
    return_extra : if True, also return the optimal truncation point m.
    """
    # eval statistic for each number of first blocks (see formula 6)
    T_an = np.zeros(len(z_star))
    # Variance-correction factor for t-distributed scores (1 for normal scores).
    d_factor = 1 if d is None else ((d - 2) ** 2 * (d - 4) / (d ** 2 * (d - 1))) ** 0.5
    for m in range(len(z_star)):
        T_an[m] = np.sum(z_star[:m + 1] ** 2 - 1) * d_factor / (2 * (m + 1)) ** 0.5
    # find maximum T (see formula 6)
    stat = np.max(T_an)
    # compute final stat (see formula 4): extreme-value normalization of the max
    loglogn = np.log(np.log(len(z_star)))
    stat = (2 * loglogn) ** 0.5 * stat - (2 * loglogn + 0.5 * np.log(loglogn) - 0.5 * np.log(4 * np.pi))
    if return_extra:
        # Number of leading coefficients at which the maximum was attained.
        opt_m = np.argmax(T_an) + 1
        return stat, opt_m
    return stat
def corrcoef_test(z_star, d, return_extra=False):
    """Pearson correlation of z_star with its index 0..n-1.

    `d` is accepted for signature compatibility with other stat functions
    but is not used. With return_extra=True the extra slot is None.
    """
    indices = np.arange(len(z_star))
    stat = np.corrcoef(indices, z_star)[0, 1]
    return (stat, None) if return_extra else stat
def fourier_transform(x):
    """Interleaved real/imaginary Fourier coefficients of x.

    FFT normalized by sqrt(n) (see the formula between 17 and 18); real and
    imaginary parts are interleaved as in example 1 (page 1013, 2nd
    paragraph) and the first len(x) entries are returned.
    """
    coeffs = np.fft.fft(x) / np.sqrt(len(x))
    interleaved = np.empty(2 * len(coeffs) - 1)
    interleaved[0::2] = coeffs.real
    interleaved[1::2] = coeffs[1:].imag
    return interleaved[:len(coeffs)]
def legendre_projector(n):
    """Orthonormal n x n polynomial basis (QR of centered monomials)."""
    indices = np.arange(n)
    centered = indices - np.mean(indices)
    # Column k holds the centered index raised to the k-th power.
    vandermonde = np.column_stack([centered ** k for k in range(n)])
    q, _ = np.linalg.qr(vandermonde)
    return q
def legendre_transform(x):
    """Project x onto the orthonormal polynomial basis of matching size."""
    # Basis construction inlined from legendre_projector: QR-orthonormalized
    # centered monomials of degree 0..n-1.
    n = len(x)
    indices = np.arange(n)
    centered = indices - np.mean(indices)
    basis = np.column_stack([centered ** k for k in range(n)])
    q, _ = np.linalg.qr(basis)
    return x.dot(q)
def identity_transform(x):
    """No-op transform: return x unchanged (plug-in for the transform slot)."""
    return x
def simulate_h0_distribution(n, d, transform, stat_fun, n_iter=200000, verbose=True, sim_verbose=False):
    """Monte-Carlo simulate (and disk-cache) the H0 distribution of stat_fun.

    n : length of each simulated score vector.
    d : t-distribution degrees of freedom, or None for standard normal draws.
    transform : callable applied to each raw score vector (e.g.
        fourier_transform, legendre_transform, identity_transform).
    stat_fun : callable (z_star, d) -> scalar test statistic.
    n_iter : number of Monte-Carlo draws.

    Results are cached under '_fan98_temp/' keyed by the function names and
    parameters, so repeated calls with identical arguments load from disk.
    """
    # NOTE(review): 'cash' is presumably a typo for 'cache'; names kept as-is.
    cash_dir = '_fan98_temp'
    cash_file = os.path.join(cash_dir, 'h0_{}_{}_n{}_d{}_n_iter{}.npy'
                             .format(stat_fun.__name__, transform.__name__, n, d, n_iter))
    if os.path.exists(cash_file):
        if verbose:
            print('Load from {}'.format(cash_file))
        stats_h0 = np.load(cash_file)
    else:
        if verbose or sim_verbose:
            print('Simulate and save to {}'.format(cash_file))
        stats_h0 = np.zeros(n_iter)
        for k in range(n_iter):
            # Draw raw H0 scores: standard normal if d is None, else Student-t.
            if d is None:
                z_star = np.random.normal(size=n)
            else:
                z_star = np.random.standard_t(d, size=n)
            z_star = transform(z_star)
            stats_h0[k] = stat_fun(z_star, d)
        if not os.path.exists(cash_dir):
            os.makedirs(cash_dir)
        np.save(cash_file, stats_h0)
    return stats_h0
def get_p_val_one_tailed(val, h0_distribution):
    """One-tailed p-value: fraction of the null sample strictly above val."""
    n_exceeding = np.count_nonzero(h0_distribution > val)
    return n_exceeding / h0_distribution.shape[0]
def get_p_val_two_tailed(val, h0_distribution):
    """Two-tailed p-value: fraction of the null sample beyond +/-|val|."""
    magnitude = np.abs(val)
    n_extreme = (np.count_nonzero(h0_distribution > magnitude)
                 + np.count_nonzero(h0_distribution < -magnitude))
    return n_extreme / h0_distribution.shape[0]
if __name__ == '__main__':
    # Compare simulated critical values against the published table for n = 20.
    n = 20
    import pandas as pd
    paper_table = pd.read_csv('release/stats/upperquartile.csv', sep=';').values
    p_vals = paper_table[0, 1:]
    stats = paper_table[paper_table[:, 0] == n, 1:][0]
    d = None
    # Bug fix: the original call was simulate_h0_distribution(n, d,
    # transform='legendre'), which (a) omitted the required stat_fun
    # argument, raising TypeError, and (b) passed a string where a callable
    # is expected (the function reads transform.__name__ and calls it).
    # TODO(review): confirm adaptive_neyman_test is the statistic the
    # published upper-quantile table refers to.
    stats_h0 = simulate_h0_distribution(n, d, legendre_transform, adaptive_neyman_test)
    # Simulated critical levels at the paper's p-values.
    levels = np.zeros_like(p_vals)
    for k, p_val in enumerate(p_vals):
        levels[k] = np.quantile(stats_h0, 1 - p_val)
    print(' '.join(['{:.2f}'.format(p * 100) for p in p_vals]))
    print(' '.join(['{:.2f}'.format(level) for level in levels]))
    print(' '.join(['{:.2f}'.format(level) for level in stats]))
    # Relative difference (percent) between the table and the simulation.
    print(' '.join(['{:.2f}'.format(level) for level in (stats - levels) / stats * 100]))
|
[
"n.m.smetanin@gmail.com"
] |
n.m.smetanin@gmail.com
|
229f80c2b25ce34f5817bf9a25216175cb2e46cd
|
48983b88ebd7a81bfeba7abd6f45d6462adc0385
|
/HakerRank/algorithms/sorting/countingsort1.py
|
e135cc9ca8e5df255df2c85165a7a381e75f68d4
|
[] |
no_license
|
lozdan/oj
|
c6366f450bb6fed5afbaa5573c7091adffb4fa4f
|
79007879c5a3976da1e4713947312508adef2e89
|
refs/heads/master
| 2018-09-24T01:29:49.447076
| 2018-06-19T14:33:37
| 2018-06-19T14:33:37
| 109,335,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
# author: Daniel Lozano
# source: HackerRank ( https://www.hackerrank.com )
# problem name: Algorithms: Sorting: Counting Sort 1
# problem url: https://www.hackerrank.com/challenges/countingsort1/problem
# date: 8/20/2017
# Read the (unused) declared length, then count occurrences of each value.
n = int(input())
values = [int(token) for token in input().split()]
frequency = [0] * (max(values) + 1)
for value in values:
    frequency[value] += 1
# Emit the counts for 0..max(values), space-separated.
print(*frequency)
|
[
"lozanodaniel02@gmail.com"
] |
lozanodaniel02@gmail.com
|
8a39bf68b2b2cea3c56735111181d89cb786306c
|
a742bd051641865d2e5b5d299c6bc14ddad47f22
|
/algorithm/牛客网/14-链表中倒数第k个节点.py
|
b5d4a48dcbdb55bdc68a5ec35835d1df39b8ff13
|
[] |
no_license
|
lxconfig/UbuntuCode_bak
|
fb8f9fae7c42cf6d984bf8231604ccec309fb604
|
3508e1ce089131b19603c3206aab4cf43023bb19
|
refs/heads/master
| 2023-02-03T19:10:32.001740
| 2020-12-19T07:27:57
| 2020-12-19T07:27:57
| 321,351,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,597
|
py
|
"""
输入一个链表,输出该链表中倒数第k个结点。
以空间换时间
"""
class ListNode:
    """Singly linked list node: a value plus a pointer to the next node."""

    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
    def FindKthToTail(self, head, k):
        """Return the value of the k-th node counting from the end of the
        list, or None if k is out of range (k <= 0 or k > length).

        Two-pointer ("ruler") technique: advance a lead pointer k-1 steps,
        then slide both pointers together until the lead reaches the tail;
        head then sits on the answer.
        """
        # write code here
        '''
        Alternative, trading space for time (runtime 22ms, memory 5856k):
        ret = []
        while head:
            ret.insert(0, head.val) # (original note: the OJ only accepts it written this way)
            head = head.next
        if k > len(ret) or k <= 0:
            return
        return ret[k-1]
        '''
        # Runtime: 30ms, memory: 5704k
        # Like sliding a ruler along the list: when its right end reaches the
        # tail, its left end is aligned with the k-th node from the end.
        temp = head
        if head == None or k <= 0:
            return
        while k > 1:
            if temp.next:
                # First advance temp by k-1 steps.
                temp = temp.next
                k -= 1
            else:
                # temp ran off the end of the list, so k exceeds its length.
                return
        # Now advance temp and head together until temp reaches the tail.
        while temp.next:
            head = head.next
            temp = temp.next
        return head.val
if __name__ == "__main__":
solution = Solution()
a = ListNode(1)
b = ListNode(2)
c = ListNode(3)
d = ListNode(4)
e = ListNode(5)
f = ListNode(6)
a.next = b
b.next = c
c.next = d
d.next = e
e.next = f
print(solution.FindKthToTail(a, 3))
|
[
"525868229@qq.com"
] |
525868229@qq.com
|
8350946547ae600ae9e371657261443a53ba657a
|
e5aff0646237acf3639ac805652143cd8267bf33
|
/template.py
|
828dd581ec0c8f3190ff0720f6f760fc3cb2513a
|
[
"Apache-2.0"
] |
permissive
|
HTDerekLiu/BlenderToolbox
|
42943cf9fee7277d319ff5baffe7810c4c27dfe4
|
8044e77268ff018514ad1501c291f6deb6a07ec6
|
refs/heads/master
| 2023-07-20T05:14:58.736225
| 2023-07-10T16:33:17
| 2023-07-10T16:33:17
| 162,408,776
| 408
| 48
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,529
|
py
|
import sys, os
sys.path.append(os.path.join(os.path.abspath(os.getcwd()))) # change this to your path to “path/to/BlenderToolbox/
import BlenderToolBox as bt
import os, bpy, bmesh
import numpy as np
# Blender rendering template: loads a mesh, assigns a material, lights and
# frames the scene, then saves both a .blend file and a rendered image.
cwd = os.getcwd()

'''
RENDER AN IMAGE STEP-BY-STEP:
1. copy "template.py" to your preferred local folder
2. In "template.py":
- change the second line to your path to the BlenderToolbox, such as "sys.path.append('path/to/BlenderToolbox/')"
- change "meshPath"
- set your desired material (select one from the demo scripts)
3. run "blender --background --python template.py" in terminal, then terminate the code when it starts rendering. This step outputs a "test.blend"
4. open "test.blend" with your blender software
5. In blender UI, adjust:
- location, rotation, scale of the mesh
- material parameters
6. In "template.py":
- type in the adjusted parameters from GUI
- set outputPath and increase imgRes_x, imgRes_y, numSamples
7. run "blender --background --python template.py" again to output your final image
'''

outputPath = os.path.join(cwd, './template.png')

## initialize blender
imgRes_x = 720 # recommend > 1080
imgRes_y = 720 # recommend > 1080
numSamples = 100 # recommend > 200
exposure = 1.5
use_GPU = True
bt.blenderInit(imgRes_x, imgRes_y, numSamples, exposure, use_GPU)

## read mesh
meshPath = './meshes/spot.ply'
location = (1.12, -0.14, 0) # (GUI: click mesh > Transform > Location)
rotation = (90, 0, 227) # (GUI: click mesh > Transform > Rotation)
scale = (1.5,1.5,1.5) # (GUI: click mesh > Transform > Scale)
mesh = bt.readMesh(meshPath, location, rotation, scale)

## set shading (uncomment one of them)
bpy.ops.object.shade_smooth() # Option1: Gouraud shading
# bpy.ops.object.shade_flat() # Option2: Flat shading
# bt.edgeNormals(mesh, angle = 10) # Option3: Edge normal shading

## subdivision
bt.subdivision(mesh, level = 1)

###########################################
## Set your material here (see other demo scripts)

# bt.colorObj(RGBA, Hue, Saturation, Value, Bright, Contrast)
RGBA = (144.0/255, 210.0/255, 236.0/255, 1)
meshColor = bt.colorObj(RGBA, 0.5, 1.0, 1.0, 0.0, 2.0)
bt.setMat_plastic(mesh, meshColor)

## End material
###########################################

## set invisible plane (shadow catcher)
bt.invisibleGround(shadowBrightness=0.9)

## set camera
## Option 1: don't change camera setting, change the mesh location above instead
camLocation = (3, 0, 2)
lookAtLocation = (0,0,0.5)
focalLength = 45 # (UI: click camera > Object Data > Focal Length)
cam = bt.setCamera(camLocation, lookAtLocation, focalLength)
## Option 2: if you really want to set camera based on the values in GUI, then
# camLocation = (3, 0, 2)
# rotation_euler = (63,0,90)
# focalLength = 45
# cam = bt.setCamera_from_UI(camLocation, rotation_euler, focalLength = 35)

## set light
## Option1: Three Point Light System
# bt.setLight_threePoints(radius=4, height=10, intensity=1700, softness=6, keyLoc='left')
## Option2: simple sun light
lightAngle = (6, -30, -155)
strength = 2
shadowSoftness = 0.3
sun = bt.setLight_sun(lightAngle, strength, shadowSoftness)

## set ambient light
bt.setLight_ambient(color=(0.1,0.1,0.1,1))

## set gray shadow to completely white with a threshold (optional but recommended)
bt.shadowThreshold(alphaThreshold = 0.05, interpolationMode = 'CARDINAL')

## save blender file so that you can adjust parameters in the UI
bpy.ops.wm.save_mainfile(filepath=os.getcwd() + '/test.blend')

## save rendering
bt.renderImage(outputPath, cam)
|
[
"hsuehtil@cs.toronto.edu"
] |
hsuehtil@cs.toronto.edu
|
3415a4ecb83d7e175dabb499de9265c8cc036262
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_appointing.py
|
26b9e8439c7ab22458130ace2febc7adeaf9fdc6
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
#calss header
class _APPOINTING():
def __init__(self,):
self.name = "APPOINTING"
self.definitions = appoint
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['appoint']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
850ec92026f401141c59fd3e0a0d2ad28d4ca8fe
|
ab464f019ed1669fa4f0fbf2a7f25e662d996948
|
/proj1/Python-Test1/morsels_proxydict2.py
|
c91c7e265c8e16301a026dd6092f7af3df73b900
|
[] |
no_license
|
maniraja1/Python
|
fed2aa746c690560d7744b1378259af1cdfa9bb0
|
c9e6e12836fed47cdba495a07f43d7599265bea1
|
refs/heads/master
| 2021-06-03T19:17:16.591303
| 2021-01-04T16:06:25
| 2021-01-04T16:06:25
| 89,430,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,729
|
py
|
class ProxyDict:
    """Read-only live view over a backing dict.

    The proxy keeps a reference to the dict it was given (no copy), so
    changes to the underlying mapping are visible through the proxy.
    """

    def __init__(self, data=None):
        # Bug fix: the original used a mutable default (`data={}`), so all
        # no-argument instances silently shared one dict.
        self.data = {} if data is None else data

    def __getitem__(self, key):
        return self.data[key]

    def __iter__(self):
        # Iterating a ProxyDict yields its keys, like a plain dict.
        return iter(self.data)

    def __len__(self):
        return len(self.data)

    def keys(self):
        """Return the keys as a list."""
        return list(self.data.keys())

    def values(self):
        """Return the values as a list."""
        return list(self.data.values())

    def items(self):
        """Return the (key, value) pairs as a list of tuples."""
        return list(self.data.items())

    def get(self, key, default=None):
        """Return the value for key, or default when absent.

        Bug fix: the original wrapped the result in a set literal
        (`{self.data.get(...)}`), so get() returned e.g. {'Trey Hunner'}
        instead of 'Trey Hunner'.
        """
        return self.data.get(key, default)

    def __repr__(self):
        return f"ProxyDict({self.data})"

    def __eq__(self, other):
        # Equal to another ProxyDict or to a plain dict with equal contents.
        if isinstance(other, ProxyDict):
            return self.data == other.data
        if isinstance(other, dict):
            return self.data == other
        return False
# Demo: the proxy reflects live changes made to the backing dict.
user_data = {'name': 'Trey Hunner', 'active': False}
proxy_data = ProxyDict(user_data)
print(proxy_data.keys())
print(set(proxy_data.keys()))
print(proxy_data['name'])
print(proxy_data['active'])
user_data['active'] = True
# True — the proxy wraps the same dict object, not a copy.
print(proxy_data['active'])
print(len(proxy_data))
print(proxy_data.items())
print(proxy_data.values())
print(proxy_data.get('name'))
print(proxy_data.get('shoe_size', 0))
print(proxy_data.get('d'))
# Iterating a ProxyDict yields its keys.
for key in proxy_data:
    print(key)
print(proxy_data)
# Equality: p1 shares the dict, p2 wraps an independent copy with equal contents.
p1 = ProxyDict(user_data)
p2 = ProxyDict(user_data.copy())
print(p1==p2)
print(p2 == user_data)
# NOTE(review): trivial sanity check — always prints True.
if None == None:
    print(True)
else:
    print(False)
|
[
"mrajagopal@mrajagopal-ltm.internal.salesforce.com"
] |
mrajagopal@mrajagopal-ltm.internal.salesforce.com
|
5c8633a022c983ef715a29fc3751eecef317daca
|
0e47f4e7765938324cee0186c4ba0bf257507b0b
|
/docs/source/conf.py
|
51cc86e7396589f9260c1bc7af7ae2d7111a6bbe
|
[
"BSD-3-Clause"
] |
permissive
|
JarryShaw/lorem.js
|
fcd9cceb6ff9d21a273a995dd36ad9a1fd4644c1
|
b2e8e163e065b0b5d2a367f564a3a8ac756104ca
|
refs/heads/master
| 2023-08-03T05:49:24.006149
| 2023-07-30T01:58:22
| 2023-07-30T01:58:22
| 213,123,617
| 0
| 0
|
BSD-3-Clause
| 2023-07-30T01:58:24
| 2019-10-06T07:09:14
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 2,467
|
py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
# Sphinx build configuration for the lorem.js documentation.
project = 'lorem.js'
copyright = '2020, Jarry Shaw'
author = 'Jarry Shaw'

# The full version, including alpha/beta/rc tags
release = '0.1.2'


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx_js',
]

# sphinx-js settings: document the TypeScript sources under ts/.
primary_domain = 'js'
js_language = 'typescript'
js_source_path = '../../ts/'
# jsdoc_config_path = 'conf.json'

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'show_powered_by': False,
    'github_user': 'JarryShaw',
    'github_repo': 'lorem.js',
    'github_banner': True,
    'github_type': 'star',
    #'show_related': False,
    #'note_bg': '#FFF59C',
    #'travis_button': True,
    #'codecov_button': True,
}
|
[
"jarryshaw@icloud.com"
] |
jarryshaw@icloud.com
|
97286bf0248e398684ddbc5cbc43952b3ebf61b4
|
187f114edca30e0fec49cdaee873bbe614295442
|
/docs/source/conf.py
|
802726555ddced6fbbd0320aa0d3c743866fc976
|
[
"MIT"
] |
permissive
|
scorphus/pvsim
|
c5b3f4535d8c404cf1a4fbe9c731c5dee7fc8251
|
778349147245c754e37ab7d44d385b03780105ac
|
refs/heads/master
| 2022-10-24T02:12:28.331528
| 2020-06-08T17:09:49
| 2020-06-08T17:09:49
| 109,788,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,807
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of pvsim.
# https://github.com/scorphus/pvism
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2017, Pablo Santiago Blum de Aguiar <pablo.aguiar@gmail.com>
# PV Simulator Challenge documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 3 21:36:07 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# import os
import sys
sys.path.append('..')
sys.path.append('../..')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx build configuration for the PV Simulator Challenge documentation.
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'PV Simulator Challenge'
copyright = '2017, Pablo Santiago Blum de Aguiar'
author = 'Pablo Santiago Blum de Aguiar'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
    ]
}


# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'PVSimulatorChallengedoc'


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'PVSimulatorChallenge.tex', 'PV Simulator Challenge Documentation',
     'Pablo Santiago Blum de Aguiar', 'manual'),
]


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pvsimulatorchallenge', 'PV Simulator Challenge Documentation',
     [author], 1)
]


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'PVSimulatorChallenge', 'PV Simulator Challenge Documentation',
     author, 'PVSimulatorChallenge', 'One line description of project.',
     'Miscellaneous'),
]


# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
[
"scorphus@gmail.com"
] |
scorphus@gmail.com
|
c7eaf6bea58237cdccc33d21a51b534bf54ca155
|
d1b59545c498a1188b84a874324522385dbadfe4
|
/A01/q1/q2.py
|
2ff0ac09805da363d938afc94d3b1439a1e4c0be
|
[] |
no_license
|
khat3680/basic_python-104
|
570c64daf6a89c5696966c75a9901b0d15eb758e
|
c3a79e08cb5fe6dcba5130c295f395c6130d559b
|
refs/heads/master
| 2022-12-09T21:32:27.068654
| 2020-09-11T17:08:47
| 2020-09-11T17:08:47
| 294,756,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
"""
------------------------------------------------------------------------
Assignment 1 Question 2
C
------------------------------------------------------------------------
Author: Anshul Khatri
ID: 193313680
Email: khat3680@mylaurier.ca
__updated__ = "2019-09-14"
------------------------------------------------------------------------
"""
print("hello , welcome to new innings of life")
varig =12
varig0 =10
print( " result", varig/varig0)
outtput =0
outtput += varig0
print (outtput)
|
[
"anshulskhatri@gmail.com"
] |
anshulskhatri@gmail.com
|
149137c48aaf71c39a2d48bd44e95b892c37bca9
|
627094b5e463bd113f626450eaceb01dfa4ff5d5
|
/udsoncan/services/ReadDataByPeriodicIdentifier.py
|
d943ab4a2cdd1f4a238f9e91cbb7fec3ea179296
|
[
"MIT"
] |
permissive
|
DaleChen0351/python-udsoncan
|
49eefcb299e2a4fabe0bf168905cc86ef43d6f62
|
c495e872c69c4ea05e3b477d2a1088cb83167a17
|
refs/heads/master
| 2020-04-20T06:10:25.252315
| 2019-03-28T07:38:17
| 2019-03-28T07:38:17
| 168,675,483
| 0
| 0
|
MIT
| 2019-03-28T07:38:19
| 2019-02-01T09:42:02
|
Python
|
UTF-8
|
Python
| false
| false
| 718
|
py
|
from . import *
from udsoncan.Response import Response
from udsoncan.exceptions import *
class ReadDataByPeriodicIdentifier(BaseService):
    """UDS service 0x2A (ReadDataByPeriodicIdentifier) — placeholder.

    Request building and response parsing are not implemented yet; both
    factory methods raise NotImplementedError.
    """
    _sid = 0x2A

    # Negative response codes a server may return for this service.
    # NOTE(review): 'IncorrectMessageLegthOrInvalidFormat' (sic) matches the
    # attribute name as spelled in udsoncan's Response.Code — do not "fix"
    # the spelling here without checking the library.
    supported_negative_response = [	Response.Code.IncorrectMessageLegthOrInvalidFormat,
                            Response.Code.ConditionsNotCorrect,
                            Response.Code.RequestOutOfRange,
                            Response.Code.SecurityAccessDenied
                            ]

    @classmethod
    def make_request(cls):
        raise NotImplementedError('Service is not implemented')

    @classmethod
    def interpret_response(cls, response):
        raise NotImplementedError('Service is not implemented')

    class ResponseData(BaseResponseData):
        def __init__(self):
            super().__init__(ReadDataByPeriodicIdentifier)
|
[
"py.lessard@gmail.com"
] |
py.lessard@gmail.com
|
bad5caea1aca73bb22c458643376be763d78501b
|
7d4e3f36fb1bac247599510820c0f537417f99e4
|
/jnpy/__init__.py
|
27623371e04ec7b6314a18697122d86eded15828
|
[
"MIT"
] |
permissive
|
jojoquant/jonpy
|
562065ea74ac1038c36d3993f43e71cc39a799a7
|
c874060af4b129ae09cee9f8542517b7b2f6573b
|
refs/heads/master
| 2022-09-08T07:15:37.051279
| 2022-03-18T06:41:22
| 2022-03-18T06:41:22
| 244,432,056
| 7
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
# !/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Datetime : 2020/1/22 上午1:40
# @Author : Fangyang
# @Software : PyCharm
if __name__ == '__main__':
    # Package marker module; nothing to execute when run directly.
    pass
|
[
"fangyang.jing@hotmail.com"
] |
fangyang.jing@hotmail.com
|
17c62b25766d04711a43abe1a6664544e0969e56
|
82080ef68a203f141ab6435c32fdcc79351b448e
|
/web_dynamic/3-hbnb.py
|
f2591110ea9142633bb3c28838dee8b09cc5a20f
|
[
"MIT"
] |
permissive
|
dalexach/AirBnB_clone_v4
|
60291a8d10d58f75d707fdc4a0c11095e4c36dc5
|
04e1db0691cbe5cefb6a5f42f7e008e8ba24d5d6
|
refs/heads/master
| 2023-02-08T02:26:40.018351
| 2020-02-18T20:14:16
| 2020-02-18T20:14:16
| 240,527,417
| 0
| 2
|
MIT
| 2023-02-02T03:27:32
| 2020-02-14T14:32:44
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,356
|
py
|
#!/usr/bin/python3
"""
Flask App that integrates with AirBnB static HTML Template
"""
from flask import Flask, render_template, url_for
from models import storage
from uuid import uuid4
# flask setup
app = Flask(__name__)
# Treat '/x' and '/x/' as the same route.
app.url_map.strict_slashes = False
port = 5000
host = '0.0.0.0'


# begin flask page rendering
@app.teardown_appcontext
def teardown_db(exception):
    """
    after each request, this method calls .close() (i.e. .remove()) on
    the current SQLAlchemy Session
    """
    storage.close()


@app.route('/3-hbnb/')
def hbnb_filters(the_id=None):
    """
    handles request to custom template with states, cities & amenities
    """
    # NOTE(review): `the_id` is never supplied by the route and is unused
    # here — presumably left over from a filtered variant; confirm before
    # removing.
    state_objs = storage.all('State').values()
    states = dict([state.name, state] for state in state_objs)
    amens = storage.all('Amenity').values()
    places = storage.all('Place').values()
    # Map user id -> "First Last" for display in the rendered places.
    users = dict([user.id, "{} {}".format(user.first_name, user.last_name)]
                 for user in storage.all('User').values())
    # Fresh UUID per request, passed to the template (cache busting).
    cache_id = uuid4()
    return render_template('3-hbnb.html',
                           states=states,
                           amens=amens,
                           places=places,
                           users=users,
                           cache_id=cache_id)


if __name__ == "__main__":
    """
    MAIN Flask App"""
    app.run(host=host, port=port)
|
[
"diahancaroll@hotmail.com"
] |
diahancaroll@hotmail.com
|
04eef729215eff975b7ee7bd345bbf3cd621784c
|
f5f87fabe766e69c7a003d070b6447ef5a45c603
|
/stylelens_search/models/__init__.py
|
6c599ebfcc38fd644fb7183bd6deae699ba00867
|
[] |
no_license
|
BlueLens/stylelens-search
|
4a4f17f876e1781a6fee5663bfa62f83f657d7db
|
7c21d59d49915688777714fb896d65a101dab28a
|
refs/heads/master
| 2021-07-15T08:55:45.256052
| 2017-10-21T15:44:35
| 2017-10-21T15:44:35
| 107,691,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
# coding: utf-8
"""
stylelens-search
This is a API document for Image search on fashion items\"
OpenAPI spec version: 0.0.1
Contact: devops@bluehack.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from .image import Image
from .image_search_response import ImageSearchResponse
from .image_search_response_data import ImageSearchResponseData
from .images_array import ImagesArray
|
[
"master@bluehack.net"
] |
master@bluehack.net
|
ef03dc7074da5007a1486441d0229d4e4db99142
|
e0e9b1446ccaccdd7332b2f916e737cdaced8a8d
|
/.scripts/submit.py
|
6ce7eff369601b1752b5709ac46a92bb65b5cbf9
|
[] |
no_license
|
marygmccann/cse-34872-su20-assignments
|
3afd51f8cfc7c0262b7937d113755baf49cbd20e
|
c2c2f21da1abd86e646c16ea86f21702efb34d68
|
refs/heads/master
| 2022-08-24T10:32:31.545059
| 2020-05-19T21:03:43
| 2020-05-19T21:03:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,252
|
py
|
#!/usr/bin/env python3
import glob
import json
import os
import sys
import requests
import yaml
# Globals
ASSIGNMENTS = {}
DREDD_QUIZ_URL = 'https://dredd.h4x0r.space/quiz/cse-34872-su20/'
DREDD_QUIZ_MAX = 2.0
if bool(os.environ.get('DEBUG', False)):
DREDD_CODE_URL = 'https://dredd.h4x0r.space/debug/cse-34872-su20/'
else:
DREDD_CODE_URL = 'https://dredd.h4x0r.space/code/cse-34872-su20/'
DREDD_CODE_MAX = 6.0
# Utilities
def add_assignment(assignment, path=None):
    """Record *assignment* -> *path* in the global ASSIGNMENTS table.

    Only names beginning with 'reading' or 'challenge' are accepted;
    the path defaults to the assignment name itself.
    """
    if path is None:
        path = assignment
    if assignment.startswith(('reading', 'challenge')):
        ASSIGNMENTS[assignment] = path
def print_results(results):
    """Pretty-print (key, value) result pairs in sorted key order.

    Numeric values render with two decimals; 'stdout'/'diff' payloads go
    on their own line; any other non-numeric value falls back to str().
    """
    for key, value in sorted(results):
        label = key.title()
        try:
            print('{:>8} {:.2f}'.format(label, value))
        except ValueError:
            # Value was not a number; pick a plain-text layout instead.
            if key in ('stdout', 'diff'):
                print('{:>8}\n{}'.format(label, value))
            else:
                print('{:>8} {}'.format(label, value))
# Submit Functions
def submit_quiz(assignment, path):
    """POST the quiz answers found under *path* to the Dredd quiz endpoint.

    Looks for answers.json, then answers.yaml.  A file that exists but
    fails to parse aborts immediately with status 1.  Note the loop does
    not break on success, so if both files parse the YAML answers
    override the JSON ones.  Returns 0 when the reported score reaches
    DREDD_QUIZ_MAX, otherwise 1.
    """
    answers = None
    for mod_load, ext in ((json.load, 'json'), (yaml.safe_load, 'yaml')):
        try:
            answers = mod_load(open(os.path.join(path, 'answers.' + ext)))
        except IOError as e:
            # Missing file is fine — try the next format.
            pass
        except Exception as e:
            print('Unable to parse answers.{}: {}'.format(ext, e))
            return 1
    if answers is None:
        print('No quiz found (answers.{json,yaml})')
        return 1
    print('\nSubmitting {} quiz ...'.format(assignment))
    response = requests.post(DREDD_QUIZ_URL + assignment, data=json.dumps(answers))
    print_results(response.json().items())
    return 0 if response.json().get('score', 0) >= DREDD_QUIZ_MAX else 1
def submit_code(assignment, path):
    """POST every program.* source under *path* to the Dredd code endpoint.

    Returns 0 if any submission reaches DREDD_CODE_MAX, 1 otherwise
    (including when no program.* source files exist).
    """
    sources = glob.glob(os.path.join(path, 'program.*'))
    if not sources:
        print('No code found (program.*)')
        return 1
    result = 1
    for source in sources:
        print('\nSubmitting {} {} ...'.format(assignment, os.path.basename(source)))
        response = requests.post(DREDD_CODE_URL + assignment, files={'source': open(source)})
        print_results(response.json().items())
        # min() keeps 0 (a passing submission) once any source passes.
        result = min(result, 0 if response.json().get('score', 0) >= DREDD_CODE_MAX else 1)
    return result
# Main Execution
# Add GitLab/GitHub branch (CI exposes the branch name in these variables).
for variable in ['CI_BUILD_REF_NAME', 'GITHUB_HEAD_REF']:
    try:
        add_assignment(os.environ[variable])
    except KeyError:
        pass
# Add local git branch
try:
    add_assignment(os.popen('git symbolic-ref -q --short HEAD 2> /dev/null').read().strip())
except OSError:
    pass
# Add current directory
add_assignment(os.path.basename(os.path.abspath(os.curdir)), os.curdir)
# For each assignment, submit quiz answers and program code
if not ASSIGNMENTS:
    print('Nothing to submit!')
    sys.exit(1)
exit_code = 0
# The process exit status accumulates one point per failed submission.
for assignment, path in sorted(ASSIGNMENTS.items()):
    print('Submitting {} assignment ...'.format(assignment))
    if 'reading' in assignment:
        exit_code += submit_quiz(assignment, path)
    elif 'challenge' in assignment:
        exit_code += submit_code(assignment, path)
sys.exit(exit_code)
# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
|
[
"pbui@nd.edu"
] |
pbui@nd.edu
|
6407d8cd05af0356ac59c26e791b779813da547d
|
ef1f62cf4e53f856bf763ac0dee73f054518530d
|
/Week_07/208.Implement_Trie.py
|
00f27ebe6bcf165fca9b12eb1cd01c797725850f
|
[] |
no_license
|
ZHHJemotion/algorithm008-class01
|
3338af3619d8e1754a62af6a852f517b47298d95
|
5bb7d2b74110df0b5788b94c69582552d711563a
|
refs/heads/master
| 2022-11-12T09:26:24.941738
| 2020-06-30T15:29:20
| 2020-06-30T15:29:20
| 255,102,230
| 0
| 0
| null | 2020-04-12T14:39:17
| 2020-04-12T14:39:17
| null |
UTF-8
|
Python
| false
| false
| 1,821
|
py
|
# Implement a trie with insert, search, and startsWith methods.
#
# Example:
#
#
# Trie trie = new Trie();
#
# trie.insert("apple");
# trie.search("apple"); // returns true
# trie.search("app"); // returns false
# trie.startsWith("app"); // returns true
# trie.insert("app");
# trie.search("app"); // returns true
#
#
# Note:
#
#
# You may assume that all inputs are consist of lowercase letters a-z.
# All inputs are guaranteed to be non-empty strings.
#
# Related Topics Design Trie
# leetcode submit region begin(Prohibit modification and deletion)
class Trie:
    """Prefix tree over lowercase words, backed by nested dicts.

    A node is a dict mapping each next character to its child node; the
    sentinel key '#' marks that a complete word ends at that node.
    """

    def __init__(self):
        """Create an empty trie."""
        self.root = {}
        self.end_of_word = '#'

    def insert(self, word: str) -> None:
        """Add *word* to the trie."""
        node = self.root
        for ch in word:
            if ch not in node:
                node[ch] = {}
            node = node[ch]
        node[self.end_of_word] = self.end_of_word

    def _walk(self, chars):
        """Follow *chars* from the root; return the final node, or None
        if the path does not exist."""
        node = self.root
        for ch in chars:
            node = node.get(ch)
            if node is None:
                return None
        return node

    def search(self, word: str) -> bool:
        """Return True iff *word* was inserted as a complete word."""
        node = self._walk(word)
        return node is not None and self.end_of_word in node

    def startsWith(self, prefix: str) -> bool:
        """Return True iff some inserted word starts with *prefix*."""
        return self._walk(prefix) is not None
# Your Trie object will be instantiated and called as such:
# obj = Trie()
# obj.insert(word)
# param_2 = obj.search(word)
# param_3 = obj.startsWith(prefix)
# leetcode submit region end(Prohibit modification and deletion)
|
[
"zhhjemotion@hotmail.com"
] |
zhhjemotion@hotmail.com
|
a573ee26fbe78730cc20595670f4408e417d3492
|
5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5
|
/blimgui/dist/OpenGL/GLES1/OES/single_precision.py
|
d67e1d503c813791b2ce19f41892ef8db20a7272
|
[
"MIT"
] |
permissive
|
juso40/bl2sdk_Mods
|
8422a37ca9c2c2bbf231a2399cbcb84379b7e848
|
29f79c41cfb49ea5b1dd1bec559795727e868558
|
refs/heads/master
| 2023-08-15T02:28:38.142874
| 2023-07-22T21:48:01
| 2023-07-22T21:48:01
| 188,486,371
| 42
| 110
|
MIT
| 2022-11-20T09:47:56
| 2019-05-24T20:55:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,317
|
py
|
'''OpenGL extension OES.single_precision
This module customises the behaviour of the
OpenGL.raw.GLES1.OES.single_precision to provide a more
Python-friendly API
Overview (from the spec)
This extension adds commands with single-precision floating-point
parameters corresponding to the commands that only variants that
accept double-precision floating-point input. This allows an
application to avoid using double-precision floating-point
data types. New commands are added with an 'f' prefix.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/single_precision.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.OES.single_precision import *
from OpenGL.raw.GLES1.OES.single_precision import _EXTENSION_NAME
def glInitSinglePrecisionOES():
    '''Return boolean indicating whether this extension is available'''
    # Local import — standard pattern in these auto-generated PyOpenGL
    # extension modules (see the AUTOGENERATED marker below).
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
glClipPlanefOES=wrapper.wrapper(glClipPlanefOES).setInputArraySize(
'equation', 4
)
glGetClipPlanefOES=wrapper.wrapper(glGetClipPlanefOES).setOutput(
'equation',size=(4,),orPassIn=True
)
### END AUTOGENERATED SECTION
|
[
"justin.sostmann@googlemail.com"
] |
justin.sostmann@googlemail.com
|
2f9a53d49fd4ed9c71a2cd957ff8bd6d59d9d5d0
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/MY_REPOS/web-dev-notes-resource-site/2-content/Data-Structures/1-Python/maths/gcd.py
|
51d2711d3abfc8b69402ae7ee013a4a7b2f630df
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
def gcd(a, b):
    """Return the greatest common divisor of integers *a* and *b*.

    Iterative Euclidean algorithm: repeatedly replace (a, b) with
    (b, a mod b) until the remainder vanishes.
    """
    while b:
        a, b = b, a % b
    return a
def lcm(a, b):
    """Return the lowest common multiple of integers *a* and *b*.

    BUG FIX: the original used true division (`/`), which returns a
    float and silently loses precision for large integers even though
    the product is always exactly divisible by gcd(a, b).  Floor
    division keeps the result an exact int.
    """
    return a * b // gcd(a, b)
"""
Given a positive integer x, computes the number of trailing zero of x.
Example
Input : 34(100010)
~~~~~^
Output : 1
Input : 40(101000)
~~~^^^
Output : 3
"""
def trailing_zero(x):
    """Count the trailing zero bits of non-negative integer *x*.

    Returns 0 when x == 0 (the loop body never runs), matching the
    original guard `while x and not x & 1`.
    """
    count = 0
    while x != 0 and x & 1 == 0:
        x >>= 1
        count += 1
    return count
"""
Given two non-negative integer a and b,
computes the greatest common divisor of a and b using bitwise operator.
"""
def gcd_bit(a, b):
    """Binary (Stein's) GCD of non-negative integers *a* and *b*."""
    # Strip the powers of two from each operand; the smaller power is
    # a factor of the answer and is restored at the end.
    shift_a = trailing_zero(a)
    shift_b = trailing_zero(b)
    common_shift = min(shift_a, shift_b)
    a >>= shift_a
    b >>= shift_b
    # Subtract-and-shift loop: keep a >= b, subtract, re-odd a.
    while b != 0:
        if a < b:
            a, b = b, a
        a = a - b
        a >>= trailing_zero(a)
    return a << common_shift
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
1d996f9d9e66d2a64deb825439e2acd7feef60e3
|
28a9cc19537f7264421afeb9883962aa480c2616
|
/deals/migrations/0002_product_data_category_id.py
|
cc5cb13b2afca538cacf42cdf415cf940e913c68
|
[] |
no_license
|
ujjwalagrawal17/BrokerAppBackend
|
b33df886b389aabfcfe7278c3e41c99d13d4fbb3
|
1b8ffd18e4c5257d222c17b8aece3351b549b204
|
refs/heads/master
| 2021-01-22T21:23:18.807792
| 2017-03-18T19:06:44
| 2017-03-18T19:06:44
| 85,425,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-12-29 18:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an integer `category_id` column (default 0) to product_data."""
    dependencies = [
        ('deals', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='product_data',
            name='category_id',
            field=models.IntegerField(default=0),
        ),
    ]
|
[
"ujjwal.iitism@gmail.com"
] |
ujjwal.iitism@gmail.com
|
4340e9b7a8286e8c59be954b02524b64f84cd8d3
|
c36aae30fad06ea64dd5d158d364c18462a70799
|
/website/books/models.py
|
ea80d86c5c777f2270f02934900264f101096070
|
[] |
no_license
|
srikanthpragada/building_web_applications_using_django_framework
|
60222ca503c97aa375726d4496db9e36a72ebb4b
|
920780593d6324414d0c727a9bce2db171614350
|
refs/heads/master
| 2023-01-03T12:31:10.851750
| 2020-10-31T15:49:10
| 2020-10-31T15:49:10
| 308,915,547
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
from django.db import models
class Book(models.Model):
    """A book record persisted in the `books` table."""
    # Titles are unique; every field is required (null=False).
    title = models.CharField(max_length=30, unique=True, null=False)
    author = models.CharField(max_length=30, null=False)
    price = models.IntegerField(null=False)
    def __str__(self):
        """Human-readable one-line summary (id - title - author - price)."""
        return f"{self.id} - {self.title} - {self.author} - {self.price}"
    class Meta:
        db_table = 'books'
|
[
"srikanthpragada@gmail.com"
] |
srikanthpragada@gmail.com
|
8fb78e1b6dc83ef71a9e50e3592e4b6d439b160e
|
b77a36eb7c2214151feccca0c4894381147d6051
|
/movie/migrations/0009_alter_movie_movie_rating.py
|
2d33a2519713b24a769d3ba4f64c82b9413b674c
|
[] |
no_license
|
mnk-q/watchlist
|
b15536bb8a2a81f1935bb08a25b04dc27953f896
|
3ac14b1ba48a2bd7aae84e7a3c64cf60fedd6f17
|
refs/heads/master
| 2023-06-04T21:14:51.235899
| 2021-06-25T17:41:37
| 2021-06-25T17:41:37
| 349,739,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
# Generated by Django 3.2.3 on 2021-06-17 13:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change movie.movie_rating to a CharField of max length 6."""
    dependencies = [
        ('movie', '0008_alter_movie_movie_studio'),
    ]
    operations = [
        migrations.AlterField(
            model_name='movie',
            name='movie_rating',
            field=models.CharField(max_length=6),
        ),
    ]
|
[
"="
] |
=
|
305d5f76e5fb3052bcd63b76465b8fe2c3e33e0b
|
92e26b93057723148ecb8ca88cd6ad755f2e70f1
|
/SE/BottleNeck/r40_SE/network.py
|
bba54ad210394d7c821cc9509f84a78419bb598a
|
[] |
no_license
|
lyuyanyii/CIFAR
|
5906ad9fbe1377edf5b055098709528e06b5ace2
|
d798834942d6a9d4e3295cda77488083c1763962
|
refs/heads/master
| 2021-08-30T20:09:52.819883
| 2017-12-19T08:37:37
| 2017-12-19T08:37:37
| 112,701,370
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,185
|
py
|
import numpy as np
from megskull.network import Network
from megskull.opr.all import (
Conv2D, Pooling2D, FullyConnected, Softmax,
CrossEntropyLoss, Dropout, ElementwiseAffine
)
from megskull.opr.helper.elemwise_trans import ReLU, Identity, Sigmoid
from megskull.graph.query import GroupNode
from megskull.opr.netsrc import DataProvider
import megskull.opr.helper.param_init as pinit
from megskull.opr.helper.param_init import AutoGaussianParamInitializer as G
from megskull.opr.helper.param_init import ConstantParamInitializer as C
from megskull.opr.regularizer import BatchNormalization as BN
import megskull.opr.arith as arith
from megskull.network import NetworkVisitor
global idx
idx = 0
def conv_bn(inp, ker_shape, stride, padding, out_chl, isrelu):
    """Conv2D -> BatchNorm -> learnable affine (-> optional ReLU) block.

    Bumps the module-global `idx` so every operator in the graph gets a
    unique name ("convN", "bnN", "bnaffN").
    """
    global idx
    idx += 1
    l1 = Conv2D(
        "conv{}".format(idx), inp, kernel_shape = ker_shape, stride = stride, padding = padding,
        output_nr_channel = out_chl,
        #W = G(mean = 0, std = ((1) / (ker_shape**2 * inp.partial_shape[1]))**0.5),
        #b = C(0),
        nonlinearity = Identity()
    )
    l2 = BN("bn{}".format(idx), l1, eps = 1e-9)
    # Per-channel scale k (init 1) and shift b (init 0) after normalization.
    l2 = ElementwiseAffine("bnaff{}".format(idx), l2, shared_in_channels = False, k = C(1), b = C(0))
    if isrelu:
        l2 = arith.ReLU(l2)
    return l2
def res_layer(inp, chl, stride = 1, proj = False):
    """Bottleneck residual layer (1x1 -> 3x3 -> 1x1) with an SE branch.

    When *proj* is True the shortcut is projected with a strided 1x1
    conv so its shape matches the main path.
    """
    pre = inp
    inp = conv_bn(inp, 1, stride, 0, chl // 4, True)
    inp = conv_bn(inp, 3, 1, 1, chl // 4, True)
    inp = conv_bn(inp, 1, 1, 0, chl, False)
    if proj:
        pre = conv_bn(pre, 1, stride, 0, chl, False)
    name = inp.name
    #Global Average Pooling
    SE = inp.mean(axis = 3).mean(axis = 2)
    #fc0
    # NOTE(review): both FC layers keep the full channel width — there is
    # no reduction ratio in this squeeze-excite MLP.
    SE = FullyConnected(
        "fc0({})".format(name), SE, output_dim = SE.partial_shape[1],
        nonlinearity = ReLU()
    )
    #fc1
    SE = FullyConnected(
        "fc1({})".format(name), SE, output_dim = SE.partial_shape[1],
        nonlinearity = Sigmoid()
    )
    # Broadcast the per-channel gates over H and W, then add the shortcut.
    inp = inp * SE.dimshuffle(0, 1, 'x', 'x')
    inp = arith.ReLU(inp + pre)
    return inp
def res_block(inp, chl, i, n):
    """Stack of *n* residual layers at width *chl*.

    The first layer projects the shortcut and downsamples (stride 2)
    except for the very first block (i == 0), which keeps stride 1.
    """
    stride = 2
    if i == 0:
        stride = 1
    inp = res_layer(inp, chl, stride = stride, proj = True)
    for i in range(n - 1):
        inp = res_layer(inp, chl)
    return inp
def make_network(minibatch_size = 128, debug = False):
    """Build the SE-ResNet graph for 32x32 RGB (CIFAR-style) input.

    Stem conv -> three residual stages of 4 layers at widths 64/128/256
    -> 8x8 average pooling -> 10-way softmax.  Returns the megskull
    Network with a cross-entropy loss attached.  When *debug* is True,
    prints every operator with its shape and connections.
    """
    patch_size = 32
    inp = DataProvider("data", shape = (minibatch_size, 3, patch_size, patch_size), dtype = np.float32)
    label = DataProvider("label", shape = (minibatch_size, ), dtype = np.int32)
    lay = conv_bn(inp, 3, 1, 1, 16, True)
    n = 4
    lis = [16 * 4, 32 * 4, 64 * 4]
    for i in range(len(lis)):
        lay = res_block(lay, lis[i], i, n)
    #global average pooling
    #feature = lay.mean(axis = 2).mean(axis = 2)
    feature = Pooling2D("pooling", lay, window = 8, stride = 8, padding = 0, mode = "AVERAGE")
    pred = Softmax("pred", FullyConnected(
        "fc0", feature, output_dim = 10,
        #W = G(mean = 0, std = (1 / 64)**0.5),
        #b = C(0),
        nonlinearity = Identity()
    ))
    network = Network(outputs = [pred])
    network.loss_var = CrossEntropyLoss(pred, label)
    if debug:
        visitor = NetworkVisitor(network.loss_var)
        for i in visitor.all_oprs:
            print(i)
            print(i.partial_shape)
            print("input = ", i.inputs)
            print("output = ", i.outputs)
            print()
    return network
if __name__ == "__main__":
make_network(debug = True)
|
[
"315603442@qq.com"
] |
315603442@qq.com
|
13d7260187bc3dedef4fe80405688fa7a830b32e
|
2c4ba5a56b7a3d3e1c286b678eb8068f51c23046
|
/week3/2-Resolve-with-Functions/solutions/twin_primes.py
|
ff03fddc715021dd92dde7664a6c768f15866bd7
|
[] |
no_license
|
OgnyanPenkov/Programming0-1
|
3b69757bd803814585d77479fc987a0ee92d0390
|
8078f316ea2b81216c21cf78e7cf1afc17f54846
|
refs/heads/master
| 2021-01-21T15:12:20.814368
| 2015-10-07T18:16:39
| 2015-10-07T18:16:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
def is_prime(n):
    """Return True if integer *n* is prime, False otherwise.

    Improvements over the original: trial division now stops at sqrt(n)
    instead of n - 1 (the original was O(n)), even numbers above 2 are
    rejected up front, and the local flag that shadowed the function
    name `is_prime` is gone.
    """
    if n <= 1:
        return False
    if n <= 3:
        # 2 and 3 are prime.
        return True
    if n % 2 == 0:
        return False
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
# Read candidate p and test it together with its neighbours p-2 and p+2.
p = input("Enter number: ")
p = int(p)
q = p - 2
r = p + 2
is_p_prime = is_prime(p)
is_q_prime = is_prime(q)
is_r_prime = is_prime(r)
# p is prime but neither neighbour is: p has no twin prime.
if is_p_prime and (not is_q_prime) and (not is_r_prime):
    print(str(p) + " is prime")
    print("But " + str(q) + " and " + str(r) + " are not.")
elif is_p_prime:
    # Print each twin-prime pair that p participates in.
    if is_q_prime:
        print(q, p)
    if is_r_prime:
        print(p, r)
else:
    print(str(p) + " is not prime.")
|
[
"radorado@hackbulgaria.com"
] |
radorado@hackbulgaria.com
|
f250d33cc3036d938d84a63612f7bddeef67203a
|
fe19d2fac4580d463132e61509bd6e3cc2cf958d
|
/toontown/toon/PlayerDetailPanel.py
|
7fab34d34832d1ccb0846ed7cb013f3fb958085d
|
[] |
no_license
|
t00nt0wn1dk/c0d3
|
3e6db6dd42c3aa36ad77709cf9016176a3f3a44f
|
7de105d7f3de0f8704b020e32fd063ee2fad8d0d
|
refs/heads/master
| 2021-01-01T16:00:15.367822
| 2015-03-21T21:25:52
| 2015-03-21T21:25:55
| 32,647,654
| 3
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,657
|
py
|
# 2013.08.22 22:26:15 Pacific Daylight Time
# Embedded file name: toontown.toon.PlayerDetailPanel
from pandac.PandaModules import *
from toontown.toonbase.ToontownGlobals import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.showbase import DirectObject
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.directnotify import DirectNotifyGlobal
import DistributedToon
from toontown.friends import FriendInviter
import ToonTeleportPanel
from toontown.toonbase import TTLocalizer
from toontown.hood import ZoneUtil
from toontown.toonbase.ToontownBattleGlobals import Tracks, Levels
globalAvatarDetail = None
def showPlayerDetail(avId, avName, playerId = None):
global globalAvatarDetail
if globalAvatarDetail != None:
globalAvatarDetail.cleanup()
globalAvatarDetail = None
globalAvatarDetail = PlayerDetailPanel(avId, avName, playerId)
return
def hidePlayerDetail():
global globalAvatarDetail
if globalAvatarDetail != None:
globalAvatarDetail.cleanup()
globalAvatarDetail = None
return
def unloadPlayerDetail():
global globalAvatarDetail
if globalAvatarDetail != None:
globalAvatarDetail.cleanup()
globalAvatarDetail = None
return
class PlayerDetailPanel(DirectFrame):
__module__ = __name__
notify = DirectNotifyGlobal.directNotify.newCategory('ToonAvatarDetailPanel')
def __init__(self, avId, avName, playerId = None, parent = aspect2dp, **kw):
self.playerId = playerId
self.isPlayer = 0
self.playerInfo = None
if playerId:
self.isPlayer = 1
if base.cr.playerFriendsManager.playerId2Info.has_key(playerId):
self.playerInfo = base.cr.playerFriendsManager.playerId2Info[playerId]
if not self.playerInfo.onlineYesNo:
avId = None
else:
avId = None
self.avId = avId
self.avName = avName
self.avatar = None
self.createdAvatar = None
buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
gui = loader.loadModel('phase_3.5/models/gui/avatar_panel_gui')
detailPanel = gui.find('**/avatarInfoPanel')
textScale = 0.132
textWrap = 10.4
if self.playerId:
textScale = 0.1
textWrap = 18.0
optiondefs = (('pos', (0.525, 0.0, 0.525), None),
('scale', 0.5, None),
('relief', None, None),
('image', detailPanel, None),
('image_color', GlobalDialogColor, None),
('text', '', None),
('text_wordwrap', textWrap, None),
('text_scale', textScale, None),
('text_pos', (-0.125, 0.75), None))
self.defineoptions(kw, optiondefs)
DirectFrame.__init__(self, parent)
self.dataText = DirectLabel(self, text='', text_scale=0.085, text_align=TextNode.ALeft, text_wordwrap=15, relief=None, pos=(-0.85, 0.0, 0.725))
if self.avId:
self.avText = DirectLabel(self, text=TTLocalizer.PlayerToonName % {'toonname': self.avName}, text_scale=0.09, text_align=TextNode.ALeft, text_wordwrap=15, relief=None, pos=(-0.85, 0.0, 0.56))
guiButton = loader.loadModel('phase_3/models/gui/quit_button')
self.gotoToonButton = DirectButton(parent=self, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale=1.15, text=TTLocalizer.PlayerShowToon, text_scale=0.08, text_pos=(0.0, -0.02), textMayChange=0, pos=(0.43, 0, 0.415), command=self.__showToon)
ToonTeleportPanel.hideTeleportPanel()
FriendInviter.hideFriendInviter()
self.bCancel = DirectButton(self, image=(buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr')), relief=None, text=TTLocalizer.AvatarDetailPanelCancel, text_scale=0.05, text_pos=(0.12, -0.01), pos=(-0.865, 0.0, -0.765), scale=2.0, command=self.__handleCancel)
self.bCancel.show()
self.initialiseoptions(PlayerDetailPanel)
self.__showData()
buttons.removeNode()
gui.removeNode()
return
def cleanup(self):
if self.createdAvatar:
self.avatar.delete()
self.createdAvatar = None
self.destroy()
return
def __handleCancel(self):
unloadPlayerDetail()
def __showData(self):
if self.isPlayer and self.playerInfo:
if self.playerInfo.onlineYesNo:
someworld = self.playerInfo.location
else:
someworld = TTLocalizer.OfflineLocation
text = TTLocalizer.AvatarDetailPanelPlayer % {'player': self.playerInfo.playerName,
'world': someworld}
else:
text = TTLocalizer.AvatarDetailPanelOffline
self.dataText['text'] = text
def __showToon(self):
messenger.send('wakeup')
hasManager = hasattr(base.cr, 'playerFriendsManager')
handle = base.cr.identifyFriend(self.avId)
if not handle and hasManager:
handle = base.cr.playerFriendsManager.getAvHandleFromId(self.avId)
if handle != None:
self.notify.info("Clicked on name in friend's list. doId = %s" % handle.doId)
messenger.send('clickedNametagPlayer', [handle, self.playerId, 0])
return
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\toon\PlayerDetailPanel.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:26:15 Pacific Daylight Time
|
[
"anonymoustoontown@gmail.com"
] |
anonymoustoontown@gmail.com
|
94c01ee844d433e79abd65caad5625c1412139c1
|
ec31d26a8b619ec98dc5c586f525420572cc39a1
|
/scripts/map_timings.py
|
c76a6e265894314d3ec6f5f64a7eeab8b4fa9405
|
[
"MIT"
] |
permissive
|
stestagg/timing
|
b0fab6ff8814e5edeeaa98b7bcd470aa9e527742
|
57f40229616b2dc385afc447f3f587940158c3b4
|
refs/heads/master
| 2021-06-25T19:20:17.158308
| 2017-09-11T22:26:23
| 2017-09-11T22:26:23
| 103,174,114
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,107
|
py
|
import sys
from collections import namedtuple
from pprint import pformat
import yaml
from jinja2 import Environment, FileSystemLoader, select_autoescape
Output = namedtuple("Output", ["cpu", "human"])
def unit_sort(x):
    """Sort key for (scaled_value, unit_name) pairs: prefer values near 1.

    A value above 1 scores 1000x better than a value the same distance
    below 1, so e.g. 2.0 beats 0.5 when choosing a display unit.
    """
    distance = 1 - x[0]
    return distance if distance >= 0 else abs(distance) * 0.001
def render(**kw):
    """Render ./template.html via Jinja2 with *kw* as the template context.

    HTML autoescaping is enabled for .html templates.
    """
    env = Environment(loader=FileSystemLoader('./'), autoescape=select_autoescape(['html']))
    template = env.get_template('template.html')
    return template.render(**kw)
def main():
    """Build the timings page: scale each timing and pair it with a
    human-scale action, then print the rendered HTML.

    Timings with no overlapping action are reported on stderr instead
    of appearing in the output.
    """
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on modern PyYAML — acceptable only for trusted local files.
    units = yaml.load(open("../data/units.yaml"))
    actions = yaml.load(open("../data/actions.yaml"))
    timings = yaml.load(open("../data/timings.yaml"))
    def to_secs(value, unit):
        # Convert (value, unit-name) to seconds using the units table.
        return value * units[unit]
    for action in actions:
        action['actual_min'] = to_secs(action['min'], action['units'])
        action['actual_max'] = to_secs(action['max'], action['units'])
    def to_unit_val(value):
        # Choose the display unit whose scaled value is closest to 1
        # (per unit_sort's preference); 'cycle' is never used for display.
        scaled = [(value / num_secs, name) for name, num_secs in units.items() if name != "cycle"]
        return sorted(scaled, key=unit_sort)[0]
    def best_action(min_val, max_val):
        # First action whose [actual_min, actual_max] overlaps the interval;
        # returns None when nothing overlaps.
        for action in actions:
            if action['actual_min'] < max_val and action['actual_max'] > min_val:
                return action
    # Scale factor from real durations onto the "blink" scale
    # (0.25 cycles mapped onto 0.1 — presumably a 0.1 s blink; confirm).
    blink_scale = to_secs(0.25, 'cycle') / 0.1
    outputs = []
    for i, timing in enumerate(timings, 2):
        actual_min = to_secs(timing['min'], timing['units'])
        actual_max = to_secs(timing['max'], timing['units'])
        blink_min = actual_min / blink_scale
        blink_max = actual_max / blink_scale
        unit_min = to_unit_val(actual_min)
        unit_max = to_unit_val(actual_max)
        timing['unit_min'] = "%.1f %s" % unit_min
        timing['unit_max'] = "%.1f %s" % unit_max
        best = best_action(blink_min, blink_max)
        if best is None:
            # No action overlaps this timing — log it rather than render it.
            sys.stderr.write(f'{pformat(timing)} - {to_unit_val(blink_min)}\n')
        else:
            outputs.append(Output(timing, best))
    print(render(timings=outputs, enumerate=enumerate))
if __name__ == '__main__':
sys.exit(main())
|
[
"stestagg@gmail.com"
] |
stestagg@gmail.com
|
5da5b673a32056f2d144a312b0032ee8c2690cc3
|
3faffa899187e9244c7f3bccc02bf451e496637e
|
/python/chapter 1/1-1.py
|
ef166815faf24e009175ecdbc670c22723e9f8a0
|
[] |
no_license
|
WuLC/Beauty_OF_Programming
|
909aa6e3dff3dc090da6a3f375aec15222b14f6a
|
63accae07afbbece292a1115ce37b44e03839e31
|
refs/heads/master
| 2020-04-06T04:12:15.573389
| 2016-11-21T03:42:24
| 2016-11-21T03:42:24
| 58,548,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
# -*- coding: utf-8 -*-
# @Author: LC
# @Date: 2016-10-16 12:27:57
# @Last modified by: WuLC
# @Last Modified time: 2016-10-16 20:35:05
# @Email: liangchaowu5@gmail.com
########################################
# take up certain usage of a cpu cor
#########################################
import time
import psutil
# for CPU with n cores, a dead loop can take up(100/n)% of CPU usage
def take_up_a_core():
    """Busy-spin forever; on an n-core machine this pegs one core,
    i.e. about (100/n)% of total CPU (per the module comment above)."""
    i = 0
    while True:
        i += 1
# 260000 loops will take up about 50% of a CPU of 2.4GHz
# without system call, just caculate the number of loops based on the frequency of CPU and number of instructions of statement
# pay attention the instructions of for statement in python is different from that in C, and CPython is used in this test
def take_up_half():
    """Alternate a fixed busy loop with a 10 ms sleep to hold ~50% of one core.

    The 260000-iteration count was calibrated (per the comments above)
    for a 2.4 GHz CPU under CPython; other machines/interpreters will
    land at a different percentage.  Python 2 code (`xrange`).
    """
    while True:
        for i in xrange(260000):
            pass
        time.sleep(0.01)
# take up certain percent with psutil, only apply to single-core CPU, default 50%
def take_up_certain_percent(percent = 50):
    """Busy-spin, sleeping whenever psutil reports CPU usage above *percent*.

    Per the comment above, this approach only behaves as intended on a
    single-core CPU.  Default target is 50%.  Never returns.
    """
    i = 0
    while True:
        while (psutil.cpu_percent() > percent):
            time.sleep(0.01)
        i += 1
if __name__ == '__main__':
take_up_certain_percent()
|
[
"liangchaowu5@gmail.com"
] |
liangchaowu5@gmail.com
|
7bf7fd3eaeee8d2f8d11255b72f453b53bd61041
|
ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f
|
/P.O.R.-master/pirates/npc/DistributedGhostAI.py
|
8b7ab4804bd8b8e385c65685fd59e4d316aa705e
|
[] |
no_license
|
BrandonAlex/Pirates-Online-Retribution
|
7f881a64ec74e595aaf62e78a39375d2d51f4d2e
|
980b7448f798e255eecfb6bd2ebb67b299b27dd7
|
refs/heads/master
| 2020-04-02T14:22:28.626453
| 2018-10-24T15:33:17
| 2018-10-24T15:33:17
| 154,521,816
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
from direct.distributed import DistributedObjectAI
class DistributedGhostAI(DistributedObjectAI.DistributedObjectAI):
    """Server-side (AI) distributed object for a ghost NPC.

    Every override simply delegates to the DistributedObjectAI base;
    the class exists so the ghost has its own distributed type.
    """

    def __init__(self, air):
        # BUG FIX: the method was spelled `___init___` (triple
        # underscores), so Python never called it as the constructor and
        # the base class was never initialized.
        DistributedObjectAI.DistributedObjectAI.__init__(self, air)

    def announceGenerate(self):
        """Hook called once the object is fully generated on the AI."""
        DistributedObjectAI.DistributedObjectAI.announceGenerate(self)

    def generate(self):
        """Standard distributed-object generate hook."""
        DistributedObjectAI.DistributedObjectAI.generate(self)

    def delete(self):
        """Clean up when the object is permanently removed."""
        DistributedObjectAI.DistributedObjectAI.delete(self)

    def disable(self):
        """Temporarily disable the object; delegates to the base class."""
        DistributedObjectAI.DistributedObjectAI.disable(self)
|
[
"brandoncarden12345@gmail.com"
] |
brandoncarden12345@gmail.com
|
72c70e99db2ead16f2ef5be4abc6008f77fad04c
|
0bce7412d58675d6cc410fa7a81c294ede72154e
|
/Python3/0044. Wildcard Matching.py
|
9df5bb742fe54fee40a24b8f728f83976693a948
|
[] |
no_license
|
yang4978/LeetCode
|
9ddf010b0f1dda32cddc7e94c3f987509dea3214
|
6387d05b619d403414bad273fc3a7a2c58668db7
|
refs/heads/master
| 2022-01-15T04:21:54.739812
| 2021-12-28T12:28:28
| 2021-12-28T12:28:28
| 182,653,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
class Solution:
    """Wildcard pattern matching: '?' matches any one character, '*'
    matches any (possibly empty) run of characters."""

    def isMatch(self, s: str, p: str) -> bool:
        """Return True iff pattern *p* matches the entire string *s*.

        Classic O(len(s) * len(p)) DP: match[i][j] is True when s[:i]
        matches p[:j].
        """
        n, m = len(s), len(p)
        match = [[False] * (m + 1) for _ in range(n + 1)]
        match[0][0] = True  # empty pattern matches empty string
        # A leading run of '*' can still match the empty string.
        for j in range(1, m + 1):
            if p[j - 1] == '*':
                match[0][j] = match[0][j - 1]
        for i in range(1, n + 1):
            for j in range(1, m + 1):
                if s[i - 1] == p[j - 1] or p[j - 1] == '?':
                    match[i][j] = match[i - 1][j - 1]
                elif p[j - 1] == '*':
                    # '*' absorbs zero chars (match[i][j-1]) or one more
                    # char of s (match[i-1][j]).
                    match[i][j] = match[i - 1][j - 1] or match[i][j - 1] or match[i - 1][j]
        return match[n][m]
|
[
"noreply@github.com"
] |
yang4978.noreply@github.com
|
4e28dafd47b60ac34e28b715db2cbfcf5fefbdb2
|
c08b5edb5075e7840e716b0a09006dae0a4d05ac
|
/.history/Missions_to_Mars/scrape_mars_20200808232435.py
|
3a588df03c13e75f15c5b6cfa084daff2ad77809
|
[] |
no_license
|
OlgaDlzk/web-scraping-challenge-1
|
06f915eb76c55c9bc37889017dd9af81122dc1a5
|
f99c3436dfb0169595c46dae7733d90e21385cc6
|
refs/heads/master
| 2023-03-18T00:58:37.928024
| 2020-09-22T20:32:47
| 2020-09-22T20:32:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,673
|
py
|
# Dependencies
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup as bs
import requests
from splinter import Browser
import re
# Initialize browser
def init_browser():
    """Return a visible (non-headless) Chrome splinter Browser.

    Uses the macOS/Linux chromedriver path; the commented-out line is
    the Windows alternative.
    """
    executable_path = {"executable_path": "/usr/local/bin/chromedriver"}
    #executable_path = {'executable_path': 'chromedriver.exe'}
    return Browser("chrome", **executable_path, headless=False)
def scrape():
    """Scrape NASA/JPL/Twitter/space-facts/USGS Mars pages and return a
    dict with keys: news_title, news_p, featured_image_url,
    mars_weather, facts_html, hemispheres.
    """
    browser = init_browser()

    # Latest NASA Mars news headline and teaser paragraph.
    url = 'https://mars.nasa.gov/news/'
    browser.visit(url)
    html = browser.html
    soup = bs(html, 'html.parser')
    news_title = soup.find('div', class_='content_title').text
    news_p = soup.find('div', class_='article_teaser_body').text

    # Featured JPL space image.
    url = 'https://www.jpl.nasa.gov/spaceimages/'
    browser.visit(url)
    html = browser.html
    soup = bs(html, 'html.parser')
    base_url = 'https://www.jpl.nasa.gov'
    image_url = soup.find("a", class_="button fancybox")["data-fancybox-href"]
    featured_image_url = base_url + image_url

    # Latest Mars weather tweet.
    url = 'https://twitter.com/marswxreport?lang=en'
    browser.visit(url)
    html = browser.html
    soup = bs(html, 'html.parser')
    # BUG FIX: the original passed `class_=` to re.compile and never
    # closed the soup.find(...) call — a SyntaxError that made the whole
    # module unimportable.  Select the tweet <p> by its CSS class.
    mars_weather = soup.find(
        'p', class_='TweetTextSize TweetTextSize--normal js-tweet-text tweet-text').text.strip()

    # Mars facts table, rendered back out as HTML.
    url = 'https://space-facts.com/mars/'
    browser.visit(url)
    tables = pd.read_html(url)
    facts_df = tables[0]
    facts_df.columns = ['Fact', 'Value']
    facts_df['Fact'] = facts_df['Fact'].str.replace(':', '')
    facts_df.reset_index(drop=True, inplace=True)
    facts_html = facts_df.to_html()

    # Hemisphere titles and full-resolution image URLs.
    url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(url)
    html = browser.html
    soup = bs(html, 'html.parser')
    results = soup.find_all('div', class_="description")
    base_url = 'https://astrogeology.usgs.gov/'
    sites = []
    for result in results:
        link = result.find('a', class_="itemLink product-item")
        link_text = link['href']
        hemispheres_url = base_url + link_text
        sites.append(hemispheres_url)
    hemispheres = []
    for site in sites:
        browser.visit(site)
        html = browser.html
        soup = bs(html, 'html.parser')
        title = soup.find('h2', class_="title").text.strip()
        url = soup.find_all('a', target="_blank", href=True)[0]['href']
        hemispheres.append({"title": title, "img_url": url})

    output = {
        "news_title": news_title,
        "news_p": news_p,
        "featured_image_url": featured_image_url,
        "mars_weather": mars_weather,
        "facts_html": facts_html,
        "hemispheres": hemispheres
    }
    return output
|
[
"ermiasgelaye@gmail.com"
] |
ermiasgelaye@gmail.com
|
7e48b766b0f55a68c8fea240590cb3cbe02c5a0d
|
77772edccbdb5fe07229358a48471cfeca395893
|
/restau/urls.py
|
23b136010aa6519649d9415cbe92f9971b8637d7
|
[] |
no_license
|
ShreySumariya07/Restaurant-Drf
|
a4b08b2522da37ab88e807cb42978b014dce639a
|
a06fba134a50b1803d1ce59eeb5a3c4e7c1a9528
|
refs/heads/master
| 2023-06-22T21:55:09.949692
| 2021-01-29T13:45:25
| 2021-01-29T13:45:25
| 334,158,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
from django.urls import path
from . import views
# Nested REST-style routes: restaurants, and the recipes belonging to one.
urlpatterns = [
    path('restaurants/', views.Restaurants.as_view()),
    path('restaurants/<str:restaurant_id>/', views.RestaurantDetail.as_view()),
    path('restaurants/<str:restaurant_id>/recipes/', views.Recipes.as_view()),
    path('restaurants/<str:restaurant_id>/recipes/<str:recipe_id>/', views.RecipeDetail.as_view()),
]
|
[
"you@example.com"
] |
you@example.com
|
2c29dde493d22062c4ad341523e1cddfa11d7c80
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/pa3/sample/object_method_complex_call-153.py
|
384c6c28ad4f88a1960d2add9439c0e2ccbf5dda
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
# NOTE(review): ChocoPy compiler-test fixture (cases/pa3/sample), not runnable
# Python — `$Exp()` on the last line is presumably a placeholder substituted
# by the test harness; confirm before editing this file's code.
class A(object):
    a:int = 42
    def foo(self:"A", ignore:object) -> int:
        return self.a
class B(A):
    b:bool = True
    def __init__(self:"B"):
        print("B")
    def bar(self:"B") -> int:
        return self.foo(self.foo(print("...")))
    def foo(self:"B", ignore:object) -> int:
        return 1
print($Exp())
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
3100bf04aa4c403c82b611c69b52c10ec9c06173
|
4be5c172c84e04c35677f5a327ab0ba592849676
|
/python/interviewbit/strings/min_chars_to_make_palindrome/min_chars_to_make_palindrome.py
|
a703d91003b0bbc9c59a79a4de9d67fc79ac951e
|
[] |
no_license
|
niranjan-nagaraju/Development
|
3a16b547b030182867b7a44ac96a878c14058016
|
d193ae12863971ac48a5ec9c0b35bfdf53b473b5
|
refs/heads/master
| 2023-04-06T20:42:57.882882
| 2023-03-31T18:38:40
| 2023-03-31T18:38:40
| 889,620
| 9
| 2
| null | 2019-05-27T17:00:29
| 2010-09-05T15:58:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,241
|
py
|
'''
https://www.interviewbit.com/problems/minimum-characters-required-to-make-a-string-palindromic/
Minimum Characters required to make a String Palindromic
Given an string A. The only operation allowed is to insert characters in the beginning of the string.
Find how many minimum characters are needed to be inserted to make the string a palindrome string.
Input Format
The only argument given is string A.
Output Format
Return the minimum characters that are needed to be inserted to make the string a palindrome string.
For Example
Input 1:
A = "ABC"
Output 1:
2
Explanation 1:
Insert 'B' at beginning, string becomes: "BABC".
Insert 'C' at beginning, string becomes: "CBABC".
Input 2:
A = "AACECAAAA"
Output 2:
2
Explanation 2:
Insert 'A' at beginning, string becomes: "AAACECAAAA".
Insert 'A' at beginning, string becomes: "AAAACECAAAA".
'''
'''
Solution Outline:
Consider 'BACB'
Using two-pointers to compare B(left) and B(right)
and then when A(left) and C(right) don't match, we have to include C after B to get 'BCACB'. (this isnt allowed)
Instead, the characters needed to make 'BACB' palindromic are 'BCA' => 'BCABACB'
Brute-force solution:
1. Start with adding 1 character from the right to the left, and check if its a palindrome
2. If it is, we are done, else add more characters from the right to the left
3. Slight optimization would be to skip the number of characters we just added from comparison.
for e.g., BC ... CB, we just added BC, we can skip comparing the first 2 and last 2 characters.
Sample run:
s: "BACXB"
Is 's' a palindrome? NO
Add 'B' to the left
s: "BBACXB"
is s[1:-1] == "BACX" a palindrome? NO
Add 2 characters
s: "BXBACXB"
is s[2:-2] == "BAC" a palindrome? NO
Add 3 characters
s: "BXCBACXB"
is s[3:-3] == "BA" a palindrome? NO
Add 4 characters
s: "BXCABACXB"
is s[4:-4] == "B" a palindrome? YES
return 4
Sample run 2:
s: "AACECAAAA"
is 's' a palindrome? NO
Add 'A' to the left
s: 'AAACECAAAA'
is s[1:-1] == "AACECAAA" a palindrome? NO
Add 'AA' to the left
s: 'AAAACECAAAA'
is s[2:-2] == 'AACECAA' a palindrome? YES
Alternately,
Simulate adding 1 character to the left
=> s: 'A' + "AACECAAAA"
we check if s[0:-1] is a palindrome
is "AACECAAA" a palindrome? NO
Simulate adding 2 characters to the left
=> s: "AA" + "AACECAAAA"
we check if s[0:-2] is a palindrome
is "AACECAA" a palindrome? YES
return 2
'''
class Solution:
    def min_chars_to_make_palindrome(self, A):
        """Count the fewest characters to prepend to A so it reads as a palindrome.

        Strategy: simulate prepending k characters by checking whether the
        first len(A) - k characters already form a palindrome; the smallest
        such k is the answer.
        """
        def _is_palindrome(lo, hi):
            # Two-pointer scan of A[lo..hi] inclusive.
            while lo < hi:
                if A[lo] != A[hi]:
                    return False
                lo += 1
                hi -= 1
            return True

        length = len(A)
        # Empty strings and existing palindromes need nothing prepended.
        if not A or _is_palindrome(0, length - 1):
            return 0

        added = 1
        # Grow the number of prepended characters until the untouched prefix
        # A[0 .. length-added-1] is itself a palindrome.
        while added < length - 1 and not _is_palindrome(0, length - added - 1):
            added += 1
        return added
if __name__ == '__main__':
    solver = Solution()
    # Known (input, expected minimum prepend count) pairs.
    expectations = [
        ("AB", 1),
        ("racecar", 0),
        ("BACXB", 4),
        ("ABC", 2),
        ("AACECAAAA", 2),
    ]
    for text, expected in expectations:
        assert solver.min_chars_to_make_palindrome(text) == expected
|
[
"vinithepooh@gmail.com"
] |
vinithepooh@gmail.com
|
aa6c975ee2f19bb1dc0094b1d58e5ff4d74ef6c8
|
86abbc013ab6209d11d58b735048a560ce059e72
|
/tests/test_01_main/test_env_vars_2.py
|
ca3fab4037b7b777f6209d3922be51fa56d92bee
|
[
"MIT"
] |
permissive
|
alexiri/meinheld-gunicorn-docker
|
e518e10f1845856f1c2c81181e825b5c8aebf99a
|
e9ff167987a967071b408c0ecb7790232f378ea7
|
refs/heads/master
| 2020-04-16T23:44:49.276125
| 2019-01-12T14:12:12
| 2019-01-12T14:12:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,198
|
py
|
import time
import docker
import pytest
from ..utils import (
CONTAINER_NAME,
get_config,
get_process_names,
stop_previous_container,
)
client = docker.from_env()
@pytest.mark.parametrize(
    "image",
    [
        ("tiangolo/meinheld-gunicorn:python3.6"),
        ("tiangolo/meinheld-gunicorn:python3.7"),
        ("tiangolo/meinheld-gunicorn:latest"),
        ("tiangolo/meinheld-gunicorn:python3.6-alpine3.8"),
        ("tiangolo/meinheld-gunicorn:python3.7-alpine3.8"),
    ],
)
def test_env_vars_2(image):
    """Check that WEB_CONCURRENCY and HOST env vars flow into gunicorn's runtime config.

    Boots each image with overrides, then inspects the running container's
    config and process list.
    """
    stop_previous_container(client)
    container = client.containers.run(
        image,
        name=CONTAINER_NAME,
        environment={"WEB_CONCURRENCY": 1, "HOST": "127.0.0.1"},
        ports={"80": "8000"},
        detach=True,
    )
    # NOTE(review): fixed 1s boot wait before inspecting -- may be flaky on
    # slow CI hosts; confirm whether a readiness poll would be safer.
    time.sleep(1)
    process_names = get_process_names(container)
    config_data = get_config(container)
    # WEB_CONCURRENCY=1 must pin exactly one worker.
    assert config_data["workers"] == 1
    assert len(process_names) == 2  # Manager + worker
    # HOST override must drive both the host value and the bind address;
    # port stays at the in-container default of 80.
    assert config_data["host"] == "127.0.0.1"
    assert config_data["port"] == "80"
    assert config_data["loglevel"] == "info"
    assert config_data["bind"] == "127.0.0.1:80"
    container.stop()
    container.remove()
|
[
"tiangolo@gmail.com"
] |
tiangolo@gmail.com
|
33642132bf671fca1fb601bf2a308944456d0679
|
17f6881c70401dc63757cc7b5fa4d9dd396689e3
|
/src/main/com/libin/yfl/2.py
|
ab40b869edeb44fd720e43b62d3e0074571c639f
|
[] |
no_license
|
BigDataRoad/Algorithm
|
0ab493eeb478125b4beb62d78ce18c73e30b0496
|
2f2fb4f4b84f6c9df8adbada63b327c43ce29ddd
|
refs/heads/master
| 2023-07-02T04:06:51.025648
| 2021-07-28T14:04:55
| 2021-07-28T14:04:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
''''
面试题50. 第一个只出现一次的字符
在字符串 s 中找出第一个只出现一次的字符。如果没有,返回一个单空格。
示例:
s = "abaccdeff"
返回 "b"
s = ""
返回 " "
'''
class Solution:
    def firstUniqChar(self, s: str) -> str:
        """Return the first character of s that occurs exactly once.

        Returns a single space when no character is unique (including
        for the empty string).
        """
        counts = {}
        # First pass: tally the frequency of every character.
        # dict.get with a default replaces the original if/else branch.
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        # Second pass in original order: the first count-1 character wins.
        for ch in s:
            if counts[ch] == 1:
                return ch
        return ' '
|
[
"yangfengling@inttech.cn"
] |
yangfengling@inttech.cn
|
1b41e87af402abd0b44ebbe92b9d6c0550a0335c
|
aa0270b351402e421631ebc8b51e528448302fab
|
/sdk/servicenetworking/azure-mgmt-servicenetworking/generated_samples/frontend_patch.py
|
cb930d459dcae096783768ee9d0d2b0061cf2ee1
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
fangchen0601/azure-sdk-for-python
|
d04a22109d0ff8ff209c82e4154b7169b6cb2e53
|
c2e11d6682e368b2f062e714490d2de42e1fed36
|
refs/heads/master
| 2023-05-11T16:53:26.317418
| 2023-05-04T20:02:16
| 2023-05-04T20:02:16
| 300,440,803
| 0
| 0
|
MIT
| 2020-10-16T18:45:29
| 2020-10-01T22:27:56
| null |
UTF-8
|
Python
| false
| false
| 1,803
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.servicenetworking import ServiceNetworkingMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-servicenetworking
# USAGE
python frontend_patch.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Run the FrontendPatch sample: PATCH a traffic-controller frontend and print it.

    DefaultAzureCredential reads AZURE_CLIENT_ID / AZURE_TENANT_ID /
    AZURE_CLIENT_SECRET from the environment (see the module docstring).
    """
    client = ServiceNetworkingMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )
    # Partial update of frontend "publicIp1" on traffic controller "TC1"
    # in resource group "rg1"; values mirror the recorded ARM example.
    response = client.frontends_interface.update(
        resource_group_name="rg1",
        traffic_controller_name="TC1",
        frontend_name="publicIp1",
        properties={
            "properties": {
                "ipAddressVersion": "IPv4",
                "mode": "public",
                "publicIPAddress": {"id": "resourceUriAsString"},
            }
        },
    )
    print(response)
# x-ms-original-file: specification/servicenetworking/resource-manager/Microsoft.ServiceNetworking/cadl/examples/FrontendPatch.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
fangchen0601.noreply@github.com
|
df76809e8cc0c86466eac41b4177052d87b4c49f
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/app/util/bin/format/pef/RelocLgByImport.pyi
|
27b5bea67c6a66a0caa4003b216de6e66cc6fd77
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,439
|
pyi
|
import ghidra.app.util.bin.format.pef
import ghidra.app.util.importer
import ghidra.program.model.data
import ghidra.program.model.listing
import ghidra.util.task
import java.lang
class RelocLgByImport(ghidra.app.util.bin.format.pef.Relocation):
    """
    See Apple's -- PEFBinaryFormat.h
    """
    # Auto-generated Jython type stub (.pyi) for Ghidra's PEF "RelocLgByImport"
    # relocation. NOTE(review): 'long' and 'unicode' below are Python 2/Jython
    # names, and @overload is assumed to be in scope for the stub consumer --
    # this file is for type information only, not for execution.

    def apply(self, importState: ghidra.app.util.bin.format.pef.ImportStateCache, relocState: ghidra.app.util.bin.format.pef.RelocationState, header: ghidra.app.util.bin.format.pef.ContainerHeader, program: ghidra.program.model.listing.Program, log: ghidra.app.util.importer.MessageLog, monitor: ghidra.util.task.TaskMonitor) -> None: ...
    def equals(self, __a0: object) -> bool: ...
    def getClass(self) -> java.lang.Class: ...
    def getIndex(self) -> int: ...
    def getOpcode(self) -> int: ...
    def getSizeInBytes(self) -> int: ...
    def hashCode(self) -> int: ...
    def isMatch(self) -> bool: ...
    def notify(self) -> None: ...
    def notifyAll(self) -> None: ...
    def toDataType(self) -> ghidra.program.model.data.DataType: ...
    def toString(self) -> unicode: ...
    @overload
    def wait(self) -> None: ...
    @overload
    def wait(self, __a0: long) -> None: ...
    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
    @property
    def index(self) -> int: ...
    @property
    def match(self) -> bool: ...
    @property
    def sizeInBytes(self) -> int: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
ccbde9d6da768d83b804f523d581a547d9ba4769
|
81407be1385564308db7193634a2bb050b4f822e
|
/the-python-standard-library-by-example/math/math_inverse_trig.py
|
26b2a3fc86ae12776e4c1f17fcb31bef477a193f
|
[
"MIT"
] |
permissive
|
gottaegbert/penter
|
6db4f7d82c143af1209b4259ba32145aba7d6bd3
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
refs/heads/master
| 2022-12-30T14:51:45.132819
| 2020-10-09T05:33:23
| 2020-10-09T05:33:23
| 305,266,398
| 0
| 0
|
MIT
| 2020-10-19T04:56:02
| 2020-10-19T04:53:05
| null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Inverse trigonometric functions
"""
#end_pymotw_header
import math
# Print asin/acos/atan for a few sample ratios. Converted from the original
# Python 2 print statements (a syntax error on Python 3) to print() calls;
# the emitted text is unchanged.
for r in [0, 0.5, 1]:
    print('arcsine(%.1f) = %5.2f' % (r, math.asin(r)))
    print('arccosine(%.1f) = %5.2f' % (r, math.acos(r)))
    print('arctangent(%.1f) = %5.2f' % (r, math.atan(r)))
    print()
|
[
"350840291@qq.com"
] |
350840291@qq.com
|
356a0bc40cd0d90f4452998c8f8c8ff403d7b25b
|
b1742abd82bd3c27b511d5ba855b9b1e87e6854a
|
/lab_03_magic_8_ball.py
|
b8ad94d1f6d1263e2666335d8b65c5b1fb29e783
|
[] |
no_license
|
tomaccosheep/simple_django_render
|
8702e13e96462e495287b2f5624dcd2d142d72b4
|
6dd2a5bab32e352cc6ff9b7b542ba2d126f01728
|
refs/heads/master
| 2023-05-03T05:36:47.601119
| 2020-02-28T21:44:26
| 2020-02-28T21:44:26
| 243,859,346
| 0
| 0
| null | 2023-04-21T20:51:57
| 2020-02-28T21:41:12
|
Python
|
UTF-8
|
Python
| false
| false
| 362
|
py
|
'''DJANGO STUFF'''
import os
from django.shortcuts import render
# Point Django at a settings module so render() can locate templates.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'

'''MAGIC 8 BALL LAB'''
import random
# The question itself is discarded; the ball answers regardless.
input("What's your question?\n:")
with open('lab_03_out.html', 'w') as f:
    # render() returns an HttpResponse; .content is bytes, hence the decode
    # before writing the rendered HTML to disk.
    f.write(render(None, 'lab_03_in.html', {'answer': random.choice(['It will happen', "It won't happen"])}).content.decode('utf-8'))
|
[
"al.burns.email@gmail.com"
] |
al.burns.email@gmail.com
|
64cf0bf1871a6b84f9a729968ec396164bec5eff
|
732b0b3e2ae0e6c498cfd2ed893de60b9fc22a32
|
/tests/integration/actions/collections/test_welcome_interactive_noee.py
|
5c0bbeddc6c0b39a11d26ac6b3341c0b5bf0a460
|
[
"Apache-2.0"
] |
permissive
|
didib/ansible-navigator
|
eb7b77c1df30b2e90b663383f0f76b6224e92c02
|
62fdbd05f25fb2d79133b3ab207f53ac2f2d6d36
|
refs/heads/main
| 2023-08-30T06:43:42.876079
| 2021-10-14T18:42:17
| 2021-10-14T18:42:17
| 425,540,819
| 0
| 0
|
Apache-2.0
| 2021-11-07T15:27:54
| 2021-11-07T15:27:53
| null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
""" collections from welcome interactive w/0 ee
"""
import pytest
from .base import BaseClass
# Launch command under test: interactive mode with execution environments off.
CLI = "ansible-navigator --execution-environment false"

# Each tuple is (step index, keystrokes to send, description of the screen
# expected after the keystrokes) -- a scripted walk through two collections.
testdata = [
    (0, CLI, "ansible-navigator welcome screen"),
    (1, ":collections", "ansible-navigator collections top window"),
    (2, ":0", "Browse testorg.coll_1 plugins window"),
    (3, ":0", "lookup_1 plugin docs window"),
    (4, ":back", "Back to browse testorg.coll_1 plugins window"),
    (5, ":1", "mod_1 plugin docs window"),
    (6, ":back", "Back to browse testorg.coll_1 plugins window"),
    (7, ":back", "Back to ansible-navigator collections browse window"),
    (8, ":1", "Browse testorg.coll_2 plugins window"),
    (9, ":0", "lookup_2 plugin docs window"),
    (10, ":back", "Back to browse testorg.coll_2 plugins window"),
    (11, ":1", "mod_2 plugin docs window"),
    (12, ":back", "Back to browse testorg.coll_2 plugins window"),
    (13, ":back", "Back to ansible-navigator collections browse window"),
]
@pytest.mark.parametrize("index, user_input, comment", testdata)
class Test(BaseClass):
    """run the tests"""

    # Drive the shared BaseClass scenario in interactive (TUI) mode; the
    # actual stepping/fixture logic lives in BaseClass.
    TEST_FOR_MODE = "interactive"
    # Keep False so recorded screen fixtures are compared, not rewritten.
    UPDATE_FIXTURES = False
|
[
"noreply@github.com"
] |
didib.noreply@github.com
|
6a9815bcd0c734fda3cd74d9658b0d8ab02503a6
|
14bca3c05f5d8de455c16ec19ac7782653da97b2
|
/lib/kubernetes/client/models/v1_toleration.py
|
770bb006050329795a673f8662ff0e3e39b446f0
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hovu96/splunk_as_a_service_app
|
167f50012c8993879afbeb88a1f2ba962cdf12ea
|
9da46cd4f45603c5c4f63ddce5b607fa25ca89de
|
refs/heads/master
| 2020-06-19T08:35:21.103208
| 2020-06-16T19:07:00
| 2020-06-16T19:07:00
| 196,641,210
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,907
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1Toleration(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared swagger type (consumed by to_dict()).
    swagger_types = {
        'effect': 'str',
        'key': 'str',
        'operator': 'str',
        'toleration_seconds': 'int',
        'value': 'str'
    }

    # Python attribute name -> JSON field name in the REST payload.
    attribute_map = {
        'effect': 'effect',
        'key': 'key',
        'operator': 'operator',
        'toleration_seconds': 'tolerationSeconds',
        'value': 'value'
    }

    def __init__(self, effect=None, key=None, operator=None, toleration_seconds=None, value=None):
        """
        V1Toleration - a model defined in Swagger
        """
        # All fields are optional; only explicitly supplied values are set
        # (the generated setters carry the field documentation).
        self._effect = None
        self._key = None
        self._operator = None
        self._toleration_seconds = None
        self._value = None
        self.discriminator = None

        if effect is not None:
            self.effect = effect
        if key is not None:
            self.key = key
        if operator is not None:
            self.operator = operator
        if toleration_seconds is not None:
            self.toleration_seconds = toleration_seconds
        if value is not None:
            self.value = value

    @property
    def effect(self):
        """
        Gets the effect of this V1Toleration.
        Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.

        :return: The effect of this V1Toleration.
        :rtype: str
        """
        return self._effect

    @effect.setter
    def effect(self, effect):
        """
        Sets the effect of this V1Toleration.
        Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.

        :param effect: The effect of this V1Toleration.
        :type: str
        """
        self._effect = effect

    @property
    def key(self):
        """
        Gets the key of this V1Toleration.
        Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.

        :return: The key of this V1Toleration.
        :rtype: str
        """
        return self._key

    @key.setter
    def key(self, key):
        """
        Sets the key of this V1Toleration.
        Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.

        :param key: The key of this V1Toleration.
        :type: str
        """
        self._key = key

    @property
    def operator(self):
        """
        Gets the operator of this V1Toleration.
        Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.

        :return: The operator of this V1Toleration.
        :rtype: str
        """
        return self._operator

    @operator.setter
    def operator(self, operator):
        """
        Sets the operator of this V1Toleration.
        Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.

        :param operator: The operator of this V1Toleration.
        :type: str
        """
        self._operator = operator

    @property
    def toleration_seconds(self):
        """
        Gets the toleration_seconds of this V1Toleration.
        TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.

        :return: The toleration_seconds of this V1Toleration.
        :rtype: int
        """
        return self._toleration_seconds

    @toleration_seconds.setter
    def toleration_seconds(self, toleration_seconds):
        """
        Sets the toleration_seconds of this V1Toleration.
        TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.

        :param toleration_seconds: The toleration_seconds of this V1Toleration.
        :type: int
        """
        self._toleration_seconds = toleration_seconds

    @property
    def value(self):
        """
        Gets the value of this V1Toleration.
        Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.

        :return: The value of this V1Toleration.
        :rtype: str
        """
        return self._value

    @value.setter
    def value(self, value):
        """
        Sets the value of this V1Toleration.
        Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.

        :param value: The value of this V1Toleration.
        :type: str
        """
        self._value = value

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Walk the declared fields, recursively serializing nested models
        # (anything exposing to_dict), lists of models, and dicts of models.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Field-by-field comparison via the instance __dict__ (generated style).
        if not isinstance(other, V1Toleration):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"robert.fujara@gmail.com"
] |
robert.fujara@gmail.com
|
17040fde877a4f28ad58496fd1e547336763246d
|
9bf7d7ace42a61991970fd967c19071a50609b9e
|
/ipython/MultiDot.py
|
d5d3b44b3bd700618f18adcdb674674c8abe775a
|
[] |
no_license
|
ParsonsRD/SciPy-CookBook
|
29b68eace76962ae00735039bc3d488f31714e50
|
52f70a7aa4bd4fd11217a13fc8dd5e277f2388ea
|
refs/heads/master
| 2020-03-17T17:33:28.827269
| 2013-05-17T06:56:54
| 2013-05-17T06:56:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,663
|
py
|
# <markdowncell>
# The matrix multiplication function, numpy.dot(), only takes two
# arguments. That means to multiply more than two arrays together you end
# up with nested function calls which are hard to read:
#
# <codecell>
dot(dot(dot(a,b),c),d)
# <markdowncell>
# versus infix notation where you'd just be able to write
#
# <codecell>
a*b*c*d
# <markdowncell>
# There are a couple of ways to define an 'mdot' function that acts like
# dot but accepts more than two arguments. Using one of these allows you
# to write the above expression as
#
# <codecell>
mdot(a,b,c,d)
# <markdowncell>
# Using reduce
# ------------
#
# The simplest way it to just use reduce.
#
# <codecell>
def mdot(*args):
    """Return the matrix product of all arguments, folded left to right.

    Equivalent to dot(dot(dot(a, b), c), ...) for any number of operands.
    """
    # reduce is no longer a builtin on Python 3; functools.reduce exists on
    # both Python 2.6+ and Python 3, so import it locally.
    from functools import reduce
    return reduce(numpy.dot, args)
# <markdowncell>
# Or use the equivalent loop (which is apparently the preferred style [for
# Py3K](http://www.python.org/dev/peps/pep-3100/#id53)):
#
# <codecell>
def mdot(*args):
    """Return the matrix product of all arguments, folded left to right.

    Loop form of mdot (preferred over reduce per PEP 3100); evaluates as
    (((a*b)*c)*d).
    """
    ret = args[0]
    for a in args[1:]:
        # Qualify the call: the file imports numpy but never star-imports it,
        # so a bare dot() would be unresolved at call time.
        ret = numpy.dot(ret, a)
    return ret
# <markdowncell>
# This will always give you left to right associativity, i.e. the
# expression is interpreted as \`(((a\*b)\*c)\*d)\`.
#
# You also can make a right-associative version of the loop:
#
# <codecell>
def mdotr(*args):
    """Return the matrix product of all arguments, folded right to left.

    Evaluates as (a*(b*(c*d))), the right-associative counterpart of mdot.
    """
    ret = args[-1]
    for a in reversed(args[:-1]):
        # Qualified numpy.dot: a bare dot() would be unresolved since the
        # file never star-imports numpy.
        ret = numpy.dot(a, ret)
    return ret
# <markdowncell>
# which evaluates as \`(a\*(b\*(c\*d)))\`. But sometimes you'd like to
# have finer control since the order in which matrix multiplies are
# performed can have a big impact on performance. The next version gives
# that control.
#
# Controlling order of evaluation
# -------------------------------
#
# If we're willing to sacrifice Numpy's ability to treat tuples as arrays,
# we can use tuples as grouping constructs. This version of \`mdot\`
# allows syntax like this:
#
# <codecell>
mdot(a,((b,c),d))
# <markdowncell>
# to control the order in which the pairwise \`dot\` calls are made.
#
# <codecell>
import types
import numpy
def mdot(*args):
    """Multiply all the arguments using matrix product rules.

    The output is equivalent to multiplying the arguments one by one
    from left to right using dot().
    Precedence can be controlled by creating tuples of arguments,
    for instance mdot(a,((b,c),d)) multiplies a (a*((b*c)*d)).

    Note that this means the output of dot(a,b) and mdot(a,b) will differ if
    a or b is a pure tuple of numbers.
    """
    if len(args) == 1:
        return args[0]
    elif len(args) == 2:
        return _mdot_r(args[0], args[1])
    else:
        # Fold all-but-last into a tuple so recursion stays left-associative.
        return _mdot_r(args[:-1], args[-1])

def _mdot_r(a, b):
    """Recursive helper for mdot"""
    # isinstance(..., tuple) replaces types.TupleType, which was removed in
    # Python 3; semantics are identical on Python 2.
    if isinstance(a, tuple):
        a = mdot(*a) if len(a) > 1 else a[0]
    if isinstance(b, tuple):
        b = mdot(*b) if len(b) > 1 else b[0]
    return numpy.dot(a, b)
# <markdowncell>
# Multiply
# --------
#
# Note that the elementwise multiplication function \`numpy.multiply\` has
# the same two-argument limitation as \`numpy.dot\`. The exact same
# generalized forms can be defined for multiply.
#
# Left associative versions:
#
# <codecell>
def mmultiply(*args):
    """Return the elementwise product of all arguments, folded left to right."""
    # reduce is no longer a builtin on Python 3; functools.reduce exists on
    # both Python 2.6+ and Python 3, so import it locally.
    from functools import reduce
    return reduce(numpy.multiply, args)
# <markdowncell>
#
#
# <codecell>
def mmultiply(*args):
    """Return the elementwise product of all arguments, folded left to right.

    Loop form; evaluates as (((a*b)*c)*d).
    """
    ret = args[0]
    for a in args[1:]:
        # Qualify the call: the file never star-imports numpy, so a bare
        # multiply() would be unresolved at call time.
        ret = numpy.multiply(ret, a)
    return ret
# <markdowncell>
# Right-associative version:
#
# <codecell>
def mmultiplyr(*args):
    """Return the elementwise product of all arguments, folded right to left.

    Evaluates as (a*(b*(c*d))), the right-associative counterpart of mmultiply.
    """
    ret = args[-1]
    for a in reversed(args[:-1]):
        # Qualified numpy.multiply: a bare multiply() would be unresolved
        # since the file never star-imports numpy.
        ret = numpy.multiply(a, ret)
    return ret
# <markdowncell>
# Version using tuples to control order of evaluation:
#
# <codecell>
import types
import numpy
def mmultiply(*args):
    """Multiply all the arguments using elementwise product.

    The output is equivalent to multiplying the arguments one by one
    from left to right using multiply().
    Precedence can be controlled by creating tuples of arguments,
    for instance mmultiply(a,((b,c),d)) multiplies a (a*((b*c)*d)).

    Note that this means the output of multiply(a,b) and mmultiply(a,b) will differ if
    a or b is a pure tuple of numbers.
    """
    if len(args) == 1:
        return args[0]
    elif len(args) == 2:
        return _mmultiply_r(args[0], args[1])
    else:
        # Fold all-but-last into a tuple so recursion stays left-associative.
        return _mmultiply_r(args[:-1], args[-1])

def _mmultiply_r(a, b):
    """Recursive helper for mmultiply"""
    # isinstance(..., tuple) replaces types.TupleType, which was removed in
    # Python 3; semantics are identical on Python 2.
    if isinstance(a, tuple):
        a = mmultiply(*a) if len(a) > 1 else a[0]
    if isinstance(b, tuple):
        b = mmultiply(*b) if len(b) > 1 else b[0]
    return numpy.multiply(a, b)
# <markdowncell>
#
#
|
[
"matti.pastell@helsinki.fi"
] |
matti.pastell@helsinki.fi
|
38192ff80015c3eaf94f38a786aaa3a32e84f80e
|
06bf7c9f24b2a9cfe1f9feb1481838b37713904e
|
/scripts/ansible-vagrant-inventory.py
|
b88e0db54e121ddfbc708d93aa319328c75d5ce2
|
[
"MIT"
] |
permissive
|
marcelodevops/dotfiles
|
31efb4d62b7e5161fba40c204612d01e44cc35bd
|
eeb63ea94f27aef0eef8777bd8a423ae81133217
|
refs/heads/master
| 2022-03-08T20:37:08.749623
| 2019-10-22T20:14:45
| 2019-10-22T20:14:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,010
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import json
import os
import os.path
import subprocess
import sys
def exit_err(msg):
    """Write *msg* to stderr, then terminate the process with exit status 1."""
    print(msg, file=sys.stderr)
    # sys.exit(1) is just sugar for raising SystemExit(1); raise it directly.
    raise SystemExit(1)
def get_vagrant_index():
    """Load Vagrant's global machine index (JSON) and return it as a dict.

    NOTE(review): assumes ~/.vagrant.d/data/machine-index/index exists; the
    open() will raise if Vagrant has never run for this user.
    """
    path = os.path.expanduser('~/.vagrant.d/data/machine-index/index')
    with open(path) as filehandle:
        return json.load(filehandle)
def find_relevant_machines(index_data, root_dir):
    """Yield every running machine whose Vagrantfile directory is *root_dir*."""
    for candidate in index_data['machines'].values():
        is_running = candidate['state'] == 'running'
        if is_running and candidate['vagrantfile_path'] == root_dir:
            yield candidate
def get_vagrant_file(machine, filename):
    """Return the path of *filename* inside this machine's provider data dir."""
    parts = (
        machine['local_data_path'],
        'machines',
        machine['name'],
        machine['provider'],
        filename,
    )
    return os.path.join(*parts)
def get_vagrant_privkey(machine):
    """Return the path to the machine's Vagrant-generated SSH private key."""
    return get_vagrant_file(machine, 'private_key')
def get_machine_ssh_info(machine):
    """Return an (address, port) pair for SSH-ing into *machine*.

    Prefers the guest's own IPv4 address (read from VirtualBox guest
    properties; port is None in that case), falling back to the host/port
    pair from `vagrant ssh-config`.
    """
    # this works if the virtualbox machine has guest additions installed
    vbox_id_path = get_vagrant_file(machine, 'id')
    with open(vbox_id_path) as filehandle:
        # NOTE(review): str.decode() only exists on Python 2 -- on Python 3 a
        # text-mode read() already returns str and this raises AttributeError.
        vbox_id = filehandle.read().decode()
    vbox_out = subprocess.check_output([
        'vboxmanage', 'guestproperty', 'get', vbox_id,
        '/VirtualBox/GuestInfo/Net/1/V4/IP',
    ]).strip()
    # NOTE(review): check_output returns bytes on Python 3, so comparing with
    # a str would always be unequal -- Python 2 semantics assumed here.
    if vbox_out != 'No value set!':
        return vbox_out.split()[1], None
    # fall back to the forwarded port that vagrant uses
    ssh_conf = subprocess.check_output(['vagrant', 'ssh-config', machine['name']])
    ssh_conf_lines = (line.split(None, 1) for line in ssh_conf.splitlines() if line)
    ssh_config_dict = {key.lower(): val for key, val in ssh_conf_lines}
    return ssh_config_dict['hostname'], ssh_config_dict['port']
def get_machine_group_data(machine, ansible_vars=None):
    """Build one inventory group dict ({'hosts': [...], 'vars': {...}}) for *machine*.

    NOTE(review): a caller-supplied ansible_vars dict is mutated in place.
    """
    ansible_vars = ansible_vars or {}
    ansible_vars['ansible_ssh_private_key_file'] = get_vagrant_privkey(machine)
    ip, port = get_machine_ssh_info(machine)
    # TODO: change ansible_ssh_ to ansible_ when upgrading to ansible 2
    ansible_vars['ansible_ssh_host'] = ip
    # port is None when we can reach the guest's own IP directly.
    if port:
        ansible_vars['ansible_ssh_port'] = port
    return {
        'hosts': [ip],
        'vars': ansible_vars,
    }
def get_inventory_data(root_dir):
    """Assemble the dynamic-inventory dict for machines rooted at *root_dir*.

    Layout: an 'all' group carrying shared SSH vars, a 'vagrant' parent
    group, and one child group per running machine (keyed by machine name).
    """
    # Vagrant boxes get throwaway host keys, so skip host-key verification.
    ssh_args = '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
    data = {
        'all': {
            'vars': {
                'ansible_user': 'root',
                'ansible_ssh_common_args': ssh_args,
            },
        },
        'vagrant': {'children': []},
    }
    index_data = get_vagrant_index()
    for machine in find_relevant_machines(index_data, root_dir):
        data[machine['name']] = get_machine_group_data(machine)
        data['vagrant']['children'].append(machine['name'])
    return data
def main():
    """CLI entry point: print the Ansible inventory for --root-dir as JSON."""
    parser = argparse.ArgumentParser()
    # Defaults to the current directory, matching vagrant's own convention.
    parser.add_argument('-r', '--root-dir', default=os.getcwd())
    args = parser.parse_args()
    data = get_inventory_data(args.root_dir)
    print(json.dumps(data))
|
[
"anlutro@gmail.com"
] |
anlutro@gmail.com
|
e8b58c05a571106b4b6e583e9fb783bceebe4a72
|
236e6a7c4604443f0f4acd778bcccd747011080e
|
/ax/storage/sqa_store/sqa_config.py
|
4c184fcadc002f2ad2f11fec983230902d70b085
|
[
"MIT"
] |
permissive
|
MayukhBagchiTrento/Ax
|
a161e1fee615c4e570de51b32f9e656063dc228d
|
7c925ba8365af714d9671208de490ba48814bfaa
|
refs/heads/master
| 2023-05-09T20:14:14.525338
| 2021-06-02T18:46:51
| 2021-06-02T18:46:51
| 268,482,239
| 0
| 0
|
MIT
| 2020-06-01T09:43:39
| 2020-06-01T09:43:38
| null |
UTF-8
|
Python
| false
| false
| 2,537
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import Dict, NamedTuple, Optional, Type
from ax.core.arm import Arm
from ax.core.base import Base
from ax.core.batch_trial import AbandonedArm
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.generator_run import GeneratorRun, GeneratorRunType
from ax.core.metric import Metric
from ax.core.parameter import Parameter
from ax.core.parameter_constraint import ParameterConstraint
from ax.core.runner import Runner
from ax.core.trial import Trial
from ax.modelbridge.generation_strategy import GenerationStrategy
from ax.storage.sqa_store.db import SQABase
from ax.storage.sqa_store.sqa_classes import (
SQAAbandonedArm,
SQAArm,
SQAData,
SQAExperiment,
SQAGenerationStrategy,
SQAGeneratorRun,
SQAMetric,
SQAParameter,
SQAParameterConstraint,
SQARunner,
SQATrial,
)
# pyre-fixme[9]: class_to_sqa_class has type `Dict[Type[Base], Type[SQABase]]`; used
# as `Dict[Type[Union[AbandonedArm, Arm, Data, Experiment, GenerationStrategy,
# GeneratorRun, Metric, Parameter, ParameterConstraint, Runner, Trial]],
# Type[Union[SQAAbandonedArm, SQAArm, SQAData, SQAExperiment, SQAGenerationStrategy,
# SQAGeneratorRun, SQAMetric, SQAParameter, SQAParameterConstraint, SQARunner,
# SQATrial]]]`.
class SQAConfig(NamedTuple):
    """Metadata needed to save and load an experiment to SQLAlchemy.

    Attributes:
        class_to_sqa_class: Mapping of user-facing class to SQLAlchemy class
            that it will be encoded to. This allows overwriting of the default
            classes to provide custom save functionality.
        experiment_type_enum: Enum containing valid Experiment types.
        generator_run_type_enum: Enum containing valid Generator Run types.
    """

    # Default encoders; swap individual entries to customize persistence.
    class_to_sqa_class: Dict[Type[Base], Type[SQABase]] = {
        AbandonedArm: SQAAbandonedArm,
        Arm: SQAArm,
        Data: SQAData,
        Experiment: SQAExperiment,
        GenerationStrategy: SQAGenerationStrategy,
        GeneratorRun: SQAGeneratorRun,
        Parameter: SQAParameter,
        ParameterConstraint: SQAParameterConstraint,
        Metric: SQAMetric,
        Runner: SQARunner,
        Trial: SQATrial,
    }
    # No experiment-type enum by default; callers supply their own.
    experiment_type_enum: Optional[Enum] = None
    generator_run_type_enum: Optional[Enum] = GeneratorRunType
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
9a5e9d041ce5d5aec6a271c32fdf6721bfcdf335
|
0d9445f17a7175c3872c6ffb9280c3269b8a1eb9
|
/test/unfolding/test_unfolding.py
|
06ece9628fe17d60ef34aa60e688ece98132da0e
|
[] |
permissive
|
neukirchen-212/phonopy
|
8aa4fc9f63cb124acaa6f7ab052275a423c000cb
|
e34588dcb32fb15aa2a6604ffd3e62ebb0927c0f
|
refs/heads/develop
| 2023-08-24T00:47:35.908407
| 2021-10-16T12:02:27
| 2021-10-16T12:02:27
| 328,015,607
| 0
| 0
|
BSD-3-Clause
| 2021-01-11T12:53:48
| 2021-01-08T21:24:17
| null |
UTF-8
|
Python
| false
| false
| 4,753
|
py
|
import numpy as np
from phonopy import Phonopy
from phonopy.unfolding import Unfolding
# from phonopy.interface.vasp import write_vasp
import os
data_dir = os.path.dirname(os.path.abspath(__file__))
def test_Unfolding_NaCl(ph_nacl):
"""Test to reproduce proper band structure of primitive cell
Results are written to "bin-unfolding-test.dat".
This data can be plotted by
% plot_band.py bin-unfolding-test.dat
Increase nd to get better plot.
The test is done with nd=10.
"""
# ph = _get_phonon(ph_nacl)
ph = ph_nacl
nd = 10
qpoints = np.array([[x, ] * 3 for x in range(nd)]) / float(nd) - 0.5
unfolding_supercell_matrix = [[-2, 2, 2],
[2, -2, 2],
[2, 2, -2]]
mapping = np.arange(len(ph.supercell), dtype=int)
unfolding = Unfolding(ph,
unfolding_supercell_matrix,
ph.supercell.scaled_positions,
mapping,
qpoints)
unfolding.run()
weights = _get_weights(unfolding, qpoints)
# _write_weights(weights, "unfolding.dat")
filename_out = os.path.join(data_dir, "bin-unfolding-test.dat")
_compare(weights,
os.path.join(data_dir, "bin-unfolding.dat"),
filename_out=None)
def test_Unfolding_SC(ph_nacl):
"""Test to reproduce unfoled band structure
Atomic positions are considered as the lattice ponts.
Results are written to "bin-unfolding_to_atoms-test.dat".
This data can be plotted by
% plot_band.py bin-unfolding_to_atoms-test.dat
Increase nd to get better plot.
The test is done with nd=10.
"""
# ph = _get_phonon(ph_nacl)
ph = ph_nacl
nd = 10
qpoints = np.array([[x, ] * 3 for x in range(nd)]) / float(nd) - 0.5
unfolding_supercell_matrix = np.diag([4, 4, 4])
mapping = np.arange(len(ph.supercell), dtype=int)
unfolding = Unfolding(ph,
unfolding_supercell_matrix,
ph.supercell.scaled_positions,
mapping,
qpoints)
unfolding.run()
weights = _get_weights(unfolding, qpoints)
# _write_weights(weights, "unfolding_to_atoms.dat")
filename_out = os.path.join(data_dir, "bin-unfolding_to_atoms-test.dat")
_compare(weights,
os.path.join(data_dir, "bin-unfolding_to_atoms.dat"),
filename_out=None)
def _compare(weights, filename, filename_out=None):
bin_data = _binning(weights)
if filename_out:
_write_bin_data(bin_data, filename_out)
with open(filename) as f:
bin_data_in_file = np.loadtxt(f)
np.testing.assert_allclose(bin_data, bin_data_in_file, atol=1e-2)
def _get_weights(unfolding, qpoints):
weights = unfolding.unfolding_weights
freqs = unfolding.frequencies
out_vals = []
for i, q in enumerate(qpoints):
for f, w in zip(freqs[i], weights[i]):
out_vals.append([q[0], q[1], q[2], f, w])
return out_vals
def _write_weights(weights, filename):
with open(filename, 'w') as w:
lines = ["%10.7f %10.7f %10.7f %12.7f %10.7f" % tuple(x)
for x in weights]
w.write("\n".join(lines))
def _write_bin_data(bin_data, filename):
with open(filename, 'w') as w:
lines = ["%8.5f %8.5f %8.5f" % tuple(v) for v in bin_data]
w.write("\n".join(lines))
def _binning(data):
x = []
y = []
w = []
for vals in data:
if vals[4] > 1e-3:
x.append(vals[0])
y.append(vals[3])
w.append(vals[4])
x = np.around(x, decimals=5)
y = np.around(y, decimals=5)
w = np.array(w)
points = {}
for e_x, e_y, e_z in zip(x, y, w):
if (e_x, e_y) in points:
points[(e_x, e_y)] += e_z
else:
points[(e_x, e_y)] = e_z
x = []
y = []
w = []
for key in points:
x.append(key[0])
y.append(key[1])
w.append(points[key])
data = np.transpose([x, y, w])
data = sorted(data, key=lambda data: data[1])
data = sorted(data, key=lambda data: data[0])
return np.array(data)
def _get_phonon(ph_in):
ph = Phonopy(ph_in.supercell, supercell_matrix=[1, 1, 1])
ph.force_constants = ph_in.force_constants
born_elems = {s: ph_in.nac_params['born'][i]
for i, s in enumerate(ph_in.primitive.symbols)}
born = [born_elems[s] for s in ph_in.supercell.symbols]
epsilon = ph_in.nac_params['dielectric']
factors = ph_in.nac_params['factor']
ph.nac_params = {'born': born,
'factor': factors,
'dielectric': epsilon}
return ph
|
[
"atz.togo@gmail.com"
] |
atz.togo@gmail.com
|
c16fe6d08af84f79148a92df9ca28ec80d1fbd1b
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/planner/leaf.py
|
a26a48f95065c8ded94a02ebbeb6e1facd5221d6
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904
| 2021-03-26T22:07:54
| 2021-03-26T22:07:54
| 351,855,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,862
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Leaf(Mo):
"""
Optimizer deployment leaf
"""
meta = ClassMeta("cobra.model.planner.Leaf")
meta.moClassName = "plannerLeaf"
meta.rnFormat = "leaf-%(name)s"
meta.category = MoCategory.REGULAR
meta.label = "Switch node"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.planner.Resource")
meta.childClasses.add("cobra.model.planner.RsDeployedFex")
meta.childClasses.add("cobra.model.planner.RsDeployedObject")
meta.childClasses.add("cobra.model.planner.RsNodeLabels")
meta.childClasses.add("cobra.model.planner.Violation")
meta.childNamesAndRnPrefix.append(("cobra.model.planner.RsDeployedObject", "rsdeployedObject-"))
meta.childNamesAndRnPrefix.append(("cobra.model.planner.RsDeployedFex", "rsdeployedFex-"))
meta.childNamesAndRnPrefix.append(("cobra.model.planner.RsNodeLabels", "rsnodeLabels-"))
meta.childNamesAndRnPrefix.append(("cobra.model.planner.Violation", "violation-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.planner.Resource", "res-"))
meta.parentClasses.add("cobra.model.planner.Deployment")
meta.superClasses.add("cobra.model.planner.Node")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.rnPrefixes = [
('leaf-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "isExisting", "isExisting", 28926, PropCategory.REGULAR)
prop.label = "Indicates if Node is from Existing topology or Template"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = True
prop.defaultValueStr = "yes"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("isExisting", prop)
prop = PropMeta("str", "label", "label", 21222, PropCategory.REGULAR)
prop.label = "Label"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("label", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "maxPctUse", "maxPctUse", 21967, PropCategory.REGULAR)
prop.label = "Max percent usage across all the resources"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 100)]
prop.defaultValue = 255
prop.defaultValueStr = "none"
prop._addConstant("none", "not-applicable", 255)
meta.props.add("maxPctUse", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "model", "model", 21223, PropCategory.REGULAR)
prop.label = "Model"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "N9K-C9372PX"
prop._addConstant("N9K-C93108TC-EX", "n9k-c93108tc-ex", 14)
prop._addConstant("N9K-C93128TX:N9K-M12PQ", "n9k-c93128tx:n9k-m12pq", 6)
prop._addConstant("N9K-C93128TX:N9K-M6PQ", "n9k-c93128tx:n9k-m6pq", 7)
prop._addConstant("N9K-C93128TX:N9K-M6PQ-E", "n9k-c93128tx:n9k-m6pq-e", 8)
prop._addConstant("N9K-C93180YC-EX", "n9k-c93180yc-ex", 13)
prop._addConstant("N9K-C9332PQ", "n9k-c9332pq", 2)
prop._addConstant("N9K-C9372PX", "n9k-c9372px", 0)
prop._addConstant("N9K-C9372PX-E", "n9k-c9372px-e", 12)
prop._addConstant("N9K-C9372TX", "n9k-c9372tx", 1)
prop._addConstant("N9K-C9396PX:N9K-M12PQ", "n9k-c9396px:n9k-m12pq", 3)
prop._addConstant("N9K-C9396PX:N9K-M6PQ", "n9k-c9396px:n9k-m6pq", 4)
prop._addConstant("N9K-C9396PX:N9K-M6PQ-E", "n9k-c9396px:n9k-m6pq-e", 5)
prop._addConstant("N9K-C9396TX:N9K-M12PQ", "n9k-c9396tx:n9k-m12pq", 9)
prop._addConstant("N9K-C9396TX:N9K-M6PQ", "n9k-c9396tx:n9k-m6pq", 10)
prop._addConstant("N9K-C9396TX:N9K-M6PQ-E", "n9k-c9396tx:n9k-m6pq-e", 11)
meta.props.add("model", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 21244, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "name", "name", 21224, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 16)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "name"))
def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
namingVals = [name]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"bkhoward@live.com"
] |
bkhoward@live.com
|
d4df2caa6ed25adc9924e59b16ebf3b4dc71c06b
|
64cd09628f599fe18bf38528309349f7ac0df71e
|
/ML.Autoencoder/Reference/1_Autoencoder.py
|
59899e4665c25aec2652d6535c1f0822d0364e62
|
[] |
no_license
|
JunyoungJang/Python
|
958c057b2fd37c03876d3cf566ee27ee637bb020
|
76d4cd441deff8061e10608e0848360bc4f34490
|
refs/heads/master
| 2021-01-19T21:54:42.208469
| 2020-02-14T09:54:17
| 2020-02-14T09:54:17
| 83,768,220
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,862
|
py
|
# Gradient-Based Learning Applied to Document Recognition [Y LeCun 1998] http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf
# Greedy Layer-Wise Training of Deep Networks [Bengio 07] http://papers.nips.cc/paper/3048-greedy-layer-wise-training-of-deep-networks.pdf/
# Extracting and Composing Robust Features with Denoising Autoencoders [Vincent 08] http://www.iro.umontreal.ca/~vincentp/Publications/denoising_autoencoders_tr1316.pdf
# Introduction Auto-Encoder (turing1759@gmail.com) https://wikidocs.net/3413
# Autoencoders http://ufldl.stanford.edu/tutorial/unsupervised/Autoencoders/
# Autoencoder vs RBM (+ vs CNN) # http://khanrc.tistory.com/entry/Autoencoder-vs-RBM-vs-CNN
import tensorflow as tf, numpy as np, matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
mnist = input_data.read_data_sets("/Users/sungchul/Dropbox/Data/MNIST/", one_hot=True)
learning_rate = 0.01
training_epochs = 100
batch_size = 256
display_step = 1
examples_to_show = 10
n_hidden_1 = 256 # 1st layer num features
n_hidden_2 = 128 # 2nd layer num features
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])
weights = {
'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}
def encoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']), biases['encoder_b2']))
return layer_2
def decoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']), biases['decoder_b2']))
return layer_2
# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Prediction
y_pred = decoder_op
y_true = X
# Define loss and optimizer, minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
# Launch the graph
with tf.Session() as sess:
tf.global_variables_initializer().run()
total_batch = int(mnist.train.num_examples/batch_size)
# Training cycle
for epoch in range(training_epochs):
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1),
"cost=", "{:.9f}".format(c))
print("Optimization Finished!")
# Applying encode and decode over test set
encode_decode = sess.run(
y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
# Compare original images with their reconstructions
f, a = plt.subplots(2, 10, figsize=(10, 2))
for i in range(examples_to_show):
a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
f.show()
plt.draw()
plt.waitforbuttonpress()
|
[
"lakino@yonsei.ac.kr"
] |
lakino@yonsei.ac.kr
|
a1088c8dfa47439b1217fe6d2ebf2519ad1696a3
|
ae6189642a07fd789f51caadb924328a54919cac
|
/100-problems/review/dinamic-programming/37-coin-2.py
|
f4d3151f226422f8a88fdfd7ba7a84605042607d
|
[] |
no_license
|
d-matsui/atcorder
|
201e32403653b2fdf0d42188faf095eb8b793b86
|
22ec1af8206827e10a986cb24cf12acc52ab1d6a
|
refs/heads/master
| 2020-09-27T23:15:27.281877
| 2020-09-01T13:24:34
| 2020-09-01T13:24:34
| 226,632,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
#!/usr/bin/env python3
from pprint import pprint
import sys
sys.setrecursionlimit(10 ** 6)
input = sys.stdin.buffer.readline
INF = float('inf')
n, m = map(int, input().split())
coins = list(map(int, input().split()))
# dp[i] := ちょうどi円支払うときのコインの最小枚数
dp = [INF] * (n + 1)
dp[0] = 0
for i in range(n + 1):
for c in coins:
if i - c < 0:
continue
dp[i] = min(dp[i], dp[i-c] + 1)
print(dp[n])
|
[
"mti.daiki@gmail.com"
] |
mti.daiki@gmail.com
|
c8d1d2b5066d64780127495106322b5db11767f8
|
8b9bf5891152762a0dbaef2f01ba8a302d3213fa
|
/git_blankfile.py
|
9e977d1f7d84a0fba2c33fa1f8a736a35c14897b
|
[] |
no_license
|
WRGrantham/github_practice
|
bd79ec1d3bfbefe50e00686f63bb247529293370
|
a4a1e06b1cbe7ef2667934584293d3a401382fb2
|
refs/heads/master
| 2020-03-22T06:49:39.473734
| 2018-07-04T03:15:24
| 2018-07-04T03:15:24
| 139,660,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
def bitch_please(string):
print ("bitch, PLEASE!")
bitch_please(poop_butt)
poop_butt = "Mr Poopy Butthole, we've got changes"
|
[
"vagrant@vagrant.vm"
] |
vagrant@vagrant.vm
|
70b8250d91c0fe9c9201349f493e908f51d62f94
|
bb150497a05203a718fb3630941231be9e3b6a32
|
/framework/e2e/jit/test_MaxPool3D_11.py
|
0d68f5ac5631e67e9f271a8346573b1b1fc46ec8
|
[] |
no_license
|
PaddlePaddle/PaddleTest
|
4fb3dec677f0f13f7f1003fd30df748bf0b5940d
|
bd3790ce72a2a26611b5eda3901651b5a809348f
|
refs/heads/develop
| 2023-09-06T04:23:39.181903
| 2023-09-04T11:17:50
| 2023-09-04T11:17:50
| 383,138,186
| 42
| 312
| null | 2023-09-13T11:13:35
| 2021-07-05T12:44:59
|
Python
|
UTF-8
|
Python
| false
| false
| 620
|
py
|
#!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))
from utils.yaml_loader import YamlLoader
from jittrans import JitTrans
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_MaxPool3D_11():
"""test MaxPool3D_11"""
jit_case = JitTrans(case=yml.get_case_info("MaxPool3D_11"))
jit_case.jit_run()
|
[
"825276847@qq.com"
] |
825276847@qq.com
|
eee8b28412164b068298414bbbfdd00a7682dde6
|
d190750d6cb34e9d86ae96724cf4b56a2f57a74a
|
/tests/r/test_gilgais.py
|
cf929d3c6685b3eb562f0d35660052a163cdc03a
|
[
"Apache-2.0"
] |
permissive
|
ROAD2018/observations
|
a119f61a48213d791de0620804adb8d21c2ad9fb
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
refs/heads/master
| 2021-09-24T04:28:02.725245
| 2018-09-16T23:06:30
| 2018-09-16T23:06:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.gilgais import gilgais
def test_gilgais():
"""Test module gilgais.py by downloading
gilgais.csv and testing shape of
extracted data has 365 rows and 9 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = gilgais(test_path)
try:
assert x_train.shape == (365, 9)
except:
shutil.rmtree(test_path)
raise()
|
[
"dustinviettran@gmail.com"
] |
dustinviettran@gmail.com
|
0be288da85863450afa7a166af6d2304e7aa4300
|
6fb37fee016346120d4c14c4343516532304055a
|
/src/genie/libs/parser/iosxe/tests/test_show_ip.py
|
87faf2deeb76a8bc95aeaaf838eda94ca386a3b5
|
[
"Apache-2.0"
] |
permissive
|
devbollinger/genieparser
|
011526ebbd747c6dcd767535ce4bd33167e15536
|
ad5ce7ba8f5153d1aeb9cffcfc4dde0871f3401c
|
refs/heads/master
| 2020-12-20T11:36:00.750128
| 2020-01-24T18:45:40
| 2020-01-24T18:45:40
| 236,061,155
| 0
| 0
|
Apache-2.0
| 2020-01-24T18:38:43
| 2020-01-24T18:38:42
| null |
UTF-8
|
Python
| false
| false
| 6,901
|
py
|
# Python
import unittest
from unittest.mock import Mock
# ATS
from ats.topology import Device
from ats.topology import loader
# Metaparser
from genie.metaparser.util.exceptions import SchemaEmptyParserError, SchemaMissingKeyError
# iosxe show_lisp
# from genie.libs.parser.iosxe.show_lisp import ShowLispSession
# iosxe show_ip_parser
from genie.libs.parser.iosxe.show_ip import ShowIPAlias, \
ShowIPAliasDefaultVrf
# =================================
# Unit test for 'show ip aliases', 'show ip aliases default-vrf', 'show ip aliases vrf {vrf}'
# =================================
class test_show_ip_alias(unittest.TestCase):
'''
Unit test for:
show ip aliases
show ip aliases default-vrf
show ip aliases vrf {vrf}
'''
device = Device(name = 'aDevice')
empty_output = { 'execute.return_value' : '' }
# show ip aliases
golden_parsed_output1 = {
'vrf': {
'default': {
'index': {
1: {
'address_type': 'Interface',
'ip_address': '10.169.197.94',
},
2: {
'address_type': 'Interface',
'ip_address': '10.169.197.254',
},
3: {
'address_type': 'Interface',
'ip_address': '172.16.1.56',
},
4: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
5: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
6: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
7: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
8: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
9: {
'address_type': 'Interface',
'ip_address': '192.168.144.254',
},
},
},
},
}
golden_output1 = { 'execute.return_value':
'''
show ip aliases
Address Type IP Address Port
Interface 10.169.197.94
Interface 10.169.197.254
Interface 172.16.1.56
Interface 192.168.10.254
Interface 192.168.10.254
Interface 192.168.10.254
Interface 192.168.10.254
Interface 192.168.10.254
Interface 192.168.144.254
'''
}
# show ip aliases default-vrf
golden_parsed_output2 = {
'vrf': {
'default': {
'index': {
1: {
'address_type': 'Interface',
'ip_address': '10.169.197.94',
},
2: {
'address_type': 'Interface',
'ip_address': '10.169.197.254',
},
3: {
'address_type': 'Interface',
'ip_address': '172.16.1.56',
},
4: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
5: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
6: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
7: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
8: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
9: {
'address_type': 'Interface',
'ip_address': '192.168.144.254',
},
},
},
},
}
golden_output2 = { 'execute.return_value':
'''
show ip aliases default-vrf
Address Type IP Address Port
Interface 10.169.197.94
Interface 10.169.197.254
Interface 172.16.1.56
Interface 192.168.10.254
Interface 192.168.10.254
Interface 192.168.10.254
Interface 192.168.10.254
Interface 192.168.10.254
Interface 192.168.144.254
'''
}
# show ip aliases vrf {vrf}
golden_parsed_output3 = {
'vrf': {
'L3VPN-1538': {
'index': {
1: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
},
},
},
}
golden_output3 = { 'execute.return_value':
'''
show ip aliases vrf L3VPN-1538
Address Type IP Address Port
Interface 192.168.10.254
'''
}
def test_empty(self):
self.maxDiff = None
self.device = Mock(**self.empty_output)
obj = ShowIPAlias(device = self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden1(self):
self.maxDiff = None
self.device = Mock(**self.golden_output1)
obj = ShowIPAlias(device = self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output1)
def test_golden2(self):
self.maxDiff = None
self.device = Mock(**self.golden_output2)
obj = ShowIPAliasDefaultVrf(device = self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output2)
def test_golden3(self):
self.maxDiff = None
self.device = Mock(**self.golden_output3)
obj = ShowIPAlias(device = self.device)
parsed_output = obj.parse(vrf = 'L3VPN-1538')
self.assertEqual(parsed_output, self.golden_parsed_output3)
if __name__ == '__main__':
unittest.main()
|
[
"karmoham@cisco.com"
] |
karmoham@cisco.com
|
853b5efb4360f589ed00f617b2c4e5f5ad8dc3c6
|
c49849e8c0234ab60d4c5c17233b84ae63932074
|
/src/decision-trees.py
|
d3e57413bfcf73273372e63481c02ec9d78856e9
|
[] |
no_license
|
AlexisDrch/Machine-Learning
|
97a79c15087765ac97b8693c39f42807255d2a22
|
f60cf4147f38a900dd606bb1c07e986a6c72d262
|
refs/heads/master
| 2021-04-06T09:11:48.926794
| 2018-03-12T09:02:32
| 2018-03-12T09:02:32
| 124,861,464
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,983
|
py
|
# coding: utf-8
from utils import *
# ### 1. Decision Trees
# In[18]:
clf_dt = tree.DecisionTreeClassifier(criterion='gini') # explain use of gini and information gain
# #### 1. Balancing wine dataset
# In[19]:
cv = StratifiedKFold(n_splits=10)
title = 'Learning Curve - Decision Tree - wine imbalanced'
plt, score = plot_learning_curve(clf_dt, title, X2, y2, ylim=(0.3, 1.01), cv=cv, n_jobs=4)
plt.savefig('./output/dtree-wine-imbalanced.png')
plt.show()
title = 'Learning Curve - Decision Tree - wine balanced'
plt,score= plot_learning_curve(clf_dt, title, X2_up, y2_up, ylim=(0.3, 1.01), cv=cv, n_jobs=4)
plt.savefig('./output/dtree-wine-balanced.png')
plt.show()
# In[20]:
# on Pima
title = 'Learning Curve - Decision Tree - pima '
plt,score = plot_learning_curve(clf_dt, title, X1, y1, ylim=(0.3, 1.01), cv=cv, n_jobs=4)
plt.savefig('./output/dtree-pima.png')
plt.show()
# #### 2. Parameters tuning.
# In[42]:
# for pima
max_d = 30
title = " Validation Curve - max_depth - pima "
xlabel = "max_depth"
ylabel = "Score"
clf_dt.fit(X1, y1)
valid_curve_dt_pima, pima_dt_score, best_param = plot_validation_curve(clf_dt, title, xlabel, ylabel,X1, y1, param_name = 'max_depth', ylim=None,
cv = cv, n_jobs = 1, param_range = np.arange(1, max_d))
valid_curve_dt_pima.savefig('./output/valid_curve_dt_pima.png')
valid_curve_dt_pima.show()
print("Best score for pima is " + str(pima_dt_score) + ", max_depth = " + str(best_param))
# for wine
title = " Validation Curve - max_depth - wine "
clf_dt.fit(X2_up, y2_up)
valid_curve_dt_wine, wine_dt_score, best_param = plot_validation_curve(clf_dt, title, xlabel, ylabel,X2_up, y2_up, param_name = 'max_depth', ylim=None,
cv = cv, n_jobs = 1, param_range = np.arange(1, max_d))
valid_curve_dt_wine.savefig('./output/valid_curve_dt_wine.png')
valid_curve_dt_wine.show()
print("Best score for wine is " + str(wine_dt_score) + ", max_depth = " + str(best_param))
|
[
"aleksi.durocher@wanadoo.fr"
] |
aleksi.durocher@wanadoo.fr
|
b73af01c3a4ae24080e795dfdfa6fc5f0dded805
|
9e1bda53da4c5e98190f5f25235f528d692ee5a8
|
/.history/my_app/forms_20210405180446.py
|
9fd1f9a4b32ae169cb9e3865f612d88a43ae36d9
|
[] |
no_license
|
Jumayev-A/Project-3
|
3d373181af6a87e3fe319a13d28fcd18941167b7
|
34ddd009726cbba9ae52e74a46d554fd735566e2
|
refs/heads/main
| 2023-06-10T11:02:06.446151
| 2021-07-07T06:19:11
| 2021-07-07T06:19:11
| 350,375,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
from django import forms
from my_app.models import BlogModel
class BlogForm(forms.ModelForm):
class Meta:
model = BlogModel
fields = ['title','description']
|
[
"abdy.jumayev@gmail.com"
] |
abdy.jumayev@gmail.com
|
8f8f52e2e5ddc47176d85bf7b051d523a6670890
|
5dd03f9bd8886f02315c254eb2569e4b6d368849
|
/tests/python/twitter/common/string/test_basic_scanf.py
|
00b280c41ad6bed4daf5a43c6fffc5bfa45b65b2
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
adamsxu/commons
|
9e1bff8be131f5b802d3aadc9916d5f3a760166c
|
9fd5a4ab142295692994b012a2a2ef3935d35c0b
|
refs/heads/master
| 2021-01-17T23:13:51.478337
| 2012-03-11T17:30:24
| 2012-03-11T17:30:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,569
|
py
|
import pytest
import unittest
from twitter.common.string.scanf import ScanfParser
def almost_equal(a, b, digits=7):
return abs(a-b) < 0.1**digits
def basic_scanf(fmt, string, extra=False):
formatter = ScanfParser(fmt)
result = formatter.parse(string, extra)
assert len(result.ungrouped()) == 1
return result.ungrouped()[0]
def test_bad_input():
conversions = ScanfParser.CONVERSIONS.keys()
bad_stuff = [
" a", " 1", " +",
"a ", "1 ", "+ ",
]
garbage_stuff = [
0, 1, None, dir, [], {}, (), type
]
for c in conversions:
for b in bad_stuff:
with pytest.raises(ScanfParser.ParseError):
basic_scanf(c, b)
for b in garbage_stuff:
with pytest.raises(TypeError):
basic_scanf(c, b)
def test_no_matches():
match = ScanfParser("%%").parse("%")
assert len(match.groups()) == 0
assert len(match.ungrouped()) == 0
test_strings = ["a", " ", "hello hello", "1.0 hello nothing to see here move along", ""]
for t_s in test_strings:
match = ScanfParser(t_s).parse(t_s)
assert len(match.groups()) == 0
assert len(match.ungrouped()) == 0
def test_garbage_formats():
garbage_input = [0, 1, None, dir, [], {}, (), type]
for garbage in garbage_input:
with pytest.raises(TypeError):
ScanfParser(garbage)
def test_special_characters():
special_stuffs = [
(')', '('),
('(', ')'), ('[', ']'), ('{', '}'),
('(', ')+'),
('(|', ')'),
('{,', '}'),
('$', '^'), ('^', '$'),
(' ', '+'), (' ', '*'), (' ', '?')
]
for before, after in special_stuffs:
assert basic_scanf(before+'%c'+after, before+'a'+after) == 'a'
assert basic_scanf(before+'%c'+after, before+u'a'+after) == 'a'
assert basic_scanf(before+'%c'+after, before+' '+after) == ' '
def test_character_conversion():
assert basic_scanf('%c', 'a') == 'a'
assert basic_scanf('%c', u'a') == 'a'
assert basic_scanf('%c', ' ') == ' '
def test_integer_conversion():
for conversion in ('%d', '%ld', '%lld'):
assert basic_scanf(conversion, '1') == 1
assert basic_scanf(conversion, '01') == 1
assert basic_scanf(conversion, '+01') == 1
assert basic_scanf(conversion, '-01') == -1
def test_failing_integer_conversion():
with pytest.raises(ScanfParser.ParseError):
basic_scanf('%d', "\x90")
with pytest.raises(ScanfParser.ParseError):
basic_scanf('%d', "x")
with pytest.raises(ScanfParser.ParseError):
basic_scanf('%d', "hello")
def test_long_conversion():
for conversion in ('%u', '%lu', '%llu'):
assert basic_scanf(conversion, '1') == 1
assert basic_scanf(conversion, '01') == 1
def test_float_conversion():
factor_tests = {
'': 1.0,
'e-0': 1.0,
'e-1': 0.1,
'e+1': 10.0,
'e1': 10.0,
'e0': 1.0,
'e5': 1.e5,
}
for exponent, xfactor in factor_tests.items():
assert almost_equal(basic_scanf('%f', '0' + exponent), 0 * xfactor)
assert almost_equal(basic_scanf('%f', '.1' + exponent), .1 * xfactor)
assert almost_equal(basic_scanf('%f', '2.' + exponent), 2 * xfactor)
assert almost_equal(basic_scanf('%f', '3.4' + exponent), 3.4 * xfactor)
assert almost_equal(basic_scanf('%f', '-.5' + exponent), -0.5 * xfactor)
def test_string_conversion():
for st in ('a', u'a', '123', u'123', 'a\x12\x23'):
assert basic_scanf('%s', st) == st
assert basic_scanf('%s', '\x00') == ''
def test_extra_stuff():
extra_stuff = [ ' ', ' a', ' a b', ' $']
for extra in extra_stuff:
for st in ('a', u'a', '123', u'123', 'a\x12\x23'):
assert basic_scanf('%s', st+extra, extra=True) == st
|
[
"jsirois@twitter.com"
] |
jsirois@twitter.com
|
087e5477ba0dc7f53e31d552937861e0ef8d456b
|
5831b0293cbb6f9e0660ac4ec952cbdb047d051d
|
/tests/test_corpus.py
|
f045ffdac25a7a65fc7e1b85f1b460d32a078e9a
|
[
"Apache-2.0"
] |
permissive
|
mdlynch37/textacy
|
03e3287fd8ee8bd4d06e48b7b87edf8324a987e5
|
c1c7376a84a62faeee496e9b8cc2a29edc28c7d1
|
refs/heads/master
| 2021-01-20T09:29:54.627035
| 2017-12-04T05:31:14
| 2017-12-04T05:31:14
| 101,596,726
| 0
| 0
| null | 2017-08-28T02:36:30
| 2017-08-28T02:36:30
| null |
UTF-8
|
Python
| false
| false
| 3,565
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import shutil
import tempfile
import unittest
from textacy import Corpus
from textacy import Doc
from textacy import cache
from textacy import compat
from textacy import fileio
from textacy.datasets.capitol_words import CapitolWords
DATASET = CapitolWords()
@unittest.skipUnless(
DATASET.filename, 'CapitolWords dataset must be downloaded before running tests')
class CorpusInitTestCase(unittest.TestCase):
    """Constructor behavior of `Corpus`: language handling and the three
    content arguments (texts, texts+metadatas, docs)."""
    def test_corpus_init_lang(self):
        """Both a lang code and a loaded spacy pipeline work; bytes/None raise."""
        self.assertIsInstance(Corpus('en'), Corpus)
        self.assertIsInstance(Corpus(cache.load_spacy('en')), Corpus)
        for bad_lang in (b'en', None):
            with self.assertRaises(TypeError):
                Corpus(bad_lang)
    def test_corpus_init_texts(self):
        """Docs built from bare texts all share the corpus-wide spacy vocab."""
        limit = 3
        corpus = Corpus('en', texts=DATASET.texts(limit=limit))
        self.assertEqual(len(corpus.docs), limit)
        self.assertTrue(
            all(doc.spacy_vocab is corpus.spacy_vocab for doc in corpus))
    def test_corpus_init_texts_and_metadatas(self):
        """Parallel texts/metadatas streams end up zipped onto docs in order."""
        limit = 3
        texts, metadatas = fileio.split_record_fields(
            DATASET.records(limit=limit), 'text')
        # materialize the generators so they can also be compared afterwards
        texts = list(texts)
        metadatas = list(metadatas)
        corpus = Corpus('en', texts=texts, metadatas=metadatas)
        self.assertEqual(len(corpus.docs), limit)
        self.assertTrue(
            all(doc.spacy_vocab is corpus.spacy_vocab for doc in corpus))
        for i in range(limit):
            self.assertEqual(texts[i], corpus[i].text)
            self.assertEqual(metadatas[i], corpus[i].metadata)
    def test_corpus_init_docs(self):
        """Pre-built Docs keep their metadata; explicit metadatas win over it."""
        limit = 3
        texts, metadatas = fileio.split_record_fields(
            DATASET.records(limit=limit), 'text')
        docs = [Doc(text, lang='en', metadata=metadata)
                for text, metadata in zip(texts, metadatas)]
        corpus = Corpus('en', docs=docs)
        self.assertEqual(len(corpus.docs), limit)
        self.assertTrue(
            all(doc.spacy_vocab is corpus.spacy_vocab for doc in corpus))
        for i in range(limit):
            self.assertEqual(corpus[i].metadata, docs[i].metadata)
        corpus = Corpus(
            'en', docs=docs, metadatas=({'foo': 'bar'} for _ in range(limit)))
        for i in range(limit):
            self.assertEqual(corpus[i].metadata, {'foo': 'bar'})
class CorpusMethodsTestCase(unittest.TestCase):
    """Round-trip persistence of a small Corpus via save()/load()."""
    def setUp(self):
        # build a 3-doc corpus in a throwaway dir next to this test file
        self.tempdir = tempfile.mkdtemp(
            prefix='test_corpus', dir=os.path.dirname(os.path.abspath(__file__)))
        texts, metadatas = fileio.split_record_fields(
            DATASET.records(limit=3), 'text')
        self.corpus = Corpus('en', texts=texts, metadatas=metadatas)
    def test_corpus_save_and_load(self):
        """A loaded corpus preserves length, lang, pipeline names, metadata."""
        filepath = os.path.join(self.tempdir, 'test_corpus_save_and_load.pkl')
        self.corpus.save(filepath)
        new_corpus = Corpus.load(filepath)
        self.assertIsInstance(new_corpus, Corpus)
        self.assertEqual(len(new_corpus), len(self.corpus))
        self.assertEqual(new_corpus.lang, self.corpus.lang)
        self.assertEqual(
            new_corpus.spacy_lang.pipe_names,
            self.corpus.spacy_lang.pipe_names)
        # NOTE(review): presumably 'spacy_lang_meta' is only a transient
        # serialization artifact that load() must clear — confirm against
        # Corpus.save/load.
        self.assertIsNone(
            new_corpus[0].spacy_doc.user_data['textacy'].get('spacy_lang_meta'))
        for i in range(len(new_corpus)):
            self.assertEqual(new_corpus[i].metadata, self.corpus[i].metadata)
    def tearDown(self):
        shutil.rmtree(self.tempdir)
|
[
"burton@chartbeat.com"
] |
burton@chartbeat.com
|
3439f18b0ee4568641def717417ce8bf67b35fa8
|
0141361f7c4d276f471ac278580479fa15bc4296
|
/Greedy/videoStitching.py
|
35e26476c753b3396c97ac391d7f749443758fc4
|
[] |
no_license
|
tr1503/LeetCode
|
a7f2f1801c9424aa96d3cde497290ac1f7992f58
|
6d361cad2821248350f1d8432fdfef86895ca281
|
refs/heads/master
| 2021-06-24T19:03:08.681432
| 2020-10-09T23:53:22
| 2020-10-09T23:53:22
| 146,689,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
class Solution:
    def videoStitching(self, clips: List[List[int]], T: int) -> int:
        """Minimum number of clips needed to cover [0, T], or -1 if impossible.

        Greedy over clips sorted by start time: `reach` is the furthest
        point coverable with the clips already counted, `frontier` the
        furthest point coverable if one more clip is taken.
        """
        reach, frontier = -1, 0
        count = 0
        for start, finish in sorted(clips):
            if frontier >= T or start > frontier:
                # either [0, T] is already covered, or there is a gap
                # no remaining clip can bridge
                break
            if reach < start <= frontier:
                # this clip forces committing one more clip
                count += 1
                reach = frontier
            frontier = max(frontier, finish)
        return count if frontier >= T else -1
|
[
"noreply@github.com"
] |
tr1503.noreply@github.com
|
5eb1b4c72a607c8c34436d80e1ae1bdf17b45c32
|
c55aedc3479a4d311fb406d8133b0e0ceb99d2df
|
/example/new_system_app/1_BFS_aug.py
|
c36ba8e8a01e7f60215e6fb316506cd4a9217873
|
[] |
no_license
|
tarwcz111111111/DashCam_python
|
4a33cdb3e5a8368b81ddc7c0596d4f0802b7c9d6
|
6e025ff49261c146205eb56bbbf4175f1d413f54
|
refs/heads/master
| 2020-08-25T04:55:16.695561
| 2017-08-28T04:34:59
| 2017-08-28T04:34:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,206
|
py
|
#!/usr/bin/python2
# ==============================================================
# Step.0
# Fetch the Google data
# ==============================================================
import sys
sys.path.append('/home/andy/Documents/gitHub/DashCam_python/module') # use the module under 'module'
import file_process
import google_store
# Create PanoFetcher
# panorama zoom level and BFS search radius for the fetcher
zoom, radius = 1, 30
panoFetcher = google_store.PanoFetcher(zoom, radius)
# Create dashCamFileProcess and load 50 top Dashcam
dashCamFileProcess = file_process.DashCamFileProcessor()
# Select one of the fileName among the 50 selected files
# (range(3, 4) processes only index 3 of the 50-entry list)
for i in range(3, 4):
    index = i
    # list50[index][1] holds the dash-cam file identifier
    fileID = str(dashCamFileProcess.list50[index][1])
    print(fileID, index)
    """
    # 1. use info_3d pathPoint
    """
    pathPoint_set_info3d = dashCamFileProcess.get_path_info3d(file_id=fileID)
    #print(pathPoint_set_info3d)
    #panoFetcher.info_3d(fileID, pathPoint_set_info3d)
    """
    # 2. use BFS
    # Here use the first point in info_3d
    """
    lat, lon = None, None
    # take only the FIRST "lat,lon" path point as the BFS seed
    for pathPoint in pathPoint_set_info3d:
        print(pathPoint)
        [lat, lon] = pathPoint.split(',')
        break
    # NOTE(review): 15 is presumably the BFS depth/step budget — confirm
    # against google_store.PanoFetcher.bfs_aug.
    panoFetcher.bfs_aug(fileID, (lat, lon), 15)
|
[
"ydnaandy123@gmail.com"
] |
ydnaandy123@gmail.com
|
d6ed4bf04f30d4d1b1b82eeb248dc600ede27fd9
|
eae6dddca9285702c4c7ed6ba6bdaceef9631df2
|
/CCC-2018/Junior/Junior-1/J1.py
|
475041d11174a226860f6028287db67757ce1cb8
|
[] |
no_license
|
simrit1/CCC-Solutions-2
|
7823ce14801c4219f6f1dd4c42fb013c2dfc45dd
|
ee2883aa38f933e526ce187d50ca68763876cb58
|
refs/heads/master
| 2023-07-04T02:19:37.320261
| 2021-08-07T22:12:36
| 2021-08-07T22:12:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
# CCC 2018 Junior 1: Telemarketer or not?
#
# Author: Charles Chen
#
# A call is from a telemarketer when the first and last digits are each
# 8 or 9 and the two middle digits are equal.
first, second, third, fourth = (int(input()) for _ in range(4))
if first in (8, 9) and second == third and fourth in (8, 9):
    print("ignore")
else:
    print("answer")
|
[
"noreply@github.com"
] |
simrit1.noreply@github.com
|
4c62fa5221d7e7acc4a6d1fd239915dd62942b79
|
0c66e605e6e4129b09ea14dbb6aa353d18aaa027
|
/diventi/landing/migrations/0079_auto_20200215_2305.py
|
d85a68046299dd8b76f851e1157494274efd585e
|
[
"Apache-2.0"
] |
permissive
|
flavoi/diventi
|
58fbc8c947f387cbcc1ce607878a59a6f2b72313
|
c0b1efe2baa3ff816d6ee9a8e86623f297973ded
|
refs/heads/master
| 2023-07-20T09:32:35.897661
| 2023-07-11T19:44:26
| 2023-07-11T19:44:26
| 102,959,477
| 2
| 1
|
Apache-2.0
| 2023-02-08T01:03:17
| 2017-09-09T14:10:51
|
Python
|
UTF-8
|
Python
| false
| false
| 522
|
py
|
# Generated by Django 2.2.10 on 2020-02-15 22:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: narrows `Section.template` to two template choices.

    NOTE: migration files are part of Django's applied-migration history;
    do not hand-edit the operations once this has been applied anywhere.
    """
    dependencies = [
        ('landing', '0078_auto_20200215_1909'),
    ]
    operations = [
        migrations.AlterField(
            model_name='section',
            name='template',
            field=models.CharField(choices=[('standard_section.html', 'standard section'), ('cards_section.html', 'cards section')], max_length=50, verbose_name='standard template'),
        ),
    ]
|
[
"flavius476@gmail.com"
] |
flavius476@gmail.com
|
53bfd78a3bc2f711a4860ef3c45e69b66823e37c
|
b57b0a14df5c6841f04cccb7b02ad04afbca18f8
|
/avi_vantage/datadog_checks/avi_vantage/config_models/instance.py
|
017f57d0d5fea29b61a601bec35b7ad9e169c9c1
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"AFL-3.0",
"BSD-3-Clause-Modification",
"LGPL-3.0-only",
"Unlicense",
"LGPL-2.1-only",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
zeroc0d3/integrations-core
|
d9c99803c049668b7f9f9c796d338e343d3d46ee
|
634d567f3c38d32aabb3f4c16b50bcfa8a4ae0fb
|
refs/heads/master
| 2021-09-28T18:37:00.650406
| 2021-09-13T11:59:45
| 2021-09-13T11:59:45
| 199,758,958
| 0
| 0
|
BSD-3-Clause
| 2019-07-31T02:01:25
| 2019-07-31T02:01:24
| null |
UTF-8
|
Python
| false
| false
| 4,652
|
py
|
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from __future__ import annotations
from typing import Any, Literal, Mapping, Optional, Sequence, Union
from pydantic import BaseModel, Extra, Field, root_validator, validator
from datadog_checks.base.utils.functions import identity
from datadog_checks.base.utils.models import validation
from . import defaults, validators
class AuthToken(BaseModel):
    """`auth_token` option: reader/writer sub-configs (free-form mappings)."""
    class Config:
        allow_mutation = False
    reader: Optional[Mapping[str, Any]]
    writer: Optional[Mapping[str, Any]]
class ExtraMetric(BaseModel):
    """One `extra_metrics` entry in mapping form; extra keys are allowed."""
    class Config:
        extra = Extra.allow
        allow_mutation = False
    name: Optional[str]
    type: Optional[str]
class Metric(BaseModel):
    """One `metrics` entry in mapping form; extra keys are allowed."""
    class Config:
        extra = Extra.allow
        allow_mutation = False
    name: Optional[str]
    type: Optional[str]
class Proxy(BaseModel):
    """`proxy` option: http/https proxy URLs and hosts that bypass them."""
    class Config:
        allow_mutation = False
    http: Optional[str]
    https: Optional[str]
    no_proxy: Optional[Sequence[str]]
class ShareLabel(BaseModel):
    """One `share_labels` value: label names to share and match criteria."""
    class Config:
        allow_mutation = False
    labels: Optional[Sequence[str]]
    match: Optional[Sequence[str]]
class InstanceConfig(BaseModel):
    """Validated `instances[]` entry for the Avi Vantage check.

    Auto-generated config model: field names mirror the keys accepted in
    conf.yaml. Regenerate with the config_models tooling rather than
    editing by hand. Only `avi_controller_url` is required.
    """
    class Config:
        allow_mutation = False
    auth_token: Optional[AuthToken]
    auth_type: Optional[str]
    avi_controller_url: str
    aws_host: Optional[str]
    aws_region: Optional[str]
    aws_service: Optional[str]
    cache_metric_wildcards: Optional[bool]
    cache_shared_labels: Optional[bool]
    collect_counters_with_distributions: Optional[bool]
    collect_histogram_buckets: Optional[bool]
    connect_timeout: Optional[float]
    disable_generic_tags: Optional[bool]
    empty_default_hostname: Optional[bool]
    enable_health_service_check: Optional[bool]
    entities: Optional[Sequence[Literal['controller', 'pool', 'serviceengine', 'virtualservice']]]
    exclude_labels: Optional[Sequence[str]]
    exclude_metrics: Optional[Sequence[str]]
    exclude_metrics_by_labels: Optional[Mapping[str, Union[bool, Sequence[str]]]]
    extra_headers: Optional[Mapping[str, Any]]
    extra_metrics: Optional[Sequence[Union[str, Mapping[str, Union[str, ExtraMetric]]]]]
    headers: Optional[Mapping[str, Any]]
    histogram_buckets_as_distributions: Optional[bool]
    hostname_format: Optional[str]
    hostname_label: Optional[str]
    ignore_tags: Optional[Sequence[str]]
    kerberos_auth: Optional[str]
    kerberos_cache: Optional[str]
    kerberos_delegate: Optional[bool]
    kerberos_force_initiate: Optional[bool]
    kerberos_hostname: Optional[str]
    kerberos_keytab: Optional[str]
    kerberos_principal: Optional[str]
    log_requests: Optional[bool]
    metrics: Optional[Sequence[Union[str, Mapping[str, Union[str, Metric]]]]]
    min_collection_interval: Optional[float]
    namespace: Optional[str] = Field(None, regex='\\w+')
    non_cumulative_histogram_buckets: Optional[bool]
    ntlm_domain: Optional[str]
    openmetrics_endpoint: Optional[str]
    password: Optional[str]
    persist_connections: Optional[bool]
    proxy: Optional[Proxy]
    raw_line_filters: Optional[Sequence[str]]
    raw_metric_prefix: Optional[str]
    read_timeout: Optional[float]
    rename_labels: Optional[Mapping[str, Any]]
    request_size: Optional[float]
    service: Optional[str]
    share_labels: Optional[Mapping[str, Union[bool, ShareLabel]]]
    skip_proxy: Optional[bool]
    tags: Optional[Sequence[str]]
    telemetry: Optional[bool]
    timeout: Optional[float]
    tls_ca_cert: Optional[str]
    tls_cert: Optional[str]
    tls_ignore_warning: Optional[bool]
    tls_private_key: Optional[str]
    tls_use_host_header: Optional[bool]
    tls_verify: Optional[bool]
    use_latest_spec: Optional[bool]
    use_legacy_auth_encoding: Optional[bool]
    username: Optional[str]
    @root_validator(pre=True)
    def _initial_validation(cls, values):
        # shared pre-processing plus an optional check-specific hook
        return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values))
    @validator('*', pre=True, always=True)
    def _ensure_defaults(cls, v, field):
        # fill unset optional fields from the generated `defaults` module
        if v is not None or field.required:
            return v
        return getattr(defaults, f'instance_{field.name}')(field, v)
    @validator('*')
    def _run_validations(cls, v, field):
        # run the optional per-field validator hook on truthy values only
        if not v:
            return v
        return getattr(validators, f'instance_{field.name}', identity)(v, field=field)
    @root_validator(pre=False)
    def _final_validation(cls, values):
        return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values))
|
[
"noreply@github.com"
] |
zeroc0d3.noreply@github.com
|
9c3fde3ac2b31c11bd87657492d509c219cfb13d
|
601f604ea3eed7d106513a9d40b7df98a0cf8c95
|
/make-report.py
|
12318d9d79fba8c77d47cdde77c40b94df84d43f
|
[] |
no_license
|
exbracer/dynet-benchmark
|
53e892d55f26f075dfdaf43859d5c1af49c15a75
|
9cbc2d66ca20b7b8040a55a66d2024edd5bf5c42
|
refs/heads/master
| 2021-09-06T11:40:06.802647
| 2018-02-06T05:47:00
| 2018-02-06T05:47:00
| 120,275,569
| 0
| 0
| null | 2018-02-05T08:24:03
| 2018-02-05T08:24:03
| null |
UTF-8
|
Python
| false
| false
| 8,103
|
py
|
#!/usr/bin/env python
# This should be used as
# mkdir -p report
# grep '\(per_sec\|startup\)' log/*/*.log | python make-report.py
import sys
import re
from collections import defaultdict
# (task, device, toolkit, params, trial) -> {canonical stat: best value}
stats = defaultdict(lambda: {})
# same key -> every per-line stat dict seen, in file order
allstats = defaultdict(lambda: [])
##### Regexes
# Decomposes "log/<task>[-gpu]/<toolkit>-<params>-t<trial>.log:<payload>"
fnameregex = re.compile(r"log/([a-z-]+?)(-gpu|)/(dynet-py|dynet-cpp|dynet-seq|chainer|theano|tensorflow)-(.*?)-t([123]).log:(.*)")
startregex = re.compile(r"startup time: (.*)")
eqregex = re.compile(r"(.*)=(.*)")
# matches lines that are blank or pure #/// comments (used to strip them)
commentregex = re.compile(r"^ *((#|//).*)?")
##### Various data
# raw per-toolkit stat names -> the canonical names used in the tables
canonicalize = {
  "word_per_sec": "speed",
  "words_per_sec": "speed",
  "sent_per_sec": "speed",
  "nll": "accuracy",
  "tag_acc": "accuracy",
  "acc": "accuracy",
  "time": "time"
}
# (toolkit, task) pairs with no implementation; rendered as "-" cells
taskna = {
  ("tensorflow", "bilstm-tagger-withchar"): 1,
  ("tensorflow", "treenn"): 1,
  ("theano", "treenn"): 1,
  ("dynet-seq", "bilstm-tagger"): 1,
  ("dynet-seq", "bilstm-tagger-withchar"): 1,
  ("dynet-seq", "treenn"): 1,
}
# column order of every table
toolkits = ["dynet-cpp", "dynet-py", "chainer", "dynet-seq", "theano", "tensorflow"]
# toolkit id -> LaTeX column header
prettyname = {
  "dynet-cpp": "DyC++",
  "dynet-py": "DyPy",
  "dynet-seq": "DyC++ Seq",
  "tensorflow":"TF",
  "chainer": "Chainer",
  "theano": "Theano"
}
##### Load from log files
# Parse the grep'ed "<logfile>:<payload>" lines from stdin into `stats`.
for line in sys.stdin:
    # rnnlm-seq logs are dynet-cpp runs reported under the dynet-seq column
    line = line.replace("rnnlm-seq/dynet-cpp", "rnnlm-batch/dynet-seq")
    line = line.replace("rnnlm-seq-gpu/dynet-cpp", "rnnlm-batch-gpu/dynet-seq")
    m = re.search(fnameregex, line.strip())
    if m:
        task = m.group(1)
        device = "gpu" if m.group(2) == "-gpu" else "cpu"
        toolkit = m.group(3)
        params = m.group(4)
        trial = int(m.group(5))
        idtup = (task, device, toolkit, params, trial)
        data = m.group(6)
        m = re.search(startregex, data)
        if m:
            # "startup time: X" lines carry a single float
            stats[idtup]["startup"] = float(m.group(1))
        else:
            # otherwise the payload is a comma-separated list of key=value
            mystats = {}
            for val in data.split(", "):
                m = re.search(eqregex, val)
                if not m:
                    print("unmatched line: %s" % line)
                    sys.exit(1)
                if m.group(1) in canonicalize:
                    can = canonicalize[m.group(1)]
                    val = float(m.group(2))
                    mystats[can] = val
                    if can == "accuracy":
                        # tagger accuracies are fractions -> percent;
                        # LM "accuracy" is an nll, negated so bigger == better
                        if "rnnlm" not in task: val *= 100
                        else: val *= -1
                        # keep the best value across all lines of the run
                        stats[idtup][can] = max(val, stats[idtup].get(can,-1e10))
                    else:
                        stats[idtup][can] = val
            allstats[idtup].append(mystats)
    else:
        print("unmatched line: %s" % line)
        sys.exit(1)
# print(stats)
# def format_num(num):
# if num > 1e6:
# return "%.03gM" % (float(num)/1e6)
# elif num > 1e3:
# return "%.03gk" % (float(num)/1e3)
# else:
# return "%.03g" % float(num)
# TODO: There must be a better way to do this...
def format_num(num):
    """Format *num* to ~3 significant figures without scientific notation.

    1 <= x < 10   -> two decimals        ("2.50")
    10 <= x < 100 -> one decimal         ("12.3")
    x >= 1000     -> rounded integer     ("1230")
    otherwise     -> plain "%.03g"
    """
    fnum = float(num)
    val = "%.03g" % fnum
    if 1 <= fnum < 10:
        val = "%.2f" % fnum
    elif 10 <= fnum < 100:
        val = "%.1f" % fnum
    elif fnum >= 1000:
        # was `> 1000`, which left exactly 1000 rendered as "1e+03";
        # >= keeps the whole [1000, inf) range in integer form
        val = "%.f" % float(val)
    return val
def getmaxstat(task, device, toolkit, setting, stat, mult=1):
    """Best value of *stat* over the three trials, formatted for LaTeX.

    *mult* of -1 turns the max into a min (see getminstat). Returns the
    string "TODO" when no trial recorded the stat.
    """
    values = []
    for trial in (1, 2, 3):
        trial_stats = stats.get((task, device, toolkit, setting, trial))
        if trial_stats is not None and stat in trial_stats:
            values.append(mult * trial_stats[stat])
    if not values:
        return "TODO"
    return format_num(mult * max(values))
def getminstat(task, device, toolkit, setting, stat):
    """Minimum-of-trials counterpart of getmaxstat (negate, max, negate)."""
    return getmaxstat(task, device, toolkit, setting, stat, mult=-1)
###### First section: toolkit comparison
# CPU/GPU speeds for all toolkits/tasks
# (row label, task directory, hyper-parameter setting) rows of the speed table
tasks = [
  ("RNNLM (MB=1) ", "rnnlm-batch", "ms01-es128-hs256-sp0"),
  ("RNNLM (MB=4)", "rnnlm-batch", "ms04-es128-hs256-sp0"),
  ("RNNLM (MB=16)", "rnnlm-batch", "ms16-es128-hs256-sp0"),
  ("RNNLM (MB=64)", "rnnlm-batch", "ms64-es128-hs256-sp0"),
  ("BiLSTM Tag", "bilstm-tagger", "ws128-hs50-mlps32-su0"),
  ("BiLSTM Tag +sparse", "bilstm-tagger", "ws128-hs50-mlps32-su1"),
  ("BiLSTM Tag+Char", "bilstm-tagger-withchar", "cs20-ws128-hs50-mlps32-su0"),
  ("BiLSTM Tag+Char +sparse", "bilstm-tagger-withchar", "cs20-ws128-hs50-mlps32-su1"),
  ("TreeLSTM", "treenn", "ws128-hs128-su0"),
  ("TreeLSTM +sparse", "treenn", "ws128-hs128-su1"),
]
def make_speed_table(device):
    """Print a LaTeX table of per-toolkit speeds for *device* ("cpu"/"gpu")."""
    print("\\begin{table}")
    print("\\begin{tabular}{c|rrr|rrr}")
    header = " & ".join(prettyname[t] for t in toolkits)
    print(" & " + header + " \\\\ \hline")
    for label, task, setting in tasks:
        row = [label]
        for toolkit in toolkits:
            if (toolkit, task) in taskna:
                # no implementation for this combination
                row.append("\\multicolumn{1}{c}{-}")
            else:
                row.append(getmaxstat(task, device, toolkit, setting, "speed"))
        print(" & ".join(row) + " \\\\")
    print("\\end{tabular}")
    print("\\caption{Processing speed for each toolkit on %s. Speeds are measured in words/sec for RNNLM and Tagger and sentences/sec for TreeLSTM.}" % device.upper())
    print("\\label{tab:speeds%s}" % device)
    print("\\end{table}")
    print("")
# Emit the CPU and GPU speed tables.
make_speed_table("cpu")
make_speed_table("gpu")
# Startup time table
tasks = [
  ("RNNLM", "rnnlm-batch", "ms01-es128-hs256-sp0"),
  ("BiLSTM Tag", "bilstm-tagger", "ws128-hs50-mlps32-su0"),
  ("BiLSTM Tag+Char", "bilstm-tagger-withchar", "cs20-ws128-hs50-mlps32-su0"),
  ("TreeLSTM", "treenn", "ws128-hs128-su0"),
]
print("\\begin{table}")
print("\\begin{tabular}{c|rrr|rrr}")
print(" & "+" & ".join([prettyname[x] for x in toolkits])+" \\\\ \hline")
for name, task, setting in tasks:
    cols = [name]
    # NOTE(review): 'device' is never set in this loop — it is the stale
    # module-level value left over from the log-parsing loop above
    # (whichever log line was parsed last). Looks unintended; confirm
    # which device the startup table should report.
    for i, toolkit in enumerate(toolkits):
        if (toolkit, task) in taskna:
            cols.append("\\multicolumn{1}{c}{-}")
        else:
            cols.append(getminstat(task, device, toolkit, setting, "startup"))
    print(" & ".join(cols)+" \\\\")
print("\\end{tabular}")
print("\\caption{Startup time for programs written in each toolkit.}")
print("\\label{tab:startup}")
print("\\end{table}")
print("")
# Code complexities
def get_code_complexity(toolkit, task):
    """Non-comment character count of an implementation, or a LaTeX '-' cell."""
    if toolkit == "dynet-seq":
        # the sequence variant exists only for the RNNLM task, and lives
        # in the dynet-cpp directory under a different file name
        if task != "rnnlm-batch":
            return "\\multicolumn{1}{c}{-}"
        toolkit, task = "dynet-cpp", "rnnlm-seq"
    if (toolkit, task) in taskna:
        return "\\multicolumn{1}{c}{-}"
    ext = "cc" if toolkit == "dynet-cpp" else "py"
    total = 0
    with open("%s/%s.%s" % (toolkit, task, ext), "r") as f:
        for raw in f:
            # strip full-line and trailing comments before counting
            total += len(re.sub(commentregex, "", raw.strip()))
    return str(total)
# Code-complexity table: one row per task, one column per toolkit.
tasks = [
  ("RNNLM", "rnnlm-batch"),
  ("BiLSTM Tag", "bilstm-tagger"),
  ("BiLSTM Tag+Char", "bilstm-tagger-withchar"),
  ("TreeLSTM", "treenn"),
]
print("\\begin{table}")
print("\\begin{tabular}{c|rrrrrr}")
print(" & "+" & ".join([prettyname[x] for x in toolkits])+" \\\\ \hline")
for name, task in tasks:
    cols = [name]
    for i, toolkit in enumerate(toolkits):
        cols.append(get_code_complexity(toolkit, task))
    print(" & ".join(cols)+" \\\\")
print("\\end{tabular}")
print("\\caption{Number of non-comment characters in the implementation of each toolkit.}")
print("\\label{tab:complexity}")
print("\\end{table}")
print("")
###### Second section: effect of minibatching and net size
###### Third section: effect of sparse update
# Dense-vs-sparse update table: setting strings end in "sp"/"su" so the
# dense/sparse flag ("0"/"1") can be appended per cell below.
tasks = [
  ("RNNLM (MB=1) ", "rnnlm-batch", "ms01-es128-hs256-sp"),
  ("RNNLM (MB=16)", "rnnlm-batch", "ms16-es128-hs256-sp"),
  ("BiLSTM Tag", "bilstm-tagger", "ws128-hs50-mlps32-su"),
  ("BiLSTM Tag+Char", "bilstm-tagger-withchar", "cs20-ws128-hs50-mlps32-su"),
  ("TreeLSTM", "treenn", "ws128-hs128-su"),
]
print("\\begin{table}")
print("\\begin{tabular}{c|rr|rr|rr|rr}")
print(" & \\multicolumn{4}{c|}{Speed} & \\multicolumn{4}{c}{Accuracy} \\\\")
print(" & \\multicolumn{2}{c|}{Dense} & \\multicolumn{2}{c|}{Sparse} & \\multicolumn{2}{c|}{Dense} & \\multicolumn{2}{c}{Sparse} \\\\")
print(" & "+" & ".join(["CPU & GPU"] * 4)+" \\\\ \\hline")
for name, task, setting in tasks:
    cols = [name]
    # column order: speed dense cpu/gpu, speed sparse cpu/gpu, then accuracy
    for criterion in ("speed", "accuracy"):
        for ds in ("0", "1"):
            for device in ("cpu", "gpu"):
                cols.append(getmaxstat(task, device, "dynet-cpp", setting+ds, criterion))
    print(" & ".join(cols)+" \\\\")
print("\\end{tabular}")
print("\\caption{Processing speed and accuracy after 10 minutes with dense or sparse updates.}")
print("\\label{tab:sparseresults}")
print("\\end{table}")
print("")
|
[
"neubig@gmail.com"
] |
neubig@gmail.com
|
468730321e33a424ae9e609b460c46656deba515
|
63ae3faa596333fa89ed6059332ed956b6dd3ae1
|
/career/models/base.py
|
518e8700dd9167d358c289a74f2d9d65c6953a78
|
[] |
no_license
|
wyzane/tornado-skill-general
|
28470d786dc949a2616143b0b35e593d90f2fdc4
|
915ec990a20b2bb76d56c040cade57a7627d0ebe
|
refs/heads/master
| 2020-06-13T13:43:02.401369
| 2019-07-28T08:20:17
| 2019-07-28T08:20:17
| 194,675,732
| 0
| 0
| null | 2019-07-28T08:20:18
| 2019-07-01T13:14:11
|
Python
|
UTF-8
|
Python
| false
| false
| 432
|
py
|
# from sqlalchemy import Table, MetaData
# from sqlalchemy.ext.declarative import declarative_base
#
# from config.db import engine_db_career
#
#
# Base = declarative_base()
#
#
# def model(table):
# class BaseModel(Base):
# __tablename__ = table
# metadata = MetaData(engine_db_career)
#
#         # Reflect the table with the same name from the database (autoload)
# Table(__tablename__, metadata, autoload=True)
#
# return BaseModel
|
[
"wyzane1207@163.com"
] |
wyzane1207@163.com
|
df195ccf60e3003e51d78a0956fd1691ae0fb0b1
|
6a6984544a4782e131510a81ed32cc0c545ab89c
|
/src/icetray/python/i3logging.py
|
efb475dd0d2c1a00a4a5b5832c425bca34254a58
|
[] |
no_license
|
wardVD/IceSimV05
|
f342c035c900c0555fb301a501059c37057b5269
|
6ade23a2fd990694df4e81bed91f8d1fa1287d1f
|
refs/heads/master
| 2020-11-27T21:41:05.707538
| 2016-09-02T09:45:50
| 2016-09-02T09:45:50
| 67,210,139
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,068
|
py
|
import logging, string, traceback
from icecube.icetray import I3Logger, I3LogLevel
class LoggingBridge(I3Logger):
    """Routes IceTray log messages into Python's standard `logging` module."""
    # I3LogLevel -> stdlib level; TRACE (5) and NOTICE (25) are custom values.
    pylevels = {
        I3LogLevel.LOG_TRACE   : 5,
        I3LogLevel.LOG_DEBUG   : logging.DEBUG,
        I3LogLevel.LOG_INFO    : logging.INFO,
        I3LogLevel.LOG_NOTICE  : 25,
        I3LogLevel.LOG_WARN    : logging.WARNING,
        I3LogLevel.LOG_ERROR   : logging.ERROR,
        I3LogLevel.LOG_FATAL   : logging.CRITICAL,
    }
    # inverse mapping, for reporting effective levels back to IceTray
    i3levels = dict((py, i3) for i3, py in pylevels.items())

    def __init__(self):
        I3Logger.__init__(self)
        self.getLogger("").setLevel(logging.INFO)

    def getLogger(self, unit):
        """Python logger for an IceTray unit, under the "icecube" hierarchy."""
        name = "icecube.%s" % unit if unit else "icecube"
        return logging.getLogger(name)

    def log(self, level, unit, file, line, func, msg):
        """Forward one IceTray message as a stdlib LogRecord, if enabled."""
        target = self.getLogger(unit)
        pylevel = self.pylevels[level]
        if target.isEnabledFor(pylevel):
            record = logging.LogRecord(target.name, pylevel, file, line, msg, tuple(), None, None)
            target.handle(record)

    def get_level_for_unit(self, unit):
        effective = self.getLogger(unit).getEffectiveLevel()
        return self.i3levels.get(effective, I3LogLevel.LOG_FATAL)

    def set_level_for_unit(self, unit, level):
        self.getLogger(unit).setLevel(self.pylevels[level])

    def set_level(self, level):
        self.getLogger("").setLevel(self.pylevels[level])
class ColorFormatter(logging.Formatter):
    """Formatter that wraps every line in ANSI bold and colors errors red."""
    def format(self, record):
        record.message = record.getMessage()
        # BUG FIX: the original used string.find(), which was removed in
        # Python 3; the `in` test is equivalent and works on 2 and 3.
        if "%(asctime)" in self._fmt:
            record.asctime = self.formatTime(record, self.datefmt)
        d = dict(record.__dict__)
        if record.levelname in ("CRITICAL", "ERROR"):
            # red level name and bold file name for error-class records
            d['levelname'] = "\x1b[1;31m %s \x1b[0m" % d['levelname']
            d['filename'] = "\x1b[1m%s\x1b[0m" % d['filename']
        s = self._fmt % d
        return "\x1b[1m%s\x1b[0m" % s
# Default log line layout: "<file>:<line> LEVEL: message"
BASIC_FORMAT = "%(filename)s:%(lineno)s %(levelname)s: %(message)s"
def _setup(format=BASIC_FORMAT):
    """Register the TRACE level, configure the root logger via basicConfig,
    and install a LoggingBridge as IceTray's global logger."""
    logging.addLevelName(5, 'TRACE')
    logging.basicConfig(format=format)
    I3Logger.global_logger = LoggingBridge()
def console(colors=True):
    """Log to stderr (via basicConfig); use ANSI colors when it is a TTY."""
    import sys
    from os import isatty
    _setup()
    colorize = colors and isatty(sys.stderr.fileno())
    formatter = ColorFormatter(BASIC_FORMAT) if colorize else logging.Formatter(BASIC_FORMAT)
    logging.root.handlers[0].setFormatter(formatter)
def rotating_files(filename, maxBytes=0, backupCount=0):
    """Send all root-logger output to a size-rotating file.

    :param filename: path of the log file
    :param maxBytes: rotate after this many bytes (0 = never rotate)
    :param backupCount: number of rotated files to keep
    """
    from logging.handlers import RotatingFileHandler
    _setup()
    handler = RotatingFileHandler(filename, maxBytes=maxBytes, backupCount=backupCount)
    handler.setFormatter(logging.Formatter("[%(asctime)s] "+BASIC_FORMAT))
    logging._acquireLock()
    try:
        # replace whatever basicConfig installed with the rotating handler
        logging.root.handlers = list()
        logging.root.addHandler(handler)
    finally:
        # BUG FIX: previously unguarded — an exception here would leave
        # logging's module lock held and deadlock all subsequent logging
        logging._releaseLock()
def syslog():
    """Send all root-logger output to the local syslog daemon."""
    from logging.handlers import SysLogHandler
    _setup()
    handler = SysLogHandler()
    handler.setFormatter(logging.Formatter("[%(asctime)s] "+BASIC_FORMAT))
    logging._acquireLock()
    try:
        # replace whatever basicConfig installed with the syslog handler
        logging.root.handlers = list()
        logging.root.addHandler(handler)
    finally:
        # BUG FIX: previously unguarded — an exception here would leave
        # logging's module lock held and deadlock all subsequent logging
        logging._releaseLock()
def _translate_level(name):
    """Accept an I3LogLevel or its string name ('info', 'WARN', ...) and
    return the corresponding I3LogLevel; raise ValueError otherwise."""
    if isinstance(name, I3LogLevel):
        return name
    attr = 'LOG_' + name.upper()
    if hasattr(I3LogLevel, attr):
        return getattr(I3LogLevel, attr)
    raise ValueError("Unknown logging level '%s'" % name)
def set_level(level):
    """
    Set the global logging level.

    :param level: the log level, either an ``I3LogLevel`` value or its
        string name (case-insensitive), e.g. ``'INFO'``.

    Examples::

        icetray.logging.set_level(icetray.logging.I3LogLevel.LOG_INFO)
        icetray.logging.set_level('INFO')
    """
    I3Logger.global_logger.set_level(_translate_level(level))
def set_level_for_unit(unit, level):
    """
    Set the logging level for a specific logging unit.

    :param unit: name of the logging unit, e.g. ``'I3Reader'``.
    :param level: the log level, either an ``I3LogLevel`` value or its
        string name (case-insensitive).

    Examples::

        icetray.logging.set_level_for_unit('I3Reader', icetray.logging.I3LogLevel.LOG_TRACE)
        icetray.logging.set_level_for_unit('I3Reader', 'TRACE')
    """
    I3Logger.global_logger.set_level_for_unit(unit, _translate_level(level))
# Module-level logging helpers. Each one grabs the caller's frame via
# traceback.extract_stack(limit=2)[0] so the emitted record carries the
# call site's (file, line, function); a restyle through a shared helper
# would change that stack depth and mis-attribute the call site.
def log_trace(message, unit="Python"):
    tb = traceback.extract_stack(limit=2)[0]
    I3Logger.global_logger.log(I3LogLevel.LOG_TRACE, unit, tb[0], tb[1],
        tb[2], message)
def log_debug(message, unit="Python"):
    tb = traceback.extract_stack(limit=2)[0]
    I3Logger.global_logger.log(I3LogLevel.LOG_DEBUG, unit, tb[0], tb[1],
        tb[2], message)
def log_info(message, unit="Python"):
    tb = traceback.extract_stack(limit=2)[0]
    I3Logger.global_logger.log(I3LogLevel.LOG_INFO, unit, tb[0], tb[1],
        tb[2], message)
def log_notice(message, unit="Python"):
    tb = traceback.extract_stack(limit=2)[0]
    I3Logger.global_logger.log(I3LogLevel.LOG_NOTICE, unit, tb[0], tb[1],
        tb[2], message)
def log_warn(message, unit="Python"):
    tb = traceback.extract_stack(limit=2)[0]
    I3Logger.global_logger.log(I3LogLevel.LOG_WARN, unit, tb[0], tb[1],
        tb[2], message)
def log_error(message, unit="Python"):
    tb = traceback.extract_stack(limit=2)[0]
    I3Logger.global_logger.log(I3LogLevel.LOG_ERROR, unit, tb[0], tb[1],
        tb[2], message)
def log_fatal(message, unit="Python"):
    """Log at FATAL level, then raise RuntimeError naming the caller."""
    tb = traceback.extract_stack(limit=2)[0]
    I3Logger.global_logger.log(I3LogLevel.LOG_FATAL, unit, tb[0], tb[1],
        tb[2], message)
    raise RuntimeError(message + " (in " + tb[2] + ")")
|
[
"wardvandriessche@gmail.com"
] |
wardvandriessche@gmail.com
|
1180150b5396a4f4f5f32bcfded61140220148f6
|
612325535126eaddebc230d8c27af095c8e5cc2f
|
/depot_tools/recipe_modules/tryserver/example.py
|
ee0d0c9b7ba47b44b49510cebbd1e8c402c7da6f
|
[
"BSD-3-Clause"
] |
permissive
|
TrellixVulnTeam/proto-quic_1V94
|
1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673
|
feee14d96ee95313f236e0f0e3ff7719246c84f7
|
refs/heads/master
| 2023-04-01T14:36:53.888576
| 2019-10-17T02:23:04
| 2019-10-17T02:23:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,356
|
py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe-engine modules this example depends on.
DEPS = [
  'recipe_engine/path',
  'recipe_engine/platform',
  'recipe_engine/properties',
  'recipe_engine/python',
  'tryserver',
]
def RunSteps(api):
    """Drive the tryserver module's public API so simulation covers it."""
    # point 'checkout' at the build dir so patch-relative paths resolve
    api.path['checkout'] = api.path['slave_build']
    api.tryserver.maybe_apply_issue()
    api.tryserver.get_files_affected_by_patch()
    if api.tryserver.is_tryserver:
        api.tryserver.set_subproject_tag('v8')
    # exercise each tryjob-result classification setter
    api.tryserver.set_patch_failure_tryjob_result()
    api.tryserver.set_compile_failure_tryjob_result()
    api.tryserver.set_test_failure_tryjob_result()
    api.tryserver.set_invalid_test_results_tryjob_result()
    # NOTE(review): presumably the step failing inside this context records
    # a failure hash — confirm against the tryserver module implementation.
    with api.tryserver.set_failure_hash():
        api.python.failing_step('fail', 'foo')
def GenTests(api):
    """Simulation scenarios: one per patch source, plus a win32 platform."""
    yield (api.test('with_svn_patch') +
           api.properties(patch_url='svn://checkout.url'))
    yield (api.test('with_git_patch') +
           api.properties(
                patch_storage='git',
                patch_project='v8',
                patch_repo_url='http://patch.url/',
                patch_ref='johndoe#123.diff'))
    yield (api.test('with_rietveld_patch') +
           api.properties.tryserver())
    yield (api.test('with_wrong_patch') + api.platform('win', 32))
|
[
"2100639007@qq.com"
] |
2100639007@qq.com
|
4b949d091e01e60d0542cd3a3d47c56894232e58
|
7ac271f357f4c8f0c23c697b11966259f836880f
|
/app/web/exception.py
|
96c30998834aeb4bf7f41d79bdd54f9610cf7fca
|
[] |
no_license
|
cheng93/PythonWeb
|
74a58eadee4ee7d2872a582a907bbf47630df371
|
d5ced8dee1d5ba31778125c5e67169c92acf26a0
|
refs/heads/develop
| 2021-01-19T23:59:11.315871
| 2018-03-04T19:26:18
| 2018-03-04T19:26:18
| 89,063,916
| 0
| 0
| null | 2018-03-04T19:26:19
| 2017-04-22T11:09:14
|
Python
|
UTF-8
|
Python
| false
| false
| 134
|
py
|
from pyramid.view import view_config
@view_config(route_name='throw_exception')
def throw_exception(request):
    """View that always raises a bare Exception.

    Presumably registered so tests can exercise the application's error
    handling / exception view — confirm against the route configuration.
    """
    raise Exception()
|
[
"derek.c@hotmail.co.uk"
] |
derek.c@hotmail.co.uk
|
56860c88d4bc8dd9e78a1b54940265c25bb42bfa
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/FreeOpcUa_python-opcua/python-opcua-master/release.py
|
fc05a90bf5b60295b7aab2321c144549eb9401a9
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,120
|
py
|
import re
import os
def bump_version():
    """Bump the patch version in setup.py (or apply a user-typed version).

    Shows the current version, suggests current+1 as the new patch
    release, lets the user type an override, rewrites setup.py, and
    returns the new version string.
    """
    with open("setup.py") as f:
        s = f.read()
    m = re.search(r'version="(.*)\.(.*)\.(.*)",', s)
    if m is None:
        # previously this crashed with a bare AttributeError on .groups()
        raise RuntimeError('no version="X.Y.Z" field found in setup.py')
    v1, v2, v3 = m.groups()
    oldv = "{0}.{1}.{2}".format(v1, v2, v3)
    newv = "{0}.{1}.{2}".format(v1, v2, str(int(v3) + 1))
    print("Current version is: {0}, write new version, ctrl-c to exit".format(oldv))
    ans = input(newv)
    if ans:
        newv = ans
    # BUG FIX: replace only the version= field; a bare s.replace(oldv, newv)
    # also clobbered any other occurrence of the version string in setup.py
    # (e.g. inside a download URL).
    s = s.replace('version="{0}"'.format(oldv), 'version="{0}"'.format(newv), 1)
    with open("setup.py", "w") as f:
        f.write(s)
    return newv
def release():
    """Interactive release: bump version, commit+tag, push, upload to PyPI.

    Each stage asks for confirmation; empty input, "y", or "yes" proceeds.
    """
    version = bump_version()
    if input("version bumped, commiting?(Y/n)") in ("", "y", "yes"):
        os.system("git add setup.py")
        os.system("git commit -m 'new release'")
        os.system("git tag {0}".format(version))
        if input("change committed, push to server?(Y/n)") in ("", "y", "yes"):
            os.system("git push")
            os.system("git push --tags")
            if input("upload to pip?(Y/n)") in ("", "y", "yes"):
                os.system("python setup.py sdist upload")
if __name__ == "__main__":
    # entry point: run the interactive release workflow
    release()
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
4c979a9566f34d6b5958baa4ca8d9883abd3afa6
|
845d96ba5efe898a1e1272c862f476e983e54d46
|
/10/src/gomoku.py
|
6ee645b50621b32a80852855980e5a4f804a1274
|
[] |
no_license
|
ppaanngggg/rl_course
|
fe5d092d87e61149857f9ee085ac73d98d377cb9
|
05f1db88e96ea9ff21ed5028dc8d5b7e9539bcee
|
refs/heads/master
| 2020-04-30T02:17:05.125542
| 2019-03-19T16:28:13
| 2019-03-19T16:28:13
| 176,554,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,210
|
py
|
import torch
class Gomoku:
    """Gomoku ("win_num in a row") game state on a square board.

    Cell values on ``self.board``:
    play1: 1,
    play2: -1,
    empty: 0,
    """
    def __init__(
        self, _size=5, _win_num=4, _player=1, _terminal=None, _history=None, _board=None
    ):
        # _size: side length of the square board.
        # _win_num: number of aligned stones required to win.
        # _player: player to move next (1 or -1).
        # _terminal: final result if known (1, -1, 0 for draw); None while running.
        # _history: list of past actions as flattened cell indices.
        # _board: board tensor to resume from; fresh zero board when None.
        self.size = _size
        self.win_num = _win_num
        self.num_actions = _size * _size
        self.player = _player # cur player, 1 or -1
        self.terminal = _terminal # 1, -1 or 0 for draw, None for unknown
        self.history = [] if _history is None else _history
        self.board = torch.zeros(self.size, self.size) if _board is None else _board
    def clone(self):
        """Return an independent copy of the state (history list and
        board tensor are copied, so the clone can be mutated freely)."""
        return Gomoku(
            self.size,
            self.win_num,
            self.player,
            self.terminal,
            self.history[:],
            self.board.clone(),
        )
    def get_input(self):
        """Build a 3-channel tensor view of the state.

        Channel 0: the board multiplied by the current player, so the
        side to move always appears as +1.  Channels 1 and 2: one-hot
        planes of the last and second-to-last moves; at the start of the
        game the missing history entries leave those planes all zeros.
        """
        ac_1 = torch.zeros(self.size, self.size)
        try:
            x, y = self._action2loc(self.history[-1])
            ac_1[x, y] = 1
        except IndexError:
            # fewer than one move played yet
            pass
        ac_2 = torch.zeros(self.size, self.size)
        try:
            x, y = self._action2loc(self.history[-2])
            ac_2[x, y] = 1
        except IndexError:
            # fewer than two moves played yet
            pass
        return torch.stack([self.board * self.player, ac_1, ac_2])
    def get_mask(self):
        """Return a flat boolean tensor marking the empty (legal) cells."""
        return (self.board == 0).flatten()
    def _check_horizon(self, _value, _x, _y):
        """Horizontal win check through (_x, _y) for player _value.

        Counts contiguous _value stones left and right of the new stone;
        on a win sets ``self.terminal = _value``.  Returns
        ``self.terminal`` (None during normal play when there is no win,
        since ``action`` only calls this while the game is running).
        """
        left = 0
        y_left = _y - 1
        while y_left >= 0 and self.board[_x, y_left] == _value:
            left += 1
            y_left -= 1
        right = 0
        y_right = _y + 1
        while y_right < self.size and self.board[_x, y_right] == _value:
            right += 1
            y_right += 1
        if left + right + 1 >= self.win_num:  # horizon win
            self.terminal = _value
        return self.terminal
    def _check_vertical(self, _value, _x, _y):
        """Vertical win check through (_x, _y); same contract as
        ``_check_horizon`` but counting stones above and below."""
        up = 0
        x_up = _x - 1
        while x_up >= 0 and self.board[x_up, _y] == _value:
            up += 1
            x_up -= 1
        down = 0
        x_down = _x + 1
        while x_down < self.size and self.board[x_down, _y] == _value:
            down += 1
            x_down += 1
        if up + down + 1 >= self.win_num:  # vertical win
            self.terminal = _value
        return self.terminal
    def _check_inv_slash(self, _value, _x, _y):
        """Win check along the "\\" diagonal (up-left / down-right)
        through (_x, _y); same contract as ``_check_horizon``."""
        up_left = 0
        x_up = _x - 1
        y_left = _y - 1
        while x_up >= 0 and y_left >= 0 and self.board[x_up, y_left] == _value:
            up_left += 1
            x_up -= 1
            y_left -= 1
        down_right = 0
        x_down = _x + 1
        y_right = _y + 1
        while (
            x_down < self.size
            and y_right < self.size
            and self.board[x_down, y_right] == _value
        ):
            down_right += 1
            x_down += 1
            y_right += 1
        if up_left + down_right + 1 >= self.win_num:  # inv slash win
            self.terminal = _value
        return self.terminal
    def _check_slash(self, _value, _x, _y):
        """Win check along the "/" diagonal (up-right / down-left)
        through (_x, _y); same contract as ``_check_horizon``."""
        up_right = 0
        x_up = _x - 1
        y_right = _y + 1
        while x_up >= 0 and y_right < self.size and self.board[x_up, y_right] == _value:
            up_right += 1
            x_up -= 1
            y_right += 1
        down_left = 0
        x_down = _x + 1
        y_left = _y - 1
        while (
            x_down < self.size and y_left >= 0 and self.board[x_down, y_left] == _value
        ):
            down_left += 1
            x_down += 1
            y_left -= 1
        if up_right + down_left + 1 >= self.win_num:  # slash win
            self.terminal = _value
        return self.terminal
    def _action2loc(self, _action: int):
        # get loc: convert a flat action index into (row, col) coordinates.
        return _action // self.size, _action % self.size
    def action(self, _action: int):
        """Play ``_action`` for the current player.

        Places the stone, records it in history, flips ``self.player``,
        then checks the four line directions through the new stone and
        finally the draw condition (board full).  Returns the terminal
        value (1, -1 or 0) if the game ended on this move, otherwise
        returns None implicitly.  Raises AssertionError when the cell is
        occupied or the game is already over.
        """
        x, y = self._action2loc(_action)
        # update board and player
        assert self.board[x, y] == 0 and self.terminal is None
        value = self.player
        self.board[x, y] = value
        self.history.append(_action)
        self.player *= -1
        # check terminal
        if self._check_horizon(value, x, y) is not None:
            return self.terminal
        if self._check_vertical(value, x, y) is not None:
            return self.terminal
        if self._check_inv_slash(value, x, y) is not None:
            return self.terminal
        if self._check_slash(value, x, y) is not None:
            return self.terminal
        # check draw
        if len(self.history) == self.size * self.size:
            self.terminal = 0
            return self.terminal
    def __repr__(self):
        """Human-readable dump of the full game state."""
        return (
            f"Cur Player: {self.player}\n"
            f"History: {self.history}\n"
            f"Terminal: {self.terminal}\n"
            f"Board:\n"
            f"{self.board}"
        )
    def human_self(self):
        """Interactive console loop: prompt for (x, y) moves until the
        game reaches a terminal state, printing the board each turn."""
        print(self)
        while self.terminal is None:
            x = input("Please input x: ")
            y = input("Please input y: ")
            action = int(x) * self.size + int(y)
            self.action(action)
            print(self)
        print(f"!!Result!!: {self.terminal}")
# Script entry point: start an interactive human-vs-human console game.
if __name__ == "__main__":
    gomoku = Gomoku()
    gomoku.human_self()
|
[
"hantian.pang@gmail.com"
] |
hantian.pang@gmail.com
|
ac7915d8eed8d2af3351c944f396a9b36e5c868f
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/exercises/_algorithms_challenges/pybites/beginner/55_v4/steam.py
|
ce898477f47f532beed65efb932bddd9d2a0b3e6
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 316
|
py
|
# ____ c.. _______ n..
#
# _______ f..
#
# # cached version to have predictable results for testing
# FEED_URL = "http://bit.ly/2IkFe9B"
#
# Game = n..('Game', 'title link')
#
#
# ___ get_games
# """Parses Steam's RSS feed and returns a list of Game namedtuples"""
# r.. ? f.t.. f.l.. ___ ? __ f__.p.. ?.e..
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
51069274b991d4ae2de188fbec89a5d455c35b29
|
c5cf46942decbda1c813474edb5a18b9595a8bf1
|
/core/constants/default_values.py
|
a9b45a44b702b7574351ac83c4948842275b9980
|
[
"MIT"
] |
permissive
|
AsiganTheSunk/python3-gnosis-cli
|
360b4da9fd86a642ec1e399bdb6c39d1dbcbb8ce
|
c4c2638aa75b8a8268ad899d6cea1e602227ef19
|
refs/heads/master
| 2023-03-18T17:38:38.594171
| 2019-11-15T00:01:34
| 2019-11-15T00:01:34
| 221,243,913
| 0
| 0
|
MIT
| 2023-03-03T23:35:41
| 2019-11-12T14:59:00
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 230
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Expected string lengths of the different types of addresses.
CONTRACT_ADDRESS_LENGTH = 42  # contract address string length
TX_ADDRESS_LENGTH = 66  # transaction hash string length
# Expected string lengths of the providers' API keys.
INFURA_API_KEY_LENGTH = 32
ETHERSCAN_API_KEY_LENGTH = 34
|
[
"asiganthesunk@gmail.com"
] |
asiganthesunk@gmail.com
|
87fbf91d172aff23f865bdbf312d2f0c9889d385
|
344f52fe0c84ec3e6f6b655e7cc2309441506650
|
/SlaverServer/run_slaver.py
|
7586a801df09507a9552f6c7a9ab1c695d3ef21b
|
[
"MIT"
] |
permissive
|
Hanlen520/BigBoss
|
f1c0b1b5a30e45149a1d0eefa914a3060640d5db
|
8a3cfeace1eb2b852de940a6ba6b4a9d6bc814db
|
refs/heads/master
| 2020-04-04T09:49:44.300429
| 2018-09-29T08:21:31
| 2018-09-29T08:21:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
from tornado.web import Application
from tornado.ioloop import IOLoop
import argparse
from config import *
from router import SLAVER_ROUTER
if __name__ == "__main__":
    # Allow the listen port to be overridden on the command line,
    # falling back to the configured slaver port otherwise.
    cli = argparse.ArgumentParser()
    cli.add_argument('--port', type=int, help='set port')
    opts = cli.parse_args()
    listen_port = opts.port or GlobalConf.SLAVER_PORT
    # Build the Tornado application from the slaver routing table and
    # serve it on all interfaces.
    app = Application(SLAVER_ROUTER, **GlobalConf.SLAVER_SETTING)
    app.listen(listen_port, address='0.0.0.0')
    IOLoop.instance().start()
|
[
"178894043@qq.com"
] |
178894043@qq.com
|
764b44f56eb85a9c7ffe5cee4b564152e1b3aa7f
|
85bf9a13bf62c1f074894d134c23dd992ae8688c
|
/javasolutions/p63/Solution.py
|
0a7c244f231e3a5f619d33c503578c2873d98f7e
|
[] |
no_license
|
pololee/oj-leetcode
|
4cca3d309b2c9931d15d3cec4b07b5d9d22733ef
|
78a8b27ee108ba93aa7b659665976112f48fc2c2
|
refs/heads/master
| 2020-06-21T02:15:26.882273
| 2020-02-06T04:56:21
| 2020-02-06T04:56:21
| 197,320,113
| 0
| 0
| null | 2020-02-06T04:56:23
| 2019-07-17T05:20:02
|
Python
|
UTF-8
|
Python
| false
| false
| 921
|
py
|
class Solution:
    def uniquePathsWithObstacles(self, obstacleGrid):
        """Count distinct monotone (right/down) paths from the top-left
        to the bottom-right cell, avoiding obstacle cells marked 1.

        :type obstacleGrid: List[List[int]]
        :rtype: int
        """
        # Guard against missing/empty grids and a blocked start cell.
        if not obstacleGrid or not obstacleGrid[0] or obstacleGrid[0][0] == 1:
            return 0
        cols = len(obstacleGrid[0])
        # paths[j] = number of paths reaching column j of the row being
        # processed; a rolling 1-D compression of the classic 2-D table.
        paths = [0] * cols
        paths[0] = 1
        for row in obstacleGrid:
            for j, cell in enumerate(row):
                if cell == 1:
                    # Obstacle: this cell is unreachable.
                    paths[j] = 0
                elif j > 0:
                    # Reachable from above (paths[j]) and from the left.
                    paths[j] += paths[j - 1]
        return paths[-1]
|
[
"pololee1990@gmail.com"
] |
pololee1990@gmail.com
|
27cbb16e31516b3cb28c074ac4404179beda786c
|
deafd775f238b2836f77b9140f4d6e14a3f3c06d
|
/python/ABC/ABC082/082_B.py
|
2d02b85f31176d2942aec83f88cd72ff00a2d545
|
[] |
no_license
|
knakajima3027/Atcoder
|
ab8e2bf912173b7523fddbb11b38abd7e296762e
|
64cb32fcc4b99501f2f84496e5535e1e7b14c467
|
refs/heads/master
| 2021-06-22T03:58:03.777001
| 2020-12-19T11:23:49
| 2020-12-19T11:23:49
| 135,173,223
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
# Compare the two input strings as character lists: s sorted ascending
# against t sorted descending; print "Yes" only when s's form is
# strictly smaller, "No" on a tie or when it is larger.
s = input()
t = input()
S = sorted(s)                 # characters of s, ascending
T = sorted(t, reverse=True)   # characters of t, descending
if S == T:
    print('No')
elif min(S, T) == S:
    print('Yes')
else:
    print('No')
|
[
"kamihate1012@gmail.com"
] |
kamihate1012@gmail.com
|
29819db9f9c77633b810c3ba2978e48537b4e7a3
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_209/544.py
|
22e6c7236e14da498cdcf8d941798513af589657
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,528
|
py
|
# input() reads a string with a line of input, stripping the '\n' (newline) at the end.
# This is all you need for most Google Code Jam problems.
import math
def get_surface(p):
    # Area of the circular top face of pancake p = (radius, height).
    radius = p[0]
    return math.pi * radius ** 2
def get_side_surface(p):
    # Lateral (cylinder wall) area of pancake p = (radius, height).
    radius, height = p[0], p[1]
    return 2 * math.pi * radius * height
def get_area(p):
    # Total exposed area of a single pancake: top disc plus side wall.
    # A missing pancake (None) contributes nothing.
    return 0 if p is None else get_surface(p) + get_side_surface(p)
t = int(input()) # read a line with a single integer
# Each case: pick k of n cylindrical pancakes (radius, height) and stack
# them to maximize the total exposed surface area.
for i in range(1, t + 1):
    str_number = input().split(" ")
    n, k = int(str_number[0]), int(str_number[1])
    pancakes = []
    for j in range(n):
        str_number = input().split(" ")
        pancakes.append((int(str_number[0]), int(str_number[1])))
    # Greedy: the k-1 lower pancakes contribute only their side walls,
    # so take the k-1 largest side surfaces and order them by radius
    # descending (tuples sort on radius first), bottom to top.
    pancakes_hsorted = sorted(pancakes, key=lambda x: get_side_surface(x),reverse=True)
    sol = sorted(pancakes_hsorted[:(k-1)], reverse=True)
    for p in sol:
        pancakes.remove(p)
    # Choose the best top pancake among the ones not already selected.
    additional_area = 0
    if len(sol) > 0:
        for p in pancakes:
            if p[0] > sol[0][0]:
                # Wider than the widest selected pancake: it adds its
                # side wall plus the disc area exceeding sol[0]'s top.
                additional_area = max(additional_area, get_side_surface(p) + (get_surface(p) - get_surface(sol[0])))
            else:
                # Narrower: only its side wall adds new exposed area.
                additional_area = max(additional_area, get_side_surface(p))
    else:
        # k == 1: simply take the single pancake with the largest area.
        sol = [sorted(pancakes, key=lambda x: get_area(x), reverse=True)[0]]
    # Sum the stack's area; each inner pancake's top disc is partially
    # covered, so subtract the disc of the next (smaller) pancake up.
    area = additional_area
    for j in range(len(sol)):
        area += get_area(sol[j])
        if j+1 < len(sol):
            area -= get_surface(sol[j+1])
    print("Case #{}: {:.9f}".format(i, area))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.