blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
313d24d9e21f7f163d1bf7b7e6dcd9aa369c6860 | 24aa1d8455a923ce64b6066438742c6d85945c80 | /medi_app/migrations/0002_alter_employeebank_added_on.py | 432e68209ebc2e94f5102be737dc8ab10a0a1a19 | [] | no_license | babor99/react_django_medical_store | f81c5e9f436817b16e2c508a6dd6de3d895d511f | e8ab6d7801b6d1c2be86e78ad0165e6df436688f | refs/heads/main | 2023-07-04T04:33:12.048265 | 2021-08-10T20:32:19 | 2021-08-10T20:32:19 | 394,774,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | # Generated by Django 3.2 on 2021-05-06 11:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (see the "Generated by Django" header):
    # switches EmployeeBank.added_on to auto_now_add so the timestamp is set
    # exactly once, when the row is created.

    dependencies = [
        ('medi_app', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='employeebank',
            name='added_on',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
| [
"islambabor947@gmail.com"
] | islambabor947@gmail.com |
3787d9fc4d0fa05467d40d0804ef8861a48641e2 | 56503bd3bd81541764333f19382488e2bd98cc9c | /a_path.py | b8b48f49c1916a29140ca64ee3d776af2744466e | [] | no_license | idrissabanli/algoritms | ea58ea5f9fcfd9051efc1d216cad950f459d2ea7 | b614750237e16e2de4d9565582ceb67f2c7bc150 | refs/heads/master | 2020-12-27T03:19:59.964091 | 2020-02-02T09:29:49 | 2020-02-02T09:29:49 | 237,747,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,935 | py | import random
# Greedy A*-flavoured walk over a square grid from a random start cell to a
# random goal cell.  Every cell keeps four slots:
#   [0] heuristic  - 10 * Manhattan distance from the cell to the goal
#   [1] move cost  - 10 for a straight step, 14 for a diagonal step
#   [2] total      - heuristic + move cost; 0 means "not evaluated yet"
#   [3] visited    - 1 only for cell (0, 0), kept from the original code
#     (NOTE(review): this flag is never read; presumably it was meant to mark
#     the random start cell -- confirm before relying on it).
array_count = 10

# Two randint pairs, in the original order, so a seeded run reproduces the
# original start/goal sequence.
f_i, f_j = random.randint(0, array_count - 1), random.randint(0, array_count - 1)
l_i, l_j = random.randint(0, array_count - 1), random.randint(0, array_count - 1)
print(f_i, f_j)
print(l_i, l_j)

c_i, c_j = f_i, f_j

a = [[[0, 0, 0, 1] if (i, j) == (0, 0) else [0, 0, 0, 0]
      for j in range(array_count)]
     for i in range(array_count)]

# (row delta, column delta, step cost) for the eight neighbours.  The order
# reproduces the original hand-written checks and is what breaks ties: the
# first neighbour with the strictly smallest total wins.
NEIGHBOUR_STEPS = (
    (0, 1, 10), (0, -1, 10), (-1, 0, 10), (1, 0, 10),
    (1, 1, 14), (-1, -1, 14), (-1, 1, 14), (1, -1, 14),
)

while (c_i, c_j) != (l_i, l_j):
    min_element = 1000
    min_element_ci = 0
    min_element_cj = 0
    for d_i, d_j, step_cost in NEIGHBOUR_STEPS:
        n_i, n_j = c_i + d_i, c_j + d_j
        if not (0 <= n_i < len(a) and 0 <= n_j < len(a[n_i])):
            continue
        cell = a[n_i][n_j]
        if cell[2] != 0:
            continue  # already evaluated on an earlier step
        # Bugfix vs. the original: the up-right neighbour used
        # abs(l_j - c_j + 1) instead of abs(l_j - (c_j + 1)), so its
        # heuristic was wrong whenever the goal column differed.
        cell[0] = 10 * (abs(l_i - n_i) + abs(l_j - n_j))
        cell[1] += step_cost
        cell[2] = cell[0] + cell[1]
        if min_element > cell[2]:
            min_element = cell[2]
            min_element_ci = n_i
            min_element_cj = n_j
    print(min_element, min_element_ci, min_element_cj)
    # If every neighbour was already evaluated, the defaults send the walk
    # back to (0, 0) -- same (questionable) behaviour as the original.
    c_i = min_element_ci
    c_j = min_element_cj
| [
"idris@labrin.net"
] | idris@labrin.net |
4681a6c8e7bf85efe88951c9576d5b58a74d27d8 | 87e5a0b52ee4ce63d8713867408cfe86c935cb72 | /src/engineer/kenya_non_crop.py | e2d2556d2493341dae33f57a8db9e9665c3ead7d | [] | no_license | BatmanDao/crop-maml | 593cb04f886658bd577782e0564ac3949c9e6203 | 54296a6f1431a6d729013b5761a296566f03de1b | refs/heads/main | 2023-06-02T18:28:50.423061 | 2021-06-17T18:28:23 | 2021-06-17T18:28:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,752 | py | from dataclasses import dataclass
import pandas as pd
from pathlib import Path
import geopandas
from datetime import datetime
import json
from typing import Optional
from src.processors import KenyaNonCropProcessor
from src.exporters import KenyaNonCropSentinelExporter
from .base import BaseEngineer, BaseDataInstance
from .pv_kenya import PVKenyaEngineer
@dataclass
class KenyaNonCropDataInstance(BaseDataInstance):
    # Integer class index assigned to "non crop" (one past the largest crop
    # index from the PV Kenya engineer), or None if that mapping is absent.
    crop_int: Optional[int]
    # Non-crop instances are never crops; both defaults are fixed.
    is_crop: bool = False
    crop_label: str = "non_crop"
class KenyaNonCropEngineer(BaseEngineer):
    """Pairs exported Sentinel tiles with Kenya non-crop labels to build
    labelled training instances."""

    sentinel_dataset = KenyaNonCropSentinelExporter.dataset
    dataset = KenyaNonCropProcessor.dataset

    def __init__(self, data_folder: Path) -> None:
        super().__init__(data_folder)

        # The "non crop" class gets the next integer after the largest
        # crop-class index written by the PV Kenya engineer; if that mapping
        # has not been generated yet, fall back to None.
        classes_to_index_path = (
            data_folder / "features" / PVKenyaEngineer.dataset / "classes_to_index.json"
        )

        if classes_to_index_path.exists():
            with classes_to_index_path.open("r") as f:
                c2i = json.load(f)
                max_idx = max(c2i.values())
                self.non_crop_index: Optional[int] = max_idx + 1
        else:
            self.non_crop_index = None
        print(
            f"Using crop_int value: {self.non_crop_index if self.non_crop_index is not None else 'None'}"
        )

    @staticmethod
    def read_labels(data_folder: Path) -> pd.DataFrame:
        """Load the processed non-crop labels as a GeoDataFrame."""
        pv_kenya = data_folder / "processed" / KenyaNonCropProcessor.dataset / "data.geojson"
        assert pv_kenya.exists(), "Kenya Non Crop processor must be run to load labels"
        return geopandas.read_file(pv_kenya)

    def process_single_file(
        self,
        path_to_file: Path,
        nan_fill: float,
        max_nan_ratio: float,
        add_ndvi: bool,
        add_ndwi: bool,
        calculate_normalizing_dict: bool,
        start_date: datetime,
        days_per_timestep: int,
        is_test: bool,
    ) -> Optional[KenyaNonCropDataInstance]:
        """Convert one exported tif into a labelled data instance.

        Returns None when no label falls inside the tile, or when the pixel
        series is rejected by ``maxed_nan_to_num`` (too many NaNs).
        """
        da = self.load_tif(path_to_file, days_per_timestep=days_per_timestep, start_date=start_date)

        # first, we find the label encompassed within the da

        min_lon, min_lat = float(da.x.min()), float(da.y.min())
        max_lon, max_lat = float(da.x.max()), float(da.y.max())

        overlap = self.labels[
            (
                (self.labels.lon <= max_lon)
                & (self.labels.lon >= min_lon)
                & (self.labels.lat <= max_lat)
                & (self.labels.lat >= min_lat)
            )
        ]
        if len(overlap) == 0:
            return None
        else:
            # Only the first overlapping label is used for this tile.
            label_lat = overlap.iloc[0].lat
            label_lon = overlap.iloc[0].lon

            # The pixel closest to the label becomes the training series.
            closest_lon = self.find_nearest(da.x, label_lon)
            closest_lat = self.find_nearest(da.y, label_lat)

            labelled_np = da.sel(x=closest_lon).sel(y=closest_lat).values

            if add_ndvi:
                labelled_np = self.calculate_ndvi(labelled_np)
            if add_ndwi:
                labelled_np = self.calculate_ndwi(labelled_np)

            labelled_array = self.maxed_nan_to_num(
                labelled_np, nan=nan_fill, max_ratio=max_nan_ratio
            )

            # Normalizing statistics are accumulated from training data only.
            if (not is_test) and calculate_normalizing_dict:
                self.update_normalizing_values(self.normalizing_dict_interim, labelled_array)

            if labelled_array is not None:
                return KenyaNonCropDataInstance(
                    label_lat=label_lat,
                    label_lon=label_lon,
                    instance_lat=closest_lat,
                    instance_lon=closest_lon,
                    labelled_array=labelled_array,
                    crop_int=self.non_crop_index,
                )
            else:
                return None
| [
"gabriel.tseng@mail.mcgill.ca"
] | gabriel.tseng@mail.mcgill.ca |
bf818d5614f87f280643af66ba624c44ea3b74c5 | 7d9d1a6060f42be44e0e45483a16b00eb0b96abf | /mtefdweb/migrations/0001_initial.py | f81055474ddd6c611b2f863723af86512c9f4195 | [] | no_license | aaugustin/mtefd | 88e50fadb936957721f6dd1f4e2424a2ab4a50b4 | a859fbcd9aa8e5169733f8518fde1e251c45f9fe | refs/heads/master | 2023-07-06T04:24:30.114023 | 2015-10-02T13:34:39 | 2015-10-02T13:34:39 | 24,799,959 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Funder',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('email', models.CharField(max_length=100)),
('perk', models.PositiveSmallIntegerField(choices=[(0, b''), (1, b'Thanks!'), (2, b'Double thanks!'), (3, b'Bronze sponsor'), (4, b'Silver sponsor'), (5, b'Gold sponsor'), (6, b'Platinum sponsor'), (7, b'Diamond sponsor')])),
('appearance', models.CharField(max_length=1, choices=[(b'V', b'Visible'), (b'I', b'Identity-Only'), (b'A', b'Anonymous')])),
],
options={
},
bases=(models.Model,),
),
]
| [
"aymeric.augustin@m4x.org"
] | aymeric.augustin@m4x.org |
772a363f4897ff0d1fb383ba03270c69caf9beef | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/lib/surface/sql/flags/list.py | 7002af2f8b9fb14f62ee5bb4319d675f8b877141 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | UTF-8 | Python | false | false | 1,913 | py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lists customizable MySQL flags for Google Cloud SQL instances."""
from googlecloudsdk.calliope import base
class _BaseList(object):
  """Lists customizable MySQL flags for Google Cloud SQL instances."""

  def Collection(self):
    # Resource collection name used for the command's default output format.
    return 'sql.flags'

  def Run(self, unused_args):
    """Lists customizable MySQL flags for Google Cloud SQL instances.

    Args:
      unused_args: argparse.Namespace, The arguments that this command was
          invoked with.

    Returns:
      A dict object that has the list of flag resources if the command ran
      successfully.

    Raises:
      HttpException: A http error response was received while executing api
          request.
      ToolException: An error other than http error occured while executing the
          command.
    """
    # Clients are injected into the command context by the surrounding CLI.
    sql_client = self.context['sql_client']
    sql_messages = self.context['sql_messages']

    result = sql_client.flags.List(sql_messages.SqlFlagsListRequest())
    return iter(result.items)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class List(_BaseList, base.ListCommand):
  """Lists customizable MySQL flags for Google Cloud SQL instances."""
  # GA surface; all behaviour lives in _BaseList.
  pass
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class ListBeta(_BaseList, base.ListCommand):
  """Lists customizable MySQL flags for Google Cloud SQL instances."""
  # Beta surface; identical to the GA command via _BaseList.
  pass
| [
"toork@uw.edu"
] | toork@uw.edu |
6a0caeb63c63789f851099fc1d4eaa48c2e5701d | b429842319179d4df8586d5cb8287edd8e0bd3c8 | /escuela/core/migrations/0001_initial.py | 484e9f1903757b29db1cbd31413c523d96563c5f | [] | no_license | escuelagithub/escuelagithub | c5bd1be8bcb5b5fd7c1720415dd6d10d7816e0bf | 1e98619935ca3d772f0b77fdcc1de426f6a84729 | refs/heads/master | 2023-04-08T17:18:54.529244 | 2021-04-20T18:55:38 | 2021-04-20T18:55:38 | 359,922,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | # Generated by Django 3.2 on 2021-04-08 19:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the crowd-funding `Funder`
    # table (name/email, chosen perk tier, and how the funder wants to be
    # credited publicly).

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Funder',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100)),
                ('email', models.CharField(max_length=100)),
                ('perk', models.PositiveSmallIntegerField(choices=[(0, b''), (1, b'Thanks!'), (2, b'Double thanks!'), (3, b'Bronze sponsor'), (4, b'Silver sponsor'), (5, b'Gold sponsor'), (6, b'Platinum sponsor'), (7, b'Diamond sponsor')])),
                ('appearance', models.CharField(max_length=1, choices=[(b'V', b'Visible'), (b'I', b'Identity-Only'), (b'A', b'Anonymous')])),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| [
"gjangoinminutes@gmail.com"
] | gjangoinminutes@gmail.com |
5f0228e362c4758a1fdd7c1d13f7c08a4b579228 | 3f83a4411545140ef01e73ec037fdc94fdcb8fa0 | /mwbase/about.py | 77e81dc0ba13a40349274c1f5178b074611307ea | [
"MIT"
] | permissive | mediawiki-utilities/python-mwbase | 3a3759deb171e5b29589ff873e3c3c972a8daf78 | 5a7255fdbd2e9c5d844b48e7b6a293f5ffb17e6e | refs/heads/master | 2021-01-22T23:24:08.677862 | 2020-06-01T15:23:17 | 2020-06-01T15:23:17 | 85,629,682 | 2 | 1 | MIT | 2020-06-01T15:23:23 | 2017-03-20T21:34:42 | Python | UTF-8 | Python | false | false | 384 | py | __name__ = "mwbase"
__version__ = "0.1.4"
__author__ = "Aaron Halfaker"
__author_email__ = "aaron.halfaker@gmail.com"
__description__ = "Data structure normalization for MediaWiki's Wikibase"
__license__ = "MIT"
__url__ = "https://github.com/mediawiki-utilities/python-mwbase"
all = [__name__, __version__, __author__, __author_email__, __description__,
__license__, __url__]
| [
"aaron.halfaker@gmail.com"
] | aaron.halfaker@gmail.com |
f91b55ec00c84a171b7ef334cb54ec1a4add44ed | b33ddc7b89d05e19fdeb69593872fd174fab9f4f | /URI-py/2520a.py | 87d865aa4aae7d32dbe28a595c66d619f9542e3b | [] | no_license | ThiagoCComelli/URI-Online-Judge | 8b8d609d880342b39ba0d396c0610ecb7e01a5af | 5348f736b2d683f4b857232c22cccb7c1d8b8d65 | refs/heads/master | 2020-07-23T15:14:05.353948 | 2020-03-10T19:42:12 | 2020-03-10T19:42:12 | 207,606,956 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | # -*- coding: utf-8 -*-
# Reads "n m" followed by an n-row grid per test case, until EOF.  Locates
# the cells marked 1 and 2 and prints the absolute difference between their
# (row + column) sums (1-based).
# NOTE(review): `lista[i].index(j)` returns the *first* occurrence of the
# value in the row, so the reported column is only correct if each marker
# value appears at most once per row -- presumably guaranteed by the problem
# statement (URI 2520); verify against the original task.
while True:
    try:
        n,m = [int(x) for x in input().split()]
        lista = []
        a = 0          # unused
        b = 0          # unused
        lista1 = []    # 1-based column indices of the marked cells
        lista2 = []    # 1-based row indices of the marked cells
        lista3 = []
        for i in range(n):
            lista.append([int(x) for x in input().split()])
        for i in range(len(lista)):
            for j in lista[i]:
                if j == 1:
                    lista1.append(lista[i].index(j)+1)
                    lista2.append(i+1)
                if j == 2:
                    lista1.append(lista[i].index(j)+1)
                    lista2.append(i+1)
        # Debug output left in by the author: prints the raw coordinates.
        print(lista1)
        print(lista2)
        for a, b in zip(lista1,lista2):
            lista3.append(a+b)
        print(abs(lista3[0]-lista3[1]))
    except EOFError:
        break
| [
"thiago.comelli@outlook.com"
] | thiago.comelli@outlook.com |
99807557a16613d1e9a52417868f0c92aabaf9f8 | 3940b4a507789e1fbbaffeb200149aee215f655a | /lc/1832.CheckIfTheSentenceIsPangram.py | 32b154e7d1bd4e1cb3fe59f18af9a82760a895d9 | [] | no_license | akimi-yano/algorithm-practice | 15f52022ec79542d218c6f901a54396a62080445 | 1abc28919abb55b93d3879860ac9c1297d493d09 | refs/heads/master | 2023-06-11T13:17:56.971791 | 2023-06-10T05:17:56 | 2023-06-10T05:17:56 | 239,395,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | # 1832. Check if the Sentence Is Pangram
# Easy
# 66
# 0
# Add to List
# Share
# A pangram is a sentence where every letter of the English alphabet appears at least once.
# Given a string sentence containing only lowercase English letters, return true if sentence is a pangram, or false otherwise.
# Example 1:
# Input: sentence = "thequickbrownfoxjumpsoverthelazydog"
# Output: true
# Explanation: sentence contains at least one of every letter of the English alphabet.
# Example 2:
# Input: sentence = "leetcode"
# Output: false
# Constraints:
# 1 <= sentence.length <= 1000
# sentence consists of lowercase English letters.
# This solution works:
class Solution:
    def checkIfPangram(self, sentence: str) -> bool:
        """Return True when *sentence* contains 26 distinct characters
        (i.e. every lowercase English letter, given the constraints)."""
        distinct_chars = set()
        for character in sentence:
            distinct_chars.add(character)
        return len(distinct_chars) == 26
# This solution works - 1 liner:
class Solution:
    def checkIfPangram(self, sentence: str) -> bool:
        """One-expression variant: a pangram has exactly 26 distinct
        characters (the input is constrained to lowercase letters)."""
        return 26 == len({character for character in sentence})
"akimi.mimi.yano@gmail.com"
] | akimi.mimi.yano@gmail.com |
f51a8b4d78cea86f8c6db59b5927055f7b591891 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/layout/xaxis/rangeselector/button/_stepmode.py | 64cc451271fb54a7c541f1f6ef14b4f31d0573d9 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 511 | py | import _plotly_utils.basevalidators
class StepmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    # Auto-generated validator for layout.xaxis.rangeselector.button.stepmode:
    # restricts the attribute to the two values the renderer understands.

    def __init__(
        self,
        plotly_name='stepmode',
        parent_name='layout.xaxis.rangeselector.button',
        **kwargs
    ):
        super(StepmodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type='plot',  # changing the value triggers a re-plot
            role='info',
            values=['backward', 'todate'],
            **kwargs
        )
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
9c73175eab6073df14a8204dd7342180d9ef43ec | 3c20f43fe658ee4123aa47548a1fed4d7852670a | /postgretest/wsgi.py | 13396466cf74ecbc61c94d93a9f1e37051e6e5bb | [] | no_license | Jordan-Ak/Postgresql_setup | 47a8bac3a62ce81582116bac9c3419fa783ff6ef | 5427f65de63911ed40cc866f10f6a21877d4af44 | refs/heads/master | 2023-03-26T09:54:57.213028 | 2021-03-24T08:10:51 | 2021-03-24T08:10:51 | 350,995,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for postgretest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already did.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'postgretest.settings')

# Module-level WSGI callable picked up by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"JosiahDavid98@gmail.com"
] | JosiahDavid98@gmail.com |
b3b43555f0df6f58886f9e05854ba2ebb887715f | 5cf745769b0f891aca2198ace8935dce5221ec48 | /anima/rez_packages/resolve/17.4.3/package.py | e2646c27056c9ada1759edb15a496b39cd4cd30f | [
"MIT"
] | permissive | jangcode/anima | d827672e45275f52727d66cd915bc5c2f3a0ede6 | 59ddfe76004626fc8142918b27d15e7759372854 | refs/heads/master | 2022-05-31T14:40:41.703689 | 2022-05-12T00:30:36 | 2022-05-12T00:30:36 | 94,961,144 | 0 | 0 | null | 2017-06-21T04:00:09 | 2017-06-21T04:00:09 | null | UTF-8 | Python | false | false | 1,199 | py | # -*- coding: utf-8 -*-
name = "resolve"
version = "17.4.3"
author = ["Erkan Ozgur Yilmaz"]
uuid = "86791641abc04a189b2177f4eff55327"
description = "DaVinci Resolve package"
requires = [
"python",
"pyside2",
"anima",
]
variants = [
["python-2"],
["python-3"],
]
build_command = "python {root}/../build.py {install}"
def commands():
    """rez hook: extend the resolved environment so DaVinci Resolve and its
    scripting API are reachable (``env`` and ``system`` are injected by rez).
    """
    # env.PYTHONPATH.append("{root}/../python")
    env.PATH.append("{root}/bin")
    if system.platform == "linux":
        env.PATH.append("/opt/resolve/")
        env.RESOLVE_SCRIPT_API = "/opt/resolve/Developer/Scripting/"
        env.RESOLVE_SCRIPT_LIB = "/opt/resolve/libs/Fusion/fusionscript.so"
        env.PYTHONPATH.append("$RESOLVE_SCRIPT_API/Modules/")
    elif system.platform == "osx":
        env.PATH.append("/Applications/DaVinci Resolve/DaVinci Resolve.app/")
        env.RESOLVE_SCRIPT_API = (
            "/Applications/DaVinci Resolve/DaVinci Resolve.app/Developer/Scripting/"
        )
        env.RESOLVE_SCRIPT_LIB = (
            "/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/"
            "Fusion/fusionscript.so"
        )
        env.PYTHONPATH.append("$RESOLVE_SCRIPT_API/Modules/")
    # NOTE(review): there is no "windows" branch -- presumably unsupported
    # by this package; confirm before deploying on Windows.
| [
"eoyilmaz@gmail.com"
] | eoyilmaz@gmail.com |
6b30ff421722f621c0fae728aa28f336fdb80fd6 | 47dfa145704444bb62d9e6e4f4163bde13abdbd5 | /fancy/eventbus/event_bus.py | 1769aa881e988abb23ba73cfb8737db1a3888b3f | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | susautw/fancy-eventbus | 1c2e4e8d64c2726e2f5ac373a91f2218c1ad2500 | 8c2c693538906c35ae87e12ec8a66f2e5d31f86b | refs/heads/master | 2022-12-02T01:05:17.403909 | 2020-08-21T12:59:06 | 2020-08-21T12:59:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,917 | py | from typing import Dict, List
from fancy.eventbus import EventListener, EventScheduler
from fancy.eventbus.scheduler import SchedulerBase
class EventBus:
    """Routes posted event objects to registered listeners via a scheduler.

    Listeners are grouped by the event type they subscribe to.  Posting an
    event enqueues a *clone* of each matching listener onto the scheduler,
    so the registered prototypes normally carry no per-event state
    (``cancel`` is the exception -- see ``_cancel_listeners``).
    """

    _scheduler: SchedulerBase[EventListener]
    event_listener_map: Dict[type, List[EventListener]]

    # Lazily-created process-wide bus; see get_default().
    default_bus: 'EventBus' = None

    @classmethod
    def get_default(cls) -> 'EventBus':
        """Return the shared default bus, creating it on first access."""
        if cls.default_bus is None:
            cls.default_bus = EventBus(EventScheduler.MULTILEVEL_FEEDBACK_QUEUE)
        return cls.default_bus

    def __init__(self, scheduler: SchedulerBase):
        self._scheduler = scheduler
        self.event_listener_map = {}

    def post(self, event: object) -> bool:
        """
        :param event:
        :return: Is the event has been handled by at least one listener
        """
        # isinstance() (not exact type equality) lets listeners subscribe to
        # base classes of the posted event.
        posted = False
        for event_type in self.event_listener_map.keys():
            if isinstance(event, event_type):
                posted = True
                self._post_listeners(event, self.event_listener_map[event_type])
        return posted

    def _post_listeners(self, event: object, listeners: List[EventListener]) -> None:
        # Clone each listener before attaching the event so the registered
        # prototype is never mutated by a post.
        for listener in listeners:
            listener = listener.clone()
            listener.event = event
            self._scheduler.add(listener)

    def cancel(self, event: object) -> bool:
        """Remove pending deliveries of *event*; True if any type matched."""
        canceled = False
        for event_type in self.event_listener_map.keys():
            if isinstance(event, event_type):
                canceled = True
                self._cancel_listeners(event, self.event_listener_map[event_type])
        return canceled

    def _cancel_listeners(self, event: object, listeners: List[EventListener]):
        # NOTE(review): unlike _post_listeners, this sets `event` on the
        # *registered* listener (no clone), presumably so scheduler removal
        # can match by equality -- confirm this mutation is intended.
        for listener in listeners:
            listener.event = event
            try:
                self._scheduler.remove(listener)
            except ValueError as _:
                # Listener was not queued for this event; nothing to remove.
                pass

    def register(self, event_listeners: object) -> None:
        """Register every marked listener method found on the given object."""
        listeners: List[EventListener] = EventListener.get_marked_method(event_listeners).values()
        for listener in listeners:
            if listener.event_type not in self.event_listener_map:
                self.event_listener_map[listener.event_type] = [listener]
            else:
                self.event_listener_map[listener.event_type].append(listener)

    def unregister(self, event_listeners: object) -> None:
        """Remove previously registered listeners.

        Raises ValueError when a listener's event type was never registered.
        """
        listeners: List[EventListener] = EventListener.get_marked_method(event_listeners).values()
        for listener in listeners:
            if listener.event_type not in self.event_listener_map:
                raise ValueError(f"{EventBus.__name__}.unregister(item): item not in {EventBus.__name__}")
            self.event_listener_map[listener.event_type].remove(listener)

    def clear(self) -> None:
        """Drop every registered listener (already-queued work is untouched)."""
        self.event_listener_map = {}

    @property
    def scheduler(self) -> SchedulerBase[EventListener]:
        return self._scheduler
| [
"susautw@gmail.com"
] | susautw@gmail.com |
1e8cb8a12175625244c6506cd6d7aeaf2a555bdb | 700fe1a57d6306171f17f012587c938ba49f0212 | /rpg/view.py | 23ab9a1122ea2d88bcbf5b6f4429aca3e7e11b6b | [] | no_license | Michaelllllll25/class-rpg-game | cfd850bc8ac6a8c6bb549ad77825eed3ba5d3228 | 4229124bbaa170b5bbaf29f7588ee8af20dc11ba | refs/heads/main | 2023-09-06T02:14:34.161346 | 2021-11-15T19:24:02 | 2021-11-15T19:24:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | from typing import List
import pygame
class View:
    """A pseudo-interface for views that can be used in the game class.

    Subclasses must override all three hooks; the base implementations raise
    so a missing override fails loudly at first use.
    """

    def event_loop(self, events: List[pygame.event.Event]) -> None:
        """View-specific event loop for key-bindings"""
        raise NotImplementedError("You need to override the 'event_loop' method in every class inheriting from the View class.")

    def update(self) -> None:
        """Update the view's state"""
        raise NotImplementedError("You need to override the 'update' method in every class inheriting from the View class.")

    def draw(self, screen: pygame.Surface) -> None:
        """Draw the view's contents."""
        raise NotImplementedError("You need to override the 'draw' method in every class inheriting from the View class.")
| [
"daniel.gallo@ycdsbk12.ca"
] | daniel.gallo@ycdsbk12.ca |
d223cdd0d3cbce60464d864107e23cc94170871c | 118984fdbacf5eb71159eb511ccd055987498886 | /CH10/EX10.31.py | 8c3cce27c8f1ea9df7d33ec5deeecf9e903c37df | [] | no_license | 6igsm0ke/Introduction-to-Programming-Using-Python-Liang-1st-edtion | 321c6256be6ff78adbc8e3ddc73f2f43a51a75ab | 159489f3af296f87469ddddf3a1cb232917506b0 | refs/heads/master | 2023-06-05T20:03:17.951911 | 2021-06-18T18:04:42 | 2021-06-18T18:04:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | # 10.31 (Occurrences of each digit in a string) Write a function that counts the occurrences
# of each digit in a string using the following header:
# def count(s):
# The function counts how many times a digit appears in the string. The return
# value is a list of ten elements, each of which holds the count for a digit. For
# example, after executing counts = count("12203AB3"), counts[0] is 1,
# counts[1] is 1, counts[2] is 2, and counts[3] is 2.
# Write a test program that prompts the user to enter a string and displays the
# number of occurrences of each digit in the string.
def getDistinctDigits(s):
    """Return the distinct elements of *s* (string or list), in the order
    they first appear."""
    # dict keys preserve insertion order (3.7+), so this is the standard
    # order-preserving de-duplication idiom.
    return list(dict.fromkeys(s))
def count(s):
    """Return how often each distinct element of *s* occurs, in the order
    the elements first appear in *s*.

    (Despite the exercise header, this is per *distinct character*, not a
    fixed 10-slot digit table.)
    """
    # De-duplicate in first-seen order, then count each symbol.
    return [s.count(symbol) for symbol in dict.fromkeys(s)]
s = input("Enter a string: ")
counts = count(s)
s = list(s)
lst = getDistinctDigits(s)
lst2 = [x for x in lst]
lst.sort()
for i in lst:
print(i, "occurs", counts[lst2.index(i)], "times")
| [
"47993441+OmarAlmighty@users.noreply.github.com"
] | 47993441+OmarAlmighty@users.noreply.github.com |
4ee7fe861e8995c8351d3a5f977b4130187ac57b | 260133e46c0c88fd20f2ed18309c5f46508b7fb9 | /opengever/api/users.py | 65100d01e8db0b06da8ab5d5773d5f00f43f9c8d | [] | no_license | robertmuehsig/opengever.core | 4180fbea1436fade9b33232a293b0d43ebfc6c51 | 63b3747793d5b824c56eb3659987bb361d25d8d8 | refs/heads/master | 2020-09-08T14:55:00.340222 | 2019-11-08T10:16:02 | 2019-11-08T10:16:02 | 221,163,734 | 0 | 0 | null | 2019-11-12T08:08:59 | 2019-11-12T08:08:54 | null | UTF-8 | Python | false | false | 877 | py | from plone import api
from plone.restapi.services.users.get import UsersGet
class GeverUsersGet(UsersGet):
    """Customize permissions to enumarate and query user information.

    By default its protected with `manage portal` permission, but in GEVER all
    users should be able to enumarate, query or access user information for
    all.
    """

    #: Roles that unlock user enumeration / querying.
    _ALLOWED_ROLES = ('Member', 'Reader', 'Manager')

    def _has_allowed_role(self):
        # We're not able to check for the `View` permission, because also
        # anonymous users have the `View` permissions (login form) -- so we
        # inspect the current user's roles instead.
        roles_of_user = api.user.get_roles()
        return any(role in roles_of_user for role in self._ALLOWED_ROLES)

    def has_permission_to_query(self):
        return self._has_allowed_role()

    def has_permission_to_enumerate(self):
        return self._has_allowed_role()
| [
"philippe.gross@4teamwork.ch"
] | philippe.gross@4teamwork.ch |
11d20a2a42da4e748a24529582b4d8a926ece82d | be026334d457b1f78050f8262cd693922c6c8579 | /onnxruntime/test/testdata/transform/qdq_conv_gen.py | 9d26b42e820b6f43455e7e6005b854bc59b55d8b | [
"MIT"
] | permissive | ConnectionMaster/onnxruntime | 953c34c6599c9426043a8e5cd2dba05424084e3b | bac9c0eb50ed5f0361f00707dd6434061ef6fcfe | refs/heads/master | 2023-04-05T00:01:50.750871 | 2022-03-16T15:49:42 | 2022-03-16T15:49:42 | 183,019,796 | 1 | 0 | MIT | 2023-04-04T02:03:14 | 2019-04-23T13:21:11 | C++ | UTF-8 | Python | false | false | 2,177 | py | import onnx
from onnx import helper
from onnx import TensorProto
# Generate a basic QDQ Conv model with `num_convs` Conv nodes and their surrounding DQ/Q nodes
# Generate a basic QDQ Conv model with `num_convs` Conv nodes and their surrounding DQ/Q nodes
def GenerateModel(model_path, num_convs):
    """Write an ONNX model with `num_convs` independent DQ -> Conv -> Q
    subgraphs to `model_path`."""
    nodes = []
    initializers = []
    inputs = []
    outputs = []

    for conv_idx in range(num_convs):

        def name(base, _i=conv_idx):
            # Suffix every tensor/node name with the unit index so the Conv
            # subgraphs stay independent of each other.
            return f"{base}_{_i}"

        nodes += [
            helper.make_node("DequantizeLinear", [name("X"), name("Scale"), name("Zero_point_uint8")], [name("input_DQ")], name("input_DQ")),
            helper.make_node("DequantizeLinear", [name("W"), name("Scale"), name("Zero_point_uint8")], [name("conv_weight_DQ")], name("conv_weight_DQ")),
            helper.make_node("DequantizeLinear", [name("Bias"), name("Scale"), name("Zero_point_int32")], [name("conv_bias_DQ")], name("conv_bias_DQ")),
            helper.make_node("Conv", [name("input_DQ"), name("conv_weight_DQ"), name("conv_bias_DQ")], [name("conv_output")], name("conv")),
            helper.make_node("QuantizeLinear", [name("conv_output"), name("Scale"), name("Zero_point_uint8")], [name("Y")], name("output_Q")),
        ]

        initializers += [
            helper.make_tensor(name('Scale'), TensorProto.FLOAT, [1], [256.0]),
            helper.make_tensor(name('Zero_point_uint8'), TensorProto.UINT8, [1], [0]),
            helper.make_tensor(name('Zero_point_int32'), TensorProto.INT32, [1], [0]),
            helper.make_tensor(name('W'), TensorProto.UINT8, [1, 1, 3, 3], [128] * 9),
            helper.make_tensor(name('Bias'), TensorProto.INT32, [1], [64]),
        ]

        inputs.append(helper.make_tensor_value_info(name('X'), TensorProto.UINT8, [1, 1, 5, 5]))
        outputs.append(helper.make_tensor_value_info(name('Y'), TensorProto.UINT8, [1, 1, 3, 3]))

    graph = helper.make_graph(
        nodes,
        f"QDQ_Conv_x_{num_convs}",
        inputs,
        outputs,
        initializers
    )

    onnx.save(helper.make_model(graph), model_path)
if __name__ == "__main__":
GenerateModel('qdq_conv.onnx', 1)
GenerateModel('runtime_optimization/qdq_convs.onnx', 3)
| [
"noreply@github.com"
] | ConnectionMaster.noreply@github.com |
133a679f21c28752675651c876492307c71f99c2 | cc9820ebc602f4d41ade0f6fd5e17a90ad5fcb56 | /contrib/zmq/piped_zmq/mongrel2_processors.py | d7c902594f7138ca836baa2006e3997a4fe0b3bf | [
"MIT"
] | permissive | foundit/Piped | 2ed86e30709fd98a9245c620bd48b5795bc600d1 | 78cb485772e353622c5b939f4c1560dfe37464f6 | refs/heads/develop | 2021-01-01T05:34:22.137664 | 2017-07-13T20:46:33 | 2017-07-13T20:46:33 | 1,893,880 | 8 | 4 | null | 2015-05-29T10:19:26 | 2011-06-14T11:19:39 | Python | UTF-8 | Python | false | false | 5,025 | py | # Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors.
# Parts Copyright (c) 2010, Zed A. Shaw and Mongrel2 Project Contributors.
# See LICENSE for details.
""" Utility processors that are especially useful in ZMQ contexts. """
import json
import urlparse
import zmq
from zope import interface
from piped import util, processing
from piped.processors import base
json_encoder = util.PipedJSONEncoder()
# Template for a minimal HTTP/1.1 response as Mongrel2 expects it back.
HTTP_FORMAT = "HTTP/1.1 %(code)s %(status)s\r\n%(headers)s\r\n\r\n%(body)s"


def http_response(body, code=200, status='OK', headers=None):
    """Render an HTTP/1.1 response string.

    :param body: the response body (its length becomes Content-Length)
    :param code: numeric status code
    :param status: reason phrase matching *code*
    :param headers: optional mapping of extra header names to values
    :return: the full response as a single string
    """
    # Work on a copy: the original implementation added Content-Length into
    # the caller's dict, mutating their argument across calls.
    headers = dict(headers) if headers else dict()
    headers['Content-Length'] = len(body)
    payload = {
        'code': code,
        'status': status,
        'body': body,
        'headers': "\r\n".join('%s: %s' % (k, v) for k, v in headers.items()),
    }
    return HTTP_FORMAT % payload
def parse_netstring(ns):
    """ Split one netstring off the front of *ns*.

    A netstring is "<length>:<payload>," (http://cr.yp.to/proto/netstrings.txt).
    Returns ``(payload, remainder)``; raises AssertionError when the payload
    is not terminated by a comma, ValueError when the length is not numeric.
    """
    # Renamed from `len`, which shadowed the builtin of the same name.
    length, rest = ns.split(':', 1)
    length = int(length)
    assert rest[length] == ',', "Netstring did not end in ','"
    return rest[:length], rest[length + 1:]
def parse_mongrel_http_request(msg):
    """ Parse a raw Mongrel2 request message into its parts.

    The wire format is "SENDER CONN_ID PATH NETSTRING(headers) NETSTRING(body)".
    Returns a dict with sender, conn_id, path, headers (decoded JSON), body,
    data (the JSON-decoded body for METHOD == 'JSON' requests, else empty)
    and query_string (parsed QUERY header with single values unwrapped).

    NOTE: this module targets Python 2 -- str/unicode handling below relies
    on py2 semantics (e.g. .encode/.decode round-trips on byte strings).
    """
    sender, conn_id, path, rest = msg.split(' ', 3)
    headers, rest = parse_netstring(rest)
    body = parse_netstring(rest)[0]
    headers = json.loads(headers)
    data = dict()
    if headers['METHOD'] == 'JSON':
        data = json.loads(body)
    # The query-string is URL-encoded, so it's all ASCII at this
    # point. But json.loads have made all strings into unicode, though
    # it's unaware of the URL-encoding. Make sure the input to
    # parse_qs is a bytestring, otherwise it gets confused.
    raw_query_string = headers.get('QUERY', u'').encode('utf8')
    # Now turn the raw query-(byte)-string into a dictionary,
    # converting the utf8-strings into unicode-objects post-parse_qs.
    query_string = dict((key.decode('utf8'), [v.decode('utf8') for v in list_of_values])
                        for (key, list_of_values) in urlparse.parse_qs(raw_query_string).items())
    # parse_qs returns a list of values for every parameter. We
    # expect most parameters to take a single value, and want those to
    # be scalars.
    # (Python 2: .items() returns a list, so mutating the dict inside the
    # loop is safe here.)
    for key, list_of_values in query_string.items():
        if len(list_of_values) == 1:
            query_string[key] = list_of_values[0]
    return dict(
        sender=sender,
        conn_id=conn_id,
        path=path,
        headers=headers,
        body=body,
        data=data,
        query_string=query_string
    )
class MongrelRequestToBatonParser(base.Processor):
    """ Turn a raw Mongrel2 request message into a pipeline baton.

    Disconnect notifications are consumed silently: they produce no baton
    and propagate to no further consumers.
    """
    name = 'parse-msg-as-mongrel-request'
    interface.classProvides(processing.IProcessor)

    def get_consumers(self, baton):
        # Stop disconnect notifications from travelling down the pipeline.
        request_type = util.dict_get_path(baton, 'http_request.data.type')
        if request_type == 'disconnect':
            return []
        return super(MongrelRequestToBatonParser, self).get_consumers(baton)

    def process(self, msg):
        request = parse_mongrel_http_request(msg)
        if request['data'].get('type') == 'disconnect':
            return None
        response_stub = dict(uuid=request['sender'], idents=[request['conn_id']], headers=dict(), body='')
        return dict(http_request=request, http_response=response_stub)
class MongrelReplySender(base.Processor):
    """ Send an HTTP reply for a Mongrel2 request over a ZeroMQ socket.

    The socket is resolved at configure-time from the named queue. When
    *close* is true, a second, empty-bodied message follows the reply,
    which makes Mongrel2 drop the client connection.
    """
    name = 'send-mongrel-reply'
    interface.classProvides(processing.IProcessor)

    def __init__(self, queue_name, response_path='http_response', close=True, *args, **kwargs):
        super(MongrelReplySender, self).__init__(*args, **kwargs)
        self.queue_name = queue_name
        self.response_path = response_path
        self.close = close

    def configure(self, runtime_environment):
        provider = 'zmq.socket.%s' % self.queue_name
        self.dependencies = runtime_environment.create_dependency_map(self, socket=dict(provider=provider))

    def process(self, baton):
        response = util.dict_get_path(baton, self.response_path)
        assert response is not None, "provide a response if you expect something sensible from this processor"
        socket = self.dependencies.socket
        socket.send(self._make_http_response(response), flags=zmq.NOBLOCK)
        if self.close:
            # An empty-bodied follow-up message asks Mongrel2 to close the
            # connection; send it against a copy so the original response
            # dict keeps its body.
            socket.send(self._make_close_response(dict(response, body='')))
        return baton

    @classmethod
    def _make_http_response(cls, response):
        fields = dict(response)  # copy so the caller's dict stays untouched
        uuid = fields.pop('uuid')
        idents = ' '.join(fields.pop('idents'))
        payload = dict(uuid=uuid, ident_length=len(idents), idents=idents, msg=http_response(**fields))
        return "%(uuid)s %(ident_length)i:%(idents)s, %(msg)s" % payload

    @classmethod
    def _make_close_response(cls, response):
        fields = dict(response)
        uuid = fields.pop('uuid')
        idents = ' '.join(fields.pop('idents'))
        payload = dict(uuid=uuid, ident_length=len(idents), idents=idents, msg='')
        return "%(uuid)s %(ident_length)i:%(idents)s, " % payload
| [
"njal@karevoll.no"
] | njal@karevoll.no |
8e88365f3bd779f18cbc22eb893965c4bb8040c2 | 2feea16ff9e2e59ac0e14a344c13b7c92004c6a8 | /vote/migrations/0001_initial.py | 0c91639392329be21d6a9df84f967971f53bbbd1 | [] | no_license | ThanHuuTuan/social-network-website | 6a5f74c00a00f8b804de81bf7e32e0ba00beb14b | 363803df2516864e4dd97a43f7d77d8c514a9c94 | refs/heads/master | 2020-09-13T10:41:15.585405 | 2018-01-14T09:03:44 | 2018-01-14T09:03:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-23 03:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration (see the header above); creates the
    # ActivityVote join table linking a user to an activity they voted on.
    # Generated migrations should not be edited by hand once applied.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('activities', '0038_auto_20160923_0342'),
    ]
    operations = [
        migrations.CreateModel(
            name='ActivityVote',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('activity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='vote_activityvote_activity', to='activities.Activity')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='vote_activityvote_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"tranphong96.hbk@gmail.com"
] | tranphong96.hbk@gmail.com |
07a1628fb7df74dbda9255ea2b962cc915a79877 | dac6aba35a341afecc573ba1b1c48f1a3eb09e00 | /test/functional/tool_wallet.py | 64e354a9c7e295c7165d671ac33424a68ce5397c | [
"MIT"
] | permissive | minblock/icountcoins | dd28fadc958245ac171ec523ec2a8b3c473b7946 | 8575ece9bed59101e0e753cb762ce6165b625dbe | refs/heads/master | 2021-05-22T02:24:49.713242 | 2020-04-04T06:32:45 | 2020-04-04T06:32:45 | 252,926,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,835 | py | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoin-wallet."""
import subprocess
import textwrap
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class ToolWalletTest(BitcoinTestFramework):
    """Functional test for the offline wallet command line tool."""
    def set_test_params(self):
        # One node, fresh regtest chain so wallet stats are deterministic.
        self.num_nodes = 1
        self.setup_clean_chain = True
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def bitcoin_wallet_process(self, *args):
        """Spawn the wallet tool with *args* against node 0's regtest datadir."""
        binary = self.config["environment"]["BUILDDIR"] + '/src/icountcoins-wallet' + self.config["environment"]["EXEEXT"]
        args = ['-datadir={}'.format(self.nodes[0].datadir), '-regtest'] + list(args)
        return subprocess.Popen([binary] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    def assert_raises_tool_error(self, error, *args):
        """Run the tool; require exit code 1, empty stdout and *error* on stderr."""
        p = self.bitcoin_wallet_process(*args)
        stdout, stderr = p.communicate()
        assert_equal(p.poll(), 1)
        assert_equal(stdout, '')
        assert_equal(stderr.strip(), error)
    def assert_tool_output(self, output, *args):
        """Run the tool; require a clean exit with exactly *output* on stdout."""
        p = self.bitcoin_wallet_process(*args)
        stdout, stderr = p.communicate()
        assert_equal(p.poll(), 0)
        assert_equal(stderr, '')
        assert_equal(stdout, output)
    def run_test(self):
        # Invalid invocations must fail with precise error messages.
        self.assert_raises_tool_error('Invalid command: foo', 'foo')
        # `bitcoin-wallet help` is an error. Use `bitcoin-wallet -help`
        self.assert_raises_tool_error('Invalid command: help', 'help')
        self.assert_raises_tool_error('Error: two methods provided (info and create). Only one method should be provided.', 'info', 'create')
        self.assert_raises_tool_error('Error parsing command line arguments: Invalid parameter -foo', '-foo')
        self.assert_raises_tool_error('Error loading wallet.dat. Is wallet being used by other process?', '-wallet=wallet.dat', 'info')
        self.assert_raises_tool_error('Error: no wallet file at nonexistent.dat', '-wallet=nonexistent.dat', 'info')
        # stop the node to close the wallet to call info command
        self.stop_node(0)
        out = textwrap.dedent('''\
            Wallet info
            ===========
            Encrypted: no
            HD (hd seed available): yes
            Keypool Size: 2
            Transactions: 0
            Address Book: 3
        ''')
        self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
        # mutate the wallet to check the info command output changes accordingly
        self.start_node(0)
        self.nodes[0].generate(1)
        self.stop_node(0)
        out = textwrap.dedent('''\
            Wallet info
            ===========
            Encrypted: no
            HD (hd seed available): yes
            Keypool Size: 2
            Transactions: 1
            Address Book: 3
        ''')
        self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
        # 'create' makes a brand new wallet and tops up its keypool.
        out = textwrap.dedent('''\
            Topping up keypool...
            Wallet info
            ===========
            Encrypted: no
            HD (hd seed available): yes
            Keypool Size: 2000
            Transactions: 0
            Address Book: 0
        ''')
        self.assert_tool_output(out, '-wallet=foo', 'create')
        # Cross-check the tool's view against the node's RPC view of the
        # freshly created wallet.
        self.start_node(0, ['-wallet=foo'])
        out = self.nodes[0].getwalletinfo()
        self.stop_node(0)
        assert_equal(0, out['txcount'])
        assert_equal(1000, out['keypoolsize'])
        assert_equal(1000, out['keypoolsize_hd_internal'])
        assert_equal(True, 'hdseedid' in out)
assert_equal(True, 'hdseedid' in out)
if __name__ == '__main__':
    # Standard functional-test entry point: run under the framework driver.
    ToolWalletTest().main()
| [
"POSTMASTER@provgn.com"
] | POSTMASTER@provgn.com |
158506f682d64d210cc6f22b146317634efbfb1c | 9ba5d85bc644cc586abc29b6c82047deb4caea1f | /leetcode/228.汇总区间.py | 7c3a2496c51dfce124d13f0dd5bbc357bba579da | [
"MIT"
] | permissive | Data-Designer/Leetcode-Travel | f01dda19a1e37a2ba9da42e8ecda304c73645d99 | 147cf44904ce73cd4fd1cecf33f1ac8a336b0e6f | refs/heads/master | 2023-07-30T22:54:53.101323 | 2021-09-14T04:34:08 | 2021-09-14T04:34:08 | 366,757,874 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | '''
Description:
version:
Author: Data Designer
Date: 2021-05-05 12:50:26
LastEditors: Data Designer
LastEditTime: 2021-05-05 13:21:09
'''
#
# @lc app=leetcode.cn id=228 lang=python3
#
# [228] 汇总区间
#
# @lc code=start
class Solution:
    def summaryRanges(self, nums: List[int]) -> List[str]:
        """Collapse a sorted list of unique ints into range strings.

        Consecutive runs become "start->end"; single values become the
        bare number. Uses a two-pointer sweep over adjacent elements.
        """
        if not nums:
            return []
        ranges = []
        total = len(nums)
        start = 0
        while start < total:
            end = start
            # Grow the run while each neighbour is exactly one larger.
            while end + 1 < total and nums[end + 1] - nums[end] == 1:
                end += 1
            if end == start:
                ranges.append(str(nums[start]))
            else:
                ranges.append(str(nums[start]) + '->' + str(nums[end]))
            start = end + 1
        return ranges
# @lc code=end
| [
"zc_dlmu@163.com"
] | zc_dlmu@163.com |
0be3d2109326449942e5f67d8544cb116227a7fb | 7249970977cdbc5ffae9502278ec3a75f420b46c | /portfolio-project/portfolio/settings.py | 0f8a34cb3dfaeef86b75918c2a5ccebf0f49057e | [] | no_license | Abepena/Django-Projects | b899c72e0a73a46f8222ceddf380b52b5a6fcc1c | d4e01e45b0a0a969200a18333a4d67880282e604 | refs/heads/master | 2020-03-22T04:45:54.971020 | 2018-10-10T22:13:24 | 2018-10-10T22:13:24 | 139,519,642 | 0 | 0 | null | 2018-10-10T20:01:02 | 2018-07-03T02:45:18 | Python | UTF-8 | Python | false | false | 3,876 | py | """
Django settings for portfolio project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'awj=u@n*1+r^_xp!l380($3kv99o75i&%&06den0-_75r-=5xm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'jobs',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# NOTE(review): ENGINE selects the sqlite3 backend, yet NAME/USER/PASSWORD/
# HOST/PORT below are PostgreSQL-style connection settings (sqlite ignores
# everything except NAME, which it treats as a file path named
# 'portfoliodb'). Presumably ENGINE was meant to be
# 'django.db.backends.postgresql' -- confirm before changing.
# SECURITY: a literal database password is committed here; it belongs in
# local_settings.py (imported at the bottom of this file) or an
# environment variable, not in version control.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        'NAME': 'portfoliodb',
        'USER': 'postgres',
        'PASSWORD': 'super-secret-password',
        'HOST': 'localhost',
        'PORT': '5432'
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
#Create Static Files, root , url , and directories to look for
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'portfolio/static/'),
os.path.join(BASE_DIR, 'jobs/static/'),
]
# Media Files (Images, Video, Etc)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
#local_settings import to store settings more securely
try:
# Caution: anything with the same name as a variable
# in local_settings.py will be overwritten
from .local_settings import *
except:
# Do nothing if no local_settings.py file
pass
| [
"pena.abe@gmail.com"
] | pena.abe@gmail.com |
d9888d76bef83c54e8dbedd20f2fe17a31bbee79 | 229a7f69999fbb5da88f01d11f22cf77af79a999 | /adobjects/eventsourcegroup.py | ef77fa9d093abebf41b8e50fd46a66c1a14061e3 | [] | no_license | DaehwanCho/facebookads_v2 | 0f86549ab83c1adef677d831c0c16529c701c364 | 5ca5476a5e5a13b21ed6e9386133ba901b926b87 | refs/heads/master | 2023-03-19T07:58:56.961368 | 2020-04-28T19:06:45 | 2020-04-28T19:06:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,158 | py | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebookads_v2.adobjects.abstractobject import AbstractObject
from facebookads_v2.adobjects.abstractcrudobject import AbstractCrudObject
from facebookads_v2.adobjects.objectparser import ObjectParser
from facebookads_v2.api import FacebookRequest
from facebookads_v2.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class EventSourceGroup(
    AbstractCrudObject,
):
    """CRUD wrapper for the Graph API ``EventSourceGroup`` node.

    Auto-generated by Facebook's codegen framework (see the module
    docstring above); only comments should be edited by hand.
    """
    def __init__(self, fbid=None, parent_id=None, api=None):
        # Marker attribute used by the SDK for duck-type identification.
        self._isEventSourceGroup = True
        super(EventSourceGroup, self).__init__(fbid, parent_id, api)
    class Field(AbstractObject.Field):
        # Graph API field names available on this node.
        business = 'business'
        event_sources = 'event_sources'
        id = 'id'
        name = 'name'
    # @deprecated get_endpoint function is deprecated
    @classmethod
    def get_endpoint(cls):
        return 'event_source_groups'
    def api_create(self, parent_id, fields=None, params=None, batch=None, pending=False):
        """Create an event source group via the parent Business edge."""
        from facebookads_v2.adobjects.business import Business
        return Business(api=self._api, fbid=parent_id).create_event_source_group(fields, params, batch, pending)
    def api_get(self, fields=None, params=None, batch=None, pending=False):
        """Fetch this node (GET /<id>) and refresh this object in place.

        ``batch`` queues the request on a batch, ``pending`` returns the
        unexecuted request; otherwise the request runs immediately.
        """
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=EventSourceGroup,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    def api_update(self, fields=None, params=None, batch=None, pending=False):
        """Update this node (POST /<id>); accepts event_sources and name."""
        param_types = {
            'event_sources': 'list<string>',
            'name': 'string',
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='POST',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=EventSourceGroup,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    def create_shared_account(self, fields=None, params=None, batch=None, pending=False):
        """Share this group with ad accounts (POST /<id>/shared_accounts)."""
        param_types = {
            'accounts': 'list<string>',
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='POST',
            endpoint='/shared_accounts',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=EventSourceGroup,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=EventSourceGroup, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # Mapping of field name to SDK type string, used for deserialization.
    _field_types = {
        'business': 'Business',
        'event_sources': 'list<ExternalEventSource>',
        'id': 'string',
        'name': 'string',
    }
    @classmethod
    def _get_field_enum_info(cls):
        # No enum-typed fields exist on this node.
        field_enum_info = {}
        return field_enum_info
| [
"ian@fitbod.me"
] | ian@fitbod.me |
52c8f2a820bda42733e06cec9bba1d815e472589 | 6223dc2e5de7921696cb34fb62142fd4a4efe361 | /.metadata/.plugins/org.eclipse.core.resources/.history/1b/a0d512aaa564001418adf2b9b78fa3c6 | e912e3611ab82e431dc342e23e802cb7fff09fcd | [] | no_license | Mushirahmed/python_workspace | 5ef477b2688e8c25b1372f546752501ee53d93e5 | 46e2ed783b17450aba29e4e2df7b656522b2b03b | refs/heads/master | 2021-03-12T19:24:50.598982 | 2015-05-25T10:23:54 | 2015-05-25T10:23:54 | 24,671,376 | 0 | 1 | null | 2015-02-06T09:27:40 | 2014-10-01T08:40:33 | Python | UTF-8 | Python | false | false | 2,485 | #!/usr/bin/env python
#
# Copyright 2014 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy as np
from gnuradio import gr
import gras
class ztransform(gras.Block):
    """
    GNU Radio / GRAS block that applies a rational transfer function
    H(z) = num(z)/den(z) to a float stream, one sample per work() call,
    by evaluating the direct-form difference equation with input/output
    history queues.
    """
    def __init__(self):
        gras.Block.__init__(self,
            name="ztransform",
            in_sig=[np.float32],
            out_sig=[np.float32])
    def set_parameters(self,num,den,window):
        # *num* and *den* are space-separated coefficient strings,
        # highest power first (np.poly1d convention).
        self.num = list(map(float,num.split(" ")))
        self.den = list(map(float,den.split(" ")))
        print("self.num")
        print("self.den")
        self.n = window
        self.num = np.poly1d(self.num)
        self.den = np.poly1d(self.den)
        self.den_coeff = self.den.c
        nm_coeff = self.num.c
        #print self.den_coeff
        self.den_ord = self.den.order
        self.num_ord = self.num.order
        # Left-pad the numerator with zeros so both coefficient arrays
        # have den_ord+1 entries (assumes den_ord >= num_ord, i.e. a
        # proper transfer function -- TODO confirm with callers).
        for i in range(0,self.den_ord-self.num_ord):
            nm_coeff = np.insert(nm_coeff,0,0)
        self.num_coeff = nm_coeff
        #print self.num_coeff
        # History queues for the difference equation, seeded with zeros.
        self.in_q = [0]*(self.den_ord + 1)
        self.out_q = [0]*(self.den_ord + 1)
        self.final_q = []
    def work(self, input_items, output_items):
        # Consume one input sample and produce one filtered output
        # sample: y[n] = (sum(b_i * x[n-i]) - sum(a_i * y[n-i])) / a_0.
        in0 = input_items[0]
        out = output_items[0]
        #print "i am in work function"
        # <+signal processing here+>
        ans1 = 0
        ans2 = 0
        # Feedback term from past outputs (skips a_0).
        for i in range(1,self.den_ord + 1):
            ans1 += self.den_coeff[i]*self.out_q[len(self.out_q)-i]
        self.in_q.append(float(in0[0]))
        #print self.in_q
        # Feed-forward term from current and past inputs.
        for i in range(0,self.den_ord + 1):
            ans2 += self.num_coeff[i]*self.in_q[len(self.in_q)-i-1]
        #print ans2
        ans = ans2 - ans1
        ans = ans/self.den_coeff[0]
        # Slide both history windows forward by one sample.
        self.out_q.append(ans)
        self.out_q.pop(0)
        self.in_q.pop(0)
        out[0] = ans
        print "OUTPUT:",out[0]
        #self.final_q.append(ans)
        self.consume(0,1)
        self.produce(0,1)
| [
"imushir@gmail.com"
] | imushir@gmail.com | |
0b675c38b7c247e3a694123ac24a2c167d6e6da1 | e413e4020617f2645f7f3ed89ec698183c17e919 | /ftkPipeline/Scriptv3/a051_RunResc8bit.py | 9aa8cd9eda49570b5bbc7048c96e0a31e58b5df4 | [] | no_license | YanXuHappygela/Farsight-latest | 5c349421b75262f89352cc05093c04d3d6dfb9b0 | 021b1766dc69138dcd64a5f834fdb558bc558a27 | refs/heads/master | 2020-04-24T13:28:25.601628 | 2014-09-30T18:51:29 | 2014-09-30T18:51:29 | 24,650,739 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,248 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import shutil
import fnmatch
import os
import subprocess
import os.path
# ---------------------------------------------------------------------------------------------------------------------------------------
# Create Folder
# ---------------------------------------------------------------------------------------------------------------------------------------
def main( FARSIGHT_BIN_EXE, LOCAL_DATASET_PATH_PARAMETERS, OUTPUT, INPUT, runRescale_log ):
    """Rescale INPUT.nrrd to 8-bit as OUTPUT.nrrd via ftkMainDarpa,
    skipping the run when the output already exists, and appending the
    executed command to runRescale_log. (Python 2 module.)"""
    if os.path.exists(OUTPUT+'.nrrd'):
        print "Rescale Exits already exist"
    else:
        print "Rescale does not exist"
        #runCopy_db_log = LOCAL_DATASET_PATH_LOG +'/runCopyProjections.log'
        #TEMP = FARSIGHT_BIN_EXE+'/ftkMainDarpa PROJECTION '+FILE_GFP+' '+LOCAL_DATASET_PATH_DATA_DEBUG+' > '+runCopy_db_log+' 2>&1'
        # SECURITY: the command is a string run with shell=True, so any
        # shell metacharacters in INPUT/OUTPUT paths are interpreted by
        # the shell -- acceptable only for trusted pipeline paths.
        TEMP = FARSIGHT_BIN_EXE+'/ftkMainDarpa RESCALE_8BIT '+INPUT+'.nrrd '+OUTPUT+'.nrrd'+' >> '+runRescale_log+' 2>&1'
        TEMP2 = subprocess.Popen(TEMP, shell=True)
        print 'Rescale of '+INPUT
        # Block until the rescale finishes before logging the command.
        TEMP2.communicate()
        TEMP_FILE = open(runRescale_log, 'a')
        TEMP_FILE.write('\nCOMMAND: '+TEMP+'\n')
        TEMP_FILE.close()
if __name__ == "__main__":
    # NOTE(review): none of these globals are defined in this module, so
    # running the file directly raises NameError -- presumably the pipeline
    # driver injects them via execfile/exec before invoking it. Confirm.
    main( FARSIGHT_BIN_EXE, LOCAL_DATASET_PATH_PARAMETERS, OUTPUT, INPUT, runRescale_log )
"xy198908@gmail.com"
] | xy198908@gmail.com |
943a2f8d43d78bf7c8120d223d6f9de88072deb3 | f5f30ff2885f946949dfbcd6f8e4bfa25dbdcb63 | /quote_balance/console_scripts.py | ffbdc0a825d5b1dc411593d98e5149e980a11e3b | [
"BSD-2-Clause"
] | permissive | edelooff/quote-balance | bae22586344550e0acd6b1ffcb4ad0462eb64b26 | 94ea1fb2b645a711ccf804ee6c69eac572f4d645 | refs/heads/master | 2020-04-02T06:52:25.804009 | 2016-08-05T22:24:49 | 2016-08-05T22:29:56 | 65,052,820 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | import argparse
import os
from . import tree_walker, check_file_balance
def arg_config():
    """Build the CLI parser and return the parsed command line namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument('target', type=str, help='Target to check for balance')
    ext_help = (
        'file extension that should be matched for directory searches. '
        'This option can be provided multiple times. If no extensions '
        'are set, the default checks only "py" extensions')
    parser.add_argument(
        '-e', '--ext',
        action='append',
        dest='file_exts',
        metavar='EXT',
        help=ext_help)
    parser.add_argument(
        '-r', '--recursive',
        action='store_true',
        help='recursively check all files in target directory')
    return parser.parse_args()
def check_file(filename):
    """Checks a single file and reports imbalanced quotes.

    Prints one "<file>:<line> <kinds> quotes are imbalanced" line per
    finding. (Python 2 print statement -- this module targets Python 2.)
    """
    for line, imbalance in check_file_balance(filename):
        print '{name}:{line} {imbalance} quotes are imbalanced'.format(
            name=filename, line=line, imbalance=' and '.join(imbalance))
name=filename, line=line, imbalance=' and '.join(imbalance))
def check_directory(directory, file_exts, recursive):
    """Check all matching files in the directory, recursive or not."""
    matching_files = tree_walker(directory, file_exts, recursive=recursive)
    for path in matching_files:
        check_file(path)
def main():
    """Entry point: dispatch to single-file or directory checking."""
    options = arg_config()
    if os.path.isfile(options.target):
        return check_file(options.target)
    if options.file_exts is None:
        extensions = {'py'}
    else:
        extensions = set(options.file_exts)
    return check_directory(options.target, extensions, options.recursive)
| [
"elmer.delooff@gmail.com"
] | elmer.delooff@gmail.com |
4029206a74cb0e31e10ca7e3dc30388fad608155 | d8c758b6220c784b5b7fde8b0ddcacf76a6c3966 | /preprocess.py | ce66075c09090770a4c355529cc0f67318e7f9e6 | [] | no_license | RitaRamo/crawling_with_scrapy | b41de09c4d257f36d9f3b95b82d972beda649ad6 | d96e7738bf741746ea41b8c6f08a1e7a0c15cee0 | refs/heads/master | 2020-05-14T09:00:07.333829 | 2019-04-16T17:25:59 | 2019-04-16T17:25:59 | 181,732,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | from bs4 import BeautifulSoup, Comment
# Tags whose contents never contribute visible page text.
TAGS_BLACKLIST = ['noscript', 'script', 'style',
                  'input', 'textarea', 'iframe', 'footer', 'form']


def extract(to_remove):
    """Detach every node in *to_remove* from its parse tree."""
    for node in to_remove:
        node.extract()
def remove_tags(soup):
    """Strip every blacklisted tag (scripts, forms, ...) out of *soup*."""
    for blacklisted in TAGS_BLACKLIST:
        extract(soup.findAll(blacklisted))
def remove_comments(soup):
    """Drop all HTML comment nodes from *soup*."""
    def _is_comment(text):
        return isinstance(text, Comment)
    extract(soup.findAll(text=_is_comment))
def get_text(html):
    """Return the visible text of *html* with whitespace collapsed to
    single spaces."""
    soup = BeautifulSoup(html, 'lxml')
    remove_tags(soup)
    remove_comments(soup)
    words = soup.get_text(" ").split()
    return " ".join(words)
| [
"rita.mparada.ramos@gmail.com"
] | rita.mparada.ramos@gmail.com |
7de4c23a140f42cdbbba5ae0609eea20d1f3982d | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /FLgJEC8SK2AJYLC6y_5.py | 95cbe236aa1c401c0102c41ea9671e92e21d7f3c | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py |
def possible_path(lst):
    """Return True when every consecutive pair in *lst* is a legal move
    on the fixed board graph (rooms 1-4 plus hallway 'H')."""
    adjacency = {1: [2], 2: [1, 'H'], 3: [4], 4: [3, 'H'], 'H': [2, 4]}
    for current, following in zip(lst, lst[1:]):
        if following not in adjacency[current]:
            return False
    return True
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
c1eb78f39db51e874a025f85beefc14e2d389f5d | 29bec83fc600720533ad2bcf17fc90cd9ca385b7 | /0x08-python-more_classes/practice/robot_OOP.py | 5b5b54edc1869105b2e61134ab53bd08432d656e | [] | no_license | VictorZ94/holbertonschool-higher_level_programming | 73a7f504cde583f43f641e18e692e062610870a4 | ad512a1c76dc9b4c999a0ba2922c79f56206dd98 | refs/heads/master | 2023-03-25T04:38:12.708766 | 2021-03-24T01:08:47 | 2021-03-24T01:08:47 | 291,826,914 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | class Robot:
def __init__(self, name=None, build_year=None):
self.__name = name
#self.filds = Variable
self.__build_year = build_year
def say_hi(self):
if self.__name:
print("Hi, I am " + self.__name)
else:
print("Hi, I am a robot without a name")
def set_name(self, name):
self.__name = name
def get_name(self):
return self.__name
def set_build_year(self, by):
#setter receive parameter and pass it to getter
self.__build_year = by
def get_build_year(self):
#getter returns name variable
return self.__build_year
def __repr__(self):
return "Robot('" + self.__name + "', " + str(self.__build_year) + ")"
def __str__(self):
return "Name: " + self.__name + ", Build Year: " + str(self.__build_year)
if __name__ == "__main__":
    # Demo: greet both robots, then correct Caliban's build year before
    # reporting each robot's year.
    x = Robot("Marvin", 1979)
    y = Robot("Caliban", 1943)
    for rob in [x, y]:
        rob.say_hi()
        if rob.get_name() == "Caliban":
            rob.set_build_year(1993)
        print("I was built in the year " + str(rob.get_build_year()) + "!")
| [
"zrvictor@outlook.com"
] | zrvictor@outlook.com |
f95375117f862312092b749273bda0520902399b | ed8bf8d548326cd80232a33fcae3135d4d4f4a1a | /examples/fauxware/solve.py | 579b296941c35da392b1453748e2f4e57d4759b5 | [] | no_license | symeonp/angr-doc | 480826e51cb16a5c007178f80073865fc0d92393 | 5d69aa70eb586cd064800cc27e6e336dd9908874 | refs/heads/master | 2021-01-12T22:46:19.390386 | 2016-01-05T20:49:22 | 2016-01-05T20:49:22 | 48,951,994 | 0 | 0 | null | 2016-01-03T16:08:32 | 2016-01-03T16:08:32 | null | UTF-8 | Python | false | false | 4,003 | py | #!/usr/bin/env python
import angr
# Look at fauxware.c! This is the source code for a "faux firmware" (@zardus
# really likes the puns) that's meant to be a simple representation of a
# firmware that can authenticate users but also has a backdoor - the backdoor
# is that anybody who provides the string "SOSNEAKY" as their password will be
# automatically authenticated.
def basic_symbolic_execution():
    """Symbolically execute the fauxware binary until the first two-way
    branch, then concretize stdin for each branch and return the input
    containing the backdoor password "SOSNEAKY"."""
    # We can use this as a basic demonstration of using angr for symbolic
    # execution. First, we load the binary into an Angr project.
    p = angr.Project('fauxware')
    # Now, we want to construct a representation of symbolic program state.
    # SimState objects are what angr manipulates when it symbolically executes
    # binary code.
    # The entry_state constructor generates a SimState that is a very generic
    # representation of the possible program states at the program's entry
    # point. There are more constructors, like blank_state, which constructs a
    # "blank slate" state that specifies as little concrete data as possible,
    # or full_init_state, which performs a slow and pedantic initialization of
    # program state as it would execute through the dynamic loader.
    state = p.factory.entry_state()
    # States are relatively static objects, they don't do anything "smart".
    # You can read data into and out of them, but that's about it.
    # In order to actually perform symbolic execution, you need a Path.
    # Paths wrap states and are your interface for stepping them forward and
    # tracking their history.
    path = p.factory.path(state)
    # Now, in order to manage the symbolic execution process from a very high
    # level, we have a PathGroup. Path groups are just collections of paths
    # with various tags attached with a number of convenient interfaces for
    # managing them.
    pathgroup = p.factory.path_group(path)
    # Uncomment the following line to spawn an IPython shell when the program
    # gets to this point so you can poke around at the four objects we just
    # constructed. Use tab-autocomplete and IPython's nifty feature where if
    # you stick a question mark after the name of a function or method and hit
    # enter, you are shown the documentation string for it.
    # import IPython; IPython.embed()
    # Now, we begin execution. This will symbolically execute the program until
    # we reach a branch statement for which both branches are satisfiable.
    pathgroup.step(until=lambda lpg: len(lpg.active) > 1)
    # If you look at the C code, you see that the first "if" statement that the
    # program can come across is comparing the result of the strcmp with the
    # backdoor password. So, we have halted execution with two states, each of
    # which has taken a different arm of that conditional branch. If you drop
    # an IPython shell here and examine pathgroup.active[n].state.se.constraints
    # you will see the encoding of the condition that was added to the state to
    # constrain it to going down this path, instead of the other one. These are
    # the constraints that will eventually be passed to our constraint solver
    # (z3) to produce a set of concrete inputs satisfying them.
    # As a matter of fact, we'll do that now.
    input_0 = pathgroup.active[0].state.posix.dumps(0)
    input_1 = pathgroup.active[1].state.posix.dumps(0)
    # We have used a utility function on the state's posix plugin to perform a
    # quick and dirty concretization of the content in file descriptor zero,
    # stdin. One of these strings should contain the substring "SOSNEAKY"!
    if 'SOSNEAKY' in input_0:
        return input_0
    else:
        return input_1
def test():
    """Do-nothing test hook; the CI infrastructure expects this file to expose one."""
    pass
if __name__ == '__main__':
    # Python 2 print statement: this script targets Python 2 / legacy angr.
    print basic_symbolic_execution()
# You should be able to run this program and pipe its output into fauxware in
# order to produce a "successfully authenticated" message
| [
"andrew@andrewdutcher.com"
] | andrew@andrewdutcher.com |
266c5d137ecfc7ac54c4b637fa660fd3a6e375bc | c548c10c4fd0b6c1d1c10cc645cb3b90b31f2de6 | /keras/keras42_fashion3_dnn.py | b24254a02eed1096ea29ddb4a30732584ecef8aa | [] | no_license | sswwd95/Study | caf45bc3c8c4301260aaac6608042e53e60210b6 | 3c189090c76a68fb827cf8d6807ee1a5195d2b8b | refs/heads/master | 2023-06-02T21:44:00.518810 | 2021-06-26T03:01:26 | 2021-06-26T03:01:26 | 324,061,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | import numpy as np
# 1. Data: Fashion-MNIST, flattened for a dense (fully connected) network.
from tensorflow.keras.datasets import fashion_mnist
(x_train,y_train), (x_test, y_test) = fashion_mnist.load_data()
print(np.max(x_train)) #255
print(x_train.shape) # (60000, 28, 28) before flattening
# Flatten 28x28 images into 784-vectors and scale pixels into [0, 1].
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1]*x_train.shape[2])/255.
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1]*x_test.shape[2])/255.
print(x_train.shape) #(60000, 784)
from tensorflow.keras.utils import to_categorical
# One-hot encode the 10 class labels for categorical_crossentropy.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print(y_train.shape) #(60000, 10)
# 2. Model: stacked dense ReLU layers ending in a 10-way softmax.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Dense(100, activation='relu', input_shape=(28*28,)))
model.add(Dense(80, activation='relu'))
model.add(Dense(80, activation='relu'))
model.add(Dense(80, activation='relu'))
model.add(Dense(80, activation='relu'))
model.add(Dense(80, activation='relu'))
model.add(Dense(10, activation='softmax'))
# 3. Compile and train with early stopping.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics='acc')
from tensorflow.keras.callbacks import EarlyStopping
# NOTE(review): monitors training 'acc', not 'val_acc', even though a
# validation_split is used below -- confirm this is intentional.
es = EarlyStopping(monitor='acc', patience=20, mode='max')
model.fit(x_train,y_train, batch_size=16, epochs=500, validation_split=0.2, callbacks=[es])
# 4. Evaluate and predict on the held-out test set.
loss, acc = model.evaluate(x_test, y_test, batch_size=16)
print('loss, acc : ', loss, acc)
y_pred = model.predict(x_test)
# cnn
# loss, acc : 0.43148916959762573 0.8651000261306763
# dnn
# loss, acc : 0.5104688405990601 0.8884000182151794 | [
"sswwd95@gmail.com"
] | sswwd95@gmail.com |
95be7252f67172d4c7b2ad4c779fed9aae242eec | ad129f7fc03f10ef2b4734fa2c2b9cb9367c84fa | /Aula 12 - Condições - pt2/Exe041.py | 595c27c7c25edb398fae7f0dee375ccdf3f19084 | [] | no_license | LucasDatilioCarderelli/Exercises_CursoemVideo | c6dc287d7c08a0349867a17185474744513dbaac | 67c2d572a4817a52dababbca80513e4b977de670 | refs/heads/master | 2022-03-31T20:20:52.827370 | 2020-01-27T13:15:19 | 2020-01-27T13:15:19 | 236,491,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | # Exe041 - Digite o ano de nascimento e clasifique-o em que faixa está,
# Age groups (inclusive upper bounds): mirim (up to 9), infantil (up to 14),
# júnior (up to 19), sênior (up to 25), master (above 25).
from datetime import date

# Age is approximated from calendar years only; birthdays are ignored.
nasceu = int(input('Em que ano nasceu?: '))
idade = (date.today().year) - nasceu
print('O atleta tem {} anos'.format(idade))
# "até 9" is inclusive, so the boundaries compare with <=; the original
# < comparisons pushed each boundary age (9, 14, 19, 25) one category too high.
if idade <= 9:
    print('Classificação: MIRIM')
elif idade <= 14:
    print('Classificação: INFANTIL')
elif idade <= 19:
    print('Classificação: JÚNIOR')
elif idade <= 25:
    print('Classificação: SÊNIOR')
else:
    print('Classificação: MASTER')
| [
"noreply@github.com"
] | LucasDatilioCarderelli.noreply@github.com |
2a83c3d9eaab8aa74b431a89b46b8e95525c4c5b | 2e359c77bd9b8b1b7955b3ae5117191fa650ab72 | /app/main/views.py | 500475eb9d2dcf72fd2530f674d5410de279e1e7 | [] | no_license | sknewgiser/myFlasky | e60baddafe415ee01102f856a1e183164d7377c3 | 0ff7ad2bb5a75eb4350e424a1f4cd4ba434681e9 | refs/heads/master | 2020-05-01T11:02:08.497161 | 2016-06-27T14:07:10 | 2016-06-27T14:07:10 | 177,432,719 | 1 | 0 | null | 2019-03-24T15:25:20 | 2019-03-24T15:25:20 | null | UTF-8 | Python | false | false | 1,313 | py | # !/usr/bin/python
# -*- coding: utf-8 -*-
from datetime import datetime
from flask import render_template, session, redirect, url_for, current_app
from . import main
from .forms import NameForm
from .. import db
from ..models import User
from ..email import send_email
@main.route('/', methods=['GET', 'POST'])
def index():
    """Home page: greet the user by name and remember whether we have seen them.

    On a valid POST, the submitted name is looked up; first-time names are
    added to the database session and (when FLASKY_ADMIN is configured)
    announced by email.  Redirects after POST so a refresh does not resubmit.
    """
    form = NameForm()
    if form.validate_on_submit():
        # Fetch the User row whose username equals the submitted form value.
        user = User.query.filter_by(username=form.name.data).first()
        if user is None:
            user = User(username=form.name.data)  # create a new row
            db.session.add(user)  # this "session" is SQLAlchemy's unit of work
            session['known'] = False  # this "session" is Flask's per-user session
            if current_app.config['FLASKY_ADMIN']:
                send_email(current_app.config['FLASKY_ADMIN'], 'New User',
                           'mail/new_user', user=user)
        else:
            session['known'] = True
        session['name'] = form.name.data
        return redirect(url_for('.index'))
    return render_template('index.html',
                           form=form, name=session.get('name'),
                           known=session.get('known', False),
current_time=datetime.utcnow()) | [
"lyp_login@outlook.com"
] | lyp_login@outlook.com |
ae94884792fb67ba8b1484f778a5f874b1a2b6e3 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_75/120.py | 2d9966f5134b7b96b15e6ca0d0a8fa742d9cb581 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | N = int(input())
for num in range(1, N + 1):
    # One whitespace-separated line per case:
    #   <#combinations> <pairs+result...> <#oppositions> <pairs...> <spell>
    inp = input().split()
    combine = {}
    opposed = set()
    i = int(inp[0])
    # Combination rules: an adjacent pair of elements (either order) turns
    # into the single combined element given as the third character.
    for j in range(1, 1 + i):
        combine[inp[j][:2]] = inp[j][2]
        combine[inp[j][1::-1]] = inp[j][2]
    i = i + 1
    # Opposition rules: these two elements (either order) may not coexist.
    for j in range(i + 1, i + 1 + int(inp[i])):
        opposed.add(inp[j])
        opposed.add(inp[j][::-1])
    # '@' is a sentinel so ans[-1] is always defined; it never matches a rule.
    ans = ['@']
    for elem in inp[-1]:
        if ans[-1] + elem in combine:
            # The new element combines with the most recent one.
            ans[-1] = combine[ans[-1] + elem]
        else:
            for elem1 in ans:
                if elem1 + elem in opposed:
                    # An opposed element is present: the element list clears.
                    ans = ['@']
                    break
            else:
                ans.append(elem)
    ans = ', '.join(ans[1:])
    print("Case #", num, ": [", ans, "]", sep = '')
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
d2e6eca292c9df91bd4fd6422e9ed3c0ece225d7 | ba719722e890a7822a5533a8b6efd06cc776b17e | /Maricopa_County/fitlering Script/Surprise_85379_Maricopa_AZ.py | bd60aa3e5d180bd3dad8fa1438bf1c0cc4ca2866 | [] | no_license | devhadisov/python_selenium_zillow | 9c80566d829721dce952ab4d7a285d1fd970fe19 | e0b4f7243b548404912bdcdce4bcdf7168413242 | refs/heads/master | 2022-12-24T17:14:32.903874 | 2020-09-24T20:20:25 | 2020-09-24T20:20:25 | 298,384,758 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,459 | py | import selenium
from selenium import webdriver
from selenium.webdriver import Chrome
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
import requests
import urllib.request
import json, csv, lxml, time, re
import datetime
import hashlib
from insertdatabase import InsertDB
def main(htmlstring, driver):
    """Scrape Zillow "for sale by owner" listings for ZIP 85379 (Surprise, AZ).

    Pages through Zillow's GetSearchPageState JSON endpoint; for every listing
    whose status text contains "by owner", opens the detail page in *driver*
    (Selenium Chrome), regex-extracts US phone numbers from the page source and
    inserts one row per phone number into the "maricopa" table via InsertDB.

    NOTE(review): the *htmlstring* parameter is never used.
    """
    table_name = "maricopa"
    # Browser-like request headers so the JSON endpoint serves the same data
    # as the interactive site.
    header = {
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.9,ko;q=0.8',
        'referer': 'https://www.zillow.com/homes/85139_rb/',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'
    }
    # Search parameters: map bounds, region id/type and zoom for ZIP 85379.
    pagination = ""
    usersSearchTerm = "85379"
    west = "-112.43028831787109"
    east = "-112.3211116821289"
    south = "33.53811759133085"
    north = "33.65736633255334"
    regionId = "94953"
    regionType = "7"
    mapZoom = "13"
    includeList = "true"
    # https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={%22pagination%22:{},%22usersSearchTerm%22:%2285006%22,%22mapBounds%22:{%22west%22:-112.07973577801513,%22east%22:-112.01665022198486,%22south%22:33.43522122804253,%22north%22:33.494937169247144},%22regionSelection%22:[{%22regionId%22:94722,%22regionType%22:7}],%22isMapVisible%22:true,%22mapZoom%22:14,%22filterState%22:{%22sort%22:{%22value%22:%22globalrelevanceex%22}},%22isListVisible%22:true}&includeMap=false&includeList=true
    # NOTE(review): this template mixes literal JSON braces with str.format
    # placeholders; calling .format() on it would raise.  The only call that
    # uses it is commented out below, so the variable is effectively dead.
    default_first_url = 'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={"pagination":{0},"usersSearchTerm":"{1}","mapBounds":{"west":{2},"east":{3},"south":{4},"north":{5}},"regionSelection":[{"regionId":{6},"regionType":{7}}],"isMapVisible":true,"mapZoom":{8},"filterState":{"sort":{"value":"globalrelevanceex"}},"isListVisible":true}&includeMap=false&includeList={9}'
    # Page-1 URL (no "currentPage" key), built by plain string concatenation.
    first_case_url = 'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={"pagination":{' + pagination + '},' + '"usersSearchTerm":"' + usersSearchTerm + '","mapBounds":{"west":' + west + ',"east":' + east + ',"south":' + south + ',"north":' + north + '},"regionSelection":[{"regionId":' + regionId + ',"regionType":' + regionType + '}],"isMapVisible":true,"mapZoom":' + mapZoom + ',"filterState":{"sort":{"value":"globalrelevanceex"}},"isListVisible":true}&includeMap=false&includeList=' + includeList
    # first_url = default_first_url.format(pagination, usersSearchTerm, west, east, south, north, regionId, regionType, mapZoom, includeList)
    print(first_case_url)
    # return
    default_page_url = 'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={"pagination":{"currentPage":' + pagination + '},' + '"usersSearchTerm":"' + usersSearchTerm + '","mapBounds":{"west":' + west + ',"east":' + east + ',"south":' + south + ',"north":' + north + '},"regionSelection":[{"regionId":' + regionId + ',"regionType":' + regionType + '}],"isMapVisible":true,"mapZoom":' + mapZoom + ',"filterState":{"sort":{"value":"globalrelevanceex"}},"isListVisible":true}&includeMap=false&includeList=' + includeList
    counts = 1
    # Pages 1..4; page 1 uses the first_case_url variant.
    for page in range(1, 5):
        default_page_url = 'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={"pagination":{"currentPage":' + str(page) + '},' + '"usersSearchTerm":"' + usersSearchTerm + '","mapBounds":{"west":' + west + ',"east":' + east + ',"south":' + south + ',"north":' + north + '},"regionSelection":[{"regionId":' + regionId + ',"regionType":' + regionType + '}],"isMapVisible":true,"mapZoom":' + mapZoom + ',"filterState":{"sort":{"value":"globalrelevanceex"}},"isListVisible":true}&includeMap=false&includeList=' + includeList
        if page == 1:
            url = first_case_url
        else:
            url = default_page_url
        response = requests.get(url, headers=header)
        result = response.json()
        properties_infos = result["searchResults"]["listResults"]
        print(len(properties_infos))
        for i in range(0, len(properties_infos)):
            data_base = []
            property_url = properties_infos[i]["detailUrl"]
            status_text = properties_infos[i]["statusText"]
            print(status_text, counts)
            counts += 1
            # Address parts are optional in the payload; fall back to "".
            # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
            # "except KeyError" would be safer.
            try:
                street_add = properties_infos[i]["hdpData"]["homeInfo"]["streetAddress"]
            except:
                street_add = ""
            try:
                city = properties_infos[i]["hdpData"]["homeInfo"]["city"]
            except:
                city = ""
            try:
                state = properties_infos[i]["hdpData"]["homeInfo"]["state"]
            except:
                state = ""
            try:
                zipcode = properties_infos[i]["hdpData"]["homeInfo"]["zipcode"]
            except:
                zipcode = ""
            property_address = street_add + ", " + city + ", " + state + " " + zipcode
            # Only "by owner" style listings get the detail-page treatment.
            if "by owner" in status_text:
                print("--------------------------------------------------> : ", i + 1)
                driver.get(property_url)
                time.sleep(10)
                # street_add = driver.find_element_by_xpath("//h1[@class='ds-address-container']/span[1]").text
                # property_address = street_add + ", " + city + ", " + state + " " + zipcode
                # phone_number = driver.find_element_by_xpath("//span[@class='listing-field']").text
                # Match "(123) 456-7890" or "(123)456-7890" anywhere in the page.
                phones = re.findall(r'[(][\d]{3}[)][ ]?[\d]{3}-[\d]{4}', driver.page_source)
                for phone in range(1, len(phones) + 1):
                    phone_number = phones[phone - 1]
                    print("Property Address--------------------> : ", property_address)
                    print("Property Url------------------------> : ", property_url)
                    print("Property Status---------------------> : ", status_text)
                    print("Owner Phone Number------------------> : ", phone_number)
                    # Row identifier: md5 over address + status + phone number.
                    string_id = property_address + status_text + phone_number
                    m = hashlib.md5()
                    m.update(string_id.encode('utf8'))
                    identifier = m.hexdigest()
                    print("hash-------------------->", identifier)
                    create_time = str(datetime.datetime.now())
                    update_time = ""
                    insertdb = InsertDB()
                    data_base.append((property_address, street_add, city, state, zipcode, status_text, phone_number, identifier, create_time, update_time))
                    insertdb.insert_document(data_base, table_name)
    # return
if __name__ == "__main__":
print("-----------------start---------------")
path = "driver\\chromedriver.exe"
driver = Chrome(executable_path=path)
driver.get("https://www.zillow.com/")
time.sleep(2)
driver.maximize_window()
main(driver.page_source, driver) | [
"dev.hadisov@gmail.com"
] | dev.hadisov@gmail.com |
c34bbc8a5f1f93bad4d3178bd354e23f55da7ac2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03211/s089250300.py | e527a16181393649ce35c1070bbbf8b7f202f33e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | s = input()
# s (read just above) is a digit string; collect every 3-character window of
# it, then measure how far each window, read as an integer, is from 753.
# The minimum distance is printed afterwards.
l = []
ans = 0
ans_l = []
for i in range(len(s)-2):
    l.append(s[i:i+3])
for i in l:
    ans = abs(int(i)-753)
    ans_l.append(ans)
print(min(ans_l)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ccd944b721ea43533688111c8209f149fb569c3c | 77d4d5a1881297dce3003560e04a2e39a97d4465 | /code_chef/CHFCHK.py | 9f69147f1b66643422b26f10a1330a0a89a919aa | [] | no_license | gomsterX/competitive_programming | c34820032c24532d62325a379590a22fa812159a | 72ac1fe61604e5a5e41f336bb40377fd7e4738d7 | refs/heads/master | 2023-07-19T21:28:16.205718 | 2021-09-02T14:18:44 | 2021-09-02T14:18:44 | 271,074,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | #Problem ID: CHFCHK
#Problem Name: Chef Chick
# For each test case the answer is the smallest coordinate, so take min().
for _ in range(int(input())):
    # The count line must still be consumed even though its value is unused
    # (the list length is implicit in the next line).
    n = input()
    l = list(map(int, input().split()))
    # min() is O(n); the original sorted the whole list just to read l[0].
    print(min(l))
| [
"mohamedmoussaa7@gmail.com"
] | mohamedmoussaa7@gmail.com |
95d44e5cf23fbc42626c705a67cb6448244ae02e | 5d7a3dc27540e04e5cb9c8f4742830c7fca188f0 | /week-05/code/httpdate.py | 3543de4e38136299f262df0feccd7d45d8775a8f | [] | no_license | PythonCHB/PythonIntroClass | 1986e553390c6f3504e279cda23744ceacc3a292 | b49d41bd04696d45ef4394b489de408cbd3b3d32 | refs/heads/master | 2020-12-24T17:35:31.408292 | 2014-10-16T18:09:21 | 2014-10-16T18:09:21 | 4,633,372 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | #!/usr/bin/env python
"""
httpdate.py
Module that provides a function that formats a date to the HTTP 1.1 spec
"""
import datetime
def httpdate(dt):
    """Format *dt* as an RFC 1123 (HTTP/1.1) date string.

    :param dt: A python datetime object in the UTC (GMT) time zone,
        for example ``datetime.datetime.utcnow()``.
    :returns: a string such as ``"Thu, 02 Jan 2014 03:04:05 GMT"``
    """
    day_names = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
    month_names = ("Jan", "Feb", "Mar", "Apr", "May", "Jun",
                   "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
    fields = (day_names[dt.weekday()], dt.day, month_names[dt.month - 1],
              dt.year, dt.hour, dt.minute, dt.second)
    return "%s, %02d %s %04d %02d:%02d:%02d GMT" % fields
def httpdate_now():
    """Return the current UTC time formatted as an RFC 1123 (HTTP/1.1) date."""
    return httpdate( datetime.datetime.utcnow() )
if __name__ == "__main__":
print "the HTTP 1.1 date string for now is:"
print httpdate_now() | [
"PythonCHB@gmail.com"
] | PythonCHB@gmail.com |
ad3b5cc29e3c09e2396b8cb83642293b368591b3 | 145d0449b8babd749b758986a93421f6f3c59f9b | /python/deque.py | c0d3c58d56bc104b27dab87ec658fcf21f257184 | [
"MIT"
] | permissive | mattfenwick/DataStructures | cd5fcdbe2a1e2b473a4124fb3d747653b6a3f8eb | 9de052e36fb709488282938e03b5fde8ac92bc1d | refs/heads/master | 2020-05-07T12:01:25.079198 | 2014-03-08T15:39:07 | 2014-03-08T15:39:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py |
class Deque(object):
def __init__(self, front=[], back=[]):
self._front = front
self._back = back
def shift(self):
if len(self._back) == 0:
self._flip()
if len(self._back) == 0:
raise ValueError("can't shift empty deque")
return self._back.pop()
def unshift(self, elem):
self._back.append(elem)
def pop(self):
if len(self._front) == 0:
self._flip()
if len(self._front) == 0:
raise ValueError("can't pop empty deque")
return self._front.pop()
def push(self, elem):
self._front.append(elem)
def _flip(self):
if len(self._front) != 0 and len(self._back) != 0:
raise ValueError("can't flip deque with non-empty front and back")
new_back = self._front[-1::-1]
new_front = self._back[-1::-1]
self._front = new_front
self._back = new_back
def __repr__(self):
return repr(self._back[-1::-1] + self._front)
| [
"mfenwick100@gmail.com"
] | mfenwick100@gmail.com |
4c25c06ebf9b8bc22d801c7c29fd4d42b8ddc6ae | 0354d8e29fcbb65a06525bcac1f55fd08288b6e0 | /clients/python-flask/generated/swagger_server/models/cause_user_id_cause.py | 4f8f7914fcde8ff4d44b2b4bcadc2c93c370d8ea | [
"MIT"
] | permissive | zhiwei55/swaggy-jenkins | cdc52956a40e947067415cec8d2da1425b3d7670 | 678b5477f5f9f00022b176c34b840055fb1b0a77 | refs/heads/master | 2020-03-06T20:38:53.012467 | 2018-02-19T01:53:33 | 2018-02-19T01:54:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,831 | py | # coding: utf-8
from __future__ import absolute_import
from .base_model_ import Model
from datetime import date, datetime
from typing import List, Dict
from ..util import deserialize_model
class CauseUserIdCause(Model):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    def __init__(self, _class: str=None, short_description: str=None, user_id: str=None, user_name: str=None):
        """
        CauseUserIdCause - a model defined in Swagger

        :param _class: The _class of this CauseUserIdCause.
        :type _class: str
        :param short_description: The short_description of this CauseUserIdCause.
        :type short_description: str
        :param user_id: The user_id of this CauseUserIdCause.
        :type user_id: str
        :param user_name: The user_name of this CauseUserIdCause.
        :type user_name: str
        """
        # Maps each model attribute to its Python type (used during
        # (de)serialization).
        self.swagger_types = {
            '_class': str,
            'short_description': str,
            'user_id': str,
            'user_name': str
        }

        # Maps each model attribute to its JSON key in API payloads.
        self.attribute_map = {
            '_class': '_class',
            'short_description': 'shortDescription',
            'user_id': 'userId',
            'user_name': 'userName'
        }

        # "self.__class" is name-mangled to "_CauseUserIdCause__class", which
        # keeps the storage slot distinct from the "_class" property below.
        self.__class = _class
        self._short_description = short_description
        self._user_id = user_id
        self._user_name = user_name

    @classmethod
    def from_dict(cls, dikt) -> 'CauseUserIdCause':
        """
        Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The CauseUserIdCause of this CauseUserIdCause.
        :rtype: CauseUserIdCause
        """
        return deserialize_model(dikt, cls)

    @property
    def _class(self) -> str:
        """
        Gets the _class of this CauseUserIdCause.

        :return: The _class of this CauseUserIdCause.
        :rtype: str
        """
        return self.__class

    @_class.setter
    def _class(self, _class: str):
        """
        Sets the _class of this CauseUserIdCause.

        :param _class: The _class of this CauseUserIdCause.
        :type _class: str
        """
        self.__class = _class

    @property
    def short_description(self) -> str:
        """
        Gets the short_description of this CauseUserIdCause.

        :return: The short_description of this CauseUserIdCause.
        :rtype: str
        """
        return self._short_description

    @short_description.setter
    def short_description(self, short_description: str):
        """
        Sets the short_description of this CauseUserIdCause.

        :param short_description: The short_description of this CauseUserIdCause.
        :type short_description: str
        """
        self._short_description = short_description

    @property
    def user_id(self) -> str:
        """
        Gets the user_id of this CauseUserIdCause.

        :return: The user_id of this CauseUserIdCause.
        :rtype: str
        """
        return self._user_id

    @user_id.setter
    def user_id(self, user_id: str):
        """
        Sets the user_id of this CauseUserIdCause.

        :param user_id: The user_id of this CauseUserIdCause.
        :type user_id: str
        """
        self._user_id = user_id

    @property
    def user_name(self) -> str:
        """
        Gets the user_name of this CauseUserIdCause.

        :return: The user_name of this CauseUserIdCause.
        :rtype: str
        """
        return self._user_name

    @user_name.setter
    def user_name(self, user_name: str):
        """
        Sets the user_name of this CauseUserIdCause.

        :param user_name: The user_name of this CauseUserIdCause.
        :type user_name: str
        """
        self._user_name = user_name
| [
"cliffano@gmail.com"
] | cliffano@gmail.com |
8a410271a8a9c98e06529afd8c80fd2be884af94 | 2626f6e6803c8c4341d01f57228a0fe117e3680b | /students/BrandonHenson/Lesson04/run_mailroom.py | d9d651237457dcc3a3a439afbf85193f92bc7e52 | [] | no_license | kmsnyde/SP_Online_Course2_2018 | 9e59362da253cdec558e1c2f39221c174d6216f3 | 7fe8635b47d4792a8575e589797260ad0a2b027e | refs/heads/master | 2020-03-19T17:15:03.945523 | 2018-09-05T22:28:55 | 2018-09-05T22:28:55 | 136,750,231 | 0 | 0 | null | 2018-06-09T19:01:52 | 2018-06-09T19:01:51 | null | UTF-8 | Python | false | false | 4,425 | py | # Python 220
# Lesson 4
# 7-17-18
# !/usr/bin/env python3
import os
from mailroom import Donor, Donor_list
import json
# Seed data: the Donor_list starts with known donors and their donation
# histories (amounts are dollars).
donor_history = Donor_list(Donor('Brandon Henson', [1005.49, 3116.72, 5200]),
                           Donor('Alicia Henson', [21.47, 1500]),
                           Donor('Michael Green', [2400.54]),
                           Donor('Brandon Henson Jr', [355.42, 579.31]),
                           Donor('Kaiya Henson', [636.9, 850.13, 125.23]))
# Menu text shown by menu_selection(); keys match arg_dict at the bottom.
prompt = ('\nSelect an option:\n'
          '[1] Send A Thank You To New Or Exsisting Donor\n'
          '[2] Create a Report\n'
          '[3] Send letters to everyone\n'
          '[4] Exit\n'
          '[5] Save\n'
          '[6] Load\n')
# Prompt reused wherever the user may pick an output directory.
directory_prompt = ("\nChoose save location or press enter for default")
def menu_selection(prompt, dispatch_dict):
    """Run the interactive menu loop.

    Repeatedly prompts the user and dispatches to the handler registered
    under the typed key; exits when a handler returns "Exit Menu".
    """
    while True:
        response = input(prompt)
        try:
            # Unknown keys raise KeyError from the dict lookup.
            if dispatch_dict[response]() == "Exit Menu":
                break
        except KeyError:
            print("\nPick from the listed options.")
def exit():
    """Sentinel handler: tells menu_selection() to stop.

    NOTE(review): shadows the builtin exit(); renaming would also require
    updating the arg_dict registration.
    """
    return "Exit Menu"
def load():
    """Replace the in-memory donor history with one loaded from a JSON file."""
    global donor_history
    to_load = input("What do you want to load (with extension)?\n")
    with open(to_load, 'r') as f:
        donor_load = json.load(f)
    donor_history = donor_history.from_json_dict(donor_load)
def save():
    """Serialize the donor history to JSON under a user-chosen record name."""
    record_name = input("Name Of file(without extension)?")
    info = donor_history.to_json()
    donor_history.save(record_name, info)
def report():
    """Print the formatted donor report (delegates to the Donor_list)."""
    donor_history.donor_report()
def make_file(letter, destination):
    """Write *letter* (text) to the file at *destination*, overwriting it."""
    with open(destination, 'w') as outfile:
        outfile.write(letter)
def make_destination(donor, need_dir='y', directory=""):
    """Build the output path "<directory>/<Donor_Name>.txt".

    When need_dir is "y" (the default) the directory is asked from the user;
    callers that already know it pass need_dir='n' plus the directory.
    """
    if need_dir == "y":
        directory = input(directory_prompt)
    destination = os.path.join(directory,
                               "{}.txt".format(donor.name.replace(' ', '_')))
    return destination
def get_donation_amt(name):
    """Prompt until the user types a whole-dollar donation amount.

    NOTE(review): int() rejects decimal input even though the seeded donation
    histories contain cents (e.g. 1005.49) -- confirm whether float amounts
    should be accepted here.
    """
    while True:
        try:
            amount = int(input("\nHow much did {} donate: ".format(name)))
            break
        except ValueError:
            print("\nThis only works with a number!")
    return amount
def thank_everyone():
    """Write a thank-you letter file for every donor into one directory."""
    directory = input(directory_prompt)
    for donor in donor_history.donor_dictionary:
        # need_dir='n': reuse the directory asked once above.
        make_file(donor.write_note(), make_destination(donor, 'n', directory))
def add_donation(name, amount, donor_list_obj):
    """Record *amount* for *name* in *donor_list_obj* and return the donor.

    Every existing donor whose name matches receives the donation (the last
    match is returned); an unknown name creates and registers a new Donor.
    """
    matched = None
    for existing in donor_list_obj.donor_dictionary:
        if existing.name == name:
            existing.new_donation(amount)
            matched = existing
    if matched is None:
        matched = Donor(name, [amount])
        donor_list_obj.add_donor(matched)
    return matched
def add_new_full(name="", thank_you=""):
if name == "":
name = input("\nWho is the donor?")
amount = get_donation_amt(name)
donor = add_donation(name, amount, donor_history)
if thank_you == "":
thank_you = input("\nSend a thank you to {}? (y/n): ".format(name))
if thank_you.upper() == 'Y':
make_file(donor.write_note(amount), make_destination(donor))
def send_to():
    """Ask for a donor name; typing 'list' first shows the known donors."""
    recipient = input("\nWho is the donor?\n"
                      "Enter a name or 'list'")
    if recipient.lower() == 'list':
        print(donor_history)
        recipient = input("\nWho is the donor?\n")
        return recipient
    else:
        return recipient
def thank_you():
    """Menu option 1: thank an existing donor or register a new one."""
    name = send_to()
    donor_exists = donor_history.check_donor(name)
    if donor_exists:
        donor = donor_history.get_donor(name)
        new_donation = input("\n{} has donated. Another?(y/n)? ".format(name))
        if new_donation.upper() == 'Y':
            amount = get_donation_amt(name)
            donor.new_donation(amount)
        else:
            # amount 0 -- presumably write_note(0) falls back to a generic
            # letter; verify against mailroom.Donor.
            amount = 0
        make_file(donor.write_note(amount), make_destination(donor))
    else:
        # Unknown name: treat as a brand-new donor, always send the letter.
        add_new_full(name, 'y')
def donor_list_sum(donor_list):
    """Return the total of every donor's lifetime giving in *donor_list*."""
    return sum(donor.total_donated for donor in donor_list.donor_dictionary)
arg_dict = {"1": thank_you, "2": report, "3": thank_everyone, "4": exit,
"5": save, "6": load}
if __name__ == '__main__':
menu_selection(prompt, arg_dict)
| [
"kmsnyder2@verizon.net"
] | kmsnyder2@verizon.net |
640bc6b34dd826a963ab60f2b4eeae22953d3bd8 | 9c48efbd0b87cb65d9002a4535d90cc0da7f7460 | /07-esercizi/es5_mostra_rango.py | a26721b0f97ff3d92e252192d9e5f0799418f561 | [] | no_license | amedina14/master-python | b439a69507f1e3be5c1b4e5a8001a582d66d03b7 | 12d3ba5aacbfca03a7966599afc4de41cb4af104 | refs/heads/master | 2023-02-24T09:27:23.793033 | 2021-02-01T15:27:13 | 2021-02-01T15:27:13 | 321,659,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | """
Esercizio 5
Mostra numeri tra due numeri detti dall'utente
"""
num1 = int(input("Da numero 1: "))
num2 = int(input("A numero 2: "))
if num1 < num2:
for i in range(num1,(num2 + 1)):
print(i)
else:
print("Il numero 1 deve essere minore al numero 2")
"""
else:
print("errore")
""" | [
"medinalarry96@gmail.com"
] | medinalarry96@gmail.com |
7e1e63e6d0914cb94a2e3b298ed1709387b77b63 | 574ba9b7b7f79ee06e395f697f2eb08d39081a2e | /nkms/network/protocols.py | e2eecbf55e9286e95223276bb527d8fa9e0c087e | [] | no_license | xxxAHMEDxxx/nucypher-kms | 5dcba61ee8a701dd9025dfa425a6347b3cfc80e1 | a10bdccc12374b1bdd8212f4c939f0d411729708 | refs/heads/master | 2021-07-23T23:21:46.776424 | 2017-11-05T00:18:14 | 2017-11-05T00:18:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,929 | py | import asyncio
from kademlia.node import Node
from kademlia.protocol import KademliaProtocol
from kademlia.utils import digest
from nkms.network.constants import NODE_HAS_NO_STORAGE
from nkms.network.node import NuCypherNode
from nkms.network.routing import NuCypherRoutingTable
class NuCypherHashProtocol(KademliaProtocol):
    """Kademlia RPC protocol using NuCypher's routing table and
    storage-aware node handling."""

    def __init__(self, sourceNode, storage, ksize, *args, **kwargs):
        super().__init__(sourceNode, storage, ksize, *args, **kwargs)
        # Replace the stock router with NuCypher's routing table.
        self.router = NuCypherRoutingTable(self, ksize, sourceNode)

    def check_node_for_storage(self, node):
        """True when *node* reports it can store values; nodes without a
        can_store() method are assumed to have storage."""
        try:
            return node.can_store()
        except AttributeError:
            return True

    def rpc_ping(self, sender, nodeid, node_capabilities=[]):
        # NOTE(review): mutable default argument -- harmless here because the
        # list is only read, but a tuple default would be safer.
        source = NuCypherNode(nodeid, sender[0], sender[1], capabilities_as_strings=node_capabilities)
        self.welcomeIfNewNode(source)
        return self.sourceNode.id

    async def callStore(self, nodeToAsk, key, value):
        """Ask *nodeToAsk* to store (key, value); returns (success, data).

        A node that advertises no storage is skipped and reported with the
        NODE_HAS_NO_STORAGE marker instead.
        """
        # nodeToAsk = NuCypherNode
        if self.check_node_for_storage(nodeToAsk):
            address = (nodeToAsk.ip, nodeToAsk.port)
            # TODO: encrypt `value` with public key of nodeToAsk
            store_future = self.store(address, self.sourceNode.id, key, value)
            result = await store_future
            success, data = self.handleCallResponse(result, nodeToAsk)
            return success, data
        else:
            return NODE_HAS_NO_STORAGE, False
class NuCypherSeedOnlyProtocol(NuCypherHashProtocol):
    """Variant that participates in the network but refuses to store values."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def rpc_store(self, sender, nodeid, key, value):
        """Acknowledge the store RPC without persisting anything."""
        source = Node(nodeid, sender[0], sender[1])
        self.welcomeIfNewNode(source)
        self.log.debug(
            "got a store request from %s, but THIS VALUE WILL NOT BE STORED as this is a seed-only node." % str(
                sender))
        # True (success) is still returned even though nothing was stored.
        return True
| [
"justin@justinholmes.com"
] | justin@justinholmes.com |
de000d15fcb6e39a0cf59d0226b600017f317501 | 62ed242c7195788950e1d69dec8a0da0c29d0489 | /8_function/function_parameter2.py | 70e37ce9f05f02b859365ad023e1665a2e79d8d7 | [] | no_license | lmw8864/MyFirstPython_part1_python_basic | f11fbe2d524a4acfa00c2b18488e8f851de9e0ba | ab850187581f9a415066d7b75175a92023c0a691 | refs/heads/master | 2021-06-27T06:50:18.017125 | 2017-09-16T17:01:20 | 2017-09-16T17:01:20 | 103,767,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | # function_parameter2.py
"""
매개변수를 임의의 숫자만큼 전달하기
매개변수를 몇 개나 받을지 미리 알 수 없을 때 매개변수 앞에 '*'를 붙여주면
파이썬이 매개변수명의 빈 튜플을 만들고 받는 값을 모두 이 튜플에 저장한다.
- 매개변수로 *toppings 를 지정하면,
→ 파이썬이 toppings 라는 빈 튜플을 만들고 받는 값을 모두 저장함.
"""
def make_pizza(*toppings):
"""주문받은 토핑 리스트 출력"""
print(toppings)
make_pizza('pepperoni') # 하나의 값을 받더라도 튜플로 저장한다.
make_pizza('mushrooms', 'green peppers', 'extra cheese')
# ('pepperoni',)
# ('mushrooms', 'green peppers', 'extra cheese')
def make_pizza(*toppings):
"""만들려고 하는 피자를 요약합니다."""
print("\nMaking a pizza with the following toppings:")
for topping in toppings:
print("- " + topping)
make_pizza('pepperoni')
make_pizza('mushrooms', 'green peppers', 'extra cheese')
# Making a pizza with the following toppings:
# - pepperoni
#
# Making a pizza with the following toppings:
# - mushrooms
# - green peppers
# - extra cheese
print("\n")
# 위치형 매개변수와 임의의 매개변수 함께 쓰기
def make_pizza(size, *toppings):
"""만들려고 하는 피자를 요약합니다."""
print("\nMaking a " + str(size) + "-inch pizza with the following toppings:")
for topping in toppings:
print("- " + topping)
make_pizza(16, 'pepperoni')
make_pizza(12, 'mushrooms', 'green peppers', 'extra cheese')
# Making a 16-inch pizza with the following toppings:
# - pepperoni
#
# Making a 12-inch pizza with the following toppings:
# - mushrooms
# - green peppers
# - extra cheese
| [
"lmw8864@gmail.com"
] | lmw8864@gmail.com |
300a527f303b1deae9cb0f2e2af9747afd203d3b | 35ff4e124ea73cd2630ddf25dfe019b4b4e3f5d6 | /69_SqrtX/69_SqrtX_3.py | f709058b297d6324a43b6f8ece0dda08f3ed9955 | [] | no_license | H-Cong/LeetCode | 0a2084a4845b5d7fac67c89bd72a2adf49f90c3d | d00993a88c6b34fcd79d0a6580fde5c523a2741d | refs/heads/master | 2023-03-19T15:22:00.971461 | 2021-03-11T00:33:00 | 2021-03-11T00:33:00 | 303,265,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | class Solution:
def mySqrt(self, x: int) -> int:
'''
Gradient Descent with Adaptive Learning Rates
'''
n = 0
lr = x
while abs(n**2 - x) >= 0.1:
lr = lr / 2
gradient = self._gradient(x, n)
n -= lr*gradient
return int(n + 1) if int(n + 1)**2 == x else int(n)
def _gradient(self, x, n):
v = n**2 - x
return 1 if v > 0 else -1
# TC: log(x)
# SC: O(1)
# why start from 0?
# f(n) = n^2 - x, with x >= 0, is monotonically increasing on [0, inf), and we
# are looking for the point where f(n) = 0.
# When the gradient sign (i.e. the sign of f(n)) is < 0, the estimate must move
# right; when it is > 0, the target lies to the left of the current estimate.
# Initializing the estimate to 0 is the only way to make this logic valid.
# ref: https://leetcode.com/problems/sqrtx/discuss/869428/Gradient-Descent-solution-for-machine-learning-interviews-O(logx)
| [
"nych1989@gmail.com"
] | nych1989@gmail.com |
c5e205dfc2e0827a38c3eb10c49570c9aeb24283 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2643/61519/310414.py | d92838a5acddda72ca16a1f452c2818c5b2bcd71 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | cus=list(input().split(","))
gru=list(input().split(","))
k=int(input())
res=[]
number=0
for i in range(len(cus)):
cus[i]=int(cus[i])
gru[i]=int(gru[i])
for i in range(len(cus)-k):
number=0
tem=[]
for j in range(len(cus)):
tem.append(gru[j])
for j in range(i,i+k):
tem[j]=0
for j in range(len(cus)):
if tem[j]==0:
number=number+cus[j]
res.append(number)
print(max(res)) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
b887c469115e9112723e249596fa954101bb9f04 | 46f52778894a1e2d9de04e886335e8659c8a2ff4 | /backend/mainPage/migrations/0003_auto_20200919_1853.py | 98e7cc5ddf252360a612a9b40b1d4f09033437dc | [] | no_license | oereo/Project_C | a038dd761065a2b7b0fbc46405331e27850457ed | eb8bf0283692b422a28c134dc277e194fbe006a7 | refs/heads/master | 2022-12-23T18:38:29.493272 | 2020-10-07T17:17:15 | 2020-10-07T17:17:15 | 278,578,190 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # Generated by Django 2.1.1 on 2020-09-19 09:53
from django.db import migrations, models
# Auto-generated Django migration (schema change only); apply with
# `manage.py migrate` and keep in sync with mainPage/models.py.
class Migration(migrations.Migration):
    # Must run after the migration that introduced the Profile model.
    dependencies = [
        ('mainPage', '0002_profile'),
    ]
    # Relax Profile.safe_percent to an optional CharField(max_length=30).
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='safe_percent',
            field=models.CharField(blank=True, max_length=30, null=True),
        ),
    ]
| [
"dlstpgns0406@gmail.com"
] | dlstpgns0406@gmail.com |
a028d774ad4e71bb609099dde08533350e33691e | c239486d2884f03591e20c1e9419156e27805aa1 | /BOJ/구현/BOJ7568_덩치.py | 660821bd5245747d351c1dfb75189a39c11daf1d | [] | no_license | ynsseon07/Coding_with_Python | ba1fda42534e6134b58d742dc02dc204f447f57a | 4e2041198b3720d97934becdcc2603486dfb5564 | refs/heads/master | 2023-08-25T19:57:44.657541 | 2021-10-28T14:11:24 | 2021-10-28T14:11:24 | 364,128,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | N = int(input())
# BOJ 7568: each person's rank is 1 + the number of people strictly larger in
# BOTH measurements.  N (number of people) was read on the previous line;
# pp holds one [a, b] measurement pair per person.
pp = []
for _ in range(N):
    pp.append(list(map(int, input().split())))
rank = [0] * len(pp)
# O(N^2) pairwise comparison (N is small in this problem, so this is fine).
for i in range(len(pp)):
    for j in range(len(pp)):
        if i == j:
            continue
        # j outranks i only when strictly larger in both components.
        if pp[i][0] < pp[j][0] and pp[i][1] < pp[j][1]:
            rank[i] += 1
for r in rank:
print(r+1, end=' ') | [
"ynsseon@gmail.com"
] | ynsseon@gmail.com |
5b14d812cdc36e5cd4ee145b3af3f90357e47de3 | 2f330fc050de11676ab46b963b7878882e9b6614 | /memsource_cli/models/task_mapping_dto.py | 0961162bc49c06bd5819b8af375229ebe5c9a512 | [
"Apache-2.0"
] | permissive | zerodayz/memsource-cli-client | 609f48c18a2b6daaa639d4cb8a61da43763b5143 | c2574f1467539a49e6637c874e88d75c7ef789b3 | refs/heads/master | 2020-08-01T12:43:06.497982 | 2019-09-30T11:14:13 | 2019-09-30T11:14:13 | 210,999,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,317 | py | # coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:support@memsource.com>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from memsource_cli.models.uid_reference import UidReference # noqa: F401,E501
# Swagger-codegen model for the Memsource REST API.  The structure is
# machine-generated -- regenerate from the OpenAPI spec rather than editing
# by hand; the comments below are the only manual additions.
class TaskMappingDto(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    # The following string is a bare expression statement, not the class
    # docstring (only the first literal above becomes __doc__).
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared Swagger type.
    swagger_types = {
        'task_id': 'str',
        'workflow_level': 'str',
        '_resource_path': 'str',
        'project': 'UidReference',
        'job': 'UidReference'
    }
    # Python attribute name -> JSON key on the wire.
    attribute_map = {
        'task_id': 'taskId',
        'workflow_level': 'workflowLevel',
        '_resource_path': 'resourcePath',
        'project': 'project',
        'job': 'job'
    }
    def __init__(self, task_id=None, workflow_level=None, _resource_path=None, project=None, job=None):  # noqa: E501
        """TaskMappingDto - a model defined in Swagger"""  # noqa: E501
        self._task_id = None
        self._workflow_level = None
        # Double leading underscore: name-mangles to
        # _TaskMappingDto__resource_path, read/written via the
        # _resource_path property below.
        self.__resource_path = None
        self._project = None
        self._job = None
        self.discriminator = None
        # Only overwrite the None defaults for arguments the caller supplied.
        if task_id is not None:
            self.task_id = task_id
        if workflow_level is not None:
            self.workflow_level = workflow_level
        if _resource_path is not None:
            self._resource_path = _resource_path
        if project is not None:
            self.project = project
        if job is not None:
            self.job = job
    @property
    def task_id(self):
        """Gets the task_id of this TaskMappingDto.  # noqa: E501
        :return: The task_id of this TaskMappingDto.  # noqa: E501
        :rtype: str
        """
        return self._task_id
    @task_id.setter
    def task_id(self, task_id):
        """Sets the task_id of this TaskMappingDto.
        :param task_id: The task_id of this TaskMappingDto.  # noqa: E501
        :type: str
        """
        self._task_id = task_id
    @property
    def workflow_level(self):
        """Gets the workflow_level of this TaskMappingDto.  # noqa: E501
        :return: The workflow_level of this TaskMappingDto.  # noqa: E501
        :rtype: str
        """
        return self._workflow_level
    @workflow_level.setter
    def workflow_level(self, workflow_level):
        """Sets the workflow_level of this TaskMappingDto.
        :param workflow_level: The workflow_level of this TaskMappingDto.  # noqa: E501
        :type: str
        """
        self._workflow_level = workflow_level
    @property
    def _resource_path(self):
        """Gets the _resource_path of this TaskMappingDto.  # noqa: E501
        :return: The _resource_path of this TaskMappingDto.  # noqa: E501
        :rtype: str
        """
        return self.__resource_path
    @_resource_path.setter
    def _resource_path(self, _resource_path):
        """Sets the _resource_path of this TaskMappingDto.
        :param _resource_path: The _resource_path of this TaskMappingDto.  # noqa: E501
        :type: str
        """
        self.__resource_path = _resource_path
    @property
    def project(self):
        """Gets the project of this TaskMappingDto.  # noqa: E501
        :return: The project of this TaskMappingDto.  # noqa: E501
        :rtype: UidReference
        """
        return self._project
    @project.setter
    def project(self, project):
        """Sets the project of this TaskMappingDto.
        :param project: The project of this TaskMappingDto.  # noqa: E501
        :type: UidReference
        """
        self._project = project
    @property
    def job(self):
        """Gets the job of this TaskMappingDto.  # noqa: E501
        :return: The job of this TaskMappingDto.  # noqa: E501
        :rtype: UidReference
        """
        return self._job
    @job.setter
    def job(self, job):
        """Sets the job of this TaskMappingDto.
        :param job: The job of this TaskMappingDto.  # noqa: E501
        :type: UidReference
        """
        self._job = job
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models (anything exposing to_dict).
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated escape hatch for models that subclass dict (not this one).
        if issubclass(TaskMappingDto, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TaskMappingDto):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"cerninr@gmail.com"
] | cerninr@gmail.com |
0db82e4db91dc685a8d5fa4f607db6916a7edad2 | 3e31bb9dd45b6fb7e97e28322f23633a3dc99f2d | /web_scraping/regex_test.py | 1c7ad087a726badbf814c46d1ca99f8b52bc0dbd | [] | no_license | peterbristow/codeinstitute-stream-two | 9dbb01230128cc1d73edd7a741020f5d1107f58d | 15a7ec241cb6438bafb9e55f20f5c9697f632efb | refs/heads/master | 2020-04-11T04:27:41.504203 | 2016-06-13T20:41:28 | 2016-06-13T20:41:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | import re
# Python 2 scratch script demonstrating re.findall / re.search / re.sub
# (`re` is imported at the top of the file).
# Using * asterisk - zero or more occurrences of the character preceding it
print re.findall("ab*c", "ac") # ['ac']
print re.findall("ab*c", "abcd") # ['abc']
print re.findall("ab*c", "acc") # ['ac']
print re.findall("ab*c", "abcac") # ['abc', 'ac']
print re.findall("ab*c", "abdc") # []
print re.findall("ab*c", "ABC") # [] case sensitive
# Using re.IGNORECASE
print re.findall("ab*c", "ABC", re.IGNORECASE) # ['ABC']
# Using . period - exactly one occurrence of any character
print re.findall("a.c", "abc") # ['abc']
print re.findall("a.c", "abbc") # []
print re.findall("a.c", "ac") # []
print re.findall("a.c", "acc") # ['acc']
# Combining . with *
print re.findall("a.*c", "abc") # ['abc']
print re.findall("a.*c", "abbc") # ['abbc']
print re.findall("a.*c", "ac") # ['ac']
# Using re.search()
results = re.search("ab*c", "ABC", re.IGNORECASE)
print results.group()
a_string = "Everything we do is <replaced> if it is indeed inside <tags>."
# Substitute the tags with 'coming up roses' using the re.sub() method
# (greedy <.*> swallows everything from the first '<' to the LAST '>').
a_string = re.sub("<.*>", "coming up roses", a_string)
print a_string
another_string = "Everything we do is <replaced> if it is indeed inside <tags>."
# Make sure that both tags are replaced by using the non-greedy ? to tell
# re.sub() to stop after the first match of '>'
another_string = re.sub("<.*?>", "coming up roses", another_string)
print another_string
| [
"peterjb73@gmail.com"
] | peterjb73@gmail.com |
f909f2748513ae1af2c003a3453a949ee37eb3ff | bf7959048edc0005e04431a0864c719adc5ea9ea | /python版本/6704-MaximumScore.py | 9181f1fc0bdc2a9d56e9392daf163fb8c633a733 | [] | no_license | Yohager/Leetcode | 7c24f490cfa5fd8e3cdb09e5a2305a134a064a93 | 585af82ff2c2d534053f6886714406019ed0c7d1 | refs/heads/master | 2022-12-07T23:51:16.347174 | 2022-11-28T02:30:53 | 2022-11-28T02:30:53 | 178,201,848 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | class Solution:
def maximumScore(self, nums: List[int], k: int) -> int:
n = len(nums)
l,r = k,k
ans = 0
while True:
while r < n and nums[r] >= nums[k]:
r += 1
while l >= 0 and nums[l] >= nums[k]:
l -= 1
ans = max(ans,(r-l-1)*nums[k])
if l < 0 and r == n:
break
if l >= 0 and r < n:
nums[k] = max(nums[l],nums[r])
elif l < 0:
nums[k] = nums[r]
else:
nums[k] = nums[l]
return ans
| [
"guoyuhang0921@gmail.com"
] | guoyuhang0921@gmail.com |
dde9f9a87549bb12202e514573e9252914f2dcc0 | a0659e58f8073485674d7bc4092f9a04174fb7c7 | /Lib/objc/_C2.py | f3e7b02d732db535decb0efe9163a2c39ebfceaf | [
"MIT"
] | permissive | kanishpatel/Pyto | 74f75de8e06e6120324458346a9c65b73191b935 | feec7a1a54f635a6375fa7ede074ff35afbfbb95 | refs/heads/main | 2023-01-30T15:16:37.095828 | 2020-12-04T14:11:44 | 2020-12-04T14:11:44 | 318,703,734 | 0 | 0 | MIT | 2020-12-05T04:46:16 | 2020-12-05T04:46:15 | null | UTF-8 | Python | false | false | 1,864 | py | '''
Classes from the 'C2' framework.
'''
# Pull in the Objective-C bridge when available.  NOTE(review): catching
# ValueError rather than ImportError is unusual -- presumably the rubicon
# stub bundled with this app raises ValueError when the bridge is
# unavailable; confirm before "fixing" the exception type.
try:
    from rubicon.objc import ObjCClass
except ValueError:
    # Fallback stub so later lookups resolve to None instead of crashing.
    def ObjCClass(name):
        return None
def _Class(name):
    """Resolve an Objective-C class by name, or None when unavailable."""
    try:
        resolved = ObjCClass(name)
    except NameError:
        # ObjCClass never made it into the namespace (the guarded import
        # above neither succeeded nor installed the stub): report "missing".
        return None
    return resolved
# Lazily-resolved bindings for classes of the private 'C2' framework: each
# module-level name is the live ObjCClass when the runtime exposes it,
# otherwise None (see _Class above).
C2SessionPool = _Class('C2SessionPool')
C2RequestManager = _Class('C2RequestManager')
C2Session = _Class('C2Session')
C2RequestOptions = _Class('C2RequestOptions')
C2SessionTask = _Class('C2SessionTask')
C2SessionCallbackMetrics = _Class('C2SessionCallbackMetrics')
C2DeviceInfo = _Class('C2DeviceInfo')
C2SessionTLSCache = _Class('C2SessionTLSCache')
C2NetworkingDelegateURLSessionDataTask = _Class('C2NetworkingDelegateURLSessionDataTask')
C2NetworkingDelegateURLSession = _Class('C2NetworkingDelegateURLSession')
C2Logging = _Class('C2Logging')
C2Metric = _Class('C2Metric')
C2SessionGroup = _Class('C2SessionGroup')
C2ReportMetrics = _Class('C2ReportMetrics')
C2MetricRequestOptions = _Class('C2MetricRequestOptions')
C2MetricOperationOptions = _Class('C2MetricOperationOptions')
C2MetricOperationGroupOptions = _Class('C2MetricOperationGroupOptions')
C2MetricOptions = _Class('C2MetricOptions')
C2Time = _Class('C2Time')
C2RoutingTable = _Class('C2RoutingTable')
C2Route = _Class('C2Route')
C2MPDeviceInfo = _Class('C2MPDeviceInfo')
C2MPCloudKitOperationGroupInfo = _Class('C2MPCloudKitOperationGroupInfo')
C2MPMetric = _Class('C2MPMetric')
C2MPServerInfo = _Class('C2MPServerInfo')
C2MPGenericEventMetric = _Class('C2MPGenericEventMetric')
C2MPError = _Class('C2MPError')
C2MPGenericEvent = _Class('C2MPGenericEvent')
C2MPCloudKitInfo = _Class('C2MPCloudKitInfo')
C2MPNetworkEvent = _Class('C2MPNetworkEvent')
C2MPCloudKitOperationInfo = _Class('C2MPCloudKitOperationInfo')
C2MPGenericEventMetricValue = _Class('C2MPGenericEventMetricValue')
C2MPInternalTestConfig = _Class('C2MPInternalTestConfig')
| [
"adrilabbelol@gmail.com"
] | adrilabbelol@gmail.com |
6d43ccff678343c91363b14927542e7a10967eda | 3c1ad0919924ed8d96ae5f9d9a10b97cfdf1ee38 | /LSA_cgi/simple-ajax-py.py | 72599d2c97742a8da73234b6978640237744c875 | [] | no_license | emonson/CopyrightScripts | 4439ba584840e74ebdc5ab6083887e530757de64 | 862e5d2eb0af848647bf1cb2d95519071a00adc0 | refs/heads/master | 2020-05-18T15:37:15.926524 | 2017-03-16T14:51:08 | 2017-03-16T14:51:08 | 1,569,450 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | #!/usr/bin/python
# Python 2 CGI endpoint: echoes the 'w' query parameter back in an HTML
# fragment and logs it to a shared file on disk.
import os, stat
import cgi
import cgitb
cgitb.enable()
# Trying to add to PYTHONPATH
import sys
sys.path.insert(0, '/Users/emonson/Programming/VTK_git/VTK/build/bin')
sys.path.insert(0,'/Users/emonson/Programming/VTK_git/VTK/build/Wrapping/Python')
# NOTE(review): vtk is not referenced below -- presumably kept as an import
# smoke-test for the CGI environment; confirm before removing.
import vtk
form = cgi.FieldStorage()
secret_word = form.getvalue('w','_blank_')
remote_host = os.environ['REMOTE_ADDR']
# Owner ends up being _www and permissions 0644
out_file = '/Users/Shared/junk.txt'
# SECURITY(review): raw request input written to a fixed, shared path that is
# then made world-writable (0666 below) -- acceptable for a local demo only.
f = open(out_file, 'w')
f.write(secret_word)
f.close()
# File permissions
# st = os.stat('sync_get_NotesJournal.sh')
# posix.stat_result(st_mode=33188, st_ino=845809, st_dev=234881032L, st_nlink=1, st_uid=501, st_gid=20, st_size=525, st_atime=1281026596, st_mtime=1247759591, st_ctime=1262712197)
# stat.ST_MODE # 0
# stat.S_IMODE(st[stat.ST_MODE]) # 420
# oct(stat.S_IMODE(st[0])) # '0644'
# bin(420) # '0b110100100'
# os.chmod(out_file, stat.S_IMODE(0b110110110))
os.chmod(out_file, stat.S_IMODE(0o0666))
# SECURITY(review): secret_word is interpolated into the HTML unescaped --
# reflected XSS; it should pass through cgi.escape() before being echoed.
print "Content-type:text/html\r\n\r\n"
print "<p>Your word is: <b>%s</b> and your IP address is: <b>%s</b></p>" % (secret_word, remote_host)
# print "Content-type:text/html\r\n\r\n"
# print "<html>"
# print "<head>"
# print "<title>Hello - Second CGI Program</title>"
# print "</head>"
# print "<body>"
# print "<h2>Hello %s</h2>" % (secret_word,)
# print "</body>"
# print "</html>"
# Original Perl version kept for reference:
# $query = new CGI;
#
# $secretword = $query->param('w');
# $remotehost = $query->remote_host();
#
# print $query->header;
# print "<p>The secret word is <b>$secretword</b> and your IP is <b>$remotehost</b>.</p>";
#
| [
"emonson@cs.duke.edu"
] | emonson@cs.duke.edu |
a9a9634f65bd35da3fd546d96a14c239e1c23448 | e86851297175203451374021595659adbd516b59 | /scripts/deploy_to_netlify.py | d73ded3eed7fecbff06eb62db2809fad5ff3805c | [
"MIT"
] | permissive | stcolumbas/free-church-psalms | f0417d07af449300a5ada758dc95e153712b0e9e | 0eee5faa19306a79d77a55019ff82fcba72fc9b4 | refs/heads/master | 2022-12-16T15:31:44.907547 | 2017-12-08T22:53:40 | 2017-12-08T22:53:40 | 28,723,518 | 2 | 0 | null | 2022-12-07T23:51:49 | 2015-01-02T19:23:24 | Elm | UTF-8 | Python | false | false | 1,824 | py | #!/usr/bin/env python
import os
from hashlib import sha1
import requests
# Netlify site to deploy to and the v1 REST endpoint root.
SITE_ID = '8954ba2a-fa5e-447b-ada8-09c4b5ce8b29'
BASE_URL = 'https://api.netlify.com/api/v1/'
# Personal access token.  NOTE(review): .get() yields None when the variable
# is unset, which would silently send "Authorization: Bearer None" below.
token = os.environ.get('NETLIFY_TOKEN')
def hash_file(path):
    """Return the hex SHA-1 digest of the file at `path`."""
    digest = sha1()
    with open(path, 'rb') as stream:
        digest.update(stream.read())
    return digest.hexdigest()
def main():
    """Create a Netlify deploy for ./dist and upload only the files the API
    reports as missing (digest-based incremental deploy)."""
    hash_to_path = dict()
    uri_to_hash = dict()
    hash_to_uri = dict()
    # Walk the build output and fingerprint every file.
    # NOTE(review): files with identical content share a SHA-1, so
    # hash_to_path/hash_to_uri keep only the last one seen; Netlify's
    # "required" list is also per-digest, so valid content is still uploaded.
    for root, dirs, files in os.walk('dist'):
        for f in files:
            full_path = os.path.join(root, f)
            hash_ = hash_file(full_path)
            hash_to_path[hash_] = full_path
            # Deploy URI = local path with 'dist/' dropped.  NOTE(review):
            # str.replace substitutes anywhere in the path, not just the
            # prefix -- fine unless a nested directory is itself named dist/.
            uri_to_hash[full_path.replace('dist/', '/')] = hash_
            hash_to_uri[hash_] = full_path.replace('dist/', '/')
    # Announce the manifest; the response lists digests Netlify lacks.
    resp = requests.post(
        f'{BASE_URL}sites/{SITE_ID}/deploys',
        json={'files': uri_to_hash},
        headers={'Authorization': f'Bearer {token}'},
    )
    resp.raise_for_status()
    resp_data = resp.json()
    # put files
    deploy_id = resp_data['id']
    required_files = resp_data['required']
    if deploy_id is None or (not required_files):
        print('No files to upload, stopping')
        return
    else:
        print(f'{len(required_files)} files to upload:')
    # Upload each missing file by digest, one PUT per file.
    for rf in required_files:
        path_to_file = hash_to_path[rf]
        uri = hash_to_uri[rf]
        print(f'Uploading {uri}...')
        with open(path_to_file, 'rb') as f:
            resp = requests.put(
                f'{BASE_URL}deploys/{deploy_id}/files{uri}',
                headers={
                    'content-type':'application/octet-stream',
                    'Authorization': f'Bearer {token}'
                },
                data=f.read(),
            )
            resp.raise_for_status()
    print('Deploy successful')
# Script entry point: deploy the local dist/ folder when run directly.
if __name__ == '__main__':
    main()
| [
"montgomery.dean97@gmail.com"
] | montgomery.dean97@gmail.com |
97ead138762bf5e3f051a977b15614e25761f09f | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-roma/huaweicloudsdkroma/v2/model/show_details_of_app_v2_request.py | cc75e5f86cf7d2e5e920f4bb52bf6943d5a2b852 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,835 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
# Auto-generated huaweicloud SDK request model -- do not hand-edit the
# generated structure; comments and English docstring translations are the
# only manual additions here.
class ShowDetailsOfAppV2Request:
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names masked as "****" by to_dict(); none for this model.
    sensitive_list = []
    openapi_types = {
        'instance_id': 'str',
        'app_id': 'str'
    }
    attribute_map = {
        'instance_id': 'instance_id',
        'app_id': 'app_id'
    }
    def __init__(self, instance_id=None, app_id=None):
        """ShowDetailsOfAppV2Request
        The model defined in huaweicloud sdk
        :param instance_id: Instance ID
        :type instance_id: str
        :param app_id: Application ID
        :type app_id: str
        """
        self._instance_id = None
        self._app_id = None
        self.discriminator = None
        self.instance_id = instance_id
        self.app_id = app_id
    @property
    def instance_id(self):
        """Gets the instance_id of this ShowDetailsOfAppV2Request.
        Instance ID
        :return: The instance_id of this ShowDetailsOfAppV2Request.
        :rtype: str
        """
        return self._instance_id
    @instance_id.setter
    def instance_id(self, instance_id):
        """Sets the instance_id of this ShowDetailsOfAppV2Request.
        Instance ID
        :param instance_id: The instance_id of this ShowDetailsOfAppV2Request.
        :type instance_id: str
        """
        self._instance_id = instance_id
    @property
    def app_id(self):
        """Gets the app_id of this ShowDetailsOfAppV2Request.
        Application ID
        :return: The app_id of this ShowDetailsOfAppV2Request.
        :rtype: str
        """
        return self._app_id
    @app_id.setter
    def app_id(self, app_id):
        """Sets the app_id of this ShowDetailsOfAppV2Request.
        Application ID
        :param app_id: The app_id of this ShowDetailsOfAppV2Request.
        :type app_id: str
        """
        self._app_id = app_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models (anything exposing to_dict).
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes declared sensitive when dumping.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        # Third-party simplejson dependency comes from the generated template.
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ShowDetailsOfAppV2Request):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
447d5d3897c97297582aad9f15348cb2a248c320 | 28129a9c44f3891eb5b3ce8c7fc530252b1c3840 | /algorithms/sorts/test_sort.py | cc829fa3ffc63abb8690c1186e21b5c443cef71c | [] | no_license | ngocyen3006/learn-python | 55eeb221f5a836ebee8c197fc3fddf6c585f02a6 | ec2f35a87f846385f7353e7ef4900e5f80cfdb0a | refs/heads/master | 2020-03-26T16:35:59.151230 | 2019-05-08T07:26:50 | 2019-05-08T07:26:50 | 145,112,258 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,805 | py | import unittest
import random
def gen(n, low=0, high=100):
    """Return a list of `n` random integers drawn uniformly from [low, high].

    Backward compatible with the original gen(n), which used the fixed
    range [0, 100]; the unused loop variable is now `_`.
    """
    return [random.randint(low, high) for _ in range(n)]
# Use wrapper class to prevent this class being discovered by nosetest.
class Wrapper:
    # Nesting TestSort inside a plain class hides the abstract base from
    # nose/unittest discovery; concrete suites subclass Wrapper.TestSort
    # and override sortMethod.
    class TestSort(unittest.TestCase): # TestSort inherits from unittest.TestCase
        # Abstract hook, overridden by subclasses: must sort `arr` IN PLACE.
        def sortMethod(self, arr):
            pass
        def test_empty(self):
            emptyArr = []
            self.sortMethod(emptyArr)
            self.assertEqual(emptyArr, [], "sort an empty array should return an empty array")
        def test_singleElement(self):
            single = [1]
            self.sortMethod(single)
            self.assertEqual(single, [1])
        def test_repeatedElements(self):
            elem = 1
            repeated = [elem for i in range(5)]
            self.sortMethod(repeated)
            # the sort method should not change the size of the array
            self.assertEqual(len(repeated), 5)
            # after sorting, every element should still equal `elem`
            self.assertTrue(all([x == elem for x in repeated]))
        def test_sort(self):
            a = [2, 3, 1, 6, 7, 5, 4, 8, 9, 10, 15,14, 13, 12, 11]
            self.sortMethod(a)
            self.assertEqual(a, list(range(1, 16)))
        def test_randomInput(self):
            # Ten random arrays; only checks the result is non-decreasing.
            for i in range(10):
                n = random.randint(3, 10)
                randomArr = gen(n)
                self.sortMethod(randomArr)
                for j in range(n-1):
                    if randomArr[j] > randomArr[j+1]: # adjacent inversion -> not sorted
                        print(randomArr)
                        self.fail("sort method provide wrong result for random arr")
# Standard unittest entry point.  Note that because TestSort is nested inside
# Wrapper, no tests are discovered at module level when this file runs alone.
if __name__ == '__main__':
    unittest.main()
| [
"ngocyen300693@gmail.com"
] | ngocyen300693@gmail.com |
3ec725e3fe5085cd6b29bb34f56d8ca10493be16 | 564cef7c58ed45635f7a09344e6c22c27f3b32f3 | /exercise_library/auto_corrector.py | 7e6dc73ecaf27f4460dcb6cac768d154f58f0db5 | [] | no_license | jjdixon/exercise-library | b7e399626b9e5f898471c03e7533502f281322d3 | 06ea97a1c9c17b533e3f5812dd51685a0349f8af | refs/heads/master | 2020-12-25T05:18:33.275285 | 2015-03-02T19:23:13 | 2015-03-02T19:23:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,126 | py | import re
from collections import defaultdict
from exercise_library.exercise_cacher import ExerciseCacher
# Norvig-style statistical spell checker trained on the exercise-name corpus.
# Python 2-era code (companion class below uses xrange).
class SpellChecker(object):
    # NOTE: `words` and `train` are invoked below while the class body is
    # executing, so they behave as plain helper functions, not methods.
    def words(text):
        return re.findall('[a-z]+', text.lower())
    def train(features):
        model = defaultdict(int)
        for f in features:
            model[f] += 1
        return model
    # Word-frequency model built once at import time from all exercise names.
    exercises = ExerciseCacher().exercises
    NWORDS = train(
        words(
            " ".join(
                [dict_obj["name"] for dict_obj in exercises]
            )
        )
    )
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    def _edits1(self, word):
        # All strings one edit away: deletions, adjacent transpositions,
        # single-character replacements and insertions.
        splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
        deletes = [a + b[1:] for a, b in splits if b]
        transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b) > 1]
        replaces = [a + c + b[1:] for a, b in splits for c in self.alphabet if b]
        inserts = [a + c + b for a, b in splits for c in self.alphabet]
        return set(deletes + transposes + replaces + inserts)
    def _known_edits2(self, word):
        # Known words exactly two edits away (filtered early to stay cheap).
        return set(e2 for e1 in self._edits1(word) for e2 in self._edits1(e1) if e2 in self.NWORDS)
    def _known(self, words):
        return set(w for w in words if w in self.NWORDS)
    def correct_token(self, token):
        # Prefer the token itself, then 1-edit candidates, then 2-edit ones;
        # ties broken by corpus frequency.
        candidates = self._known([token]) or self._known(self._edits1(token)) or self._known_edits2(token) or [token]
        return max(candidates, key=self.NWORDS.get)
    def correct_phrase(self, text):
        tokens = text.split()
        return [self.correct_token(token) for token in tokens]
class AutoCompleter(object):
    # Prefix (n-gram) index over exercise-name tokens, built once at import
    # time by the class-body loop below.  Python 2 code: note the `xrange`.
    MIN_N_GRAM_SIZE = 1
    exercise_name_to_dict = {}
    exercises = ExerciseCacher().exercises
    token_to_exercise_name = defaultdict(list)
    n_gram_to_tokens = defaultdict(set)
    # Normalize each exercise name, then index every token and all of its
    # prefixes of length >= MIN_N_GRAM_SIZE.  (Loop variables leak into the
    # class namespace; harmless here.)
    for exercise in exercises:
        exercise_name = exercise["name"]
        exercise_name = exercise_name.lower().replace("-", " ").replace("(", " ").replace(")", " ").replace("'", " ")
        exercise_name = " ".join(exercise_name.split())
        exercise_name_to_dict[exercise_name] = exercise
        tokens = exercise_name.split()
        for token in tokens:
            token_to_exercise_name[token].append(exercise_name)
            # Tokens shorter than the minimum n-gram still index themselves.
            if len(token) < MIN_N_GRAM_SIZE:
                n_gram_to_tokens[token].add(token)
            for string_size in xrange(MIN_N_GRAM_SIZE, len(token) + 1):
                n_gram = token[:string_size]
                n_gram_to_tokens[n_gram].add(token)
    @classmethod
    def get_exercise_dict_from_name(cls, exercise_name):
        # Normalized name -> original exercise dict ({} when unknown).
        return cls.exercise_name_to_dict.get(exercise_name, {})
    def _get_real_tokens_from_possible_n_grams(self, tokens):
        # Expand each (possibly partial) query token into the full tokens it
        # could be a prefix of.
        real_tokens = []
        for token in tokens:
            token_set = self.n_gram_to_tokens.get(token, set())
            real_tokens.extend(list(token_set))
        return real_tokens
    def _get_scored_exercises_uncollapsed(self, real_tokens):
        # One (name, score) pair per token hit; the score favours tokens
        # covering a larger fraction of the exercise name.
        exercises__scores = []
        for token in real_tokens:
            possible_exercises = self.token_to_exercise_name.get(token, [])
            for exercise_name in possible_exercises:
                score = float(len(token)) / len(exercise_name.replace(" ", ""))
                exercises__scores.append((exercise_name, score))
        return exercises__scores
    def _combined_exercise_scores(self, exercises__scores, num_tokens):
        # Sum per-name scores, then weight by the fraction of query tokens
        # that matched the name.
        collapsed_exercise_to_score = defaultdict(int)
        collapsed_exercise_to_occurence = defaultdict(int)
        for exercise, score in exercises__scores:
            collapsed_exercise_to_score[exercise] += score
            collapsed_exercise_to_occurence[exercise] += 1
        for exercise in collapsed_exercise_to_score.keys():
            collapsed_exercise_to_score[exercise] *= collapsed_exercise_to_occurence[exercise] / float(num_tokens)
        return collapsed_exercise_to_score
    def _filtered_results(self, exercises__scores):
        # Keep matches scoring >= 0.2, padding back up to five results when
        # too few clear the bar; never return more than ten.
        min_results = 5
        max_results = 10
        score_threshold = 0.2
        max_possibles = exercises__scores[:max_results]
        # A perfect leading score means an exact match; drop names shorter
        # than it (they could only be substrings of the match).
        if exercises__scores and exercises__scores[0][1] == 1.0:
            exact_match_str = exercises__scores[0][0]
            exercises__scores = [tuple_obj for tuple_obj in exercises__scores if len(tuple_obj[0]) >= len(exact_match_str)]
        possibles_within_thresh = [tuple_obj for tuple_obj in exercises__scores if tuple_obj[1] >= score_threshold]
        min_possibles = possibles_within_thresh if len(possibles_within_thresh) > min_results else max_possibles[:min_results]
        return [tuple_obj[0] for tuple_obj in min_possibles]
    def guess_exercises(self, tokens):
        # Full pipeline: expand prefixes -> score -> collapse -> rank -> filter.
        real_tokens = self._get_real_tokens_from_possible_n_grams(tokens)
        exercises__scores = self._get_scored_exercises_uncollapsed(real_tokens)
        collapsed_exercise_to_score = self._combined_exercise_scores(exercises__scores, len(tokens))
        # Python 2: dict.items() returns a list, so in-place sort works here.
        exercises__scores = collapsed_exercise_to_score.items()
        exercises__scores.sort(key=lambda t: t[1], reverse=True)
        return self._filtered_results(exercises__scores)
| [
"slobdell@hearsaycorp.com"
] | slobdell@hearsaycorp.com |
655bd5532b790642e2c48e5016801ab1d1f14da9 | 1e9d743cd42db052a26ac716ebdacc082db70871 | /coding/leetcode/297-serialize-and-deserialize-binary-tree/dfs.py | 4c1189f3ef86424e30f2e50ff4afb9da695fdea9 | [] | no_license | teckoo/interview_public | a993b03cdf2b2f2606207463d841b01d93f12118 | 30198097904994e34f8321926ad2a2cadc8b5940 | refs/heads/master | 2023-04-06T10:35:49.390343 | 2021-04-22T03:38:24 | 2021-04-22T03:38:24 | 320,933,226 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,513 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:
    def serialize(self, root):
        """Encodes a tree to a single string.
        :type root: TreeNode
        :rtype: str
        """
        # Preorder walk collecting one token per node; 'None' marks an
        # absent child so the shape is fully recoverable.
        tokens = []
        def visit(node):
            if node is None:
                tokens.append('None')
            else:
                tokens.append(str(node.val))
                visit(node.left)
                visit(node.right)
        visit(root)
        # Every token -- including the last -- is followed by a comma,
        # matching the original concatenation-based format.
        return ','.join(tokens) + ','
    def deserialize(self, data):
        """Decodes your encoded data to tree.
        :type data: str
        :rtype: TreeNode
        """
        # Consume tokens in the same preorder the encoder produced them.
        tokens = iter(data.split(','))
        def build():
            value = next(tokens)
            if value == 'None':
                return None
            node = TreeNode(value)
            node.left = build()
            node.right = build()
            return node
        return build()
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
| [
"c2.programmer@gmail.com"
] | c2.programmer@gmail.com |
d1ba17a6139aebba2d6cfd140f23e827a2ed6211 | b75a1300f7196269171d04a8be6e4341e00e1359 | /pipelines/process_study_driver.py | d2d0261d738cd54a04cc144799cff4ca85dd55e3 | [] | no_license | lemwill/spikeforest | a32120e6f3484c56ef66398cc8a0d859bde7eaa1 | f36ef7bed0ddbba651edff67c31db52ae372257f | refs/heads/master | 2023-03-17T23:20:01.907903 | 2019-03-15T12:51:15 | 2019-03-15T12:51:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,700 | py | #!/usr/bin/env python
import os
import sys
import argparse
from kbucket import client as kb
from pairio import client as pa
from process_study import Study
def main(*,command,mode='local'):
    """Run one command against the hard-coded spikeforest study.

    command: 'process' (compute results), 'clear' (drop cached results) or
             'save' (publish results to kbucket).
    mode:    'local' keeps all pairio/kbucket reads and writes on this
             machine; 'remote' reads/writes the shared pairio collection and
             uploads to the kbucket share (requires SPIKEFOREST_PAIRIO_TOKEN
             and SPIKEFOREST_KBUCKET_TOKEN in the environment).
    """
    # Select the study
    study_dir='kbucket://b5ecdf1474c5/spikeforest/gen_synth_datasets/datasets_noise10_K20'
    study_name='synth_jfm_noise10_K20'
    # The following are relevant when mode='remote'
    PAIRIO_USER='spikeforest'
    KBUCKET_SHARE_ID='magland.spikeforest'
    # Specify whether we want to read/write remotely
    if mode=='local':
        read_local=True; write_local=True; read_remote=False ;write_remote=False
        load_local=True; load_remote=True; save_remote=False
    elif mode=='remote':
        read_local=False; write_local=False; read_remote=True; write_remote=True
        load_local=False; load_remote=True; save_remote=True
        if write_remote:
            PAIRIO_TOKEN=os.getenv('SPIKEFOREST_PAIRIO_TOKEN')
            pa.setConfig(user=PAIRIO_USER,token=PAIRIO_TOKEN)
        if save_remote:
            KBUCKET_UPLOAD_TOKEN=os.getenv('SPIKEFOREST_KBUCKET_TOKEN')
            kb.setConfig(upload_share_id=KBUCKET_SHARE_ID,upload_token=KBUCKET_UPLOAD_TOKEN)
            # Fail fast if the upload credentials do not actually work.
            kb.testSaveRemote()
    else:
        raise Exception('Missing or invalid mode:',mode)
    # Apply the read/write routing chosen above.
    pa.setConfig(read_local=read_local,write_local=write_local,read_remote=read_remote,write_remote=write_remote)
    pa.setConfig(collections=[PAIRIO_USER])
    kb.setConfig(load_local=load_local,load_remote=load_remote,save_remote=save_remote)
    kb.setConfig(share_ids=[KBUCKET_SHARE_ID])
    study=Study(study_dir=study_dir,study_name=study_name)
    if command=='process':
        study.process()
    elif command=='clear':
        study.clearResults()
    elif command=='save':
        results=study.getResults()
        print ('Saving {} results...'.format(len(results)))
        key=dict(
            name='spikeforest_results',
            study_name=study_name
        )
        kb.saveObject(key=key,object=results)
        print ('Saved under key:')
        print (key)
    else:
        raise Exception('Unrecognized command: '+command)
def print_usage():
    """Print command-line usage for this driver script."""
    for usage_line in (
        'Usage:',
        './process_study_driver.py process',
        './process_study_driver.py save',
        './process_study_driver.py clear',
    ):
        print(usage_line)
if __name__== "__main__":
parser = argparse.ArgumentParser(description = 'Process a spikeforest study')
parser.add_argument('command', help='process, save, or clear')
parser.add_argument('--mode', help='local or remote')
args = parser.parse_args()
main(
command=args.command,
mode=args.mode
)
| [
"jeremy.magland@gmail.com"
] | jeremy.magland@gmail.com |
8bff251d955aeee4159ac2339ee6db119dbe244e | 015e0d41cf9cf85c1a0bfd28c00d0c00ebedcb39 | /metric/coco_scores.py | 8190541f9af79365c9b1cedf78dcc7192a8f5f51 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | zeta1999/gan-compression | 2b074676e49bda43439fd224ca5f8e2bfae13309 | 3224be53f334afe70f7da665906d1ada06233da5 | refs/heads/master | 2023-01-13T06:45:11.676868 | 2020-11-20T12:21:35 | 2020-11-20T12:21:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,584 | py | import os
import cv2
import numpy as np
import torch
from PIL import Image
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
class CocoStuff164k(Dataset):
    # Torch dataset pairing in-memory images with COCO-Stuff ground-truth
    # label maps read from <root>/val_label/<name>.png.
    def __init__(self, root, images, names):
        self.root = root
        self.ignore_label = 255
        # Per-channel BGR mean subtracted in __getitem__.  NOTE(review):
        # assumed to match the evaluated segmentation model's preprocessing
        # -- confirm against the model's training recipe.
        self.mean_bgr = np.array((104.008, 116.669, 122.675))
        self.label_paths = []
        self.images = images
        self.names = names
        self._set_files()
        # Avoid OpenCV's own threading interfering with DataLoader workers.
        cv2.setNumThreads(0)
    def _set_files(self):
        # Resolve (and eagerly validate) one label path per sample name.
        label_paths = []
        for name in self.names:
            path = os.path.join(self.root, 'val_label', '%s.png' % name)
            assert os.path.exists(path)
            label_paths.append(path)
        self.label_paths = label_paths
    def _load_data(self, index):
        # Set paths
        image_id = self.names[index]
        label_path = self.label_paths[index]
        # Load an image and label
        image = self.images[index]
        label = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE)
        return image_id, image, label
    def __getitem__(self, index):
        image_id, image, label = self._load_data(index)
        h, w = label.shape
        # Resize the image to the label map's resolution if they differ.
        image_pil = Image.fromarray(image)
        if image_pil.size[0] != w or image_pil.size[1] != h:
            image_pil = image_pil.resize((w, h), Image.BICUBIC)
        image = np.asarray(image_pil)
        # Reverse the channel axis (RGB -> BGR) to match mean_bgr.
        image = np.flip(image, axis=2)
        # Mean subtraction
        image = image - self.mean_bgr
        # HWC -> CHW
        image = image.transpose(2, 0, 1)
        return image_id, image.astype(np.float32), label.astype(np.int64)
    def __len__(self):
        return len(self.names)
def _fast_hist(label_true, label_pred, n_class):
mask = (label_true >= 0) & (label_true < n_class)
hist = np.bincount(
n_class * label_true[mask].astype(int) + label_pred[mask],
minlength=n_class ** 2,
).reshape(n_class, n_class)
return hist
def compute_scores(label_trues, label_preds, n_class):
hist = np.zeros((n_class, n_class))
for lt, lp in zip(label_trues, label_preds):
hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
accu = np.diag(hist).sum() / hist.sum()
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
valid = hist.sum(axis=1) > 0 # added
mIoU = np.nanmean(iu[valid])
return accu * 100, mIoU * 100
def test(fakes, names, model, device, data_dir, batch_size=1, num_workers=0, tqdm_position=None):
    """Run a segmentation model over generated images and score them.

    Builds a CocoStuff164k dataset from `fakes`/`names` under `data_dir`,
    feeds batches through `model`, and returns the (pixel accuracy %,
    mean IoU %) pair from compute_scores over 182 COCO-Stuff classes.
    """
    dataset = CocoStuff164k(data_dir, fakes, names)
    dataloader = DataLoader(dataset=dataset, batch_size=batch_size,
                            num_workers=num_workers, shuffle=False)
    preds, gts = [], []
    # A negative tqdm_position disables the progress bar entirely.
    if tqdm_position is None or tqdm_position >= 0:
        import tqdm
        dataloader_tqdm = tqdm.tqdm(dataloader, desc='Coco Scores', position=tqdm_position, leave=False)
    else:
        dataloader_tqdm = dataloader
    with torch.no_grad():
        for image_ids, images, gt_labels in dataloader_tqdm:
            images = images.to(device)
            logits = model(images)
            _, H, W = gt_labels.shape
            # Upsample the logits to the label resolution when they differ.
            if logits.shape[-2] != H or logits.shape[-1] != W:
                logits = F.interpolate(
                    logits, size=(H, W), mode="bilinear", align_corners=False
                )
            probs = F.softmax(logits, dim=1)
            labels = torch.argmax(probs, dim=1)
            preds += list(labels.cpu().numpy())
            gts += list(gt_labels.numpy())
    return compute_scores(gts, preds, n_class=182)
| [
"lmxyy1999@foxmail.com"
] | lmxyy1999@foxmail.com |
5f67ded098c37a29160d3af7fa7a1f691310e71a | e298bf40ae88c2bd8e0a07f3e92f3e08a92edcc6 | /nova/cmd/api.py | f56abfac63f42b7ef5e2630925dcc9a63d0ebe0d | [] | no_license | KevinKaiQian/polar-bear | 46a814c746246394f76505846166673a049f12f2 | 61d4e0ccd7328a6aa543af3b75e5f7fedf98bf8e | refs/heads/master | 2022-04-29T02:15:35.536039 | 2021-05-19T12:33:07 | 2021-05-19T12:33:07 | 172,068,536 | 2 | 0 | null | 2022-03-29T21:56:51 | 2019-02-22T13:11:58 | Python | UTF-8 | Python | false | false | 1,454 | py | import six
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
from nova import exception
from nova.i18n import _LE, _LW
from nova import service
from nova import utils
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import rpc
from nova import objects
from oslo_config import cfg
CONF = cfg.CONF
from oslo_log import log as logging
#LOG = logging.getLogger(__name__)
def main():
    """Boot every enabled Nova API as a WSGI service and wait on them."""
    #logging.setup(CONF, "nova")
    #import pdb;pdb.set_trace()
    # Configure RPC messaging, object models, and the DB layer before
    # creating any services.
    rpc.set_defaults(control_exchange='nova')
    rpc.init(CONF)
    objects.register_all()
    sqlalchemy_api.configure(CONF)
    log = logging.getLogger(__name__)
    launcher = service.process_launcher()
    started = 0
    for api in CONF.enabled_apis:
        # Serve over SSL only when the API is also listed in enabled_ssl_apis.
        should_use_ssl = api in CONF.enabled_ssl_apis
        try:
            server = service.WSGIService(api, use_ssl=should_use_ssl)
            #import pdb;pdb.set_trace()
            launcher.launch_service(server, workers=server.workers or 1)
            started += 1
        except exception.PasteAppNotFound as ex:
            # An unknown name in enabled_apis is not fatal; warn and continue.
            log.warning(
                _LW("%s. ``enabled_apis`` includes bad values. "
                    "Fix to remove this warning."), six.text_type(ex))
    if started == 0:
        # Nothing could be launched; exit non-zero so supervisors notice.
        log.error(_LE('No APIs were started. '
                      'Check the enabled_apis config option.'))
        sys.exit(1)
    launcher.wait()
if __name__ == "__main__":
    # Script entry point: start the enabled Nova API services.
    main()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
baec54e7a2c40ee0b02bb845aef4de9c59c60e57 | 80092fa3b60a00744effb5037324527729d89648 | /Multiple_Plots/multiple_plots_6.py | f20edb93f695c1e22be9d950edad024a9885f424 | [] | no_license | zjxpirate/Data-Analyst-DATAQUEST | 7820d4b218a1ccd49de9eac56bc92dc10917baa9 | 6bd56878cff00b52ca22aba9be7b52be96bb42bd | refs/heads/master | 2020-04-18T00:36:55.719894 | 2019-04-09T00:14:49 | 2019-04-09T00:14:49 | 167,084,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py |
# 1 begins here:
import pandas as pd
import matplotlib.pyplot as plt
unrate = pd.read_csv('unrate.csv')
# unrate['DATE'] = pd.to_datetime(unrate['DATE'])
#
# first_twelve = unrate[0:12]
#
# plt.plot(first_twelve['DATE'], first_twelve['VALUE'])
#
# plt.xticks(rotation=0)
#
# plt.xlabel('Month')
#
# plt.ylabel('Unemployment Rate')
#
# plt.title('Monthly Unemployment Trends, 1948')
#plt.show()
# 2 begins here:
#fig = plt.figure()
#ax1 = fig.add_subplot(2,1,1)
#ax2 = fig.add_subplot(2,1,2)
#plt.show()
# 3 begins here:
# fig = plt.figure()
#
# ax1 = fig.add_subplot(2, 1, 1)
# ax2 = fig.add_subplot(2, 1, 2)
#plt.show()
# 5 begins here:
#
# fig = plt.figure()
# ax1 = fig.add_subplot(2,1,1)
# ax2 = fig.add_subplot(2,1,2)
#
# ax1.plot(unrate[0:12]['DATE'], unrate[0:12]['VALUE'])
# ax2.plot(unrate[12:24]['DATE'], unrate[12:24]['VALUE'])
#
# plt.show()
# 6 begins here:
# Two stacked subplots comparing 1948 (first 12 rows) and 1949 (next 12 rows).
fig = plt.figure(figsize=(15, 8))
ax1 = fig.add_subplot(2,1,1)
ax2 = fig.add_subplot(2,1,2)
ax1.plot(unrate[0:12]['DATE'], unrate[0:12]['VALUE'])
ax1.set_title('Monthly Unemployment Rate, 1948')
ax2.plot(unrate[12:24]['DATE'], unrate[12:24]['VALUE'])
ax2.set_title('Monthly Unemployment Rate, 1949')
plt.show()
| [
"j_zhang21@u.pacific.edu"
] | j_zhang21@u.pacific.edu |
8bdab363b2e9bacfc34fe82e3b02c0eadae830b5 | def8024e06c442a3b033df8d8f4cbbad87e46e96 | /database/pg.py | 456e389b571c72a477f7153fe5100e0c79e839de | [] | no_license | yhkl-dev/cloud_m | ae48cf5c71061b3e060caca80dbe9d5aac345473 | 491dac7ee707df708437ecd283ecde2ef617fa82 | refs/heads/main | 2023-03-10T22:49:03.478569 | 2021-03-03T15:21:38 | 2021-03-03T15:21:38 | 344,126,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,431 | py | import psycopg2
from psycopg2 import pool, extras
from config.config import GlobalConfig
from contextlib import contextmanager
from threading import Semaphore
from psycopg2 import pool, extensions
# Global settings singleton; supplies the postgres connection parameters below.
CONFIG = GlobalConfig()
class ReallyThreadedConnectionPool(psycopg2.pool.ThreadedConnectionPool):
    """Connection pool that blocks callers instead of raising when exhausted.

    ThreadedConnectionPool raises PoolError when all connections are in use;
    this subclass gates checkouts with a counting Semaphore so getconn()
    waits until another thread returns a connection.
    """

    def __init__(self, minconn, maxconn, *args, **kwargs):
        # One semaphore slot per connection the pool may hand out.
        self._semaphore = Semaphore(maxconn)
        super().__init__(minconn, maxconn, *args, **kwargs)

    def getconn(self, key=None):
        """Block until a connection is available, then check one out."""
        self._semaphore.acquire()
        try:
            return super().getconn(key)
        except Exception:
            # Bug fix: if the underlying pool fails, release the slot we
            # reserved, otherwise the semaphore leaks and capacity shrinks.
            self._semaphore.release()
            raise

    def putconn(self, *args, **kwargs):
        """Return a connection to the pool and free its semaphore slot."""
        try:
            super().putconn(*args, **kwargs)
        finally:
            # Release even if putconn raises, so a slot is never lost.
            self._semaphore.release()
# Shared module-wide pool: 5 connections minimum, at most 10 checked out.
cnxpool = ReallyThreadedConnectionPool(5, 10, **CONFIG.postgres_dict)
@contextmanager
def get_cursor():
    """Yield a cursor on a pooled connection; always return the connection.

    psycopg2 errors raised while the caller holds the cursor are printed and
    suppressed (best-effort behaviour, as before).

    Fixes: the previous version referenced ``cursor``/``con`` in ``finally``
    even when ``getconn()``/``cursor()`` had failed, which raised
    UnboundLocalError and could leak the pooled connection.
    """
    con = cnxpool.getconn()
    cursor = None
    try:
        cursor = con.cursor()
        yield cursor
    except psycopg2.Error as e:
        print(e)
    finally:
        if cursor is not None:
            cursor.close()
        cnxpool.putconn(con)
class PyPgsql(object):
    """Minimal query helper built on the pooled-cursor context manager."""

    @staticmethod
    def get_all(sql):
        """Execute *sql* and return every row of the result set."""
        with get_cursor() as cur:
            cur.execute(sql)
            rows = cur.fetchall()
        return rows
class POSTGERS:
    """Schema-introspection helpers for a PostgreSQL database.

    (Class name kept as-is -- renaming would break callers.)
    """
    def get_all_tables(self):
        """Return the names of all tables in the 'public' schema."""
        SQL = """
        SELECT tablename FROM pg_tables where schemaname = 'public';
        """
        result = PyPgsql.get_all(SQL)
        print(result)
        # Each row is a 1-tuple (tablename,).
        return [r[0] for r in result]
    def get_table_structures(self, table_name: str):
        """Print each column of *table_name* and return the distinct
        column type names (t.typname) used by that table.
        """
        column_type_list = []
        # NOTE(review): table_name is interpolated directly into the SQL
        # string -- safe only for trusted input; parameterize if the name
        # can ever come from users.
        SQL = """
        SELECT a.attnum,
               a.attname AS field,
               t.typname AS type,
               a.attlen AS length,
               a.atttypmod AS lengthvar,
               a.attnotnull AS notnull,
               b.description AS comment
        FROM pg_class c,
             pg_attribute a
             LEFT OUTER JOIN pg_description b ON a.attrelid=b.objoid AND a.attnum = b.objsubid,
             pg_type t
        WHERE c.relname = '{table_name}'
        and a.attnum > 0
        and a.attrelid = c.oid
        and a.atttypid = t.oid
        ORDER BY a.attnum;
        """.format(table_name=table_name)
        x = PyPgsql.get_all(SQL)
        print(table_name)
        if x is not None:
            for column in x:
                print(column)
                # column[2] is the 'type' field (t.typname).
                column_type_list.append(column[2])
        return list(set(column_type_list))
| [
"kaiyang939325@gmail.com"
] | kaiyang939325@gmail.com |
377850eac06f4f7b1668514b1bcacbc349c0f814 | d8b647e50e356646760b94051652e4fa5ac86c83 | /setup.py | 6a7d79a6d59f9a029e31ea20b1b937253c2f196b | [] | no_license | ffxf/tortuga-kit-gceadapter | 5fb74bc8bdc61bc9d386b76db1951958d9ce19c3 | 7cea8f34cff01d7d4743d99c9db2898fbc2a37b6 | refs/heads/master | 2021-01-25T14:26:59.694308 | 2018-03-02T17:07:51 | 2018-03-02T17:07:51 | 123,699,424 | 0 | 0 | null | 2018-03-03T14:30:40 | 2018-03-03T14:30:40 | null | UTF-8 | Python | false | false | 1,050 | py | # Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
setup(
    # Distribution metadata for the Tortuga Google Compute Engine adapter kit.
    name='tortuga-gce-adapter',
    version='6.3.0',
    url='http://univa.com',
    author='Univa Corp',
    author_email='info@univa.com',
    license='Commercial',
    # Package everything except the 'tortuga_kits' tree, which is excluded
    # from this distribution.
    packages=find_packages(exclude=['tortuga_kits']),
    namespace_packages=[
        'tortuga',
        'tortuga.resourceAdapter'
    ],
    zip_safe=False,
    # Runtime dependencies.
    install_requires=[
        'google-api-python-client',
        'gevent',
    ]
)
| [
"mfrisch@univa.com"
] | mfrisch@univa.com |
0127385c11bd1a9c9bd072ebe03e6ade4983c466 | c7061fb106b801c12fb40ff331d927a5bb24da80 | /BasicExerciseAndKnowledge/w3cschool/n5_sortion.py | 83e4f19a347a5789574bab81d7f1d19b99ab8c73 | [
"MIT"
] | permissive | Jonathan1214/learn-python | 34e6b5612beeb1a46b5964b0a4e306656355fe84 | 19d0299b30e953069f19402bff5c464c4d5580be | refs/heads/master | 2020-03-27T09:03:16.785034 | 2018-08-31T02:48:34 | 2018-08-31T02:48:34 | 146,310,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | #coding:utf-8
# Problem: read three integers x, y, z and print them in ascending order.
# (Original note: the built-in sort makes this feel like cheating!)
# Python 2 script: uses the `print` statement.
import re
put_in = ['12 234 21']  # hard-coded stand-in for user input
lt = []
for num in put_in:
    nums = re.split(r'[\D]', num)  # split on any non-digit character
    for n in nums:
        lt.append(int(n))
lt = sorted(lt)
print lt
"jonathan1214@foxmail.com"
] | jonathan1214@foxmail.com |
f547b32987825ea3be7a56d9d96f5a6d0c9e4d4b | 3def6d5ac41b0956bee326b2cda7e11603eac121 | /gjhandler/__init__.py | 153e7933f581029a3d84469d22fd1e3713ac3369 | [
"MIT"
] | permissive | sfujiwara/gjhandler | 2eca3ea5ae1e4011031f70456f1d7e257d774f91 | 9d10b7ce071fb3dd0b50ee391886a68f9d35e165 | refs/heads/master | 2021-01-18T17:25:58.869822 | 2016-09-01T02:38:06 | 2016-09-01T02:38:06 | 67,048,519 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | # -*- coding: utf-8 -*-
import json
import logging.handlers
import math
class GoogleJsonHandler(logging.handlers.RotatingFileHandler):
    """Rotating file handler that formats records as Google-style JSON.

    Rotation is fixed at 8 MiB per file with 5 backups; each record becomes
    a one-line JSON object carrying the message, a seconds/nanos timestamp,
    the thread id, and the severity (level name).
    """

    def __init__(self, filename):
        super(GoogleJsonHandler, self).__init__(
            filename,
            maxBytes=8 * 1024 * 1024,
            backupCount=5,
        )

    def format(self, record):
        """Render *record* as a JSON string."""
        text = super(GoogleJsonHandler, self).format(record)
        # Split the float epoch timestamp into whole seconds and nanoseconds.
        fractional, whole = math.modf(record.created)
        entry = {
            "message": text,
            "timestamp": {
                "seconds": int(whole),
                "nanos": int(fractional * 1e9),
            },
            "thread": record.thread,
            "severity": record.levelname,
        }
        return json.dumps(entry, ensure_ascii=False)
| [
"shuhei.fujiwara@gmail.com"
] | shuhei.fujiwara@gmail.com |
e90d519a2ded43336a44dff2304315ad4182fa9f | 41b031bd7449c75d215478dbb109c823c1c2bccc | /scraps/replacenan.py | 0987196694d0f31f6b8176ded595c78f39986208 | [
"MIT"
] | permissive | woutdenolf/spectrocrunch | dfa667528aa2bb9845d371fef29c4659bcd7392e | 944637c76671adc2fdb909f7a62196bde27c9d22 | refs/heads/master | 2022-12-10T14:31:28.101355 | 2022-11-30T10:00:49 | 2022-11-30T10:00:49 | 79,220,131 | 4 | 0 | MIT | 2022-11-30T10:00:49 | 2017-01-17T11:16:57 | Python | UTF-8 | Python | false | false | 790 | py | # -*- coding: utf-8 -*-
import h5py
import numpy as np
# f = h5py.File(filename)
# data = f["detector0"]["sample"]["data"]
# filename = "/data/visitor/hg94/id21/ffproc/results/600Vmap1/map1.align.h5"
# data[np.isnan(data)] = 0
# filename = "/data/visitor/hg94/id21/ffproc/results/SKA2116_1.h5"
# f = h5py.File(filename)
# data = f["Aligned"]["NXdata"]["data"]
# data[np.isnan(data)] = 0
# data[:] = -np.log(data)
# filename = "/data/visitor/hg94/id21/ffproc/results/20Vpow_1.h5"
# f = h5py.File(filename)
# data = f["Aligned"]["NXdata"]["data"]
# data[np.isnan(data)] = 0
# data[:] = -np.log(data)
# filename = "/data/visitor/hg94/id21/ffproc/results/SKC1129_2.h5"
# f = h5py.File(filename)
# data = f["Aligned"]["NXdata"]["data"]
# data[np.isnan(data)] = 0
# data[:] = -np.log(data)
| [
"woutdenolf@users.sf.net"
] | woutdenolf@users.sf.net |
99939a85d3de3cf8133c71badb9e55e260e45e0d | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/Y/yomalM/jan11-match-result-premier-league.py | 9c1efa0db2483dcaad83cb5cfd0304710ab23f0b | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,460 | py | ############
#Note from creator(Yomal Mudalige)- I have been tried to change column orders, however still it is not finalized. Hence I added prefix 'A', 'B'.etc. Thanks
#Reference - Scraperwiki Tutorial 3
##################
# ScraperWiki view (Python 2): render January 2011 Premier League results
# from the 'test_pl' source scraper as an HTML table.
from scraperwiki.apiwrapper import getKeys, getData, getDataByDate, getDataByLocation
import datetime #to display update time
sourcescraper = "test_pl"
limit = 20
offset = 0
keys = getKeys(sourcescraper)
keys.sort() # alphabetically
d= datetime.datetime.today()
print '<th><font color =black face=verdana size=2>Last Update:</th>'
print d
print '</br>'
print '</br>'
print '<th><font color=990033 face=verdana size=5>January 2011- English Premier League</th>'
print '</br>'
print '</br>'
print '<th><font color=blue face=tahoma size=3><a href="http://scraperwiki.com/views/premier-league-table-201011-view/full/">Back to Points Table</a></th>'
print '<table border="5" cellpadding="15" style="border-collapse:collapse;">'
# column headings
print "<tr>",
for key in keys:
    print "<th>%s</th>" % key,
print "</tr>"
# rows: one table cell per key, in sorted-key order
for row in getData(sourcescraper, limit, offset):
    print "<tr>",
    for key in keys:
        print "<td>%s</td>" % row.get(key),
    print "</tr>"
print "</table>"
############
#Note from creator(Yomal Mudalige)- I have been tried to change column orders, however still it is not finalized. Hence I added prefix 'A', 'B'.etc. Thanks
#Reference - Scraperwiki Tutorial 3
##################
# NOTE(review): this block is a verbatim duplicate of the block above
# (apparent paste artifact); running the script emits the whole table twice.
# Confirm intent before removing either copy.
from scraperwiki.apiwrapper import getKeys, getData, getDataByDate, getDataByLocation
import datetime #to display update time
sourcescraper = "test_pl"
limit = 20
offset = 0
keys = getKeys(sourcescraper)
keys.sort() # alphabetically
d= datetime.datetime.today()
print '<th><font color =black face=verdana size=2>Last Update:</th>'
print d
print '</br>'
print '</br>'
print '<th><font color=990033 face=verdana size=5>January 2011- English Premier League</th>'
print '</br>'
print '</br>'
print '<th><font color=blue face=tahoma size=3><a href="http://scraperwiki.com/views/premier-league-table-201011-view/full/">Back to Points Table</a></th>'
print '<table border="5" cellpadding="15" style="border-collapse:collapse;">'
# column headings
print "<tr>",
for key in keys:
    print "<th>%s</th>" % key,
print "</tr>"
# rows
for row in getData(sourcescraper, limit, offset):
    print "<tr>",
    for key in keys:
        print "<td>%s</td>" % row.get(key),
    print "</tr>"
print "</table>"
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
d479dd125c06d779076e5385e954ef319cd87dc7 | d5f75adf5603927396bdecf3e4afae292143ddf9 | /python/paddle/fluid/incubate/fleet/parameter_server/ir/heter_trainer_pass.py | 0018b73e264798dd21450564812dfca5ec992038 | [
"Apache-2.0"
] | permissive | jiweibo/Paddle | 8faaaa1ff0beaf97ef7fb367f6c9fcc065f42fc4 | 605a2f0052e0ffb2fab3a4cf4f3bf1965aa7eb74 | refs/heads/develop | 2023-07-21T03:36:05.367977 | 2022-06-24T02:31:11 | 2022-06-24T02:31:11 | 196,316,126 | 3 | 2 | Apache-2.0 | 2023-04-04T02:42:53 | 2019-07-11T03:51:12 | Python | UTF-8 | Python | false | false | 3,295 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import warnings
import paddle.fluid.core as core
import paddle.fluid.framework as framework
from paddle.fluid.transpiler.details.program_utils import delete_ops
from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import find_heter_ops
from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import union_forward_gradient_op
from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import create_heter_program
from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import create_trainer_program
from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import find_block_joints
from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import find_op_input_output
from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import get_vars_name_in_block
def split_heter_worker_ops_pass(program, config, stage_id, device):
    """
    split heter worker program from origin-program
    1. find heter op (located on different device)
    2. find input&output of every heter-block
    3. create heter worker program, add listen&serv op
    """
    default_device = "cpu"
    program, heter_ops, _, program_block_ops = find_heter_ops(
        program, default_device)
    if not heter_ops:
        # Nothing runs on a heterogeneous device -- hand the program back.
        warnings.warn(
            "Currently running in Heter Parameter Server mode, but no OP running on heterogeneous devices, Please check your code."
        )
        return program

    program_block_ops = union_forward_gradient_op(program_block_ops)
    block_vars_detail = find_block_joints(program, program_block_ops, heter_ops)
    heter_program = framework.Program()
    create_heter_program(program, config, heter_program, program_block_ops,
                         heter_ops, block_vars_detail, device, stage_id)
    return heter_program
def split_trainer_ops_pass(program, config, default_device="cpu"):
    """
    split cpu-trainer program from origin-program
    1. find heter op (located on different device)
    2. find input&output of every heter-block
    3. create cpu-trainer program, add send&recv op
    """
    # Todo: support user define default_device (MrChengmo)
    program, heter_ops, default_ops, program_block_ops = find_heter_ops(
        program, default_device)

    program_block_ops = union_forward_gradient_op(program_block_ops)
    block_vars_detail = find_block_joints(program, program_block_ops, heter_ops)
    trainer_program = program.clone()
    create_trainer_program(trainer_program, program, config, program_block_ops,
                           block_vars_detail)
    return trainer_program
| [
"noreply@github.com"
] | jiweibo.noreply@github.com |
a875b5e5061429293af6336e793068a510e23783 | b7429c03761db1d1a58494c92da355542dba86e7 | /Python高效开发实战——Django、Tornado、Flask、Twisted/src/chapter9/9-5.py | e87ae9db8f8afd62caec2efd68829be229947ab2 | [] | no_license | daedalaus/practice | 90abc1cd15ca5230a8a9deb2bbc532c3f36f307b | 916a3269cb3946f33bc87b289c5f20f26c265436 | refs/heads/master | 2020-11-30T17:16:29.337509 | 2020-01-18T08:48:49 | 2020-01-18T08:48:49 | 230,448,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
import threading
import time
import datetime
host = '127.0.0.1'
port = 8007
class Echo(DatagramProtocol):  # DatagramProtocol subclass: simple connected-UDP client
    def startProtocol(self):  # called once the transport is ready
        self.transport.connect(host, port)  # pin the peer address/port (connected UDP)
        self.transport.write(b'Here is the first connected message')  # send greeting datagram
        print('Connection created!')
    def datagramReceived(self, datagram, addr):  # called for each datagram received
        print(datagram.decode('utf-8'))
    def connectionRefused(self):  # called after each failed transmission
        print('sent failed!')
    def stopProtocol(self):  # called when the protocol is shut down
        print('Connection closed!')
protocol = Echo()  # single Protocol instance shared by the sender thread and the reactor
def routine():  # send a message to the server every 5 seconds
    time.sleep(1)  # brief delay before the first send
    while True:
        protocol.transport.write(('%s: say hello to myself.' % datetime.datetime.now()).encode('utf-8'))
        time.sleep(5)
threading.Thread(target=routine).start()
reactor.listenUDP(port, protocol)  # bind the protocol; acts as the message receiver
reactor.run()  # block here running the event loop
| [
"lzy_2318@163.com"
] | lzy_2318@163.com |
5469d275c96e668c247f7b4dfeb30ac7a2ada67e | 7dbd800d64919e93805c16f665e78c81ca2b5278 | /py4e/assignment10/words_counting.py | bf159549ea48bd0ac02e76d938c5b5bbc9c5854b | [] | no_license | merak0514/python | f6ea0ae372a6c52e2a5fae7c78f3bce13770b774 | 3451854a07b97fadf6ffd4d8f41863181a2243cb | refs/heads/master | 2022-05-09T23:58:15.462620 | 2022-04-04T12:24:09 | 2022-04-04T12:24:09 | 123,144,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | # count words
# Interactive word-frequency counter: prompts for a text file, strips
# punctuation, lowercases, then prints the most frequent words.
import string
fname = input('File name: ')
try:
    fhand = open(fname)
except:  # NOTE(review): bare except hides the real failure (bad name, permissions)
    print('Error')
    quit()
wordlist = {}  # word -> occurrence count
for line in fhand:
    line = line.rstrip()
    # Remove all punctuation characters in a single pass.
    line = line.translate(line.maketrans('', '', string.punctuation))
    line = line.lower()
    words = line.split()
    for word in words:
        wordlist[word] = wordlist.get(word, 0) + 1
if 'i' in wordlist.keys():
    # Re-key the lowercased pronoun 'i' back to 'I'.
    wordlist['I'] = wordlist.pop('i')
new_wordlist = sorted(
    [(k, v) for (v, k) in wordlist.items()], reverse=True)  # (count, word) pairs, descending
print('Most used words: ')
for k, v in new_wordlist[0:9]:  # NOTE(review): slice [0:9] prints 9 words, not 10 -- confirm intent
    print(v, k)
| [
"2692195809@qq.com"
] | 2692195809@qq.com |
49ed7ee7902cbe3fdd70e37b96b8de65bdc08c75 | af5c5761c21f9251cc89565ab3b40b0dd560555f | /doc/.src/book/src/ex_approx1D_session.py | 771173796d96b928e1a913e907b486eb1ff1de7b | [] | no_license | kent-and/fem-book | d67e282a47b23650b8d9ee1c7a04989f92121848 | b24022ad59bb2185f7524b71fba58e5d994ab806 | refs/heads/master | 2020-05-20T22:26:24.287598 | 2016-08-17T19:47:23 | 2016-08-17T19:47:23 | 53,691,843 | 1 | 0 | null | 2016-03-11T19:33:14 | 2016-03-11T19:33:13 | null | UTF-8 | Python | false | false | 496 | py | # to be run by scitools file2interactive
# Interactive session (Python 2): least-squares approximation of a parabola
# with the power basis {1, x, x**2} on Omega=[1,2], then a plot showing how
# similar the basis functions x**i are on that interval.
from approx1D import *
x = sym.Symbol('x')
f = 10*(x-1)**2-1
u = least_squares(f=f, psi=[1, x, x**2], Omega=[1, 2])
print u
print sym.expand(f)
# show how equal x**i functions are (ill-conditioning)
import numpy as np
x = np.linspace(1, 2, 1001)
import matplotlib.pyplot as plt
#import scitools.std as plt
for i in range(15):
    plt.plot(x, x**i, '-')
    plt.hold('on')  # NOTE(review): plt.hold was removed in modern matplotlib
plt.savefig('tmp_basis_xini.pdf')
plt.savefig('tmp_basis_xini.png')
plt.show()
| [
"hpl@simula.no"
] | hpl@simula.no |
cfff7859b73b0c1cd037a8cc56e5904ef005f50f | 5bea85271fa7d8d950f6f75ef251e2fe4a3672b8 | /model/vae.py | 43efcd8a555657c9aeb62ef19ab4c9a62a73e053 | [] | no_license | umutkucukaslan/thesis | ca4095c8d9caa128edb3be3dfdbd031c2d22a28f | 149a9612e9f76fbaff227d8aaa9b949aa9ed33c7 | refs/heads/master | 2023-03-26T13:00:27.677705 | 2021-02-05T22:19:49 | 2021-02-05T22:19:49 | 221,968,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py |
import tensorflow as tf
def build_encoder(input_shape=(128, 128, 3), output_shape=128, filters=(32, 64, 128), kernel_size=5,
pool_size=(2, 2), batch_normalization=False, activation="relu", name='encoder', alpha=0.2):
"""
Assumes input is in the range [-1, 1]
:param input_shape:
:param output_shape:
:param filters:
:param kernel_size:
:param pool_size:
:param batch_normalization:
:param activation: None, "relu", "leakyrelu" or "swish"
:param name:
:param alpha:
:return:
"""
inputs = tf.keras.Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
x = inputs
for i in range(len(filters)):
x = tf.keras.layers.Conv2D(filters=filters[i],
kernel_size=kernel_size,
padding='same',
activation=None)(x)
if activation == "relu":
x = tf.keras.layers.ReLU()(x)
elif activation == "leakyrelu":
x = tf.keras.layers.LeakyReLU(alpha)(x)
elif activation == "swish":
x = tf.nn.swish(x)
x = tf.keras.layers.MaxPool2D(pool_size=pool_size, padding='same')(x)
if batch_normalization:
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Flatten()(x)
output_mean = tf.keras.layers.Dense(output_shape, activation=None, name='mean')(x)
output_std = tf.keras.layers.Dense(output_shape, activation=None, name='std')(x)
return tf.keras.Model(inputs=inputs, outputs=[output_std, output_mean], name=name)
| [
"umutkucukaslan@hotmail.com"
] | umutkucukaslan@hotmail.com |
1d193ed151a98e53a1500f108127e2414d73cb61 | 04dd31f1d4c84b96777107701e727c54c4fc704e | /MINE_PY27/ThePracticeOfML/FirstEditionMaster/7-9.PY | b3626c77bc7424c05457cd67cca316ba71843ca2 | [] | no_license | stormstone/LEARN-PYTHON | 8df6ee96ebf1d2da7703d726dd18061956e2412f | 23636e4c267f82815be5e203d0f4fd66acd18ccf | refs/heads/master | 2021-01-01T18:10:47.650160 | 2017-12-21T14:14:59 | 2017-12-21T14:14:59 | 98,271,596 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#code:myhaspl@qq.com
#7-9.py
# Train an ANN classifier (mplannliner) on 2-D and then 3-D two-class
# samples, and classify unseen points.  Python 2 syntax throughout
# (print statements, u'' literals).  The u"正类"/u"负类" output strings
# ("positive class"/"negative class") are runtime text and left as-is.
import mplannliner as nplann
traindata1=[[[9,25],-1],[[5,8],-1],[[15,31],-1],[[35,62],-1],[[19,40],-1],[[28,65],1],[[20,59],1],[[9,41],1],[[12,60],1],[[2,37],1]]
myann=nplann.Mplannliner()
# Initialize the training samples.
myann.samples_init(traindata1)
# Initialize the learning rate.
myann.a_init(0.1)
# Initialize the search time constant.
myann.r_init(50)
# Maximum number of training iterations.
myann.maxtry_init(500)
# Desired minimum error.
myann.e_init(0.05)
# Train.
myann.train()
# Simulate/test: classify an unseen sample.
myc=myann.simulate([35,68])
print "[35,68]"
if myc==1:
    print u"正类"
else:
    print u"负类"
# Show the test point on the final plot: add it to the drawponint set; it is
# drawn as "*" and colored according to its predicted class.
myann.drawponint_add([35,68])
myc=myann.simulate([35,82])
print "[35,82]"
if myc==1:
    print u"正类"
else:
    print u"负类"
myann.drawponint_add([35,82])
myann.draw2d()
# Train again below using all-default parameters (3-D samples).
traindata2=[[[9,25,30],-1],[[5,8,12],-1],[[15,31,49],-1],[[35,62,108],-1],[[19,40,60],-1],[[28,65,98],1],[[20,59,72],1],[[9,41,38],1],[[12,60,46],1],[[2,37,18],1]]
myann2=nplann.Mplannliner()
myann2.samples_init(traindata2)
myann2.train()
myc=myann2.simulate([35,68,110])
print "[35,68,110]"
if myc==1:
    print u"正类"
else:
    print u"负类"
"2499144744@qq.com"
] | 2499144744@qq.com |
0ad1badda72db888146eeae0cbbc7f9ec618fa09 | c641eca2b95da76ab0b0fc0ce3156e496559cebd | /src/0106/solution.py | 6f09f0e7cb4230ad2aa68d13f1ddc9a5a9611f8c | [
"MIT"
] | permissive | jiangshanmeta/lintcode | 8915365ea5c8d602fca192e33e374be26130d4e6 | 9cc07edcaa3e7ec8e7b76a19020e6743ebc03875 | refs/heads/master | 2023-08-07T22:22:15.974814 | 2023-08-06T08:49:58 | 2023-08-06T08:49:58 | 212,155,398 | 8 | 2 | MIT | 2023-08-06T08:50:00 | 2019-10-01T17:18:04 | JavaScript | UTF-8 | Python | false | false | 925 | py | """
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """
    @param: head: The first node of linked list.
    @return: a tree node
    """
    def sortedListToBST(self, head):
        """Build a height-balanced BST from a sorted singly linked list.

        The middle node (found via slow/fast pointers) becomes the root and
        each half is converted recursively.  The input list is consumed:
        links are cut at the middle on every recursion level.
        """
        if head is None:
            return head
        if head.next is None:
            return TreeNode(head.val)
        # Advance slow one step and fast two steps per iteration; when fast
        # falls off the end, slow sits on the middle node.
        tail_of_left = None
        slow = head
        fast = head
        while fast and fast.next:
            tail_of_left = slow
            slow = slow.next
            fast = fast.next.next
        tail_of_left.next = None  # detach the left half from the middle
        subtree_root = TreeNode(slow.val)
        subtree_root.left = self.sortedListToBST(head)
        subtree_root.right = self.sortedListToBST(slow.next)
        return subtree_root
| [
"540118044@qq.com"
] | 540118044@qq.com |
4ae1478682dfe9974355b06fd2ab3ad41d0ce6fa | d8d9dec0450d5f7fe15f9d5a42d7c2998d4cb649 | /tools/import-categories.py | da45ddc3a16e9c0df8d2bb7f816e67caccaa6e1f | [] | no_license | hansenanders/bank | 678c2af2e3d12502bf49af28ec7758dc4af915ed | e79f0957f51e3360c6f01916e87dab79790fa9d9 | refs/heads/master | 2021-01-12T15:07:25.020993 | 2016-11-09T19:26:23 | 2016-11-09T19:26:23 | 71,704,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | #!/usr/bin/env python3
import argparse
import sqlite3
import json
import sys

# CLI: the target SQLite database plus a JSON file mapping
# category name -> category type.
parser = argparse.ArgumentParser(description='import categories to sqlitedb.')
parser.add_argument(
    '-d', '--sqlite-db-file', required=True, action='store', help='path to SQLiteDB')
parser.add_argument('--data-file', required=True, help='data file path')
args = parser.parse_args()

db = sqlite3.connect(args.sqlite_db_file)
with open(args.data_file) as f:
    data = json.load(f)

cur = db.cursor()
for name, category_type in data.items():
    try:
        # Parameterized query: fixes the previous string-formatted INSERT,
        # which broke on names containing quotes and was injectable.
        query = 'INSERT INTO categories (name, type) VALUES (?, ?)'
        print(query, (name, category_type))
        cur.execute(query, (name, category_type))
        db.commit()
    except Exception as e:
        # Best-effort import: report the failing row and keep going.
        print(e)
        db.rollback()

# Dump the table so the operator can eyeball the result.
cur.execute("SELECT * FROM categories")
rows = cur.fetchall()
print(rows)
db.close()
| [
"you@example.com"
] | you@example.com |
726b5f70d7d7ab3a0b0228386190ec07a7ec2710 | 54408d1b51d1557dea78e0c1dc970240c428a25a | /python/tree/binary_tree_level_order_traversal.py | e4d8b0f7d006c52364fa6d8c51afb5063c397111 | [
"Apache-2.0"
] | permissive | MiKueen/interview | 5d115f570b7d2fea607bf2b4dafdba92f035be82 | 45741d49b49a9817d43b27b05757d95f3b9d9775 | refs/heads/master | 2020-08-13T22:57:55.968032 | 2019-10-14T13:56:07 | 2019-10-14T13:56:07 | 215,052,698 | 0 | 0 | Apache-2.0 | 2019-10-14T13:34:40 | 2019-10-14T13:34:39 | null | UTF-8 | Python | false | false | 1,253 | py | '''
Author : MiKueen
Level : Medium
Problem Statement : Binary Tree Level Order Traversal
https://leetcode.com/problems/binary-tree-level-order-traversal/
Given a binary tree, return the level order traversal of its nodes' values. (ie, from left to right, level by level).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its level order traversal as:
[
[3],
[9,20],
[15,7]
]
'''
from collections import deque
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def levelOrder(self, root):
        """
        Breadth-first level-order traversal.
        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if not root:
            return []
        result = []
        frontier = deque([root])
        while frontier:
            current_level = []
            # Drain exactly the nodes that were queued for this level.
            for _ in range(len(frontier)):
                node = frontier.popleft()
                if node.left:
                    frontier.append(node.left)
                if node.right:
                    frontier.append(node.right)
                current_level.append(node.val)
            result.append(current_level)
        return result
| [
"keshvi2298@gmail.com"
] | keshvi2298@gmail.com |
6d6e09031e8fdc4910811e20775215820b3f3f11 | 31ff0321d4aa46e43c193e82cf96806df127d63e | /source/code/glyphNanny/defaults.py | a1f05f71eaabc0b669226549613613353c1d8d29 | [
"MIT"
] | permissive | typesupply/glyph-nanny | 0c76fa7d3abb4914782669dc053ce6ed6738a1e5 | dd9db49ca8a7725bb311d89d9ee1efc866298f20 | refs/heads/master | 2023-04-13T05:58:16.149056 | 2023-04-03T20:43:03 | 2023-04-03T20:43:03 | 22,653,004 | 38 | 10 | MIT | 2023-04-03T20:43:05 | 2014-08-05T17:37:41 | Python | UTF-8 | Python | false | false | 3,826 | py | from mojo.extensions import (
registerExtensionDefaults,
getExtensionDefault,
setExtensionDefault
)
from .tests.registry import testRegistry
# Every Glyph Nanny preference key is namespaced under this prefix.
defaultKeyStub = "com.typesupply.GlyphNanny2."

# Factory defaults for display options, drawing colors (four-component
# tuples), line widths, and report text styling.
defaults = {
    defaultKeyStub + "displayLiveReport" : True,
    defaultKeyStub + "testDuringDrag" : False,
    defaultKeyStub + "displayTitles" : True,
    defaultKeyStub + "colorInform" : (0, 0, 0.7, 0.3),
    defaultKeyStub + "colorReview" : (1, 0.7, 0, 0.7),
    defaultKeyStub + "colorRemove" : (1, 0, 0, 0.5),
    defaultKeyStub + "colorInsert" : (0, 1, 0, 0.75),
    defaultKeyStub + "lineWidthRegular" : 1,
    defaultKeyStub + "lineWidthHighlight" : 4,
    defaultKeyStub + "textFont" : "system",
    defaultKeyStub + "textFontWeight" : "medium",
    defaultKeyStub + "textPointSize" : 10,
}
# Every registered glyph test defaults to enabled.
for testIdentifier in testRegistry.keys():
    defaults[defaultKeyStub + "testState." + testIdentifier] = True
registerExtensionDefaults(defaults)
# -----
# Tests
# -----
def getTestState(testIdentifier):
    """Return the stored on/off state for the test named *testIdentifier*."""
    key = defaultKeyStub + "testState." + testIdentifier
    return getExtensionDefault(key)

def setTestState(testIdentifier, value):
    """Store *value* as the on/off state for the test named *testIdentifier*."""
    key = defaultKeyStub + "testState." + testIdentifier
    setExtensionDefault(key, value)
# -------
# Display
# -------
# Live Report
def getDisplayLiveReport():
    """Return the stored 'displayLiveReport' preference."""
    key = defaultKeyStub + "displayLiveReport"
    return getExtensionDefault(key)

def setDisplayLiveReport(value):
    """Store the 'displayLiveReport' preference."""
    key = defaultKeyStub + "displayLiveReport"
    setExtensionDefault(key, value)

# Test During Drag
def getTestDuringDrag():
    """Return the stored 'testDuringDrag' preference."""
    key = defaultKeyStub + "testDuringDrag"
    return getExtensionDefault(key)

def setTestDuringDrag(value):
    """Store the 'testDuringDrag' preference."""
    key = defaultKeyStub + "testDuringDrag"
    setExtensionDefault(key, value)

# Titles
def getDisplayTitles():
    """Return the stored 'displayTitles' preference."""
    key = defaultKeyStub + "displayTitles"
    return getExtensionDefault(key)

def setDisplayTitles(value):
    """Store the 'displayTitles' preference."""
    key = defaultKeyStub + "displayTitles"
    setExtensionDefault(key, value)
# ------
# Colors
# ------
# Inform
def getColorInform():
    """Return the stored 'inform' color (RGBA tuple)."""
    key = defaultKeyStub + "colorInform"
    return getExtensionDefault(key)

def setColorInform(value):
    """Store the 'inform' color."""
    key = defaultKeyStub + "colorInform"
    setExtensionDefault(key, value)

# Review
def getColorReview():
    """Return the stored 'review' color (RGBA tuple)."""
    key = defaultKeyStub + "colorReview"
    return getExtensionDefault(key)

def setColorReview(value):
    """Store the 'review' color."""
    key = defaultKeyStub + "colorReview"
    setExtensionDefault(key, value)

# Remove
def getColorRemove():
    """Return the stored 'remove' color (RGBA tuple)."""
    key = defaultKeyStub + "colorRemove"
    return getExtensionDefault(key)

def setColorRemove(value):
    """Store the 'remove' color."""
    key = defaultKeyStub + "colorRemove"
    setExtensionDefault(key, value)

# Insert
def getColorInsert():
    """Return the stored 'insert' color (RGBA tuple)."""
    key = defaultKeyStub + "colorInsert"
    return getExtensionDefault(key)

def setColorInsert(value):
    """Store the 'insert' color."""
    key = defaultKeyStub + "colorInsert"
    setExtensionDefault(key, value)
# -----------
# Line Widths
# -----------
# Line: Regular
def getLineWidthRegular():
    """Return the stored regular line width."""
    key = defaultKeyStub + "lineWidthRegular"
    return getExtensionDefault(key)

def setLineWidthRegular(value):
    """Store the regular line width."""
    key = defaultKeyStub + "lineWidthRegular"
    setExtensionDefault(key, value)

# Line: Highlight
def getLineWidthHighlight():
    """Return the stored highlight line width."""
    key = defaultKeyStub + "lineWidthHighlight"
    return getExtensionDefault(key)

def setLineWidthHighlight(value):
    """Store the highlight line width."""
    key = defaultKeyStub + "lineWidthHighlight"
    setExtensionDefault(key, value)
# ----
# Text
# ----
def getTextFont():
    """Return the text settings as a dict with 'font', 'weight'
    and 'pointSize' keys."""
    return {
        "font": getExtensionDefault(defaultKeyStub + "textFont"),
        "weight": getExtensionDefault(defaultKeyStub + "textFontWeight"),
        "pointSize": getExtensionDefault(defaultKeyStub + "textPointSize"),
    }
def setTextFont(data):
    """Store text settings from *data*.

    Accepts the dict shape produced by ``getTextFont`` (keys ``font``,
    ``weight``, ``pointSize``). Keys that are missing or ``None`` leave
    the corresponding stored default untouched.

    Fix: the weight was previously read only from the key
    ``"textFontWeight"``, which ``getTextFont`` never produces (it emits
    ``"weight"``), so round-tripping the settings silently dropped the
    weight. ``"weight"`` is now accepted, with ``"textFontWeight"`` kept
    as a backward-compatible fallback.
    """
    font = data.get("font")
    if font is not None:
        setExtensionDefault(defaultKeyStub + "textFont", font)
    weight = data.get("weight", data.get("textFontWeight"))
    if weight is not None:
        setExtensionDefault(defaultKeyStub + "textFontWeight", weight)
    pointSize = data.get("pointSize")
    if pointSize is not None:
        setExtensionDefault(defaultKeyStub + "textPointSize", pointSize)
| [
"tal@typesupply.com"
] | tal@typesupply.com |
7c3f6a0d31b8b6fd29726bd1b1238609fb158ee8 | 83a63364499df53dec8d0175370c959a231563e9 | /blog/tangerine/migrations/0008_auto_20171119_0930.py | 506a228963f2730ccb473da4bab337a10b0613b7 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | shacker/tangerine | f328750467d21e9be2642c833c0cf5e44f7802aa | 0ca1571a52f6901d1ae10243c4514630853d51ed | refs/heads/main | 2023-05-10T23:25:09.294128 | 2023-04-29T07:19:19 | 2023-04-29T07:19:19 | 108,086,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | # Generated by Django 2.1.dev20171104145028 on 2017-11-19 17:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: makes Comment.author optional (blank/null),
    keeping it a FK to the configured user model for authenticated commenters."""
    dependencies = [
        ('tangerine', '0007_auto_20171118_1743'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='author',
            field=models.ForeignKey(blank=True, help_text='ForeignKey to User object; used for authenticated commenters only.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"shacker@birdhouse.org"
] | shacker@birdhouse.org |
a738842a3973e0eda8c51df858a61a1f65242169 | 37fef592f365194c28579f95abd222cc4e1243ae | /Financial_Modeling/Project 4.py | 3e03974ee863c96966c95fe38305908e45d82a6c | [] | no_license | edimaudo/Python-projects | be61e0d3fff63fb7bd00513dbf1401e2c1822cfb | 85d54badf82a0b653587a02e99daf389df62e012 | refs/heads/master | 2023-04-07T03:26:23.259959 | 2023-03-24T12:03:03 | 2023-03-24T12:03:03 | 72,611,253 | 4 | 3 | null | 2022-10-31T18:10:41 | 2016-11-02T06:37:17 | null | UTF-8 | Python | false | false | 1,665 | py | # Libraries
import streamlit as st
import pandas as pd
import matplotlib as mp
import plotly.express as px
import os, os.path
import warnings
import numpy as np
from datetime import datetime
import random
warnings.simplefilter(action='ignore', category=FutureWarning)
import math
# Configure the Streamlit page before rendering any widgets.
st.set_page_config(
    page_title = "Project 4 - Full DCF Valuation",
    layout = 'wide'
)
# Page headings.
st.title('Financial Modeling using Python')
st.header("Project 4 - Full DCF Valuation")
st.subheader("The Problem")
# Collapsible panel holding the assignment statement (static text only).
with st.expander(" "):
    st.write("""
    The purpose of this exercise is to complete a full discounted cash flow valuation of a stock from end to end, complete
    with all of the additional analyses you learned throughout the course. You can pick any publicly traded stock for
    your valuation. You must find the data on your own and research the company’s operations. Ultimately the main
    output is your valuation of the stock, but you must also provide a written justification of why you believe this value
    to be correct. You must discuss and show how variable this estimate is, as well as what could have large effects on
    the valuation. You should also consider several realistic scenarios based on states of the economy, and how these
    scenarios affect the valuation.
    Some of the components of your project should include:
    • WACC estimation
    • FCF estimation and forecasting (must forecast financial statements, not only FCFs directly, though that can
    be an extra check)
    • Terminal value estimation using both perpetuity growth and various exit multiples
    • Monte carlo simulation
    • Sensitivity analysis
    • Scenario analysis
    • Visualization
    """)
| [
"edimaudo@gmail.com"
] | edimaudo@gmail.com |
c6fe08208cd171c3893d2b8d5e0cbe8c5b590934 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-3/cf8e84fc6289f0d1a9ec07b91f1e7f1b2716028c-<api_call_for_rule>-fix.py | de82b86e6f485096ebccf7e61c3e9cd1af3ec5a3 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,707 | py | def api_call_for_rule(module, api_call_object):
is_access_rule = (True if ('access' in api_call_object) else False)
payload = get_payload_from_parameters(module.params)
connection = Connection(module._socket_path)
result = {
'changed': False,
}
if module.check_mode:
return result
version = ((('v' + module.params['version']) + '/') if module.params.get('version') else '')
if is_access_rule:
copy_payload_without_some_params = get_copy_payload_without_some_params(payload, ['action', 'position'])
else:
copy_payload_without_some_params = get_copy_payload_without_some_params(payload, ['position'])
payload_for_equals = {
'type': api_call_object,
'params': copy_payload_without_some_params,
}
(equals_code, equals_response) = send_request(connection, version, 'equals', payload_for_equals)
result['checkpoint_session_uid'] = connection.get_session_uid()
if ((equals_code == 400) or (equals_code == 500)):
module.fail_json(msg=equals_response)
if (module.params['state'] == 'present'):
if (equals_code == 200):
if equals_response['equals']:
if (not is_equals_with_all_params(payload, connection, version, api_call_object, is_access_rule)):
equals_response['equals'] = False
if (not equals_response['equals']):
if ('position' in payload):
payload['new-position'] = payload['position']
del payload['position']
(code, response) = send_request(connection, version, ('set-' + api_call_object), payload)
if (code != 200):
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
result[api_call_object] = response
else:
pass
elif (equals_code == 404):
(code, response) = send_request(connection, version, ('add-' + api_call_object), payload)
if (code != 200):
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
result[api_call_object] = response
elif (module.params['state'] == 'absent'):
if (equals_code == 200):
(code, response) = send_request(connection, version, ('delete-' + api_call_object), payload)
if (code != 200):
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
elif (equals_code == 404):
pass
return result | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
84e81158f9ce9e4b792edce641911b13c4c00de8 | be0e0488a46b57bf6aff46c687d2a3080053e52d | /python/baekjoon/11399.py | 6f5c2c8eb7384490b7d3a194fc1b4bd34f796f13 | [] | no_license | syo0e/Algorithm | b3f8a0df0029e4d6c9cbf19dcfcb312ba25ea939 | 1ae754d5bb37d02f28cf1d50463a494896d5026f | refs/heads/master | 2023-06-09T11:31:54.266900 | 2021-06-30T17:04:38 | 2021-06-30T17:04:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | N = int(input())
arr = list(map(int,input().split()))
arr.sort()
ans = 0
for i in range(len(arr)):
ans+=arr[i]*(N-i)
print(ans) | [
"kyun2dot@gmail.com"
] | kyun2dot@gmail.com |
b1c964b0f5a6f5e482f4c668f25f65cf1f10ce37 | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /log-20190927/132.230.102.123-10.21.11.36/1569574793.py | 9344099c29c8565bef8b57eea46b4316b4188abe | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,919 | py | import functools
import typing
import string
import random
import pytest
## Lösung Teil 1.
def nwords(s: str) -> int:
    """Count the number of words in a given string. Words are separated by at least one char in string.whitespace

    Args:
        s (str): A string whose words are counted

    Returns:
        int: Number of words in the given string
    """
    # Count word starts instead of materialising a list of the words and
    # taking its length (same whitespace semantics, no intermediate list):
    # a word starts at a non-whitespace char preceded by whitespace or by
    # the start of the string.
    count = 0
    in_word = False
    for ch in s:
        if ch in string.whitespace:
            in_word = False
        else:
            if not in_word:
                count += 1
            in_word = True
    return count
## Lösung Teil 2.
def word_count_iter(it) -> tuple:
    """Takes an iterable that yields a str in every iteration and returns a tuple
    with the number of lines, words and characters

    Args:
        it (iterable)

    Returns:
        tuple
    """
    line_total = 0
    word_total = 0
    char_total = 0
    for text in it:
        line_total += 1
        word_total += nwords(text)
        char_total += len(text)
    return (line_total, word_total, char_total)
######################################################################
## hidden code
def mk_coverage():
    """Build the grading decorator. The returned ``coverage`` callable both
    wraps ``word_count_iter`` (recording which input classes were exercised)
    and answers string queries: "achieved", "required", "count", "original".
    """
    covered = set()
    # Six input classes: 0 empty, 1 single line, 2 multi line, 3 contains "",
    # 4 multi line with some whitespace, 5 multi line with none.
    target = set(range(6))
    count = 0
    original = None
    def coverage(func):
        nonlocal covered, target, count, original
        def wrapper(it):
            nonlocal covered, count
            # Materialise the iterable so it can be inspected and passed on.
            lit = list (it)
            r = func (lit)
            count += 1
            if lit == []:
                covered.add(0)
            elif len (lit) == 1:
                covered.add(1)
            else:
                covered.add(2)
            if "" in lit:
                covered.add (3)
            if len (lit) > 1:
                # Any line containing at least one whitespace character?
                if [line for line in lit if [x for x in line if x in string.whitespace]]:
                    covered.add (4)
                else:
                    covered.add (5)
            return r
        # Query mode: the decorator doubles as a status accessor.
        if func == "achieved": return len(covered)
        if func == "required": return len(target)
        if func == "count"   : return count
        if func == "original": return original
        original = func
        functools.update_wrapper (wrapper, func)
        return wrapper
    return coverage
coverage = mk_coverage()
# Wrap the student's function; bare except keeps grading alive if it is absent.
try:
    word_count_iter = coverage(word_count_iter)
except:
    pass
## Lösung Teil 3. (Tests)
def test_word_count_iter():
    """Basic checks for word_count_iter.

    Fixes: the last assertion called a misspelled name
    (``word_counter_iter``), which raised NameError instead of testing
    anything, and its expected word count was wrong — ",,,,," contains no
    whitespace, so it is one word. The all-space strings are built with
    ``" " * 5`` so their length is explicit.
    """
    iter1 = ["Hello, World", "Hallo, Welt"]
    iter2 = [" " * 5]
    iter3 = [" " * 5, ",,,,,"]
    assert word_count_iter(iter1) == (2, 4, 23)
    assert word_count_iter(iter2) == (1, 0, 5)
    assert word_count_iter(iter3) == (2, 1, 10)
## revert
# Undo the coverage wrapper before the hidden tests run;
# functools.update_wrapper set __wrapped__ on the wrapper, so this restores
# the original function. Bare except tolerates the unwrapped case.
try:
    word_count_iter = word_count_iter.__wrapped__
except:
    pass
## Lösung Teil 4.
######################################################################
## hidden test code
# Run the tests defined so far in-process before the grading classes below.
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
# Used by the grading classes to inspect the students' function signatures.
from inspect import getfullargspec
class TestNames:
    """Grading: required functions exist and use the required parameter names.
    NOTE(review): ``word_count`` is referenced but not defined in the visible
    part of this file (Teil 4 was left unanswered)."""
    def test_nwords (self):
        assert nwords
        assert 's' in getfullargspec(nwords).args
    def test_word_count_iter(self):
        assert word_count_iter
    def test_word_count(self):
        assert word_count
        assert 'file' in getfullargspec(word_count).args
class TestGrades:
    """Grading: compares the student's functions against reference oracles
    on random inputs, and checks docstrings/annotations are present."""
    def test_docstring_present(self):
        assert nwords.__doc__ is not None
        assert word_count_iter.__doc__ is not None
        assert word_count.__doc__ is not None
    def test_typing_present (self):
        assert nwords.__annotations__ == self.nwords_oracle.__annotations__
        assert word_count_iter.__annotations__ == self.word_count_iter_oracle.__annotations__
        assert word_count.__annotations__ == self.word_count_oracle.__annotations__
    # Reference implementation: str.split collapses runs of whitespace.
    def nwords_oracle (self, s:str) -> int:
        return len (s.split())
    def test_nwords(self):
        # 100 random 1000-char strings drawn from string.printable.
        charset = string.printable
        for i in range (100):
            s = ''.join (random.choice (charset) for j in range (1000))
            assert nwords (s) == self.nwords_oracle (s)
    # Reference implementation for (lines, words, chars) counting.
    def word_count_iter_oracle(self, iter):
        lines = 0
        words = 0
        chars = 0
        for line in iter:
            lines += 1
            chars += len(line)
            r = line.split()
            words += len(r)
        return (lines, words, chars)
    def test_wci_empty (self):
        assert word_count_iter ([]) == (0,0,0)
    def test_wci_one (self):
        assert word_count_iter (["a"]) == (1, 1, 1)
    def test_wci_simple (self):
        for i in range (50):
            assert word_count_iter (i * ["a"]) == (i,i,i)
    def test_wci_scale (self):
        for i in range (20):
            assert word_count_iter (i * ["a bb"]) == (i, 2*i, 4*i)
    def test_word_count_iter(self):
        # Random lists of up to 9 random 1000-char lines.
        charset = string.printable
        for i in range (100):
            l = random.randrange (10)
            subject = [''.join (random.choice (charset) for j in range (1000)) for k in range(l)]
            assert word_count_iter (subject) == self.word_count_iter_oracle (subject)
    def word_count_oracle(self,file:str):
        return self.word_count_iter_oracle (open (file))
    def test_some_known_files(self):
        # Passes if at least one of three environment-dependent files matches.
        count = 3
        try:
            assert word_count ("/usr/share/dict/words") == (235886, 235886, 2493109)
        except:
            count = count - 1
        try:
            assert word_count ("/usr/share/doc/libpython3.6-minimal/copyright") == (995, 7030, 49855)
        except:
            count = count - 1
        try:
            f = "/data/test_code.py"
            assert word_count (f) == self.word_count_oracle (f)
        except:
            count = count - 1
        assert count > 0
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
6080231deaceb0a21438174ebaeac53b7355ac8c | d5f611f9abadc91adc18cf958d9874a14e249b32 | /traffic/core/distance.py | dd6658d7a58149ca17872b7191849abda1c4dfca | [
"MIT"
] | permissive | avgisSI/traffic | c01943a6814f64a7d4f14f71696cbb9e90c4bc10 | d7f21401cba3a393dec082b1a7d8a152cc7a995b | refs/heads/master | 2023-07-22T17:18:48.421208 | 2021-09-07T12:43:50 | 2021-09-07T12:43:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,214 | py | import logging
from typing import TYPE_CHECKING, NamedTuple, Optional
import numpy as np
import pandas as pd
from . import geodesy as geo
if TYPE_CHECKING:
from ..data.basic.airports import Airports # noqa: F401
from .mixins import PointMixin # noqa: F401
from .structure import Airport
def closest_point(
data: pd.DataFrame,
point: Optional["PointMixin"] = None,
*args,
latitude: Optional[float] = None,
longitude: Optional[float] = None,
) -> pd.Series:
if point is not None:
latitude = point.latitude
longitude = point.longitude
name = point.name
else:
name = "unnamed"
assert latitude is not None and longitude is not None
dist_vect = geo.distance(
data.latitude.values,
data.longitude.values,
latitude * np.ones(len(data.latitude)),
longitude * np.ones(len(data.longitude)),
)
argmin = dist_vect.argmin()
elt = data.iloc[argmin]
return pd.Series(
{**dict(elt), **{"distance": dist_vect[argmin], "point": name}},
name=elt.name,
)
def guess_airport(
point: Optional[NamedTuple] = None,
*args,
latitude: Optional[float] = None,
longitude: Optional[float] = None,
dataset: Optional["Airports"] = None,
warning_distance: Optional[float] = None,
) -> "Airport":
if dataset is None:
from ..data import airports
dataset = airports
# TODO define a protocol instead of PointMixin
if point is not None:
longitude = point.longitude # type: ignore
latitude = point.latitude # type: ignore
if any((longitude is None, latitude is None)):
raise RuntimeError("latitude or longitude are None")
airport_data = closest_point(
dataset.data, latitude=latitude, longitude=longitude
)
airport = dataset[airport_data.icao]
assert airport is not None
airport.distance = airport_data.distance # type: ignore
if warning_distance is not None and airport.distance > warning_distance:
logging.warning(
f"Closest airport is more than {warning_distance*1e-3}km away "
f" (distance={airport.distance})"
)
return airport
| [
"git@xoolive.org"
] | git@xoolive.org |
bd78505b0cfbc80351ec303ebf0a8a3b65befbfb | 75d318b2f125ec1d08195f12f8cc3870b3aa3056 | /tests/importer/test_springer_dojson.py | 9842bcd989109623adacf2194c5674ceffdd1352 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | topless/cds-books | d496913da2fdbcc42bf7bc7987fe60f80d6af35a | e587fec6c191fddebce3c3f9f61aae625db31254 | refs/heads/master | 2023-01-24T19:19:25.923716 | 2020-11-25T13:20:48 | 2020-11-26T07:49:11 | 257,366,115 | 0 | 0 | MIT | 2020-04-20T18:12:40 | 2020-04-20T18:12:39 | null | UTF-8 | Python | false | false | 3,655 | py | import os
from cds_dojson.marc21.utils import create_record
from cds_ils.importer.providers.springer.springer import model
# Skeleton MARCXML collection document; ``{0}`` receives the record body
# supplied by each test via check_transformation.
marcxml = (
    """<collection xmlns="http://www.loc.gov/MARC21/slim">"""
    """<record>{0}</record></collection>"""
)
def check_transformation(marcxml_body, json_body):
    """Assert that importing *marcxml_body* yields exactly *json_body*."""
    parsed = create_record(marcxml.format(marcxml_body))
    actual = {}
    actual.update(**model.do(parsed, ignore_missing=True))
    wanted = {}
    wanted.update(**json_body)
    assert actual == wanted
def test_springer_transformation(app):
    """Test springer record import translation."""
    # Load the fixture MARCXML record shipped next to this test module.
    dirname = os.path.join(os.path.dirname(__file__), "data")
    with open(os.path.join(dirname, "springer_record.xml"), "r") as fp:
        example = fp.read()
    with app.app_context():
        # Expected JSON produced by the Springer dojson model for the fixture.
        check_transformation(
            example,
            {
                "_eitem": {
                    "internal_note": "Physics and Astronomy (R0) "
                    "(SpringerNature-43715)",
                    "urls": [
                        {
                            "description": "E-book by Springer",
                            "value": "https://doi.org/10.1007/b100336",
                        }
                    ],
                },
                "provider_recid": "978-0-306-47915-1",
                "_serial": [
                    {
                        "title": "Advances in Nuclear Physics ;",
                        "volume": "26",
                    }
                ],
                "abstract": "The four articles ...",
                "agency_code": "DE-He213",
                "alternative_titles": [
                    {"type": "SUBTITLE", "value": "Volume 26 /"}
                ],
                "alternative_identifiers": [
                    {"scheme": "SPRINGER", "value": "978-0-306-47915-1"}
                ],
                "authors": [
                    {"full_name": "Negele, J.W.", "roles": ["EDITOR"]},
                    {"full_name": "Vogt, Erich W.", "roles": ["EDITOR"]},
                ],
                "document_type": "BOOK",
                "edition": "1st ed. 2001.",
                "identifiers": [
                    {"scheme": "ISBN", "value": "9780306479151"},
                    {"scheme": "ISBN", "value": "9780306479151X"},
                ],
                "imprint": {
                    "date": "2001.",
                    "place": "New York, NY :",
                    "publisher": "Springer US :, Imprint: Springer,",
                },
                "keywords": [
                    {"source": "SPR", "value": "Nuclear physics."},
                    {"source": "SPR", "value": "Heavy ions."},
                    {
                        "source": "SPR",
                        "value": "Nuclear Physics, Heavy Ions, Hadrons.",
                    },
                ],
                "number_of_pages": "386",
                "publication_year": "2001.",
                "subjects": [
                    {"scheme": "LoC", "value": "QC770-798"},
                    {"scheme": "LoC", "value": "QC702.7.H42"},
                    {"scheme": "Dewey", "value": "539.7092"},
                ],
                "table_of_content": [
                    "The Spin Structure of the Nucleon",
                    "Liquid-Gas Phase Transition in Nuclear "
                    "Multifragmentation",
                    "High Spin Properties of Atomic Nuclei",
                    "The Deuteron: Structure and Form Factors.",
                ],
                "title": "Advances in Nuclear Physics",
            },
        )
| [
"38131488+kprzerwa@users.noreply.github.com"
] | 38131488+kprzerwa@users.noreply.github.com |
affe4860d2c5900e87b8d5ca7f0a608119533a85 | 46ac0965941d06fde419a6f216db2a653a245dbd | /sdks/python/test/test_DistributionGroupUserGetResponse.py | f382222ea7f9f529b0ed88420c679534d6be68a3 | [
"MIT",
"Unlicense"
] | permissive | b3nab/appcenter-sdks | 11f0bab00d020abb30ee951f7656a3d7ed783eac | bcc19c998b5f648a147f0d6a593dd0324e2ab1ea | refs/heads/master | 2022-01-27T15:06:07.202852 | 2019-05-19T00:12:43 | 2019-05-19T00:12:43 | 187,386,747 | 0 | 3 | MIT | 2022-01-22T07:57:59 | 2019-05-18T17:29:21 | Python | UTF-8 | Python | false | false | 1,066 | py | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from DistributionGroupUserGetResponse.clsDistributionGroupUserGetResponse import DistributionGroupUserGetResponse # noqa: E501
from appcenter_sdk.rest import ApiException
class TestDistributionGroupUserGetResponse(unittest.TestCase):
    """DistributionGroupUserGetResponse unit test stubs"""
    # Auto-generated placeholder: no fixtures needed yet.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testDistributionGroupUserGetResponse(self):
        """Test DistributionGroupUserGetResponse"""
        # FIXME: construct object with mandatory attributes with example values
        # model = appcenter_sdk.models.clsDistributionGroupUserGetResponse.DistributionGroupUserGetResponse() # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
| [
"b3nab@users.noreply.github.com"
] | b3nab@users.noreply.github.com |
60c9114f98ec2437c232bbed5e91ca263ce743d2 | a6432a0443b0f32b68baa4632397bc83abb0dfd4 | /ugali/scratch/PlotValidation.py | 764c76c047d5df17b9d1da20201c34dfbead05b6 | [
"MIT"
] | permissive | norashipp/ugali | d795c016e52d8c9639b214bd83ca610d052cb10f | 812bd9222737b9ffd36cfc2f2d058d948fc0522a | refs/heads/master | 2021-01-19T17:06:08.030637 | 2016-07-08T14:14:57 | 2016-07-08T14:14:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,430 | py | #!/usr/bin/env python
from os.path import splitext,basename
import pylab as plt
import numpy as np
if __name__ == "__main__":
    # NOTE(review): optparse is deprecated in favour of argparse.
    from optparse import OptionParser
    usage = "Usage: %prog [options] input"
    description = "python script"
    parser = OptionParser(usage=usage,description=description)
    (opts, args) = parser.parse_args()
    for arg in args:
        if splitext(arg)[1] != '.dat':
            raise Exception('Input not .dat files')
        # Filename convention: <label>_<field1>_..._<ext>.dat, split on '_'.
        params = basename(arg).split('_')
        label = params[0]
        ext = params[4]
        # NOTE(review): eval() of arbitrary file contents — only safe for
        # trusted .dat files written by the matching producer script.
        s = ''.join(open(arg).readlines())
        results = eval(s)
        logLike = np.array(results['log_likelihood'])  # NOTE(review): unused
        low,rich,up = np.array([results['richness_lower'],
                                results['richness'],
                                results['richness_upper']])
        mass = np.array(results['stellar_mass'])
        # Convert richness (and its bounds) to stellar mass.
        norm = mass/rich
        low *= norm
        rich *= norm
        up *= norm
        plt.errorbar(range(len(rich)), rich, yerr=[rich-low, up-rich], fmt='o',label=label)
        plt.axhline(np.mean(rich),ls='--',color='r')
    # Reference lines at the Monte Carlo input masses (from the last file read).
    for mc_mass in np.unique(results['mc_stellar_mass']):
        plt.axhline(mc_mass,ls='--',color='k')
    plt.title(r'Likelihood Comparison ($r_h = %s$ deg)'%ext)
    plt.ylabel(r'Stellar Mass ($M_{\odot}$)')
    plt.legend(loc='upper right')
    plt.savefig("%s_%s.png"%(params[1],ext))
| [
"kadrlica@fnal.gov"
] | kadrlica@fnal.gov |
3166b0f241324912b43ecb7470e3cd06ddf95705 | 7d5641f61e9317984dc656ae43658f9f09a3d2c5 | /72.py | bec0164cd0e98e23a3695b3ae6615592e2bebae6 | [
"MIT"
] | permissive | r9y9/nlp100 | 51ec6b1a13128a53eeda9ff110d122211f0fceba | 391ca6c4fb8afc074ed825404d4ad4efc4467f05 | refs/heads/master | 2021-01-25T09:00:54.021853 | 2017-06-09T17:32:52 | 2017-06-09T17:32:52 | 93,770,527 | 18 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | from stemming.porter2 import stem
import sys
import numpy as np
# Build word -> frequency over the corpus; each line is "<label> <words...>",
# so the first token is skipped and the rest are lowercased and stemmed.
vocab = {}
with open("sentiment.txt") as f:
    lines = f.readlines()
    for line in lines:
        line = line[:-1].lower()  # drop the trailing newline
        words = line.split(" ")[1:]
        words = list(map(stem, words))
        for word in words:
            if word in vocab:
                vocab[word] += 1
            else:
                vocab[word] = 1
def build_idx(vocab):
    """Map each vocabulary word to a unique dense integer index.

    Indices follow the dict's iteration order, exactly as the original
    counter-based loop did; its unused value variable and always-true
    length assert are dropped in favour of enumerate.
    """
    return {word: index for index, word in enumerate(vocab)}
def sentence2features(words, vocab, word2idx):
    """Return a bag-of-words count vector for *words*.

    Args:
        words: iterable of tokens; every token must be a key of word2idx.
        vocab: the vocabulary dict (only its length is used here).
        word2idx: word -> column index mapping (see build_idx).

    Returns:
        numpy int array of shape (len(vocab),) with per-word counts.

    Fix: the deprecated alias ``np.int`` was removed in NumPy 1.24; the
    builtin ``int`` is the documented replacement.
    """
    x = np.zeros(len(vocab), dtype=int)
    for word in words:
        x[word2idx[word]] += 1
    return x
# Take the K most frequent words as stopwords.
K = 13
# NOTE(review): sorted(..., reverse=True) would avoid the [::-1] reversal.
stopwords = sorted(vocab.items(), key=lambda x: x[1])[:: -1][: K]
for k, v in stopwords:
    print(k, v)
stopwords_dict = dict(stopwords)  # word -> count, for O(1) membership tests
print(len(vocab))
print(vocab.get("like"))
word2idx = build_idx(vocab)
def is_stopword(x, stopwords_dict=stopwords_dict):
    """Return True when *x* is one of the K most frequent (stop)words."""
    # Idiom fix: return the membership test directly instead of
    # if/return True/return False.
    return x in stopwords_dict

def is_not_stopword(x, stopwords_dict=stopwords_dict):
    """Return True when *x* is not a stopword; used as a filter predicate."""
    return not is_stopword(x, stopwords_dict)
# Build the feature matrix X and label vector Y from the corpus:
# label "+1" -> 0, anything else -> 1; stopwords are filtered out
# before vectorising.
X = []
Y = []
with open("sentiment.txt") as f:
    lines = f.readlines()
    for line in lines:
        line = line[: -1].lower()
        y, words = line.split(" ")[0], line.split(" ")[1:]
        y = 0 if (y == "+1") else 1
        words = list(map(stem, words))
        words = list(filter(is_not_stopword, words))
        x = sentence2features(words, vocab, word2idx)
        X.append(x)
        Y.append(y)
X = np.array(X)
Y = np.array(Y)
print(X.shape)
print(Y.shape)
# NOTE(review): looks like a debugging early-exit left in place — everything
# after this line is unreachable.
sys.exit(0)
| [
"zryuichi@gmail.com"
] | zryuichi@gmail.com |
603f0a1c5a4fbe205d9f940387c07dbb827e0755 | d780df6e068ab8a0f8007acb68bc88554a9d5b50 | /python/g1/messaging/tests/test_servers.py | 712fd78186bd0542a9ffc468cae2f57cbe9277a7 | [
"MIT"
] | permissive | clchiou/garage | ed3d314ceea487b46568c14b51e96b990a50ed6f | 1d72863d3a5f5d620b170f4dd36f605e6b72054f | refs/heads/master | 2023-08-27T13:57:14.498182 | 2023-08-15T07:09:57 | 2023-08-15T19:53:52 | 32,647,497 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,217 | py | import unittest
import unittest.mock
import uuid
from g1.asyncs import kernels
from g1.asyncs.bases import tasks
from g1.messaging import reqrep
from g1.messaging.reqrep import clients
from g1.messaging.reqrep import servers
from g1.messaging.wiredata import jsons
class InvalidRequestError(Exception):
    """Declared error type; passed to servers.Server as its invalid-request
    error instance in these tests."""
    pass
class InternalServerError(Exception):
    """Declared error type; passed to servers.Server as its internal-server
    error instance in these tests."""
    pass
@reqrep.raising(InvalidRequestError, InternalServerError)
class TestInterface:
    """Interface fed to reqrep.generate_interface_types; method bodies are
    placeholders and are never executed."""
    @reqrep.raising(ValueError)
    def greet(self, name: str) -> str:
        raise NotImplementedError
    # f is declared here but deliberately left unimplemented by
    # TestApplication (used to trigger the unknown-method path).
    def f(self):
        raise NotImplementedError
    def g(self):
        raise NotImplementedError
    def h(self):
        raise NotImplementedError
@reqrep.raising(InternalServerError)
class TestOnlyOneError:
    """Interface declaring exactly one error type; exercised by
    ServerTest.test_only_one_error."""
    def f(self):
        raise NotImplementedError
# Don't inherit from ``TestInterface`` because we intentionally leave
# out ``f`` unimplemented.
# Don't inherit from ``TestInterface`` because we intentionally leave
# out ``f`` unimplemented.
class TestApplication:
    """Server-side application used by the tests below."""
    # pylint: disable=no-self-use

    async def greet(self, name):
        """Return the greeting string for *name*."""
        greeting = 'Hello, %s' % name
        return greeting

    async def g(self):
        """Return a value the wire format cannot serialize."""
        return object()

    async def h(self):
        """Raise an error type that is not declared in the interface."""
        raise RuntimeError
# Concrete request/response types generated from the interface, plus the
# JSON wire format shared by client and server in these tests.
Request, Response = reqrep.generate_interface_types(TestInterface, 'Test')
WIRE_DATA = jsons.JsonWireData()
class ServerTest(unittest.TestCase):
    """Exercises servers.Server: error-type mapping, request dispatch
    (_serve) and a full client/server round trip over an inproc socket."""
    def test_only_one_error(self):
        # A single declared error maps to its snake_case wire tag.
        request_type, response_type = \
            reqrep.generate_interface_types(TestOnlyOneError)
        server = servers.Server(
            TestOnlyOneError(),
            request_type,
            response_type,
            WIRE_DATA,
        )
        self.assertEqual(
            server._declared_error_types,
            {InternalServerError: 'internal_server_error'},
        )
    @kernels.with_kernel
    def test_serve(self):
        server = servers.Server(
            TestApplication(),
            Request,
            Response,
            WIRE_DATA,
            invalid_request_error=InvalidRequestError(),
            internal_server_error=InternalServerError(),
        )
        # Happy path: greet round-trips through the wire format.
        wire_request = WIRE_DATA.to_lower(
            Request(args=Request.m.greet(name='world'))
        )
        self.assertEqual(
            WIRE_DATA.to_upper(
                Response,
                kernels.run(server._serve(wire_request)),
            ),
            Response(result=Response.Result(greet='Hello, world')),
        )
        # Undecodable request bytes -> invalid-request error response.
        with self.assertLogs(servers.__name__, level='DEBUG') as cm:
            self.assertEqual(
                kernels.run(server._serve(b'')),
                server._invalid_request_error_wire,
            )
        self.assertRegex('\n'.join(cm.output), r'to_upper error: ')
        # Method declared in the interface but absent from the application.
        wire_request = WIRE_DATA.to_lower(Request(args=Request.m.f()))
        with self.assertLogs(servers.__name__, level='DEBUG') as cm:
            self.assertEqual(
                kernels.run(server._serve(wire_request)),
                server._invalid_request_error_wire,
            )
        self.assertRegex('\n'.join(cm.output), r'unknown method: f: ')
        # Result that the wire format cannot serialize.
        wire_request = WIRE_DATA.to_lower(Request(args=Request.m.g()))
        with self.assertLogs(servers.__name__, level='DEBUG') as cm:
            self.assertEqual(
                kernels.run(server._serve(wire_request)),
                server._internal_server_error_wire,
            )
        self.assertRegex('\n'.join(cm.output), r'to_lower error: ')
        # Application raises an error not declared in the interface.
        wire_request = WIRE_DATA.to_lower(Request(args=Request.m.h()))
        with self.assertLogs(servers.__name__, level='DEBUG') as cm:
            self.assertEqual(
                kernels.run(server._serve(wire_request)),
                server._internal_server_error_wire,
            )
        self.assertRegex('\n'.join(cm.output), r'server error: ')
    @kernels.with_kernel
    def test_end_to_end(self):
        def do_test(client, server, server_serve):
            url = 'inproc://%s' % uuid.uuid4()
            server.socket.listen(url)
            client.socket.dial(url)
            server_task = tasks.spawn(server_serve)
            client_task = tasks.spawn(client.m.greet(name='world'))
            # Short run: the client completes, the serve loop keeps running.
            with self.assertRaises(kernels.KernelTimeout):
                kernels.run(timeout=0.005)
            self.assertTrue(client_task.is_completed())
            self.assertEqual(
                client_task.get_result_nonblocking(), 'Hello, world'
            )
            self.assertFalse(server_task.is_completed())
            # Closing the socket lets the serve loop exit cleanly.
            server.socket.close()
            kernels.run(timeout=1)
            self.assertTrue(server_task.is_completed())
            self.assertIsNone(server_task.get_result_nonblocking())
        # Once with the server as a context manager ...
        app = TestApplication()
        with servers.Server(app, Request, Response, WIRE_DATA) as server:
            with clients.Client(Request, Response, WIRE_DATA) as client:
                do_test(client, server, server.serve)
        # ... and once entering it after construction.
        app = TestApplication()
        server = servers.Server(app, Request, Response, WIRE_DATA)
        with clients.Client(Request, Response, WIRE_DATA) as client:
            with server:
                do_test(client, server, server.serve)
if __name__ == '__main__':
    unittest.main()
| [
"clchiou@gmail.com"
] | clchiou@gmail.com |
e17e6c8ecbe5505cbafb9db285c12c2de832cc48 | 62def70e2d802375b1ad28b0ac85fee2010ee0a9 | /flask/server/app2-BBIO.py | 6f563a5c2b1d5a7dd221b84ec1d0f31098dcec96 | [] | no_license | MarkAYoder/BeagleBoard-exercises | c48028b6e919d8c04dedfd2040a133c760f0f567 | 2fab7c7f7aa09bf101168dfb279e690bc43a6514 | refs/heads/master | 2023-07-22T08:06:19.482358 | 2023-07-12T19:24:51 | 2023-07-12T19:24:51 | 5,111,513 | 48 | 41 | null | 2021-07-29T18:02:29 | 2012-07-19T15:07:14 | JavaScript | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/env python3
# From: https://towardsdatascience.com/python-webserver-with-flask-and-raspberry-pi-398423cc6f5d
'''
Raspberry Pi GPIO Status and Control
'''
import Adafruit_BBIO.GPIO as GPIO
from flask import Flask, render_template
app = Flask(__name__)
# Header pin wired to the push button.
button = "P9_11"
# NOTE(review): this module-level value is never read; index() shadows it
# with a fresh GPIO reading on every request.
buttonSts = GPIO.LOW
# Set button as an input
GPIO.setup(button, GPIO.IN)
@app.route("/")
def index():
# Read Button Status
buttonSts = GPIO.input(button)
templateData = {
'title' : 'GPIO input Status!',
'button' : buttonSts,
}
return render_template('index2.html', **templateData)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8081, debug=True) | [
"Mark.A.Yoder@Rose-Hulman.edu"
] | Mark.A.Yoder@Rose-Hulman.edu |
233f0d40f307d09932e85c5edd77a34aeee76a96 | d0efa2026b7ed22ff4f9aa76c27ae2474c30f26d | /test/test_footer.py | 3535a50683a08f16c069762bca0020279cf550fd | [] | no_license | begum-akbay/Python | 2075650e0ddbf1c51823ebd749742646bf221603 | fe8b47e29aae609b7510af2d21e53b8a575857d8 | refs/heads/master | 2023-03-28T00:11:00.997194 | 2021-03-25T16:38:17 | 2021-03-25T16:38:17 | 351,499,957 | 0 | 0 | null | 2021-03-25T16:38:17 | 2021-03-25T16:15:16 | Python | UTF-8 | Python | false | false | 1,022 | py | # coding: utf-8
"""
Payment Gateway API Specification.
The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501
The version of the OpenAPI document: 21.1.0.20210122.001
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.footer import Footer # noqa: E501
from openapi_client.rest import ApiException
class TestFooter(unittest.TestCase):
    """Unit-test scaffold for the generated ``Footer`` model (stub)."""

    def setUp(self):
        """No fixtures are needed for this stub."""

    def tearDown(self):
        """Nothing to clean up."""

    def testFooter(self):
        """Test Footer"""
        # FIXME: construct object with mandatory attributes with example values
        # model = openapi_client.models.footer.Footer()  # noqa: E501
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| [
"emargules@bluepay.com"
] | emargules@bluepay.com |
65fc44fd2cb52915fa25c53464419caacae2ac56 | 2b6fa34dac030ec1f2918b1377956bf791219d22 | /leetcode/medium/search-a-2d-matrix.py | 172983011abfef17fc0165532c2d6bed9454af6f | [
"MIT"
] | permissive | rainzhop/cumulus-tank | aa13fb8f14c27893838a67d2eb69fdd2ac3d6450 | 09ebc7858ea53630e30606945adfea856a80faa3 | refs/heads/master | 2020-06-06T23:24:37.498966 | 2020-01-06T09:52:16 | 2020-01-06T09:52:16 | 192,874,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | # https://leetcode.com/problems/search-a-2d-matrix/
#
# Write an efficient algorithm that searches for a value in an m x n matrix.
# This matrix has the following properties:
#
# Integers in each row are sorted from left to right.
# The first integer of each row is greater than the last integer of the previous row.
#
# For example,
# Consider the following matrix:
# [
# [1, 3, 5, 7],
# [10, 11, 16, 20],
# [23, 30, 34, 50]
# ]
# Given target = 3, return true.
class Solution(object):
    """LeetCode 74: search a value in a row-major sorted matrix."""

    def searchMatrix(self, matrix, target):
        """
        Return True if *target* occurs in *matrix*, else False.

        The matrix rows are sorted left-to-right and each row's first
        element is greater than the previous row's last element, so a
        binary search over rows followed by a membership test suffices.

        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool
        """
        # Guard against an empty matrix or empty rows (the original
        # indexed matrix[0][0] unconditionally and could raise IndexError).
        if not matrix or not matrix[0]:
            return False
        # Quick reject when target lies outside the matrix's value range.
        if target < matrix[0][0] or target > matrix[-1][-1]:
            return False
        # Binary-search for the first row whose last element is >= target.
        left, right = 0, len(matrix) - 1
        while left != right:
            # '//' keeps integer division on Python 3 (the original '/'
            # produced a float index there and crashed).
            mid = (left + right) // 2
            # BUG FIX: must keep row `mid` when target EQUALS its last
            # element; the original '<' skipped past the matching row
            # (e.g. target=7 in the sample matrix wrongly returned False).
            if target <= matrix[mid][-1]:
                right = mid
            else:
                left = mid + 1
        return target in matrix[left]
| [
"rainzhop@gmail.com"
] | rainzhop@gmail.com |
87565ac9145b88260426230d9548b3bfc3db3b37 | edbb63696580638af0084ee318d2c9bc9e8c7e79 | /text_wc_2.py | 8cd11af58e5aef643a13ed0776bd13036ca2bad0 | [] | no_license | awaddell77/Scrapers | fef34e34b8e039f4992497cae75135cdb57b2581 | 0a36fb2c99f2d7b90533834b29c0ba8f27c13a85 | refs/heads/master | 2020-05-21T13:44:06.524855 | 2020-03-16T23:00:45 | 2020-03-16T23:00:45 | 62,753,048 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | def text_wc(x,output='listoutput.txt', directory = 'C:\\Users\\Owner\\', v = 0):#takes list writes to text
n_l = x
name = directory + output
with open(name, 'w') as wf:
for i in range(0, len(n_l)):
if v != 0:
print(n_l[i])
new = n_l[i]
wf.writelines(new)
else:
new = n_l[i]
wf.writelines(new)
print("%s saved to %s" % (output, directory))
return True | [
"waddell.andrew@gmail.com"
] | waddell.andrew@gmail.com |
7b296749e9a4ef07b6b90aa915c8778c1bd21a26 | 9f951479d5eda96e7fecbbbd0b3b7e4f5e83360d | /webtest/全栈课程代码学员版/Level2Code/Level2Code/lesson6URL/LessonCode/firstsite/firstapp/views.py | 8f9b6e20978ccae64d476c71c7351b075f6ff647 | [] | no_license | lianbo2006/Project | 44c5b6fcab4fe31b80bfff467b3e0e31fd2da8ba | 5d13923817a1d4cffe7d4abbb5873277ce28bb87 | refs/heads/master | 2021-01-11T18:24:47.597849 | 2017-04-25T03:44:47 | 2017-04-25T03:44:47 | 79,539,300 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,635 | py | from django.shortcuts import render, HttpResponse, redirect
from firstapp.models import Aritcle, Comment
from django.template import Context, Template
from firstapp.form import CommentForm
def index(request):
    """Render the article list page, optionally filtered by ``?tag=<value>``.

    Removed the leftover debug prints of the request object (repr, dir and
    type were dumped to stdout on every hit).
    """
    # NOTE(review): an empty ?tag= value is falsy and falls through to
    # "all articles" — confirm that is the intended behavior.
    tag = request.GET.get('tag')
    if tag:
        article_list = Aritcle.objects.filter(tag=tag)
    else:
        article_list = Aritcle.objects.all()
    context = {'article_list': article_list}
    return render(request, 'first_web_2.html', context)
def detail(request, page_num, error_form=None):
    """Render the detail page for the article with id *page_num*.

    error_form -- a bound, invalid CommentForm passed back by
        ``detail_comment`` so validation errors can be re-displayed;
        ``None`` means render the default form.
    """
    context = {}
    # NOTE(review): this binds the CommentForm *class*, not an instance;
    # Django templates tolerate it, but CommentForm() would be conventional.
    form = CommentForm
    a = Aritcle.objects.get(id=page_num)
    # Comment flagged as "best" for this article, if any exists.
    best_comment = Comment.objects.filter(best_comment=True, belong_to=a)
    if best_comment:
        context['best_comment'] = best_comment[0]
    # NOTE(review): second fetch of the same article; `a` above could be reused.
    article = Aritcle.objects.get(id=page_num)
    context['article'] = article
    # context['comment_list'] = comment_list
    # Prefer the bound form (with errors) over the pristine one.
    if error_form is not None:
        context['form'] = error_form
    else:
        context['form'] = form
    return render(request, 'article_detail.html', context)
def detail_comment(request, page_num):
    """Handle a posted comment for the article identified by *page_num*."""
    form = CommentForm(request.POST)
    if not form.is_valid():
        # Re-render the detail page with the bound form so errors show up.
        return detail(request, page_num, error_form=form)
    article = Aritcle.objects.get(id=page_num)
    new_comment = Comment(
        name=form.cleaned_data['name'],
        comment=form.cleaned_data['comment'],
        belong_to=article,
    )
    new_comment.save()
    return redirect(to='detail', page_num=page_num)
| [
"513748889@qq.com"
] | 513748889@qq.com |
77080eb1b0672d0647b0341846a888bc476e3a0a | d18d7f86a1e701caada063d09ee00fe08a95e353 | /test/kapp/sys/ys/calc/calc_mpi_openmp_stategen/intel/runtest.py | 047f4715d694e464ec32c28be77152923524d7a0 | [
"BSD-3-Clause"
] | permissive | E3SM-Project/KGen | 2e097b2ef979b42b094089f337d49240838aa13b | c0035c93d21286da6519a74ff527b6a009781de4 | refs/heads/master | 2021-02-14T00:01:10.939108 | 2020-06-15T18:49:58 | 2020-06-15T18:49:58 | 244,747,822 | 3 | 0 | NOASSERTION | 2020-03-03T21:43:57 | 2020-03-03T21:43:56 | null | UTF-8 | Python | false | false | 1,790 | py | import sys
from kapp_sys_ys_calc_calc_mpi_openmp_stategen_test import KAppSysYSCalcCOSTest
import time
class Test(KAppSysYSCalcCOSTest):
    """KGen system test: extract a kernel from the calc MPI+OpenMP app (Intel)."""

    def generate(self, myname, result):
        """Run kernel extraction in state-generation mode and record the outcome.

        myname -- key under which this task stores its results
        result -- shared dict populated by earlier pipeline tasks
        Returns the updated ``result`` dict.
        """
        # Working/output dirs prepared by the earlier mkdir/download tasks.
        workdir = result['mkdir_task']['workdir']
        tmpsrc = result['download_task']['tmpsrc']
        srcfile = '%s/bridge_mod.F90'%tmpsrc
        # Environment setup executed before both the build and run phases.
        prerun = 'module swap intel intel/16.0.1; module try-load impi/5.0.1.035'
        # Drive kgen: clean/build/run via Makefile.mpirun, sampling the listed
        # MPI-rank/thread invocations, with MPI and OpenMP support enabled.
        passed, out, err = self.extract_kernel(srcfile, None, \
            __cmd_clean='"cd %s; make -f Makefile.mpirun clean"'%tmpsrc, \
            __cmd_build='"cd %s; make -f Makefile.mpirun build"'%tmpsrc, \
            __cmd_run='"cd %s; make -f Makefile.mpirun run"'%tmpsrc, \
            __invocation='0-1:0:1,2-3:0:3', \
            __timing='repeat=1', \
            __prerun='build="%s",run="%s"'%(prerun, prerun), \
            __mpi='enable', \
            __openmp='enable,omp_num_threads=10', \
            __rebuild='state', \
            __outdir=workdir)
            #__debug='printvar=:i,:j,:output',
        result[myname]['stdout'] = out
        result[myname]['stderr'] = err
        result[myname]['datadir'] = '%s/data'%workdir
        if passed:
            # State files expected for the four sampled rank/invocation pairs.
            result[myname]['statefiles'] = ['update.0.0.1', 'update.1.0.1', 'update.2.0.3', 'update.3.0.3' ]
            self.set_status(result, myname, self.PASSED)
        else:
            result[myname]['statefiles'] = []
            self.set_status(result, myname, self.FAILED, 'STDOUT: %s\nSTDERR: %s'%(out, err))
        return result
if __name__ == "__main__":
    # we may allow to run this test individually
    # Direct execution is intentionally refused; exit code -1 signals misuse.
    print('Please do not run this script from command line. Instead, run this script through KGen Test Suite .')
    print('Usage: cd ${KGEN_HOME}/test; ./kgentest.py')
    sys.exit(-1)
| [
"youngsun@ucar.edu"
] | youngsun@ucar.edu |
67281564909457dc70bb7145aa9e43a1ee83e5a7 | d1bca991935232035b5a373e7b9199a73182fa3f | /0x09-utf8_validation/0-validate_utf8.py | 68d494f4f9fb3e74f157dc50405c7d48f6fa0897 | [] | no_license | felipeserna/holbertonschool-interview | 49ab15a099f5bf29e19d33e4ef738df4fd99d446 | d42763f7a82e551c9effcf2f0e9cf60f959559cd | refs/heads/main | 2023-07-14T08:28:52.457126 | 2021-08-26T16:14:50 | 2021-08-26T16:14:50 | 319,788,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | #!/usr/bin/python3
"""
Determines if a given data set represents a valid UTF-8 encoding
"""
def validUTF8(data):
    """
    Determines if a list of ints (one byte per int, low 8 bits used)
    represents a valid UTF-8 encoding.

    Return: True if data is a valid UTF-8 encoding, else return False
    """
    # Mask each integer to a single byte, then let the codec validate.
    octets = bytes(value & 0xFF for value in data)
    try:
        octets.decode()
    except UnicodeDecodeError:
        return False
    return True
| [
"feserna86@gmail.com"
] | feserna86@gmail.com |
55c9cd30b623e0a078ad1d54a62099bb5c2984c0 | 80edae503da03b3350fe458553860ea44a1a74dd | /backend/tasker_business/migrations/0001_initial.py | 1becb346c243dade1b06e637926556b3c9397ec7 | [] | no_license | crowdbotics-apps/our-protection-26031 | e4059bd616eff63f0d67ec869b49b9f6530fd2e6 | 7dbeef820b127efdf5c2a87d47a934b06c0ba87e | refs/heads/master | 2023-04-15T14:38:17.312365 | 2021-04-29T22:45:17 | 2021-04-29T22:45:17 | 362,954,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,277 | py | # Generated by Django 2.2.19 on 2021-04-29 22:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``tasker_business`` app (auto-generated).

    Creates the Timeslot, TaskerSkill, TaskerAvailability and BusinessPhoto
    tables.  Auto-generated migrations are normally left unedited.
    """

    initial = True

    # Must run after the apps whose models are referenced by FKs below.
    dependencies = [
        ("task_category", "0001_initial"),
        ("task_profile", "0001_initial"),
    ]

    operations = [
        # A bookable time window on a given date.
        migrations.CreateModel(
            name="Timeslot",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("date", models.DateField()),
                ("start_time", models.TimeField()),
                ("end_time", models.TimeField()),
            ],
        ),
        # A skill a tasker offers, with a rate and category/subcategory links.
        migrations.CreateModel(
            name="TaskerSkill",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=255)),
                ("rate", models.FloatField()),
                ("description", models.TextField()),
                (
                    "category",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="taskerskill_category",
                        to="task_category.Category",
                    ),
                ),
                (
                    "subcategory",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="taskerskill_subcategory",
                        to="task_category.Subcategory",
                    ),
                ),
                (
                    "tasker",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="taskerskill_tasker",
                        to="task_profile.TaskerProfile",
                    ),
                ),
            ],
        ),
        # One-to-one availability record holding a tasker's open timeslots.
        migrations.CreateModel(
            name="TaskerAvailability",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "tasker",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="taskeravailability_tasker",
                        to="task_profile.TaskerProfile",
                    ),
                ),
                (
                    "timeslots",
                    models.ManyToManyField(
                        related_name="taskeravailability_timeslots",
                        to="tasker_business.Timeslot",
                    ),
                ),
            ],
        ),
        # A photo (stored by URL) showcasing a tasker's business.
        migrations.CreateModel(
            name="BusinessPhoto",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("photo", models.URLField()),
                ("description", models.TextField()),
                (
                    "tasker",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="businessphoto_tasker",
                        to="task_profile.TaskerProfile",
                    ),
                ),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
4a97cb424cf33bfbbc4a39b5a6c9b7b242fab2db | 272032c7604e0e9627f4cf42967aa3230a8facbc | /lect7dendropy/pars.py | c5af0dc1b8f6ece33455f2e44c9d8135f55c2585 | [] | no_license | mtholder/eebprogramming | 78ebaf5ca2163cc7da977b411087bf164fce6059 | b7478643b4cb3ce91c299753eb346626640c3378 | refs/heads/master | 2021-01-01T18:34:27.502098 | 2010-05-06T16:26:50 | 2010-05-06T16:26:50 | 493,047 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,577 | py | #!/usr/bin/env python
import sys, logging
from dendropy.utility.messaging import get_logger
_LOG = get_logger('sankoff')
from dendropy import DataSet
_DEBUGGING = True
verbose = False
def get_min_edge_costs(cost_row, costs_for_one_child):
    """Return the minimum over states of (transition cost + child subtree cost).

    cost_row -- row of the step matrix for a fixed ancestral state
    costs_for_one_child -- per-state minimal costs for one child subtree
    """
    # `range` (not the Python-2-only `xrange`) keeps this portable to
    # Python 3; a single min() over the pairwise sums replaces the manual
    # running-minimum loop.
    return min(cost_row[i] + costs_for_one_child[i]
               for i in range(len(cost_row)))
def get_min_cost(step_mat_row, child_costs):
    """Sum, over all children, of the cheapest edge cost for one ancestral state."""
    return sum(get_min_edge_costs(step_mat_row, child_row)
               for child_row in child_costs)
def sankoff(postorder_node_list, taxa_to_state_set_map, step_matrix):
    """Compute the Sankoff (weighted-parsimony) score of a tree.

    postorder_node_list -- tree nodes in postorder (children before parents)
    taxa_to_state_set_map -- maps each taxon to a list (one entry per
        character) of sets of observed state indices (sets allow ambiguity)
    step_matrix -- square cost matrix; step_matrix[i][j] is the cost of a
        change from ancestral state i to descendant state j
    Returns the minimal total cost summed over all characters.
    """
    # Find the largest single-step cost to build a sentinel cost that can
    # never beat an observed (zero-cost) state when minimizing at a leaf.
    max_cost = 0
    num_states = len(step_matrix)
    for row in step_matrix:
        for cell in row:
            if cell > max_cost:
                max_cost = cell
    impossible_cost = 1 + max_cost
    impossible_cost_row = [impossible_cost] * num_states
    score = 0
    for nd in postorder_node_list:
        if nd.is_leaf():
            # Leaf: cost 0 for each observed state, sentinel cost otherwise.
            char_costs = []
            for char_ss in taxa_to_state_set_map[nd.taxon]:
                el = list(impossible_cost_row)
                for observed_state in char_ss:
                    el[observed_state] = 0
                char_costs.append(el)
            nd.char_costs = char_costs
            _LOG.debug(nd.taxon.label + ' -> ' + str(nd.char_costs))
        else:
            # Internal node: for every character and candidate ancestral
            # state, sum the cheapest transition into each child subtree.
            child_list = nd.child_nodes()
            char_costs = []
            num_patterns = len(child_list[0].char_costs)
            for pattern_index in xrange(num_patterns):
                child_costs = []
                for c in child_list:
                    child_costs.append(c.char_costs[pattern_index])
                el = []
                for anc_state in xrange(num_states):
                    c = get_min_cost(step_matrix[anc_state], child_costs)
                    el.append(c)
                char_costs.append(el)
            nd.char_costs = char_costs
            _LOG.debug('Internal node -> ' + str(nd.char_costs))
            # Root (no parent): the score is the per-character minimum over
            # ancestral states.  NOTE(review): this branch assumes the root
            # is internal; a single-leaf tree would never add to `score`.
            if not nd.parent_node:
                for pattern_index in xrange(num_patterns):
                    score += min(nd.char_costs[pattern_index])
    return score
def pars_score_tree(tree, taxa_to_states, step_matrix=None):
    """Return the Sankoff parsimony score of *tree* under *step_matrix*.

    When no step matrix is given, a 4-state Fitch-style matrix is used:
    unit cost between any two distinct states, zero on the diagonal.
    """
    if step_matrix is None:
        step_matrix = [[0 if row == col else 1 for col in range(4)]
                       for row in range(4)]
    postorder_nodes = list(tree.postorder_node_iter())
    return sankoff(postorder_nodes, taxa_to_states, step_matrix)
if __name__ == '__main__':
    # Command-line driver: score every tree in each NEXUS file given as an
    # argument.  (Python 2 script: uses `print` statement and iteritems.)
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-v', '--verbose',
                      dest='verbose',
                      action="store_true",
                      default=False,
                      help="Verbose execution mode")
    (options, args) = parser.parse_args()
    if len(args) == 0:
        sys.exit("Expecting a filename as an argument")
    if options.verbose:
        _LOG.setLevel(logging.DEBUG)
    tree_index = 0
    try:
        for f in args:
            fo = open(f, "rU")
            dataset = DataSet()
            dataset.read(stream=fo, schema="NEXUS")
            # Each input file must carry exactly one taxon set, one tree
            # block and one character matrix.
            if len(dataset.taxon_sets) != 1:
                raise ValueError("Expecting one set of taxa in %s" % f)
            taxon_set = dataset.taxon_sets[0]
            if len(dataset.tree_lists) != 1:
                raise ValueError("Expecting one tree block in %s" % f)
            tree_list = dataset.tree_lists[0]
            if len(dataset.char_matrices) != 1:
                raise ValueError("Expecting one character matrix in %s" % f)
            char_mat = dataset.char_matrices[0]
            num_char = len(char_mat[0])
            taxon_to_state_set = char_mat.create_taxon_to_state_set_map()
            for taxon, chars in taxon_to_state_set.iteritems():
                _LOG.debug(taxon.label + ' ' + str(chars))
            # Score each tree with a hard-coded asymmetric-cost step matrix
            # (transitions cost 1, transversions cost 5, by state layout).
            for tree in tree_list:
                _LOG.debug(str(tree))
                print pars_score_tree(tree, taxon_to_state_set, [ [0, 5, 1, 5],
                                                                  [5, 0, 5, 1],
                                                                  [1, 5, 0, 5],
                                                                  [5, 1, 5, 0],
                                                                ])
    except Exception as x:
        # In debugging mode re-raise for a full traceback; otherwise exit
        # with the error message only.
        if _DEBUGGING:
            raise
        sys.exit(str(x))
| [
"mtholder@gmail.com"
] | mtholder@gmail.com |
541d1592cf2ae6cebfff7a7b20f3462f581214cc | 30fb85c14e18956fe5690f4224e55d3fa34651ce | /ml_neural_networks_pybrain_hidden_layers.py | 74540ed511b295a20436d4a35911af2b1d035e41 | [] | no_license | TiagoArrazi/Semantix-Internship | 1d39cc310b75be3b395d3a7df1dde7543fa4db84 | 87f93db82e9594ce0911b4e6264a4981e4f0ac14 | refs/heads/master | 2020-04-02T22:53:25.937133 | 2020-01-17T12:58:40 | 2020-01-17T12:58:40 | 154,848,050 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | #! /usr/bin/env python3
# Classification with PyBrain - 3D XOR
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer
# Training data: the full truth table of 3-input XOR (odd parity).
dataset = SupervisedDataSet(3, 1)
dataset.addSample([0, 0, 0], [0])
dataset.addSample([0, 0, 1], [1])
dataset.addSample([0, 1, 0], [1])
dataset.addSample([0, 1, 1], [0])
dataset.addSample([1, 0, 0], [1])
dataset.addSample([1, 0, 1], [0])
dataset.addSample([1, 1, 0], [0])
# BUG FIX: this line previously read "ataset.addSample(...)" (missing "d"),
# which raised a NameError and dropped the [1,1,1] pattern from training.
dataset.addSample([1, 1, 1], [1])

# Two hidden layers of 6 units each; bias nodes enabled.
network = buildNetwork(dataset.indim, 6, 6, dataset.outdim, bias=True)
trainer = BackpropTrainer(network, dataset, learningrate=0.01, momentum=0.9)
trainer.trainEpochs(10000)

# Evaluate on the same truth table (a sanity check, not generalization).
test = SupervisedDataSet(3, 1)
test.addSample([0, 0, 0], [0])
test.addSample([0, 0, 1], [1])
test.addSample([0, 1, 0], [1])
test.addSample([0, 1, 1], [0])
test.addSample([1, 0, 0], [1])
test.addSample([1, 0, 1], [0])
test.addSample([1, 1, 0], [0])
test.addSample([1, 1, 1], [1])
trainer.testOnData(test, verbose=True)
| [
"tiago_arrazi98@outlook.com"
] | tiago_arrazi98@outlook.com |
0cf22fe3aa57e99a7fbc6dc4b130ea7deb1c47ac | 23e844d75a0214ed5ad48885d008e23516383610 | /toughio/_utils/relative_permeability/_fatt_klikoff.py | 4ce84e87d5101ff08fb032c15715b9972bbe9848 | [
"MIT"
] | permissive | codacy-badger/toughio | dc1b138a5defa7b00b0833526d67c372d9b6d1f5 | 8d4f3d8408d5507a83f65e7f393b13be08d42aca | refs/heads/master | 2021-02-04T21:32:49.721095 | 2020-02-28T07:49:33 | 2020-02-28T07:49:33 | 243,711,552 | 0 | 0 | MIT | 2020-02-28T08:15:36 | 2020-02-28T08:15:36 | null | UTF-8 | Python | false | false | 920 | py | from ._base import BaseRelativePermeability
__all__ = [
"FattKlikoff",
]
class FattKlikoff(BaseRelativePermeability):
    """Fatt and Klikoff's function.

    After Fatt and Klikoff (1959).

    Parameters
    ----------
    slr : scalar
        Irreducible liquid saturation (RP(1)).

    """

    _id = 7
    _name = "Fatt-Klikoff"

    def __init__(self, slr):
        # Bare ValueError() carried no diagnostic; add explicit messages
        # (exception type unchanged, so existing handlers still work).
        if slr >= 1.0:
            raise ValueError(
                "irreducible liquid saturation (slr) must be lower than 1.0"
            )
        self.parameters = [slr]

    def _eval(self, sl, slr):
        """Fatt and Klikoff's function."""
        # Effective saturation is zero at or below the irreducible saturation.
        Seff = (sl - slr) / (1.0 - slr) if sl > slr else 0.0
        # Cubic relative permeabilities for the liquid and gas phases.
        kl = Seff ** 3
        kg = (1.0 - Seff) ** 3
        return kl, kg

    @property
    def parameters(self):
        """Return model parameters."""
        return [self._slr]

    @parameters.setter
    def parameters(self, value):
        if len(value) != 1:
            raise ValueError(
                "Fatt-Klikoff expects exactly one parameter (slr), got %d"
                % len(value)
            )
        self._slr = value[0]
| [
"keurfonluu@outlook.com"
] | keurfonluu@outlook.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.