Column                 Type            Range / classes
---------------------  --------------  ------------------
blob_id                stringlengths   40 - 40
directory_id           stringlengths   40 - 40
path                   stringlengths   2 - 616
content_id             stringlengths   40 - 40
detected_licenses      listlengths     0 - 69
license_type           stringclasses   2 values
repo_name              stringlengths   5 - 118
snapshot_id            stringlengths   40 - 40
revision_id            stringlengths   40 - 40
branch_name            stringlengths   4 - 63
visit_date             timestamp[us]
revision_date          timestamp[us]
committer_date         timestamp[us]
github_id              int64           2.91k - 686M
star_events_count      int64           0 - 209k
fork_events_count      int64           0 - 110k
gha_license_id         stringclasses   23 values
gha_event_created_at   timestamp[us]
gha_created_at         timestamp[us]
gha_language           stringclasses   213 values
src_encoding           stringclasses   30 values
language               stringclasses   1 value
is_vendor              bool            2 classes
is_generated           bool            2 classes
length_bytes           int64           2 - 10.3M
extension              stringclasses   246 values
content                stringlengths   2 - 10.3M
authors                listlengths     1 - 1
author_id              stringlengths   0 - 212
blob_id: 070700610e801fade392728e0d895aed5093ad8b
directory_id: 8a695f0abff49d4e7be050dd278c504b80015456
path: /orbit.py
content_id: 3f0f082e184f53aa5b68bfbbf1a2fc41e1bc0e31
detected_licenses: []
license_type: no_license
repo_name: flucey/chem160module5
snapshot_id: 4e5015e053cd8021ae49264615ed157456ada177
revision_id: 399092ffa9a8a03921299f91578d7dca34414727
branch_name: refs/heads/master
visit_date: 2020-07-30T03:34:59.441757
revision_date: 2019-09-22T01:07:40
committer_date: 2019-09-22T01:07:40
github_id: 210,072,675
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 580
extension: py
content:

from math import sqrt
from drawtraj import drawtraj

def force(x,y,m,mstar):
    r2=x**2+y**2
    r32=r2*sqrt(r2)
    fx=-x*m*mstar/r32
    fy=-y*m*mstar/r32
    return fx,fy

def integrate(x,y,vx,vy,fx,fy,m,dt):
    ax,ay=fx/m,fy/m
    vx+=ax*dt
    vy+=ay*dt
    x+=vx*dt
    y+=vy*dt
    return x,y,vx,vy

# Main part of the program
mstar=100
m=1
nsteps=5000000
dt=0.01
r=50
x,y=0,r
vx,vy=0.2,0.4
trajx,trajy=[],[]
for t in range(nsteps):
    fx,fy=force(x,y,m,mstar)
    x,y,vx,vy=integrate(x,y,vx,vy,fx,fy,m,dt)
    trajx.append(x)
    trajy.append(y)
drawtraj(trajx,trajy,5*r)

authors: [ "noreply@github.com" ]
author_id: flucey.noreply@github.com
blob_id: 7250bd6ebf4c7254ac0d435bf15e73ef32fb2375
directory_id: 49cba1fba19d5a89c45c118404aad05606d11903
path: /Weekly_182/question.py
content_id: 9a65f31ca2ea82692a0cadf53d50c3b2aca94da1
detected_licenses: []
license_type: no_license
repo_name: bharadwajvaduguru8/leetcode_contests
snapshot_id: 9ec2f2ffca8f40c34f2bfb9bdaf0f4b0574f28f4
revision_id: 7faab423337e1bbb106e1b00ba723070fca946c1
branch_name: refs/heads/main
visit_date: 2023-04-18T17:56:24.785735
revision_date: 2021-04-26T05:34:31
committer_date: 2021-04-26T05:34:31
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 336
extension: py
content:

# python3

def nums(num):
    # Count how many times each value occurs in the input list.
    counts = {}  # use a regular name instead of shadowing the built-in dict
    for b in num:  # iterate over the list argument, not over the function itself
        counts[b] = counts.get(b, 0) + 1
    print(counts)

if __name__ == '__main__':
    num = list(map(int, input("Num: ").split()))
    nums(num)

authors: [ "noreply@github.com" ]
author_id: bharadwajvaduguru8.noreply@github.com
blob_id: 6d533a22fd7ba72d63f3d689c3fd96da7fecc29a
directory_id: dc893a23ea659a9aff10c972202abae113a31f8d
path: /causal_inference/code/CVPR2012_slidingwindow_action_detection/screen_30_9406.py
content_id: 9e3ea6fc8e19ee4e2ff4b281d8a8a25b4cf04d4f
detected_licenses: []
license_type: no_license
repo_name: scotfang/research
snapshot_id: f28ff4cdcdb579839fddabc7d77a31b324f45a78
revision_id: 424e0dce9e5083e75ac49a33765d1a9d7c2878e9
branch_name: refs/heads/master
visit_date: 2021-01-01T17:17:15.633209
revision_date: 2014-05-12T18:20:43
committer_date: 2014-05-12T18:20:43
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 166
extension: py
content:

temporal_parses = {
    704: {
        "usecomputer_START": {"energy": -0.000000, "agent": "uuid1"}
    },
    880: {
        "usecomputer_END": {"energy": -0.000000, "agent": "uuid1"}
    },
},

authors: [ "scotfang@gmail.com" ]
author_id: scotfang@gmail.com
blob_id: 3d7c1eb1ed4087a49c6fea97eca35696c9b6f325
directory_id: cf9484ee1147172138c702a95a6fe2fe862a9f58
path: /TravelBlog/asgi.py
content_id: dd4c115a3066fe06bd1aaf312488f8a7c6dca6a5
detected_licenses: []
license_type: no_license
repo_name: crankshaft1099/Travel-Blog
snapshot_id: 428ab46760c6546ae44a9f3d3d756a0daae0d6b6
revision_id: 4c6c2a8399946a02cedc38e9e4a1e80279d90869
branch_name: refs/heads/main
visit_date: 2023-03-11T22:01:16.704635
revision_date: 2021-03-05T17:48:13
committer_date: 2021-03-05T17:48:13
github_id: 344,887,935
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 413
extension: py
content:

"""
ASGI config for TravelBlog project.

It exposes the ASGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""

import os

from django.core.asgi import get_asgi_application

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TravelBlog.settings')

application = get_asgi_application()

authors: [ "noreply@github.com" ]
author_id: crankshaft1099.noreply@github.com
blob_id: 8730d58aed4341d6bb3e7fc6860ff9e1ad2d13ce
directory_id: 16f524cf688f3cc673883544bc8af4fed8bb0436
path: /demo.py
content_id: fdd0db6670f51bbb78674b2905ea0ae7ddb4e46c
detected_licenses: []
license_type: no_license
repo_name: Alroomi/Song-Classification
snapshot_id: de2fff43169eba4024f4e4111d9d80a5b7ad7098
revision_id: 066ebf50b27a4c25a19da3bc4df05d9a41a5efac
branch_name: refs/heads/master
visit_date: 2021-05-06T13:32:00.777472
revision_date: 2017-12-05T20:36:26
committer_date: 2017-12-05T20:36:26
github_id: 113,230,163
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 6,694
extension: py
content:

import os, os.path, sys

import h5py
import numpy as np
import pickle as pkl

from configs_genre import gen_config as genre_config
from configs_gender import gen_config as gender_config
from configs_year import gen_config as year_config
from models.ann import Classifier
from models.cnn import CNNClassifier
from feature_extractor import FeatureExtractor
from sklearn.utils.extmath import softmax
from utils import parse_csv_list_file


def load_models(task):
    if task.startswith('Year'):
        cfgs_1, nn_cfgs_1 = year_config(task, 'ann', 'decade')
        cfgs_2, nn_cfgs_2 = year_config(task, 'cnn', 'decade')
    elif task.startswith('Genre'):
        cfgs_1, nn_cfgs_1 = genre_config(task, 'ann')
        cfgs_2, nn_cfgs_2 = genre_config(task, 'cnn')
    elif task.startswith('Gender'):
        cfgs_1, nn_cfgs_1 = gender_config(task, 'ann')
        cfgs_2, nn_cfgs_2 = gender_config(task, 'cnn')
    ann = Classifier(nn_cfgs_1, log_dir=None)
    ann.restore('%s-%d' % (nn_cfgs_1['tf_sess_path'], cfgs_1['n_iters']))
    cnn = CNNClassifier(nn_cfgs_2, log_dir=None)
    cnn.restore('%s-%d' % (nn_cfgs_2['tf_sess_path'], cfgs_2['n_iters']))
    return ann, cnn


def init_ft(task):
    if task.startswith('Year'):
        cfgs, nn_cfgs = year_config(task, 'ann', 'decade')
    elif task.startswith('Genre'):
        cfgs, nn_cfgs = genre_config(task, 'ann')
    elif task.startswith('Gender'):
        cfgs, nn_cfgs = gender_config(task, 'ann')
    ft1 = FeatureExtractor(cfgs['feature_list'], cfgs['feature_pool'], cfgs['l2_norm'], cfgs['sr'], 1, cfgs['stride'])
    ft2 = FeatureExtractor(['melspectrogram'], 'none', cfgs['l2_norm'], cfgs['sr'], 3, cfgs['stride'])
    return ft1, ft2


def get_utils(task):
    if task.startswith('Year'):
        cfgs, nn_cfgs = year_config(task, 'ann', 'decade')
    elif task.startswith('Genre'):
        cfgs, nn_cfgs = genre_config(task, 'ann')
    elif task.startswith('Gender'):
        cfgs, nn_cfgs = gender_config(task, 'ann')
    _, cls_to_id, id_to_cls = parse_csv_list_file(cfgs['train_list_fname'])
    ann_mean = np.load('means/mean_%s_%s.npy' % (task, 'ann'))
    cnn_mean = np.load('means/mean_%s_%s.npy' % (task, 'cnn'))
    return cls_to_id, id_to_cls, ann_mean, cnn_mean


def build_label_map_year2decade(cls_to_id):
    new_cls_to_id = dict({})
    new_id_to_cls = dict({})
    old_to_new_ids = dict({})
    counter = 0
    years = cls_to_id.keys()
    years = sorted(years)
    for year in years:
        decade = '%d0s' % int(np.floor(int(year) / 10))
        if decade not in new_cls_to_id:
            new_cls_to_id[decade] = counter  # '19x0s' -> 0 ...
            new_id_to_cls[counter] = decade  # 0 -> '19x0s'
            counter += 1
        old_to_new_ids[cls_to_id[year]] = new_cls_to_id[decade]
    num_ids = len(new_id_to_cls.keys())
    return old_to_new_ids, new_id_to_cls, num_ids


def sum_score(scores):
    sum_scores = scores.sum(axis=0)
    final_pred = np.argmax(sum_scores)
    return final_pred


def ensemble(scores1, scores2):
    ss1 = scores1.sum(axis=0).reshape([1, -1])
    ss2 = scores2.sum(axis=0).reshape([1, -1])
    ss1 = softmax(ss1)
    ss2 = softmax(ss2)
    final_scores = ss1 + ss2
    final_pred = np.argmax(final_scores)
    return final_pred


def predict_genre(feat1, feat2, ann, cnn, cls_to_id, id_to_cls, mean1, mean2):
    # ann
    preds1, scores1 = ann.predict(feat1 - mean1)
    final_pred1 = sum_score(scores1)
    # cnn
    preds2, scores2 = cnn.predict(feat2 - mean2)
    final_pred2 = sum_score(scores2)
    # ensemble
    ensemble_pred = ensemble(scores1, scores2)
    print('--------------Genre Prediction--------------')
    print('FeatureExtraction1: %s' % id_to_cls[final_pred1])
    print('FeatureExtraction2: %s' % id_to_cls[final_pred2])
    print('Ensemble: %s' % id_to_cls[ensemble_pred])


def predict_gender(feat1, feat2, ann, cnn, cls_to_id, id_to_cls, mean1, mean2):
    # ann
    preds1, scores1 = ann.predict(feat1 - mean1)
    final_pred1 = sum_score(scores1)
    # cnn
    preds2, scores2 = cnn.predict(feat2 - mean2)
    final_pred2 = sum_score(scores2)
    # ensemble
    ensemble_pred = ensemble(scores1, scores2)
    print('--------------Gender Prediction--------------')
    print('FeatureExtraction1: %s' % id_to_cls[final_pred1])
    print('FeatureExtraction2: %s' % id_to_cls[final_pred2])
    print('Ensemble: %s' % id_to_cls[ensemble_pred])


def predict_year(feat1, feat2, ann, cnn, cls_to_id, id_to_cls, mean1, mean2):
    # ann
    preds1, scores1 = ann.predict(feat1 - mean1)
    final_pred1 = sum_score(scores1)
    # cnn
    preds2, scores2 = cnn.predict(feat2 - mean2)
    final_pred2 = sum_score(scores2)
    # ensemble
    ensemble_pred = ensemble(scores1, scores2)
    print('--------------Year Prediction--------------')
    print('FeatureExtraction1: %s (%s)' % (id_to_cls[final_pred1], id_to_cls[final_pred1]))
    print('FeatureExtraction2: %s (%s)' % (id_to_cls[final_pred2], id_to_cls[final_pred2]))
    print('Ensemble: %s (%s)' % (id_to_cls[ensemble_pred], id_to_cls[ensemble_pred]))


if __name__ == '__main__':
    # The original was missing the `assert` keyword here, making the line a no-op tuple.
    assert len(sys.argv) == 2, 'Path to folder containing the songs need to be provided'
    song_folder = sys.argv[1]  # ./data/6th Nov/genre/songs

    genre_ann, genre_cnn = load_models('Genre_fold_1')
    genre_cls_to_id, genre_id_to_cls, genre_ann_mean, genre_cnn_mean = get_utils('Genre_fold_1')
    ft1, ft2 = init_ft('Genre_fold_1')

    gender_ann, gender_cnn = load_models('Gender_fold_1')
    gender_cls_to_id, gender_id_to_cls, gender_ann_mean, gender_cnn_mean = get_utils('Gender_fold_1')

    year_ann, year_cnn = load_models('Year_fold_1')
    year_cls_to_id, year_id_to_cls, year_ann_mean, year_cnn_mean = get_utils('Year_fold_1')
    year_cls_to_id, year_id_to_cls, _ = build_label_map_year2decade(year_cls_to_id)

    filenames = os.listdir(song_folder)
    for fname in filenames:
        fname = fname.strip()
        if not fname.endswith('.mp3'):
            continue
        print('--------------------------------------------')
        print(fname)
        print('Extracting Feature 1 ...')
        feat1 = ft1.extract_feature(os.path.join(song_folder, fname))
        print('Extracting Feature 2 ...')
        feat2 = ft2.extract_spectrogram(os.path.join(song_folder, fname))
        print('Done.')
        predict_genre(feat1, feat2, genre_ann, genre_cnn, genre_cls_to_id, genre_id_to_cls, genre_ann_mean, genre_cnn_mean)
        predict_gender(feat1, feat2, gender_ann, gender_cnn, gender_cls_to_id, gender_id_to_cls, gender_ann_mean, gender_cnn_mean)
        predict_year(feat1, feat2, year_ann, year_cnn, year_cls_to_id, year_id_to_cls, year_ann_mean, year_cnn_mean)
        input("Press Enter to continue...")

authors: [ "31593907+Alroomi@users.noreply.github.com" ]
author_id: 31593907+Alroomi@users.noreply.github.com
blob_id: c379cf67ebd5bf094587e4de80fa170eb40a8b11
directory_id: 27518b095787e1d5a6d4926a1849401b1dab5f5a
path: /pi_snake/drawers/pi_draw.py
content_id: 04c3bc360ac1dcfc2d02e54ccc91c03b2947e101
detected_licenses: []
license_type: no_license
repo_name: mcmhav/pi-snake
snapshot_id: 94a3945f681be86ca3b93de8b3da02d2aa127763
revision_id: 0f672419e5d8f7b908cdbdfd61f3f529c74ac2e7
branch_name: refs/heads/master
visit_date: 2020-12-01T22:37:10.306834
revision_date: 2020-02-16T22:46:38
committer_date: 2020-02-16T22:46:38
github_id: 230,795,574
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 769
extension: py
content:

from typing import List

from .drawer import Drawer

B = (0, 0, 0)
R = (255, 0, 0)
G = (0, 255, 0)


class Pihat(Drawer):
    def __init__(self):
        from sense_hat import SenseHat  # pylint: disable=import-error
        self._sense = SenseHat()
        self._sense.set_rotation(180)

    def draw(self, board: List) -> None:
        pixels = []
        for row in board:
            for cell in row:
                if cell == 's':
                    pixels.append(R)
                elif cell == 'g':
                    pixels.append(G)
                else:
                    pixels.append(B)
        self._sense.set_pixels(pixels)

    def clear(self, game_summary: str) -> None:
        self._sense.show_message(game_summary)
        self._sense.clear()

authors: [ "mcmhav@gmail.com" ]
author_id: mcmhav@gmail.com
blob_id: d2b41f94eec0c608bee7477753703dafae3dccc8
directory_id: 98ca37f5dd2751efaa060cca19e0b83f871d7765
path: /sdk/metricsadvisor/azure-ai-metricsadvisor/tests/base_testcase_aad.py
content_id: a40496c6753e169137d95e6df644d3887e5da9a1
detected_licenses: [ "LicenseRef-scancode-generic-cla", "LGPL-2.1-or-later", "MIT" ]
license_type: permissive
repo_name: jayhebe/azure-sdk-for-python
snapshot_id: 5ea99732ebb9929d3f6f77c08cc640d5915970b1
revision_id: f4455f85d9fe747fa4de2fdc691b975c07bfeea5
branch_name: refs/heads/main
visit_date: 2023-06-24T01:22:06.602194
revision_date: 2021-07-28T02:12:25
committer_date: 2021-07-28T02:12:25
github_id: 390,290,984
star_events_count: 1
fork_events_count: 0
gha_license_id: MIT
gha_event_created_at: 2021-07-28T09:23:46
gha_created_at: 2021-07-28T09:23:46
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 20,507
extension: py
content:

# coding=utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------

import datetime

from devtools_testutils import AzureTestCase
from azure_devtools.scenario_tests import (
    ReplayableTest,
    create_random_name
)

from azure.ai.metricsadvisor import (
    MetricsAdvisorKeyCredential,
    MetricsAdvisorAdministrationClient,
    MetricsAdvisorClient,
)
from azure.ai.metricsadvisor.models import (
    SqlServerDataFeedSource,
    DataFeedSchema,
    DataFeedMetric,
    DataFeedDimension,
    DataFeedGranularity,
    DataFeedIngestionSettings,
    DataFeedMissingDataPointFillSettings,
    DataFeedRollupSettings,
    MetricAlertConfiguration,
    MetricAnomalyAlertScope,
    MetricAnomalyAlertConditions,
    MetricBoundaryCondition,
    TopNGroupScope,
    SeverityCondition,
    MetricDetectionCondition,
    MetricSeriesGroupDetectionCondition,
    MetricSingleSeriesDetectionCondition,
    SmartDetectionCondition,
    SuppressCondition,
    ChangeThresholdCondition,
    HardThresholdCondition,
    EmailNotificationHook,
    WebNotificationHook,
)
from azure.identity import DefaultAzureCredential


class MockCredential():
    def get_token(self, *scopes, **kwargs):
        from azure.core.credentials import AccessToken
        return AccessToken("fake-token", 0)


class TestMetricsAdvisorAdministrationClientBase(AzureTestCase):
    FILTER_HEADERS = ReplayableTest.FILTER_HEADERS + ['Ocp-Apim-Subscription-Key', 'x-api-key']

    def __init__(self, method_name):
        super(TestMetricsAdvisorAdministrationClientBase, self).__init__(method_name)
        self.vcr.match_on = ["path", "method", "query"]
        if self.is_live:
            service_endpoint = self.get_settings_value("METRICS_ADVISOR_ENDPOINT")
            self.sql_server_connection_string = self.get_settings_value("METRICS_ADVISOR_SQL_SERVER_CONNECTION_STRING")
            self.azure_table_connection_string = self.get_settings_value("METRICS_ADVISOR_AZURE_TABLE_CONNECTION_STRING")
            self.azure_blob_connection_string = self.get_settings_value("METRICS_ADVISOR_AZURE_BLOB_CONNECTION_STRING")
            self.azure_cosmosdb_connection_string = self.get_settings_value("METRICS_ADVISOR_COSMOS_DB_CONNECTION_STRING")
            self.application_insights_api_key = self.get_settings_value("METRICS_ADVISOR_APPLICATION_INSIGHTS_API_KEY")
            self.azure_data_explorer_connection_string = self.get_settings_value("METRICS_ADVISOR_AZURE_DATA_EXPLORER_CONNECTION_STRING")
            self.influxdb_connection_string = self.get_settings_value("METRICS_ADVISOR_INFLUX_DB_CONNECTION_STRING")
            self.influxdb_password = self.get_settings_value("METRICS_ADVISOR_INFLUX_DB_PASSWORD")
            self.azure_datalake_account_key = self.get_settings_value("METRICS_ADVISOR_AZURE_DATALAKE_ACCOUNT_KEY")
            self.mongodb_connection_string = self.get_settings_value("METRICS_ADVISOR_AZURE_MONGO_DB_CONNECTION_STRING")
            self.mysql_connection_string = self.get_settings_value("METRICS_ADVISOR_MYSQL_CONNECTION_STRING")
            self.postgresql_connection_string = self.get_settings_value("METRICS_ADVISOR_POSTGRESQL_CONNECTION_STRING")
            self.anomaly_detection_configuration_id = self.get_settings_value("METRICS_ADVISOR_ANOMALY_DETECTION_CONFIGURATION_ID")
            self.data_feed_id = self.get_settings_value("METRICS_ADVISOR_DATA_FEED_ID")
            self.metric_id = self.get_settings_value("METRICS_ADVISOR_METRIC_ID")
            credential = DefaultAzureCredential()
            self.scrubber.register_name_pair(self.sql_server_connection_string, "connectionstring")
            self.scrubber.register_name_pair(self.azure_table_connection_string, "connectionstring")
            self.scrubber.register_name_pair(self.azure_blob_connection_string, "connectionstring")
            self.scrubber.register_name_pair(self.azure_cosmosdb_connection_string, "connectionstring")
            self.scrubber.register_name_pair(self.application_insights_api_key, "connectionstring")
            self.scrubber.register_name_pair(self.azure_data_explorer_connection_string, "connectionstring")
            self.scrubber.register_name_pair(self.influxdb_connection_string, "connectionstring")
            self.scrubber.register_name_pair(self.influxdb_password, "connectionstring")
            self.scrubber.register_name_pair(self.azure_datalake_account_key, "connectionstring")
            self.scrubber.register_name_pair(self.mongodb_connection_string, "connectionstring")
            self.scrubber.register_name_pair(self.mysql_connection_string, "connectionstring")
            self.scrubber.register_name_pair(self.postgresql_connection_string, "connectionstring")
            self.scrubber.register_name_pair(self.metric_id, "metric_id")
            self.scrubber.register_name_pair(self.data_feed_id, "data_feed_id")
            self.scrubber.register_name_pair(self.anomaly_detection_configuration_id, "anomaly_detection_configuration_id")
        else:
            service_endpoint = "https://endpointname.cognitiveservices.azure.com"
            self.sql_server_connection_string = "SQL_SERVER_CONNECTION_STRING"
            self.azure_table_connection_string = "AZURE_TABLE_CONNECTION_STRING"
            self.azure_blob_connection_string = "AZURE_BLOB_CONNECTION_STRING"
            self.azure_cosmosdb_connection_string = "COSMOS_DB_CONNECTION_STRING"
            self.application_insights_api_key = "METRICS_ADVISOR_APPLICATION_INSIGHTS_API_KEY"
            self.azure_data_explorer_connection_string = "METRICS_ADVISOR_AZURE_DATA_EXPLORER_CONNECTION_STRING"
            self.influxdb_connection_string = "METRICS_ADVISOR_INFLUXDB_CONNECTION_STRING"
            self.influxdb_password = "METRICS_ADVISOR_INFLUXDB_PASSWORD"
            self.azure_datalake_account_key = "METRICS_ADVISOR_AZURE_DATALAKE_ACCOUNT_KEY"
            self.mongodb_connection_string = "METRICS_ADVISOR_AZURE_MONGODB_CONNECTION_STRING"
            self.mysql_connection_string = "METRICS_ADVISOR_MYSQL_CONNECTION_STRING"
            self.postgresql_connection_string = "METRICS_ADVISOR_POSTGRESQL_CONNECTION_STRING"
            self.anomaly_detection_configuration_id = "anomaly_detection_configuration_id"
            self.metric_id = "metric_id"
            self.data_feed_id = "data_feed_id"
            credential = MockCredential()
        self.admin_client = MetricsAdvisorAdministrationClient(service_endpoint, credential)

    def _create_data_feed(self, name):
        name = create_random_name(name)
        return self.admin_client.create_data_feed(
            name=name,
            source=SqlServerDataFeedSource(
                connection_string=self.sql_server_connection_string,
                query="select * from adsample2 where Timestamp = @StartTime"
            ),
            granularity="Daily",
            schema=DataFeedSchema(
                metrics=[
                    DataFeedMetric(name="cost"),
                    DataFeedMetric(name="revenue")
                ],
                dimensions=[
                    DataFeedDimension(name="category"),
                    DataFeedDimension(name="city")
                ],
            ),
            ingestion_settings="2019-10-01T00:00:00Z",
        )

    def _create_data_feed_and_detection_config(self, name):
        try:
            data_feed = self._create_data_feed(name)
            detection_config_name = create_random_name(name)
            detection_config = self.admin_client.create_detection_configuration(
                name=detection_config_name,
                metric_id=data_feed.metric_ids['cost'],
                description="testing",
                whole_series_detection_condition=MetricDetectionCondition(
                    smart_detection_condition=SmartDetectionCondition(
                        sensitivity=50,
                        anomaly_detector_direction="Both",
                        suppress_condition=SuppressCondition(
                            min_number=5,
                            min_ratio=5
                        )
                    )
                )
            )
            return detection_config, data_feed
        except Exception as e:
            self.admin_client.delete_data_feed(data_feed.id)
            raise e

    def _create_data_feed_for_update(self, name):
        data_feed_name = create_random_name(name)
        return self.admin_client.create_data_feed(
            name=data_feed_name,
            source=SqlServerDataFeedSource(
                connection_string=self.sql_server_connection_string,
                query=u"select * from adsample2 where Timestamp = @StartTime"
            ),
            granularity=DataFeedGranularity(
                granularity_type="Daily",
            ),
            schema=DataFeedSchema(
                metrics=[
                    DataFeedMetric(name="cost", display_name="display cost", description="the cost"),
                    DataFeedMetric(name="revenue", display_name="display revenue", description="the revenue")
                ],
                dimensions=[
                    DataFeedDimension(name="category", display_name="display category"),
                    DataFeedDimension(name="city", display_name="display city")
                ],
                timestamp_column="Timestamp"
            ),
            ingestion_settings=DataFeedIngestionSettings(
                ingestion_begin_time=datetime.datetime(2019, 10, 1),
                data_source_request_concurrency=0,
                ingestion_retry_delay=-1,
                ingestion_start_offset=-1,
                stop_retry_after=-1,
            ),
            admins=["yournamehere@microsoft.com"],
            data_feed_description="my first data feed",
            missing_data_point_fill_settings=DataFeedMissingDataPointFillSettings(
                fill_type="SmartFilling"
            ),
            rollup_settings=DataFeedRollupSettings(
                rollup_type="NoRollup",
                rollup_method="None",
            ),
            viewers=["viewers"],
            access_mode="Private",
            action_link_template="action link template"
        )

    def _create_alert_config_for_update(self, name):
        try:
            detection_config, data_feed = self._create_data_feed_and_detection_config(name)
            alert_config_name = create_random_name(name)
            alert_config = self.admin_client.create_alert_configuration(
                name=alert_config_name,
                cross_metrics_operator="AND",
                metric_alert_configurations=[
                    MetricAlertConfiguration(
                        detection_configuration_id=detection_config.id,
                        alert_scope=MetricAnomalyAlertScope(
                            scope_type="TopN",
                            top_n_group_in_scope=TopNGroupScope(
                                top=5,
                                period=10,
                                min_top_count=9
                            )
                        ),
                        alert_conditions=MetricAnomalyAlertConditions(
                            metric_boundary_condition=MetricBoundaryCondition(
                                direction="Both",
                                companion_metric_id=data_feed.metric_ids['cost'],
                                lower=1.0,
                                upper=5.0
                            )
                        )
                    ),
                    MetricAlertConfiguration(
                        detection_configuration_id=detection_config.id,
                        alert_scope=MetricAnomalyAlertScope(
                            scope_type="SeriesGroup",
                            series_group_in_scope={'city': 'Shenzhen'}
                        ),
                        alert_conditions=MetricAnomalyAlertConditions(
                            severity_condition=SeverityCondition(
                                min_alert_severity="Low",
                                max_alert_severity="High"
                            )
                        )
                    ),
                    MetricAlertConfiguration(
                        detection_configuration_id=detection_config.id,
                        alert_scope=MetricAnomalyAlertScope(
                            scope_type="WholeSeries"
                        ),
                        alert_conditions=MetricAnomalyAlertConditions(
                            severity_condition=SeverityCondition(
                                min_alert_severity="Low",
                                max_alert_severity="High"
                            )
                        )
                    )
                ],
                hook_ids=[]
            )
            return alert_config, data_feed, detection_config
        except Exception as e:
            self.admin_client.delete_data_feed(data_feed.id)
            raise e

    def _create_detection_config_for_update(self, name):
        try:
            data_feed = self._create_data_feed(name)
            detection_config_name = create_random_name("testupdated")
            detection_config = self.admin_client.create_detection_configuration(
                name=detection_config_name,
                metric_id=data_feed.metric_ids['cost'],
                description="My test metric anomaly detection configuration",
                whole_series_detection_condition=MetricDetectionCondition(
                    condition_operator="AND",
                    smart_detection_condition=SmartDetectionCondition(
                        sensitivity=50,
                        anomaly_detector_direction="Both",
                        suppress_condition=SuppressCondition(
                            min_number=5,
                            min_ratio=5
                        )
                    ),
                    hard_threshold_condition=HardThresholdCondition(
                        anomaly_detector_direction="Both",
                        suppress_condition=SuppressCondition(
                            min_number=5,
                            min_ratio=5
                        ),
                        lower_bound=0,
                        upper_bound=100
                    ),
                    change_threshold_condition=ChangeThresholdCondition(
                        change_percentage=50,
                        shift_point=30,
                        within_range=True,
                        anomaly_detector_direction="Both",
                        suppress_condition=SuppressCondition(
                            min_number=2,
                            min_ratio=2
                        )
                    )
                ),
                series_detection_conditions=[MetricSingleSeriesDetectionCondition(
                    series_key={"city": "Shenzhen", "category": "Jewelry"},
                    smart_detection_condition=SmartDetectionCondition(
                        anomaly_detector_direction="Both",
                        sensitivity=63,
                        suppress_condition=SuppressCondition(
                            min_number=1,
                            min_ratio=100
                        )
                    )
                )],
                series_group_detection_conditions=[MetricSeriesGroupDetectionCondition(
                    series_group_key={"city": "Sao Paulo"},
                    smart_detection_condition=SmartDetectionCondition(
                        anomaly_detector_direction="Both",
                        sensitivity=63,
                        suppress_condition=SuppressCondition(
                            min_number=1,
                            min_ratio=100
                        )
                    )
                )]
            )
            return detection_config, data_feed
        except Exception as e:
            self.admin_client.delete_data_feed(data_feed.id)
            raise e

    def _create_email_hook_for_update(self, name):
        return self.admin_client.create_hook(
            hook=EmailNotificationHook(
                name=name,
                emails_to_alert=["yournamehere@microsoft.com"],
                description="my email hook",
                external_link="external link"
            )
        )

    def _create_web_hook_for_update(self, name):
        return self.admin_client.create_hook(
            hook=WebNotificationHook(
                name=name,
                endpoint="https://httpbin.org/post",
                description="my web hook",
                external_link="external link",
                username="krista",
                password="123"
            )
        )


class TestMetricsAdvisorClientBase(AzureTestCase):
    FILTER_HEADERS = ReplayableTest.FILTER_HEADERS + ['Ocp-Apim-Subscription-Key', 'x-api-key']

    def __init__(self, method_name):
        super(TestMetricsAdvisorClientBase, self).__init__(method_name)
        self.vcr.match_on = ["path", "method", "query"]
        if self.is_live:
            service_endpoint = self.get_settings_value("METRICS_ADVISOR_ENDPOINT")
            self.anomaly_detection_configuration_id = self.get_settings_value("METRICS_ADVISOR_ANOMALY_DETECTION_CONFIGURATION_ID")
            self.anomaly_alert_configuration_id = self.get_settings_value("METRICS_ADVISOR_ANOMALY_ALERT_CONFIGURATION_ID")
            self.metric_id = self.get_settings_value("METRICS_ADVISOR_METRIC_ID")
            self.incident_id = self.get_settings_value("METRICS_ADVISOR_INCIDENT_ID")
            self.dimension_name = self.get_settings_value("METRICS_ADVISOR_DIMENSION_NAME")
            self.feedback_id = self.get_settings_value("METRICS_ADVISOR_FEEDBACK_ID")
            self.alert_id = self.get_settings_value("METRICS_ADVISOR_ALERT_ID")
            credential = DefaultAzureCredential()
            self.scrubber.register_name_pair(self.anomaly_detection_configuration_id, "anomaly_detection_configuration_id")
            self.scrubber.register_name_pair(self.anomaly_alert_configuration_id, "anomaly_alert_configuration_id")
            self.scrubber.register_name_pair(self.metric_id, "metric_id")
            self.scrubber.register_name_pair(self.incident_id, "incident_id")
            self.scrubber.register_name_pair(self.dimension_name, "dimension_name")
            self.scrubber.register_name_pair(self.feedback_id, "feedback_id")
            self.scrubber.register_name_pair(self.alert_id, "alert_id")
        else:
            service_endpoint = "https://endpointname.cognitiveservices.azure.com"
            self.anomaly_detection_configuration_id = "anomaly_detection_configuration_id"
            self.anomaly_alert_configuration_id = "anomaly_alert_configuration_id"
            self.metric_id = "metric_id"
            self.incident_id = "incident_id"
            self.dimension_name = "dimension_name"
            self.feedback_id = "feedback_id"
            self.alert_id = "alert_id"
            credential = MockCredential()
        self.client = MetricsAdvisorClient(service_endpoint, credential)

authors: [ "noreply@github.com" ]
author_id: jayhebe.noreply@github.com
blob_id: 530b1900656472b2656291a8b0a6454f5800aa69
directory_id: 077617fac92c16de69c8ae2f0e38b44754a892e9
path: /udp.py
content_id: 16ef2f967c3f02bf644549ca8fe65e3a11751d7d
detected_licenses: []
license_type: no_license
repo_name: ivanahepjuk/nbiot_rpi-shield
snapshot_id: e8af1a249ef96184ad310e7d93ab0009f1e0ddf8
revision_id: 45d40dbc9a922c4f89e04c2f3cf2e2f1f387ff4a
branch_name: refs/heads/master
visit_date: 2021-07-20T08:10:30.321922
revision_date: 2017-10-28T12:09:03
committer_date: 2017-10-28T12:09:03
github_id: 108,380,921
star_events_count: 1
fork_events_count: 1
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,245
extension: py
content:

#!/usr/bin/env python

import socket
import serial
import time
import math

# UDP settings:
UDP_IP = "89.103.47.53"  #89.102.98.39"
UDP_PORT = 8089

# serial port settings:
#ser = serial.Serial('/dev/ttyACM3', 9600, timeout=61, xonxoff=False, rtscts=False, dsrdtr=False)

# serial flushing
time.sleep(0.5)
#ser.flushInput()
#ser.flushOutput()

inkrement = 15
smer = 0
index = 0

while True:
    # read a line terminated by the \n character  (comment translated from Czech)
    #"hodnoty,box=Adrspach temp=%.2f,hum=%.2f,pres=%.2f,pm1=%.2f,pm25=%.2f,pm10=%.2f,time=%d\r\n"
    # the two working lines:  (comment translated from Czech)
    #UDP_MESSAGE = ser.readline()
    #UDP_MESSAGE = UDP_MESSAGE[:-2]  # cutting off the \r\n from the end of serialport-readed data
    UDP_MESSAGE = 'hodnoty,box=Adrspach hum=' + str(40 + (20 * math.sin(inkrement))) + ',temp=' + str(40 + (20 * math.cos(inkrement)))
    #temp=21.21,hum=21.21,pres=500.50,pm1=12.32,pm25=12.32,pm10=12.33,time=1111111'
    print("UDP target IP:", UDP_IP)
    print("UDP target port:", UDP_PORT)
    print("message:", UDP_MESSAGE)
    sock = socket.socket(socket.AF_INET,     # Internet
                         socket.SOCK_DGRAM)  # UDP
    sock.sendto(bytes(UDP_MESSAGE, "UTF-8"), (UDP_IP, UDP_PORT))
    time.sleep(10)
    inkrement += 0.1

authors: [ "ivanahepjuk@gmail.com" ]
author_id: ivanahepjuk@gmail.com
blob_id: 30459cc5e6a093410d325a173ea9cba76452b99a
directory_id: 3b2940c38412e5216527e35093396470060cca2f
path: /top/api/rest/HotelSoldOrdersIncrementGetRequest.py
content_id: 08229a2b1227b49f5b2a06b967fb59b0da52b1e9
detected_licenses: []
license_type: no_license
repo_name: akingthink/goods
snapshot_id: 842eb09daddc2611868b01ebd6e330e5dd7d50be
revision_id: ffdb5868a8df5c2935fc6142edcdf4c661c84dca
branch_name: refs/heads/master
visit_date: 2021-01-10T14:22:54.061570
revision_date: 2016-03-04T09:48:24
committer_date: 2016-03-04T09:48:24
github_id: 45,093,302
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 523
extension: py
content:

'''
Created by auto_sdk on 2015-01-20 12:44:31
'''
from top.api.base import RestApi


class HotelSoldOrdersIncrementGetRequest(RestApi):
    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        self.end_modified = None
        self.need_guest = None
        self.need_message = None
        self.page_no = None
        self.page_size = None
        self.start_modified = None
        self.status = None
        self.use_has_next = None

    def getapiname(self):
        return 'taobao.hotel.sold.orders.increment.get'

authors: [ "yangwenjin@T4F-MBP-17.local" ]
author_id: yangwenjin@T4F-MBP-17.local
blob_id: e0cc11f8c458f81ac2d8596c43ac6e0a26ec20a7
directory_id: 0692fe679d6ecd9a8b004c5223937f03f374237a
path: /vkcpa/migrations/0001_initial.py
content_id: 930653a5f9df40bc70dcebc3bb7814a6db820c0f
detected_licenses: []
license_type: no_license
repo_name: DmitryVesennyi/vkcpa
snapshot_id: 0a23c411ca5e417e2d646b9f3a7ef477194731a1
revision_id: 21540f48ffea2f8333a92441beecf6696db4a508
branch_name: refs/heads/master
visit_date: 2021-09-05T00:27:42.740029
revision_date: 2018-01-23T03:58:25
committer_date: 2018-01-23T03:58:25
github_id: 118,547,068
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 948
extension: py
content:

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Users',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('user_id', models.BigIntegerField()),
                ('name', models.CharField(max_length=255, verbose_name='\u0418\u043c\u044f')),
                ('surname', models.CharField(max_length=255, verbose_name='\u0424\u0430\u043c\u0438\u043b\u0438\u044f', blank=True)),
                ('start_date', models.DateTimeField(auto_now_add=True)),
                ('hashmd5', models.CharField(max_length=255, verbose_name='\u0425\u0435\u0448')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]

authors: [ "press.83@list.ru" ]
author_id: press.83@list.ru
blob_id: 9217751689c20a44cbffa776fd1f9c8aabb36593
directory_id: 5a396f14b3689273aaf1a6e20dcb0853d78a9f04
path: /GetSharedWithDomainTeamDriveACLs.py
content_id: 0c114d9b0daa023ea4ef045d01a424197485f1cf
detected_licenses: []
license_type: no_license
repo_name: NosIreland/GAM-Scripts3
snapshot_id: 642b4dd827189352afd8357a41b576d6acf159bc
revision_id: de3ee3007e6906c5b6d28fef8aea27827646db00
branch_name: refs/heads/master
visit_date: 2023-03-04T21:58:44.594405
revision_date: 2021-02-18T14:39:20
committer_date: 2021-02-18T14:39:20
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,744
extension: py
content:

#!/usr/bin/env python3
"""
# Purpose: For a Google Drive User(s), delete all drive file ACLs for Team Drive files shared with a list of specified domains
# Note: This script requires Advanced GAM:
#	https://github.com/taers232c/GAMADV-XTD3
# Customize: Set DOMAIN_LIST and DESIRED_ALLOWFILEDISCOVERY
# Python: Use python or python3 below as appropriate to your system; verify that you have version 3
#  $ python -V   or   python3 -V
#  Python 3.x.y
# Usage:
# For all Team Drives, start at step 1; For Team Drives selected by user/group/OU, start at step 7
# All Team Drives
#  1: Get all Team Drives.
#   $ gam redirect csv ./TeamDrives.csv print teamdrives fields id,name
#  2: Get ACLs for all Team Drives
#   $ gam redirect csv ./TeamDriveACLs.csv multiprocess csv TeamDrives.csv gam print drivefileacls ~id fields emailaddress,role,type
#  3: Customize GetTeamDriveOrganizers.py for this task:
#     Set DOMAIN_LIST as required
#     Set ONE_ORGANIZER = True
#     Set SHOW_GROUP_ORGANIZERS = False
#     Set SHOW_USER_ORGANIZERS = True
#  4: From that list of ACLs, output a CSV file with headers "id,name,organizers"
#     that shows the organizers for each Team Drive
#   $ python3 GetTeamDriveOrganizers.py TeamDriveACLs.csv TeamDrives.csv TeamDriveOrganizers.csv
#  5: Get ACLs for all team drive files; you can use permission matching to narrow the number of files listed; add to the end of the command line
#      DESIRED_ALLOWFILEDISCOVERY = 'Any' - pm type domain em
#      DESIRED_ALLOWFILEDISCOVERY = 'True' - pm type domain allowfilediscovery true em
#      DESIRED_ALLOWFILEDISCOVERY = 'False' - pm type domain allowfilediscovery false em
#   $ gam redirect csv ./filelistperms.csv multiprocess csv TeamDriveOrganizers.csv gam user ~organizers print filelist select teamdriveid ~id fields teamdriveid,id,title,permissions pm type domain em
#  6: Go to step 11
# Selected Team Drives
#  7: If want Team Drives for a specific set of organizers, replace <UserTypeEntity> with your user selection in the command below
#   $ gam redirect csv ./AllTeamDrives.csv <UserTypeEntity> print teamdrives role organizer fields id,name
#  8: Customize DeleteDuplicateRows.py for this task:
#     Set ID_FIELD = 'id'
#  9: Delete duplicate Team Drives (some may have multiple organizers).
#   $ python3 DeleteDuplicateRows.py ./AllTeamDrives.csv ./TeamDrives.csv
# 10: Get ACLs for all team drive files; you can use permission matching to narrow the number of files listed; add to the end of the command line
#      DESIRED_ALLOWFILEDISCOVERY = 'Any' - pm type domain em
#      DESIRED_ALLOWFILEDISCOVERY = 'True' - pm type domain allowfilediscovery true em
#      DESIRED_ALLOWFILEDISCOVERY = 'False' - pm type domain allowfilediscovery false em
#   $ gam redirect csv ./filelistperms.csv multiprocess csv TeamDrives.csv gam user ~User print filelist select teamdriveid ~id fields teamdriveid,id,title,permissions pm type domain em
# Common code
# 11: From that list of ACLs, output a CSV file with headers "Owner,driveFileId,driveFileTitle,permissionId,role,domain,allowFileDiscovery"
#     that lists the driveFileIds and permissionIds for all ACLs shared with the selected domains.
#     (n.b., driveFileTitle, role, domain and allowFileDiscovery are not used in the next step, they are included for documentation purposes)
#   $ python3 GetSharedWithDomainTeamDriveACLs.py filelistperms.csv deleteperms.csv
# 12: Inspect deleteperms.csv, verify that it makes sense and then proceed
# 13: Delete the ACLs
#   $ gam csv deleteperms.csv gam user "~Owner" delete drivefileacl "~driveFileId" "~permissionId"
"""

import csv
import re
import sys

FILE_NAME = 'name'
ALT_FILE_NAME = 'title'

# If you want to limit finding ACLS for a specific list of domains, use the list below, e.g., DOMAIN_LIST = ['domain.com',]
DOMAIN_LIST = ['domain1.com', 'domain2.com',]
DOMAIN_LIST = []

# Specify desired value of allowFileDiscovery field: True, False, Any (matches True and False)
DESIRED_ALLOWFILEDISCOVERY = 'Any'

QUOTE_CHAR = '"'  # Adjust as needed
LINE_TERMINATOR = '\n'  # On Windows, you probably want '\r\n'

PERMISSIONS_N_TYPE = re.compile(r"permissions.(\d+).type")

if (len(sys.argv) > 2) and (sys.argv[2] != '-'):
    outputFile = open(sys.argv[2], 'w', encoding='utf-8', newline='')
else:
    outputFile = sys.stdout
outputCSV = csv.DictWriter(outputFile,
                           ['Owner', 'driveFileId', 'driveFileTitle', 'permissionId', 'role', 'domain', 'allowFileDiscovery'],
                           lineterminator=LINE_TERMINATOR, quotechar=QUOTE_CHAR)
outputCSV.writeheader()

if (len(sys.argv) > 1) and (sys.argv[1] != '-'):
    inputFile = open(sys.argv[1], 'r', encoding='utf-8')
else:
    inputFile = sys.stdin

for row in csv.DictReader(inputFile, quotechar=QUOTE_CHAR):
    for k, v in iter(row.items()):
        mg = PERMISSIONS_N_TYPE.match(k)
        if mg and v == 'domain':
            permissions_N = mg.group(1)
            domain = row[f'permissions.{permissions_N}.domain']
            allowFileDiscovery = row.get(f'permissions.{permissions_N}.allowFileDiscovery',
                                         str(row.get(f'permissions.{permissions_N}.withLink') == 'False'))
            if (not DOMAIN_LIST or domain in DOMAIN_LIST) and (DESIRED_ALLOWFILEDISCOVERY in ('Any', allowFileDiscovery)):
                outputCSV.writerow({'Owner': row['Owner'],
                                    'driveFileId': row['id'],
                                    'driveFileTitle': row.get(FILE_NAME, row.get(ALT_FILE_NAME, 'Unknown')),
                                    'permissionId': f'id:{row[f"permissions.{permissions_N}.id"]}',
                                    'role': row[f'permissions.{permissions_N}.role'],
                                    'domain': domain,
                                    'allowFileDiscovery': allowFileDiscovery})

if inputFile != sys.stdin:
    inputFile.close()
if outputFile != sys.stdout:
    outputFile.close()

authors: [ "ross.scroggs@gmail.com" ]
author_id: ross.scroggs@gmail.com
blob_id: 24bce9adfd9986c448487e74e16658ad17c265dd
directory_id: 786de89be635eb21295070a6a3452f3a7fe6712c
path: /poster/tags/V00-00-01/SConscript
content_id: b190c174d8a3bdc4f9abefb1be153557a06627e1
detected_licenses: []
license_type: no_license
repo_name: connectthefuture/psdmrepo
snapshot_id: 85267cfe8d54564f99e17035efe931077c8f7a37
revision_id: f32870a987a7493e7bf0f0a5c1712a5a030ef199
branch_name: refs/heads/master
visit_date: 2021-01-13T03:26:35.494026
revision_date: 2015-09-03T22:22:11
committer_date: 2015-09-03T22:22:11
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,985
content:

#------------------------------------------------------------------------
# File and Version Information:
#  $Id$
#
# Description:
#  SConscript file for package poster
#------------------------------------------------------------------------

# Do not delete following line, it must be present in
# SConscript file for any SIT project
Import('*')

import os

from SConsTools.standardExternalPackage import standardExternalPackage

#
# For the standard external packages which contain includes, libraries,
# and applications it is usually sufficient to call standardExternalPackage()
# giving few keyword arguments. Here is a complete list of arguments:
#
#   PREFIX   - top directory of the external package
#   INCDIR   - include directory, absolute or relative to PREFIX
#   INCLUDES - include files to copy (space-separated list of patterns)
#   PYDIR    - Python src directory, absolute or relative to PREFIX
#   LINKPY   - Python files to link (patterns), or all files if not present
#   PYDIRSEP - if present and evaluates to True installs python code to a
#              separate directory arch/$SIT_ARCH/python/<package>
#   LIBDIR   - libraries directory, absolute or relative to PREFIX
#   COPYLIBS - library names to copy
#   LINKLIBS - library names to link, or all libs if LINKLIBS and COPYLIBS are empty
#   BINDIR   - binaries directory, absolute or relative to PREFIX
#   LINKBINS - binary names to link, or all binaries if not present
#   PKGLIBS  - names of libraries that have to be linked for this package
#   DEPS     - names of other packages that we depend upon
#   PKGINFO  - package information, such as RPM package name
#
# here is an example setting up a fictional package

pkg = "poster"
pkg_ver = "0.8.1"

PREFIX = os.path.join('$SIT_EXTERNAL_SW', pkg, pkg_ver)
PYDIR = os.path.join("lib", '$PYTHON', "site-packages", pkg)
PYDIRSEP = True
PKGINFO = (pkg, pkg_ver, '$PYTHON', '$SIT_ARCH.found')

standardExternalPackage(pkg, **locals())

authors: [ "salnikov@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7" ]
author_id: salnikov@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7
blob_id: 933b59e302f98e982ead78cbe8328132cfbe6402
directory_id: 6f04a6ef99c581ed2f0519c897f254a7b63fb61d
path: /rastervision/utils/zxy2geotiff.py
content_id: 80210424cc55d2fa376a4eef16bfa35762587c46
detected_licenses: [ "LicenseRef-scancode-generic-cla", "Apache-2.0" ]
license_type: permissive
repo_name: dgketchum/raster-vision
snapshot_id: 18030c9a8bfe99386aa95adbf8e3ec51d204947f
revision_id: fe74bef30daa5821023946576b00c584ddc56de8
branch_name: refs/heads/master
visit_date: 2020-08-30T13:56:08.598240
revision_date: 2019-11-03T17:38:33
committer_date: 2019-11-03T17:38:33
github_id: 218,400,435
star_events_count: 3
fork_events_count: 1
gha_license_id: NOASSERTION
gha_event_created_at: 2019-10-29T23:09:57
gha_created_at: 2019-10-29T23:09:57
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 7,481
extension: py
content:

import tempfile

from PIL import Image
import numpy as np
import click
import mercantile
import rasterio
from rasterio.windows import Window
import pyproj

from rastervision.utils.files import (download_if_needed, get_local_path, upload_or_copy)
from rastervision.command.aux.cogify_command import create_cog


def lnglat2merc(lng, lat):
    """Convert lng, lat point to x/y Web Mercator tuple."""
    return pyproj.transform(pyproj.Proj(init='epsg:4326'), pyproj.Proj(init='epsg:3857'), lng, lat)


def merc2lnglat(x, y):
    """Convert x, y Web Mercator point to lng/lat tuple."""
    return pyproj.transform(pyproj.Proj(init='epsg:3857'), pyproj.Proj(init='epsg:4326'), x, y)


def merc2pixel(tile_x, tile_y, zoom, merc_x, merc_y, tile_sz=256):
    """Convert Web Mercator point to pixel coordinates.

    This is within the coordinate frame of a single ZXY tile.

    Args:
        tile_x: (int) x coordinate of ZXY tile
        tile_y: (int) y coordinate of ZXY tile
        zoom: (int) zoom level of ZXY tile
        merc_x: (float) Web Mercator x axis of point
        merc_y: (float) Web Mercator y axis of point
        tile_sz: (int) size of ZXY tile
    """
    tile_merc_bounds = mercantile.xy_bounds(tile_x, tile_y, zoom)
    pix_y = int(round(tile_sz * ((tile_merc_bounds.top - merc_y) / (tile_merc_bounds.top - tile_merc_bounds.bottom))))
    pix_x = int(round(tile_sz * ((merc_x - tile_merc_bounds.left) / (tile_merc_bounds.right - tile_merc_bounds.left))))
    return (pix_x, pix_y)


def _zxy2geotiff(tile_schema, zoom, bounds, output_uri, make_cog=False):
    """Generates a GeoTIFF of a bounded region from a ZXY tile server.

    Args:
        tile_schema: (str) the URI schema for zxy tiles (ie. a slippy map tile server)
            of the form /tileserver-uri/{z}/{x}/{y}.png. If {-y} is used, the tiles
            are assumed to be indexed using TMS coordinates, where the y axis starts
            at the southernmost point. The URI can be for http, S3, or the local
            file system.
        zoom: (int) the zoom level to use when retrieving tiles
        bounds: (list) a list of length 4 containing min_lat, min_lng, max_lat, max_lng
        output_uri: (str) where to save the GeoTIFF. The URI can be for http, S3, or
            the local file system
    """
    min_lat, min_lng, max_lat, max_lng = bounds
    if min_lat >= max_lat:
        raise ValueError('min_lat must be < max_lat')
    if min_lng >= max_lng:
        raise ValueError('min_lng must be < max_lng')

    is_tms = False
    if '{-y}' in tile_schema:
        tile_schema = tile_schema.replace('{-y}', '{y}')
        is_tms = True

    tmp_dir_obj = tempfile.TemporaryDirectory()
    tmp_dir = tmp_dir_obj.name

    # Get range of tiles that cover bounds.
    output_path = get_local_path(output_uri, tmp_dir)
    tile_sz = 256
    t = mercantile.tile(min_lng, max_lat, zoom)
    xmin, ymin = t.x, t.y
    t = mercantile.tile(max_lng, min_lat, zoom)
    xmax, ymax = t.x, t.y

    # The supplied bounds are contained within the "tile bounds" -- ie. the
    # bounds of the set of tiles that covers the supplied bounds. Therefore,
    # we need to crop out the imagery that lies within the supplied bounds.
    # We do this by computing a top, bottom, left, and right offset in pixel
    # units of the supplied bounds against the tile bounds. Getting the offsets
    # in pixel units involves converting lng/lat to web mercator units since we
    # assume that is the CRS of the tiles. These offsets are then used to crop
    # individual tiles and place them correctly into the output raster.
    nw_merc_x, nw_merc_y = lnglat2merc(min_lng, max_lat)
    left_pix_offset, top_pix_offset = merc2pixel(xmin, ymin, zoom, nw_merc_x, nw_merc_y)

    se_merc_x, se_merc_y = lnglat2merc(max_lng, min_lat)
    se_left_pix_offset, se_top_pix_offset = merc2pixel(xmax, ymax, zoom, se_merc_x, se_merc_y)
    right_pix_offset = tile_sz - se_left_pix_offset
    bottom_pix_offset = tile_sz - se_top_pix_offset

    uncropped_height = tile_sz * (ymax - ymin + 1)
    uncropped_width = tile_sz * (xmax - xmin + 1)
    height = uncropped_height - top_pix_offset - bottom_pix_offset
    width = uncropped_width - left_pix_offset - right_pix_offset

    transform = rasterio.transform.from_bounds(nw_merc_x, se_merc_y, se_merc_x, nw_merc_y, width, height)
    with rasterio.open(
            output_path,
            'w',
            driver='GTiff',
            height=height,
            width=width,
            count=3,
            crs='epsg:3857',
            transform=transform,
            dtype=rasterio.uint8) as dataset:
        out_x = 0
        for xi, x in enumerate(range(xmin, xmax + 1)):
            tile_xmin, tile_xmax = 0, tile_sz - 1
            if x == xmin:
                tile_xmin += left_pix_offset
            if x == xmax:
                tile_xmax -= right_pix_offset
            window_width = tile_xmax - tile_xmin + 1

            out_y = 0
            for yi, y in enumerate(range(ymin, ymax + 1)):
                tile_ymin, tile_ymax = 0, tile_sz - 1
                if y == ymin:
                    tile_ymin += top_pix_offset
                if y == ymax:
                    tile_ymax -= bottom_pix_offset
                window_height = tile_ymax - tile_ymin + 1

                # Convert from xyz to tms if needed.
                # https://gist.github.com/tmcw/4954720
                if is_tms:
                    y = (2**zoom) - y - 1
                tile_uri = tile_schema.format(x=x, y=y, z=zoom)
                tile_path = download_if_needed(tile_uri, tmp_dir)
                img = np.array(Image.open(tile_path))
                img = img[tile_ymin:tile_ymax + 1, tile_xmin:tile_xmax + 1, :]

                window = Window(out_x, out_y, window_width, window_height)
                dataset.write(np.transpose(img[:, :, 0:3], (2, 0, 1)), window=window)
                out_y += window_height
            out_x += window_width

    if make_cog:
        create_cog(output_path, output_uri, tmp_dir)
    else:
        upload_or_copy(output_path, output_uri)


@click.command()
@click.argument('tile_schema')
@click.argument('zoom')
@click.argument('bounds')
@click.argument('output_uri')
@click.option('--make-cog', is_flag=True, default=False)
def zxy2geotiff(tile_schema, zoom, bounds, output_uri, make_cog):
    """Generates a GeoTIFF of a bounded region from a ZXY tile server.

    TILE_SCHEMA: the URI schema for zxy tiles (ie. a slippy map tile server) of the
    form /tileserver-uri/{z}/{x}/{y}.png. If {-y} is used, the tiles are assumed to
    be indexed using TMS coordinates, where the y axis starts at the southernmost
    point. The URI can be for http, S3, or the local file system.

    ZOOM: the zoom level to use when retrieving tiles

    BOUNDS: a space-separated string containing min_lat, min_lng, max_lat, max_lng

    OUTPUT_URI: where to save the GeoTIFF. The URI can be for http, S3, or the local
    file system.
    """
    bounds = [float(x) for x in bounds.split(' ')]
    _zxy2geotiff(tile_schema, int(zoom), bounds, output_uri, make_cog=make_cog)


if __name__ == '__main__':
    zxy2geotiff()

authors: [ "lewfish@gmail.com" ]
author_id: lewfish@gmail.com
blob_id: c276d6fc997d227971b391f5b9a6cc0221128807
directory_id: 78712b3b864eb3d6b78308c7760da9bdd8c51c9d
path: /skillMatch/migrations/0011_auto_20190328_0200.py
content_id: a1bb1f4a4d156d5760104122793bf78f83fe3faa
detected_licenses: []
license_type: no_license
repo_name: UeharaHua/SkillMatch
snapshot_id: b36071d79a15a43d06970671f3a15fcbf2678263
revision_id: e4f9445ec9c637266c85e8b061563728a9fd20bc
branch_name: refs/heads/master
visit_date: 2022-12-14T09:24:06.075625
revision_date: 2019-09-29T05:51:58
committer_date: 2019-09-29T05:51:58
github_id: 211,557,817
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: 2022-11-22T02:58:05
gha_created_at: 2019-09-28T20:28:27
gha_language: CSS
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 498
extension: py
content:

# Generated by Django 2.1.5 on 2019-03-28 06:00

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('skillMatch', '0010_auto_20190325_1343'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='course',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='skillMatch.Class'),
        ),
    ]

authors: [ "hlu8jm@virginia.edu" ]
author_id: hlu8jm@virginia.edu
blob_id: dc5f47e41dd896ee44f05aa76d5189db027ffe70
directory_id: d2c4151eff768af64946ababc2e41c13d8973cd3
path: /ABC146/a.py
content_id: c10b5d53d83522345cefe135c52ff627ef03099c
detected_licenses: []
license_type: no_license
repo_name: Intel-out-side/AtCoder
snapshot_id: 2de19b71981247135432aed2d6d9c2a16c3ab7f0
revision_id: 0c419d2df15fff02032432cb1b1323612484e16e
branch_name: refs/heads/master
visit_date: 2022-06-23T04:21:12.886072
revision_date: 2022-06-13T14:39:07
committer_date: 2022-06-13T14:39:07
github_id: 235,240,853
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 181
extension: py
content:

week = ["MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"]
day = str(input())

if day == "SUN":
    result = 7
else:
    result = week.index("SUN") - week.index(day)

print(result)

authors: [ "so.eng.eng.1rou@gmail.com" ]
author_id: so.eng.eng.1rou@gmail.com
blob_id: 374ed2b88911029a8ab121c1aa784973710303df
directory_id: f1611fcb4029e59f9b1a16c88b4e7805659b952b
path: /entity extraction from speech/populate_form.py
content_id: d7cf9e30c1a8cba303b8a369d36675b576918a56
detected_licenses: []
license_type: no_license
repo_name: syntaxterrorr/Insurance-Fraud-Detection
snapshot_id: c4505490ae14fdd0fefe3a9726ac82d5c47d5910
revision_id: 4989ef0cbde47b463292c4a1cfaa073f2e2bf919
branch_name: refs/heads/master
visit_date: 2020-04-26T08:28:01.513387
revision_date: 2019-03-04T05:28:58
committer_date: 2019-03-04T05:28:58
github_id: 173,423,685
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,692
extension: py
content:

from rasa_nlu.model import Interpreter
import json
import speech_recognition as sr

r = sr.Recognizer()
speech = sr.AudioFile('aamir.wav')

with speech as source:
    audio = r.record(source)

text = r.recognize_google(audio)
# text = u"I am calling to claim insurance for my Honda Civic that suffered an accident two days ago when it came in the way of a multi vehical collision."
# This is a call to claim insurance for a [Honda](brand) [Accord](model) from [Karnataka](state). My [Son](relation) was driving his car on the 16th of february this year when he suffered a [front](collision) collision which resulted in [major](severity) loss. We called the [fire](authorities) fighters. It was in the [night](time_of_day). It was a [multi vehical](incident_type) crash.

interpreter = Interpreter.load('models/current/nlu')
result = interpreter.parse(text)
# print(json.dumps(result))

entities = {}
incident_hour = {'morning': 9, 'afternoon': 2, 'evening': 6, 'night': 10}
relative = {'child': 'own-child', 'son': 'own-child', 'daughter': 'own-child', 'husband': 'husband', 'wife': 'wife', 'relative': 'other-relative'}

for entity in result['entities']:
    if entity['confidence'] < 0.35:
        continue
    extractor = entity['extractor']
    if extractor == 'ner_crf':
        key = entity['entity']
        value = entity['value']
        if key == 'time_of_day':
            entities['incident_hour'] = incident_hour[value]
        elif key == 'relation':
            entities[key] = relative[value]
        elif key == 'collision':
            entities[key] = value.capitalize() + ' Collision'
        else:
            entities[key] = value.capitalize()

print(result['entities'])

authors: [ "aamir@darukhanawalla.com" ]
author_id: aamir@darukhanawalla.com
blob_id: 29c49f467208a47e78ae1044871005b417e2ddde
directory_id: 955f9dc96abddd7593f9e68fef48c529512d34c4
path: /vote/urls.py
content_id: b6b85d7f0fc869083a4016272c06e27bbb2b3797
detected_licenses: []
license_type: no_license
repo_name: junsik-Lim/board_test
snapshot_id: 0443409941b0b58bd44602d0658cc485de82cccd
revision_id: aae90b21dc31c3b77824d0e21b3810467fcacc2f
branch_name: refs/heads/main
visit_date: 2023-08-29T11:57:13.344283
revision_date: 2021-10-03T04:57:28
committer_date: 2021-10-03T04:57:28
github_id: 412,974,858
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 283
extension: py
content:

from django.urls import path

from . import views

app_name = "vote"

urlpatterns = [
    path('', views.index, name="index"),
    path('create/', views.create, name="create"),
    path('detail/<num>', views.detail, name="detail"),
    path('vote/<conid>', views.vote, name="vote"),
]

authors: [ "bluejun0321@naver.com" ]
author_id: bluejun0321@naver.com
blob_id: e29d4d92cca7b4533c0b30e395a0f3b88edea30c
directory_id: 1cd9ea9920326561830ea31dde3c5507670a4dd8
path: /check_sudoku.py
content_id: ec9297f2f43443d4d76c2ad51d61de606be7c1c5
detected_licenses: []
license_type: no_license
repo_name: iamtraction/CS101-Udacity
snapshot_id: d0b34174e088ccb8c6b6923a6afb6a151ae1bdc7
revision_id: 9842335bec48c45ee9f5e84d5ab064b7d8e2bcb3
branch_name: refs/heads/master
visit_date: 2021-06-20T10:26:49.538644
revision_date: 2017-07-30T15:04:08
committer_date: 2017-07-30T15:04:08
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,459
extension: py
content:

# THREE GOLD STARS

# Sudoku [http://en.wikipedia.org/wiki/Sudoku]
# is a logic puzzle where a game
# is defined by a partially filled
# 9 x 9 square of digits where each square
# contains one of the digits 1,2,3,4,5,6,7,8,9.
# For this question we will generalize
# and simplify the game.

# Define a procedure, check_sudoku,
# that takes as input a square list
# of lists representing an n x n
# sudoku puzzle solution and returns the boolean
# True if the input is a valid
# sudoku square and returns the boolean False
# otherwise.

# A valid sudoku square satisfies these
# two properties:

#   1. Each column of the square contains
#      each of the whole numbers from 1 to n exactly once.

#   2. Each row of the square contains each
#      of the whole numbers from 1 to n exactly once.

# You may assume that the input is square and contains at
# least one row and column.

correct = [[1, 2, 3],
           [2, 3, 1],
           [3, 1, 2]]

incorrect = [[1, 2, 3, 4],
             [2, 3, 1, 3],
             [3, 1, 2, 3],
             [4, 4, 4, 4]]

incorrect2 = [[1, 2, 3, 4],
              [2, 3, 1, 4],
              [4, 1, 2, 3],
              [3, 4, 1, 2]]

incorrect3 = [[1, 2, 3, 4, 5],
              [2, 3, 1, 5, 6],
              [4, 5, 2, 1, 3],
              [3, 4, 5, 2, 1],
              [5, 6, 4, 3, 2]]

incorrect4 = [['a', 'b', 'c'],
              ['b', 'c', 'a'],
              ['c', 'a', 'b']]

incorrect5 = [[1, 1.5],
              [1.5, 1]]

def check_sudoku(row):
    """Checks if the given square list of lists representing an n x n
    sudoku puzzle solution is a valid sudoku solution."""
    length = len(row)
    digit = 1
    while digit <= length:
        i = 0
        while i < length:
            # Go through each row and column
            row_count = 0
            col_count = 0
            j = 0
            while j < length:
                # For each entry in the ith row/column
                if row[i][j] == digit:
                    row_count += 1
                if row[j][i] == digit:
                    col_count += 1
                j += 1
            if row_count != 1 or col_count != 1:
                return False
            i += 1
        digit += 1
    return True

# Note: this file uses Python 2 print statements.
print check_sudoku(incorrect)
#>>> False
print check_sudoku(correct)
#>>> True
print check_sudoku(incorrect2)
#>>> False
print check_sudoku(incorrect3)
#>>> False
print check_sudoku(incorrect4)
#>>> False
print check_sudoku(incorrect5)
#>>> False

authors: [ "snkrsn.kampa@gmail.com" ]
author_id: snkrsn.kampa@gmail.com
blob_id: 8916c15193078890d423b88049af7ba61c106438
directory_id: 00d5a9202f3dde90b760dfef63932f862d8e1b52
path: /nsx.py
content_id: bea933d58b83a04e2357d640b48d57171f4131f7
detected_licenses: []
license_type: no_license
repo_name: sariego/blender-simulation-visualizer
snapshot_id: 2cc021d47f19e205d04ba9a650e7330375ff0c5c
revision_id: b82ee509fee439b7d6a7e895ef98377d066a84ab
branch_name: refs/heads/master
visit_date: 2021-05-27T23:19:14.350497
revision_date: 2014-11-05T02:02:11
committer_date: 2014-11-05T02:02:11
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,128
extension: py
content:

from t3dclasses import *
from spagheti import *
from itehgaps import *

root = Tk()
root.resizable(0, 0)
root.title('Visor de trayectoria de particulas')
root.iconbitmap('xyz.ico')
note = Notebook(root)
note.pack()

##first = Frame(root)
##directorio = DirectoryGetter(first)
##frames = FramesGetter(first)
##limits = LimitsGetter(first)
##Separator(first).pack(fill=X)
##particles = ParticlesGetter(first,directorio)
##ExAndGo(first, directorio, particles, frames)

fideos = Frame(root)
directorio_f = DirectoryGetter(fideos)
frames_f = FramesGetter(fideos)
detalles = SpaDetails(fideos)
show = Show(fideos, directorio_f, detalles)
Separator(fideos).pack(fill=X)
Go(fideos, directorio_f, frames_f, detalles, show)

soedif = Frame(root)
directorio_s = DirectoryGetter(soedif)
frames_s = FramesGetter(soedif)
detalles_s = SpaDetails(soedif)
show_s = Show(soedif, directorio_s, detalles_s)
Separator(soedif).pack(fill=X)
Og(soedif, directorio_s, frames_s, detalles_s, show_s)

#status = StatusBar(root)

#note.add(first, text='Trayectoria 3D')
note.add(fideos, text='Spagheti')
note.add(soedif, text='itehgapS')

root.mainloop()

authors: [ "pedrosariego@gmail.com" ]
author_id: pedrosariego@gmail.com
blob_id: ce6adbff5e4323bd42d9c2f2edf80e4909dc4551
directory_id: 177fd37d63ab37aa66b5054f74a018aa788a6183
path: /trips-backend/besttrips/wsgi.py
content_id: b4e04dbbfbc8bb7a769961f2d2cf3656db4bec99
detected_licenses: [ "MIT" ]
license_type: permissive
repo_name: pgarr/best-trips
snapshot_id: 504475adfb3c72d558c5c4d036a8eb447296cf64
revision_id: edc45f6e822b70aa9bfa6f9d4ee8b2c58df54310
branch_name: refs/heads/main
visit_date: 2023-03-29T01:32:03.285668
revision_date: 2021-04-13T17:19:33
committer_date: 2021-04-13T17:19:33
github_id: 342,875,208
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 395
extension: py
content:

"""
WSGI config for besttrips project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""

import os

from django.core.wsgi import get_wsgi_application

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'besttrips.settings')

application = get_wsgi_application()

authors: [ "garlej.p@gmail.com" ]
author_id: garlej.p@gmail.com
0a3ca56c977088ca6e0697402dafb8661ab78d31
d08d1d0fd863e3121e27080ac5892bd39f0b11b8
/vlan-fabric/python/vlan_fabric/tenant.py
83582986a258d51be34d84e463ae50a8caff55a5
[ "MIT" ]
permissive
rrojasc/sandbox-nso
be4211dbedc3d87d8830616db593dac71c051e75
b44dce57904b916a570d0fe272c64cfe1f4c7575
refs/heads/master
2023-03-16T13:04:25.013628
2019-12-16T13:26:12
2019-12-16T13:26:12
null
0
0
null
null
null
null
UTF-8
Python
false
false
20,403
py
# -*- mode: python; python-indent: 4 -*-
import ncs
from ncs.application import Service

# import resource_manager.id_allocator as id_allocator
import ipaddress


# ------------------------
# SERVICE CALLBACK EXAMPLE
# ------------------------
class ServiceCallbacks(Service):

    # The create() callback is invoked inside NCS FASTMAP and
    # must always exist.
    @Service.create
    def cb_create(self, tctx, root, service, proplist):
        self.log.info("Service create(service=", service._path, ")")

        vars = ncs.template.Variables()
        vars.add("DUMMY", "127.0.0.1")
        template = ncs.template.Template(service)

        disable_trunk_negotiation = False

        # PHASE - Get Fabric Member Devices
        # primary_ip_address = root.devices.device[pair.primary].config.interface.mgmt["0"].ip.address.ipaddr
        switch_pairs = root.vlan_fabric[service.fabric].switch_pair
        switches = root.vlan_fabric[service.fabric].switch

        # Initialize with NONE
        border_pair = None
        self.log.info("Switches for Fabric {} are: ".format(service.fabric))
        for pair in switch_pairs:
            self.log.info("Pair: {} Primary {} Secondary {}".format(pair.name, pair.primary, pair.secondary))
            if pair.layer3:
                border_pair = pair
                self.log.info("Layer 3 Switch Pair is {} Primary {} Secondary {}".format(pair.name, pair.primary, pair.secondary))

        self.log.info("Switches for Fabric {} are {}".format(service.fabric, switches.__dict__))
        for switch in switches:
            self.log.info("Switch: {}".format(switch.device))

        # PHASE - Get Fabric Interconnect Resources for Fabric
        fabric_interconnects = root.vlan_fabric[service.fabric].fabric_interconnect
        self.log.info("Fabric Interconnects for Fabric {} are:".format(service.fabric))
        for fabric_interconnect in fabric_interconnects:
            self.log.info("FI: {}".format(fabric_interconnect.device))

        # PHASE - Get VMware DVS Resources for Fabric
        vswitches = root.vlan_fabric[service.fabric].vmware_dvs
        self.log.info("VMware Distributed vSwitches for Fabric {} are:".format(service.fabric))
        for vswitch in vswitches:
            self.log.info("vCenter {} Datacenter {} dVS {}".format(vswitch.vcenter, vswitch.datacenter, vswitch.dvs))

        # PHASE - Configure Static Routes if configured
        if border_pair:
            routing_vars = ncs.template.Variables()
            routing_vars.add("VRFNAME", service.name)

            # PHASE - Static routes
            for route in service.static_routes:
                # self.log.info("Setting up static route for {} to {} in VRF {}".format(route.network, route.gateway, service.name))
                routing_vars.add("STATIC_ROUTE_NETWORK", route.network)
                routing_vars.add("STATIC_ROUTE_GATEWAY", route.gateway)

                # PRIMARY
                self.log.info("Setting up static route for {} to {} in VRF {} on switch_pair {} Primary Device {}".format(route.network, route.gateway, service.name, border_pair, border_pair.primary))
                routing_vars.add("DEVICE_NAME", border_pair.primary)
                self.log.info("routing_vars={}".format(routing_vars))
                template.apply("vrf-static-routes", routing_vars)

                # Secondary
                if border_pair.secondary:
                    self.log.info("Setting up static route for {} to {} in VRF {} on switch_pair {} Secondary Device {}".format(route.network, route.gateway, service.name, border_pair, border_pair.secondary))
                    routing_vars.add("DEVICE_NAME", border_pair.secondary)
                    template.apply("vrf-static-routes", routing_vars)
        else:
            self.log.info("Note: Fabric {} has NO Layer 3 Border Pair.".format(service.fabric))

        # PHASE Process Each Network in Service
        for network in service.network:

            # PHASE - Add VLANS to all Fabric Switches
            self.log.info("Adding VLAN {} for Network {}".format(network.vlanid, network.name))
            network_vars = ncs.template.Variables()
            network_vars.add("VLAN_ID", network.vlanid)
            network_vars.add("VLAN_NAME", network.name)
            for pair in switch_pairs:
                self.log.info("Adding VLAN for Pair: {} Primary {} Secondary {}".format(pair.name, pair.primary, pair.secondary))
                # PRIMARY
                network_vars.add("DEVICE_NAME", pair.primary)
                template.apply("vlan-new", network_vars)
                if pair.secondary:
                    # Secondary
                    network_vars.add("DEVICE_NAME", pair.secondary)
                    template.apply("vlan-new", network_vars)

            for switch in switches:
                self.log.info("Adding VLAN for Switch: {}".format(switch.device))
                network_vars.add("DEVICE_NAME", switch.device)
                template.apply("vlan-new", network_vars)

            # PHASE - Configure Layer 3 For Network
            # Check if layer3-on-fabric is configured for network
            if network.layer3_on_fabric:
                self.log.info("Configuring Layer 3 for {} IP Network {} ".format(network.name, network.network))
                ipnet = ipaddress.ip_network(network.network)
                hsrp_ipv4 = ipnet.network_address + 1
                primary_ipv4 = ipnet.network_address + 2
                secondary_ipv4 = ipnet.network_address + 3
                network_vars.add("VRFNAME", service.name)
                network_vars.add("HSRP_GROUP", 1)
                network_vars.add("HSRP_IPV4", hsrp_ipv4)
                if network.build_route_neighbors:
                    network_vars.add("BUILD_ROUTING_NEIGHBOR", "True")
                else:
                    network_vars.add("BUILD_ROUTING_NEIGHBOR", "")

                # PRIMARY
                network_vars.add("DEVICE_NAME", border_pair.primary)
                network_vars.add("SVI_IPV4", "{}/{}".format(primary_ipv4, ipnet.prefixlen))
                network_vars.add("HSRP_PRIORITY", 110)
                template.apply("vlan-layer3", network_vars)
                if network.dhcp_relay_address:
                    network_vars.add("DHCP_RELAY_ADDRESS", network.dhcp_relay_address)
                    self.log.info("Configuring DHCP Relay address {} for {} IP Network {} ".format(network.dhcp_relay_address, network.name, network.network))
                    template.apply("vlan-layer3-dhcp-relay", network_vars)

                if border_pair.secondary:
                    # Secondary
                    network_vars.add("DEVICE_NAME", border_pair.secondary)
                    network_vars.add("SVI_IPV4", "{}/{}".format(secondary_ipv4, ipnet.prefixlen))
                    network_vars.add("HSRP_PRIORITY", 90)
                    template.apply("vlan-layer3", network_vars)
                    if network.dhcp_relay_address:
                        network_vars.add("DHCP_RELAY_ADDRESS", network.dhcp_relay_address)
                        self.log.info("Configuring DHCP Relay address {} for {} IP Network {} ".format(network.dhcp_relay_address, network.name, network.network))
                        template.apply("vlan-layer3-dhcp-relay", network_vars)
            else:
                self.log.info("Skipping Layer 3 configuration in fabric for {} IP Network {} ".format(network.name, network.network))

            # PHASE Process Connections for Network
            # PHASE Switch Connections
            for switch in network.connections.switch:
                self.log.info("Adding Connections for Network {} on Switch {}".format(network.name, switch.device))
                network_vars.add("DEVICE_NAME", switch.device)
                switch_platform = {}
                switch_platform["name"] = root.devices.device[switch.device].platform.name
                switch_platform["version"] = root.devices.device[switch.device].platform.version
                switch_platform["model"] = root.devices.device[switch.device].platform.model
                self.log.info("Switch Platform Info: {}".format(switch_platform))

                # For old IOS that supported DTP, need to disable negotiation
                if (switch_platform["model"] != "NETSIM"
                        and switch_platform["name"] == "ios"
                        and int(switch_platform["version"][0:2]) < 16):
                    disable_trunk_negotiation = True
                else:
                    disable_trunk_negotiation = False
                network_vars.add("DISABLE_TRUNK_NEGOTIATION", disable_trunk_negotiation)
                network_vars.add("MTU_SIZE", "9216")

                # PHASE Interfaces
                for interface in switch.interface:
                    self.log.info("Configuring Interface {} for Network {} on Switch {}".format(interface.interface, network.name, switch.device))
                    network_vars.add("INTERFACE_ID", interface.interface)
                    network_vars.add("DESCRIPTION", interface.description)
                    network_vars.add("MODE", interface.mode)
                    network_vars.add("MTU_SIZE", "9216")
                    self.log.info("network_vars=", network_vars)
                    template.apply("tenant_network_interface", network_vars)

                # PHASE Port-Channels
                for port_channel in switch.port_channel:
                    self.log.info("Configuring PortChannel {} for Network {} on Switch {}".format(port_channel.portchannel_id, network.name, switch.device))
                    network_vars.add("PORTCHANNEL_ID", port_channel.portchannel_id)
                    network_vars.add("DESCRIPTION", port_channel.description)
                    network_vars.add("MODE", port_channel.mode)
                    network_vars.add("VPC", "")
                    self.log.info("network_vars=", network_vars)
                    template.apply("portchannel-interface", network_vars)

                    # PHASE Port-Channel Member Interfaces
                    for interface in port_channel.interface:
                        self.log.info("Adding Interface {} to Port-Channel {} on Network {} on Switch {}.".format(interface.interface, port_channel.portchannel_id, network.name, switch.device))
                        network_vars.add("INTERFACE_ID", interface.interface)
                        self.log.info("network_vars=", network_vars)
                        template.apply("portchannel-member-interface", network_vars)

            # PHASE Switch Pair connections
            for pair in network.connections.switch_pair:
                self.log.info("Adding Connections for Network {} on Switch Pair {}".format(network.name, pair.name))
                # Lookup Pair from Fabric
                # switch_pairs = root.vlan_fabric[service.fabric].switch_pair
                this_pair = root.vlan_fabric[service.fabric].switch_pair[pair.name]
                self.log.info("Primary {} Secondary {}".format(this_pair.primary, this_pair.secondary))

                # Nexus Leaf Pairs Always False
                disable_trunk_negotiation = False
                network_vars.add("DISABLE_TRUNK_NEGOTIATION", disable_trunk_negotiation)

                # PHASE Interfaces
                for interface in pair.interface:
                    self.log.info("Configuring Interface {} for Network {} on Pair {}".format(interface.interface, network.name, this_pair.name))
                    network_vars.add("INTERFACE_ID", interface.interface)
                    network_vars.add("DESCRIPTION", interface.description)
                    network_vars.add("MODE", interface.mode)
                    network_vars.add("MTU_SIZE", "9216")
                    # Primary
                    network_vars.add("DEVICE_NAME", this_pair.primary)
                    self.log.info("network_vars=", network_vars)
                    template.apply("tenant_network_interface", network_vars)
                    if this_pair.secondary:
                        # Secondary
                        network_vars.add("DEVICE_NAME", this_pair.secondary)
                        self.log.info("network_vars=", network_vars)
                        template.apply("tenant_network_interface", network_vars)

                # PHASE Port-Channels
                for port_channel in pair.port_channel:
                    self.log.info("Configuring Port-Channel {} for Network {} on Pair {}".format(port_channel.portchannel_id, network.name, this_pair.name))
                    network_vars.add("PORTCHANNEL_ID", port_channel.portchannel_id)
                    network_vars.add("DESCRIPTION", port_channel.description)
                    network_vars.add("MODE", port_channel.mode)
                    network_vars.add("MTU_SIZE", "9216")
                    network_vars.add("VPC", True)
                    # Primary
                    network_vars.add("DEVICE_NAME", this_pair.primary)
                    self.log.info("network_vars=", network_vars)
                    template.apply("portchannel-interface", network_vars)
                    # Secondary
                    network_vars.add("DEVICE_NAME", this_pair.secondary)
                    self.log.info("network_vars=", network_vars)
                    template.apply("portchannel-interface", network_vars)

                    # PHASE Port-Channel Member Interfaces
                    for interface in port_channel.interface:
                        self.log.info("Adding Interface {} to Port-Channel {} on Network {} on Pair {}.".format(interface.interface, port_channel.portchannel_id, network.name, this_pair.name))
                        network_vars.add("INTERFACE_ID", interface.interface)
                        # Primary
                        network_vars.add("DEVICE_NAME", this_pair.primary)
                        self.log.info("network_vars=", network_vars)
                        template.apply("portchannel-member-interface", network_vars)
                        # Secondary
                        network_vars.add("DEVICE_NAME", this_pair.secondary)
                        self.log.info("network_vars=", network_vars)
                        template.apply("portchannel-member-interface", network_vars)

            # PHASE Fabric Interconnects
            for fabric_interconnect in fabric_interconnects:
                self.log.info("Configuring Network {} on Fabric Interconnect {}".format(network.name, fabric_interconnect.device))
                ucs_vars = ncs.template.Variables()
                ucs_vars.add("DEVICE_NAME", fabric_interconnect.device)
                ucs_vars.add("VLAN_NAME", network.name)
                ucs_vars.add("VLAN_ID", network.vlanid)

                # PHASE - Add VLAN to Configuration
                self.log.info("Adding VLAN {} ({}) on Fabric Interconnect {}".format(network.name, network.vlanid, fabric_interconnect.device))
                self.log.info("ucs_vars=", ucs_vars)
                template.apply("ucs-vlan-setup", ucs_vars)

                # PHASE - Update vnic-templates
                for vnic_template in fabric_interconnect.vnic_template_trunks:
                    ucs_vars.add("UCS_ORG", vnic_template.org)
                    ucs_vars.add("UCS_VNIC_TEMPLATE", vnic_template.vnic_template)
                    self.log.info("Adding VLAN {} ({}) to vnic-template {}/{} on Fabric Interconnect {}".format(network.name, network.vlanid, vnic_template.org, vnic_template.vnic_template, fabric_interconnect.device))
                    self.log.info("ucs_vars=", ucs_vars)
                    template.apply("ucs-vnic-template-vlan-setup", ucs_vars)

            # PHASE - VMware Distributed Virtual Switch
            for vswitch in vswitches:
                self.log.info("Configuring Network {} on DVS: {}/{}/{}".format(network.name, vswitch.vcenter, vswitch.datacenter, vswitch.dvs))
                dvs_vars = ncs.template.Variables()
                dvs_vars.add("DEVICE_NAME", vswitch.vcenter)
                dvs_vars.add("VLAN_NAME", network.name)
                dvs_vars.add("VLAN_ID", network.vlanid)
                dvs_vars.add("VMWARE_DATACENTER", vswitch.datacenter)
                dvs_vars.add("VMWARE_DVS", vswitch.dvs)
                self.log.info("dvs_vars=", dvs_vars)
                template.apply("vmware-dvs-portprofile-setup", dvs_vars)

    # The pre_modification() and post_modification() callbacks are optional,
    # and are invoked outside FASTMAP. pre_modification() is invoked before
    # create, update, or delete of the service, as indicated by the enum
    # ncs_service_operation op parameter. Conversely
    # post_modification() is invoked after create, update, or delete
    # of the service. These functions can be useful e.g. for
    # allocations that should be stored and existing also when the
    # service instance is removed.

    # @Service.pre_lock_create
    # def cb_pre_lock_create(self, tctx, root, service, proplist):
    #     self.log.info('Service plcreate(service=', service._path, ')')

    # @Service.pre_modification
    # def cb_pre_modification(self, tctx, op, kp, root, proplist):
    #     self.log.info('Service premod(service=', kp, ')')

    # @Service.post_modification
    # def cb_post_modification(self, tctx, op, kp, root, proplist):
    #     self.log.info('Service premod(service=', kp, ')')
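# A minimal standalone sketch of the HSRP/SVI address math used in the
# layer-3 phase above; "10.10.10.0/24" is an assumed example value, not a
# project default.
#
# import ipaddress
#
# ipnet = ipaddress.ip_network("10.10.10.0/24")
# hsrp_ipv4 = ipnet.network_address + 1       # shared virtual gateway -> 10.10.10.1
# primary_ipv4 = ipnet.network_address + 2    # primary switch SVI     -> 10.10.10.2
# secondary_ipv4 = ipnet.network_address + 3  # secondary switch SVI   -> 10.10.10.3
# print("{}/{}".format(primary_ipv4, ipnet.prefixlen))  # 10.10.10.2/24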
[ "hank.preston@gmail.com" ]
hank.preston@gmail.com
1d7c655cada6cc584bc771570befda0a77c18d18
0f0570125ecbbdfda0acaed9d3ec0a297616f79a
/day4/part1.py
d4665159f88161442f8958dc01d42adf0a30d047
[]
no_license
na-wu/2019-aoc
3b3689d13cab22a4200f177fd6e28adada36a303
ad91fb704c00b590dcd89c1f67d9d1738b866fbd
refs/heads/master
2020-09-23T10:28:28.497778
2019-12-24T22:33:46
2019-12-24T22:33:46
225,477,008
0
0
null
null
null
null
UTF-8
Python
false
false
1,097
py
RANGE1 = 124075
RANGE2 = 580770


# Transform @param num into int array
# iteratively check if each element has an identical neighbour
# Return True immediately if found, presence of more identical neighbors ignored
def checkAdjacent(num: int) -> bool:
    arr = list(map(int, str(num)))
    for j in range(len(arr) - 1):
        if arr[j] == arr[j + 1]:
            return True
    return False


# Create 2 copies of @param num, and transform them into int arrays
# If sorting one does not change the array structure,
# it means they were initially sorted
def checkIncreasing(num: int) -> bool:
    expectedArr = list(map(int, str(num)))
    arr = list(map(int, str(num)))
    if sorted(arr) == expectedArr:
        return True
    else:
        return False


def foo():
    numPossibilities = 0
    for i in range(RANGE1, RANGE2):
        if checkAdjacent(i) and checkIncreasing(i):
            # If both conditions are satisfied, it is a possible password
            numPossibilities = numPossibilities + 1
    print(numPossibilities)


def main():
    foo()


if __name__ == '__main__':
    main()
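# A quick sanity check of the two predicates above (illustrative values only):
# 122345 has the adjacent pair "22" and its digits never decrease, so both hold;
# 223450 fails checkIncreasing because of the trailing 5 -> 0 drop;
# 123789 fails checkAdjacent because no digit repeats.
assert checkAdjacent(122345) and checkIncreasing(122345)
assert not checkIncreasing(223450)
assert not checkAdjacent(123789)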
[ "nwu1018@gmail.com" ]
nwu1018@gmail.com
b4f5e476d9ed960c5b7c75ae3306646d0f653c4a
b519ebf4af176a53036373ba7c59552ed8a86505
/modelo/datosVehiculo.py
003af032d1b96ea3042c0e9a1cfb6e9f5e5274d8
[]
no_license
cesarmcuellar/LavaOpita
f88dd260b306977c2bb92a5f95ede61eb1e04a89
70b0efa0b209b265fe93552b13c0fe5fd7c38c9c
refs/heads/main
2022-12-27T01:09:16.940067
2020-10-10T22:25:34
2020-10-10T22:25:34
301,786,583
0
0
null
null
null
null
UTF-8
Python
false
false
428
py
from flask_mysqldb import MySQL


class DatosVehiculo():
    def __init__(self, mysql):
        self.mysql = mysql
        self.cursor = self.mysql.connection.cursor()

    def consultarPorPlaca(self, placa):
        consulta = "select * from vehiculos where vehPlaca= %s"
        self.cursor.execute(consulta, (placa,))
        resultado = self.cursor.fetchone()
        self.cursor.close()
        return resultado
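# A minimal usage sketch, assuming a Flask app already configured for
# flask_mysqldb (the app object, connection settings, and the plate value
# below are illustrative assumptions, not part of this module):
#
# from flask import Flask
# from flask_mysqldb import MySQL
#
# app = Flask(__name__)
# mysql = MySQL(app)
# with app.app_context():
#     datos = DatosVehiculo(mysql)
#     vehiculo = datos.consultarPorPlaca("ABC123")  # one row or None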
[ "noreply@github.com" ]
cesarmcuellar.noreply@github.com
cf6a0c4833d16887ee9ee3e5afefb8ed33431c13
eacff46eda2c6b509449979a16002b96d4645d8e
/Collections-a-installer/community-general-2.4.0/tests/integration/targets/launchd/files/ansible_test_service.py
87a23fc47d816bb4b2deacd93a3bcfb45fbf1a9f
[ "MIT", "GPL-3.0-only", "GPL-3.0-or-later" ]
permissive
d-amien-b/simple-getwordpress
5e6d4d15d5f87124ab591e46b63fec552998fdc3
da90d515a0aa837b633d50db4d91d22b031c04a2
refs/heads/master
2023-04-08T22:13:37.347545
2021-04-06T09:25:51
2021-04-06T09:25:51
351,698,069
0
0
MIT
2021-03-31T16:16:45
2021-03-26T07:30:00
HTML
UTF-8
Python
false
false
594
py
#!/usr/bin/env python

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import sys

if __name__ == '__main__':
    if sys.version_info[0] >= 3:
        import http.server
        import socketserver
        PORT = int(sys.argv[1])

        Handler = http.server.SimpleHTTPRequestHandler
        httpd = socketserver.TCPServer(("", PORT), Handler)
        httpd.serve_forever()
    else:
        import mimetypes
        mimetypes.init()
        mimetypes.add_type('application/json', '.json')
        import SimpleHTTPServer
        SimpleHTTPServer.test()
[ "test@burdo.fr" ]
test@burdo.fr
655fc60113486788cda8dd340498234bbc42f5bc
0172c529b9490f18a40d3f068dd04d994fd164a7
/tests/test_util.py
fb6868a824e17ab1acdaa96c7d119a208b4ec171
[]
no_license
tillahoffmann/variational_bayes
c8cbc5859d0a3ce9600d6f4f5551686a4865af47
590903fb4f2c39bd501ec1e42a2b1267439d2ffb
refs/heads/master
2021-01-20T01:22:31.908349
2018-08-21T12:15:19
2018-08-21T12:15:19
89,263,469
1
1
null
2017-08-22T13:47:53
2017-04-24T16:34:01
Python
UTF-8
Python
false
false
1,107
py
import itertools as it
import numpy as np
import pytest
import variational_bayes as vb


@pytest.mark.parametrize('shape', [10, (3, 7)])
def test_softmax(shape):
    x = np.random.normal(0, 1, shape)
    proba = vb.softmax(x)
    np.testing.assert_array_less(0, proba)
    np.testing.assert_allclose(np.sum(proba, axis=-1), 1)


@pytest.mark.parametrize('num_blocks, block_size, offset',
                         it.product([1, 3, 7], [1, 5, 9], [0, 11]))
def test_pack_unpack_diag_roundtrip(num_blocks, block_size, offset):
    blocks = np.random.normal(0, 1, (num_blocks, block_size, block_size))
    packed = vb.pack_block_diag(blocks, offset)
    unpacked = vb.unpack_block_diag(packed, block_size, offset)
    np.testing.assert_allclose(blocks, unpacked)


def test_onehot():
    z = np.random.randint(0, 5, 100)
    onehot = vb.onehot(z, 5)
    np.testing.assert_equal(np.argmax(onehot, 1), z)


def test_cluster_order():
    z = np.random.randint(0, 5, 100)
    onehot = vb.onehot(z)
    order = np.random.permutation(5)
    np.testing.assert_equal(vb.cluster_order(onehot[:, order], onehot), order)
[ "tillahoffmann@gmail.com" ]
tillahoffmann@gmail.com
6472b56dba93e6e79962129ca72af412695bb634
ae4441d5f79a46dbd99a575bd814842f5cbbc181
/django_azure_linebot/urls.py
f4c5b24b1eab528b630f191685df68c6840a7e6e
[]
no_license
chrissmart/django_azure_linebot
938ee389316f99e897f0767c3fd9b003ae2e2d7b
1c2d88ea8c55e64dedb880dc45c9cd885e6a301e
refs/heads/master
2021-01-01T19:58:04.185177
2017-07-29T12:38:09
2017-07-29T12:38:09
98,731,457
0
0
null
null
null
null
UTF-8
Python
false
false
777
py
"""django_azure_linebot URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.11/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url from django.contrib import admin urlpatterns = [ url(r'^admin/', admin.site.urls), ]
[ "mr.clare007@hotmail.com" ]
mr.clare007@hotmail.com
8f80e791626212567e6315fb22f46ac16819c206
59f2c160c6497ad60c5eb30ba9408942b46aabf1
/smartcab/simulator.py
a6cd662855505b76698d98072e3aca0cd2ba276a
[]
no_license
atrij/ai_cabdriver
bfd11568471e9dc4d5e8e252e11d87526dae09ae
5cbc5e6305ceaf1ff9eac10720de1d2f6a70ba29
refs/heads/master
2021-01-21T15:03:55.952261
2017-06-25T18:04:10
2017-06-25T18:04:10
95,374,182
2
0
null
null
null
null
UTF-8
Python
false
false
25,166
py
###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module="matplotlib")
###########################################

import os
import time
import random
import importlib
import csv


class Simulator(object):
    """Simulates agents in a dynamic smartcab environment.

    Uses PyGame to display GUI, if available.
    """

    colors = {
        'black'   : (  0,   0,   0),
        'white'   : (255, 255, 255),
        'red'     : (255,   0,   0),
        'green'   : (  0, 255,   0),
        'dgreen'  : (  0, 228,   0),
        'blue'    : (  0,   0, 255),
        'cyan'    : (  0, 200, 200),
        'magenta' : (200,   0, 200),
        'yellow'  : (255, 255,   0),
        'mustard' : (200, 200,   0),
        'orange'  : (255, 128,   0),
        'maroon'  : (200,   0,   0),
        'crimson' : (128,   0,   0),
        'gray'    : (155, 155, 155)
    }

    def __init__(self, env, size=None, update_delay=2.0, display=True, log_metrics=False, optimized=False):
        self.env = env
        self.size = size if size is not None else ((self.env.grid_size[0] + 1) * self.env.block_size, (self.env.grid_size[1] + 2) * self.env.block_size)
        self.width, self.height = self.size
        self.road_width = 44

        self.bg_color = self.colors['gray']
        self.road_color = self.colors['black']
        self.line_color = self.colors['mustard']
        self.boundary = self.colors['black']
        self.stop_color = self.colors['crimson']

        self.quit = False
        self.start_time = None
        self.current_time = 0.0
        self.last_updated = 0.0
        self.update_delay = update_delay  # duration between each step (in seconds)

        self.display = display
        if self.display:
            try:
                self.pygame = importlib.import_module('pygame')
                self.pygame.init()
                self.screen = self.pygame.display.set_mode(self.size)

                self._logo = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "logo.png")), (self.road_width, self.road_width))
                self._ew = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "east-west.png")), (self.road_width, self.road_width))
                self._ns = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "north-south.png")), (self.road_width, self.road_width))

                self.frame_delay = max(1, int(self.update_delay * 1000))  # delay between GUI frames in ms (min: 1)
                self.agent_sprite_size = (32, 32)
                self.primary_agent_sprite_size = (42, 42)
                self.agent_circle_radius = 20  # radius of circle, when using simple representation
                for agent in self.env.agent_states:
                    if agent.color == 'white':
                        agent._sprite = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "car-{}.png".format(agent.color))), self.primary_agent_sprite_size)
                    else:
                        agent._sprite = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "car-{}.png".format(agent.color))), self.agent_sprite_size)
                    agent._sprite_size = (agent._sprite.get_width(), agent._sprite.get_height())

                self.font = self.pygame.font.Font(None, 20)
                self.paused = False
            except ImportError as e:
                self.display = False
                print "Simulator.__init__(): Unable to import pygame; display disabled.\n{}: {}".format(e.__class__.__name__, e)
            except Exception as e:
                self.display = False
                print "Simulator.__init__(): Error initializing GUI objects; display disabled.\n{}: {}".format(e.__class__.__name__, e)

        # Setup metrics to report
        self.log_metrics = log_metrics
        self.optimized = optimized
        if self.log_metrics:
            a = self.env.primary_agent

            # Set log files
            if a.learning:
                if self.optimized:  # Whether the user is optimizing the parameters and decay functions
                    self.log_filename = os.path.join("logs", "sim_improved-learning.csv")
                    self.table_filename = os.path.join("logs", "sim_improved-learning.txt")
                else:
                    self.log_filename = os.path.join("logs", "sim_default-learning.csv")
                    self.table_filename = os.path.join("logs", "sim_default-learning.txt")

                self.table_file = open(self.table_filename, 'wb')
            else:
                self.log_filename = os.path.join("logs", "sim_no-learning.csv")

            self.log_fields = ['trial', 'testing', 'parameters', 'initial_deadline', 'final_deadline', 'net_reward', 'actions', 'success']
            self.log_file = open(self.log_filename, 'wb')
            self.log_writer = csv.DictWriter(self.log_file, fieldnames=self.log_fields)
            self.log_writer.writeheader()

    def run(self, tolerance=0.05, n_test=0):
        """ Run a simulation of the environment.

        'tolerance' is the minimum epsilon necessary to begin testing (if enabled)
        'n_test' is the number of testing trials simulated

        Note that the minimum number of training trials is always 20. """

        self.quit = False

        # Get the primary agent
        a = self.env.primary_agent

        total_trials = 1
        testing = False
        trial = 1

        while True:

            # Flip testing switch
            if not testing:
                if total_trials > 20:  # Must complete minimum 20 training trials
                    if a.learning:
                        if a.epsilon < tolerance:  # assumes epsilon decays to 0
                            testing = True
                            trial = 1
                    else:
                        testing = True
                        trial = 1

            # Break if we've reached the limit of testing trials
            else:
                if trial > n_test:
                    break

            # Pretty print to terminal
            print
            print "/-------------------------"
            if testing:
                print "| Testing trial {}".format(trial)
            else:
                print "| Training trial {}".format(trial)
            print "\-------------------------"
            print

            self.env.reset(testing, trial)
            self.current_time = 0.0
            self.last_updated = 0.0
            self.start_time = time.time()
            while True:
                try:
                    # Update current time
                    self.current_time = time.time() - self.start_time

                    # Handle GUI events
                    if self.display:
                        for event in self.pygame.event.get():
                            if event.type == self.pygame.QUIT:
                                self.quit = True
                            elif event.type == self.pygame.KEYDOWN:
                                if event.key == 27:  # Esc
                                    self.quit = True
                                elif event.unicode == u' ':
                                    self.paused = True

                        if self.paused:
                            self.pause()

                    # Update environment
                    if self.current_time - self.last_updated >= self.update_delay:
                        self.env.step()
                        self.last_updated = self.current_time

                    # Render text
                    self.render_text(trial, testing)

                    # Render GUI and sleep
                    if self.display:
                        self.render(trial, testing)
                        self.pygame.time.wait(self.frame_delay)

                except KeyboardInterrupt:
                    self.quit = True
                finally:
                    if self.quit or self.env.done:
                        break

            if self.quit:
                break

            # Collect metrics from trial
            if self.log_metrics:
                self.log_writer.writerow({
                    'trial': trial,
                    'testing': self.env.trial_data['testing'],
                    'parameters': self.env.trial_data['parameters'],
                    'initial_deadline': self.env.trial_data['initial_deadline'],
                    'final_deadline': self.env.trial_data['final_deadline'],
                    'net_reward': self.env.trial_data['net_reward'],
                    'actions': self.env.trial_data['actions'],
                    'success': self.env.trial_data['success']
                })

            # Trial finished
            if self.env.success == True:
                print "\nTrial Completed!"
                print "Agent reached the destination."
            else:
                print "\nTrial Aborted!"
                print "Agent did not reach the destination."

            # Increment
            total_trials = total_trials + 1
            trial = trial + 1

        # Clean up
        if self.log_metrics:
            if a.learning:
                f = self.table_file

                f.write("/-----------------------------------------\n")
                f.write("| State-action rewards from Q-Learning\n")
                f.write("\-----------------------------------------\n\n")

                for state in a.Q:
                    f.write("{}\n".format(state))
                    for action, reward in a.Q[state].iteritems():
                        f.write(" -- {} : {:.2f}\n".format(action, reward))
                    f.write("\n")
                self.table_file.close()

            self.log_file.close()

        print "\nSimulation ended. . . "

        # Report final metrics
        if self.display:
            self.pygame.display.quit()  # shut down pygame

    def render_text(self, trial, testing=False):
        """ This is the non-GUI render display of the simulation.
            Simulated trial data will be rendered in the terminal/command prompt. """

        status = self.env.step_data
        if status and status['waypoint'] is not None:  # Continuing the trial

            # Previous State
            if status['state']:
                print "Agent previous state: {}".format(status['state'])
            else:
                print "!! Agent state has not been updated!"

            # Result
            if status['violation'] == 0:  # Legal
                if status['waypoint'] == status['action']:  # Followed waypoint
                    print "Agent followed the waypoint {}. (rewarded {:.2f})".format(status['action'], status['reward'])
                elif status['action'] == None:
                    if status['light'] == 'red':  # Stuck at red light
                        print "Agent properly idled at a red light. (rewarded {:.2f})".format(status['reward'])
                    else:
                        print "Agent idled at a green light with oncoming traffic. (rewarded {:.2f})".format(status['reward'])
                else:  # Did not follow waypoint
                    print "Agent drove {} instead of {}. (rewarded {:.2f})".format(status['action'], status['waypoint'], status['reward'])
            else:  # Illegal
                if status['violation'] == 1:  # Minor violation
                    print "Agent idled at a green light with no oncoming traffic. (rewarded {:.2f})".format(status['reward'])
                elif status['violation'] == 2:  # Major violation
                    print "Agent attempted driving {} through a red light. (rewarded {:.2f})".format(status['action'], status['reward'])
                elif status['violation'] == 3:  # Minor accident
                    print "Agent attempted driving {} through traffic and caused a minor accident. (rewarded {:.2f})".format(status['action'], status['reward'])
                elif status['violation'] == 4:  # Major accident
                    print "Agent attempted driving {} through a red light with traffic and caused a major accident. (rewarded {:.2f})".format(status['action'], status['reward'])

            # Time Remaining
            if self.env.enforce_deadline:
                time = (status['deadline'] - 1) * 100.0 / (status['t'] + status['deadline'])
                print "{:.0f}% of time remaining to reach destination.".format(time)
            else:
                print "Agent not enforced to meet deadline."

        # Starting new trial
        else:
            a = self.env.primary_agent
            print "Simulating trial. . . "
            if a.learning:
                print "epsilon = {:.4f}; alpha = {:.4f}".format(a.epsilon, a.alpha)
            else:
                print "Agent not set to learn."

    def render(self, trial, testing=False):
        """ This is the GUI render display of the simulation.
            Supplementary trial data can be found from render_text. """

        # Reset the screen.
        self.screen.fill(self.bg_color)

        # Draw elements
        # * Static elements

        # Boundary
        self.pygame.draw.rect(self.screen, self.boundary, ((self.env.bounds[0] - self.env.hang) * self.env.block_size, (self.env.bounds[1] - self.env.hang) * self.env.block_size, (self.env.bounds[2] + self.env.hang / 3) * self.env.block_size, (self.env.bounds[3] - 1 + self.env.hang / 3) * self.env.block_size), 4)

        for road in self.env.roads:
            # Road
            self.pygame.draw.line(self.screen, self.road_color, (road[0][0] * self.env.block_size, road[0][1] * self.env.block_size), (road[1][0] * self.env.block_size, road[1][1] * self.env.block_size), self.road_width)
            # Center line
            self.pygame.draw.line(self.screen, self.line_color, (road[0][0] * self.env.block_size, road[0][1] * self.env.block_size), (road[1][0] * self.env.block_size, road[1][1] * self.env.block_size), 2)

        for intersection, traffic_light in self.env.intersections.iteritems():
            self.pygame.draw.circle(self.screen, self.road_color, (intersection[0] * self.env.block_size, intersection[1] * self.env.block_size), self.road_width / 2)
            if traffic_light.state:  # North-South is open
                self.screen.blit(self._ns, self.pygame.rect.Rect(intersection[0] * self.env.block_size - self.road_width / 2, intersection[1] * self.env.block_size - self.road_width / 2, intersection[0] * self.env.block_size + self.road_width, intersection[1] * self.env.block_size + self.road_width / 2))
                self.pygame.draw.line(self.screen, self.stop_color, (intersection[0] * self.env.block_size - self.road_width / 2, intersection[1] * self.env.block_size - self.road_width / 2), (intersection[0] * self.env.block_size - self.road_width / 2, intersection[1] * self.env.block_size + self.road_width / 2), 2)
                self.pygame.draw.line(self.screen, self.stop_color, (intersection[0] * self.env.block_size + self.road_width / 2 + 1, intersection[1] * self.env.block_size - self.road_width / 2), (intersection[0] * self.env.block_size + self.road_width / 2 + 1, intersection[1] * self.env.block_size + self.road_width / 2), 2)
            else:
                self.screen.blit(self._ew, self.pygame.rect.Rect(intersection[0] * self.env.block_size - self.road_width / 2, intersection[1] * self.env.block_size - self.road_width / 2, intersection[0] * self.env.block_size + self.road_width, intersection[1] * self.env.block_size + self.road_width / 2))
                self.pygame.draw.line(self.screen, self.stop_color, (intersection[0] * self.env.block_size - self.road_width / 2, intersection[1] * self.env.block_size - self.road_width / 2), (intersection[0] * self.env.block_size + self.road_width / 2, intersection[1] * self.env.block_size - self.road_width / 2), 2)
                self.pygame.draw.line(self.screen, self.stop_color, (intersection[0] * self.env.block_size + self.road_width / 2, intersection[1] * self.env.block_size + self.road_width / 2 + 1), (intersection[0] * self.env.block_size - self.road_width / 2, intersection[1] * self.env.block_size + self.road_width / 2 + 1), 2)

        # * Dynamic elements
        self.font = self.pygame.font.Font(None, 20)
        for agent, state in self.env.agent_states.iteritems():
            # Compute precise agent location here (back from the intersection some)
            agent_offset = (2 * state['heading'][0] * self.agent_circle_radius + self.agent_circle_radius * state['heading'][1] * 0.5,
                            2 * state['heading'][1] * self.agent_circle_radius - self.agent_circle_radius * state['heading'][0] * 0.5)
            agent_pos = (state['location'][0] * self.env.block_size - agent_offset[0], state['location'][1] * self.env.block_size - agent_offset[1])
            agent_color = self.colors[agent.color]

            if hasattr(agent, '_sprite') and agent._sprite is not None:
                # Draw agent sprite (image), properly rotated
                rotated_sprite = agent._sprite if state['heading'] == (1, 0) else self.pygame.transform.rotate(agent._sprite, 180 if state['heading'][0] == -1 else state['heading'][1] * -90)
                self.screen.blit(rotated_sprite, self.pygame.rect.Rect(agent_pos[0] - agent._sprite_size[0] / 2, agent_pos[1] - agent._sprite_size[1] / 2, agent._sprite_size[0], agent._sprite_size[1]))
            else:
                # Draw simple agent (circle with a short line segment poking out to indicate heading)
                self.pygame.draw.circle(self.screen, agent_color, agent_pos, self.agent_circle_radius)
                self.pygame.draw.line(self.screen, agent_color, agent_pos, state['location'], self.road_width)

            if state['destination'] is not None:
                self.screen.blit(self._logo, self.pygame.rect.Rect(state['destination'][0] * self.env.block_size - self.road_width / 2, state['destination'][1] * self.env.block_size - self.road_width / 2, state['destination'][0] * self.env.block_size + self.road_width / 2, state['destination'][1] * self.env.block_size + self.road_width / 2))

        # * Overlays
        self.font = self.pygame.font.Font(None, 50)
        if testing:
            self.screen.blit(self.font.render("Testing Trial %s" % (trial), True, self.colors['black'], self.bg_color), (10, 10))
        else:
            self.screen.blit(self.font.render("Training Trial %s" % (trial), True, self.colors['black'], self.bg_color), (10, 10))

        self.font = self.pygame.font.Font(None, 30)

        # Status text about each step
        status = self.env.step_data
        if status:

            # Previous State
            if status['state']:
                self.screen.blit(self.font.render("Previous State: {}".format(status['state']), True, self.colors['white'], self.bg_color), (350, 10))
            if not status['state']:
                self.screen.blit(self.font.render("!! Agent state not updated!", True, self.colors['maroon'], self.bg_color), (350, 10))

            # Action
            if status['violation'] == 0:  # Legal
                if status['action'] == None:
                    self.screen.blit(self.font.render("No action taken. (rewarded {:.2f})".format(status['reward']), True, self.colors['dgreen'], self.bg_color), (350, 40))
                else:
                    self.screen.blit(self.font.render("Agent drove {}. (rewarded {:.2f})".format(status['action'], status['reward']), True, self.colors['dgreen'], self.bg_color), (350, 40))
            else:  # Illegal
                if status['action'] == None:
                    self.screen.blit(self.font.render("No action taken. (rewarded {:.2f})".format(status['reward']), True, self.colors['maroon'], self.bg_color), (350, 40))
                else:
                    self.screen.blit(self.font.render("{} attempted (rewarded {:.2f})".format(status['action'], status['reward']), True, self.colors['maroon'], self.bg_color), (350, 40))

            # Result
            if status['violation'] == 0:  # Legal
                if status['waypoint'] == status['action']:  # Followed waypoint
                    self.screen.blit(self.font.render("Agent followed the waypoint!", True, self.colors['dgreen'], self.bg_color), (350, 70))
                elif status['action'] == None:
                    if status['light'] == 'red':  # Stuck at a red light
                        self.screen.blit(self.font.render("Agent idled at a red light!", True, self.colors['dgreen'], self.bg_color), (350, 70))
                    else:
                        self.screen.blit(self.font.render("Agent idled at a green light with oncoming traffic.", True, self.colors['mustard'], self.bg_color), (350, 70))
                else:  # Did not follow waypoint
                    self.screen.blit(self.font.render("Agent did not follow the waypoint.", True, self.colors['mustard'], self.bg_color), (350, 70))
            else:  # Illegal
                if status['violation'] == 1:  # Minor violation
                    self.screen.blit(self.font.render("There was a green light with no oncoming traffic.", True, self.colors['maroon'], self.bg_color), (350, 70))
                elif status['violation'] == 2:  # Major violation
                    self.screen.blit(self.font.render("There was a red light with no traffic.", True, self.colors['maroon'], self.bg_color), (350, 70))
                elif status['violation'] == 3:  # Minor accident
                    self.screen.blit(self.font.render("There was traffic with right-of-way.", True, self.colors['maroon'], self.bg_color), (350, 70))
                elif status['violation'] == 4:  # Major accident
                    self.screen.blit(self.font.render("There was a red light with traffic.", True, self.colors['maroon'], self.bg_color), (350, 70))

            # Time Remaining
            if self.env.enforce_deadline:
                time = (status['deadline'] - 1) * 100.0 / (status['t'] + status['deadline'])
                self.screen.blit(self.font.render("{:.0f}% of time remaining to reach destination.".format(time), True, self.colors['black'], self.bg_color), (350, 100))
            else:
                self.screen.blit(self.font.render("Agent not enforced to meet deadline.", True, self.colors['black'], self.bg_color), (350, 100))

            # Denote whether a trial was a success or failure
            if (state['destination'] != state['location'] and state['deadline'] > 0) or (self.env.enforce_deadline is not True and state['destination'] != state['location']):
                self.font = self.pygame.font.Font(None, 40)
                if self.env.success == True:
                    self.screen.blit(self.font.render("Previous Trial: Success", True, self.colors['dgreen'], self.bg_color), (10, 50))
                if self.env.success == False:
                    self.screen.blit(self.font.render("Previous Trial: Failure", True, self.colors['maroon'], self.bg_color), (10, 50))

                if self.env.primary_agent.learning:
                    self.font = self.pygame.font.Font(None, 22)
                    self.screen.blit(self.font.render("epsilon = {:.4f}".format(self.env.primary_agent.epsilon), True, self.colors['black'], self.bg_color), (10, 80))
                    self.screen.blit(self.font.render("alpha = {:.4f}".format(self.env.primary_agent.alpha), True, self.colors['black'], self.bg_color), (10, 95))

        # Reset status text
        else:
            self.pygame.rect.Rect(350, 10, self.width, 200)
            self.font = self.pygame.font.Font(None, 40)
            self.screen.blit(self.font.render("Simulating trial. . .", True, self.colors['white'], self.bg_color), (400, 60))

        # Flip buffers
        self.pygame.display.flip()

    def pause(self):
        """ When the GUI is enabled, this function will pause the simulation. """

        abs_pause_time = time.time()
        self.font = self.pygame.font.Font(None, 30)
        pause_text = "Simulation Paused. Press any key to continue. . ."
        self.screen.blit(self.font.render(pause_text, True, self.colors['red'], self.bg_color), (400, self.height - 30))
        self.pygame.display.flip()
        print pause_text
        while self.paused:
            for event in self.pygame.event.get():
                if event.type == self.pygame.KEYDOWN:
                    self.paused = False
            self.pygame.time.wait(self.frame_delay)
        self.screen.blit(self.font.render(pause_text, True, self.bg_color, self.bg_color), (400, self.height - 30))
        self.start_time += (time.time() - abs_pause_time)
[ "atrij.singhal@gmail.com" ]
atrij.singhal@gmail.com
acef9f70b1845e351623099172b72e8864af5bd7
ca1342a596280070a1b1f7f702f15dc382569eb7
/guessingGame.py
4c66b69ad167916d11659f4f074c3cff3383a1a9
[]
no_license
Lakshya7312/python_number_guesser
0de0e939f86f6bd66232a38ec589a08df889e343
0a7156074de4506ab9374befc7822d76840aeec5
refs/heads/main
2023-02-26T21:33:39.038699
2021-02-02T08:39:44
2021-02-02T08:39:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
945
py
# Import the random module for generating random numbers
import random

# Message to start game & instructions
print("\nA number between 1 and 9 has been generated!\nYou have only 5 chances to guess the number!\nStart Guessing!\n")

# Initializing variables
number = random.randint(1, 9)
guessCount = 0
guess = 0

# The guessing loop: stop early on a correct guess, otherwise give a hint
while guessCount < 5:
    guess = int(input("Enter your guess: "))
    guessCount += 1
    if guess == number:
        break
    if number > guess:
        print("\nYour guess is smaller than the number, try guessing higher!\n")
    else:
        print("\nYour guess is higher than the number, try guessing smaller!\n")

if guess == number:
    print("\nYaY! You guessed the number in " + str(guessCount) + " try(s)!")
else:
    print("Your 5 tries are over! You couldn't guess the number\nThe number was " + str(number))
[ "noreply@github.com" ]
Lakshya7312.noreply@github.com
19e5431649b0241c81c88ff498959a3fad797237
4f179fdd48108020f49064be6686abcaac69d1ef
/Medium/11_container_with_most_water.py
95dfcdd654e6cd1fec0561ba2c9be6f804acfe7a
[]
no_license
RuchitDoshi/LeetCode_Practice
b1e4fc64b9e8b5b60b1d4c115d7f1477b83fa6dc
48dd00abc4b71e83b475ecdac23bc3ddbe55641e
refs/heads/master
2023-03-04T07:45:28.978099
2021-02-14T04:46:14
2021-02-14T04:46:14
283,289,648
0
0
null
null
null
null
UTF-8
Python
false
false
467
py
class Solution:
    def maxArea(self, height) -> int:
        # Two-pointer scan: start from the widest container and always move the
        # shorter wall inward, since moving the taller wall can never increase
        # the area.
        area = 0
        head = 0
        tail = len(height) - 1
        while head != tail:
            if height[head] < height[tail]:
                temp = (tail - head) * height[head]
                head += 1
            else:
                temp = (tail - head) * height[tail]
                tail -= 1
            if temp > area:
                area = temp
        return area
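# Example: for the classic LeetCode input the best pair is the walls of
# height 8 and 7 at distance 7, giving an area of 49.
assert Solution().maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7]) == 49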
[ "doshiruchit12@gmail.com" ]
doshiruchit12@gmail.com
4cbf6e3fcafd24fc240850a479e41ddfe6d770ac
d5b339d5b71c2d103b186ed98167b0c9488cff09
/marvin/cloudstackAPI/deleteCondition.py
e6c1d13261e1a92194f4e5a345cf1351557e1bd8
[ "Apache-2.0" ]
permissive
maduhu/marvin
3e5f9b6f797004bcb8ad1d16c7d9c9e26a5e63cc
211205ae1da4e3f18f9a1763f0f8f4a16093ddb0
refs/heads/master
2020-12-02T17:45:35.685447
2017-04-03T11:32:11
2017-04-03T11:32:11
null
0
0
null
null
null
null
UTF-8
Python
false
false
705
py
"""Removes a condition""" from baseCmd import * from baseResponse import * class deleteConditionCmd (baseCmd): typeInfo = {} def __init__(self): self.isAsync = "true" """the ID of the condition.""" """Required""" self.id = None self.typeInfo['id'] = 'uuid' self.required = ["id", ] class deleteConditionResponse (baseResponse): typeInfo = {} def __init__(self): """any text associated with the success or failure""" self.displaytext = None self.typeInfo['displaytext'] = 'string' """true if operation is executed successfully""" self.success = None self.typeInfo['success'] = 'boolean'
[ "int-mccd_jenkins@schubergphilis.com" ]
int-mccd_jenkins@schubergphilis.com
9fe7a328b27380a9afc1f19106fa9edd8aa1033c
21208873652ce9a35035801cea488004e337b07b
/data_loader/__init__.py
784c4dc41287ec0e8680637c3b93983f20eae44f
[ "Apache-2.0" ]
permissive
zlszhonglongshen/crnn.pytorch
55321a6764a6143be7ab9d2c6b3bcafcdd9470e7
bf7a7c62376eee93943ca7c68e88e3d563c09aa8
refs/heads/master
2022-11-07T22:57:28.983335
2020-06-19T03:01:35
2020-06-19T03:01:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,398
py
# -*- coding: utf-8 -*-
# @Time    : 18-11-16 5:46 PM
# @Author  : zhoujun
import copy
from torch.utils.data import DataLoader
from torchvision import transforms


def get_dataset(data_path, module_name, transform, dataset_args):
    """
    Build the training dataset.

    :param data_path: list of dataset files; each file stores one sample per
        line in the format 'path/to/img\tlabel'
    :param module_name: name of the custom dataset class to use; currently
        only data_loaders.ImageDataset is supported
    :param transform: the transforms applied to this dataset
    :param dataset_args: keyword arguments passed to module_name
    :return: the corresponding Dataset object if data_path is not empty,
        otherwise None
    """
    from . import dataset
    s_dataset = getattr(dataset, module_name)(transform=transform, data_path=data_path, **dataset_args)
    return s_dataset


def get_transforms(transforms_config):
    tr_list = []
    for item in transforms_config:
        if 'args' not in item:
            args = {}
        else:
            args = item['args']
        cls = getattr(transforms, item['type'])(**args)
        tr_list.append(cls)
    tr_list = transforms.Compose(tr_list)
    return tr_list


def get_dataloader(module_config, num_label):
    if module_config is None:
        return None
    config = copy.deepcopy(module_config)
    dataset_args = config['dataset']['args']
    dataset_args['num_label'] = num_label
    if 'transforms' in dataset_args:
        img_transforms = get_transforms(dataset_args.pop('transforms'))
    else:
        img_transforms = None
    # Build the dataset(s)
    dataset_name = config['dataset']['type']
    data_path_list = dataset_args.pop('data_path')
    if 'data_ratio' in dataset_args:
        data_ratio = dataset_args.pop('data_ratio')
    else:
        data_ratio = [1.0]

    _dataset_list = []
    for data_path in data_path_list:
        _dataset_list.append(get_dataset(data_path=data_path, module_name=dataset_name, dataset_args=dataset_args, transform=img_transforms))
    # Note: data_ratio was popped from dataset_args above, so it must be
    # compared directly (indexing dataset_args here would raise a KeyError).
    if len(data_ratio) > 1 and len(data_ratio) == len(_dataset_list):
        from . import dataset
        loader = dataset.Batch_Balanced_Dataset(dataset_list=_dataset_list, ratio_list=data_ratio, loader_args=config['loader'])
    else:
        _dataset = _dataset_list[0]
        loader = DataLoader(dataset=_dataset, **config['loader'])
        loader.dataset_len = len(_dataset)
    return loader
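# A minimal illustration of the module_config shape that get_dataloader
# expects (the paths, transform, and loader values below are assumptions for
# illustration, not project defaults):
#
# config = {
#     'dataset': {
#         'type': 'ImageDataset',
#         'args': {
#             'data_path': ['./data/train.txt'],
#             'transforms': [{'type': 'ToTensor'}],
#         },
#     },
#     'loader': {'batch_size': 16, 'shuffle': True, 'num_workers': 4},
# }
# train_loader = get_dataloader(config, num_label=37)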
[ "572459439@qq.com" ]
572459439@qq.com
b1595331ebd43ebd2cf3e52cfd1d6589b83e28b2
391dbe903b191fd2d439947a5bf5f73d3a19db7a
/pyocd/utility/hex.py
01040beddb958dc07d06ca5ef2fef96a41ca14c0
[ "MIT" ]
permissive
XIVN1987/RTTView
eb45b2e5c5d7fed3178980f5f118e91af640aa5f
05d237f0baa8dd1107018a9d560eb205c6e5432e
refs/heads/master
2023-03-06T07:52:47.345165
2023-02-08T09:53:43
2023-02-08T09:53:43
138,362,483
80
27
MIT
2021-10-19T05:09:31
2018-06-23T01:40:08
Python
UTF-8
Python
false
false
1,689
py
# pyOCD debugger
# Copyright (c) 2018 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys


def format_hex_width(value, width):
    if width == 8:
        return "%02x" % value
    elif width == 16:
        return "%04x" % value
    elif width == 32:
        return "%08x" % value
    else:
        raise ValueError("unrecognized register width (%d)" % width)


def dump_hex_data(data, startAddress=0, width=8, output=None):
    if output is None:
        output = sys.stdout
    i = 0
    while i < len(data):
        output.write("%08x: " % (startAddress + (i * (width // 8))))

        while i < len(data):
            d = data[i]
            i += 1
            if width == 8:
                output.write("%02x " % d)
                if i % 4 == 0:
                    output.write(" ")
                if i % 16 == 0:
                    break
            elif width == 16:
                output.write("%04x " % d)
                if i % 8 == 0:
                    break
            elif width == 32:
                output.write("%08x " % d)
                if i % 4 == 0:
                    break
        output.write("\n")
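# Example: dumping four 32-bit words starting at 0x20000000 prints one row
# of four words (illustrative values):
#
#   dump_hex_data([0xdeadbeef, 1, 2, 3], startAddress=0x20000000, width=32)
#   # -> 20000000: deadbeef 00000001 00000002 00000003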
[ "XIVN1987@163.com" ]
XIVN1987@163.com
2f6d6fbdfda9f1bfcda1e71694f8a0666de5b492
8c0674a5e31d4de10235e6533145ab2e15a834d9
/0x03-python-data_structures/8-multiple_returns.py
43d1dafa6c062b66ad3d561443d9c24e91bf0136
[]
no_license
jdanielue/holbertonschool-higher_level_programming
1815f86b15490998c7ca7a9a22450c8cb6968f60
aff477e3c7629da333e0675395e9a24fdaf4dc73
refs/heads/master
2023-04-16T23:01:28.961579
2021-05-06T13:48:48
2021-05-06T13:48:48
319,405,397
0
0
null
null
null
null
UTF-8
Python
false
false
155
py
#!/usr/bin/python3
def multiple_returns(sentence):
    if len(sentence) == 0:
        return 0, None
    return len(sentence), sentence[0]
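# Example: the two return values unpack directly into a tuple.
# multiple_returns("At school") -> (9, 'A')
# multiple_returns("")          -> (0, None)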
[ "jdurregoe@gmail.com" ]
jdurregoe@gmail.com
b8b31f2f8c5f2ae17653cf45b579c862992df144
f5ba09db506abc4d356ad7b86fdb5ffa88de61dd
/homework/HW3.py
041df4deff63f18a6cdddb586f5fc477b69eb366
[]
no_license
bmlee-99/my_project
698367a9ca7aed0bb765a326e9bd2e213f93be6a
b46ad8f2c3ea3bb995b124d2ba501a8f3aec86d2
refs/heads/main
2023-02-20T15:50:42.085015
2021-01-21T10:35:42
2021-01-21T10:35:42
322,453,121
0
0
null
null
null
null
UTF-8
Python
false
false
927
py
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
db = client.dbsparta

import requests
from bs4 import BeautifulSoup

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
data = requests.get('https://www.genie.co.kr/chart/top200?ditc=D&rtm=N&ymd=20200713', headers=headers)

soup = BeautifulSoup(data.text, 'html.parser')

trs = soup.select('#body-content > div.newest-list > div > table > tbody > tr')

for tr in trs:
    rank = tr.select_one('td.number').text[0:3].strip()  # text.split()[0] would work as well
    title = tr.select_one('td.info > a.title.ellipsis').text.strip()
    artist = tr.select_one('td.info > a.artist.ellipsis').text
    doc = {
        'rank': rank,
        'title': title,
        'artist': artist
    }  # build the document as a dictionary
    db.genieranking.insert_one(doc)
[ "bm.lee@erpper.com" ]
bm.lee@erpper.com
3e9b1c5df00f5f386c3f78b3a37146483923a0fc
69c5ff8edc25452a6732caf74de020eedaaf5a69
/signal_test.py
cfe0e1dca1294e184fdc5d764f61b1d61333f0a0
[]
no_license
ree-rishun/IoH
e7a97dfe3858b0d88237b0f84468f41b67d7110c
af66c25e17e00b5da46cd0c99bafab395c85bae4
refs/heads/master
2020-07-03T05:57:20.152574
2019-08-12T05:53:47
2019-08-12T05:53:47
201,810,265
0
0
null
null
null
null
UTF-8
Python
false
false
1,680
py
# -*- coding: utf-8 -*-

# imports
import time
import sys
import wiringpi
import RPi.GPIO as GPIO

# declarations
# GPIO Pin
SPICLK = 11
SPIMOSI = 10
SPIMISO = 9
SPICS = 8

# GPIO config
GPIO.setmode(GPIO.BCM)
GPIO.setup(SPICLK, GPIO.OUT)
GPIO.setup(SPIMOSI, GPIO.OUT)
GPIO.setup(SPIMISO, GPIO.IN)
GPIO.setup(SPICS, GPIO.OUT)


# ADC read function (bit-banged SPI for a 12-bit, 8-channel ADC)
def readadc(adcnum, clockpin, mosipin, misopin, cspin):
    if adcnum > 7 or adcnum < 0:
        return -1

    GPIO.output(cspin, GPIO.HIGH)
    GPIO.output(clockpin, GPIO.LOW)
    GPIO.output(cspin, GPIO.LOW)

    commandout = adcnum
    commandout |= 0x18  # start bit + single-ended bit
    commandout <<= 3    # shift so that bit 8 (counting from the LSB) is sent first
    for i in range(5):  # send bits 8 down to 4, counting from the LSB
        if commandout & 0x80:
            GPIO.output(mosipin, GPIO.HIGH)
        else:
            GPIO.output(mosipin, GPIO.LOW)
        commandout <<= 1
        GPIO.output(clockpin, GPIO.HIGH)
        GPIO.output(clockpin, GPIO.LOW)

    adcout = 0
    # read 13 bits (a null bit plus 12 data bits)
    for i in range(13):
        GPIO.output(clockpin, GPIO.HIGH)
        GPIO.output(clockpin, GPIO.LOW)
        adcout <<= 1
        if i > 0 and GPIO.input(misopin) == GPIO.HIGH:
            adcout |= 0x1

    GPIO.output(cspin, GPIO.HIGH)
    return adcout


# main loop
while True:
    try:
        # read the current value from the ADC
        inputVal = readadc(0, SPICLK, SPIMOSI, SPIMISO, SPICS)
        print(inputVal)
    except (KeyboardInterrupt, SystemExit):
        exit()
[ "ree0432@gmail.com" ]
ree0432@gmail.com
95284482bae6b27e575c9dba31d4eb4b90091235
7a9b0a37b5f7519898e58237a46542b17dbdd0b1
/bookmark/views.py
9527727871930c2332f09f63d8769cdf47f12003
[]
no_license
DDIPONG/devops
39dc1c228dba441253839669eb9c922f43e1d0f1
40ffd9ab074bfc635ca1865dddac18c283569b64
refs/heads/master
2022-12-27T12:37:23.992065
2020-10-06T06:46:42
2020-10-06T06:46:42
299,507,706
0
0
null
null
null
null
UTF-8
Python
false
false
574
py
from django.shortcuts import render

# Create your views here.
# In order to use class-based generic views, import ListView and DetailView
from django.views.generic import ListView, DetailView
# Import the model class so the views can query its table
from bookmark.models import Bookmark


# BookmarkLV lists the records of the Bookmark table.
# It inherits from the generic ListView.
class BookmarkLV(ListView):
    model = Bookmark


# BookmarkDV shows the detailed info of a single Bookmark record.
# It inherits from the generic DetailView.
class BookmarkDV(DetailView):
    model = Bookmark
[ "root@ip-192-168-114-129.ap-northeast-2.compute.internal" ]
root@ip-192-168-114-129.ap-northeast-2.compute.internal
a39d2cecc8e96bfe1de6ff608258dea96d977463
a267b269c261656bdefe99586c7dc3fb9d7f4ad1
/lesson1/price.py
cec3719bc3d56c01438dbc07d524f51f49849deb
[]
no_license
Sandello76/learn-python
2e6785b9d716c8b32a24a2f2712091f2a82fea60
7309da4fa50ddb1ca311df46fa493d69024a84ca
refs/heads/master
2023-05-06T17:39:46.223060
2021-05-29T00:31:59
2021-05-29T00:31:59
367,744,547
0
0
null
2021-05-29T00:32:00
2021-05-15T22:56:55
Python
UTF-8
Python
false
false
516
py
'''price = 100
discount = 5
price_with_discount = price - price * discount / 100
print(price_with_discount)
'''


def discounted(price, discount, max_discount=20):
    price = abs(float(price))
    discount = abs(float(discount))
    max_discount = abs(float(max_discount))
    if max_discount > 99:
        raise ValueError('The maximum discount is too large')
    if discount >= max_discount:
        # A discount at or above the cap is rejected: the full price is returned
        return price
    else:
        return price - (price * discount / 100)
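# Behaviour sketch of discounted() with illustrative values:
# a 10% discount is under the default 20% cap, so it is applied;
# a 25% discount exceeds the cap, so no discount is given at all.
assert discounted(1000, 10) == 900.0
assert discounted(1000, 25) == 1000.0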
[ "raue76@gmail.com" ]
raue76@gmail.com
364356172473bbaedbfc17d1fa14802f452bec04
4493b3f0879000ab48cba198b1e059249198a740
/dataset_config.py
998d289cc0617edfa2f41a9eb2bfa6add1e656a3
[ "BSD-3-Clause" ]
permissive
AGalassi/StructurePrediction18
7216a375d170bf27c12375af98c70a88f6f57985
ae739e492e4ad55908b0275c49e54d7586a481da
refs/heads/master
2023-06-08T15:54:04.424556
2023-05-24T09:17:21
2023-05-24T09:17:21
125,429,569
11
6
BSD-3-Clause
2023-03-24T22:24:10
2018-03-15T21:44:36
Python
UTF-8
Python
false
false
17,951
py
__author__ = "Andrea Galassi" __copyright__ = "Copyright 2018, Andrea Galassi" __license__ = "BSD 3-clause" __version__ = "0.0.1" __email__ = "a.galassi@unibo.it" """ output_units : (link classifier, relation classifier, source classifier, target classifier) """ dataset_info = {"AAEC_v2": {"output_units": (2, 5, 3, 3), "min_text": 168, "min_prop": 72, "link_as_sum": [[0, 2], [1, 3, 4]], "categorical_prop": {'Premise': [1, 0, 0, ], 'Claim': [0, 1, 0, ], 'MajorClaim': [0, 0, 1], }, "categorical_link": {'supports': [1, 0, 0, 0, 0], 'inv_supports': [0, 1, 0, 0, 0], 'attacks': [0, 0, 1, 0, 0], 'inv_attacks': [0, 0, 0, 1, 0], None: [0, 0, 0, 0, 1], }, "evaluation_headline_short": ("set\t" + "F1 AVG all\tF1 AVG LP\tF1 Link\t" + "F1 R AVG dir\tF1 R support\tF1 R attack\t" + "F1 P AVG\t" + "F1 P premise\tF1 P claim\tF1 P majclaim\t" + "F1 P avg\n\n"), "evaluation_headline": ("set\t" + "F1 AVG all\tF1 AVG LP\t" "F1 Link\tF1 R AVG dir\t" "F1 R support\tF1 R attack\t" + "F1 P AVG\tF1 P premise\tF1 P claim\tF1 P majclaim\tF1 P avg\t" + "Pr P AVG\tPr P premise\tPr P claim\tPr P majclaim\tPr P avg\t" + "Rec P AVG\tRec P premise\tRec P claim\tRec P majclaim\tRec P avg\t" + "Supp P premise\tSupp P claim\tSupp P majclaim\t" "F1 nonLink\tF1_R_avg\tF1_R_not-rel" + "\n\n"), "prop_types": ['Premise', 'Claim', 'MajorClaim'], "rel_types": ['supports', 'inv_supports', 'attacks', 'inv_attacks', "None"], }, "ECHR2018": {"output_units": (2, 8, 2, 2), "min_text": 168, # NO "min_prop": 95, "link_as_sum": [[0, 2], [1, 3, 4, 5, 6, 7]], "categorical_prop": {'premise': [1, 0], 'claim': [0, 1], }, "categorical_link": {'Support': [1, 0, 0, 0, 0, 0, 0, 0], 'inv_Support': [0, 1, 0, 0, 0, 0, 0, 0], 'Attack': [0, 0, 1, 0, 0, 0, 0, 0], 'inv_Attack': [0, 0, 0, 1, 0, 0, 0, 0], 'Citation': [0, 0, 0, 0, 1, 0, 0, 0], 'inv_Citation': [0, 0, 0, 0, 0, 1, 0, 0], 'Duplicate': [0, 0, 0, 0, 0, 0, 1, 0], None: [0, 0, 0, 0, 0, 0, 0, 1], }, "evaluation_headline_short": ("set\t" + "F1 AVG all\tF1 AVG LP\tF1 Link\t" + "F1 R AVG dir\tF1 R support\tF1 R attack\t" + "F1 P AVG\t" + "F1 P premise\tF1 P claim\t" + "F1 P avg\n\n"), "prop_types": ['Premise', 'Claim'], "rel_types": ['Support', 'inv_Support', 'Attack', 'inv_Attack', 'Citation', 'inv_Citation', 'Duplicate', 'None'], }, "cdcp_ACL17": {"output_units": (2, 5, 5, 5), "min_text": 552, "min_prop": 153, "link_as_sum": [[0, 2], [1, 3, 4]], "categorical_prop": {'policy': [1, 0, 0, 0, 0], 'fact': [0, 1, 0, 0, 0], 'testimony': [0, 0, 1, 0, 0], 'value': [0, 0, 0, 1, 0], 'reference': [0, 0, 0, 0, 1], }, "categorical_link": {'reasons': [1, 0, 0, 0, 0], 'inv_reasons': [0, 1, 0, 0, 0], 'evidences': [0, 0, 1, 0, 0], 'inv_evidences': [0, 0, 0, 1, 0], None: [0, 0, 0, 0, 1], }, "evaluation_headline_short": ("set\t" "F1 AVG all\tF1 AVG LP\tF1 Link\t" "F1 R AVG dir\tF1 R reason\tF1 R evidence\t" + "F1 P AVG\t" + "F1 P policy\tF1 P fact\tF1 P testimony\t" + "F1 P value\tF1 P reference\tF1 P avg\n\n"), "evaluation_headline": ("set\t" "F1 AVG all\t" "F1 AVG LP\t" "F1 Link\t" "F1 R AVG dir\tF1 R reason\tF1 R evidence\tF1 P AVG\t" "F1 P policy\tF1 P fact\tF1 P testimony\tF1 P value\tF1 P reference\tf1 P avg\t" "PR P AVG\tPr P policy\tPr P fact\tPr P testimony\tPr P value\tPr P reference\tpr P avg\t" "REC P AVG\tRec P policy\tRec P fact\tRec P testimony\tRec P value\tRec P reference\trec P avg\t" "Supp P policy\tSupp P fact\tSupp P testimony\tSupp P value\tSupp P reference\t" "F1 nonLink\tF1_R_avg\tF1_R_not-rel\n\n"), "prop_types": ['policy', 'fact', 'testimony', 'value', 'reference'], "rel_types": ['reasons', 
'inv_reasons', 'evidences', 'inv_evidences', "None"], }, "RCT": {"output_units": (2, 5, 2, 2), "min_text": 2, # wrong, never measured "min_prop": 181, "link_as_sum": [[0, 2], [1, 3, 4]], "categorical_prop": {'Premise': [1, 0, ], 'Claim': [0, 1, ] }, "categorical_link": {'support': [1, 0, 0, 0, 0], 'inv_support': [0, 1, 0, 0, 0], 'attack': [0, 0, 1, 0, 0], 'inv_attack': [0, 0, 0, 1, 0], None: [0, 0, 0, 0, 1], }, "evaluation_headline_short": ("set\t" + "F1 AVG all\t" + "F1 AVG LP\t" + "F1 Link\t" + "F1 R AVG dir\tF1 R support\tF1 R attack\t" + "F1 P AVG\t" + "F1 P premise\tF1 P claim\t" + "F1 P avg\n\n"), "evaluation_headline": ("set\t" + "F1 AVG all\tF1 AVG LP\t" "F1 Link\tF1 R AVG dir\t" "F1 R support\tF1 R attack\t" + "F1 P AVG\tF1 P premise\tF1 P claim\tF1 P avg\t" + "Pr P AVG\tPr P premise\tPr P claim\tPr P avg\t" + "Rec P AVG\tRec P premise\tRec P claim\tRec P avg\t" + "Supp P premise\tSupp P claim\t" "F1 nonLink\tF1_R_avg\tF1_R_not-rel" + "\n\n"), "prop_types": ['Premise', 'Claim',], "rel_types": ['supports', 'inv_supports', 'attacks', 'inv_attacks', "None"], }, "scidtb_argmin_annotations": {"output_units": (2, 5, 6, 6), "min_text": 2, # wrong, never measured "min_prop": 95, "link_as_sum": [[0, 2], [1, 3, 4]], "categorical_prop": {'proposal': [1, 0, 0, 0, 0, 0], 'assertion': [0, 1, 0, 0, 0, 0], 'result': [0, 0, 1, 0, 0, 0], 'observation': [0, 0, 0, 1, 0, 0], 'means': [0, 0, 0, 0, 1, 0], 'description': [0, 0, 0, 0, 0, 1], }, "categorical_link": {'support': [1, 0, 0, 0, 0], 'inv_support': [0, 1, 0, 0, 0], 'attack': [0, 0, 1, 0, 0], 'inv_attack': [0, 0, 0, 1, 0], None: [0, 0, 0, 0, 1], }, # TODO: FIX THIS "evaluation_headline_short": ("set\t" "F1 AVG all\tF1 AVG LP\tF1 Link\t" "F1 R AVG dir\tF1 R reason\tF1 R evidence\t" + "F1 P AVG\t" + "F1 P policy\tF1 P fact\tF1 P testimony\t" + "F1 P value\tF1 P reference\tF1 P avg\n\n"), "evaluation_headline": ("set\t" "F1 AVG all\t" "F1 AVG LP\t" "F1 Link\t" "F1 R AVG dir\tF1 R support\tF1 R attack\tF1 P AVG\t" "F1 P proposal\tF1 P assertion\tF1 P result\tF1 P observation\tF1 P means\tF1 P description\t" "f1 P avg\t" "PR P AVG\t" "Pr P proposal\tPr P assertion\tPr P result\tPr P observation\tPr P means\tPr P description\t" "pr P avg\t" "REC P AVG\t" "Rec P proposal\tRec P assertion\tRec P result\tRec P observation\tRec P means\tRec P description\t" "rec P avg\t" "Supp P proposal\tSupp P assertion\tSupp P result\tSupp P observation\tSupp P means\tSupp P description\t" "F1 nonLink\tF1_R_avg\tF1_R_not-rel\n\n"), "prop_types": ['proposal', 'assertion', 'result', 'observation', 'means', 'description'], "rel_types": ['supports', 'inv_supports', 'attacks', 'inv_attacks', "None"], }, "DrInventor": {"output_units": (2, 6, 3, 3), "min_text": 2, # wrong, never measured "min_prop": 106, "link_as_sum": [[0, 2, 4], [1, 3, 5]], "categorical_prop": {'own_claim': [1, 0, 0,], 'background_claim': [0, 1, 0], 'data': [0, 0, 1], }, "categorical_link": {'supports': [1, 0, 0, 0, 0, 0], 'inv_supports': [0, 1, 0, 0, 0, 0], 'contradicts': [0, 0, 1, 0, 0, 0], 'inv_contradicts': [0, 0, 0, 1, 0, 0], 'semantically_same': [0, 0, 0, 0, 1, 0], None: [0, 0, 0, 0, 0, 1], }, "evaluation_headline_short": ("set\t" + "F1 AVG all\tF1 AVG LP\tF1 Link\t" + "F1 R AVG dir\tF1 R supports\tF1 R contradicts\t" + "F1 P avg\t" + "F1 P own_claim\tF1 P background_claim\tF1 P data\t" + "f1 P avg\n\n"), "evaluation_headline": ("set\t" + "F1 AVG all\tF1 AVG LP\tF1 Link\t" + "F1 R AVG dir\tF1 R supports\tF1 R contradicts\tF1 R semsame\t" + "F1 P avg\t" + "F1 P own_claim\tF1 P background_claim\tF1 P 
data\t" + "f1 P avg\t" "PR P AVG\t" + "Pr P own_claim\tPr P background_claim\tPr P data\t" + "Pr P avg\t" "REC P AVG\t" + "Rec P own_claim\tRec P background_claim\tRec P data\t" + "Rec P avg\t" "Supp P own_claim\tSupp P background_claim\tSupp P data\t" + "F1 non-Link\tF1_R_avg\tF1_R_not-rel" "\n\n"), "prop_types": ['own_claim', 'background_claim', 'data'], "rel_types": ['supports', 'inv_supports', 'contradicts', 'inv_contradicts', "semantically_same", "None"], }, }
[ "a.galaxy@outlook.it" ]
a.galaxy@outlook.it
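A note on consuming the dataset-configuration record that closes above: categorical_prop and categorical_link are one-hot encodings of proposition and relation labels, and link_as_sum lists which relation-class indexes get pooled as "link" versus "non-link" when scoring. A minimal lookup sketch; the enclosing variable name dataset_config is an assumption, since the top of that file falls outside this excerpt:

# Illustrative lookup into the "RCT" entry of the configuration above.
# "dataset_config" is an assumed name for the enclosing dict.
rct = dataset_config["RCT"]
claim_one_hot = rct["categorical_prop"]["Claim"]    # [0, 1]
link_classes = rct["link_as_sum"][0]                # relation indexes pooled as "link": [0, 2]
non_link_classes = rct["link_as_sum"][1]            # pooled as "non-link": [1, 3, 4]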
f15b490903261f2afb007cd9301a5d6ac771bbba
23e9bc3c6810b3cfa897653eefe74dadf5817d6b
/2020/day1-2.py
f7d4a87409d28ecaa1760d00e9dca9c543fe3416
[]
no_license
LordBrom/advent-of-code
3f68789b0fd31e22f840b26c7984cccdfc26e4c8
f7165af7f03b3fb0d3db01191cdcff8a95474b8f
refs/heads/master
2023-07-31T19:19:09.062740
2021-09-05T19:58:16
2021-09-05T19:58:16
319,400,695
0
0
null
null
null
null
UTF-8
Python
false
false
338
py
inputsArray = open("day1.in", "r").read().split("\n") inputsArray.pop() for i in range(len(inputsArray)): for j in range(i, len(inputsArray)): for k in range(j, len(inputsArray)): if int(inputsArray[i]) + int(inputsArray[j]) + int(inputsArray[k]) == 2020: print(int(inputsArray[i]) * int(inputsArray[j]) * int(inputsArray[k]))
[ "mills.nate@gmail.com" ]
mills.nate@gmail.com
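The brute-force triple loop in the day1-2.py record above can be expressed with itertools.combinations, which avoids the index bookkeeping entirely. A sketch, not part of the original repo (it reuses the day1.in filename from the script above):

# Equivalent search using itertools.combinations.
from itertools import combinations

entries = [int(line) for line in open("day1.in") if line.strip()]
for a, b, c in combinations(entries, 3):
    if a + b + c == 2020:
        print(a * b * c)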
6063109ef967c55b97de6c5daf44a5c8e88ac02e
11cfe900a2cd2363bca41c6e9fa45710e7c578cc
/backend/admin.py
b4bafb653197350b06e2e7238d5f15c9180d8a59
[]
no_license
RENZOje/remote_learning
2dd5a353733a49a080769740afad0ffe513731e5
8ce4ee4b2a832bea921be94f630101434024ec06
refs/heads/master
2023-06-10T21:31:29.423609
2021-06-11T15:58:54
2021-06-11T15:58:54
347,067,622
0
0
null
null
null
null
UTF-8
Python
false
false
1,012
py
from django.contrib import admin
from django import forms
from .models import *
from ckeditor_uploader.widgets import CKEditorUploadingWidget

# Register your models here.
admin.site.register(Quiz)
admin.site.register(Grade)
admin.site.register(Teacher)
admin.site.register(Section)
admin.site.register(Student)
admin.site.register(Course)
admin.site.register(Group_custom)
admin.site.register(Assignment)
admin.site.register(UploadAssignment)


class AnswerInline(admin.TabularInline):
    model = Answer


class QuestionAdmin(admin.ModelAdmin):
    inlines = [AnswerInline]


admin.site.register(Question, QuestionAdmin)
admin.site.register(Answer)
admin.site.register(ResultQuiz)
admin.site.register(ResultAssignment)


class ArticleAdminForm(forms.ModelForm):
    description = forms.CharField(widget=CKEditorUploadingWidget())

    class Meta:
        model = Article
        fields = '__all__'


class ArticleAdmin(admin.ModelAdmin):
    form = ArticleAdminForm


admin.site.register(Article, ArticleAdmin)
[ "RENZOje@users.noreply.github.com" ]
RENZOje@users.noreply.github.com
cadcaf82134eb1b16eeb3991d6a5f168d62aaa56
714301f86767b075dd7a9132535e25689b7a7e4a
/Windows/src/LaZagne/softwares/databases/dbvis.py
02efcb16ebc7402c9d8b1ab802b48ea5dfb0ee4c
[]
no_license
eddgomez/LaZagne
ebac0cc9a57786a064922380090fb3850492f876
aafee7e88361db1466d3b5148838700b5fab36bc
refs/heads/master
2021-01-15T21:39:36.390397
2015-02-20T15:18:27
2015-02-20T15:18:27
31,222,312
1
0
null
2015-02-23T18:24:15
2015-02-23T18:24:15
null
UTF-8
Python
false
false
3,057
py
from Crypto.Hash import MD5
from Crypto.Cipher import DES
import binascii, array, hashlib
import base64, re, os
import xml.etree.cElementTree as ET

from config.write_output import print_output, print_debug
from config.constant import *
from config.header import Header


class Dbvisualizer():
    # ---- functions used to decrypt the password ----
    def get_salt(self):
        salt_array = [-114, 18, 57, -100, 7, 114, 111, 90]
        salt = array.array('b', salt_array)
        hexsalt = binascii.hexlify(salt)
        return binascii.unhexlify(hexsalt)

    def get_iteration(self):
        return 10

    def get_derived_key(self, password, salt, count):
        key = bytearray(password) + salt
        for i in range(count):
            m = hashlib.md5(key)
            key = m.digest()
        return (key[:8], key[8:])

    def decrypt(self, salt, msg, password):
        enc_text = base64.b64decode(msg)
        (dk, iv) = self.get_derived_key(password, salt, self.get_iteration())
        crypter = DES.new(dk, DES.MODE_CBC, iv)
        text = crypter.decrypt(enc_text)
        return re.sub(r'[\x01-\x08]', '', text)

    def get_passphrase(self):
        return 'qinda'
    # ---- end of the functions block ----

    def get_infos(self, path, passphrase, salt):
        xml_file = path + os.sep + 'config70/dbvis.xml'
        if os.path.exists(xml_file):
            tree = ET.ElementTree(file=xml_file)
            pwdFound = []
            for e in tree.findall('Databases/Database'):
                values = {}
                try:
                    values['Connection Name'] = e.find('Alias').text
                except:
                    pass
                try:
                    values['Userid'] = e.find('Userid').text
                except:
                    pass
                try:
                    ciphered_password = e.find('Password').text
                    try:
                        password = self.decrypt(salt, ciphered_password, passphrase)
                        values['Password'] = password
                        passwordFound = True
                    except:
                        pass
                except:
                    pass
                try:
                    values['Driver'] = e.find('UrlVariables//Driver').text.strip()
                except:
                    pass
                try:
                    elem = e.find('UrlVariables')
                    for ee in elem.getchildren():
                        for ele in ee.getchildren():
                            if 'Server' == ele.attrib['UrlVariableName']:
                                values['Server'] = str(ele.text)
                            if 'Port' == ele.attrib['UrlVariableName']:
                                values['Port'] = str(ele.text)
                            if 'SID' == ele.attrib['UrlVariableName']:
                                values['SID'] = str(ele.text)
                except:
                    pass

                if len(values) > 0:
                    pwdFound.append(values)

            # print the results
            print_output("DbVisualizer", pwdFound)

    def get_mainPath(self):
        if 'HOMEPATH' in os.environ:
            path = os.environ['HOMEPATH'] + os.sep + '.dbvis'
            if os.path.exists(path):
                return path
            else:
                return 'DBVIS_NOT_EXISTS'
        else:
            return 'var_Env_Not_Found'

    def retrieve_password(self):
        # print title
        Header().title_debug('Dbvisualizer')

        mainPath = self.get_mainPath()
        if mainPath == 'DBVIS_NOT_EXISTS':
            print_debug('INFO', 'Dbvisualizer not installed.')
        elif mainPath == 'var_Env_Not_Found':
            print_debug('ERROR', 'The HOMEPATH environment variable is not defined.')
        else:
            passphrase = self.get_passphrase()
            salt = self.get_salt()
            self.get_infos(mainPath, passphrase, salt)
[ "zanni.alessandro@gmail.com" ]
zanni.alessandro@gmail.com
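The get_derived_key routine in the dbvis.py record above is the PKCS#5 PBKDF1 construction with MD5: hash password+salt, re-hash the digest for the iteration count, then split the final 16-byte digest into an 8-byte DES key and an 8-byte CBC IV. A standalone sketch using only hashlib (byte-string inputs assumed, since the module's Python 2 style bytearray(password) call would need an encoding under Python 3); the salt hex below is the module's fixed signed-byte salt rewritten as unsigned bytes:

import hashlib

def pbkdf1_md5(password: bytes, salt: bytes, count: int):
    # PBKDF1 with MD5: iterate the digest, then split the key material.
    key = password + salt
    for _ in range(count):
        key = hashlib.md5(key).digest()
    return key[:8], key[8:]   # (DES key, CBC IV)

dk, iv = pbkdf1_md5(b'qinda', bytes.fromhex('8e12399c07726f5a'), 10)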
b89514586d2c8ef0a1fa99979861bca2c7fc1001
6d486cc1fc08b79111c701f5c9b0e74abf809e4f
/test/test_dml.py
382d8f939676d32a8c02a99c4d958a5cf0f8e799
[ "MIT" ]
permissive
albertvisser/a-propos
1d16c122a7698e6e32a55767a1af9acbbf6c3ca2
31c89bc4560b93635a79ab3ab7d224b8d8f756fb
refs/heads/master
2023-04-30T19:28:40.235949
2023-04-15T13:29:44
2023-04-15T13:29:44
219,269,688
0
0
null
null
null
null
UTF-8
Python
false
false
1,736
py
import pytest
from apropos import dml


def test_get_apofile(monkeypatch, capsys):
    assert dml.get_apofile('') == dml.pathlib.Path('apropos.apo')
    assert dml.get_apofile('test') == dml.pathlib.Path('test.apo')
    assert dml.get_apofile('testing.pck') == dml.pathlib.Path('testing.apo')


def test_load_notes_file_not_found(tmp_path):
    """run the load_notes method with no existing file
    """
    # monkeypatch.setattr(dml.pathlib.Path, 'exists', lambda *x: False)
    apofile = tmp_path / 'apropos.apo'
    opts, apodata = dml.load_notes(apofile)
    assert opts == {"AskBeforeHide": True, "ActiveTab": 0, 'language': 'eng',
                    'NotifyOnSave': True, 'NotifyOnLoad': True}
    assert apodata == {}


def test_load_notes_not_a_pickle(tmp_path):
    """run the load_notes method with a non-pickle file
    """
    apofile = tmp_path / 'apropos.apo'
    with apofile.open(mode='w') as f:
        f.write("oihgyavjjvjdvj diidn dnni")
        f.write("\n")
    opts, apodata = dml.load_notes(apofile)


def test_load_notes_happy_flow(tmp_path):
    """run the load_notes method on a "correct" file
    """
    apofile = tmp_path / 'apropos.apo'
    with apofile.open(mode='wb') as f:
        dml.pickle.dump({0: 'opts', 1: 'apodata'}, f, protocol=2)
    opts, apodata = dml.load_notes(apofile)
    assert opts == 'opts'
    assert apodata == {1: 'apodata'}


def test_save_notes_happy_flow(tmp_path):
    """save the notes and check if it's readable correctly
    """
    apofile = tmp_path / 'apropos.apo'
    opts = 'opts'
    apodata = {1: 'apodata'}
    dml.save_notes(apofile, opts, apodata)
    with apofile.open(mode='rb') as f:
        data = dml.pickle.load(f)
    assert data == {0: 'opts', 1: 'apodata'}
[ "albert.visser@gmail.com" ]
albert.visser@gmail.com
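The two happy-flow tests above pin down the .apo on-disk layout: a single pickled dict whose key 0 carries the options blob and whose remaining keys carry the note data. A sketch of reading such a file with plain pickle, independent of the apropos package (the split into opts/apodata is inferred from those tests):

import pickle

# Read an .apo file the way the tests above imply load_notes does:
# key 0 is the options, everything else is note data.
with open('apropos.apo', 'rb') as f:
    data = pickle.load(f)
opts = data.pop(0)
apodata = data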
b8954b6cea35abb939ed06c8276b23e8b81f83d3
b2e340f22a7f613dc33ea361ba87a393d65b723c
/LogicAnalyzer/config/config.py
f19d2b4e3d649ece283274df9b734d2dc8094f99
[ "MIT" ]
permissive
CospanDesign/logic-analyzer
6369cfc423f3fae050f9ab784a6ae94003422654
284ea339c001b4845a46fcb0672511487271c9c3
refs/heads/master
2021-01-20T18:58:53.477152
2016-06-24T02:22:04
2016-06-24T02:22:04
61,488,220
1
1
null
null
null
null
UTF-8
Python
false
false
2,335
py
import logging
import json
import sys

TRIGGER = "trigger"
TRIGGER_MASK = "trigger_mask"
TRIGGER_EDGE = "trigger_edge"
TRIGGER_BOTH_EDGE = "both_edges"
TRIGGER_REPEAT = "repeat"
TRIGGER_AFTER = "trigger_after"

CAPABILITY_NAMES = [
    TRIGGER,
    TRIGGER_MASK,
    TRIGGER_EDGE,
    TRIGGER_BOTH_EDGE,
    TRIGGER_REPEAT,
    TRIGGER_AFTER
]

CALLBACK_START = "start"
CALLBACK_STOP = "stop"
CALLBACK_FORCE = "force"
CALLBACK_UPDATE = "update"
CALLBACK_GET_SIZE = "get_size"
CALLBACK_CLOSE = "close"

CALLBACK_NAMES = [
    CALLBACK_START,
    CALLBACK_STOP,
    CALLBACK_FORCE,
    CALLBACK_UPDATE,
    CALLBACK_GET_SIZE,
    CALLBACK_CLOSE
]


class Config(object):

    @staticmethod
    def get_name():
        return "Invalid Config, make your own!!"

    def __init__(self):
        self.log = logging.getLogger("LAX")
        self.caps = {}
        self.callbacks = {}
        self.channels = []
        for name in CAPABILITY_NAMES:
            self.caps[name] = None
        for name in CALLBACK_NAMES:
            self.callbacks[name] = None

    def get_channel_dict(self):
        """
        Return a dictionary that maps names to channel(s)
        """
        return self.channels

    def get_capabilities(self):
        """
        Return a list of capabilities (strings) that this device supports
        """
        names = []
        for name in self.caps:
            if self.caps[name] is not None:
                names.append(name)
        return names

    def has_capability(self, name):
        """
        Return true if the device has the capabilities
        """
        return self.caps[name] is not None

    def get_value(self, name):
        "Get the value of a capability"
        if not self.has_capability(name):
            raise AssertionError("LAX Does not have capability")
        else:
            return self.caps[name]

    def set_callback(self, name, func):
        self.log.debug("Setting callback for: %s" % name)
        self.callbacks[name] = func

    def ready(self):
        """The controller tells the config interface it's ready"""
        raise AssertionError("%s not implemented" % sys._getframe().f_code.co_name)

    def captured(self):
        """callback when capture occurs"""
        raise AssertionError("%s not implemented" % sys._getframe().f_code.co_name)
[ "cospan@gmail.com" ]
cospan@gmail.com
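The Config base class above is a capability/callback registry: a subclass populates self.caps for the features its hardware supports, and the controller wires callbacks in by name before calling ready(). A hypothetical minimal subclass for illustration only; the device name, channel layout, and capability values are invented, not from the logic-analyzer repo:

class DemoConfig(Config):
    # Hypothetical subclass, for illustration only.

    @staticmethod
    def get_name():
        return "Demo 8-channel analyzer"

    def __init__(self):
        super(DemoConfig, self).__init__()
        self.channels = [{"ch%d" % i: i} for i in range(8)]
        self.caps[TRIGGER] = 0x01       # advertise trigger support
        self.caps[TRIGGER_MASK] = 0xFF  # all eight channels maskable

    def ready(self):
        # kick off a capture as soon as the controller says it is ready
        if self.callbacks[CALLBACK_START]:
            self.callbacks[CALLBACK_START]()

    def captured(self):
        pass

With this in place, DemoConfig().get_capabilities() returns ['trigger', 'trigger_mask'] and get_value(TRIGGER_MASK) returns 0xFF, while the unimplemented capabilities still raise through get_value() as the base class intends.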
fa8bf7a05bf66710fd0d1de3652d77a24a921dc4
732ea9c77c138ed29ac259618f3f75c2cfea7ae3
/ds/subset.py
36def3b8da5734584523f0155ee15379b968874e
[]
no_license
sharmak/python
842ae452434247b62f2f43f25dce9ec88dc0ceff
3218a8bad7a02bfde622d922e9e3fe8d5359e20a
refs/heads/master
2020-04-28T00:28:11.692739
2015-11-29T17:13:17
2015-11-29T17:13:17
24,114,794
1
0
null
null
null
null
UTF-8
Python
false
false
1,017
py
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 29 06:00:54 2014

@author: kishor
"""
# Subset problem
# Given a set find all the subset of the given set
# e.g. {1,2,3} => {{1},{2}, {3}, {1,2}, {1,3} {2,3}, {1,2,3}, {}}
# Solve the problem using backtracking


def is_subset_solution(n, k):
    return n == k


def generate_subset_candidates(n, k):
    # Kth element can be either present or not
    # present in the subset solution
    return [True, False]


def process_subset_solution(a, data):
    values = list()
    for i in xrange(len(a)):
        if a[i]:
            values.append(data[i])
    print(values)


def subset_backtrack(a, n, k, data):
    #print(n, k)
    if is_subset_solution(n, k):
        process_subset_solution(a, data)
    else:
        k = k + 1
        #print(k)
        #print(n)
        candidates = generate_subset_candidates(n, k)
        for c in candidates:
            a[k-1] = c
            subset_backtrack(a, n, k, data)


subset_backtrack([False, False, False], 3, 0, [1, 2, 3])
[ "kishor.iitr@gmail.com" ]
kishor.iitr@gmail.com
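For comparison with the backtracking enumeration above, the standard library can produce the same power set directly; a short sketch (enumeration order differs from the recursive version):

# Power set via itertools, printing the same subsets as subset_backtrack above.
from itertools import combinations

def power_set(data):
    for r in range(len(data) + 1):
        for combo in combinations(data, r):
            print(list(combo))

power_set([1, 2, 3])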
3f88cdeb167581de419c97c7d69a29b54ad556fd
c0c84529d07551bd6cac4fce3bbb44bb51e25ff1
/CO1PG15.py
25dd00b91fdba7afe96ab964fb945cf9cc1f1e42
[]
no_license
Amalajoy/ProgrammingLab-Amala
1c73d282afe922be4eb5541a34b525643e55c7e5
9229a764b40a589a19b5ee7e17e49c3b9b201d0d
refs/heads/main
2023-03-13T02:58:36.138084
2021-02-17T18:34:36
2021-02-17T18:34:36
321,900,750
1
0
null
2021-01-11T07:26:15
2020-12-16T07:17:29
null
UTF-8
Python
false
false
158
py
color_list_1 = set(["White", "Violet", "Indigo", "Blue"])
color_list_2 = set(["Blue", "Green", "Yellow", "White"])
print(color_list_1.difference(color_list_2))
[ "noreply@github.com" ]
Amalajoy.noreply@github.com
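The difference() call above keeps only the colors unique to the first set; the related set operators behave the same way and are worth seeing side by side. A small sketch using the same data:

a = {"White", "Violet", "Indigo", "Blue"}
b = {"Blue", "Green", "Yellow", "White"}
print(a - b)   # same as a.difference(b): {'Violet', 'Indigo'}
print(a & b)   # intersection: {'White', 'Blue'}
print(a ^ b)   # symmetric difference: colors unique to either set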
2f5f68b3f678d84bdc6d49307107c175f3c16b8f
4454e19d52e71e5fd1435e2a37dcfd074f944f83
/utils/ms_ssim.py
549732d887c62f8b861c54854093f878b64c3ca6
[]
no_license
Emr03/deepInfoMax
854d6c3289ae13b7aa1e8783a7a4db9f9946499f
b5c0182d71c88c1af872fcd78c51cffdbf10106e
refs/heads/master
2022-11-02T16:06:27.899770
2020-06-16T21:21:53
2020-06-16T21:21:53
248,629,053
1
0
null
null
null
null
UTF-8
Python
false
false
1,737
py
import torch
import math


def ms_ssim(X_a, X_b, window_size=11, size_average=True, C1=0.01**2, C2=0.03**2):
    """
    Taken from Po-Hsun-Su/pytorch-ssim
    """
    channel = X_a.size(1)

    def gaussian(sigma=1.5):
        gauss = torch.Tensor(
            [math.exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
        return gauss / gauss.sum()

    def create_window():
        _1D_window = gaussian().unsqueeze(1)  # default sigma of 1.5
        _2D_window = _1D_window.mm(
            _1D_window.t()).float().unsqueeze(0).unsqueeze(0)
        window = torch.Tensor(
            _2D_window.expand(channel, 1, window_size, window_size).contiguous())
        return window.cuda()

    window = create_window()

    mu1 = torch.nn.functional.conv2d(X_a, window, padding=window_size // 2, groups=channel)
    mu2 = torch.nn.functional.conv2d(X_b, window, padding=window_size // 2, groups=channel)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1 * mu2

    sigma1_sq = torch.nn.functional.conv2d(
        X_a * X_a, window, padding=window_size // 2, groups=channel) - mu1_sq
    sigma2_sq = torch.nn.functional.conv2d(
        X_b * X_b, window, padding=window_size // 2, groups=channel) - mu2_sq
    sigma12 = torch.nn.functional.conv2d(
        X_a * X_b, window, padding=window_size // 2, groups=channel) - mu1_mu2

    ssim_map = (((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) /
                ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)))

    if size_average:
        return ssim_map.mean()
    else:
        return ssim_map.mean(1).mean(1).mean(1)
[ "elsa.riachi@mail.mcgill.ca" ]
elsa.riachi@mail.mcgill.ca
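Since create_window() above ends with .cuda(), the inputs must live on the GPU as well. A usage sketch under that assumption; the random NCHW tensors stand in for real image batches:

# Usage sketch (assumes a CUDA device, since the window is created with .cuda()).
import torch

x = torch.rand(4, 3, 64, 64).cuda()   # batch of 4 RGB 64x64 "images"
y = torch.rand(4, 3, 64, 64).cuda()
score = ms_ssim(x, y)                 # scalar tensor when size_average=True
print(score.item())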
1935bfa537c5f257092b4e5689d56e2394be68bb
a09c10c29478fed167c94d83d5dff9371f9a1680
/Client.py
ec5149235aa6288eed9ea16cd6590f770fc45567
[]
no_license
batra98/Distributed-Web-Cache
83e208689b18b95724dd0ba657b4ef89e9054d2a
7e08dfe4dd6739c779c59da3ab7301f3cb33af6a
refs/heads/master
2022-11-28T05:21:33.220922
2020-08-07T10:15:32
2020-08-07T10:15:32
285,793,260
2
0
null
2020-08-07T09:41:56
2020-08-07T09:41:56
null
UTF-8
Python
false
false
1,157
py
import socket
import sys


def send(ip, port, message):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((ip, port))
    try:
        sock.sendall(bytes(message, 'ascii'))
        response = str(sock.recv(1024), 'ascii')
        print("Received: {}".format(response))
        return response.split(None, 1)
    finally:
        sock.close()


def get(ip, port, key):
    return send(ip, port, "get {0}".format(key))


def add(ip, port, key, data):
    return send(ip, port, "add {0} {1}".format(key, data))


def add_node(ip, port, key):
    return send(ip, port, "addnode {0}".format(key))


def rm_node(ip, port, key):
    return send(ip, port, "rmnode {0}".format(key))


def stats(ip, port):
    return send(ip, port, "stats")


def performance(ip, port):
    return send(ip, port, "performance")


def test_load_balancing(ip, port, num_node, num_data):
    return send(ip, port, "test {0} {1}".format(num_node, num_data))


def clean(ip, port):
    return send(ip, port, "clean")


if __name__ == "__main__":
    ip, port = sys.argv[1], int(sys.argv[2])
    while True:
        command = input("> ")
        send(ip, port, command)
[ "sarthak.singhal@students.iiit.ac.in" ]
sarthak.singhal@students.iiit.ac.in
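Client.py above is a thin line-protocol client: every helper formats a command string and sends it over a fresh TCP connection. A round-trip sketch using those helpers; the host, port, and the shape of the server's reply are assumptions about the running cache node:

# Round-trip sketch; assumes a cache node is listening on the given host/port.
add("127.0.0.1", 8000, "greeting", "hello")
reply = get("127.0.0.1", 8000, "greeting")   # a list like ['OK', 'hello'], depending on the server protocol
print(reply)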
69fa3adebed46d0c5a9509edfcf765554631cdb7
a16d3c43e455298c371ff853e14acd8eea1db8fd
/test/test_seq_util.py
fe03b10b9322e58fb0cd788fb1afa0724283ba64
[]
no_license
TyloRoberts/fosvis
a17eebe9dc29a589ac38c902bf1b06841b0a9e53
992535b2799dc04334b7be97f81b64e986dcd0cf
refs/heads/master
2023-02-19T00:38:36.931003
2021-01-21T22:19:05
2021-01-21T22:19:05
316,780,812
2
0
null
null
null
null
UTF-8
Python
false
false
5,596
py
from fosvis import seq_util

import unittest
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import filecmp
import os
import pandas as pd
from pandas.testing import assert_frame_equal
import difflib


# Done
class test_remove_too_small_contigs(unittest.TestCase):

    def test_remove_too_small_contigs(self):
        input_file = 'test/data_for_testing/Fosmid_Size_Selection_Tests/test_remove_too_small_contigs_input.fasta'
        result = seq_util.remove_too_small_contigs(input_file, 100)

        seq3_len_100 = SeqRecord(
            Seq("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"),
            id="seq3_len_100",
            name="seq3_len_100",
            description="seq3_len_100")
        seq4_len_101 = SeqRecord(
            Seq("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"),
            id="seq4_len_101",
            name="seq4_len_101",
            description="seq4_len_101")
        seq5_len_200 = SeqRecord(
            Seq("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"),
            id="seq5_len_200",
            name="seq5_len_200",
            description="seq5_len_200")

        expected_result = [seq3_len_100, seq4_len_101, seq5_len_200]

        self.assertEqual(len(result), len(expected_result))
        self.assertTrue(result[0].seq == expected_result[0].seq)
        self.assertTrue(result[1].seq == expected_result[1].seq)
        self.assertTrue(result[2].seq == expected_result[2].seq)
        self.assertTrue(result[0].id == expected_result[0].id)
        self.assertTrue(result[1].id == expected_result[1].id)
        self.assertTrue(result[2].id == expected_result[2].id)


# Done
# class test_write_seqs_to_file(unittest.TestCase):
#
#     def test_write_seqs_to_file(self):
#         file_to_write = 'test/data_for_testing/Fosmid_Size_Selection_Tests/write_seqs_to_file_actual_output.fasta'
#         seq1 = SeqRecord(
#             Seq("AAGGTTCC"),
#             id="seq1",
#             name="seq1",
#             description="seq1")
#         seq2 = SeqRecord(
#             Seq("GGAACCTT"),
#             id="seq2",
#             name="seq2",
#             description="seq2")
#         seqs_to_write = [seq1,seq2]
#
#         seq_util.write_seqs_to_file(seqs_to_write, file_to_write)
#
#         expected_output_file = 'test/data_for_testing/Fosmid_Size_Selection_Tests/test_write_seqs_to_file_expected_output.fasta'
#
#         self.assertTrue(filecmp.cmp(expected_output_file, file_to_write, shallow=False))
#
#         os.remove(file_to_write)


# Done
class test_get_karyotype_data(unittest.TestCase):

    def test_get_karyotype_data(self):
        seq1_len_8 = SeqRecord(
            Seq("AAGGTTCC"),
            id="seq1_len_8",
            name="seq1_len_8",
            description="seq1_len_8")
        seq2_len_18 = SeqRecord(
            Seq("GGAACCTTGGAACCTT"),
            id="seq1_len_8",
            name="seq1_len_8",
            description="seq1_len_8")
        seqs = [seq1_len_8, seq2_len_18]

        result = seq_util.get_karyotype_data(seqs)

        expected_result_data = {'chr_prefix': ['chr', 'chr'],
                                '-prefix': ['-', '-'],
                                'variable_name': ['seq1_len_8', 'seq1_len_8'],
                                'diagram_label': ['1', '2'],
                                'start': [1, 1],
                                'end': [9, 17],
                                'color': ['rgb(120,120,120,0.4)', 'rgb(120,120,120,0.4)']}
        expected_result_df = pd.DataFrame(expected_result_data)

        self.assertEqual(assert_frame_equal(result, expected_result_df, check_dtype=False), None)


# Done
class test_gc_interval(unittest.TestCase):

    def test_gc_interval(self):
        interval_10_input_file = 'test/data_for_testing/gc_interval_tests/gc_content_test_interval_10_input.fasta'
        interval_3_input_file = 'test/data_for_testing/gc_interval_tests/gc_content_test_interval_3_input.fasta'

        interval_10_result = seq_util.gc_interval(interval_10_input_file, 10)
        interval_3_result = seq_util.gc_interval(interval_3_input_file, 3)

        expected_interval_10_result = {'contig': ['interval_10_mix', 'interval_10_mix', 'interval_10_mix', 'interval_10_mix',
                                                  'interval_10_all_GC', 'interval_10_all_GC', 'interval_10_not_divis_by_10'],
                                       'interval_start': [1, 11, 21, 31, 1, 11, 1],
                                       'interval_end': [11, 21, 31, 41, 11, 21, 5],
                                       'gc_content': [100, 50, 50, 0, 100, 100, 50]}
        expected_interval_10_result_df = pd.DataFrame(expected_interval_10_result)

        expected_interval_3_result = {'contig': ['interval_3_mix', 'interval_3_mix', 'interval_3_mix', 'interval_3_mix',
                                                 'interval_3_not_divis_by_3', 'interval_3_not_divis_by_3',
                                                 'interval_3_not_divis_by_3', 'interval_3_not_divis_by_3'],
                                      'interval_start': [1, 4, 7, 10, 1, 4, 7, 10],
                                      'interval_end': [4, 7, 10, 13, 4, 7, 10, 11],
                                      'gc_content': [((2/3)*100), 0, 100, (1/3)*100, 100, 100, 0, 100]}
        expected_interval_3_result_df = pd.DataFrame(expected_interval_3_result)

        self.assertEqual(assert_frame_equal(interval_10_result, expected_interval_10_result_df, check_dtype=False), None)
        self.assertEqual(assert_frame_equal(interval_3_result, expected_interval_3_result_df, check_dtype=False), None)


if __name__ == "__main__":
    unittest.main()
[ "troberts@shamwow.microbiology.ubc.ca" ]
troberts@shamwow.microbiology.ubc.ca
0fbd545a8ceab1cdd8f34f23bbb35f2cfe068d26
c08ffbe1a032611b32c865391b3a1043101f3182
/DigitalJournal.py
6414e25bb1ea873d6830a40bb5525b26985ee9f8
[]
no_license
bseibo61/LastfmLocation
3d35e5f2c84b788713818f577e266f4a486a7b03
612ae59231dad20b104cd4c1f4f5335d0767a717
refs/heads/master
2022-08-04T14:13:36.083976
2022-07-31T22:30:43
2022-07-31T22:30:43
231,705,835
0
0
null
null
null
null
UTF-8
Python
false
false
3,431
py
import pandas as pd
import glob, os, json
import datetime
import bisect
import itertools
import xml.etree.ElementTree as et

location_path = r'Data\GoogleData1-1-2020\Location History\Semantic Location History\2019'
smsPath = r'smsData'
all_files = glob.glob(os.path.join('', "*.json"))


def findkeys(node, kv):
    if isinstance(node, list):
        for i in node:
            for x in findkeys(i, kv):
                yield x
    elif isinstance(node, dict):
        if kv in node:
            yield node[kv]
        for j in node.values():
            for x in findkeys(j, kv):
                yield x


# print interesting bits of full_json
def printFinalJson(json):
    for i in json:
        match i['name']:
            case 'text':
                print("{0}: {1}".format(i['contactName'], i['body']))
            case 'activitySegment':
                print(i['activityType'])
            case 'placeVisit':
                print(i['placeName'])
            case _:
                raise Exception("Trying to print unrecognized name")


# Read location json
full_location_json = []
year_list = ['2020']
for year in year_list:
    # for f in glob.glob(os.path.join(r'Data\GoogleData1-1-2020\Location History\Semantic Location History\{0}'.format(year), "*.json")):
    for f in glob.glob(os.path.join(location_path, "*.json")):
        with open(f, encoding="utf8") as i:
            full_location_json.append(json.load(i))

# Read lastFM csv
music_df = pd.read_csv(r'Data/lastfm.csv', names=['artist', 'album', 'song', 'date'])

# Read SMS xml
sms_xml = open('Data/SmsDataTest.xml', 'r', encoding="utf8").read()
root = et.XML(sms_xml)
sms_json = []
for child in root:
    if child.tag == 'sms':
        sms_json.append({
            'name': 'text',
            'body': child.attrib['body'],
            'startTime': child.attrib['date'],
            'type': child.attrib['type'],
            'contactName': child.attrib['contact_name']
        })

# Convert lastfm time string to timestamp like google has
music_df['timestamp'] = music_df['date'].apply(
    lambda date: datetime.datetime.timestamp(datetime.datetime.strptime(date, '%d %b %Y %H:%M')))

temp_location_json = []
for month in full_location_json:
    for activity in month['timelineObjects']:
        segmentName = next(iter(activity))
        startTime = list(findkeys(activity, 'startTimestampMs'))[0]
        activityType = ''
        placeAddress = ''
        placeName = ''

        # get activity type
        if(segmentName == 'activitySegment'):
            activityType = activity['activitySegment']['activityType']
        if(segmentName == 'placeVisit'):
            placeName = activity['placeVisit']['location']['name']
            # need .get because some places don't have addresses
            placeAddress = activity['placeVisit']['location'].get('address')

        temp_location_json.append(
            {
                'name': segmentName,
                'activityType': activityType,
                'startTime': startTime,
                'placeName': placeName,
                'placeAddress': placeAddress
            })

full_location_json = temp_location_json
full_json = full_location_json + sms_json

# Sort months chronologically
full_json = sorted(full_json, key=lambda i: i['startTime'])

# TODO add in markers for each new day, get lastfm songs in full_json, look into making a webpage to display everything
# more text analytics? like most used words with people, texting frequency with people etc.
printFinalJson(full_json)
[ "brseibol@buffalo.edu" ]
brseibol@buffalo.edu
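The findkeys generator in the record above does a depth-first walk over arbitrarily nested JSON, yielding every value stored under a given key. A self-contained demo of the traversal (the nested structure below is invented, shaped like Google's semantic location history):

# Demo of the findkeys generator on a nested structure.
doc = {"timelineObjects": [
    {"activitySegment": {"duration": {"startTimestampMs": "100"}}},
    {"placeVisit": {"duration": {"startTimestampMs": "200"}}},
]}
print(list(findkeys(doc, "startTimestampMs")))   # ['100', '200']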
aa5eece40af1223be935846770e0a2e01e7a3ec4
732a0dcf738d220b320369cdcb7bd49368fde3fe
/PythonWebServerTemplate/src/controller/index.py
7d942fc7e2404a9a0b9c534b381a9c8f990bbe46
[]
no_license
RogerLai/Tools
03c50dfecf3f730210aa0e89a63ba5e34f686f4a
1e679ea4fa07cfe618d01037f171ad6bfcfc8ae0
refs/heads/master
2021-01-10T15:29:19.311679
2015-12-23T15:15:48
2015-12-23T15:15:48
44,651,691
0
0
null
null
null
null
UTF-8
Python
false
false
816
py
#!/usr/bin/env Python
#coding=utf-8
'''
Created on Apr 13, 2015

@author: rogerlai
'''

import tornado.web

from common import process
from common.config import TEMPLATE_PATH, STATIC_HOST, WEB_SERVER_ADDR

loader = tornado.web.template.Loader(TEMPLATE_PATH)


class WebGetIndexHandler(tornado.web.RequestHandler):

    @staticmethod
    def get_handler(self):
        param_dict = {}
        param_dict['title'] = u'随机分组'  # "random grouping"
        param_dict['static_host'] = STATIC_HOST
        param_dict['web_server'] = WEB_SERVER_ADDR
        response = loader.load("index.html").generate(params=param_dict)
        return response

    def get(self):
        self.write(process.process_request(self.request,
                                           lambda: WebGetIndexHandler.get_handler(self), 'html'))
[ "laixingrong@egeio.com" ]
laixingrong@egeio.com
5a405a2cc937b4a389075317ae656d5cd0d8aaeb
02bf59df060b4a680d4c4cdbd6c8780952f02d48
/GetReferralCodeByDeviceId.py
26f93e4e59278934ac76108f332d96691d81f001
[]
no_license
EdwardWuYiHsuan/Referral-Code
c874b020899cf356214b436d6948cd532a9d446e
88fb041b32eb9f494094dc1469aae2aa9f36e26f
refs/heads/main
2023-06-09T19:10:32.555450
2021-07-01T09:23:18
2021-07-01T09:23:18
370,632,238
0
0
null
null
null
null
UTF-8
Python
false
false
936
py
import datetime

import redis

endpoint = "elasticache-savedeviceidandreferralcode.akto78.ng.0001.apne1.cache.amazonaws.com"


def lambda_handler(event, context):
    print("[Info] Request : {}".format(event))

    timestamp = datetime.datetime.utcnow().isoformat() + 'Z'

    deviceId = event.get("device_id")
    if deviceId is None:
        return {
            "code": "0001",
            "desc": "Invalid 'device_id'",
            "timestamp": timestamp
        }

    try:
        redisClient = redis.StrictRedis(host=endpoint, port=6379, db=0, socket_timeout=1)
        referralCode = redisClient.get(deviceId)
        if referralCode is None:
            return {
                "code": "0004",
                "desc": "Referral code not found",
                "timestamp": timestamp
            }
    except:
        return {
            "code": "0005",
            "desc": "Failed to connect to redis",
            "timestamp": timestamp
        }

    return {
        "code": "0",
        "desc": "success",
        "timestamp": timestamp,
        "data": {
            "referral_code": referralCode
        }
    }
[ "edwardwu@xrex.io" ]
edwardwu@xrex.io
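The handler above reads only device_id from the event, so it can be smoke-tested outside Lambda wherever the ElastiCache endpoint is reachable (inside the VPC, say); otherwise the 0005 branch fires. A local invocation sketch with a placeholder id:

# Local smoke-test sketch; the device id is a placeholder, and the call only
# succeeds where the ElastiCache endpoint is reachable.
event = {"device_id": "some-device-id"}
print(lambda_handler(event, None))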
bf91de8bc79c94d76bf93ea0cc534b567dc2e161
4d9bd7874fc5a4f2ec56bb172f4e93a9601c4c83
/main.py
4864dd4bbecc043b09c96f4fb427a06e03a0c031
[]
no_license
liziniu/Model-Uncertainty-in-Neural-Networks
ff65009b3c165c4fd82efb9759cb26d41f914a2e
67c6042c52dd7e7a918ab42d34764bbb9a88c8a2
refs/heads/master
2020-05-04T00:26:47.315086
2019-04-06T03:19:47
2019-04-06T03:19:47
178,884,785
0
0
null
null
null
null
UTF-8
Python
false
false
1,506
py
from model1.default import get_config
from model1.model import Model
from utli import load_data, get_session, update_para
import argparse


def arg_parse():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=int, default=1)
    parser.add_argument("--epochs", type=int, default=100)
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--num_units", type=int, default=100)
    parser.add_argument("--pi", type=float, default=0.25)
    parser.add_argument("--mu1", type=float, default=0.0)
    parser.add_argument("--std1", type=float, default=0.5)
    parser.add_argument("--mu2", type=float, default=0.0)
    parser.add_argument("--std2", type=float, default=1.5)
    parser.add_argument("--train", action="store_true", default=False)
    parser.add_argument("--load_path", type=str, default="logs/model1/")
    return parser.parse_args()


def main(args):
    sess = get_session()
    default_para = get_config()
    para = update_para(default_para, args)
    model = Model(sess, para)

    x_train, x_test, y_train, y_test = load_data()
    x_train_ = x_train[:-5000]
    y_train_ = y_train[:-5000]
    x_valid = x_train[-5000:]
    y_valid = y_train[-5000:]

    if args.train:
        model.train(x_train_, y_train_, x_valid, y_valid)
    else:
        model.load(args.load_path)
        model.test(x_test, y_test)


if __name__ == "__main__":
    args = arg_parse()
    main(args)
[ "374387855@qq.com" ]
374387855@qq.com
1125e6f6ae45ed4a3a5edb239f269df89b29130e
7937031274c8ebebd6f8391af245216421338cc7
/myenv/bin/easy_install-3.8
dd98319436e38a41ec73653bac068144f5feac69
[]
no_license
ISLAMTU/Flappy_bird
fa621bfcf523ab7b255d416e9eb049abd448bdc5
37caacf3cbdab084ab5a65727abb46b802fd47c8
refs/heads/master
2023-03-10T07:22:43.186020
2021-02-16T21:25:39
2021-02-16T21:25:39
null
0
0
null
null
null
null
UTF-8
Python
false
false
265
8
#!/home/mohamed/Desktop/bird_game/myenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys

from setuptools.command.easy_install import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
[ "mohamedhisham694@gmail.com" ]
mohamedhisham694@gmail.com
f80411d77a0e6127a5f80503c618a43932da409a
29c39568880658d341ebc61202253a9a242327a8
/sku/views.py
169a8e90bf60b0bc666e1a53c19185a4d1b4457c
[]
no_license
thomaslzb/warehouse
e524937203452ddee0761ed296c6fd6a1605b3ec
0dabda06e5898e0b82e63641f6055d229d83d4e8
refs/heads/main
2023-04-30T12:06:30.488162
2021-05-20T07:09:28
2021-05-20T07:09:28
287,015,619
0
0
null
null
null
null
UTF-8
Python
false
false
17,003
py
import datetime
import math

import xlrd
from django.core.files.storage import FileSystemStorage
from django.http import HttpResponseRedirect
from django.shortcuts import render, reverse
from django.views import View
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
from django.db import transaction

from menu.views import get_user_grant_list
from quote.models import EuroCountry
from quote.public_func import parcel
from .forms import SkuUKForm, SkuEuroForm, SkuForm
from .models import Sku, SkuFileUpload

MY_MENU_LOCAL = 'MY_SKU'


def valid_file(req):
    error = ''
    if not len(req.FILES):  # check whether a file was selected
        error = 'Must select a file to upload.'
    try:
        uploaded_file = req.FILES['document']
        # use the file extension to check whether the selected file is an excel file
        if not uploaded_file.name.split('.')[-1].upper() in ['XLS', 'XLSX']:
            error = 'Only excel files can be uploaded.'
        # check whether the selected file is larger than 5M  1M = bytes/1000000
        if uploaded_file.size / 1000000 > 5:
            error = 'File size = ' + format(uploaded_file.size / 1000000, "4.2") + 'M. File size can not be more than 5M.'
    except:
        error = 'Must select a file to upload.'
    return error


def valid_excel_data(excel_table):
    error = False
    n_rows = excel_table.nrows  # number of rows
    for i in range(1, n_rows):
        rowValues = excel_table.row_values(i)
        try:
            if float(rowValues[2]) <= 0:
                error = True
            if float(rowValues[3]) <= 0:
                error = True
            if float(rowValues[4]) <= 0:
                error = True
            if float(rowValues[5]) <= 0:
                error = True
        except:
            error = True
        if error:
            break
    return error


class SkuCreateView(CreateView):
    model = Sku
    form_class = SkuForm
    template_name = 'sku_create.html'
    success_url = '/sku/sku-list'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['menu_active'] = MY_MENU_LOCAL
        context['menu_grant'] = get_user_grant_list(self.request.user.id)
        return context

    def form_invalid(self, form):
        # the page to return to when adding the object fails validation
        response = super().form_invalid(form)
        return response


class SkuSaveAndAnotherView(SkuCreateView):
    success_url = '/sku/add'


class SkuUpdateView(UpdateView):
    model = Sku
    form_class = SkuForm
    template_name = 'sku_edit.html'
    success_url = '/sku/sku-list'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['menu_active'] = MY_MENU_LOCAL
        context['menu_grant'] = get_user_grant_list(self.request.user.id)
        return context

    def form_invalid(self, form):
        # the page to return to when adding the object fails validation
        response = super().form_invalid(form)
        return response


class SkuListView(ListView):
    model = Sku
    template_name = 'sku_list.html'
    paginate_by = 10

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['menu_active'] = MY_MENU_LOCAL
        context['menu_grant'] = get_user_grant_list(self.request.user.id)
        return context

    def get_queryset(self):
        query_status = self.request.GET.get('status')
        query_sku = self.request.GET.get('s_sku')
        query_product = self.request.GET.get('s_product')
        if query_status or query_sku or query_product:
            if query_status == '':
                return Sku.objects.filter(sku_no__icontains=query_sku,
                                          sku_name__icontains=query_product,
                                          custom_id=self.request.user.id,
                                          )
            else:
                return Sku.objects.filter(is_ok__exact=query_status,
                                          sku_no__icontains=query_sku,
                                          sku_name__icontains=query_product,
                                          custom_id=self.request.user.id,
                                          )
        else:
            return Sku.objects.filter(custom_id=self.request.user.id)


class SkuUKDetail(DetailView):
    model = Sku
    template_name = 'sku_detail_uk.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['menu_active'] = MY_MENU_LOCAL
        context['menu_grant'] = get_user_grant_list(self.request.user.id)
        return context


class SkuEuroDetail(DetailView):
    model = Sku
    template_name = 'sku_detail_euro.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        all_euro_queryset = EuroCountry.objects.all().order_by('country')
        context['all_euro'] = all_euro_queryset
        context['menu_active'] = MY_MENU_LOCAL
        context['menu_grant'] = get_user_grant_list(self.request.user.id)
        return context


class SkuQuoteUK(View):

    def post(self, request, slug):
        sku_uk_form = SkuUKForm(request.POST)
        sku_queryset = Sku.objects.filter(id__exact=slug)
        if sku_uk_form.is_valid():
            length = int(math.ceil(sku_queryset[0].sku_length))
            width = int(math.ceil(sku_queryset[0].sku_width))
            high = int(math.ceil(sku_queryset[0].sku_high))
            # put length, width and high in the right order: length > width > high
            list_sort = [length, width, high]
            list_sort.sort()
            high = list_sort[0]
            width = list_sort[1]
            length = list_sort[2]

            is_uk = True
            weight = math.ceil(math.ceil(sku_queryset[0].sku_weight))
            qty = int(request.POST.get("qty", 0))
            postcode = request.POST.get("postcode", "").upper()
            address_type = request.POST.get("addresstype", "").upper()
            user_id = request.user.id

            company_code = 'HERM'
            l_hermes = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk)
            company_code = 'PASC'
            l_pacelforce = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk)
            company_code = 'DHL'
            l_dhl = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk)
            company_code = 'DPD'
            l_dpd = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk)
            company_code = 'UPS'
            l_ups = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk)

            if (not l_hermes[10]) and (not l_pacelforce[10]) and (not l_dhl[10]) and (not l_dpd[10]) and (
                    not l_ups[10]):
                return render(request, 'quote_error.html',
                              {'go': 'UK',
                               'length': length,
                               'width': width,
                               'high': high,
                               'weight': weight,
                               'qty': qty,
                               'postcode': postcode,
                               'address_type': address_type,
                               "quote_uk_form": sku_uk_form,
                               'menu_active': MY_MENU_LOCAL,
                               'menu_grant': get_user_grant_list(request.user.id),
                               'sku_no': sku_queryset[0].sku_no,
                               'sku_name': sku_queryset[0].sku_name,
                               })

            l_hermes = l_hermes[:-1]
            l_pacelforce = l_pacelforce[:-1]
            l_dhl = l_dhl[:-1]
            l_dpd = l_dpd[:-1]
            l_ups = l_ups[:-1]

            return render(request, 'list_price.html', {
                'hermes': l_hermes,
                'parcelforce': l_pacelforce,
                'dhl': l_dhl,
                'dpd': l_dpd,
                'ups': l_ups,
                'length': length,
                'width': width,
                'high': high,
                'weight': weight,
                'qty': qty,
                'postcode': postcode,
                'address_type': address_type,
                'is_uk': is_uk,
                'now': datetime.datetime.now(),
                'menu_active': MY_MENU_LOCAL,
                'menu_grant': get_user_grant_list(request.user.id),
                'sku_no': sku_queryset[0].sku_no,
                'sku_name': sku_queryset[0].sku_name,
            })

        return render(request, "sku_detail_uk.html", {
            'sku_uk_form': sku_uk_form,
            'object': sku_queryset[0],
            'menu_active': MY_MENU_LOCAL,
            'menu_grant': get_user_grant_list(request.user.id),
        })


class SkuQuoteEURO(View):

    def post(self, request, slug):
        all_euro = EuroCountry.objects.all().filter(belong='EURO')
        sku_euro_form = SkuEuroForm(request.POST)
        sku_queryset = Sku.objects.filter(id__exact=slug)
        if sku_euro_form.is_valid():
            length = int(math.ceil(sku_queryset[0].sku_length))
            width = int(math.ceil(sku_queryset[0].sku_width))
            high = int(math.ceil(sku_queryset[0].sku_high))
            # put length, width and high in the right order: length > width > high
            list_sort = [length, width, high]
            list_sort.sort()
            high = list_sort[0]
            width = list_sort[1]
            length = list_sort[2]

            is_uk = False
            weight = math.ceil(math.ceil(sku_queryset[0].sku_weight))
            qty = int(request.POST.get("qty", 0))
            postcode = request.POST.get("euro", "")
            address_type = request.POST.get("addresstype", "").upper()
            user_id = request.user.id

            company_code = 'HERM'
            l_hermes = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk, )
            company_code = 'PASC'
            l_pacelforce = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk, )
            company_code = 'DHL'
            l_dhl = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk, )
            company_code = 'DPD'
            l_dpd = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk, )
            company_code = 'UPS'
            l_ups = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk, )

            if (not l_hermes[10]) and (not l_pacelforce[10]) \
                    and (not l_dhl[10]) and (not l_dpd[10]) and (not l_ups[10]):
                return render(request, 'quote_error.html',
                              {'go': 'EURO',
                               'length': length,
                               'width': width,
                               'high': high,
                               'weight': weight,
                               'qty': qty,
                               'postcode': postcode,
                               'address_type': address_type,
                               "quote_uk_form": sku_euro_form,
                               'menu_active': MY_MENU_LOCAL,
                               'menu_grant': get_user_grant_list(request.user.id),
                               'sku_no': sku_queryset[0].sku_no,
                               'sku_name': sku_queryset[0].sku_name,
                               })

            l_hermes = l_hermes[:-1]
            l_pacelforce = l_pacelforce[:-1]
            l_dhl = l_dhl[:-1]
            l_dpd = l_dpd[:-1]
            l_ups = l_ups[:-1]

            return render(request, 'list_price.html', {
                'hermes': l_hermes,
                'parcelforce': l_pacelforce,
                'dhl': l_dhl,
                'dpd': l_dpd,
                'ups': l_ups,
                'length': length,
                'width': width,
                'high': high,
                'weight': weight,
                'qty': qty,
                'postcode': postcode,
                'address_type': address_type,
                'is_uk': is_uk,
                'now': datetime.datetime.now(),
                'menu_active': MY_MENU_LOCAL,
                'menu_grant': get_user_grant_list(request.user.id),
                'sku_no': sku_queryset[0].sku_no,
                'sku_name': sku_queryset[0].sku_name,
            })

        return render(request, "sku_detail_euro.html", {
            'sku_uk_form': sku_euro_form,
            'object': sku_queryset[0],
            'all_euro': all_euro,
            'menu_active': MY_MENU_LOCAL,
            'menu_grant': get_user_grant_list(request.user.id),
        })


class SkuDeleteView(DeleteView):
    model = Sku
    template_name = "sku_confirm_delete.html"

    def get_object(self, queryset=None):
        """ Hook to ensure object is owned by request.user. """
        obj = super(SkuDeleteView, self).get_object()
        # if not obj.op_user == self.request.user.id:
        #     raise Http404
        return obj

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['menu_active'] = MY_MENU_LOCAL
        context['menu_grant'] = get_user_grant_list(self.request.user.id)
        return context

    def get_success_url(self):
        return reverse('sku:sku-list')


class SkuFileUploadView(View):

    def get(self, request):
        return render(request, 'sku_upload.html', {
            'menu_active': MY_MENU_LOCAL,
            'menu_grant': get_user_grant_list(request.user.id),
        })

    def post(self, request):
        error = valid_file(request)
        if error:
            return render(request, 'sku_upload.html', {
                'menu_active': MY_MENU_LOCAL,
                'menu_grant': get_user_grant_list(request.user.id),
                'error': error,
            })

        uploaded_file = request.FILES['document']
        excel_data = xlrd.open_workbook(filename=None, file_contents=uploaded_file.read())
        table = excel_data.sheet_by_index(0)
        n_rows = table.nrows  # number of rows

        if valid_excel_data(table):
            error = 'Uploading Failure. length/width/high/weight must be more than zero. ' \
                    'There are some errors in the uploading File - ' + \
                    uploaded_file.name + '. '
            return render(request, 'sku_upload.html', {
                'menu_active': MY_MENU_LOCAL,
                'menu_grant': get_user_grant_list(request.user.id),
                'error': error,
            })

        try:
            with transaction.atomic():
                for i in range(1, n_rows):
                    rowValues = table.row_values(i)
                    Sku.objects.create(sku_no=rowValues[0],
                                       sku_name=rowValues[1],
                                       sku_length=rowValues[2],
                                       sku_width=rowValues[3],
                                       sku_high=rowValues[4],
                                       sku_weight=rowValues[5],
                                       is_ok='1',
                                       custom_id=request.user.id
                                       )
        except Exception as e:
            error = 'Sku No can not be duplicated. There are some errors in the uploading Files - ' + \
                    uploaded_file.name + '. '
            return render(request, 'sku_upload.html', {
                'menu_active': MY_MENU_LOCAL,
                'menu_grant': get_user_grant_list(request.user.id),
                'error': error,
            })

        return HttpResponseRedirect(reverse('sku:sku-list'))


class UserListView(ListView):
    model = Sku
    template_name = 'sku_list.html'
    paginate_by = 10

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['menu_active'] = MY_MENU_LOCAL
        context['menu_grant'] = get_user_grant_list(self.request.user.id)
        return context
[ "thomaslzbuk@gmail.com" ]
thomaslzbuk@gmail.com
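valid_excel_data in the record above expects a sheet whose row 0 is a header and whose columns 2-5 (length, width, high, weight) are positive numbers. A sketch of running that check against a local spreadsheet; the path is a placeholder, and note newer xlrd releases (2.x) dropped .xlsx support, so the xlrd version used by the project is an assumption:

# Sketch of exercising the validation helper above on a local file.
import xlrd

book = xlrd.open_workbook('skus.xlsx')   # placeholder path
table = book.sheet_by_index(0)
print(valid_excel_data(table))           # False means every data row is acceptable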
b6c2f9db34a01f882c5076597ae485bcdb696cf9
b597aca9f8ed351075ab6609a654ecf4d6f3d07e
/calc_pppkes_ind_stats.py
4511c06677c688b277f1d104ecb8c3f01720a3d8
[]
no_license
mcbarlowe/daily_scrape
e44b6d55304c63f2fa70a946cc5de49e9dcf56ed
209f415179b40b1252a0ebc0eea045e3def34c30
refs/heads/master
2022-01-08T18:17:34.765264
2018-12-20T03:40:03
2018-12-20T03:40:03
185,601,600
7
2
null
null
null
null
UTF-8
Python
false
false
50,181
py
'''
This script calculates individual stats for both home and away teams given
the strength state passed to the functions. It works for all strength states
except for all situations. Harry Shomer's skater totals include the goalie,
so if you wanted 5v5 you would actually pass 6 for each side's skaters. In
cases where the strength state is not even, the first number passed will be
the first number in the strength state, i.e. 5 and 6 would be equivalent to
4v5, and 6 and 5 is 5v4, etc.
'''
import pandas as pd
import numpy as np

import calc_all_sits_ind_stats as es_metrics


def calc_adj_ind_shot_metrics(pbp_df, pp_skaters_num, pk_skaters_num):
    '''
    function to calculate individual shot metrics and return a data
    frame with them

    Inputs:
    pbp_df - play by play dataframe

    Outputs:
    ind_shots_df - df with calculated iSF, iCF, iFF need to add ixG to this
                   later once xg model is finished
    '''

    corsi = ['SHOT', 'BLOCK', 'MISS', 'GOAL']
    fenwick = ['SHOT', 'MISS', 'GOAL']
    shot = ['SHOT', 'GOAL']

    home_5v4_df = pbp_df[(pbp_df.home_players == pp_skaters_num) &
                         (pbp_df.away_players == pk_skaters_num) &
                         (~pbp_df.home_goalie.isna())]

    away_5v4_df = pbp_df[(pbp_df.home_players == pk_skaters_num) &
                         (pbp_df.away_players == pp_skaters_num) &
                         (~pbp_df.away_goalie.isna())]

    home_corsi = home_5v4_df[(home_5v4_df.event.isin(corsi)) &
                             ((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
                              (home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
                              (home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
                              (home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
                              (home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
                              (home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['adj_corsi'].sum().reset_index()

    home_fenwick = home_5v4_df[(home_5v4_df.event.isin(fenwick)) &
                               ((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
                                (home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
                                (home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
                                (home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
                                (home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
                                (home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['adj_fenwick'].sum().reset_index()

    home_xg = home_5v4_df[(home_5v4_df.event.isin(fenwick)) &
                          ((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['adj_xg'].sum().reset_index()

    home_shot = home_5v4_df[(home_5v4_df.event.isin(corsi)) &
                            ((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
                             (home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
                             (home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
                             (home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
                             (home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
                             (home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['is_shot'].sum().reset_index()

    home_corsi.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iCF']
    home_fenwick.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iFF']
    home_shot.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iSF']
    home_xg.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'ixg']

    home_metrics_df = home_corsi.merge(home_fenwick,
                                       on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                                       how='outer')
    home_metrics_df = home_metrics_df.merge(home_shot,
                                            on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                                            how='outer')
    home_metrics_df = home_metrics_df.merge(home_xg,
                                            on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                                            how='outer')
    home_metrics_df = home_metrics_df.fillna(0)

    away_corsi = away_5v4_df[(away_5v4_df.event.isin(corsi)) &
                             ((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
                              (away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
                              (away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
                              (away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
                              (away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
                              (away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['adj_corsi'].sum().reset_index()

    away_fenwick = away_5v4_df[(away_5v4_df.event.isin(fenwick)) &
                               ((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
                                (away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
                                (away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
                                (away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
                                (away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
                                (away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['adj_fenwick'].sum().reset_index()

    away_xg = away_5v4_df[(away_5v4_df.event.isin(fenwick)) &
                          ((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['adj_xg'].sum().reset_index()

    away_shot = away_5v4_df[(away_5v4_df.event.isin(corsi)) &
                            ((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
                             (away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
                             (away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
                             (away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
                             (away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
                             (away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['is_shot'].sum().reset_index()

    away_corsi.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iCF']
    away_fenwick.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iFF']
    away_shot.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iSF']
    away_xg.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'ixg']

    away_metrics_df = away_corsi.merge(away_fenwick,
                                       on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                                       how='outer')
    away_metrics_df = away_metrics_df.merge(away_shot,
                                            on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                                            how='outer')
    away_metrics_df = away_metrics_df.merge(away_xg,
                                            on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                                            how='outer')
    away_metrics_df = away_metrics_df.fillna(0)

    metrics_df = pd.concat([away_metrics_df, home_metrics_df], sort=False)

    metrics_df.loc[:, ('player_id', 'game_id', 'iCF', 'iFF', 'iSF')] = \
        metrics_df.loc[:, ('player_id', 'game_id', 'iCF', 'iFF', 'iSF')].astype(int)

    return metrics_df


def calc_ind_shot_metrics(pbp_df, pp_skaters_num, pk_skaters_num):
    '''
    function to calculate individual shot metrics and return a data
    frame with them

    Inputs:
    pbp_df - play by play dataframe

    Outputs:
    ind_shots_df - df with calculated iSF, iCF, iFF need to add ixG to this
                   later once xg model is finished
    '''

    corsi = ['SHOT', 'BLOCK', 'MISS', 'GOAL']
    fenwick = ['SHOT', 'MISS', 'GOAL']
    shot = ['SHOT', 'GOAL']

    home_5v4_df = pbp_df[(pbp_df.home_players == pp_skaters_num) &
                         (pbp_df.away_players == pk_skaters_num) &
                         (~pbp_df.home_goalie.isna())]

    away_5v4_df = pbp_df[(pbp_df.home_players == pk_skaters_num) &
                         (pbp_df.away_players == pp_skaters_num) &
                         (~pbp_df.away_goalie.isna())]

    home_corsi = home_5v4_df[(home_5v4_df.event.isin(corsi)) &
                             ((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
                              (home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
                              (home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
                              (home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
                              (home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
                              (home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['is_corsi'].sum().reset_index()

    home_fenwick = home_5v4_df[(home_5v4_df.event.isin(fenwick)) &
                               ((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
                                (home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
                                (home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
                                (home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
                                (home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
                                (home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['is_fenwick'].sum().reset_index()

    home_xg = home_5v4_df[(home_5v4_df.event.isin(fenwick)) &
                          ((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['xg'].sum().reset_index()

    home_shot = home_5v4_df[(home_5v4_df.event.isin(corsi)) &
                            ((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
                             (home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
                             (home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
                             (home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
                             (home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
                             (home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['is_shot'].sum().reset_index()

    home_corsi.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iCF']
    home_fenwick.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iFF']
    home_shot.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iSF']
    home_xg.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'ixg']

    home_metrics_df = home_corsi.merge(home_fenwick,
                                       on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                                       how='outer')
    home_metrics_df = home_metrics_df.merge(home_shot,
                                            on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                                            how='outer')
    home_metrics_df = home_metrics_df.merge(home_xg,
                                            on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                                            how='outer')
    home_metrics_df = home_metrics_df.fillna(0)

    away_corsi = away_5v4_df[(away_5v4_df.event.isin(corsi)) &
                             ((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
                              (away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
                              (away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
                              (away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
                              (away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
                              (away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['is_corsi'].sum().reset_index()

    away_fenwick = away_5v4_df[(away_5v4_df.event.isin(fenwick)) &
                               ((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
                                (away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
                                (away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
                                (away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
                                (away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
                                (away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['is_fenwick'].sum().reset_index()

    away_xg = away_5v4_df[(away_5v4_df.event.isin(fenwick)) &
                          ((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['xg'].sum().reset_index()

    away_shot = away_5v4_df[(away_5v4_df.event.isin(corsi)) &
                            ((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
                             (away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
                             (away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
                             (away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
                             (away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
                             (away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['is_shot'].sum().reset_index()

    away_corsi.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iCF']
    away_fenwick.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iFF']
    away_shot.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iSF']
    away_xg.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'ixg']

    away_metrics_df = away_corsi.merge(away_fenwick,
                                       on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                                       how='outer')
    away_metrics_df = away_metrics_df.merge(away_shot,
                                            on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                                            how='outer')
    away_metrics_df = away_metrics_df.merge(away_xg,
                                            on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                                            how='outer')
    away_metrics_df = away_metrics_df.fillna(0)

    metrics_df = pd.concat([away_metrics_df, home_metrics_df], sort=False)

    metrics_df.loc[:, ('game_id', 'player_id', 'iCF', 'iFF', 'iSF')] = \
        metrics_df.loc[:, ('game_id', 'player_id', 'iCF', 'iFF', 'iSF')].astype(int)

    return metrics_df


def calc_ind_hits(pbp_df, pp_skaters_num, pk_skaters_num):
    '''
    function calculates hits for and against from the pbp_df.

    Input:
    pbp_df - play by play dataframe

    Output:
    hit_df - dataframe of each player's hits stats
    '''

    home_5v4_df = pbp_df[(pbp_df.home_players == pp_skaters_num) &
                         (pbp_df.away_players == pk_skaters_num) &
                         (~pbp_df.home_goalie.isna())]

    away_5v4_df = pbp_df[(pbp_df.home_players == pk_skaters_num) &
                         (pbp_df.away_players == pp_skaters_num) &
                         (~pbp_df.away_goalie.isna())]

    home_hit_for = home_5v4_df[(home_5v4_df.event == 'HIT') &
                               ((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
                                (home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
                                (home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
                                (home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
                                (home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
                                (home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name']).size().\
        reset_index()

    home_hit_for.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iHF']

    home_hit_against = home_5v4_df[(home_5v4_df.event == 'HIT') &
                                   ((home_5v4_df.p2_id == home_5v4_df.homeplayer1_id) |
                                    (home_5v4_df.p2_id == home_5v4_df.homeplayer2_id) |
                                    (home_5v4_df.p2_id == home_5v4_df.homeplayer3_id) |
                                    (home_5v4_df.p2_id == home_5v4_df.homeplayer4_id) |
                                    (home_5v4_df.p2_id == home_5v4_df.homeplayer5_id) |
                                    (home_5v4_df.p2_id == home_5v4_df.homeplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p2_id', 'p2_name']).size().\
        reset_index()

    home_hit_against.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iHA']

    home_hit_df = home_hit_for.merge(home_hit_against,
                                     on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                                     how='outer')
    home_hit_df = home_hit_df.fillna(0)

    away_hit_for = away_5v4_df[(away_5v4_df.event == 'HIT') &
                               ((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
                                (away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
                                (away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
                                (away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
                                (away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
                                (away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name']).size().\
        reset_index()

    away_hit_for.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iHF']

    away_hit_against = away_5v4_df[(away_5v4_df.event == 'HIT') &
                                   ((away_5v4_df.p2_id == away_5v4_df.awayplayer1_id) |
                                    (away_5v4_df.p2_id == away_5v4_df.awayplayer2_id) |
                                    (away_5v4_df.p2_id == away_5v4_df.awayplayer3_id) |
                                    (away_5v4_df.p2_id == away_5v4_df.awayplayer4_id) |
                                    (away_5v4_df.p2_id == away_5v4_df.awayplayer5_id) |
                                    (away_5v4_df.p2_id == away_5v4_df.awayplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p2_id', 'p2_name']).size().\
        reset_index()

    away_hit_against.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iHA']

    away_hit_df = away_hit_for.merge(away_hit_against,
                                     on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                                     how='outer')
    away_hit_df = away_hit_df.fillna(0)

    hit_df_list = [home_hit_df, away_hit_df]

    hit_df = pd.concat(hit_df_list, sort=False).reset_index()

    hit_df = hit_df[['season', 'game_id', 'date', 'player_id', 'player_name', 'iHF', 'iHA']]

    hit_df.loc[:, ('season', 'game_id', 'player_id', 'iHF', 'iHA')] = \
        hit_df.loc[:, ('season', 'game_id', 'player_id', 'iHF', 'iHA')].astype(int)

    return hit_df


def calc_pp_gata(pbp_df, pp_skaters_num, pk_skaters_num):
    '''
    function calculates giveaways and takeaways from the pbp_df.

    Input:
    pbp_df - play by play dataframe

    Output:
    hit_df - dataframe of each player's GA/TA stats
    '''

    home_5v4_df = pbp_df[(pbp_df.home_players == pp_skaters_num) &
                         (pbp_df.away_players == pk_skaters_num) &
                         (~pbp_df.home_goalie.isna())]

    away_5v4_df = pbp_df[(pbp_df.home_players == pk_skaters_num) &
                         (pbp_df.away_players == pp_skaters_num) &
                         (~pbp_df.away_goalie.isna())]

    home_ga = home_5v4_df[(home_5v4_df.event == 'GIVE') &
                          ((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name']).size().\
        reset_index()

    home_ga.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iGA']

    home_ta = home_5v4_df[(home_5v4_df.event == 'TAKE') &
                          ((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
                           (home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name']).size().\
        reset_index()

    home_ta.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iTA']

    home_gata = home_ga.merge(home_ta,
                              on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                              how='outer')
    home_gata = home_gata.fillna(0)

    away_ga = away_5v4_df[(away_5v4_df.event == 'GIVE') &
                          ((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name']).size().\
        reset_index()

    away_ga.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iGA']

    away_ta = away_5v4_df[(away_5v4_df.event == 'TAKE') &
                          ((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
                           (away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name']).size().\
        reset_index()

    away_ta.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iTA']

    away_gata = away_ga.merge(away_ta,
                              on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                              how='outer')
    away_gata = away_gata.fillna(0)

    gata = [home_gata, away_gata]

    gata_df = pd.concat(gata, sort=False)

    gata_df.loc[:, ('season', 'game_id', 'player_id', 'iGA', 'iTA')] = \
        gata_df.loc[:, ('season', 'game_id', 'player_id', 'iGA', 'iTA')].astype(int)

    gata_df = gata_df[['season', 'game_id', 'date', 'player_id', 'player_name', 'iGA', 'iTA']]

    return gata_df


def calc_pp_blocks(pbp_df, pp_skaters_num, pk_skaters_num):
    '''
    function to calculate a player's blocks while on the pp

    Inputs:
    pbp_df - dataframe of play by play data

    Outputs:
    blk_df - dataframe of blocks by players on the power play
    '''

    home_5v4_df = pbp_df[(pbp_df.home_players == pp_skaters_num) &
                         (pbp_df.away_players == pk_skaters_num) &
                         (~pbp_df.home_goalie.isna())]

    away_5v4_df = pbp_df[(pbp_df.home_players == pk_skaters_num) &
                         (pbp_df.away_players == pp_skaters_num) &
                         (~pbp_df.away_goalie.isna())]

    home_blk_df = home_5v4_df[(home_5v4_df.event == 'BLOCK') &
                              ((home_5v4_df.p2_id == home_5v4_df.homeplayer1_id) |
                               (home_5v4_df.p2_id == home_5v4_df.homeplayer2_id) |
                               (home_5v4_df.p2_id == home_5v4_df.homeplayer3_id) |
                               (home_5v4_df.p2_id == home_5v4_df.homeplayer4_id) |
                               (home_5v4_df.p2_id == home_5v4_df.homeplayer5_id) |
                               (home_5v4_df.p2_id == home_5v4_df.homeplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p2_id', 'p2_name']).size().\
        reset_index()

    away_blk_df = away_5v4_df[(away_5v4_df.event == 'BLOCK') &
                              ((away_5v4_df.p2_id == away_5v4_df.awayplayer1_id) |
                               (away_5v4_df.p2_id == away_5v4_df.awayplayer2_id) |
                               (away_5v4_df.p2_id == away_5v4_df.awayplayer3_id) |
                               (away_5v4_df.p2_id == away_5v4_df.awayplayer4_id) |
                               (away_5v4_df.p2_id == away_5v4_df.awayplayer5_id) |
                               (away_5v4_df.p2_id == away_5v4_df.awayplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p2_id', 'p2_name']).size().\
        reset_index()

    blk_list = [home_blk_df, away_blk_df]

    blk_df = pd.concat(blk_list, sort=False)

    blk_df.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'BLK']

    blk_df.loc[:, ('season', 'game_id', 'player_id', 'BLK')] = \
        blk_df.loc[:, ('season', 'game_id', 'player_id', 'BLK')].astype(int)

    return blk_df


def calc_pp_faceoffs(pbp_df, pp_skaters_num, pk_skaters_num):
    '''
    calculate the faceoffs won and lost by a player whose team is on the
    power play

    Inputs:
    pbp_df - play by play dataframe
    pp_skaters_num - number of skaters for team on the power play
    pk_skaters_num - number of skaters for team on the penalty kill

    Outputs:
    fo_df - dataframe of FOW and FOL for teams on the PP
    '''

    home_5v4_df = pbp_df[(pbp_df.home_players == pp_skaters_num) &
                         (pbp_df.away_players == pk_skaters_num) &
                         (~pbp_df.home_goalie.isna())]

    away_5v4_df = pbp_df[(pbp_df.home_players == pk_skaters_num) &
                         (pbp_df.away_players == pp_skaters_num) &
                         (~pbp_df.away_goalie.isna())]

    home_fo_won = home_5v4_df[(home_5v4_df.event == 'FAC') &
                              ((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
                               (home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
                               (home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
                               (home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
                               (home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
                               (home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name']).size().\
        reset_index()

    home_fo_won.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'FOW']

    home_fo_lost = home_5v4_df[(home_5v4_df.event == 'FAC') &
                               ((home_5v4_df.p2_id == home_5v4_df.homeplayer1_id) |
                                (home_5v4_df.p2_id == home_5v4_df.homeplayer2_id) |
                                (home_5v4_df.p2_id == home_5v4_df.homeplayer3_id) |
                                (home_5v4_df.p2_id == home_5v4_df.homeplayer4_id) |
                                (home_5v4_df.p2_id == home_5v4_df.homeplayer5_id) |
                                (home_5v4_df.p2_id == home_5v4_df.homeplayer6_id))].\
        groupby(['season', 'game_id', 'date', 'p2_id', 'p2_name']).size().\
        reset_index()

    home_fo_lost.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'FOL']

    home_5v4_fo_df = home_fo_won.merge(home_fo_lost,
                                       on=['season', 'game_id', 'date', 'player_id', 'player_name'],
                                       how='outer')

    away_fo_won = away_5v4_df[(away_5v4_df.event == 'FAC') &
                              ((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
                               (away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
                               (away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
                               (away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
                               (away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
                               (away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
        groupby(['season', 'game_id',
'date', 'p1_id', 'p1_name']).size().\ reset_index() away_fo_won.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'FOW'] away_fo_lost = away_5v4_df[(away_5v4_df.event == 'FAC') & ((away_5v4_df.p2_id == away_5v4_df.awayplayer1_id) | (away_5v4_df.p2_id == away_5v4_df.awayplayer2_id) | (away_5v4_df.p2_id == away_5v4_df.awayplayer3_id) | (away_5v4_df.p2_id == away_5v4_df.awayplayer4_id) | (away_5v4_df.p2_id == away_5v4_df.awayplayer5_id) | (away_5v4_df.p2_id == away_5v4_df.awayplayer6_id))].\ groupby(['season', 'game_id', 'date', 'p2_id', 'p2_name']).size().\ reset_index() away_fo_lost.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'FOL'] away_5v4_fo_df = away_fo_won.merge(away_fo_lost, on=['season', 'game_id', 'date', 'player_id', 'player_name'], how='outer') fo_dfs = [home_5v4_fo_df, away_5v4_fo_df] fo_5v4 = pd.concat(fo_dfs, sort=False) fo_5v4 = fo_5v4.fillna(0) fo_5v4 = fo_5v4[['season', 'game_id', 'date', 'player_id', 'player_name', 'FOW', 'FOL']] fo_5v4.loc[:, ('season', 'game_id', 'player_id', 'FOW', 'FOL')] = \ fo_5v4.loc[:, ('season', 'game_id', 'player_id', 'FOW', 'FOL')].astype(int) return fo_5v4 def calc_pp_ind_points(pbp_df, pp_skaters_num, pk_skaters_num): ''' This function calculates the individual goals and assists scored while at a strength state of 5v4 Input: pbp_df - play by play dataframe Output: five_v_4_df - play by play dataframe of events taken at 5v4 strength ''' home_pp_df = pbp_df[(pbp_df.ev_team == pbp_df.home_team) & (pbp_df.home_players == pp_skaters_num) & (pbp_df.away_players == pk_skaters_num) & (~pbp_df.home_goalie.isna())] away_pp_df = pbp_df[(pbp_df.ev_team == pbp_df.away_team) & (pbp_df.home_players == pk_skaters_num) & (pbp_df.away_players == pp_skaters_num) & (~pbp_df.home_goalie.isna())] home_pp_points = es_metrics.calc_ind_points(home_pp_df) away_pp_points = es_metrics.calc_ind_points(away_pp_df) pts_pp = [home_pp_points, away_pp_points] pts_pp_df = pd.concat(pts_pp, sort=False) pts_pp_df = pts_pp_df[['season', 'game_id', 'date', 'player_id', 'player_name', 'g', 'a1', 'a2']] pts_pp_df.loc[:, ('season', 'game_id')] = pts_pp_df.loc[:, ('season', 'game_id')].astype(int) return pts_pp_df def calc_pp_penalties(pbp_df, pp_skaters_num, pk_skaters_num): ''' function to calculate penalties drawn and taken for teams on the ''' home_pp_df = pbp_df[(pbp_df.ev_team == pbp_df.home_team) & (pbp_df.home_players == pp_skaters_num) & (pbp_df.away_players == pk_skaters_num) & (pbp_df.is_penalty > 0) & (~pbp_df.home_goalie.isna())] away_pp_df = pbp_df[(pbp_df.ev_team == pbp_df.away_team) & (pbp_df.home_players == pk_skaters_num) & (pbp_df.away_players == pp_skaters_num) & (pbp_df.is_penalty > 0) & (~pbp_df.home_goalie.isna())] home_pent = home_pp_df[(home_pp_df.event == 'PENL') & ((home_pp_df.p1_id == home_pp_df.homeplayer1_id) | (home_pp_df.p1_id == home_pp_df.homeplayer2_id) | (home_pp_df.p1_id == home_pp_df.homeplayer3_id) | (home_pp_df.p1_id == home_pp_df.homeplayer4_id) | (home_pp_df.p1_id == home_pp_df.homeplayer5_id) | (home_pp_df.p1_id == home_pp_df.homeplayer6_id))].\ groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['is_penalty'].sum().\ reset_index() home_pent.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iPENT'] home_pend = home_pp_df[(home_pp_df.event == 'PENL') & ((home_pp_df.p2_id == home_pp_df.homeplayer1_id) | (home_pp_df.p2_id == home_pp_df.homeplayer2_id) | (home_pp_df.p2_id == home_pp_df.homeplayer3_id) | (home_pp_df.p2_id == home_pp_df.homeplayer4_id) | (home_pp_df.p2_id == 
home_pp_df.homeplayer5_id) | (home_pp_df.p2_id == home_pp_df.homeplayer6_id))].\ groupby(['season', 'game_id', 'date', 'p2_id', 'p2_name'])['is_penalty'].sum().\ reset_index() home_pend.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iPEND'] home_pp_penl = home_pent.merge(home_pend, on=['season', 'game_id', 'date', 'player_id', 'player_name'], how='outer') away_pent = away_pp_df[(away_pp_df.event == 'PENL') & ((away_pp_df.p1_id == away_pp_df.awayplayer1_id) | (away_pp_df.p1_id == away_pp_df.awayplayer2_id) | (away_pp_df.p1_id == away_pp_df.awayplayer3_id) | (away_pp_df.p1_id == away_pp_df.awayplayer4_id) | (away_pp_df.p1_id == away_pp_df.awayplayer5_id) | (away_pp_df.p1_id == away_pp_df.awayplayer6_id))].\ groupby(['season', 'game_id', 'date', 'p1_id', 'p1_name'])['is_penalty'].sum().\ reset_index() away_pent.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iPENT'] away_pend = away_pp_df[(away_pp_df.event == 'PENL') & ((away_pp_df.p2_id == away_pp_df.awayplayer1_id) | (away_pp_df.p2_id == away_pp_df.awayplayer2_id) | (away_pp_df.p2_id == away_pp_df.awayplayer3_id) | (away_pp_df.p2_id == away_pp_df.awayplayer4_id) | (away_pp_df.p2_id == away_pp_df.awayplayer5_id) | (away_pp_df.p2_id == away_pp_df.awayplayer6_id))].\ groupby(['season', 'game_id', 'date', 'p2_id', 'p2_name'])['is_penalty'].sum().\ reset_index() away_pend.columns = ['season', 'game_id', 'date', 'player_id', 'player_name', 'iPEND'] away_pp_penl = away_pent.merge(away_pend, on=['season', 'game_id', 'date', 'player_id', 'player_name'], how='outer') penl_dfs = [home_pp_penl, away_pp_penl] pp_penl_dfs = pd.concat(penl_dfs, sort=False) pp_penl_dfs = pp_penl_dfs.fillna(0) pp_penl_dfs = pp_penl_dfs[['season', 'game_id', 'date', 'player_id', 'player_name', 'iPENT', 'iPEND']] pp_penl_dfs.loc[:, ('season', 'game_id', 'player_id', 'iPENT', 'iPEND')] = \ pp_penl_dfs.loc[:, ('season', 'game_id', 'player_id', 'iPENT', 'iPEND')].astype(int) return pp_penl_dfs def calc_ppespk_ind_metrics(pbp_df, pp_skaters_num, pk_skaters_num, calc_blk=calc_pp_blocks, \ calc_fo=calc_pp_faceoffs, calc_points=calc_pp_ind_points, calc_penalties=calc_pp_penalties, calc_hits=calc_ind_hits, calc_shot_metrics=calc_ind_shot_metrics, calc_gata=calc_pp_gata): ''' this function calculates the individual metrics of each players contribution during the game Input: pbp_df - play by play df pp_skaters_num - the first number of the strength state wanted for 5v5 would be 6 because of the scraper for 4v5 would be five pk_skaters_num - the second number of the strength state wanted for 5v5 would be 6 because of the scraper for 4v5 would be six Output: player_df - individual player stats df ''' #calculate each individual stats data frames and then join them all together #will pull in teams with the on ice measures points_df = calc_points(pbp_df, pp_skaters_num, pk_skaters_num) metrics_df = calc_shot_metrics(pbp_df, pp_skaters_num, pk_skaters_num) penalty_df = calc_penalties(pbp_df, pp_skaters_num, pk_skaters_num) hit_df = calc_hits(pbp_df, pp_skaters_num, pk_skaters_num) gata_df = calc_gata(pbp_df, pp_skaters_num, pk_skaters_num) fo_df = calc_fo(pbp_df, pp_skaters_num, pk_skaters_num) blk_df = calc_blk(pbp_df, pp_skaters_num, pk_skaters_num) ind_stats_df = metrics_df.merge(points_df, on=['season', 'game_id', 'date', 'player_id', 'player_name'], how='outer') ind_stats_df = ind_stats_df.merge(penalty_df, on=['season', 'game_id', 'date', 'player_id', 'player_name'], how='outer') ind_stats_df = ind_stats_df.merge(hit_df, on=['season', 
'game_id', 'date', 'player_id', 'player_name'], how='outer') ind_stats_df = ind_stats_df.merge(gata_df, on=['season', 'game_id', 'date', 'player_id', 'player_name'], how='outer') ind_stats_df = ind_stats_df.merge(fo_df, on=['season', 'game_id', 'date', 'player_id', 'player_name'], how='outer') ind_stats_df = ind_stats_df.merge(blk_df, on=['season', 'game_id', 'date', 'player_id', 'player_name'], how='outer') ind_stats_df = ind_stats_df.fillna(0) ind_stats_df.loc[:, ('player_id', 'iCF', 'iFF', 'iSF', 'g', 'a1', 'a2', 'iPENT', 'iPEND', 'iHF', 'iHA', 'iGA', 'iTA', 'FOW', 'FOL', 'BLK')] = \ ind_stats_df.loc[:, ('player_id', 'iCF', 'iFF', 'iSF', 'g', 'a1', 'a2', 'iPENT', 'iPEND', 'iHF', 'iHA', 'iGA', 'iTA', 'FOW', 'FOL', 'BLK')].astype(int) ind_stats_df = ind_stats_df[['season', 'game_id', 'date', 'player_id', 'player_name', 'iCF', 'iFF', 'iSF', 'ixg', 'g', 'a1', 'a2', 'iPENT', 'iPEND', 'iHF', 'iHA', 'iGA', 'iTA', 'FOW', 'FOL', 'BLK']] ind_stats_df = ind_stats_df[ind_stats_df.player_id != 0] ind_stats_df = ind_stats_df[~ind_stats_df.player_id.isin(pbp_df.home_goalie_id.unique())] ind_stats_df = ind_stats_df[~ind_stats_df.player_id.isin(pbp_df.away_goalie_id.unique())] return ind_stats_df.reset_index(drop=True) def calc_adj_ppespk_ind_metrics(pbp_df, pp_skaters_num, pk_skaters_num, calc_blk=calc_pp_blocks, \ calc_fo=calc_pp_faceoffs, calc_points=calc_pp_ind_points, calc_penalties=calc_pp_penalties, calc_hits=calc_ind_hits, calc_shot_metrics=calc_adj_ind_shot_metrics, calc_gata=calc_pp_gata): ''' this function calculates the individual metrics of each players contribution during the game Input: pbp_df - play by play df pp_skaters_num - the first number of the strength state wanted for 5v5 would be 6 because of the scraper for 4v5 would be five pk_skaters_num - the second number of the strength state wanted for 5v5 would be 6 because of the scraper for 4v5 would be six Output: player_df - individual player stats df ''' #calculate each individual stats data frames and then join them all together #will pull in teams with the on ice measures points_df = calc_points(pbp_df, pp_skaters_num, pk_skaters_num) metrics_df = calc_shot_metrics(pbp_df, pp_skaters_num, pk_skaters_num) penalty_df = calc_penalties(pbp_df, pp_skaters_num, pk_skaters_num) hit_df = calc_hits(pbp_df, pp_skaters_num, pk_skaters_num) gata_df = calc_gata(pbp_df, pp_skaters_num, pk_skaters_num) fo_df = calc_fo(pbp_df, pp_skaters_num, pk_skaters_num) blk_df = calc_blk(pbp_df, pp_skaters_num, pk_skaters_num) ind_stats_df = metrics_df.merge(points_df, on=['season', 'game_id', 'date', 'player_id', 'player_name'], how='outer') ind_stats_df = ind_stats_df.merge(penalty_df, on=['season', 'game_id', 'date', 'player_id', 'player_name'], how='outer') ind_stats_df = ind_stats_df.merge(hit_df, on=['season', 'game_id', 'date', 'player_id', 'player_name'], how='outer') ind_stats_df = ind_stats_df.merge(gata_df, on=['season', 'game_id', 'date', 'player_id', 'player_name'], how='outer') ind_stats_df = ind_stats_df.merge(fo_df, on=['season', 'game_id', 'date', 'player_id', 'player_name'], how='outer') ind_stats_df = ind_stats_df.merge(blk_df, on=['season', 'game_id', 'date', 'player_id', 'player_name'], how='outer') ind_stats_df = ind_stats_df.fillna(0) ind_stats_df.loc[:, ('game_id', 'player_id', 'iCF', 'iFF', 'iSF', 'g', 'a1', 'a2', 'iPENT', 'iPEND', 'iHF', 'iHA', 'iGA', 'iTA', 'FOW', 'FOL', 'BLK')] = \ ind_stats_df.loc[:, ('game_id', 'player_id', 'iCF', 'iFF', 'iSF', 'g', 'a1', 'a2', 'iPENT', 'iPEND', 'iHF', 'iHA', 'iGA', 'iTA', 'FOW', 'FOL', 
'BLK')].astype(int) ind_stats_df = ind_stats_df[['season', 'game_id', 'date', 'player_id', 'player_name', 'iCF', 'iFF', 'iSF', 'ixg', 'g', 'a1', 'a2', 'iPENT', 'iPEND', 'iHF', 'iHA', 'iGA', 'iTA', 'FOW', 'FOL', 'BLK']] ind_stats_df = ind_stats_df[ind_stats_df.player_id != 0] ind_stats_df = ind_stats_df[~ind_stats_df.player_id.isin(pbp_df.home_goalie_id.unique())] ind_stats_df = ind_stats_df[~ind_stats_df.player_id.isin(pbp_df.away_goalie_id.unique())] return ind_stats_df.reset_index(drop=True) def main(): return if __name__ == '__main__': main()
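A minimal usage sketch for the power-play module above, assuming a play-by-play CSV with the columns these functions reference; the pbp.csv path is illustrative, and the skater counts follow the docstring convention that the scraper's counts include the goalie (so 5v4 is passed as 6, 5):

import pandas as pd

# hypothetical play-by-play extract with the columns this module expects
pbp_df = pd.read_csv('pbp.csv', parse_dates=['date'])

# individual 5v4 power-play stats: per the calc_ppespk_ind_metrics docstring,
# the scraper's player counts include the goalie, so 5v4 becomes (6, 5)
pp_stats = calc_ppespk_ind_metrics(pbp_df, 6, 5)
print(pp_stats.head())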
[ "mcbarlowe@gmail.com" ]
mcbarlowe@gmail.com
88e5196904ade4057d99bc9906f441c9cf1edb74
93257ad72659cf766e9d99fe24666f434c4ae40d
/users/migrations/0027_auto_20190814_1449.py
8885e223728fd46844deeb633bb1aac30e809e89
[]
no_license
AkshatLal16/myblog
6ce01acabb91e75fe78b23a9b2ce7ed746fe2a08
66c71e16b8979711f5006d70895234dba034b7c5
refs/heads/master
2020-07-15T13:09:28.915594
2019-09-27T07:37:41
2019-09-27T07:37:41
205,569,274
1
2
null
null
null
null
UTF-8
Python
false
false
1,354
py
# Generated by Django 2.2.1 on 2019-08-14 09:19

import datetime

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('users', '0026_auto_20190813_1522'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='email',
        ),
        migrations.RemoveField(
            model_name='profile',
            name='name',
        ),
        migrations.RemoveField(
            model_name='profile',
            name='password',
        ),
        migrations.RemoveField(
            model_name='profile',
            name='username',
        ),
        migrations.AddField(
            model_name='profile',
            name='user',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='story',
            name='date_posted',
            field=models.DateTimeField(default=datetime.datetime(2019, 8, 14, 9, 19, 33, 292612, tzinfo=utc)),
        ),
    ]
[ "akshat@henryharvin.in" ]
akshat@henryharvin.in
d8f85667c90be108d54a8e925957d55331a8e7d5
5fc3a5fb73e4fef5a022f9f2fee83775af3d7b71
/ems-cloud-mgmt-sdk/python/setup.py
d71a3cde5bf7d1107297182b72a15b7f587676ae
[ "MIT" ]
permissive
byblakeorriver/coinapi-sdk
f030267f4283a8ae9217217668bdbcfc75c2b89c
d786aed283c562b030a13d4efd3adbebe53d9b27
refs/heads/master
2023-03-10T21:31:38.381408
2023-02-23T04:54:38
2023-02-23T04:54:38
292,376,196
0
0
MIT
2020-12-31T17:03:42
2020-09-02T19:29:17
C++
UTF-8
Python
false
false
7,715
py
# coding: utf-8 """ EMS - REST API This section will provide necessary information about the `CoinAPI EMS REST API` protocol. <br/> This API is also available in the Postman application: <a href=\"https://postman.coinapi.io/\" target=\"_blank\">https://postman.coinapi.io/</a> <br/><br/> Implemented Standards: * [HTTP1.0](https://datatracker.ietf.org/doc/html/rfc1945) * [HTTP1.1](https://datatracker.ietf.org/doc/html/rfc2616) * [HTTP2.0](https://datatracker.ietf.org/doc/html/rfc7540) ### Endpoints <table> <thead> <tr> <th>Deployment method</th> <th>Environment</th> <th>Url</th> </tr> </thead> <tbody> <tr> <td>Managed Cloud</td> <td>Production</td> <td>Use <a href=\"#ems-docs-sh\">Managed Cloud REST API /v1/locations</a> to get specific endpoints to each server site where your deployments span</td> </tr> <tr> <td>Managed Cloud</td> <td>Sandbox</td> <td><code>https://ems-gateway-aws-eu-central-1-dev.coinapi.io/</code></td> </tr> <tr> <td>Self Hosted</td> <td>Production</td> <td>IP Address of the <code>ems-gateway</code> container/excecutable in the closest server site to the caller location</td> </tr> <tr> <td>Self Hosted</td> <td>Sandbox</td> <td>IP Address of the <code>ems-gateway</code> container/excecutable in the closest server site to the caller location</td> </tr> </tbody> </table> ### Authentication If the software is deployed as `Self-Hosted` then API do not require authentication as inside your infrastructure, your company is responsible for the security and access controls. <br/><br/> If the software is deployed in our `Managed Cloud`, there are 2 methods for authenticating with us, you only need to use one: 1. Custom authorization header named `X-CoinAPI-Key` with the API Key 2. Query string parameter named `apikey` with the API Key 3. <a href=\"#certificate\">TLS Client Certificate</a> from the `Managed Cloud REST API` (/v1/certificate/pem endpoint) while establishing a TLS session with us. #### Custom authorization header You can authorize by providing additional custom header named `X-CoinAPI-Key` and API key as its value. Assuming that your API key is `73034021-THIS-IS-SAMPLE-KEY`, then the authorization header you should send to us will look like: <br/><br/> `X-CoinAPI-Key: 73034021-THIS-IS-SAMPLE-KEY` <aside class=\"success\">This method is recommended by us and you should use it in production environments.</aside> #### Query string authorization parameter You can authorize by providing an additional parameter named `apikey` with a value equal to your API key in the query string of your HTTP request. 
Assuming that your API key is `73034021-THIS-IS-SAMPLE-KEY` and that you want to request all balances, then your query string should look like this: <br/><br/> `GET /v1/balances?apikey=73034021-THIS-IS-SAMPLE-KEY` <aside class=\"notice\">Query string method may be more practical for development activities.</aside> # noqa: E501 The version of the OpenAPI document: v1 Contact: support@coinapi.io Generated by: https://openapi-generator.tech """ from setuptools import setup, find_packages # noqa: H301 NAME = "openapi-client" VERSION = "1.0.0" # To install the library, run the following # # python setup.py install # # prerequisite: setuptools # http://pypi.python.org/pypi/setuptools REQUIRES = [ "certifi >= 14.5.14", "frozendict ~= 2.3.4", "python-dateutil ~= 2.7.0", "setuptools >= 21.0.0", "typing_extensions ~= 4.3.0", "urllib3 ~= 1.26.7", ] setup( name=NAME, version=VERSION, description="EMS - REST API", author="COINAPI LTD", author_email="support@coinapi.io", url="", keywords=["OpenAPI", "OpenAPI-Generator", "EMS - REST API"], python_requires=">=3.7", install_requires=REQUIRES, packages=find_packages(exclude=["test", "tests"]), include_package_data=True, license="28961", long_description="""\ This section will provide necessary information about the &#x60;CoinAPI EMS REST API&#x60; protocol. &lt;br/&gt; This API is also available in the Postman application: &lt;a href&#x3D;\&quot;https://postman.coinapi.io/\&quot; target&#x3D;\&quot;_blank\&quot;&gt;https://postman.coinapi.io/&lt;/a&gt; &lt;br/&gt;&lt;br/&gt; Implemented Standards: * [HTTP1.0](https://datatracker.ietf.org/doc/html/rfc1945) * [HTTP1.1](https://datatracker.ietf.org/doc/html/rfc2616) * [HTTP2.0](https://datatracker.ietf.org/doc/html/rfc7540) ### Endpoints &lt;table&gt; &lt;thead&gt; &lt;tr&gt; &lt;th&gt;Deployment method&lt;/th&gt; &lt;th&gt;Environment&lt;/th&gt; &lt;th&gt;Url&lt;/th&gt; &lt;/tr&gt; &lt;/thead&gt; &lt;tbody&gt; &lt;tr&gt; &lt;td&gt;Managed Cloud&lt;/td&gt; &lt;td&gt;Production&lt;/td&gt; &lt;td&gt;Use &lt;a href&#x3D;\&quot;#ems-docs-sh\&quot;&gt;Managed Cloud REST API /v1/locations&lt;/a&gt; to get specific endpoints to each server site where your deployments span&lt;/td&gt; &lt;/tr&gt; &lt;tr&gt; &lt;td&gt;Managed Cloud&lt;/td&gt; &lt;td&gt;Sandbox&lt;/td&gt; &lt;td&gt;&lt;code&gt;https://ems-gateway-aws-eu-central-1-dev.coinapi.io/&lt;/code&gt;&lt;/td&gt; &lt;/tr&gt; &lt;tr&gt; &lt;td&gt;Self Hosted&lt;/td&gt; &lt;td&gt;Production&lt;/td&gt; &lt;td&gt;IP Address of the &lt;code&gt;ems-gateway&lt;/code&gt; container/excecutable in the closest server site to the caller location&lt;/td&gt; &lt;/tr&gt; &lt;tr&gt; &lt;td&gt;Self Hosted&lt;/td&gt; &lt;td&gt;Sandbox&lt;/td&gt; &lt;td&gt;IP Address of the &lt;code&gt;ems-gateway&lt;/code&gt; container/excecutable in the closest server site to the caller location&lt;/td&gt; &lt;/tr&gt; &lt;/tbody&gt; &lt;/table&gt; ### Authentication If the software is deployed as &#x60;Self-Hosted&#x60; then API do not require authentication as inside your infrastructure, your company is responsible for the security and access controls. &lt;br/&gt;&lt;br/&gt; If the software is deployed in our &#x60;Managed Cloud&#x60;, there are 2 methods for authenticating with us, you only need to use one: 1. Custom authorization header named &#x60;X-CoinAPI-Key&#x60; with the API Key 2. Query string parameter named &#x60;apikey&#x60; with the API Key 3. 
&lt;a href&#x3D;\&quot;#certificate\&quot;&gt;TLS Client Certificate&lt;/a&gt; from the &#x60;Managed Cloud REST API&#x60; (/v1/certificate/pem endpoint) while establishing a TLS session with us. #### Custom authorization header You can authorize by providing additional custom header named &#x60;X-CoinAPI-Key&#x60; and API key as its value. Assuming that your API key is &#x60;73034021-THIS-IS-SAMPLE-KEY&#x60;, then the authorization header you should send to us will look like: &lt;br/&gt;&lt;br/&gt; &#x60;X-CoinAPI-Key: 73034021-THIS-IS-SAMPLE-KEY&#x60; &lt;aside class&#x3D;\&quot;success\&quot;&gt;This method is recommended by us and you should use it in production environments.&lt;/aside&gt; #### Query string authorization parameter You can authorize by providing an additional parameter named &#x60;apikey&#x60; with a value equal to your API key in the query string of your HTTP request. Assuming that your API key is &#x60;73034021-THIS-IS-SAMPLE-KEY&#x60; and that you want to request all balances, then your query string should look like this: &lt;br/&gt;&lt;br/&gt; &#x60;GET /v1/balances?apikey&#x3D;73034021-THIS-IS-SAMPLE-KEY&#x60; &lt;aside class&#x3D;\&quot;notice\&quot;&gt;Query string method may be more practical for development activities.&lt;/aside&gt; # noqa: E501 """ )
[ "support@coinapi.io" ]
support@coinapi.io
2fea730fbc2ed8ead8cdf20b0fe1527890efd6c7
eee6dd18897d3118f41cb5e6f93f830e06fbfe2f
/venv/lib/python3.6/site-packages/scipy/sparse/bsr.py
1627132c92f71edcadf2178965702a5a2e4adba9
[]
no_license
georgeosodo/ml
2148ecd192ce3d9750951715c9f2bfe041df056a
48fba92263e9295e9e14697ec00dca35c94d0af0
refs/heads/master
2020-03-14T11:39:58.475364
2018-04-30T13:13:01
2018-04-30T13:13:01
131,595,044
0
0
null
null
null
null
UTF-8
Python
false
false
23,024
py
"""Compressed Block Sparse Row matrix format""" __docformat__ = "restructuredtext en" __all__ = ['bsr_matrix', 'isspmatrix_bsr'] from warnings import warn import numpy as np from .data import _data_matrix, _minmax_mixin from .compressed import _cs_matrix from .base import isspmatrix, _formats, spmatrix from .sputils import isshape, getdtype, to_native, upcast, get_index_dtype from . import _sparsetools from ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_pass1, bsr_matmat_pass2, bsr_transpose, bsr_sort_indices) class bsr_matrix(_cs_matrix, _minmax_mixin): """Block Sparse Row matrix This can be instantiated in several ways: bsr_matrix(D, [blocksize=(R,C)]) where D is a dense matrix or 2-D ndarray. bsr_matrix(S, [blocksize=(R,C)]) with another sparse matrix S (equivalent to S.tobsr()) bsr_matrix((M, N), [blocksize=(R,C), dtype]) to construct an empty matrix with shape (M, N) dtype is optional, defaulting to dtype='d'. bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)]) where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]`` bsr_matrix((data, indices, indptr), [shape=(M, N)]) is the standard BSR representation where the block column indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their corresponding block values are stored in ``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not supplied, the matrix dimensions are inferred from the index arrays. Attributes ---------- dtype : dtype Data type of the matrix shape : 2-tuple Shape of the matrix ndim : int Number of dimensions (this is always 2) nnz Number of nonzero elements data Data array of the matrix indices BSR format index array indptr BSR format index pointer array blocksize Block size of the matrix has_sorted_indices Whether indices are sorted Notes ----- Sparse matrices can be used in arithmetic operations: they support addition, subtraction, multiplication, division, and matrix power. **Summary of BSR format** The Block Compressed Row (BSR) format is very similar to the Compressed Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense sub matrices like the last example below. Block matrices often arise in vector-valued finite element discretizations. In such cases, BSR is considerably more efficient than CSR and CSC for many sparse arithmetic operations. **Blocksize** The blocksize (R,C) must evenly divide the shape of the matrix (M,N). That is, R and C must satisfy the relationship ``M % R = 0`` and ``N % C = 0``. If no blocksize is specified, a simple heuristic is applied to determine an appropriate blocksize. 
Examples -------- >>> from scipy.sparse import bsr_matrix >>> bsr_matrix((3, 4), dtype=np.int8).toarray() array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=int8) >>> row = np.array([0, 0, 1, 2, 2, 2]) >>> col = np.array([0, 2, 2, 0, 1, 2]) >>> data = np.array([1, 2, 3 ,4, 5, 6]) >>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray() array([[1, 0, 2], [0, 0, 3], [4, 5, 6]]) >>> indptr = np.array([0, 2, 3, 6]) >>> indices = np.array([0, 2, 2, 0, 1, 2]) >>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2) >>> bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray() array([[1, 1, 0, 0, 2, 2], [1, 1, 0, 0, 2, 2], [0, 0, 0, 0, 3, 3], [0, 0, 0, 0, 3, 3], [4, 4, 5, 5, 6, 6], [4, 4, 5, 5, 6, 6]]) """ format = 'bsr' def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None): _data_matrix.__init__(self) if isspmatrix(arg1): if isspmatrix_bsr(arg1) and copy: arg1 = arg1.copy() else: arg1 = arg1.tobsr(blocksize=blocksize) self._set_self(arg1) elif isinstance(arg1,tuple): if isshape(arg1): # it's a tuple of matrix dimensions (M,N) self.shape = arg1 M,N = self.shape # process blocksize if blocksize is None: blocksize = (1,1) else: if not isshape(blocksize): raise ValueError('invalid blocksize=%s' % blocksize) blocksize = tuple(blocksize) self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float)) R,C = blocksize if (M % R) != 0 or (N % C) != 0: raise ValueError('shape must be multiple of blocksize') # Select index dtype large enough to pass array and # scalar parameters to sparsetools idx_dtype = get_index_dtype(maxval=max(M//R, N//C, R, C)) self.indices = np.zeros(0, dtype=idx_dtype) self.indptr = np.zeros(M//R + 1, dtype=idx_dtype) elif len(arg1) == 2: # (data,(row,col)) format from .coo import coo_matrix self._set_self(coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize)) elif len(arg1) == 3: # (data,indices,indptr) format (data, indices, indptr) = arg1 # Select index dtype large enough to pass array and # scalar parameters to sparsetools maxval = 1 if shape is not None: maxval = max(shape) if blocksize is not None: maxval = max(maxval, max(blocksize)) idx_dtype = get_index_dtype((indices, indptr), maxval=maxval, check_contents=True) self.indices = np.array(indices, copy=copy, dtype=idx_dtype) self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype) self.data = np.array(data, copy=copy, dtype=getdtype(dtype, data)) else: raise ValueError('unrecognized bsr_matrix constructor usage') else: # must be dense try: arg1 = np.asarray(arg1) except: raise ValueError("unrecognized form for" " %s_matrix constructor" % self.format) from .coo import coo_matrix arg1 = coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize) self._set_self(arg1) if shape is not None: self.shape = shape # spmatrix will check for errors else: if self.shape is None: # shape not already set, try to infer dimensions try: M = len(self.indptr) - 1 N = self.indices.max() + 1 except: raise ValueError('unable to infer matrix dimensions') else: R,C = self.blocksize self.shape = (M*R,N*C) if self.shape is None: if shape is None: # TODO infer shape here raise ValueError('need to infer shape') else: self.shape = shape if dtype is not None: self.data = self.data.astype(dtype) self.check_format(full_check=False) def check_format(self, full_check=True): """check whether the matrix format is valid *Parameters*: full_check: True - rigorous check, O(N) operations : default False - basic check, O(1) operations """ M,N = self.shape R,C = self.blocksize # index arrays should have integer data 
types if self.indptr.dtype.kind != 'i': warn("indptr array has non-integer dtype (%s)" % self.indptr.dtype.name) if self.indices.dtype.kind != 'i': warn("indices array has non-integer dtype (%s)" % self.indices.dtype.name) idx_dtype = get_index_dtype((self.indices, self.indptr)) self.indptr = np.asarray(self.indptr, dtype=idx_dtype) self.indices = np.asarray(self.indices, dtype=idx_dtype) self.data = to_native(self.data) # check array shapes if self.indices.ndim != 1 or self.indptr.ndim != 1: raise ValueError("indices, and indptr should be 1-D") if self.data.ndim != 3: raise ValueError("data should be 3-D") # check index pointer if (len(self.indptr) != M//R + 1): raise ValueError("index pointer size (%d) should be (%d)" % (len(self.indptr), M//R + 1)) if (self.indptr[0] != 0): raise ValueError("index pointer should start with 0") # check index and data arrays if (len(self.indices) != len(self.data)): raise ValueError("indices and data should have the same size") if (self.indptr[-1] > len(self.indices)): raise ValueError("Last value of index pointer should be less than " "the size of index and data arrays") self.prune() if full_check: # check format validity (more expensive) if self.nnz > 0: if self.indices.max() >= N//C: raise ValueError("column index values must be < %d (now max %d)" % (N//C, self.indices.max())) if self.indices.min() < 0: raise ValueError("column index values must be >= 0") if np.diff(self.indptr).min() < 0: raise ValueError("index pointer values must form a " "non-decreasing sequence") # if not self.has_sorted_indices(): # warn('Indices were not in sorted order. Sorting indices.') # self.sort_indices(check_first=False) def _get_blocksize(self): return self.data.shape[1:] blocksize = property(fget=_get_blocksize) def getnnz(self, axis=None): if axis is not None: raise NotImplementedError("getnnz over an axis is not implemented " "for BSR format") R,C = self.blocksize return int(self.indptr[-1] * R * C) getnnz.__doc__ = spmatrix.getnnz.__doc__ def __repr__(self): format = _formats[self.getformat()][1] return ("<%dx%d sparse matrix of type '%s'\n" "\twith %d stored elements (blocksize = %dx%d) in %s format>" % (self.shape + (self.dtype.type, self.nnz) + self.blocksize + (format,))) def diagonal(self): """Returns the main diagonal of the matrix """ M,N = self.shape R,C = self.blocksize y = np.empty(min(M,N), dtype=upcast(self.dtype)) _sparsetools.bsr_diagonal(M//R, N//C, R, C, self.indptr, self.indices, np.ravel(self.data), y) return y ########################## # NotImplemented methods # ########################## def getdata(self,ind): raise NotImplementedError def __getitem__(self,key): raise NotImplementedError def __setitem__(self,key,val): raise NotImplementedError ###################### # Arithmetic methods # ###################### def matvec(self, other): return self * other def matmat(self, other): return self * other def _mul_vector(self, other): M,N = self.shape R,C = self.blocksize result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype)) bsr_matvec(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel(), other, result) return result def _mul_multivector(self,other): R,C = self.blocksize M,N = self.shape n_vecs = other.shape[1] # number of column vectors result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype)) bsr_matvecs(M//R, N//C, n_vecs, R, C, self.indptr, self.indices, self.data.ravel(), other.ravel(), result.ravel()) return result def _mul_sparse_matrix(self, other): M, K1 = self.shape K2, N = other.shape R,n = 
self.blocksize # convert to this format if isspmatrix_bsr(other): C = other.blocksize[1] else: C = 1 from .csr import isspmatrix_csr if isspmatrix_csr(other) and n == 1: other = other.tobsr(blocksize=(n,C), copy=False) # lightweight conversion else: other = other.tobsr(blocksize=(n,C)) idx_dtype = get_index_dtype((self.indptr, self.indices, other.indptr, other.indices), maxval=(M//R)*(N//C)) indptr = np.empty(self.indptr.shape, dtype=idx_dtype) csr_matmat_pass1(M//R, N//C, self.indptr.astype(idx_dtype), self.indices.astype(idx_dtype), other.indptr.astype(idx_dtype), other.indices.astype(idx_dtype), indptr) bnnz = indptr[-1] idx_dtype = get_index_dtype((self.indptr, self.indices, other.indptr, other.indices), maxval=bnnz) indptr = indptr.astype(idx_dtype) indices = np.empty(bnnz, dtype=idx_dtype) data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype)) bsr_matmat_pass2(M//R, N//C, R, C, n, self.indptr.astype(idx_dtype), self.indices.astype(idx_dtype), np.ravel(self.data), other.indptr.astype(idx_dtype), other.indices.astype(idx_dtype), np.ravel(other.data), indptr, indices, data) data = data.reshape(-1,R,C) # TODO eliminate zeros return bsr_matrix((data,indices,indptr),shape=(M,N),blocksize=(R,C)) ###################### # Conversion methods # ###################### def tobsr(self, blocksize=None, copy=False): """Convert this matrix into Block Sparse Row Format. With copy=False, the data/indices may be shared between this matrix and the resultant bsr_matrix. If blocksize=(R, C) is provided, it will be used for determining block size of the bsr_matrix. """ if blocksize not in [None, self.blocksize]: return self.tocsr().tobsr(blocksize=blocksize) if copy: return self.copy() else: return self def tocsr(self, copy=False): return self.tocoo(copy=False).tocsr(copy=copy) # TODO make this more efficient tocsr.__doc__ = spmatrix.tocsr.__doc__ def tocsc(self, copy=False): return self.tocoo(copy=False).tocsc(copy=copy) tocsc.__doc__ = spmatrix.tocsc.__doc__ def tocoo(self, copy=True): """Convert this matrix to COOrdinate format. When copy=False the data array will be shared between this matrix and the resultant coo_matrix. 
""" M,N = self.shape R,C = self.blocksize indptr_diff = np.diff(self.indptr) if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize: # Check for potential overflow indptr_diff_limited = indptr_diff.astype(np.intp) if np.any(indptr_diff_limited != indptr_diff): raise ValueError("Matrix too big to convert") indptr_diff = indptr_diff_limited row = (R * np.arange(M//R)).repeat(indptr_diff) row = row.repeat(R*C).reshape(-1,R,C) row += np.tile(np.arange(R).reshape(-1,1), (1,C)) row = row.reshape(-1) col = (C * self.indices).repeat(R*C).reshape(-1,R,C) col += np.tile(np.arange(C), (R,1)) col = col.reshape(-1) data = self.data.reshape(-1) if copy: data = data.copy() from .coo import coo_matrix return coo_matrix((data,(row,col)), shape=self.shape) def transpose(self, axes=None, copy=False): if axes is not None: raise ValueError(("Sparse matrices do not support " "an 'axes' parameter because swapping " "dimensions is the only logical permutation.")) R, C = self.blocksize M, N = self.shape NBLK = self.nnz//(R*C) if self.nnz == 0: return bsr_matrix((N, M), blocksize=(C, R), dtype=self.dtype, copy=copy) indptr = np.empty(N//C + 1, dtype=self.indptr.dtype) indices = np.empty(NBLK, dtype=self.indices.dtype) data = np.empty((NBLK, C, R), dtype=self.data.dtype) bsr_transpose(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel(), indptr, indices, data.ravel()) return bsr_matrix((data, indices, indptr), shape=(N, M), copy=copy) transpose.__doc__ = spmatrix.transpose.__doc__ ############################################################## # methods that examine or modify the internal data structure # ############################################################## def eliminate_zeros(self): R,C = self.blocksize M,N = self.shape mask = (self.data != 0).reshape(-1,R*C).sum(axis=1) # nonzero blocks nonzero_blocks = mask.nonzero()[0] if len(nonzero_blocks) == 0: return # nothing to do self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks] # modifies self.indptr and self.indices *in place* _sparsetools.csr_eliminate_zeros(M//R, N//C, self.indptr, self.indices, mask) self.prune() def sum_duplicates(self): """Eliminate duplicate matrix entries by adding them together The is an *in place* operation """ if self.has_canonical_format: return self.sort_indices() R, C = self.blocksize M, N = self.shape # port of _sparsetools.csr_sum_duplicates n_row = M // R nnz = 0 row_end = 0 for i in range(n_row): jj = row_end row_end = self.indptr[i+1] while jj < row_end: j = self.indices[jj] x = self.data[jj] jj += 1 while jj < row_end and self.indices[jj] == j: x += self.data[jj] jj += 1 self.indices[nnz] = j self.data[nnz] = x nnz += 1 self.indptr[i+1] = nnz self.prune() # nnz may have changed self.has_canonical_format = True def sort_indices(self): """Sort the indices of this matrix *in place* """ if self.has_sorted_indices: return R,C = self.blocksize M,N = self.shape bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel()) self.has_sorted_indices = True def prune(self): """ Remove empty space after all non-zero elements. 
""" R,C = self.blocksize M,N = self.shape if len(self.indptr) != M//R + 1: raise ValueError("index pointer has invalid length") bnnz = self.indptr[-1] if len(self.indices) < bnnz: raise ValueError("indices array has too few elements") if len(self.data) < bnnz: raise ValueError("data array has too few elements") self.data = self.data[:bnnz] self.indices = self.indices[:bnnz] # utility functions def _binopt(self, other, op, in_shape=None, out_shape=None): """Apply the binary operation fn to two sparse matrices.""" # Ideally we'd take the GCDs of the blocksize dimensions # and explode self and other to match. other = self.__class__(other, blocksize=self.blocksize) # e.g. bsr_plus_bsr, etc. fn = getattr(_sparsetools, self.format + op + self.format) R,C = self.blocksize max_bnnz = len(self.data) + len(other.data) idx_dtype = get_index_dtype((self.indptr, self.indices, other.indptr, other.indices), maxval=max_bnnz) indptr = np.empty(self.indptr.shape, dtype=idx_dtype) indices = np.empty(max_bnnz, dtype=idx_dtype) bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_'] if op in bool_ops: data = np.empty(R*C*max_bnnz, dtype=np.bool_) else: data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype)) fn(self.shape[0]//R, self.shape[1]//C, R, C, self.indptr.astype(idx_dtype), self.indices.astype(idx_dtype), self.data, other.indptr.astype(idx_dtype), other.indices.astype(idx_dtype), np.ravel(other.data), indptr, indices, data) actual_bnnz = indptr[-1] indices = indices[:actual_bnnz] data = data[:R*C*actual_bnnz] if actual_bnnz < max_bnnz/2: indices = indices.copy() data = data.copy() data = data.reshape(-1,R,C) return self.__class__((data, indices, indptr), shape=self.shape) # needed by _data_matrix def _with_data(self,data,copy=True): """Returns a matrix with the same sparsity structure as self, but with different data. By default the structure arrays (i.e. .indptr and .indices) are copied. """ if copy: return self.__class__((data,self.indices.copy(),self.indptr.copy()), shape=self.shape,dtype=data.dtype) else: return self.__class__((data,self.indices,self.indptr), shape=self.shape,dtype=data.dtype) # # these functions are used by the parent class # # to remove redudancy between bsc_matrix and bsr_matrix # def _swap(self,x): # """swap the members of x if this is a column-oriented matrix # """ # return (x[0],x[1]) def isspmatrix_bsr(x): return isinstance(x, bsr_matrix)
[ "georgeosodo2010@gmail.com" ]
georgeosodo2010@gmail.com
02745dd02ec7954ea531da8ddfb292e43a976771
8a102033a266d39128e4b64aa0780cf67055e196
/1330.py
3fe9a718323d5727aeb4c2c1501dafb25b860ada
[]
no_license
yuseungwoo/baekjoon
4dec0798b8689b9378121b9d178713c9cf14a53f
099031e2c4401e27edcdc05bd6c9e6a558b09bb9
refs/heads/master
2020-09-03T15:25:40.764723
2018-10-08T02:35:27
2018-10-08T02:35:27
null
0
0
null
null
null
null
UTF-8
Python
false
false
129
py
# coding: utf-8
a, b = map(int, input().split())
if a > b:
    print('>')
if a < b:
    print('<')
if a == b:
    print('==')
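For quick local checks of the comparison logic above without feeding stdin, a small hypothetical helper (the compare name is not part of the original file) behaves the same way:

def compare(a, b):
    # same three-way comparison as the script above, minus the I/O
    if a > b:
        return '>'
    if a < b:
        return '<'
    return '=='

assert compare(3, 5) == '<'
assert compare(7, 7) == '=='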
[ "blacksangi14@naver.com" ]
blacksangi14@naver.com
ec48f6b5a8782524afec24561f65b13e75302679
0d3f9dc797c0ad1d72bfe25390284a7a2231ef75
/chap4/45.py
ece8c684a747cd9a410d081d8c61fbdde63d8aad
[]
no_license
mocas-usr/tensorflow_primary
b71b90cc4c0690316ebafd20aa7f7644bc16e839
e854f11026aea863a701e5691e1068b2648b4002
refs/heads/master
2022-11-02T18:04:31.538146
2019-11-01T08:25:35
2019-11-01T08:25:35
218,905,003
0
1
null
2022-10-24T03:15:05
2019-11-01T03:17:26
Python
UTF-8
Python
false
false
1,328
py
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 14:59:46 2019

@author: HAX
"""

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

plotdata = {"batchsize": [], "loss": []}


def moving_average(a, w=10):
    if len(a) < w:
        return a[:]
    return [val if idx < w else sum(a[(idx - w):idx]) / w
            for idx, val in enumerate(a)]


# generate simulated data
train_X = np.linspace(-1, 1, 100)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.3  # y = 2x, with added noise

# plot the data
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.legend()
plt.show()

tf.reset_default_graph()  # release all resources held by the default graph

# define IPs and ports
strps_hosts = 'localhost:1681'
strworker_hosts = 'localhost:1682,localhost:1683'

# define the role name
strjob_name = 'ps'
task_index = 0

# split the host strings into lists
ps_hosts = strps_hosts.split(',')
worker_hosts = strworker_hosts.split(',')
print(ps_hosts)

cluster_spec = tf.train.ClusterSpec({'ps': ps_hosts, 'worker': worker_hosts})

# create the server from the cluster spec
server = tf.train.Server(cluster_spec, job_name=strjob_name,
                         task_index=task_index)

# a ps task waits via join
if strjob_name == 'ps':
    print('wait')
    server.join()

with tf.device(tf.train.replica_device_setter(
        worker_device='/job:worker/task:%d' % task_index,
        cluster=cluster_spec)):
    pass  # model definition would go here
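The moving_average helper above is plain Python, so its behaviour is easy to verify without TensorFlow; w=3 here is an arbitrary illustrative window:

vals = [1, 2, 3, 4, 5, 6]
print(moving_average(vals, w=3))
# the first w values pass through unchanged; each later entry is the mean
# of the preceding 3 values: [1, 2, 3, 2.0, 3.0, 4.0]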
[ "wangyuhang_mocas@163.com" ]
wangyuhang_mocas@163.com
014a6b6fc7c93c425ce7da5ad70dfce4b7273ee8
52b5773617a1b972a905de4d692540d26ff74926
/.history/largestTime_20200903122053.py
697fb00998e96926352a6433e5a6da6d088d57dd
[]
no_license
MaryanneNjeri/pythonModules
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
f4e56b1e4dda2349267af634a46f6b9df6686020
refs/heads/master
2022-12-16T02:59:19.896129
2020-09-11T12:05:22
2020-09-11T12:05:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
552
py
from itertools import permutations


def Time(A):
    # generate the different permutations of the four digits,
    # keep the ones that form a valid 24-hour time (00:00-23:59),
    # place the colon in the proper position, and return the largest;
    # otherwise return an empty string
    A = [str(i) for i in A]
    best = ''
    for p in permutations(A):
        string = ''.join(p)
        hour, minute = int(string[:2]), int(string[2:])
        if hour < 24 and minute < 60:
            candidate = string[:2] + ':' + string[2:]
            best = max(best, candidate)
    return best


print(Time([1, 2, 3, 4]))
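The completed function above appears to mirror the "largest time for given digits" puzzle; its expected behaviour on two sample inputs:

assert Time([1, 2, 3, 4]) == '23:41'   # largest valid 24-hour time from these digits
assert Time([5, 5, 5, 5]) == ''        # no permutation forms a valid time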
[ "mary.jereh@gmail.com" ]
mary.jereh@gmail.com
356a84ee74b58014400d3f8cd8a3a8ef23a75015
46738f59d6358042f879567788cfd8ae8192a7b6
/lambda/save_sample_data/save_sample_data.py
6e94006a92e7fed7df4f113536d6fbdd063da3cc
[]
no_license
perezbarbosa/hosting-compare
8fb2d6f45b37d7101da592950749dbf3f2c30667
9c0cd1ed0e274b568277d153ab2f22bf45a8e75d
refs/heads/master
2023-05-06T02:52:35.792340
2021-06-03T17:18:31
2021-06-03T17:18:31
273,340,583
1
0
null
null
null
null
UTF-8
Python
false
false
1,202
py
import boto3
import json
from pprint import pprint
import sys


def handler(event, context):
    """
    Loads sample data into local database
    Expects to receive a payload with a list of json objects formatted as
    dynamodb.put_item expects
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Client.put_item
    """
    if event['body']:
        body = json.loads(event['body'])

    out = {}
    out['headers'] = {
        'Content-Type': 'application/json',
    }

    # Using a local docker network to access to dynamodb container by its name
    dynamodb = boto3.client('dynamodb', endpoint_url='http://dynamodb:8000')

    try:
        for entry in body:
            pprint(entry)
            response = dynamodb.put_item(
                TableName='HostingList',
                Item=entry,
            )
        out['statusCode'] = 200
        out['body'] = {
            'message': response,
        }
    except:
        print("Unexpected error")
        pprint(sys.exc_info())
        out['statusCode'] = 500
        out['body'] = {
            'message': 'Unexpected error',
        }

    return out
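An illustrative invocation of the handler above; the item attributes are hypothetical, but their shape follows the dynamodb put_item format the docstring links to (values wrapped in type descriptors such as {'S': ...} and {'N': ...}):

import json

sample_event = {
    'body': json.dumps([
        {
            'Id': {'S': 'host-001'},
            'Name': {'S': 'Example Hosting'},
            'PricePerMonth': {'N': '4.99'},
        }
    ])
}

# writes one item into the local HostingList table when the dynamodb
# container is reachable; left commented because it needs that network
# result = handler(sample_event, None)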
[ "noreply@github.com" ]
perezbarbosa.noreply@github.com
acb4347eff7bdd262db2056c03fe1aae4543962b
30e2b17d954a73d6a6eade6ba12c6fdb3af57068
/HackerRanck_Python_Challenges/Solutions/stringClassif.py
5dcb1bc07c994dea19461e9dc93709d8759922a5
[]
no_license
rock-feller/HackerRanck_Python_Challenges
9c8a1447af7ee50faba9a05875e4f9eb1345d1eb
35e176d0a498f537bf2b2f32907755c9a0f07a75
refs/heads/master
2020-07-02T08:59:51.201894
2019-08-09T14:09:35
2019-08-09T14:09:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
575
py
"""author = Rockefeller""" import string def unik(li): el = [] for i in li: if i not in el: el.append(i) return el def perm(x , y): al = string.ascii_lowercase ind = [] ind2= [] s_x =unik(x) s_y =unik(y) for i in range(min(len(s_x) , len(s_y))): ind.append(al.index(s_x[i]) - al.index(s_y[i])) for x_i , y_i in zip(x , y): ind2.append(al.index(x_i) - al.index(y_i)) if ind==unik(ind2): print ("same class") else: print("different class")
[ "noreply@github.com" ]
rock-feller.noreply@github.com
47213a723487f5382748a8d76a7546ee674ea1f5
a26d91163fe40924c7c4f9d94fcd973989b68983
/watchlist_app/migrations/0003_alter_movie_description.py
bed775bad8d30539b2b34f84c48a3511902e2b22
[]
no_license
rcoffie/Django-Rest-Tut
a840ecb838098ed2d525c1b5321f042e0d29c5fb
9925bfb11b92a49aa6973e3929b2d05d9528ee27
refs/heads/master
2023-08-25T06:43:41.019767
2021-10-27T15:27:06
2021-10-27T15:27:06
409,567,488
0
0
null
null
null
null
UTF-8
Python
false
false
384
py
# Generated by Django 3.2.5 on 2021-09-23 11:15

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('watchlist_app', '0002_rename_movies_movie'),
    ]

    operations = [
        migrations.AlterField(
            model_name='movie',
            name='description',
            field=models.TextField(),
        ),
    ]
[ "rcoffie22@yahoo.com" ]
rcoffie22@yahoo.com
78ce4126165b88ced683f46c15d42dfb92e4f168
f7c19bd02cfc09992d804ae35e293323d2ea99e4
/classviews/migrations/0001_initial.py
fdb331a483de7545d040ba72ef362511027e225c
[]
no_license
jesusjamz100/cms_blog
e98c927c5f93ca12ef6f71748620b0168f25dcb8
c08d6e1a063236bfd59d11e630254aa0043eaf17
refs/heads/master
2020-04-18T19:19:11.057386
2019-02-02T23:25:07
2019-02-02T23:25:07
167,709,187
0
0
null
null
null
null
UTF-8
Python
false
false
672
py
# Generated by Django 2.1.5 on 2019-01-28 02:10

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=254)),
                ('address', models.CharField(max_length=100)),
                ('phone', models.CharField(max_length=50)),
            ],
        ),
    ]
[ "jesusjamz100@gmail.com" ]
jesusjamz100@gmail.com
23e6461e758dbf032f5c96edf2252ec91d06e177
cbd3c85d385d065ff48bfa8a6321d4bf160acb09
/abjadext/cli/get_text_editor.py
923d3bb47c4ecdda2a782bea3166a3c3b3177547
[ "MIT" ]
permissive
DaviRaubach/abjad-ext-cli
4b36424a9930bc9d04e53b763c8c63804baede2b
4c2fa22f317ef0a3d5f0d6a8ea92f4375b8fc8eb
refs/heads/master
2022-10-17T21:27:42.324333
2020-06-18T19:29:10
2020-06-18T19:29:10
null
0
0
null
null
null
null
UTF-8
Python
false
false
242
py
import os

import abjad


def get_text_editor():
    text_editor = abjad.configuration["text_editor"]
    if text_editor is not None:
        return text_editor
    elif os.name == "posix":
        return "vi"
    else:
        return "edit"
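A usage sketch of the fallback chain above (configured editor first, then vi on POSIX, then edit elsewhere):

editor = get_text_editor()
print('resolved text editor:', editor)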
[ "trevor.baca@gmail.com" ]
trevor.baca@gmail.com
41812c6d4cc481ed2d7caedd0323b6ca88aa5b06
e7003f13ad2e4b8cfeeb3d7bf6c5a393e8f38bf7
/custom_components/sbanken/config_flow.py
da9dfdbc7429a3cbf5f541836a10efc984187ceb
[ "MIT" ]
permissive
toringer/home-assistant-sbanken
5a620d49abb1807bbda968d2c561eec84f0361c7
7540960042df455cc7ce672d7891c134a9976251
refs/heads/master
2023-02-21T13:07:14.543789
2023-02-12T14:36:08
2023-02-12T14:36:08
124,766,061
3
5
MIT
2022-03-17T14:25:03
2018-03-11T14:42:43
Python
UTF-8
Python
false
false
5,249
py
"""Config flow for Sbanken integration.""" from __future__ import annotations import logging from typing import Any import voluptuous as vol from homeassistant import config_entries from homeassistant.core import HomeAssistant from homeassistant.data_entry_flow import FlowResult from homeassistant.core import callback from .sbanken_api import SbankenApi from .const import ( DOMAIN, CONF_CLIENT_ID, CONF_NUMBER_OF_TRANSACTIONS, CONF_SECRET, TITLE, CannotConnect, InvalidAuth, ) _LOGGER = logging.getLogger(__name__) STEP_USER_DATA_SCHEMA = vol.Schema( { vol.Required(CONF_CLIENT_ID): str, vol.Required(CONF_SECRET): str, vol.Required(CONF_NUMBER_OF_TRANSACTIONS, default=10): int, } ) async def validate_input(hass: HomeAssistant, data: dict[str, Any]) -> dict[str, Any]: """Validate the user input allows us to connect.""" api = SbankenApi(data[CONF_CLIENT_ID], data[CONF_SECRET]) session = await hass.async_add_executor_job(api.get_session) if not session.authorized: raise InvalidAuth customer_info = await hass.async_add_executor_job(api.get_customer_information) return {"title": TITLE, "customer_id": customer_info["customerId"]} @config_entries.HANDLERS.register(DOMAIN) class SbankenConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a config flow for Sbanken.""" VERSION = 1 async def async_step_user( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Handle the initial step.""" if user_input is None: return self.async_show_form( step_id="user", data_schema=STEP_USER_DATA_SCHEMA ) errors = {} try: info = await validate_input(self.hass, user_input) except CannotConnect: errors["base"] = "cannot_connect" except InvalidAuth: errors["base"] = "invalid_auth" except Exception: # pylint: disable=broad-except _LOGGER.exception("Unexpected exception") errors["base"] = "unknown" else: unique_id = info["customer_id"] await self.async_set_unique_id(unique_id) self._abort_if_unique_id_configured() return self.async_create_entry( title=info["title"], data=user_input, options=user_input, ) return self.async_show_form( step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors ) @staticmethod @callback def async_get_options_flow(config_entry): return SbankenOptionsFlowHandler(config_entry) class SbankenOptionsFlowHandler(config_entries.OptionsFlow): """Sbanken config flow options handler.""" def __init__(self, config_entry): self.options = config_entry.options self.data = config_entry.data async def async_step_init(self, _user_input=None): """Manage the options.""" return await self.async_step_user() async def async_step_user( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Handle a flow initialized by the user.""" errors = {} if user_input is not None: try: info = await validate_input(self.hass, user_input) except CannotConnect: errors["base"] = "cannot_connect" except InvalidAuth: errors["base"] = "invalid_auth" except Exception: # pylint: disable=broad-except _LOGGER.exception("Unexpected exception") errors["base"] = "unknown" else: return self.async_create_entry(title=info["title"], data=user_input) return self.async_show_form( step_id="user", data_schema=vol.Schema( { vol.Required( CONF_CLIENT_ID, default=user_input[CONF_CLIENT_ID], ): str, vol.Required( CONF_SECRET, default=user_input[CONF_SECRET], ): str, vol.Required( CONF_NUMBER_OF_TRANSACTIONS, default=user_input[CONF_NUMBER_OF_TRANSACTIONS], ): int, } ), errors=errors, ) return self.async_show_form( step_id="user", data_schema=vol.Schema( { vol.Required( CONF_CLIENT_ID, 
default=self.options.get(CONF_CLIENT_ID), ): str, vol.Required( CONF_SECRET, default=self.options.get(CONF_SECRET), ): str, vol.Required( CONF_NUMBER_OF_TRANSACTIONS, default=self.options.get(CONF_NUMBER_OF_TRANSACTIONS), ): int, } ), )
[ "toringe@redalen.no" ]
toringe@redalen.no
fab423ac20ec004dccb37459a3fcde5478b15d1e
ac5042582ec4fb8f128a97ead4bb59d6bbf6cef5
/tests/utils/test_prolang.py
f11790d225e44c2626e5efd7ca18fd0c49a53acc
[ "MIT" ]
permissive
optittm/bugprediction
4be0e4c532e06c27ac4c9a3e7812bc9fc4f5a113
161628d504627d0623b584e7e92bb3130a24b8ef
refs/heads/main
2023-08-22T04:20:57.039589
2023-07-19T09:39:46
2023-07-19T09:39:46
522,447,765
2
2
MIT
2023-09-14T10:10:08
2022-08-08T07:28:57
Python
UTF-8
Python
false
false
612
py
from tests.__fixtures__ import *


def test_guess_programing_language():
    """
    Guess what is the programming language from a file extension
    >>> guess_programing_language("php")
    PHP
    >>> guess_programing_language(".php")
    PHP
    >>> guess_programing_language(".hidden/test.h")
    C
    >>> guess_programing_language("")
    None
    >>> guess_programing_language("java")
    Java
    >>> guess_programing_language("c++")
    C++
    >>> guess_programing_language("c")
    C
    >>> guess_programing_language("class")
    None
    >>> guess_programing_language("cpp")
    C++
    """
    pass
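# Illustrative sketch (mine, not part of the project): the doctests above
# imply an extension-to-language lookup. An assumed minimal implementation
# consistent with every case listed:
#
#   def guess_programing_language(path):
#       ext_to_lang = {'php': 'PHP', 'h': 'C', 'c': 'C',
#                      'cpp': 'C++', 'c++': 'C++', 'java': 'Java'}
#       return ext_to_lang.get(path.rsplit('.', 1)[-1].lower())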
[ "benjamin.balet@gmail.com" ]
benjamin.balet@gmail.com
703a8e40bd746970ed7d5c2e13f250617fe1a660
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02574/s746022410.py
9331bad8a322a0b5502729d4fc4e2aa050191d05
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
1,135
py
import math,itertools,fractions,heapq,collections,bisect,sys,queue,copy

sys.setrecursionlimit(10**7)
inf = 10**20
mod = 10**9 + 7

dd = [(-1,0),(0,1),(1,0),(0,-1)]
ddn = [(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1)]

def LI(): return [int(x) for x in sys.stdin.readline().split()]
# def LF(): return [float(x) for x in sys.stdin.readline().split()]
def I(): return int(sys.stdin.readline())
def F(): return float(sys.stdin.readline())
def LS(): return sys.stdin.readline().split()
def S(): return input()

def main():
    N = I()
    A = LI()

    g = 0
    for x in A:
        g = math.gcd(g, x)
    if g > 1:
        return 'not coprime'

    sosu = [0] * 1000100
    for x in A:
        if x == 1:
            continue
        sosu[x] += 1
        if sosu[x] > 1:
            return 'setwise coprime'
        for y in range(2, int(math.sqrt(x)) + 1):
            if x % y != 0:
                continue
            z = x // y
            if y == z:
                sosu[y] += 1
                if sosu[y] > 1:
                    return 'setwise coprime'
            else:
                sosu[y] += 1
                if sosu[y] > 1:
                    return 'setwise coprime'
                sosu[z] += 1
                if sosu[z] > 1:
                    return 'setwise coprime'

    return 'pairwise coprime'

# main()
print(main())
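# Worked example (AtCoder-style stdin assumed): for "3\n6 10 15" the overall
# gcd is 1 but 2 divides both 6 and 10, so main() returns 'setwise coprime';
# for "3\n3 5 7" no prime factor repeats and it returns 'pairwise coprime'.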
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
9207f63b377b4990be34f2882127edcd256361e6
70e1d7c3e375ecff09df36e5a4ceda5691221968
/tmp.py
1936e8cb7425e44fc01199bc2937e82f0e03ce0a
[ "Apache-2.0" ]
permissive
doublechenching/ship_detection
5b91aa4a7fbe6eb5a88389d1a517641a53740890
1ba4926e0d28043863df05ae8afc3d5b336b350d
refs/heads/master
2020-04-06T17:53:23.855070
2018-11-15T08:47:02
2018-11-15T08:47:02
157,676,999
8
0
null
null
null
null
UTF-8
Python
false
false
1,213
py
Nucleoplasm 12885 0.25
Cytosol 8228 0.16
Plasma membrane 3777 0.07
Nucleoli 3621 0.07
Mitochondria 2965 0.06
Golgi apparatus 2822 0.06
Nuclear bodies 2513 0.05
Nuclear speckles 1858 0.04
Nucleoli fibrillar center 1561 0.03
Centrosome 1482 0.03
Nuclear membrane 1254 0.02
Intermediate filaments 1093 0.02
Microtubules 1066 0.02
Endoplasmic reticulum 1008 0.02
Microtubule organizing center 902 0.02
Cell junctions 802 0.02
Actin filaments 688 0.01
Focal adhesion sites 537 0.01
Cytokinetic bridge 530 0.01
Cytoplasmic bodies 328 0.01
Aggresome 322 0.01
Mitotic spindle 210 0.00
Lipid droplets 172 0.00
Peroxisomes 53 0.00
Endosomes 45 0.00
Lysosomes 28 0.00
Microtubule ends 21 0.00
Rods & rings 11 0.00
[ "vichenqin@gmail.com" ]
vichenqin@gmail.com
1259e4c9fd64611ee9165faa91df8c43bf9b224d
d77d0433c9db4b0f529c4066011c6b5eb037981b
/mpi-proxy-split/test/mana_test.py
b4683cd4ea33c53253eadb04c1fbe541d3d38ddc
[]
no_license
mpickpt/mana
bb11f4da82e01c2f6b37f0cb357bf1303cd720c1
8f341500f259187588dbede15bf152a0771a7022
refs/heads/main
2023-09-01T08:42:51.518528
2023-08-14T21:38:16
2023-08-31T18:25:31
179,736,099
26
23
null
2023-09-14T03:12:09
2019-04-05T18:37:56
C++
UTF-8
Python
false
false
3,058
py
#!/usr/bin/env python3
import argparse
import sys
import subprocess

'''
This util is designed to be an argument parsing utility for C/C++ tests.
'''

class CustomParser(argparse.ArgumentParser):
    def error(self, message):
        sys.stderr.write('error: %s\n' % message)
        self.print_help()
        sys.exit(2)

def main():
    parser = CustomParser(description='Run a MANA Test')
    parser.add_argument('-i', '--iterations', metavar='I',
                        help='Number of iterations for test')
    parser.add_argument('test', metavar='T', help='Path to test case to run')
    parser.add_argument('-n', '--num_ranks', metavar='N',
                        help='Number of ranks for test', required=True)
    parser.add_argument('-m', '--mana_bin', metavar='M',
                        help='Absolute path to mana_bin folder', default='',
                        required=False)
    parser.add_argument('-r', '--mpirun',
                        help='Use mpirun instead of srun', action="store_true")
    parser.add_argument('-a', '--args', help='Arguments to pass to test',
                        default='')
    args = parser.parse_args()

    if args.mana_bin == '':
        mana_coordinator_path = f'mana_coordinator'
        mana_launch_path = f'mana_launch'
    else:
        mana_coordinator_path = f'{args.mana_bin}/mana_coordinator'
        mana_launch_path = f'{args.mana_bin}/mana_launch'

    print(f'{mana_coordinator_path}')
    coord_child = subprocess.run([f'{mana_coordinator_path}'])

    run = 'srun'
    if args.mpirun:
        run = 'mpirun'

    if args.iterations is None:
        print(f'{run} -n {args.num_ranks} {mana_launch_path} '
              f'{args.test}.mana.exe {args.args}')
        # The test binary and its arguments must be separate argv entries.
        test_child = subprocess.run([f'{run}', '-n', f'{args.num_ranks}',
                                     f'{mana_launch_path}',
                                     f'{args.test}.mana.exe',
                                     f'{args.args}'],
                                    stdout=subprocess.DEVNULL)
    else:
        if args.args == '':
            print(f'{run} -n {args.num_ranks} {mana_launch_path} '
                  f'{args.test}.mana.exe {args.iterations} {args.args}')
            test_child = subprocess.run([f'{run}', '-n', f'{args.num_ranks}',
                                         f'{mana_launch_path}',
                                         f'{args.test}.mana.exe',
                                         f'{args.iterations}',
                                         f'{args.args}'],
                                        stdout=subprocess.DEVNULL)
        else:
            print(f'{run} -n {args.num_ranks} {mana_launch_path} '
                  f'{args.test}.mana.exe -i {args.iterations} {args.args}')
            test_child = subprocess.run([f'{run}', '-n', f'{args.num_ranks}',
                                         f'{mana_launch_path}',
                                         f'{args.test}.mana.exe', '-i',
                                         f'{args.iterations}',
                                         f'{args.args}'],
                                        stdout=subprocess.DEVNULL)

if __name__ == "__main__":
    main()
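# Example invocation (paths and test name are placeholders, not from the repo):
#   ./mana_test.py -n 4 -m /path/to/mana/bin -i 100 ./ping_pong
# starts mana_coordinator and then runs roughly
#   srun -n 4 /path/to/mana/bin/mana_launch ./ping_pong.mana.exe 100
# (when -a is given, the iteration count is passed as `-i 100` instead).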
[ "107428972+chirag-singh-memverge@users.noreply.github.com" ]
107428972+chirag-singh-memverge@users.noreply.github.com
2f56eca6cc281d7aae80bae6b6e73ce086e853ba
64564ad34eb88c9b366b23c21e90684dbbf0ba94
/gen-py/livetex/livetex_service/__init__.py
e790cd5d759e1a7017e92a7f4b883ce35961affd
[]
no_license
hobbit-vt/livetex-fake-services
53481fcc02190a14648ac6950eeb39b31087f9f2
b595d912307148d1bc6b162f63608369256d9c43
refs/heads/master
2021-01-25T07:34:36.583297
2014-10-17T09:51:11
2014-10-17T09:51:11
23,706,391
1
1
null
2014-10-23T15:16:52
2014-09-05T14:42:37
Python
UTF-8
Python
false
false
58
py
# -*- coding: utf-8 -*-

__all__ = ['ttypes', 'constants']
[ "viktor.g@livetex.ru" ]
viktor.g@livetex.ru
5d360aaf3e6c4ec30b0ffd85d0c0fbd4ac6c452c
501d8f799185f58af7ad864962f29ba3066a3a66
/python/locate.py
3dc7eb55c09872ed04993bce4e9ebd5b41d9887e
[]
no_license
liuyujyyz/learning
55c866c3a0c4db3d4d5475c049785a3ad856aec3
299914b27fad9e3fc9175d68905760d939b06d2a
refs/heads/master
2021-01-19T08:10:37.553429
2020-08-13T07:42:14
2020-08-13T07:42:14
87,607,931
0
0
null
null
null
null
UTF-8
Python
false
false
3,605
py
import cv2
import numpy as np
from decorators import timer
from cluster import Kmeans
from tqdm import tqdm


def extract(img):
    img = cv2.resize(img, (16, 16))
    U, S, V = np.linalg.svd(img)
    return V[0]


@timer
def divide(img, window, stride=1):
    h, w, _ = img.shape
    parth, partw = window
    out = []
    outImg = []
    steph = (h - parth) // stride
    stepw = (w - partw) // stride
    boxes = []
    for i in range(steph):
        for j in range(stepw):
            tmpImg = img[stride*i:stride*i+parth, stride*j:stride*j+partw]
            U = np.concatenate([extract(tmpImg[:,:,0]), extract(tmpImg[:,:,1]), extract(tmpImg[:,:,2])], axis=0)
            #U = extract(tmpImg[:,:,0])+extract(tmpImg[:,:,1])+extract(tmpImg[:,:,2])
            out.append(U)
            outImg.append(tmpImg)
            boxes.append((stride*i, stride*j, stride*i+parth, stride*j+partw))
    out = np.array(out)
    outImg = np.array(outImg)
    boxes = np.array(boxes)
    return out, outImg, boxes


def get_rep(filename, ID):
    img = cv2.imread(filename)
    rep, imgset, boxes = divide(img, (45, 45), 10)
    rep2, imgset2, boxes2 = divide(img, (90, 90), 20)
    rep3, imgset3, boxes3 = divide(img, (30, 30), 10)
    rep4, imgset4, boxes4 = divide(img, (60, 60), 20)
    rep = np.concatenate([rep, rep2, rep3, rep4], axis=0)
    boxes = np.concatenate([boxes, boxes2, boxes3, boxes4], axis=0)
    fileIndex = ID * np.ones((rep.shape[0],), dtype='int')
    return img, rep, boxes, fileIndex


def findBackground(cato, index):
    return (cato.sum() * 2 < cato.shape[0])


if __name__ == '__main__':
    reps = []
    imgsets = []
    boxess = []
    fileIndexs = []
    imgs = []
    dists = []
    numImg = 10
    for i in tqdm(range(numImg)):
        img, rep, boxes, fileIndex = get_rep('../data/cat/2/pic%s.jpg' % (i), i)
        imgs.append(img)
        for j in range(3):
            cato, dist = Kmeans(rep, 2)
            if cato.sum() == 0:
                from IPython import embed
                embed()
            tag = int(cato.sum() * 2 < cato.shape[0])
            if j > 0:
                tag = 1 - tag
            idx = np.where(cato == tag)[0]
            rep = rep[idx]
            boxes = boxes[idx]
            fileIndex = fileIndex[idx]
            dist = dist[idx]
        reps.append(rep)
        boxess.append(boxes)
        fileIndexs.append(fileIndex)
        dists.append(dist)

    rep = np.concatenate(reps, axis=0)
    boxes = np.concatenate(boxess, axis=0)
    fileIndex = np.concatenate(fileIndexs, axis=0)
    dist = np.concatenate(dists, axis=0)

    while True:
        if rep.shape[0] < 10 * numImg:
            break
        cato, dist = Kmeans(rep, 2)
        tag = findBackground(cato, fileIndex)
        tag = 1 - tag
        print(set(cato), tag)
        idx = np.where(cato == tag)[0]
        nrep = rep[idx]
        nbox = boxes[idx]
        nfile = fileIndex[idx]
        ndist = dist[idx]
        count = [(nfile == i).sum() for i in range(numImg)]
        if min(count) > 0:
            rep = nrep
            boxes = nbox
            fileIndex = nfile
            dist = ndist
        else:
            print(count)
            break

    maxi = dist.max()
    mini = dist.min()
    mean = dist.mean()
    ratio = 255 * (dist - mini) / (mean - mini)
    for i in range(rep.shape[0]):
        if dist[i] > mean:
            continue
        cv2.rectangle(imgs[fileIndex[i]], (boxes[i][1], boxes[i][0]),
                      (boxes[i][3], boxes[i][2]), (int(ratio[i]), 0, 0), 1)
    for i in range(numImg):
        cv2.imshow('x', imgs[i])
        cv2.waitKey(0)
[ "liuyujyyz@gmail.com" ]
liuyujyyz@gmail.com
6bac759d5e99e5cea3ef506eb72cad61410bf459
2a1b7c51756d692bc2d06d57f5e99d89e114a2e9
/ANPP_code/preprocess/AliRepeat/convert_to_norm.py
aed6cc67fbb72482cd3a9a074e6657c8e216beb3
[]
no_license
AnonymousOpenResearch/ANPP
fb66b506af9eef2f714d537c87af40f2a3256129
e5a416a69998fdeda91aa767b0a430892f47c53c
refs/heads/master
2023-01-11T12:29:15.289027
2020-11-06T13:51:05
2020-11-06T13:51:05
271,594,404
5
0
null
null
null
null
UTF-8
Python
false
false
2,128
py
import numpy as np
import pandas as pd
import os
import sys
import pickle

file_user_log = 'user_log_format1.csv'
file_user_purchase = "user_purchase.csv"
file_item_info = "item.csv"

min_user_count = 5
min_item_count = 5
purchase_action_type = 2

arr = sys.argv
folder_dataset = arr[1]
file_pkl_review = arr[2]
file_pkl_meta = arr[3]

year = '2014'

def norm_timestamp(t):
    s = str(t)
    y = year
    if(len(s) < 4):
        s = "0" + s
    m = s[0:2]
    d = s[-2:]
    return "-".join([y, m, d])

def filt_purchase(folder, file_src, file_dst):
    print("filt_purchase...")
    file_src = os.path.join(folder, file_src)
    file_dst = os.path.join(folder, file_dst)
    df = pd.read_csv(file_src)
    df = df[df['action_type'] == 2]
    df.rename(columns={'user_id': 'user', 'item_id': 'item', 'cat_id': 'categories',
                       'brand_id': 'brand', 'time_stamp': 'timestamp'}, inplace=True)
    df = df[['user', 'item', 'timestamp', 'categories', 'brand']]
    df['timestamp'] = df['timestamp'].map(lambda x: norm_timestamp(x))
    df = df.sort_values(['user', 'timestamp'])
    df.to_csv(file_dst, index=False)
    print("filt_purchase done!")

def norm_order_data(folder, file_src, file_pkl_review, file_pkl_meta):
    print("norm_order_data...")
    file_src = os.path.join(folder, file_src)
    df = pd.read_csv(file_src)
    df['title'] = ""
    #filter special data
    df = df[df['timestamp'] < '2014-11-01']
    #filter
    df = df.groupby('item').filter(lambda x: len(x) >= min_item_count)
    df = df.groupby('user').filter(lambda x: len(x) >= min_user_count)
    user_df = df[['user', 'item', 'timestamp']].drop_duplicates()
    meta_df = df[['item', 'categories', 'title', 'brand']].drop_duplicates()
    file_pkl_review = os.path.join(folder, file_pkl_review)
    with open(file_pkl_review, 'wb') as f:
        pickle.dump(user_df, f, pickle.HIGHEST_PROTOCOL)
    file_pkl_meta = os.path.join(folder, file_pkl_meta)
    with open(file_pkl_meta, 'wb') as f:
        pickle.dump(meta_df, f, pickle.HIGHEST_PROTOCOL)
    print("norm_order_data done!")

def main():
    #filt_purchase(folder_dataset, file_user_log, file_user_purchase)
    norm_order_data(folder_dataset, file_user_purchase, file_pkl_review, file_pkl_meta)

main()
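# Usage sketch (file names are illustrative): the script expects the dataset
# folder and two output pickle names on the command line, e.g.
#   python convert_to_norm.py data/AliRepeat reviews.pkl meta.pkl
# after user_purchase.csv has been produced by filt_purchase().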
[ "guyulongthu@gmail.com" ]
guyulongthu@gmail.com
f825ef4df82e8584a89a77987d0e9dca9f38a446
23d43f570f5c99c9fea510bda5579116ea7fd1e5
/main.py
9d4fa83ab7f5e9cfda00a02b770230fe512e5cea
[ "Apache-2.0" ]
permissive
AM6SoftwareCompany/Azkhar
4647e8469d5511c06b2ea6895fd4b7034b4f6057
d86a4bd58ac956640bb1bee5fce64ff647c94a1b
refs/heads/main
2023-06-09T22:57:08.494783
2021-07-09T10:01:04
2021-07-09T10:01:04
353,202,325
0
0
null
null
null
null
UTF-8
Python
false
false
23,861
py
import time import datetime import webbrowser import pyperclip import pyautogui AzkharAlsabah = [ "اللَّهُمَّ أنْتَ رَبِّي لا إلَهَ إلَّا أنْتَ، خَلَقْتَنِي وأنا عَبْدُكَ، وأنا علَى عَهْدِكَ ووَعْدِكَ ما اسْتَطَعْتُ، أعُوذُ بكَ مِن شَرِّ ما صَنَعْتُ، أبُوءُ لكَ بنِعْمَتِكَ عَلَيَّ، وأَبُوءُ لكَ بذَنْبِي فاغْفِرْ لِي، فإنَّه لا يَغْفِرُ الذُّنُوبَ إلَّا أنْت", 'أَصبَحْنا على فِطرةِ الإسلامِ، وعلى كَلِمةِ الإخلاصِ، وعلى دِينِ نَبيِّنا محمَّدٍ صلَّى اللهُ عليه وسلَّمَ، وعلى مِلَّةِ أبِينا إبراهيمَ، حَنيفًا مُسلِمًا، وما كان مِنَ المُشرِكينَ', 'سبحانَ اللَّهِ وبحمدِه لا قوَّةَ إلَّا باللَّهِ ما شاءَ اللَّهُ كانَ وما لم يشأ لم يَكن أعلمُ أنَّ اللَّهَ على كلِّ شيءٍ قديرٌ وأنَّ اللَّهَ قد أحاطَ بِكلِّ شيءٍ علمًا', 'قال رسول الله صلى الله عليه وسلم: (مَن قال: بسمِ اللهِ الذي لا يَضرُ مع اسمِه شيءٌ في الأرضِ ولا في السماءِ وهو السميعُ العليمِ، ثلاثُ مراتٍ، لم تصبْه فجأةُ بلاءٍ حتى يُصبحَ)', 'قال رسول الله صلى الله عليه وسلم: (مَن قالَ حينَ يصبحُ وحينَ يُمسي: سبحانَ اللَّهِ وبحمدِهِ مائةَ مرَّةٍ: لم يأتِ أحدٌ يومَ القيامةِ بأفضلَ ممَّا جاءَ بِهِ، إلَّا أحدٌ قالَ مثلَ ما قالَ، أو زادَ علَيهِ)', 'اللهمَّ إني أسألُك العفوَ والعافيةَ، في الدنيا والآخرةِ، اللهمَّ إني أسألُك العفوَ والعافيةَ، في دِيني ودنيايَ وأهلي ومالي، اللهمَّ استُرْ عوراتي، وآمِنْ روعاتي، واحفظني من بين يدي، ومن خلفي، وعن يميني، وعن شمالي، ومن فوقي، وأعوذُ بك أن أُغْتَالَ من تحتي', 'للَّهمَّ بِكَ أصبَحنا، وبِكَ أمسَينا، وبِكَ نحيا وبِكَ نموتُ وإليكَ المصيرُ', 'اللهمَّ إنِّي أعوذُ بك من الهمِّ والحزنِ، والعجزِ والكسلِ، والبُخلِ والجُبنِ، وضَلَعِ الدَّينِ، وغَلَبَةِ الرجالِ', 'اللَّهمَّ إنِّي أسألُكَ خيرَ هذا اليومِ فتحَه، ونصرَه، ونورَه، وبرَكتَه، وَهدايتَهُ، وأعوذُ بِكَ من شرِّ ما فيهِ وشرِّ ما بعدَه', 'اللَّهُمَّ إنِّي أسألُكَ العافيةَ في الدُّنيا والآخِرةِ، اللَّهُمَّ إنِّي أسألُكَ العَفوَ والعافيةَ في دِيني ودُنيايَ، وأهْلي ومالي، اللَّهُمَّ استُرْ عَوْراتي، وآمِنْ رَوْعاتي، اللَّهُمَّ احْفَظْني من بينِ يَدَيَّ، ومن خَلْفي، وعن يَميني، وعن شِمالي، ومن فَوْقي، وأعوذُ بعَظَمتِكَ أنْ أُغْتالَ من تَحْتي', 'اللهم إنا نعوذُ بك من أن نُشرِكَ بك شيئًا نعلَمُه، و نستغفرُك لما لا نعلمُه', 'يا حيُّ يا قيُّومُ، برَحمتِكَ أستَغيثُ، أصلِح لي شأني كُلَّهُ، ولا تَكِلني إلى نَفسي طرفةَ عينٍ', 'اللَّهمَّ ما أصبحَ بي من نعمةٍ أو بأحدٍ من خلقِكَ فمنكَ وحدَكَ لا شريكَ لكَ فلكَ الحمدُ ولكَ الشُّكرُ', 'اللَّهمَّ عالِمَ الغَيبِ والشَّهادةِ، فاطرَ السَّمواتِ والأرضِ، رَبَّ كلِّ شيءٍ ومَليكَهُ، أشهدُ أن لا إلَهَ إلَّا أنتَ، أعوذُ بِكَ مِن شرِّ نفسي وشرِّ الشَّيطانِ وشِركِهِ', '(حَسبيَ اللهُ لا إلهَ إلَّا هو، عليه تَوكَّلْتُ، وهو ربُّ العَرشِ العَظيمِ)، سَبعَ مراتٍ', '(سُبْحَانَ اللهِ وَبِحَمْدِهِ، عَدَدَ خَلْقِهِ وَرِضَا نَفْسِهِ وَزِنَةَ عَرْشِهِ وَمِدَادَ كَلِمَاتِهِ)، وهي تُقال ثلاث مرات', 'سبحانَ اللَّهِ وبحمدِهِ وهي تُقال مئةَ مرَّةٍ', 'اللَّهُمَّ إنِّي أصبَحتُ أُشهِدُك، وأُشهِدُ حَمَلةَ عَرشِكَ، ومَلائِكَتَك، وجميعَ خَلقِكَ: أنَّكَ أنتَ اللهُ لا إلهَ إلَّا أنتَ، وأنَّ مُحمَّدًا عبدُكَ ورسولُكَ', 'رَضيتُ باللَّهِ ربًّا، وبالإسلامِ دينًا، وبِمُحمَّدٍ رسولًا', 'اللَّهمَّ عافِني في بدَني اللَّهمَّ عافِني في سمعي اللَّهمَّ عافِني في بصري لا إلهَ إلَّا أنت. اللَّهمَّ إنِّي أعوذُ بِكَ منَ الكُفْرِ والفقرِ اللَّهمَّ إنِّي أعوذُ بكَ من عذابِ القبرِ لا إلهَ إلَّا أنت تعيدُها ثَلاثَ مرَّاتٍ', 'أَصْبَحْنَا وَأَصْبَحَ المُلْكُ لِلَّهِ وَالْحَمْدُ لِلَّهِ لا إلَهَ إلَّا اللَّهُ، وَحْدَهُ لا شَرِيكَ له . 
له المُلْكُ وَلَهُ الحَمْدُ وَهو علَى كُلِّ شيءٍ قَدِيرٌ، رَبِّ أَسْأَلُكَ خَيْرَ ما في هذِه اللَّيْلَةِ وَخَيْرَ ما بَعْدَهَا، وَأَعُوذُ بكَ مِن شَرِّ ما في هذِه اللَّيْلَةِ وَشَرِّ ما بَعْدَهَا، رَبِّ أَعُوذُ بكَ مِنَ الكَسَلِ وَسُوءِ الكِبَرِ، رَبِّ أَعُوذُ بكَ مِن عَذَابٍ في النَّارِ وَعَذَابٍ في القَبْرِ', 'اللَّهُمَّ صَلِّ عَلَى مُحَمَّدٍ وَعَلَى آلِ مُحَمَّدٍ، كَمَا صَلَّيْتَ عَلَى إِبْرَاهِيمَ وَعَلَى آلِ إِبْرَاهِيمَ، إِنَّكَ حَمِيدٌ مَجِيدٌ، اللَّهُمَّ بَارِكْ عَلَى مُحَمَّدٍ وَعَلَى آلِ مُحَمَّدٍ، كَمَا بَارَكْتَ عَلَى إِبْرَاهِيمَ وَعَلَى آلِ إِبْرَاهِيمَ، إِنَّكَ حَمِيدٌ مَجِيدٌ (مَن صلى عَلَيَّ حين يُصْبِحُ عَشْرًا ، وحين يُمْسِي عَشْرًا أَدْرَكَتْه شفاعتي يومَ القيامةِ)', 'أستغفرُ اللهَ العظيمَ الذي لا إلهَ إلَّا هو الحيَّ القيومَ وأتوبُ إليه', 'اللَّهمَّ إنِّي أسألُكَ عِلمًا نافعًا ورزقًا طيِّبًا وعملًا متقبَّلًا', 'أعوذُ بكلماتِ اللهِ التَّامَّاتِ مِن شرِّ ما خلَق', 'أعوذُ بكلماتِ اللهِ التَّامَّاتِ مِن شرِّ ما خلَق', 'أعوذُ بكلماتِ اللهِ التَّامَّاتِ مِن شرِّ ما خلَق', 'من قال إذا أصبَح: لا إلهَ إلَّا اللهُ وحدَه لا شريكَ له له الملكُ وله الحمدُ وهو على كلِّ شيءٍ قديرٌ عشْرَ مرَّاتٍ كُتِب له بهنَّ عشْرُ حسناتٍ ومُحي بهنَّ عنه عشْرُ سيِّئاتٍ ورُفِع له بهن عشْرُ درجاتٍ وكُنَّ له عَدْلَ عِتاقةِ أربعِ رقابٍ وكُنَّ له حرَسًا مِن الشَّيطانِ حتَّى يُمسيَ', 'آية الكرسي: (اللَّهُ لَا إِلَٰهَ إِلَّا هُوَ الْحَيُّ الْقَيُّومُ ۚ لَا تَأْخُذُهُ سِنَةٌ وَلَا نَوْمٌ ۚ لَّهُ مَا فِي السَّمَاوَاتِ وَمَا فِي الْأَرْضِ ۗ مَن ذَا الَّذِي يَشْفَعُ عِندَهُ إِلَّا بِإِذْنِهِ ۚ يَعْلَمُ مَا بَيْنَ أَيْدِيهِمْ وَمَا خَلْفَهُمْ ۖ وَلَا يُحِيطُونَ بِشَيْءٍ مِّنْ عِلْمِهِ إِلَّا بِمَا شَاءَ ۚ وَسِعَ كُرْسِيُّهُ السَّمَاوَاتِ وَالْأَرْضَ ۖ وَلَا يَئُودُهُ حِفْظُهُمَا ۚ وَهُوَ الْعَلِيُّ الْعَظِيمُ)', "سورة الإخلاص: (قُلْ هُوَ اللَّهُ أَحَدٌ* اللَّهُ الصَّمَدُ* لَمْ يَلِدْ وَلَمْ يُولَدْ* وَلَمْ يَكُن لَّهُ كُفُوًا أَحَدٌ) ثلاثا", 'سورة الفلق: (قُلْ أَعُوذُ بِرَبِّ الْفَلَقِ* مِن شَرِّ مَا خَلَقَ* وَمِن شَرِّ غَاسِقٍ إِذَا وَقَبَ* وَمِن شَرِّ النَّفَّاثَاتِ فِي الْعُقَدِ* وَمِن شَرِّ حَاسِدٍ إِذَا حَسَدَ) ثلاثا', 'سورة الناس: (قُلْ أَعُوذُ بِرَبِّ النَّاسِ* مَلِكِ النَّاسِ* إِلَٰهِ النَّاسِ* مِن شَرِّ الْوَسْوَاسِ الْخَنَّاسِ* الَّذِي يُوَسْوِسُ فِي صُدُورِ النَّاسِ* مِنَ الْجِنَّةِ وَالنَّاسِ) ثلاثا', 'قوله تعالى: (رَبِّ أَعُوذُ بِكَ مِنْ هَمَزَاتِ الشَّيَاطِينِ وَأَعُوذُ بِكَ رَبِّ أَنْ يَحْضُرُونِ)', 'قوله تعالى: (رَبِّ أَعُوذُ بِكَ مِنْ هَمَزَاتِ الشَّيَاطِينِ وَأَعُوذُ بِكَ رَبِّ أَنْ يَحْضُرُونِ)قوله تعالى: (حَسْبِيَ اللَّهُ لَا إِلَٰهَ إِلَّا هُوَ ۖ عَلَيْهِ تَوَكَّلْتُ ۖ وَهُوَ رَبُّ الْعَرْشِ الْعَظِيمِ).' 
] # ======================================================================================================================================================================================================================================================================================================================================================= AzkharAlMasaa = [ 'اللَّهمَّ إنِّي عَبدُك، وابنُ عبدِك، وابنُ أمتِك، ناصِيَتي بيدِكَ، ماضٍ فيَّ حكمُكَ، عدْلٌ فيَّ قضاؤكَ، أسألُكَ بكلِّ اسمٍ هوَ لكَ سمَّيتَ بهِ نفسَك، أو أنزلْتَه في كتابِكَ، أو علَّمتَه أحدًا من خلقِك، أو استأثرتَ بهِ في علمِ الغيبِ عندَك، أن تجعلَ القُرآنَ ربيعَ قلبي، ونورَ صَدري، وجَلاءَ حَزَني، وذَهابَ هَمِّي', 'اللَّهمَّ إنِّي أسأَلُكَ مِن الخيرِ كلِّه عاجلِه وآجلِه ما علِمْتُ منه وما لَمْ أعلَمْ وأعوذُ بكَ مِن الشَّرِّ كلِّه عاجلِه وآجلِه ما علِمْتُ منه وما لَمْ أعلَمْ، اللَّهمَّ إنِّي أسأَلُكَ مِن الخيرِ ما سأَلكَ عبدُك ونَبيُّكَ وأعوذُ بكَ مِن الشَّرِّ ما عاذ به عبدُك ونَبيُّكَ وأسأَلُكَ الجنَّةَ وما قرَّب إليها مِن قولٍ وعمَلٍ وأعوذُ بكَ مِن النَّارِ وما قرَّب إليها مِن قولٍ وعمَلٍ وأسأَلُكَ أنْ تجعَلَ كلَّ قضاءٍ قضَيْتَه لي خيرًا', '(بسمِ اللهِ الذي لا يَضرُ مع اسمِه شيءٌ في الأرضِ ولا في السماءِ وهو السميعُ العليمِ)، وتُقال ثلاث مرات', 'رَضِيتُ بِاللهِ رَبًّا، وَبِالْإِسْلَامِ دِينًا، وَبِمُحَمَّدٍ صَلَّى اللهُ عَلَيْهِ وَسَلَّمَ نَبِيًّا وَرَسُولًا', 'اللَّهمَّ بِكَ أمسَينا وبِكَ أصبَحنا وبِكَ نحيا وبِكَ نموتُ وإليكَ المصير', 'اللَّهمَّ ما أمسى بي مِن نعمةٍ أو بأحَدٍ مِن خَلْقِكَ، فمنكَ وحدَكَ لا شريكَ لكَ، فلَكَ الحمدُ ولكَ الشُّكرُ، فقد أدى شُكْرَ ذلكَ اليومِ', 'سبحانَ اللَّهِ وبحمدِهِ وهي تُقال مئةَ مرَّةٍ', '(سُبْحَانَ اللهِ وَبِحَمْدِهِ، عَدَدَ خَلْقِهِ وَرِضَا نَفْسِهِ وَزِنَةَ عَرْشِهِ وَمِدَادَ كَلِمَاتِهِ)، وهي تُقال ثلاث مرات', 'اللَّهُمَّ إنِّي أمسيت أُشهِدُك، وأُشهِدُ حَمَلةَ عَرشِكَ، ومَلائِكَتَك، وجميعَ خَلقِكَ: أنَّكَ أنتَ اللهُ لا إلهَ إلَّا أنتَ، وأنَّ مُحمَّدًا عبدُكَ ورسولُكَ', 'اللَّهُمَّ صَلِّ عَلَى مُحَمَّدٍ وَعَلَى آلِ مُحَمَّدٍ، كَمَا صَلَّيْتَ عَلَى إِبْرَاهِيمَ وَعَلَى آلِ إِبْرَاهِيمَ، إِنَّكَ حَمِيدٌ مَجِيدٌ، اللَّهُمَّ بَارِكْ عَلَى مُحَمَّدٍ وَعَلَى آلِ مُحَمَّدٍ، كَمَا بَارَكْتَ عَلَى إِبْرَاهِيمَ وَعَلَى آلِ إِبْرَاهِيمَ، إِنَّكَ حَمِيدٌ مَجِيدٌ (مَن صلى عَلَيَّ حين يُصْبِحُ عَشْرًا ، وحين يُمْسِي عَشْرًا أَدْرَكَتْه شفاعتي يومَ القيامةِ)', 'لا إلهَ إلَّا اللهُ وحدَه لا شريكَ له له الملكُ وله الحمدُ وهو على كلِّ شيءٍ قديرٌ', 'أمسَيْنا على فِطرةِ الإسلامِ وعلى كَلِمةِ الإخلاصِ وعلى دينِ نبيِّنا محمَّدٍ صلَّى اللهُ عليه وسلَّم وعلى مِلَّةِ أبينا إبراهيمَ حنيفًا مسلمًا وما كان مِنَ المشركينَ', '(اللَّهمَّ عافِني في بدَني اللَّهمَّ عافِني في سمعي اللَّهمَّ عافِني في بصري لا إلهَ إلَّا أنت، اللَّهمَّ إنِّي أعوذُ بِكَ منَ الكُفْرِ والفقرِ اللَّهمَّ إنِّي أعوذُ بكَ من عذابِ القبرِ لا إلهَ إلَّا أنت) وتقال ثَلاثَ مرَّاتٍ', 'اللهم إنا نعوذُ بك من أن نُشرِكَ بك شيئًا نعلَمُه، و نستغفرُك لما لا نعلمُه', 'أستغفرُ اللهَ العظيمَ الذي لا إلهَ إلَّا هو الحيَّ القيومَ وأتوبُ إليه', 'اللَّهمَّ إنِّي أسألُكَ عِلمًا نافعًا ورزقًا طيِّبًا وعملًا متقبَّلًا', 'اللَّهمَّ إنِّي أسألُكَ عِلمًا نافعًا ورزقًا طيِّبًا وعملًا متقبَّلًايا حيُّ يا قيُّومُ، برَحمتِكَ أستَغيثُ، أصلِح لي شأني كُلَّهُ، ولا تَكِلني إلى نَفسي طرفةَ عينٍ', 'اللَّهمَّ عالِمَ الغَيبِ والشَّهادةِ، فاطرَ السَّمواتِ والأرضِ، رَبَّ كلِّ شيءٍ ومَليكَهُ، أشهدُ أن لا إلَهَ إلَّا أنتَ، أعوذُ بِكَ مِن شرِّ نفسي وشرِّ الشَّيطانِ وشِركِهِ', 'اللهمَّ فاطرَ السمواتِ والأرضِ، عالمَ الغيبِ والشهادةِ، لا إلهَ إلَّا أنتَ ربَّ كلِّ شيءٍ ومَليكَه، أعوذُ بك من شرِّ نفسي ومن شرِّ الشيطانِ وشرَكِه، وأنْ أقترفَ على نفسي سوءًا أو أجرَّهُ إلى مسلمٍ', 'اللهمَّ إنِّي 
أعوذُ بك من الهمِّ والحزنِ، والعجزِ والكسلِ، والبُخلِ والجُبنِ، وضَلَعِ الدَّينِ، وغَلَبَةِ الرجالِ', 'أعوذُ بكلماتِ اللهِ التَّامَّاتِ مِن شرِّ ما خلَق', 'اللهمَّ إني أسألُك العفوَ والعافيةَ، في الدنيا والآخرةِ، اللهمَّ إني أسألُك العفوَ والعافيةَ، في دِيني ودنيايَ وأهلي ومالي، اللهمَّ استُرْ عوراتي، وآمِنْ روعاتي، واحفظني من بين يدي، ومن خلفي، وعن يميني، وعن شمالي، ومن فوقي، وأعوذُ بك أن أُغْتَالَ من تحتي', 'أَمْسَيْنَا وَأَمْسَى المُلْكُ لِلَّهِ، وَالْحَمْدُ لِلَّهِ لا إلَهَ إلَّا اللَّهُ، وَحْدَهُ لا شَرِيكَ له، له المُلْكُ وَلَهُ الحَمْدُ وَهو علَى كُلِّ شيءٍ قَدِيرٌ، رَبِّ أَسْأَلُكَ خَيْرَ ما في هذِه اللَّيْلَةِ وَخَيْرَ ما بَعْدَهَا، وَأَعُوذُ بكَ مِن شَرِّ ما في هذِه اللَّيْلَةِ وَشَرِّ ما بَعْدَهَا، رَبِّ أَعُوذُ بكَ مِنَ الكَسَلِ وَسُوءِ الكِبَرِ، رَبِّ أَعُوذُ بكَ مِن عَذَابٍ في النَّارِ وَعَذَابٍ في القَبْرِ', 'اللَّهُمَّ أنْتَ رَبِّي لا إلَهَ إلَّا أنْتَ، خَلَقْتَنِي وأنا عَبْدُكَ، وأنا علَى عَهْدِكَ ووَعْدِكَ ما اسْتَطَعْتُ، أعُوذُ بكَ مِن شَرِّ ما صَنَعْتُ، أبُوءُ لكَ بنِعْمَتِكَ عَلَيَّ، وأَبُوءُ لكَ بذَنْبِي فاغْفِرْ لِي، فإنَّه لا يَغْفِرُ الذُّنُوبَ إلَّا أنْتَ', 'اللَّهمَّ إنِّي أسألُكَ خيرَ هذه الليلة فتحَها، ونصرَها، ونورَها، وبرَكتَها، وَهداها، وأعوذُ بِكَ من شرِّ ما فيها وشرِّ ما بعدَها', 'آية الكرسي: (اللَّهُ لَا إِلَٰهَ إِلَّا هُوَ الْحَيُّ الْقَيُّومُ ۚ لَا تَأْخُذُهُ سِنَةٌ وَلَا نَوْمٌ ۚ لَّهُ مَا فِي السَّمَاوَاتِ وَمَا فِي الْأَرْضِ ۗ مَن ذَا الَّذِي يَشْفَعُ عِندَهُ إِلَّا بِإِذْنِهِ ۚ يَعْلَمُ مَا بَيْنَ أَيْدِيهِمْ وَمَا خَلْفَهُمْ ۖ وَلَا يُحِيطُونَ بِشَيْءٍ مِّنْ عِلْمِهِ إِلَّا بِمَا شَاءَ ۚ وَسِعَ كُرْسِيُّهُ السَّمَاوَاتِ وَالْأَرْضَ ۖ وَلَا يَئُودُهُ حِفْظُهُمَا ۚ وَهُوَ الْعَلِيُّ الْعَظِيمُ)', "قال تعالى في سورة البقرة أيضاً: (آمَنَ الرَّسُولُ بِمَا أُنزِلَ إِلَيْهِ مِن رَّبِّهِ وَالْمُؤْمِنُونَ ۚ كُلٌّ آمَنَ بِاللَّهِ وَمَلَائِكَتِهِ وَكُتُبِهِ وَرُسُلِهِ لَا نُفَرِّقُ بَيْنَ أَحَدٍ مِّن رُّسُلِهِ ۚ وَقَالُوا سَمِعْنَا وَأَطَعْنَا ۖ غُفْرَانَكَ رَبَّنَا وَإِلَيْكَ الْمَصِيرُ*لَا يُكَلِّفُ اللَّهُ نَفْسًا إِلَّا وُسْعَهَا ۚ لَهَا مَا كَسَبَتْ وَعَلَيْهَا مَا اكْتَسَبَتْ ۗ رَبَّنَا لَا تُؤَاخِذْنَا إِن نَّسِينَا أَوْ أَخْطَأْنَا ۚ رَبَّنَا وَلَا تَحْمِلْ عَلَيْنَا إِصْرًا كَمَا حَمَلْتَهُ عَلَى الَّذِينَ مِن قَبْلِنَا ۚ رَبَّنَا وَلَا تُحَمِّلْنَا مَا لَا طَاقَةَ لَنَا بِهِ ۖ وَاعْفُ عَنَّا وَاغْفِرْ لَنَا وَارْحَمْنَا ۚ أَنتَ مَوْلَانَا فَانصُرْنَا عَلَى الْقَوْمِ الْكَافِرِينَ)", "سورة الإخلاص: (قُلْ هُوَ اللَّهُ أَحَدٌ* اللَّهُ الصَّمَدُ* لَمْ يَلِدْ وَلَمْ يُولَدْ* وَلَمْ يَكُن لَّهُ كُفُوًا أَحَدٌ) ثلاثا", 'سورة الفلق: (قُلْ أَعُوذُ بِرَبِّ الْفَلَقِ* مِن شَرِّ مَا خَلَقَ* وَمِن شَرِّ غَاسِقٍ إِذَا وَقَبَ* وَمِن شَرِّ النَّفَّاثَاتِ فِي الْعُقَدِ* وَمِن شَرِّ حَاسِدٍ إِذَا حَسَدَ) ثلاثا', 'سورة الناس: (قُلْ أَعُوذُ بِرَبِّ النَّاسِ* مَلِكِ النَّاسِ* إِلَٰهِ النَّاسِ* مِن شَرِّ الْوَسْوَاسِ الْخَنَّاسِ* الَّذِي يُوَسْوِسُ فِي صُدُورِ النَّاسِ* مِنَ الْجِنَّةِ وَالنَّاسِ) ثلاثا'
]


def story(PageName, Text):
    # Drive the Creator Studio UI with fixed screen coordinates
    pyautogui.moveTo(950, 300, duration=1)
    time.sleep(2)
    pyautogui.click()
    pyautogui.moveTo(900, 200, duration=1)
    time.sleep(2)
    pyautogui.click()
    pyautogui.write(PageName)
    time.sleep(2)
    pyautogui.moveTo(970, 270, duration=1)
    time.sleep(6)
    pyautogui.click()
    pyautogui.moveTo(1000, 500, duration=1)
    time.sleep(2)
    pyautogui.click()
    pyautogui.moveTo(150, 400, duration=1)
    time.sleep(2)
    pyautogui.click()
    # Store our string to the clipboard
    pyperclip.copy(Text)
    # Hotkey the paste command
    pyautogui.hotkey("ctrl", "v")
    pyautogui.moveTo(250, 700, duration=1)
    time.sleep(2)
    pyautogui.click()


x = int(input('Enter the type (0 for test, 1 for AzkharAlsabah, 2 for AzkharAlMasaa): '))

if x == 0:
    webbrowser.open_new('https://business.facebook.com/creatorstudio/home')
    time.sleep(10)
    story('apocryphon', f'{datetime.datetime.now().date()} AzkharAlsabah Done on {datetime.datetime.now().time()}✔')
elif x == 1:
    webbrowser.open_new('https://business.facebook.com/creatorstudio/home')
    time.sleep(10)
    story('apocryphon', f'{datetime.datetime.now().date()} AzkharAlsabah Starts')
    for i in AzkharAlsabah:
        story('apocryphon', i)
        time.sleep(2)
    story('apocryphon', f'{datetime.datetime.now().date()} AzkharAlsabah Done on {datetime.datetime.now().time()}✔')
elif x == 2:
    webbrowser.open_new('https://business.facebook.com/creatorstudio/home')
    time.sleep(10)
    story('apocryphon', f'{datetime.datetime.now().date()} AzkharAlMasaa Starts')
    for i in AzkharAlMasaa:
        story('apocryphon', i)
        time.sleep(2)
    story('apocryphon', f'{datetime.datetime.now().date()} AzkharAlMasaa Done on {datetime.datetime.now().time()}✔')
[ "ahmed.abo.sita66@gmail.com" ]
ahmed.abo.sita66@gmail.com
c9708fe103af2012e13994b656c45ba4a852077c
abad82a1f487c5ff2fb6a84059a665aa178275cb
/Codewars/8kyu/8kyu-interpreters-hq9-plus/Python/solution1.py
bdd53cce40278d9d04a75b8b2e61e0cc09d79511
[ "MIT" ]
permissive
RevansChen/online-judge
8ae55f136739a54f9c9640a967ec931425379507
ad1b07fee7bd3c49418becccda904e17505f3018
refs/heads/master
2021-01-19T23:02:58.273081
2019-07-05T09:42:40
2019-07-05T09:42:40
88,911,035
9
0
null
null
null
null
UTF-8
Python
false
false
487
py
# Python - 3.6.0

gets = lambda i: 's' if i != 1 else ''

HQ9 = {
    'H': 'Hello World!',
    'Q': 'Q',
    '9': '\n'.join(
        f'{i} bottle{gets(i)} of beer on the wall, {i} bottle{gets(i)} of beer.\nTake one down and pass it around, {i - 1 if i > 1 else "no more"} bottle{gets(i - 1)} of beer on the wall.'
        for i in range(99, 0, -1)
    ) + '\nNo more bottles of beer on the wall, no more bottles of beer.\nGo to the store and buy some more, 99 bottles of beer on the wall.'
}.get
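# Usage example (mine, not part of the kata file): HQ9 is the dict's .get, so
#   HQ9('H')  # -> 'Hello World!'
#   HQ9('Q')  # -> 'Q'
#   HQ9('+')  # -> None: '+' only bumps HQ9+'s hidden accumulator, no output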
[ "d79523@hotmail.com" ]
d79523@hotmail.com
e7f2596520d3677c4a237c7632d8e1b51aad8246
dcefbb67cfdc837a5b1016ea674ead66263f0af2
/algorithm/BOJ_2442.py
8e5b65e98dfb9ba6de506f7620715ecfc2770547
[]
no_license
SeungYeopB/weekend-study
0a5d5bdbb00a7d81f2ec7c6c5b2fc7b96d92c296
02651855bb91e26784611bbed34a01023f4ef307
refs/heads/master
2023-06-23T15:52:54.475077
2021-07-23T07:57:16
2021-07-23T07:57:16
382,514,062
0
0
null
null
null
null
UTF-8
Python
false
false
156
py
N = int(input())
for i in range(1, N + 1):
    for j in range(N - i):
        print(" ", end="")
    for j in range(2 * i - 1):
        print("*", end="")
    print()
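# Example: for input 3 the program prints a centered pyramid
#     "  *"
#     " ***"
#     "*****"
# (N - i leading spaces, then 2*i - 1 stars on row i).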
[ "study0610@naver.com" ]
study0610@naver.com
5a11bc517beebdc39fe586b47fc5f2a103dbdbb6
a5de047e66a44459cbde9062d8d8d6c2da11d750
/db_session.py
84820499406b2f08a6647a194a09f4b0d3ee539a
[]
no_license
MrRomacka/diary
515b1f4fc7732c936464827c1431ab14f300b32d
219f9c0472ed933d71b7412e79b8ea6a8a77eb5d
refs/heads/main
2023-01-22T14:23:02.251413
2020-11-13T13:41:42
2020-11-13T13:41:42
312,364,025
0
0
null
null
null
null
UTF-8
Python
false
false
840
py
import sqlalchemy as sa
import sqlalchemy.orm as orm
from sqlalchemy.orm import Session
import sqlalchemy.ext.declarative as dec

SqlAlchemyBase = dec.declarative_base()

__factory = None


def global_init(db_file: str):
    global __factory

    if __factory:
        return

    if not db_file or not db_file.strip():
        raise Exception('A database file must be specified.')

    conn_str = f'sqlite:///{db_file.strip()}?check_same_thread=False'
    print(f"Connecting to the database at {conn_str}")

    engine = sa.create_engine(conn_str, echo=False)
    __factory = orm.sessionmaker(bind=engine)

    SqlAlchemyBase.metadata.create_all(engine)


def create_session() -> Session:
    global __factory
    return __factory()
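# Usage sketch (the database file name is an assumption):
#   import db_session
#   db_session.global_init("db/diary.db")   # builds the engine and tables once
#   session = db_session.create_session()   # fresh session per unit of work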
[ "noreply@github.com" ]
MrRomacka.noreply@github.com
b1718080973873c3be8fe613fdb2207a4ef59185
86bebcb32810b3af337f6fe0e4ce30ebb7fa7a16
/ServerlessDjango/asgi.py
a30b4fcadfd0186624b55569e6e08e9bf7709d32
[]
no_license
NimishVerma/ServerlessDjangoTemplate
7bc665d043d656c4abaf1b076a38935f98390d41
9f1f8fba7a37867f90369feac0d4f215ec3ab53d
refs/heads/master
2023-04-01T12:07:31.012179
2021-04-05T02:54:02
2021-04-05T02:54:02
354,104,152
2
0
null
null
null
null
UTF-8
Python
false
false
409
py
""" ASGI config for ServerlessDjango project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ServerlessDjango.settings') application = get_asgi_application()
[ "nimishverma@ymail.com" ]
nimishverma@ymail.com
92ad7068066c80a09e52e4efc9930ae95ea93b66
0223a85b6a3fecd453b9bd61368cb51c2978c30f
/Courses/1 month/4 week/day 2/Задача №2.py
7b91a00c66d20af8e6336a4673389637c2148b5c
[ "MIT" ]
permissive
emir-naiz/first_git_lesson
4cb502d13ead7f459a8e57333581d5256fd8de39
1fecf712290f6da3ef03deff518870d91638eb69
refs/heads/main
2023-01-05T12:29:02.905718
2020-11-10T06:00:19
2020-11-10T06:00:19
306,527,911
0
0
null
null
null
null
UTF-8
Python
false
false
301
py
# A function that collects all even numbers below the given number
def find_even_nums(number):
    even_list = []
    for i in range(1, number):
        if i % 2 == 0:
            even_list.append(i)
    return even_list


print(find_even_nums(10))
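# The call above prints [2, 4, 6, 8]: range(1, number) stops before the
# argument, so only even numbers strictly below it are collected.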
[ "naizabekoff@mail.ru" ]
naizabekoff@mail.ru
49125a103d0ef8ad23344162256cf34b29c740c5
5c0506e42fc7f0325728994223f1b0be4f1187fc
/summa_py_textrank.py
2fd1d59fa66724ab7ba0f6a9607be02ff57006a6
[]
no_license
Trevahok/summarizer
602d492385c3130c6c9f11dd82e71177541ede73
cfd134e79ec5dfac3530081c6863421ab667207d
refs/heads/master
2020-03-19T20:36:21.680650
2018-06-12T06:54:36
2018-06-12T06:54:36
136,908,134
0
0
null
null
null
null
UTF-8
Python
false
false
1,124
py
from urllib.request import urlopen
from summa.summarizer import summarize
from sys import argv
from bs4 import BeautifulSoup as bs
import PyPDF2


def from_link():
    page = urlopen(argv[1])
    soup = bs(page, 'lxml')
    text = soup.find_all('p')
    text = '\n'.join([i.text for i in text])
    print(summarize(text, ratio=0.2))


def from_pdf():
    pdfdoc = open(argv[1], 'rb')
    pdfReader = PyPDF2.PdfFileReader(pdfdoc)
    count = pdfReader.numPages
    for i in range(count):
        page = pdfReader.getPage(i)
        print('Page Number: ', i, '\n')
        print(summarize(page.extractText(), ratio=0.2))
        print('\n\n')


def from_txt():
    file = open(argv[1], 'r')
    text = file.read()
    print(summarize(text, ratio=0.2))


if __name__ == "__main__":
    try:
        filetype = argv[2]
        if filetype == 'url':
            from_link()
        elif filetype == 'pdf':
            from_pdf()
        else:
            from_txt()
    except IndexError:
        print("\nUsage:\n \tsummarize 'http:// url.to.summarize' url \n or \n \tsummarize 'path/to/file/file.pdf' pdf \n or \n \tsummarize 'path/to/file/file.txt' txt ")
[ "vighneshss@gmail.com" ]
vighneshss@gmail.com
e5d23091a1fc43d867a7d92b6d2aeaccf1d37eca
09724c51f0012474eb322a676fd112d1bc102bb6
/CodeBuilder/Valids.py
bb671f0840c37c2cab29f8c586e3505cbaf0c0cc
[]
no_license
danmarshall208/CodeBuilder
53d4b72476d704a8290aeb5925975f10a184817f
526f0d37b9ef11484eada691275b9ce71f5c2fab
refs/heads/master
2021-05-09T07:53:15.845690
2018-01-29T11:33:51
2018-01-29T11:33:51
119,374,191
0
0
null
null
null
null
UTF-8
Python
false
false
467
py
valid_var_names = ['a', 'b', 'c', 'd', 'e']
valid_func_names = ['A', 'B', 'C', 'D', 'E']
valid_funcs = ['chr', 'float', 'ord', 'print', 'round', 'str', 'sum']
#valid_funcs = ['abs', 'bool', 'chr', 'float', 'id', 'len', 'ord', 'print', 'round', 'str', 'sum', 'type']
operations = ['object', 'add', 'subtract', 'multiply', 'divide']
#operations = ['object', 'add', 'subtract', 'multiply', 'divide', 'equals', 'not_equals']
objects = ['int', 'string', 'var', 'function']
[ "danmarshall208@hotmail.com" ]
danmarshall208@hotmail.com
b2432c7ce576836fc769e1c9a990bb2a1b00d91c
ef243d91a1826b490e935fa3f3e6c29c3cc547d0
/cv2/cv2/MergeExposures.py
7d68ec4c5e8da4d27c6ad8ddb544c23ea3973a7e
[]
no_license
VentiFang/Python_local_module
6b3d0b22399e817057dfd15d647a14bb1e41980e
c44f55379eca2818b29732c2815480ee755ae3fb
refs/heads/master
2020-11-29T11:24:54.932967
2019-12-25T12:57:14
2019-12-25T12:57:14
230,101,875
0
0
null
null
null
null
UTF-8
Python
false
false
1,989
py
# encoding: utf-8
# module cv2.cv2
# from F:\Python\Python36\lib\site-packages\cv2\cv2.cp36-win_amd64.pyd
# by generator 1.147
""" Python wrapper for OpenCV. """

# imports
# NOTE: the stub generator emitted an incomplete "import cv2.cv2 as" line
# here (a syntax error); it is kept as a comment so the stub stays parseable.
# import cv2.cv2 as ...  # F:\Python\Python36\lib\site-packages\cv2\cv2.cp36-win_amd64.pyd
import cv2.Error as Error  # <module 'cv2.Error'>
import cv2.cuda as cuda  # <module 'cv2.cuda'>
import cv2.detail as detail  # <module 'cv2.detail'>
import cv2.dnn as dnn  # <module 'cv2.dnn'>
import cv2.fisheye as fisheye  # <module 'cv2.fisheye'>
import cv2.flann as flann  # <module 'cv2.flann'>
import cv2.instr as instr  # <module 'cv2.instr'>
import cv2.ipp as ipp  # <module 'cv2.ipp'>
import cv2.ml as ml  # <module 'cv2.ml'>
import cv2.ocl as ocl  # <module 'cv2.ocl'>
import cv2.ogl as ogl  # <module 'cv2.ogl'>
import cv2.samples as samples  # <module 'cv2.samples'>
import cv2.utils as utils  # <module 'cv2.utils'>
import cv2.videoio_registry as videoio_registry  # <module 'cv2.videoio_registry'>
import cv2 as __cv2


class MergeExposures(__cv2.Algorithm):
    # no doc
    def process(self, src, times, response, dst=None):  # real signature unknown; restored from __doc__
        """
        process(src, times, response[, dst]) -> dst
        .   @brief Merges images.
        .
        .   @param src vector of input images
        .   @param dst result image
        .   @param times vector of exposure time values for each image
        .   @param response 256x1 matrix with inverse camera response function for each pixel value, it should
        .   have the same number of channels as images.
        """
        pass

    def __init__(self, *args, **kwargs):  # real signature unknown
        pass

    @staticmethod  # known case of __new__
    def __new__(*args, **kwargs):  # real signature unknown
        """ Create and return a new object.  See help(type) for accurate signature. """
        pass

    def __repr__(self, *args, **kwargs):  # real signature unknown
        """ Return repr(self). """
        pass
[ "5149528+ventifang@user.noreply.gitee.com" ]
5149528+ventifang@user.noreply.gitee.com
3f0e72af94251d7862f5e10f270251b0a1d37a48
d3f0a0a8a4508dbc7ddd3c6251760672368c2cc9
/word2vec_classif.py
e798ef6bcbba52c0ac03982abdb4582e689926b2
[]
no_license
Jey1kRey/projet6
13883b1eac53dcea6639bde8461b48977e1428d1
d7ae9b798fc631277fb62d1e14b57f25393b2822
refs/heads/master
2020-04-20T17:11:51.781883
2019-02-03T19:04:06
2019-02-03T19:04:06
168,981,810
0
1
null
null
null
null
UTF-8
Python
false
false
3,622
py
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 08:42:39 2018

@author: Jérôme
"""

import pandas as pd
import gensim
import numpy as np
from gensim.models import word2vec
import re
from nltk.corpus import stopwords
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier

#model=gensim.models.Word2Vec.load('model_wv_150.bin')
model = gensim.models.Word2Vec.load('model_wv_complet.bin')
#w2v = dict(zip(model.wv.index2word, model.wv.syn0))
vocabulaire = model.wv.syn0
#print(len(w2v))
#print(len(model.wv.index2word))
#print(len(model.wv.vocab.keys()))

#df=pd.read_csv('base_texte.csv',sep=',')
df = pd.read_csv('base_totale.csv', sep=',', engine='python')
dftags = pd.read_csv('dico_tags.csv', sep=',')
df_test = pd.read_csv('base_test.csv', sep=',', engine='python')

df = df.fillna('')
questions = df['Body'].iloc[0:20]
indices = questions.index
df_tags = df['Tags'].iloc[0:20]

'''
text normalisation
'''

def dico_tag(texte):
    dico = []
    for x in texte:
        dico.append(x)
    return dico

dico = dico_tag(dftags.TagName)

def tokeniz(texte):
    regex = re.compile("[^a-zA-Z]")
    text_modif = regex.sub(" ", texte)
    texte_lower = text_modif.lower()
    phrase = texte_lower.split()
    for i in list(phrase):
        if i in stopwords.words('english'):
            phrase.remove(i)
    for i in list(phrase):
        if i not in dico:
            phrase.remove(i)
    mots = " ".join(phrase)
    return mots

def nettoyage_dataframe(data):
    texte = data.apply(tokeniz)
    return texte

df_question = nettoyage_dataframe(questions)
question_test = df_question.iloc[0]
print(question_test)

def net_tg(texte):
    regex = re.compile("[^a-zA-Z]")
    text_modif = regex.sub(" ", texte)
    texte_lower = text_modif.lower()
    return texte_lower

def net_df_tg(data):
    texte = data.apply(net_tg)
    return texte

y_tg = net_df_tg(df_tags)

def recup_vecteurs(corpus, model):
    index2word_set = set(model.wv.vocab.keys())  # words known to model
    featureVec = np.zeros(model.vector_size, dtype="float32")
    liste_vecteur = []
    for word in corpus:
        if word in index2word_set:
            featureVec = np.add(featureVec, model[word])
            liste_vecteur.append(featureVec)
    return liste_vecteur

def creation_corpus(questions):
    liste_questions = []
    for element in questions:
        mots_wv = recup_vecteurs(element, model)
        liste_questions.append(mots_wv)
    return liste_questions

x_train, x_test, y_train, y_test = train_test_split(questions, y_tg, train_size=0.7)
liste_xtrain = creation_corpus(x_train)
liste_xtest = creation_corpus(x_test)
train = np.vstack(liste_xtrain)
test = np.vstack(liste_xtest)

#essai=pd.DataFrame(liste_finale, index=indices)
#print(essai)
#vecteurs_corpus=recup_vecteurs(question_test, model)
#print(len(vecteurs_corpus))
#print(vecteurs_corpus)

#x_train, x_test, y_train, y_test = train_test_split(test, y_tg, train_size=0.7)
foret = OneVsRestClassifier(RandomForestClassifier())
foret.fit(train, y_train)
#print(foret.score(x_test,y_test))
#print(foret.predict(x_test[10:15]))
#print(y_test[10:15])

'''
x_train, x_test, y_train, y_test = train_test_split(vocabulaire, y_tg, train_size=0.7)
foret=OneVsRestClassifier(RandomForestClassifier())
foret.fit(x_train, y_train)
print(foret.score(x_test,y_test))
print(foret.predict(x_test[10:15]))
print(y_test[10:15])
'''
[ "jeyonizuka@hotmail.com" ]
jeyonizuka@hotmail.com
56fbf2a47fa9865416f2c8ff06113e4b3ebbf002
934f170481a5f3807b14823f9e704fd877044d30
/SAGAN.py
5af51b32adb641eff5c37319b7332b3957d40e21
[]
no_license
JustinLion83/Anime-GAN-tensorflow
c234bd28e197a801460683d07aa35d2d80cb96f9
e3a5fd726aeaf08d01445d8176468d84cd3295f4
refs/heads/master
2020-07-03T06:36:25.765500
2019-08-17T19:37:04
2019-08-17T19:37:04
201,822,987
0
0
null
2019-08-11T22:45:42
2019-08-11T22:45:42
null
UTF-8
Python
false
false
12,068
py
from layers import *
import numpy as np
import time
from utils import util
import os


class SAGAN_model(object):
    def __init__(self, args):
        self.args = args
        self.d_loss_log = []
        self.g_loss_log = []
        self.layer_num = int(np.log2(self.args.img_size[0])) - 3

        # inputs
        self.is_training = tf.placeholder_with_default(False, (), name='is_training')
        self.inputs = tf.placeholder(tf.float32, [None, self.args.img_size[0], self.args.img_size[1],
                                                  self.args.img_size[2]], name='inputs')
        self.z = tf.placeholder(tf.float32, [None, 1, 1, self.args.z_dim], name='z')  # noise

        # output of D for real images
        real_logits = self.discriminator(self.inputs)

        # output of D for fake images
        self.fake_images = self.generator(self.z)
        fake_logits = self.discriminator(self.fake_images, reuse=True)

        # get loss for discriminator
        self.d_loss = self.discriminator_loss(d_logits_real=real_logits, d_logits_fake=fake_logits)

        # get loss for generator
        self.g_loss = self.generator_loss(d_logits_fake=fake_logits)

        # divide trainable variables into a group for D and a group for G
        t_vars = tf.trainable_variables()
        d_vars = [var for var in t_vars if 'discriminator' in var.name]
        g_vars = [var for var in t_vars if 'generator' in var.name]

        # global step
        self.global_step = tf.get_variable('global_step', initializer=tf.constant(0), trainable=False)
        self.add_step = self.global_step.assign(self.global_step + 1)

        # optimizers
        self.d_lr = tf.train.exponential_decay(self.args.d_lr,
                                               tf.maximum(self.global_step - self.args.decay_start_steps, 0),
                                               self.args.decay_steps, self.args.decay_rate)
        self.g_lr = tf.train.exponential_decay(self.args.g_lr,
                                               tf.maximum(self.global_step - self.args.decay_start_steps, 0),
                                               self.args.decay_steps, self.args.decay_rate)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            d_grads = tf.gradients(self.d_loss, d_vars)
            d_opt = tf.train.AdamOptimizer(self.d_lr, beta1=self.args.beta1, beta2=self.args.beta2)
            self.train_d = d_opt.apply_gradients(zip(d_grads, d_vars))
            g_grads = tf.gradients(self.g_loss, g_vars)
            g_opt = tf.train.AdamOptimizer(self.g_lr, beta1=self.args.beta1, beta2=self.args.beta2)
            self.train_g = g_opt.apply_gradients(zip(g_grads, g_vars))

        # EMA for generator
        with tf.variable_scope("EMA_Weights"):
            if self.args.ema_decay is not None:
                self.var_ema = tf.train.ExponentialMovingAverage(self.args.ema_decay, num_updates=self.global_step)
                with tf.control_dependencies([self.train_g]):
                    self.ema_train_g = self.var_ema.apply(tf.trainable_variables(scope='generator'))

                # assign ema weights
                self.assign_vars = []
                for var in tf.trainable_variables(scope='generator'):
                    v = self.var_ema.average(var)
                    if v is not None:
                        self.assign_vars.append(tf.assign(var, v))

    def discriminator_loss(self, d_logits_real, d_logits_fake):
        real_loss = tf.reduce_mean(tf.nn.relu(1.0 - d_logits_real))
        fake_loss = tf.reduce_mean(tf.nn.relu(1.0 + d_logits_fake))
        loss = real_loss + fake_loss
        return loss

    def generator_loss(self, d_logits_fake):
        loss = -tf.reduce_mean(d_logits_fake)
        return loss

    def generator(self, z, reuse=False):
        with tf.variable_scope("generator", reuse=reuse):
            ch = self.args.g_filters
            x = spectral_deconv2d(z, filters=ch, kernel_size=4, stride=1, is_training=self.is_training,
                                  padding='VALID', use_bias=False, scope='deconv2d')
            x = batch_norm(x, self.is_training, scope='batch_norm')
            x = tf.nn.leaky_relu(x, alpha=0.2)

            for i in range(self.layer_num // 2):
                with tf.variable_scope('layer' + str(i)):
                    if self.args.up_sample:
                        x = up_sample(x, scale_factor=2)
                        x = spectral_conv2d(x, filters=ch // 2, kernel_size=3, stride=1,
                                            is_training=self.is_training, padding='SAME',
                                            scope='up_conv2d_' + str(i))
                    else:
                        x = spectral_deconv2d(x, filters=ch // 2, kernel_size=4, stride=2,
                                              is_training=self.is_training, use_bias=False,
                                              scope='deconv2d_' + str(i))
                    x = batch_norm(x, self.is_training, scope='batch_norm_' + str(i))
                    x = tf.nn.leaky_relu(x, alpha=0.2)
                    ch = ch // 2

            # Self Attention
            x = attention(x, ch, is_training=self.is_training, scope="attention", reuse=reuse)

            for i in range(self.layer_num // 2, self.layer_num):
                with tf.variable_scope('layer' + str(i)):
                    if self.args.up_sample:
                        x = up_sample(x, scale_factor=2)
                        x = spectral_conv2d(x, filters=ch // 2, kernel_size=3, stride=1,
                                            is_training=self.is_training, padding='SAME',
                                            scope='up_conv2d_' + str(i))
                    else:
                        x = spectral_deconv2d(x, filters=ch // 2, kernel_size=4, stride=2,
                                              is_training=self.is_training, use_bias=False,
                                              scope='deconv2d_' + str(i))
                    x = batch_norm(x, self.is_training, scope='batch_norm_' + str(i))
                    x = tf.nn.leaky_relu(x, alpha=0.2)
                    ch = ch // 2

            if self.args.up_sample:
                x = up_sample(x, scale_factor=2)
                x = spectral_conv2d(x, filters=self.args.img_size[2], kernel_size=3, stride=1,
                                    is_training=self.is_training, padding='SAME', scope='G_conv_logit')
            else:
                x = spectral_deconv2d(x, filters=self.args.img_size[2], kernel_size=4, stride=2,
                                      is_training=self.is_training, use_bias=False, scope='G_deconv_logit')
            x = tf.nn.tanh(x)
            return x

    def discriminator(self, x, reuse=False):
        with tf.variable_scope("discriminator", reuse=reuse):
            ch = self.args.d_filters
            x = spectral_conv2d(x, filters=ch, kernel_size=4, stride=2, is_training=self.is_training,
                                padding='SAME', use_bias=False, scope='conv2d')
            x = tf.nn.leaky_relu(x, alpha=0.2)

            for i in range(self.layer_num // 2):
                x = spectral_conv2d(x, filters=ch * 2, kernel_size=4, stride=2, is_training=self.is_training,
                                    padding='SAME', use_bias=False, scope='conv2d_' + str(i))
                x = batch_norm(x, self.is_training, scope='batch_norm' + str(i))
                x = tf.nn.leaky_relu(x, alpha=0.2)
                ch = ch * 2

            # Self Attention
            x = attention(x, ch, is_training=self.is_training, scope="attention", reuse=reuse)

            for i in range(self.layer_num // 2, self.layer_num):
                x = spectral_conv2d(x, filters=ch * 2, kernel_size=4, stride=2, is_training=self.is_training,
                                    padding='SAME', use_bias=False, scope='conv2d_' + str(i))
                x = batch_norm(x, self.is_training, scope='batch_norm' + str(i))
                x = tf.nn.leaky_relu(x, alpha=0.2)
                ch = ch * 2

            x = spectral_conv2d(x, filters=1, kernel_size=4, padding='VALID', stride=1,
                                is_training=self.is_training, use_bias=False, scope='D_logit')
            x = tf.squeeze(x, axis=[1, 2])
            return x

    def preprocess(self, x):
        x = x / 127.5 - 1
        return x

    def train_epoch(self, sess, saver, train_next_element, i_epoch, n_batch, truncated_norm, z_fix=None):
        t_start = None
        global_step = 0
        for i_batch in range(n_batch):
            if i_batch == 1:
                t_start = time.time()
            batch_imgs = sess.run(train_next_element)
            batch_imgs = self.preprocess(batch_imgs)
            batch_z = truncated_norm.rvs([self.args.batch_size, 1, 1, self.args.z_dim])
            feed_dict_ = {self.inputs: batch_imgs, self.z: batch_z, self.is_training: True}

            # update D network
            _, d_loss, d_lr, g_lr = sess.run([self.train_d, self.d_loss, self.d_lr, self.g_lr],
                                             feed_dict=feed_dict_)
            self.d_loss_log.append(d_loss)

            # update G network
            g_loss = None
            if i_batch % self.args.n_critic == 0:
                if self.args.ema_decay is not None:
                    _, g_loss, _, global_step = sess.run(
                        [self.ema_train_g, self.g_loss, self.add_step, self.global_step],
                        feed_dict=feed_dict_)
                else:
                    _, g_loss, _, global_step = sess.run([self.train_g, self.g_loss, self.add_step,
                                                          self.global_step], feed_dict=feed_dict_)
                self.g_loss_log.append(g_loss)

            last_train_str = "[epoch:%d/%d, global_step:%d] -d_loss:%.3f - g_loss:%.3f -d_lr:%.e -g_lr:%.e" % (
                i_epoch + 1, int(self.args.epochs), global_step, d_loss, g_loss, d_lr, g_lr)
            if i_batch > 0:
                last_train_str += (' -ETA:%ds' % util.cal_ETA(t_start, i_batch, n_batch))
            if (i_batch + 1) % 20 == 0 or i_batch == 0:
                tf.logging.info(last_train_str)

            # show fake_imgs
            if global_step % self.args.show_steps == 0:
                tf.logging.info('generating fake imgs in steps %d...' % global_step)
                # do ema
                if self.args.ema_decay is not None:
                    # save temp weights for generator
                    saver.save(sess, os.path.join(self.args.checkpoint_dir, 'temp_model.ckpt'))
                    sess.run(self.assign_vars, feed_dict={self.inputs: batch_imgs, self.z: batch_z,
                                                          self.is_training: False})
                    tf.logging.info('After EMA...')
                if z_fix is not None:
                    show_z = z_fix
                else:
                    show_z = truncated_norm.rvs([self.args.batch_size, 1, 1, self.args.z_dim])
                fake_imgs = sess.run(self.fake_images, feed_dict={self.z: show_z})
                manifold_h = int(np.floor(np.sqrt(self.args.sample_num)))
                util.save_images(fake_imgs, [manifold_h, manifold_h],
                                 image_path=os.path.join(self.args.result_dir,
                                                         'fake_steps_' + str(global_step) + '.jpg'))
                if self.args.ema_decay is not None:
                    # restore temp weights for generator
                    saver.restore(sess, os.path.join(self.args.checkpoint_dir, 'temp_model.ckpt'))
                    tf.logging.info('Recover weights over...')

        return global_step, self.d_loss_log, self.g_loss_log
[ "ccjdurandal422@163.com" ]
ccjdurandal422@163.com
874570360011745971d66be0be7251d3126ff31a
658b10bae84ea77a13d25273266170d499bdad59
/blog/migrations/0001_initial.py
45e06cefd195c38b8cf0542f61d8802de048fd2c
[]
no_license
du4ok/my-first-blog
9f1e117f41d4f8de4ccec82993d4498d4c464dd6
a5b4a3ab870fa17b9e67793c89811500be56ea0e
refs/heads/master
2020-07-20T17:46:00.119085
2016-11-14T20:31:22
2016-11-14T20:31:22
73,744,108
0
0
null
null
null
null
UTF-8
Python
false
false
717
py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-02 14:04
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='posts',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.CharField(max_length=30)),
                ('title', models.CharField(max_length=100)),
                ('bodytext', models.TextField()),
                ('timestamp', models.DateTimeField()),
            ],
        ),
    ]
[ "iriver87@gmail.com" ]
iriver87@gmail.com
a34e7441458961ad75e663aec37218d89eee0fd6
d400110ac8637883daa86aff7bce7fe49ad7f916
/option.py
f5dae12031a9a66329aae84b829bfd4ab96ab63d
[ "MIT" ]
permissive
zdddw/SISN-Face-Hallucination
59e7ba74f60c57e115bda26b15a7cdd543d7fef6
a36d189e6e890f3a01e2a027ec54eec7b2db23a0
refs/heads/main
2023-09-03T20:36:51.860967
2021-11-08T08:07:20
2021-11-08T08:07:20
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,711
py
import argparse


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--seed", type=int, default=1)

    # models
    parser.add_argument("--pretrain", type=str)
    parser.add_argument("--model", type=str, default="SISN")

    # augmentations
    parser.add_argument("--use_moa", action="store_true")
    parser.add_argument("--augs", nargs="*", default=["none"])
    parser.add_argument("--prob", nargs="*", default=[1.0])
    parser.add_argument("--mix_p", nargs="*")
    parser.add_argument("--alpha", nargs="*", default=[1.0])
    parser.add_argument("--aux_prob", type=float, default=1.0)
    parser.add_argument("--aux_alpha", type=float, default=1.2)

    # dataset
    parser.add_argument("--dataset_root", type=str, default="dataset/FFHQ/1024X1024")
    parser.add_argument("--dataset", type=str, default="FSR")
    parser.add_argument("--train_val_range", type=str, default="1-850/851-950")
    parser.add_argument("--scale", type=int, default=4)

    # training setups
    parser.add_argument("--lr", type=float, default=2e-4)
    parser.add_argument("--decay", type=str, default="25-50-75")
    parser.add_argument("--gamma", type=int, default=0.5)
    parser.add_argument("--patch_size", type=int, default=32)
    parser.add_argument("--batch_size", type=int, default=10)
    parser.add_argument("--max_steps", type=int, default=700000)
    parser.add_argument("--eval_steps", type=int, default=1000)
    parser.add_argument("--num_workers", type=int, default=2)
    parser.add_argument("--gclip", type=int, default=0)

    # misc
    parser.add_argument("--test_only", action="store_true")
    parser.add_argument("--save_result", action="store_true")
    parser.add_argument("--ckpt_root", type=str, default="./pt")
    parser.add_argument("--save_root", type=str, default="./output")

    return parser.parse_args()


def make_template(opt):
    opt.strict_load = opt.test_only
    opt.num_groups = 10
    opt.num_blocks = 10
    opt.num_channels = 64
    opt.reduction = 16
    opt.res_scale = 1.0
    opt.max_steps = 1000000
    opt.decay = "50-100-150-200-250-300-350-400"
    opt.gclip = 0.5 if opt.pretrain else opt.gclip

    # evaluation setup
    opt.crop = 6 if "FSR" in opt.dataset else 0
    opt.crop += opt.scale
    opt.eval_y_only = False

    # default augmentation policies
    if opt.use_moa:
        opt.augs = ["blend", "rgb", "mixup", "cutout", "cutmix", "cutmixup", "cutblur"]
        opt.prob = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
        opt.alpha = [0.6, 1.0, 1.2, 0.001, 0.7, 0.7, 0.7]
        opt.aux_prob, opt.aux_alpha = 1.0, 1.2
        opt.mix_p = None


def get_option():
    opt = parse_args()
    make_template(opt)
    return opt
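# Usage sketch (the entry-point script name is an assumption):
#   python train.py --use_moa --scale 4 --batch_size 10
# get_option() returns the parsed argparse.Namespace with the MoA
# augmentation policy and network hyper-parameters filled in by make_template().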
[ "noreply@github.com" ]
zdddw.noreply@github.com
a21f249ea161330a50ec05a4b83dc19d497cf0ba
c174895f234affe017233c64d90dd522b77aea46
/core_1024/items.py
29a4f00bd290f9661d10266ee66b1ef537477602
[]
no_license
starplanet/core_1024
316321efd7c8aa0cbd1b6a90e0efa41f1c2386ca
d83e618a3f01d08e9c06ff0c79a6c617f226c07a
refs/heads/master
2016-09-13T20:28:48.621859
2016-05-06T08:43:44
2016-05-06T08:43:44
58,192,243
0
0
null
null
null
null
UTF-8
Python
false
false
548
py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class Core1024Item(scrapy.Item):
    # define the fields for your item here like:
    page_url = scrapy.Field()
    title = scrapy.Field()
    name = scrapy.Field()
    av_girl = scrapy.Field()
    av_format = scrapy.Field()
    av_size = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
    download_url = scrapy.Field()
    image_path = scrapy.Field()
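# Usage sketch (field values are invented for illustration):
#   item = Core1024Item(title='example title', page_url='http://example.com/thread/1')
#   item['av_size'] = '1.2 GB'   # scrapy.Item also supports dict-style assignment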
[ "zhangjinjie@jinjiedeMacBook-Pro.local" ]
zhangjinjie@jinjiedeMacBook-Pro.local
fc788246df6e41781ddf6e555e8b07bfc9c393f7
4ac0643056138d9caf903b9ad051c75cf1447df6
/src/learn/__init__.py
47ea3bb4ca97dc7623be8755d2c7d20aca658fc8
[ "MIT" ]
permissive
ssd04/ml-project-template
361a5123518259020811f0b0a760b6e55ae81148
cea040176c620fa27b7537c7c9ced50a78fb591e
refs/heads/master
2023-08-25T18:34:57.973080
2021-10-26T21:23:05
2021-10-26T21:23:05
414,691,263
0
0
null
null
null
null
UTF-8
Python
false
false
717
py
from .classification.randomforest import RandomForest
from .classification.xgboost import XGBoost
from .regression.logistic import LogisticR
from .regression.xgboost import XGBoostR
# NOTE: the original file referenced Dummy below without importing it, which
# raises a NameError for alg == "dummy_classifier"; the import path here is an
# assumption about where such a wrapper would live.
from .classification.dummy import Dummy


class GetModel:

    @classmethod
    def get_model(cls, alg, conf=None):
        if alg == "random_forest":
            model = RandomForest(conf=conf)
        elif alg == "logistic_regression":
            model = LogisticR(conf=conf)
        elif alg == "dummy_classifier":
            model = Dummy(conf=conf)
        elif alg == "xgboost":
            model = XGBoost(conf=conf)
        elif alg == "xgboost_regressor":
            model = XGBoostR(conf=conf)
        else:
            raise ValueError(alg)
        return model
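# Usage sketch (conf keys are assumptions; they depend on each wrapper class):
#   model = GetModel.get_model("xgboost")
#   model = GetModel.get_model("logistic_regression", conf={"C": 1.0})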
[ "dariussuirab@gmail.com" ]
dariussuirab@gmail.com
77f295368703e4c0c9e03efee7b637781e2efdc9
17ed1551bbb4435b6d816408bb69eafb27c7cc6d
/growl-http-proxy.py
f0d9a9ed5fe38e8c708b8549e25685afdb332c93
[ "WTFPL" ]
permissive
brunobord/growl-http-proxy
8622a2dc20e0bbcbf5c7481c659ab517c83a8be9
4e575e1ae9d395055eab9c3ef7f693068de88c1a
refs/heads/master
2021-03-12T19:19:58.518156
2012-10-15T22:14:27
2012-10-15T22:14:27
6,206,709
2
0
null
null
null
null
UTF-8
Python
false
false
1,446
py
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import argparse
from flask import Flask, request, abort, render_template
import Growl
import os

app = Flask(__name__)

APP_NAME = 'growl-http-proxy'
NOTIFICATIONS = ['update']
DEFAULT_ICON_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static', 'icon.png')


def send_notification(title, message, sticky, icon, notification, priority):
    notifier = Growl.GrowlNotifier(APP_NAME, NOTIFICATIONS, applicationIcon=icon)
    notifier.register()
    notifier.notify(notification, title, message, sticky=sticky, priority=priority)


@app.route('/', methods=['POST'])
def send():
    data = request.json or request.form or {}
    if 'title' not in data or 'message' not in data:
        abort(400)  # Bad request
    sticky = data.get('sticky') or False
    icon_path = DEFAULT_ICON_PATH
    icon = Growl.Image.imageFromPath(icon_path)
    notification = data.get('notification') or 'update'
    priority = int(data.get('priority') or 1)
    send_notification(data.get('title'), data.get('message'),
                      sticky, icon, notification, priority)
    return 'Message sent\n'


@app.route('/', methods=['GET'])
def index():
    return render_template('index.html')


if __name__ == '__main__':
    parser = argparse.ArgumentParser('Growl HTTP Proxy')
    parser.add_argument('-d', '--debug', action="store_true", default=False)
    args = parser.parse_args()
    app.run(debug=args.debug)
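# Usage sketch (Flask's default port 5000 is assumed, since app.run() sets no port):
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"title": "Build", "message": "Tests passed", "priority": 2}' \
#        http://localhost:5000/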
[ "bruno@jehaisleprintemps.net" ]
bruno@jehaisleprintemps.net
d40d4c0886ebeb7c5e6c46de7f421799756c92b7
53fab060fa262e5d5026e0807d93c75fb81e67b9
/backup/user_318/ch23_2019_03_27_15_00_44_973644.py
e268beaac58b3819e4295d3aa5d048c89b2d4156
[]
no_license
gabriellaec/desoft-analise-exercicios
b77c6999424c5ce7e44086a12589a0ad43d6adca
01940ab0897aa6005764fc220b900e4d6161d36b
refs/heads/main
2023-01-31T17:19:42.050628
2020-12-16T05:21:31
2020-12-16T05:21:31
306,735,108
0
0
null
null
null
null
UTF-8
Python
false
false
223
py
def verifica_idade(x):
    if(x>20):
        print("Liberado EUA e BRASIL")  # cleared for both USA and Brazil
        return x
    if(x<21 and x>17):
        print("Liberado BRASIL")  # cleared for Brazil only
        return x
    if(x<18):
        print("Nao esta liberado")  # not cleared
        return x
[ "you@example.com" ]
you@example.com
a2ebf8c419bd0328ba823dd774d8ac2ef77bcca7
a2b7e42a51003e4d97addbc7b5e269f0e7cc2b52
/SMTP/smtp.py
9087c60d0a73075b99d3ee449106ff2a5a5d18b1
[]
no_license
aadamsaleem/Networking
9160219d4cd868fbce396324cec2797bf2d31dbf
a67976f53ac3009e2fedee138a27099c7ecf8eb3
refs/heads/master
2016-08-12T08:30:37.572085
2016-02-13T22:35:38
2016-02-13T22:35:38
51,668,474
0
0
null
null
null
null
UTF-8
Python
false
false
3,391
py
import socket
import base64

msg = "\r\n I love computer networks!"
endmsg = "\r\n.\r\n"

# Choose a mail server (e.g. Google mail server) and call it mailserver
#Fill in start
mailserver = "smtp.gmail.com"
port = 587
#Fill in end

# Create socket called clientSocket and establish a TCP connection with mailserver
#Fill in start
clientSocket = socket.socket()
clientSocket.connect((mailserver, port))
#Fill in end

recv = clientSocket.recv(1024)
print recv
if recv[:3] != '220':
    print '220 reply not received from server.'

# Send HELO command and print server response.
heloCommand = 'HELO Alice\r\n'
clientSocket.send(heloCommand)
recv1 = clientSocket.recv(1024)
print recv1
if recv1[:3] != '250':
    print '250 reply not received from server.'

#ADDITIONAL FOR GMAIL SERVER START
recipient = "<as9749@nyu.edu>"
sender = "<aadam.saleem@gmail.com>"
username = "aadam.saleem"
password = 'zenith2horizon'

#Request an encrypted connection
startTlsCommand = 'STARTTLS\r\n'
clientSocket.send(startTlsCommand)
tlsReply = clientSocket.recv(1024)
print tlsReply
if tlsReply[:3] != '220':
    print '220 reply not received from server'

#Encrypt the socket
sslClientSocket = socket.ssl(clientSocket)

#Send the AUTH LOGIN command and print server response.
authCommand = 'AUTH LOGIN\r\n'
sslClientSocket.write(authCommand)
authReply = sslClientSocket.read(1024)
print authReply
if authReply[:3] != '334':
    print '334 reply not received from server'

#Send username and print server response.
username = base64.b64encode(username) + '\r\n'
sslClientSocket.write(username)
usernameReply = sslClientSocket.read(1024)
print usernameReply
if usernameReply[:3] != '334':
    print '334 reply not received from server'

#Send password and print server response.
password = base64.b64encode(password) + '\r\n'
sslClientSocket.write(password)
passwordReply = sslClientSocket.read(1024)
print passwordReply
if passwordReply[:3] != '235':
    print '235 reply not received from server'
#ADDITIONAL FOR GMAIL SERVER END

# Send MAIL FROM command and print server response.
#Fill in start
mailFromCommand = 'MAIL FROM: ' + sender + '\r\n'
sslClientSocket.write(mailFromCommand)
reply = sslClientSocket.read(1024)
print reply
if reply[:3] != '250':
    print '250 reply not received from server.'
#Fill in end

# Send RCPT TO command and print server response.
#Fill in start
rcptToCommand = 'RCPT TO: ' + recipient + '\r\n'
sslClientSocket.write(rcptToCommand)
reply = sslClientSocket.read(1024)
print reply
if reply[:3] != '250':
    print '250 reply not received from server.'
#Fill in end

# Send DATA command and print server response.
#Fill in start
dataCommand = 'DATA\r\n'
sslClientSocket.write(dataCommand)
reply = sslClientSocket.read(1024)
print reply
if reply[:3] != '354':
    print '354 reply not received from server.'
#Fill in end

# Send message data.
#Fill in start
sslClientSocket.write(msg)
#Fill in end

# Message ends with a single period.
#Fill in start
sslClientSocket.write(endmsg)
reply = sslClientSocket.read(1024)
print reply
if reply[:3] != '250':
    print '250 reply not received from server.'
#Fill in end

# Send QUIT command and get server response.
#Fill in start
quitCommand = 'QUIT\r\n'
sslClientSocket.write(quitCommand)
reply = sslClientSocket.read(1024)
print reply
if reply[:3] != '221':
    print '221 reply not received from server.'

clientSocket.close()
#Fill in end
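The script above drives the SMTP dialogue by hand in Python 2 (socket.ssl has long since been removed from the standard library). For comparison, a sketch of the same exchange via the standard-library smtplib on Python 3 — the addresses and credential below are placeholders:

# Standard-library equivalent (Python 3); all account details are placeholders.
import smtplib

server = smtplib.SMTP("smtp.gmail.com", 587)
server.starttls()  # upgrade the connection, as the manual STARTTLS does above
server.login("user@example.com", "app-password")
server.sendmail("user@example.com", "rcpt@example.com",
                "Subject: test\r\n\r\nI love computer networks!")
server.quit()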
[ "aadam_saleem@yahoo.co.in" ]
aadam_saleem@yahoo.co.in
971c56d879a5de0f10578db694c2688ca82f4d73
87a83b426988e5d0762a48cffb73fe48deb985d9
/roadrepair.py
9b996b5d99df7e0dd25ffbaeb7963240f4d4db23
[]
no_license
biswassampad/coding_challenges
82049f31ffc87ffc8bd3d920cb0983e1ea711cf8
2a1d0e70231a7553b2c8093209741332df14b4be
refs/heads/master
2022-12-31T21:54:12.358748
2020-10-26T07:17:49
2020-10-26T07:17:49
307,289,888
0
0
null
null
null
null
UTF-8
Python
false
false
284
py
def main():
    crew_id = int(input("get the crewId"))
    job_id = int(input("get the task id"))
    distance = getMinCost(crew_id, job_id)
    print('success')


def getMinCost(crew_id, job_id):
    distance = job_id - crew_id
    return distance


if __name__ == "__main__":
    main()
[ "biswa.satpathy@iserveu.in" ]
biswa.satpathy@iserveu.in
3f982e8a36a779567542f4c382cd555febeef961
ed10dc841d5b4f6a038e8f24f603750992d9fae9
/lldb/test/API/lang/objc/foundation/TestFoundationDisassembly.py
bf9a40fc8da9b49c77e740cb835ab78aef313bfc
[ "NCSA", "Apache-2.0", "LLVM-exception" ]
permissive
WYK15/swift-Ollvm10
90c2f0ade099a1cc545183eba5c5a69765320401
ea68224ab23470963b68dfcc28b5ac769a070ea3
refs/heads/main
2023-03-30T20:02:58.305792
2021-04-07T02:41:01
2021-04-07T02:41:01
355,189,226
5
0
null
null
null
null
UTF-8
Python
false
false
5,449
py
""" Test the lldb disassemble command on foundation framework. """ import unittest2 import os import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil @skipUnlessDarwin class FoundationDisassembleTestCase(TestBase): mydir = TestBase.compute_mydir(__file__) NO_DEBUG_INFO_TESTCASE = True @expectedFailureDarwin('rdar://problem/54977700') @skipIfAsan def test_foundation_disasm(self): """Do 'disassemble -n func' on each and every 'Code' symbol entry from the Foundation.framework.""" self.build() # Enable synchronous mode self.dbg.SetAsync(False) # Create a target by the debugger. target = self.dbg.CreateTarget(self.getBuildArtifact("a.out")) self.assertTrue(target, VALID_TARGET) # Now launch the process, and do not stop at entry point. process = target.LaunchSimple( None, None, self.get_process_working_directory()) self.assertTrue(process, PROCESS_IS_VALID) foundation_framework = None for module in target.modules: if module.file.basename == "Foundation": foundation_framework = module.file.fullpath break self.assertTrue( foundation_framework is not None, "Foundation.framework path located") self.runCmd("image dump symtab '%s'" % foundation_framework) raw_output = self.res.GetOutput() # Now, grab every 'Code' symbol and feed it into the command: # 'disassemble -n func'. # # The symbol name is on the last column and trails the flag column which # looks like '0xhhhhhhhh', i.e., 8 hexadecimal digits. codeRE = re.compile(r""" \ Code\ {9} # ' Code' followed by 9 SPCs, .* # the wildcard chars, 0x[0-9a-f]{8} # the flag column, and \ (.+)$ # finally the function symbol. """, re.VERBOSE) for line in raw_output.split(os.linesep): match = codeRE.search(line) if match: func = match.group(1) self.runCmd('image lookup -s "%s"' % func) self.runCmd('disassemble -n "%s"' % func) @skipIfAsan def test_simple_disasm(self): """Test the lldb 'disassemble' command""" self.build() # Create a target by the debugger. target = self.dbg.CreateTarget(self.getBuildArtifact("a.out")) self.assertTrue(target, VALID_TARGET) # Stop at +[NSString stringWithFormat:]. symbol_name = "+[NSString stringWithFormat:]" break_results = lldbutil.run_break_set_command( self, "_regexp-break %s" % (symbol_name)) lldbutil.check_breakpoint_result( self, break_results, symbol_name=symbol_name, num_locations=1) # Stop at -[MyString initWithNSString:]. lldbutil.run_break_set_by_symbol( self, '-[MyString initWithNSString:]', num_expected_locations=1, sym_exact=True) # Stop at the "description" selector. lldbutil.run_break_set_by_selector( self, 'description', num_expected_locations=1, module_name='a.out') # Stop at -[NSAutoreleasePool release]. break_results = lldbutil.run_break_set_command( self, "_regexp-break -[NSAutoreleasePool release]") lldbutil.check_breakpoint_result( self, break_results, symbol_name='-[NSAutoreleasePool release]', num_locations=1) self.runCmd("run", RUN_SUCCEEDED) # First stop is +[NSString stringWithFormat:]. self.expect( "thread backtrace", "Stop at +[NSString stringWithFormat:]", substrs=["Foundation`+[NSString stringWithFormat:]"]) # Do the disassemble for the currently stopped function. self.runCmd("disassemble -f") self.runCmd("process continue") # Skip another breakpoint for +[NSString stringWithFormat:]. self.runCmd("process continue") # Followed by a.out`-[MyString initWithNSString:]. 
self.expect( "thread backtrace", "Stop at a.out`-[MyString initWithNSString:]", substrs=["a.out`-[MyString initWithNSString:]"]) # Do the disassemble for the currently stopped function. self.runCmd("disassemble -f") self.runCmd("process continue") # Followed by -[MyString description]. self.expect("thread backtrace", "Stop at -[MyString description]", substrs=["a.out`-[MyString description]"]) # Do the disassemble for the currently stopped function. self.runCmd("disassemble -f") self.runCmd("process continue") # Skip another breakpoint for -[MyString description]. self.runCmd("process continue") # Followed by -[NSAutoreleasePool release]. self.expect("thread backtrace", "Stop at -[NSAutoreleasePool release]", substrs=["Foundation`-[NSAutoreleasePool release]"]) # Do the disassemble for the currently stopped function. self.runCmd("disassemble -f")
[ "wangyankun@ishumei.com" ]
wangyankun@ishumei.com
c46b8a2458a636eea1bde0cc9df07da0126d1e1c
0c13891448e6c3136e2f651c776d1d11edee2577
/src/template_method.py
91d30a13953d467cfa30df4a7500ae59f97997f2
[ "MIT" ]
permissive
MrRezoo/design-patterns-python
31cb7b73ae05c5bd361eb3455df234c20529f465
8f8e2501ad8e05f1a75ce5be659d926c0ec99698
refs/heads/master
2023-08-01T22:01:01.186910
2021-10-02T07:57:49
2021-10-02T07:57:49
349,936,987
8
1
MIT
2021-04-07T14:55:10
2021-03-21T08:13:44
Python
UTF-8
Python
false
false
1,178
py
""" Behavioral pattern: Template method Example: when we have static job between several classes use one ABC class """ from abc import ABC, abstractmethod class Top(ABC): def template_method(self): self.first_common() self.second_common() self.third_require() self.fourth_require() self.hook() def first_common(self): print('I am first common...') def second_common(self): print('I am second common') @abstractmethod def third_require(self): pass @abstractmethod def fourth_require(self): pass def hook(self): pass class One(Top): def third_require(self): print('This is Third require from One...') def fourth_require(self): print('This is Fourth require from One...') def hook(self): print('This is Hook from One') class Two(Top): def third_require(self): print('This is Third require from Two...') def fourth_require(self): print('This is Fourth require from Two...') def client(class_): class_.template_method() if __name__ == '__main__': client(Two())
[ "rezam578@gmail.com" ]
rezam578@gmail.com
4e49caf60862a2ba3e950e0b6ddc11b9b60b362c
d82c5af93dca23ee5b94458886e904c0fec6d9be
/appStatus/apps.py
77b478ffcae53c40e159584871c57699c7a060c1
[]
no_license
saiken86807/college-application-portal
ec6b5280f39c2da4063a60cf09a195cc73f9edee
c1f88809b90f70b8862c6d1b599c75aded0de1d7
refs/heads/master
2022-12-27T09:01:53.309254
2020-09-21T00:04:34
2020-09-21T00:04:34
290,922,203
0
0
null
null
null
null
UTF-8
Python
false
false
93
py
from django.apps import AppConfig


class AppstatusConfig(AppConfig):
    name = 'appStatus'
[ "stefanieaiken@Stefanies-MacBook-Air.local" ]
stefanieaiken@Stefanies-MacBook-Air.local
05a6db46acd058f904eeb21596b3a1e9c2fec67c
62d489ced99e830920da02a0ba62572f144833cd
/Ques_21.py
122805da78c4a6a67adf44ea9bf2a8962c96565d
[]
no_license
HarshSharma12/ProjectEulerPython
e8cc11d44a4ea917b5e7669667161aa1c635a0cd
c300a3d34fc99a0aa08047f1195eceaefd7d68c9
refs/heads/master
2016-09-06T17:07:23.183383
2014-10-12T13:56:51
2014-10-12T13:56:51
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,252
py
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 11 12:48:17 2014

@author: Harsh Sharma

Amicable numbers
Problem 21
Published on Friday, 5th July 2002, 06:00 pm; Solved by 74130

Let d(n) be defined as the sum of proper divisors of n (numbers less than n
which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and
each of a and b are called amicable numbers.

For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44,
55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4,
71 and 142; so d(284) = 220.

Evaluate the sum of all the amicable numbers under 10000.

Answer = 31626
"""
from math import sqrt


def divisors(n):
    sqRoot = int(sqrt(n))
    arr = [1]
    for i in range(2, sqRoot+1):
        if(n % i == 0):
            arr.append(i)
            if i != n/i:  # avoid double-counting the root of perfect squares
                arr.append(n/i)
    arr.sort()
    return arr


def divSum(n):
    d = divisors(n)
    x = 0
    for i in d:
        x += i
    return x


total = 0
for i in range(1, 10000):
    newNum = divSum(i)
    if (divSum(newNum) == i and newNum != i):
        total += i
        print 'New = ', newNum
        print 'Original = ', i
        print 'Sum = ', total
        print '\n'

print 'Answer = ', total
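A quick spot-check of divSum against the pair quoted in the problem statement:

# Sanity check with the documented amicable pair (220, 284).
assert divSum(220) == 284
assert divSum(284) == 220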
[ "mail.hs.harsh@gmail.com" ]
mail.hs.harsh@gmail.com
df2331761715f4d8f86e9f4e234a7697f4d42221
b3e95a6c6f7ce283c82c6357d2137ee7db3dbe11
/hamming/hamming.py
7e206bff426b98dda6e0689cf3f076f32fb778ec
[]
no_license
tarunkant/exercism_python
f3338353fc885c508f976bcb7ccd1565c7439522
1772acb5bc500ffa4cadc4be967f5bbd980b6295
refs/heads/master
2021-01-12T01:51:23.186012
2017-03-29T12:47:44
2017-03-29T12:47:44
78,438,642
1
0
null
null
null
null
UTF-8
Python
false
false
382
py
str1 = raw_input("enter alphabets for comparing ")
str2 = raw_input("enter alphabets from which you want to compare ")
count = 0

# Walk the overlapping prefix of the two strings and count matching
# positions. The original iterated over len(str2) in both branches, which
# raised IndexError whenever str1 was the shorter string, and its else
# branches only reassigned the loop variable to no effect.
if len(str1) >= len(str2):
    for i in range(len(str2)):
        if str1[i] == str2[i]:
            count += 1
else:
    for i in range(len(str1)):
        if str1[i] == str2[i]:
            count += 1

print "Counting of alphabet which are same is: ", count
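The same match count can be expressed with zip, which clips to the shorter string automatically — a Python 3 sketch:

# Count positions where the two strings agree (Python 3 syntax);
# zip stops at the end of the shorter string, so no length check is needed.
def matching_positions(a, b):
    return sum(x == y for x, y in zip(a, b))

print(matching_positions("toned", "roses"))  # prints 2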
[ "tarunkant05@gmail.com" ]
tarunkant05@gmail.com
7259ca3438f85859971f088f4b36164e0b9ba675
38d39a3c50b161e03599456ce8fb335bc3fb1b6e
/create-cpp-data.py
2e5f9fa2a9091c93aca63c7866e22368c1eb757d
[ "LicenseRef-scancode-public-domain", "CC0-1.0" ]
permissive
rymis/tz.js
6e159e85c714286927eb55f52a7bb55d1014dcc4
a1136c58fec5fe9c389b073b62d2296fbb5fe6a6
refs/heads/master
2020-07-14T18:22:01.711073
2019-09-06T15:13:01
2019-09-06T15:13:01
205,372,623
0
0
NOASSERTION
2019-09-06T15:13:02
2019-08-30T11:53:28
Python
UTF-8
Python
false
false
2,612
py
#!/usr/bin/env python

import json
import os
import sys
import shutil

INPUT_DIR = os.path.abspath(os.path.dirname(sys.argv[0]))
OUTPUT_DIR = os.path.join(INPUT_DIR, "output")
DATA_JSON = os.path.join(OUTPUT_DIR, "data.json")
INPUT_CPP = os.path.join(INPUT_DIR, "tzcpp.cpp.in")
OUTPUT_CPP = os.path.join(OUTPUT_DIR, "tzcpp.cpp")
INPUT_H = os.path.join(INPUT_DIR, "tzcpp.h")
OUTPUT_H = os.path.join(OUTPUT_DIR, "tzcpp.h")


def build_cpp(version, tz_version, zones, links):
    # Process zones
    zones_list = []
    rules_list = []
    zones_last = 0
    db_list = []
    timezones_list = []
    rules_map = { }
    rule_names = { }

    def rule_str(t):
        res = ' { %d, "%s", %s },' % (t["o"], t["a"], str(t["d"]).lower())
        if t["a"] not in rule_names:
            rule_names[t["a"]] = set()
        rule_names[t["a"]].add(res)
        return res

    def rule_idx(r):
        s = rule_str(r)
        if s not in rules_map:
            rules_map[s] = len(rules_list)
            rules_list.append(s)
        return rules_map[s]

    for nm, zone in zones.items():
        begin = zones_last
        zones_list.append("// %s" % nm)
        for idx, t in zip(zone["ltidx"], zone["times"]):
            zones_last += 1
            zones_list.append(' { %d, %d },' % (rule_idx(zone["types"][idx]), t))
        db_list.append(' { "%s", %d },' % (nm, len(timezones_list)))
        timezones_list.append(' { %d, %d, "%s", "%s" },' % (begin, zones_last, zone["rule"], nm))

    with open(INPUT_CPP, "rb") as cpp_in:
        cpp_in_source = cpp_in.read()

    cpp_in_source = cpp_in_source.replace("// @ZONE_RULES@", "\n".join(rules_list))
    cpp_in_source = cpp_in_source.replace("// @ZONES@", "\n".join(zones_list))
    cpp_in_source = cpp_in_source.replace("// @TIMEZONES@", "\n".join(timezones_list))
    cpp_in_source = cpp_in_source.replace("// @DATABASE@", "\n".join(db_list))
    cpp_in_source = cpp_in_source.replace("// @LINKS@", "\n".join([ ' { "%s", "%s" },' % (k, v) for k, v in links.items() ]))
    cpp_in_source = cpp_in_source.replace("@VERSION@", version)
    cpp_in_source = cpp_in_source.replace("@DATA_VERSION@", tz_version)

    return cpp_in_source


if __name__ == '__main__':
    data_json = json.load(sys.stdin)
    links = data_json["links"]
    zones = data_json["zones"]
    version = data_json["version"]
    tz_version = data_json["tzversion"]

    cpp_source = build_cpp(version, tz_version, zones, links)

    with open(OUTPUT_CPP, "wb") as cpp_out:
        cpp_out.write(cpp_source)

    shutil.copy(INPUT_H, OUTPUT_H)
[ "mikhail.ryzhov@kiwi.com" ]
mikhail.ryzhov@kiwi.com
1b7268552686962f7e73c57ce2e1e80d69a6e9a8
bfbe8a27ce6f46a7f2d03731b1de1e80cc6056c9
/projects/programmers/easy/x_만큼_간격이_있는_n_개의_숫자.py
1e54da1f59097679b655cb4c4b31a202adbfe45a
[]
no_license
paige0701/algorithms-and-more
95175a18fd9d4a41659c50e3c5e314fe2bb23b8b
763a4009f8fa87c24552b5e77375c72896672b58
refs/heads/master
2021-06-11T04:19:29.559758
2021-04-07T11:29:01
2021-04-07T11:29:01
184,376,577
0
0
null
null
null
null
UTF-8
Python
false
false
191
py
def test(a, b):
    print([a*i for i in range(1, b+1)])


def main():
    a = int(input('input a: \n'))
    b = int(input('input b: \n'))
    test(a, b)


if __name__ == '__main__':
    main()
[ "paigechoi0701@gmail.com" ]
paigechoi0701@gmail.com