Dataset schema (one row per source file; ⌀ marks a column that contains nulls):

| column | dtype | stats |
| --- | --- | --- |
| blob_id | string | length 40-40 |
| directory_id | string | length 40-40 |
| path | string | length 2-616 |
| content_id | string | length 40-40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40-40 |
| revision_id | string | length 40-40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M ⌀ |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1-1 |
| author_id | string | length 0-212 |
---
blob_id: 2c01c22fde0bafa7b2cf2ea3083d8131e8b95ac9 | directory_id: 42b94a81ab09a41dc0b4d4607405a8af1fbd61f2
path: /send_keys_from_image.py
content_id: 729601c3dadd3f38644179b37378c4a4c8b178e9
detected_licenses: [] | license_type: no_license
repo_name: solominh/Python-SendKeysFromImage
snapshot_id: e88ca725c5402751850e745c8b4ce7b0fa80e035 | revision_id: 40b2a8157d49a59a7a6e192e3ed8daf58d437650
branch_name: refs/heads/master
visit_date: 2021-01-12T01:18:21.297572 | revision_date: 2017-01-09T09:04:28 | committer_date: 2017-01-09T09:04:28
github_id: 78368421 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1195 | extension: py
content:
import keyboard
import pyautogui
import cv2
from get_image_region import draw_region
from subprocess import call


def main():
    # Take screenshot
    screenshot_path = './screenshot.png'
    pyautogui.screenshot(screenshot_path)

    # Let the user draw the region of interest
    image = cv2.imread(screenshot_path)
    ref_points = draw_region(image)
    print(ref_points)

    # Sanity check
    if not ref_points:
        print('Region not selected')
        return False

    # Save cropped image
    cropped_image = image[ref_points['topleft'][1]:ref_points['bottomright'][1],
                          ref_points['topleft'][0]:ref_points['bottomright'][0]]
    cv2.imwrite(screenshot_path, cropped_image)

    # Convert image to text; tesseract appends '.txt' to the output base name
    text_output_path = './text_from_image'
    call(["tesseract", screenshot_path, text_output_path])
    return True


def send_keys():
    with open('./text_from_image.txt') as f:
        first_time = True
        for line in f:
            cleaned_line = line.strip()
            if first_time:
                first_time = False
            else:
                cleaned_line = ' ' + cleaned_line
            print(cleaned_line)
            keyboard.write(cleaned_line)


if __name__ == '__main__':
    # Entry point (not present in the snippet as stored): OCR the
    # selected region, then type out the recognized text.
    if main():
        send_keys()
authors: ["hoangminhftu2@gmail.com"]
author_id: hoangminhftu2@gmail.com
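The get_image_region module imported above is not part of this record. A minimal stand-in, assuming draw_region takes a BGR image and returns {'topleft': (x, y), 'bottomright': (x, y)} (the shape main() indexes), could be built on OpenCV's selectROI:

```python
# Hypothetical sketch of get_image_region.draw_region; the return shape is
# inferred from how main() uses ref_points above.
import cv2

def draw_region(image):
    x, y, w, h = cv2.selectROI('select region', image)  # drag a box, press ENTER
    cv2.destroyAllWindows()
    if w == 0 or h == 0:
        return None  # nothing selected
    return {'topleft': (x, y), 'bottomright': (x + w, y + h)}
```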
---
blob_id: 607f0c745c7df74bf1cbfc3ebac73ac5b92debb3 | directory_id: 8d03310627f1f625eddda8f4a3e680892872e0ec
path: /batemaneq/__init__.py
content_id: 09ee7bcfa329b2d98875fd9beb5ea50bbdbf1f40
detected_licenses: ["LicenseRef-scancode-unknown-license-reference", "BSD-2-Clause"] | license_type: permissive
repo_name: Rolleroo/batemaneq
snapshot_id: 4da15e4bff32484d27ea9dc2b3338edc4956b0df | revision_id: bd8c24d1f77ccb166b3210d81d9468f7789813ad
branch_name: refs/heads/master
visit_date: 2021-02-05T12:43:40.639427 | revision_date: 2020-02-23T20:47:48 | committer_date: 2020-02-23T20:47:48
github_id: 243781711 | star_events_count: 1 | fork_events_count: 0
gha_license_id: BSD-2-Clause | gha_event_created_at: 2020-02-28T14:31:36 | gha_created_at: 2020-02-28T14:31:36 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 356 | extension: py
content:
# -*- coding: utf-8 -*-
"""
batemaneq provides a Python package for evaluating the Bateman equation
"""
from __future__ import absolute_import
from ._release import __version__
from .bateman import bateman_parent, bateman_full
from ._bateman_double import bateman_parent as bateman_parent_arr
from ._bateman_double import bateman_full as bateman_full_arr
authors: ["bjodah@gmail.com"]
author_id: bjodah@gmail.com
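For reference, the re-exported functions evaluate the standard Bateman solution for a linear decay chain: with decay constants λ1…λn and N1(0) parent nuclei at t = 0 (formula quoted from the general literature, not from the package docs, whose exact signatures live in bateman.py and are not part of this record):

$$
N_n(t) = \frac{N_1(0)}{\lambda_n} \sum_{i=1}^{n} \lambda_i c_i e^{-\lambda_i t},
\qquad
c_i = \prod_{j=1,\ j \neq i}^{n} \frac{\lambda_j}{\lambda_j - \lambda_i}
$$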
---
blob_id: cf305096c429662844324796ad09b0d2a05ed8ed | directory_id: 8dcb50552fa9ace220a1e283ffdd9b490cbd3e2c
path: /code/feature_converter.py
content_id: 1b8019bbe4fb255fc36d3259ed356c1fcd5e3902
detected_licenses: ["MIT"] | license_type: permissive
repo_name: katsu1110/DataScienceComp
snapshot_id: 00483505a3671801f8bb45db67aeb0fc5d960e73 | revision_id: 86edf7cab0af372624bba7f3026a3db32e7cd1d6
branch_name: refs/heads/main
visit_date: 2021-07-05T01:15:24.940683 | revision_date: 2021-06-12T05:45:08 | committer_date: 2021-06-12T05:45:08
github_id: 243942482 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 2172 | extension: py
content:
import numpy as np
import pandas as pd
from sklearn.preprocessing import PowerTransformer
# categorize features into dense or categorical
def categorize_features(df, target, cat_threshold=12):
cat_features = []
dense_features = []
for f in df.columns.values.tolist():
if f != target:
if (df[f].dtype == "object") | (df[f].dtype == "bool") | (df[f].nunique() <= cat_threshold):
cat_features.append(f)
else:
dense_features.append(f)
features = dense_features + cat_features
print(f"There are {len(features)} features.")
print(f"There are {len(dense_features)} dense features.")
print(f"There are {len(cat_features)} categorical features.")
return features, dense_features, cat_features
# one-hot encoding nan
def nan2onehot(df, features):
isnan_features = []
for f in features:
if df[f].isna().sum() > len(df) * 0.05:
df[f + "_isnan"] = np.zeros(len(df))
df.loc[(df[f].isna().values == True), f + "_isnan"] = 1
isnan_features.append(f + "_isnan")
return df, isnan_features
# outlier remover
def clipper(df, features):
p01 = df[features].quantile(0.01)
p99 = df[features].quantile(0.99)
df[features] = df[features].clip(p01, p99, axis=1)
return df
# to normal dist
def to_normal(train, test, features, method="yeo-johnson"):
# method can be box-cox
pt = PowerTransformer(method=method)
train[features] = pt.fit_transform(train[features])
test[features] = pt.transform(test[features])
return train, test
# remove correlated features
def remove_correlated_features(df, features, threshold=0.999):
counter = 0
to_remove = []
for feat_a in features:
for feat_b in features:
if feat_a != feat_b and feat_a not in to_remove and feat_b not in to_remove:
c = np.corrcoef(df[feat_a], df[feat_b])[0][1]
if c > threshold:
counter += 1
to_remove.append(feat_b)
print('{}: FEAT_A: {} FEAT_B: {} - Correlation: {}'.format(counter, feat_a, feat_b, c))
return to_remove
authors: ["kakawagu@yahoo-corp.jp"]
author_id: kakawagu@yahoo-corp.jp
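A small self-contained run of the helpers above on synthetic data (the column names and values are made up for illustration):

```python
import numpy as np
import pandas as pd

np.random.seed(0)
df = pd.DataFrame({
    "target": np.random.rand(100),
    "color": np.random.choice(["red", "green", "blue"], 100),  # low cardinality -> categorical
    "height": np.random.randn(100) * 10 + 170,                 # continuous -> dense
})
df.loc[df.sample(frac=0.2).index, "height"] = np.nan           # >5% missing

features, dense, cat = categorize_features(df, target="target")
df, isnan_features = nan2onehot(df, features)    # adds a 'height_isnan' indicator
df = clipper(df, dense)                          # winsorize dense columns at p01/p99
to_drop = remove_correlated_features(df, dense)  # nothing to drop in this toy frame
```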
---
blob_id: 124c2cc003bb282b7925b04f202d86175c3110a7 | directory_id: 5d01c0abf77d19c224d0886c461c42dbad496171
path: /python_stack/python/OOP/2_Chaining_Methods.py
content_id: c3f6540c14112cee827ca452150a1fc642ffc8f4
detected_licenses: [] | license_type: no_license
repo_name: Jacedeuce/coding_dojo
snapshot_id: 0c1cc1a12b422b3f79047274198fefbc0368127f | revision_id: 6d1cf2d054cb2562cd5e0eb51357a32faf49ddfe
branch_name: refs/heads/master
visit_date: 2022-12-12T22:47:43.266440 | revision_date: 2019-07-18T23:09:20 | committer_date: 2019-07-18T23:09:20
github_id: 190056454 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2022-12-12T12:55:33 | gha_created_at: 2019-06-03T18:05:19 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1246 | extension: py
content:
## Reuses the chaining-ready methods (each returns self) written for the previous assignment
class User:
def __init__(self, username, email_address):
self.name = username
self.email = email_address
self.account_balance = 0
def make_deposit(self, amount):
self.account_balance += amount
return self
def make_withdrawal(self, amount):
self.account_balance -= amount
return self
def display_user_balance(self):
print(self.name, ":", self.account_balance)
return self
def transfer_money(self, to_user, amount):
self.account_balance -= amount
to_user.make_deposit(amount)
self.display_user_balance()
to_user.display_user_balance()
return self
jason = User("jason", "jason@codingdojo.com")
jacob = User("jake", "jake@codingdojo.com")
brock = User("brock", "brock@codingdojo.com")
jason.make_deposit(10).make_deposit(20).make_deposit(30).make_withdrawal(10).display_user_balance()
jacob.make_deposit(10).make_deposit(20).make_withdrawal(10).make_withdrawal(20).display_user_balance()
brock.make_deposit(10).make_withdrawal(10).make_withdrawal(20).make_withdrawal(30).display_user_balance()
jason.transfer_money(brock, 50)
authors: ["jdhanna@gmail.com"]
author_id: jdhanna@gmail.com
---
blob_id: 945d9be3f8c30181dec6d9d90930e60f95885cd4 | directory_id: b36d169e1353752486441636255d435568ca307d
path: /spikeforest/forestview/spikeforest_view_launchers.py
content_id: 799752c978e88f723fc1bfd9313fe29c641c09d9
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: alexmorley/spikeforest2
snapshot_id: 5d211595f744ed755eea9b9376e31ed6f9a4da12 | revision_id: 859fc8b843cc7b547ab83d3e0a43bb17230d09b1
branch_name: refs/heads/master
visit_date: 2020-04-23T03:40:08.076097 | revision_date: 2019-06-12T08:11:06 | committer_date: 2019-06-12T08:11:06
github_id: 170885069 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2019-02-15T15:21:47 | gha_created_at: 2019-02-15T15:21:46 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 11809 | extension: py
content:
from .spikeforest_views.currentstateview import CurrentStateView
from .spikeforest_views.recordingtableview import RecordingTableView, RecordingSelectComponent
from .spikeforest_views.aggregatedsortingresultstableview import AggregatedSortingResultsTableView
from .recording_views.electrodegeometryview import ElectrodeGeometryView
from .recording_views.timeseriesview import TimeseriesView
from .recording_views.templatesview import TemplatesView
from .recording_views.recordingsummaryview import RecordingSummaryView
from .recording_views.unitstableview import UnitsTableView
from .recording_views.sortingresultstableview import SortingResultsTableView, SortingResultSelectComponent
from .recording_views.sortingresultdetailview import SortingResultDetailView
from .recording_views.featurespaceview import FeatureSpaceView
from .recording_views.clusterview import ClusterView
import vdomr as vd
from mountaintools import client as mt
import json
def get_spikeforest_view_launchers(context):
launchers = []
groups = []
ret = dict(
groups=groups,
launchers=launchers
)
# General
groups.append(dict(name='general', label=''))
launchers.append(dict(
group='general', name='recording-table', label='Recording table',
view_class=RecordingTableView,
context=context, opts=dict(),
enabled=True
))
launchers.append(dict(
group='general', name='current-state', label='Current state',
view_class=CurrentStateView,
context=context, opts=dict(),
enabled=True
))
# MEDIUM TODO: this should be a component rather than a launcher
launchers.append(dict(
group='general', name='recording-select',
component_class=RecordingSelectComponent,
context=context, opts=dict(),
enabled=True
))
recording_context = context.recordingContext(context.currentRecordingId())
# Aggregated sorting results
if context.hasAggregatedSortingResults():
groups.append(dict(name='aggregated_sorting_results', label='Aggregated results'))
launchers.append(dict(
group='aggregated_sorting_results', name='aggregated-results-table', label='Results table',
view_class=AggregatedSortingResultsTableView,
context=context, opts=dict(),
always_open_new=False,
enabled=True
))
# Recording
if recording_context:
groups.append(dict(name='recording', label='Recording', sublabel=context.currentRecordingId()))
launchers.append(dict(
group='recording', name='recording-summary', label='Recording summary',
view_class=RecordingSummaryView,
context=recording_context, opts=dict(),
always_open_new=True,
enabled=(recording_context is not None)
))
launchers.append(dict(
group='recording', name='electrode-geometry', label='Electrode geometry',
view_class=ElectrodeGeometryView,
context=recording_context, opts=dict(),
enabled=(recording_context is not None)
))
launchers.append(dict(
group='recording', name='timeseries', label='Timeseries',
view_class=TimeseriesView,
context=recording_context, opts=dict(),
always_open_new=True,
enabled=(recording_context is not None)
))
if recording_context.hasIntraRecording():
launchers.append(dict(
group='recording', name='intra-timeseries', label='Intra-timeseries',
view_class=TimeseriesView,
context=recording_context.intraRecordingContext(),
always_open_new=True,
enabled=(recording_context is not None)
))
# True sorting
if recording_context and recording_context.trueSortingContext():
true_sorting_context = recording_context.trueSortingContext()
groups.append(dict(name='true-sorting', label='True sorting'))
launchers.append(dict(
group='true-sorting', name='true-templates', label='Templates',
view_class=TemplatesView,
context=true_sorting_context, opts=dict(),
always_open_new=True,
enabled=(true_sorting_context is not None)
))
launchers.append(dict(
group='true-sorting', name='true-units-info', label='Units info',
view_class=UnitsTableView,
context=true_sorting_context, opts=dict(),
always_open_new=True,
enabled=(true_sorting_context is not None)
))
launchers.append(dict(
group='true-sorting', name='feature-space', label='Feature space',
view_class=FeatureSpaceView,
context=true_sorting_context, opts=dict(),
always_open_new=True,
enabled=(len(true_sorting_context.selectedUnitIds()) > 0)
))
launchers.append(dict(
group='true-sorting', name='clusters', label='Clusters',
view_class=ClusterView,
context=true_sorting_context, opts=dict(),
always_open_new=True,
enabled=(len(true_sorting_context.selectedUnitIds()) > 0)
))
        # dict(name='unit', label='Unit')  # stray no-op expression, disabled: it built a dict and discarded it
launchers.append(dict(
group='true-sorting', name='test', label='Test',
view_class=TemplatesView,
context=true_sorting_context, opts=dict(),
always_open_new=True,
enabled=(true_sorting_context.currentUnitId() is not None)
))
# Sorting results
if recording_context and (len(recording_context.sortingResultNames()) > 0):
groups.append(dict(name='sorting-results', label='Sorting results'))
launchers.append(dict(
group='sorting-results', name='sorting-results-table', label='Sorting results table',
view_class=SortingResultsTableView,
context=recording_context, opts=dict(),
always_open_new=True,
enabled=(len(recording_context.sortingResultNames()) > 0)
))
launchers.append(dict(
group='sorting-results', name='sorting-result-select',
component_class=SortingResultSelectComponent,
context=recording_context, opts=dict(),
always_open_new=True,
enabled=(len(recording_context.sortingResultNames()) > 0)
))
# Sorting result
if recording_context and recording_context.currentSortingResult():
srname = recording_context.currentSortingResult()
sorting_result_context = recording_context.sortingResultContext(srname)
groups.append(dict(name='sorting-result', label='Sorting result', sublabel=srname))
launchers.append(dict(
group='sorting-result', name='sorting-result-details', label='Details',
view_class=SortingResultDetailView,
context=sorting_result_context, opts=dict(),
always_open_new=True,
enabled=(sorting_result_context is not None)
))
launchers.append(dict(
group='sorting-result', name='templates', label='Templates',
view_class=TemplatesView,
context=sorting_result_context, opts=dict(),
always_open_new=True,
enabled=(sorting_result_context is not None)
))
launchers.append(dict(
group='sorting-result', name='units-info', label='Units info',
view_class=UnitsTableView,
context=sorting_result_context, opts=dict(),
always_open_new=True,
enabled=(sorting_result_context is not None)
))
launchers.append(dict(
group='sorting-result', name='feature-space', label='Feature space',
view_class=FeatureSpaceView,
context=sorting_result_context, opts=dict(),
always_open_new=True,
enabled=(len(sorting_result_context.selectedUnitIds()) > 0)
))
launchers.append(dict(
group='sorting-result', name='clusters', label='Clusters',
view_class=ClusterView,
context=sorting_result_context, opts=dict(),
always_open_new=True,
enabled=(len(sorting_result_context.selectedUnitIds()) > 0)
))
launchers.append(dict(
group='sorting-result', name='console-out', label='Console output',
view_class=ConsoleOutView,
context=sorting_result_context, opts=dict(),
always_open_new=True,
enabled=(sorting_result_context.consoleOutputPath() is not None)
))
launchers.append(dict(
group='sorting-result', name='exec-stats', label='Execution stats',
view_class=ExecutionStatsView,
context=sorting_result_context, opts=dict(),
always_open_new=True,
enabled=(sorting_result_context.executionStats() is not None)
))
launchers.append(dict(
group='sorting-result', name='comparison-with-truth', label='Comparison with truth',
view_class=ComparisonWithTruthView,
context=sorting_result_context, opts=dict(),
always_open_new=True,
enabled=(sorting_result_context.comparisonWithTruthPath() is not None)
))
        # dict(name='unit', label='Unit')  # stray no-op expression, disabled: it built a dict and discarded it
launchers.append(dict(
group='sorting-result', name='test', label='Test',
view_class=TemplatesView,
context=sorting_result_context, opts=dict(),
always_open_new=True,
enabled=(sorting_result_context.currentUnitId() is not None)
))
return ret
class ConsoleOutView(vd.Component):
def __init__(self, *, context, opts=None):
vd.Component.__init__(self)
self._context = context
self._size = (100, 100)
if not context.consoleOutputPath():
self._text = 'no console output found'
else:
self._text = mt.loadText(path=context.consoleOutputPath()) or 'unable to load console output'
def setSize(self, size):
if self._size != size:
self._size = size
def size(self):
return self._size
def tabLabel(self):
return 'Console out'
def render(self):
return vd.components.ScrollArea(vd.pre(self._text), height=self._size[1])
class ExecutionStatsView(vd.Component):
def __init__(self, *, context, opts=None):
vd.Component.__init__(self)
self._context = context
self._size = (100, 100)
self._stats = context.executionStats()
def setSize(self, size):
if self._size != size:
self._size = size
def size(self):
return self._size
def tabLabel(self):
return 'Exec stats'
def render(self):
if not self._stats:
return vd.div('No stats found')
return vd.div(vd.pre(json.dumps(self._stats, indent=4)))
class ComparisonWithTruthView(vd.Component):
def __init__(self, *, context, opts=None):
vd.Component.__init__(self)
self._context = context
self._size = (100, 100)
if not context.comparisonWithTruthPath():
self._object = None
else:
self._object = mt.loadObject(path=context.comparisonWithTruthPath())
def setSize(self, size):
if self._size != size:
self._size = size
def size(self):
return self._size
def tabLabel(self):
return 'Comparison with truth'
def render(self):
if not self._object:
return vd.div('Unable to load comparison data.')
return vd.components.ScrollArea(vd.pre(json.dumps(self._object, indent=4)), height=self._size[1])
authors: ["jeremy.magland@gmail.com"]
author_id: jeremy.magland@gmail.com
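A sketch of how the returned registry might be consumed; forestview's real dispatch code is not part of this record, and `context` must be a live forestview context object:

```python
# Instantiate every enabled view launcher (component entries are skipped;
# some launcher dicts have no 'opts' key, hence the .get()).
ret = get_spikeforest_view_launchers(context)
for launcher in ret['launchers']:
    if launcher['enabled'] and 'view_class' in launcher:
        view = launcher['view_class'](context=launcher['context'],
                                      opts=launcher.get('opts'))
```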
---
blob_id: d37a305a988762a88462d72268ef5b9960e21900 | directory_id: f7e9bf6fa18a41b52994b1f16fd55c4d69f33b56
path: /plugins/embed_picasaweb_image/embed_picasaweb_image.py
content_id: 02b13d5d2a5d91850ad78e166be1f0e6b52a1e0a
detected_licenses: ["Unlicense", "LicenseRef-scancode-public-domain"] | license_type: permissive
repo_name: siongui/userpages
snapshot_id: 1716f2d24e52b514ea8534027cec9707783d0d47 | revision_id: 494b95e61715a49dce6615103a5b19fa05f276f1
branch_name: refs/heads/master
visit_date: 2023-07-01T12:52:04.813216 | revision_date: 2023-06-12T16:31:25 | committer_date: 2023-06-12T16:31:25
github_id: 13944800 | star_events_count: 80 | fork_events_count: 39
gha_license_id: Unlicense | gha_event_created_at: 2023-08-18T03:51:40 | gha_created_at: 2013-10-29T02:39:13 | gha_language: Go
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1564 | extension: py
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
# Creating reStructuredText Directives
# @see http://docutils.sourceforge.net/docs/howto/rst-directives.html
from docutils.parsers.rst import directives, Directive
from docutils import nodes
class embed_picasaweb_image(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = { 'album_name' : directives.unchanged,
'css_class' : directives.unchanged,
'description' : directives.unchanged,
'album_url' : directives.uri,
'image_url' : directives.uri,
}
has_content = False
def run(self):
url = directives.uri(self.arguments[0])
album_name = self.options.get('album_name', None)
album_url = self.options.get('album_url', None)
image_url = self.options.get('image_url', None)
css_class = self.options.get('css_class', None)
description = self.options.get('description', u'')
if album_name and album_url:
            html = u'<div class="{}"><a href="{}"><img src="{}"></a><div>{}</div><div class="album">From Album: <a href="{}">{}</a></div></div>'.format(
                css_class, image_url, url, description, album_url, album_name)
        else:
            html = u'<div class="{}"><a href="{}"><img src="{}"></a><div>{}</div></div>'.format(
                css_class, image_url, url, description)
return [nodes.raw('', html, format='html')]
def register():
directives.register_directive('embed_picasaweb_image', embed_picasaweb_image)
authors: ["siongui@gmail.com"]
author_id: siongui@gmail.com
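A quick way to exercise the directive end to end with docutils (the reST snippet and URLs below are placeholders):

```python
from docutils.core import publish_string
import embed_picasaweb_image

embed_picasaweb_image.register()
rst = """\
.. embed_picasaweb_image:: https://example.com/photo.jpg
   :image_url: https://example.com/photo.html
   :css_class: picasaweb
   :description: A sample caption
"""
html = publish_string(rst, writer_name='html').decode('utf-8')
print(html)
```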
---
blob_id: ff82dba0faaadec9068bbc9b3ccc625a721573a6 | directory_id: 786027545626c24486753351d6e19093b261cd7d
path: /ghidra9.2.1_pyi/ghidra/file/formats/gzip/GZipFileSystemFactory.pyi
content_id: 65e1831d93c9e7d26029af1620bb52a08cc18eb9
detected_licenses: ["MIT"] | license_type: permissive
repo_name: kohnakagawa/ghidra_scripts
snapshot_id: 51cede1874ef2b1fed901b802316449b4bf25661 | revision_id: 5afed1234a7266c0624ec445133280993077c376
branch_name: refs/heads/main
visit_date: 2023-03-25T08:25:16.842142 | revision_date: 2021-03-18T13:31:40 | committer_date: 2021-03-18T13:31:40
github_id: 338577905 | star_events_count: 14 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1302 | extension: pyi
content:
from typing import List
import ghidra.formats.gfilesystem
import ghidra.formats.gfilesystem.factory
import ghidra.util.task
import java.io
import java.lang
class GZipFileSystemFactory(object, ghidra.formats.gfilesystem.factory.GFileSystemFactoryWithFile, ghidra.formats.gfilesystem.factory.GFileSystemProbeBytesOnly):
MAX_BYTESREQUIRED: int = 65536
PROBE_BYTES_REQUIRED: int = 2
def __init__(self): ...
def create(self, __a0: ghidra.formats.gfilesystem.FSRL, __a1: ghidra.formats.gfilesystem.FSRLRoot, __a2: java.io.File, __a3: ghidra.formats.gfilesystem.FileSystemService, __a4: ghidra.util.task.TaskMonitor) -> ghidra.formats.gfilesystem.GFileSystem: ...
def equals(self, __a0: object) -> bool: ...
def getBytesRequired(self) -> int: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def probeStartBytes(self, __a0: ghidra.formats.gfilesystem.FSRL, __a1: List[int]) -> bool: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def bytesRequired(self) -> int: ...
authors: ["tsunekou1019@gmail.com"]
author_id: tsunekou1019@gmail.com
---
blob_id: 20e5e3d3efd7b71f0d1245e11bc770db1c7e042d | directory_id: 39f86d230048630cb4f45444d7c354845c285648
path: /powerpc_hardware_manager/powerpc_device.py
content_id: 1b6351b272bdd6de936a4a186020fa74ab36728b
detected_licenses: [] | license_type: no_license
repo_name: hamzy/powerpc-ironic-hardware-manager
snapshot_id: 132ea44e2ea440f4ab6cc4616860ee3f969f88a4 | revision_id: edd55523335bd56d48b97f5b37b1e56087c7f6c0
branch_name: refs/heads/master
visit_date: 2020-05-22T06:59:42.088955 | revision_date: 2016-09-26T18:35:04 | committer_date: 2016-09-26T18:35:04
github_id: 64872603 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 21024 | extension: py
content:
# Copyright 2016 International Business Machines
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pyudev
import shlex
import netifaces
from oslo_log import log
from oslo_concurrency import processutils
from ironic_python_agent import errors
from ironic_python_agent import hardware
from ironic_python_agent import utils
from ironic_python_agent.hardware import BlockDevice
from ironic_python_agent.hardware import BootInfo
from ironic_python_agent.hardware import CPU
from ironic_python_agent.hardware import Memory
from ironic_python_agent.hardware import NetworkInterface
from ironic_python_agent.hardware import SystemVendorInfo
LOG = log.getLogger()
def _get_device_vendor(dev):
"""Get the vendor name of a given device."""
try:
devname = os.path.basename(dev)
with open('/sys/class/block/%s/device/vendor' % devname, 'r') as f:
return f.read().strip()
except IOError:
LOG.warning("Can't find the device vendor for device %s", dev)
def _udev_settle():
"""Wait for the udev event queue to settle.
Wait for the udev event queue to settle to make sure all devices
are detected once the machine boots up.
"""
try:
utils.execute('udevadm', 'settle')
except processutils.ProcessExecutionError as e:
LOG.warning('Something went wrong when waiting for udev '
'to settle. Error: %s', e)
return
def list_all_block_devices(block_type='disk'):
"""List all physical block devices
The switches we use for lsblk: P for KEY="value" output, b for size output
in bytes, d to exclude dependent devices (like md or dm devices), i to
ensure ascii characters only, and o to specify the fields/columns we need.
Broken out as its own function to facilitate custom hardware managers that
don't need to subclass GenericHardwareManager.
:param block_type: Type of block device to find
:return: A list of BlockDevices
"""
_udev_settle()
columns = ['KNAME', 'MODEL', 'SIZE', 'ROTA', 'TYPE']
report = utils.execute('lsblk', '-Pbdi', '-o{}'.format(','.join(columns)),
check_exit_code=[0])[0]
lines = report.split('\n')
context = pyudev.Context()
devices = []
for line in lines:
device = {}
# Split into KEY=VAL pairs
vals = shlex.split(line)
for key, val in (v.split('=', 1) for v in vals):
device[key] = val.strip()
# Ignore block types not specified
if device.get('TYPE') != block_type:
LOG.debug(
"TYPE did not match. Wanted: {!r} but found: {!r}".format(
block_type, line))
continue
# Ensure all required columns are at least present, even if blank
missing = set(columns) - set(device)
if missing:
raise errors.BlockDeviceError(
'%s must be returned by lsblk.' % ', '.join(sorted(missing)))
name = '/dev/' + device['KNAME']
try:
udev = pyudev.Device.from_device_file(context, name)
# pyudev started raising another error in 0.18
except (ValueError, EnvironmentError, pyudev.DeviceNotFoundError) as e:
LOG.warning("Device %(dev)s is inaccessible, skipping... "
"Error: %(error)s", {'dev': name, 'error': e})
extra = {}
else:
# TODO(lucasagomes): Since lsblk only supports
# returning the short serial we are using
# ID_SERIAL_SHORT here to keep compatibility with the
# bash deploy ramdisk
extra = {key: udev.get('ID_%s' % udev_key) for key, udev_key in
[('wwn', 'WWN'), ('serial', 'SERIAL_SHORT'),
('wwn_with_extension', 'WWN_WITH_EXTENSION'),
('wwn_vendor_extension', 'WWN_VENDOR_EXTENSION')]}
devices.append(BlockDevice(name=name,
model=device['MODEL'],
size=int(device['SIZE']),
rotational=bool(int(device['ROTA'])),
vendor=_get_device_vendor(device['KNAME']),
**extra))
return devices
class PowerPCHardwareManager(hardware.HardwareManager):
""" """
HARDWARE_MANAGER_NAME = "PowerPCHardwareManager"
HARDWARE_MANAGER_VERSION = "1"
SYSTEM_FIRMWARE_VERSION = "IBM-habanero-ibm-OP8_v1.7_1.62"
SYSTEM_FIRMWARE_FILE = "/root/8348_810.1603.20160310b_update.hpm"
def __init__(self):
self.sys_path = '/sys'
def evaluate_hardware_support(self):
"""Declare level of hardware support provided.
Since this example covers a case of supporting a specific device,
this method is where you would do anything needed to initalize that
device, including loading drivers, and then detect if one exists.
In some cases, if you expect the hardware to be available on any node
running this hardware manager, or it's undetectable, you may want to
return a static value here.
Be aware all managers' loaded in IPA will run this method before IPA
performs a lookup or begins heartbeating, so the time needed to
execute this method will make cleaning and deploying slower.
:returns: HardwareSupport level for this manager.
"""
LOG.debug("PowerPCHardwareManager.evaluate_hardware_support:")
return hardware.HardwareSupport.SERVICE_PROVIDER
def list_hardware_info(self):
"""Return full hardware inventory as a serializable dict.
This inventory is sent to Ironic on lookup and to Inspector on
inspection.
:return: a dictionary representing inventory
"""
hardware_info = {}
hardware_info['interfaces'] = self.list_network_interfaces()
hardware_info['cpu'] = self.get_cpus()
hardware_info['disks'] = self.list_block_devices()
hardware_info['memory'] = self.get_memory()
hardware_info['bmc_address'] = self.get_bmc_address()
hardware_info['system_vendor'] = self.get_system_vendor_info()
hardware_info['boot'] = self.get_boot_info()
return hardware_info
def list_network_interfaces(self):
iface_names = os.listdir('{0}/class/net'.format(self.sys_path))
iface_names = [name for name in iface_names if self._is_device(name)]
return [self._get_interface_info(name) for name in iface_names]
def get_cpus(self):
func = "PowerPCHardwareManager.get_cpus"
lines = utils.execute('lscpu')[0]
cpu_info = {k.strip().lower(): v.strip() for k, v in
(line.split(':', 1)
for line in lines.split('\n')
if line.strip())}
# Current CPU frequency can be different from maximum one on modern
# processors
frequency = cpu_info.get('cpu max mhz', cpu_info.get('cpu mhz'))
flags = []
out = utils.try_execute('grep', '-Em1', '^flags', '/proc/cpuinfo')
if out:
try:
# Example output (much longer for a real system):
# flags : fpu vme de pse
flags = out[0].strip().split(':', 1)[1].strip().split()
except (IndexError, ValueError):
LOG.warning('Malformed CPU flags information: %s', out)
else:
LOG.warning('Failed to get CPU flags')
model_name = cpu_info.get('model name')
count = int(cpu_info.get('cpu(s)'))
architecture = cpu_info.get('architecture')
LOG.debug("%s: model_name = %s", func, model_name)
LOG.debug("%s: frequency = %s", func, frequency)
LOG.debug("%s: count = %s", func, count)
LOG.debug("%s: architecturecount = %s", func, architecture)
LOG.debug("%s: flags = %s", func, flags)
return CPU(model_name=model_name,
frequency=frequency,
# this includes hyperthreading cores
count=count,
architecture=architecture,
flags=flags)
def list_block_devices(self):
return list_all_block_devices()
def get_memory(self):
func = "PowerPCHardwareManager.get_memory"
cmd = ("lshw -c memory -short -quiet 2>/dev/null"
"|grep -i 'system memory'")
try:
out, _ = utils.execute(cmd, shell=True)
physical_mb = 0
for line in out.split('\n'):
                if len(line.strip()) == 0:
continue
try:
# /0/5 memory 8165MiB System memory
# /0/1 memory 255GiB System memory
(_, _, memory, _, _) = line.split()
except ValueError:
LOG.debug("%s: \'%s\' bad line", func, line)
raise
if memory.endswith('GiB'):
physical_mb += int(memory[0:-3])*1024
elif memory.endswith('MiB'):
physical_mb += int(memory[0:-3])
else:
LOG.warning("%s: %s bad memory", func, memory)
LOG.warning("%s: line = \'%s\'", func, line)
LOG.debug("%s: physical_mb = %s", func, physical_mb)
return Memory(total=physical_mb, physical_mb=physical_mb)
except (processutils.ProcessExecutionError, OSError) as e:
LOG.warning("%s: Cannot execute %s: %s", func, cmd, e)
return None
def get_bmc_address(self):
# These modules are rarely loaded automatically
utils.try_execute('modprobe', 'ipmi_msghandler')
utils.try_execute('modprobe', 'ipmi_devintf')
utils.try_execute('modprobe', 'ipmi_si')
try:
out, _ = utils.execute(
"ipmitool lan print | grep -e 'IP Address [^S]' "
"| awk '{ print $4 }'", shell=True)
except (processutils.ProcessExecutionError, OSError) as e:
# Not error, because it's normal in virtual environment
LOG.warning("Cannot get BMC address: %s", e)
return
return out.strip()
def get_system_vendor_info(self):
func = "PowerPCHardwareManager.get_system_vendor_info"
cmd = "lshw -quiet | egrep '^ (product|serial):'"
product_name = None
serial_number = None
manufacturer = "IBM"
try:
out, _ = utils.execute(cmd, shell=True)
except (processutils.ProcessExecutionError, OSError) as e:
LOG.warning("Cannot get system vendor information: %s", e)
else:
for line in out.split('\n'):
line_arr = line.split(':', 1)
if len(line_arr) != 2:
continue
if line_arr[0].strip() == 'product':
product_name = line_arr[1].strip()
elif line_arr[0].strip() == 'serial':
serial_number = line_arr[1].strip()
LOG.debug ("%s: product_name = %s", func, product_name)
LOG.debug ("%s: serial_number = %s", func, serial_number)
LOG.debug ("%s: manufacturer = %s", func, manufacturer)
return SystemVendorInfo(product_name=product_name,
serial_number=serial_number,
manufacturer=manufacturer)
def get_boot_info(self):
func = "PowerPCHardwareManager.get_boot_info"
boot_mode = 'uefi' if os.path.isdir('/sys/firmware/efi') else 'bios'
LOG.debug("%s: The current boot mode is %s", func, boot_mode)
pxe_interface = utils.get_agent_params().get('BOOTIF')
return BootInfo(current_boot_mode=boot_mode,
pxe_interface=pxe_interface)
def get_clean_steps(self, node, ports):
"""Get a list of clean steps with priority.
Define any clean steps added by this manager here. These will be mixed
with other loaded managers that support this hardware, and ordered by
priority. Higher priority steps run earlier.
Note that out-of-band clean steps may also be provided by Ironic.
These will follow the same priority ordering even though they are not
executed by IPA.
There is *no guarantee whatsoever* that steps defined here will be
executed by this HardwareManager. When it comes time to run these
steps, they'll be called using dispatch_to_managers() just like any
other IPA HardwareManager method. This means if they are unique to
your hardware, they should be uniquely named. For example,
upgrade_firmware would be a bad step name. Whereas
upgrade_foobar_device_firmware would be better.
:param node: The node object as provided by Ironic.
:param ports: Port objects as provided by Ironic.
:returns: A list of cleaning steps, as a list of dicts.
"""
LOG.debug("PowerPCHardwareManager.get_clean_steps:")
return [{
"step": "upgrade_powerpc_firmware",
"priority": 17,
# Should always be the deploy interface
"interface": "deploy",
# If you need Ironic to coordinate a reboot after this step
# runs, but before continuing cleaning, this should be true.
"reboot_requested": True,
# If it's safe for Ironic to abort cleaning while this step
# runs, this should be true.
"abortable": False
}]
def get_version(self):
"""Get a name and version for this hardware manager.
In order to avoid errors and make agent upgrades painless, cleaning
will check the version of all hardware managers during get_clean_steps
at the beginning of cleaning and before executing each step in the
agent.
The agent isn't aware of the steps being taken before or after via
out of band steps, so it can never know if a new step is safe to run.
Therefore, we default to restarting the whole process.
:returns: a dictionary with two keys: `name` and
`version`, where `name` is a string identifying the hardware
manager and `version` is an arbitrary version string.
"""
LOG.debug("PowerPCHardwareManager.get_version:")
return {
'name': self.HARDWARE_MANAGER_NAME,
'version': self.HARDWARE_MANAGER_VERSION
}
def get_ipv4_addr(self, interface_id):
try:
addrs = netifaces.ifaddresses(interface_id)
return addrs[netifaces.AF_INET][0]['addr']
except (ValueError, IndexError, KeyError):
# No default IPv4 address found
return None
def _is_device(self, interface_name):
device_path = '{0}/class/net/{1}/device'.format(self.sys_path,
interface_name)
return os.path.exists(device_path)
def _get_interface_info(self, interface_name):
addr_path = '{0}/class/net/{1}/address'.format(self.sys_path,
interface_name)
with open(addr_path) as addr_file:
mac_addr = addr_file.read().strip()
return NetworkInterface(
interface_name,
mac_addr,
ipv4_address=self.get_ipv4_addr(interface_name),
has_carrier=self._interface_has_carrier(interface_name),
lldp=None)
def _interface_has_carrier(self, interface_name):
path = '{0}/class/net/{1}/carrier'.format(self.sys_path,
interface_name)
try:
with open(path, 'rt') as fp:
return fp.read().strip() == '1'
except EnvironmentError:
LOG.debug('No carrier information for interface %s',
interface_name)
return False
    def upgrade_powerpc_firmware(self, node, ports):
"""Upgrade firmware on a PowerPC computer"""
# Any commands needed to perform the firmware upgrade should go here.
# If you plan on actually flashing firmware every cleaning cycle, you
# should ensure your device will not experience flash exhaustion. A
# good practice in some environments would be to check the firmware
# version against a constant in the code, and noop the method if an
# upgrade is not needed.
func = "PowerPCHardwareManager.upgrade_powerpc_firmware"
LOG.debug("%s: node = %s", func, node)
LOG.debug("%s: ports = %s", func, ports)
if self._is_latest_firmware_ipmi(node, ports):
LOG.debug('Latest firmware already flashed, skipping')
self._MarkMark()
# Return values are ignored here on success
return True
else:
LOG.debug('Firmware version X found, upgrading to Y')
# Perform firmware upgrade.
try:
self._upgrade_firmware_ipmi(node, ports)
except Exception as e:
# Log and pass through the exception so cleaning will fail
LOG.exception(e)
self._MarkMark()
raise
self._MarkMark()
return True
def _is_latest_firmware_ipmi(self, node, ports):
"""Detect if device is running latest firmware."""
func = "PowerPCHardwareManager._is_latest_firmware_ipmi"
ipmi_username = node["driver_info"]["ipmi_username"]
ipmi_address = node["driver_info"]["ipmi_address"]
ipmi_password = node["driver_info"]["ipmi_password"]
version = None
try:
cmd = ("sudo ipmitool "
"-I lanplus "
"-H %s "
"-U %s "
"-P %s "
"fru") % (ipmi_address,
ipmi_username,
ipmi_password, )
out, _ = utils.execute(cmd, shell=True)
fInSection = False
for line in out.split('\n'):
                if len(line.strip()) == 0:
fInSection = False
continue
if line.find("FRU Device Description : System Firmware") > -1:
LOG.debug("%s: Found System Firmware section", func)
fInSection = True
continue
if not fInSection:
continue
if line.find("Product Version") > -1:
version = line.split(':')[1].strip()
except (processutils.ProcessExecutionError, OSError) as e:
LOG.warning("%s: Cannot execute %s: %s", func, cmd, e)
LOG.debug("%s: version = %s", func, version)
if version is None:
return False
# http://stackoverflow.com/a/29247821/5839258
elif version.upper().lower() == self.SYSTEM_FIRMWARE_VERSION.upper().lower():
return True
else:
return False
def _upgrade_firmware_ipmi(self, node, ports):
"""Upgrade firmware on device."""
func = "PowerPCHardwareManager._upgrade_firmware_ipmi"
ipmi_username = node["driver_info"]["ipmi_username"]
ipmi_address = node["driver_info"]["ipmi_address"]
ipmi_password = node["driver_info"]["ipmi_password"]
try:
cmd = ("sudo ipmitool "
"-I lanplus "
"-H %s "
"-U %s "
"-P %s "
"-z 30000 "
"hpm upgrade %s "
"force") % (ipmi_address,
ipmi_username,
ipmi_password,
self.SYSTEM_FIRMWARE_FILE)
out, _ = utils.execute(cmd, shell=True)
return True
except (processutils.ProcessExecutionError, OSError) as e:
LOG.warning("%s: Cannot execute %s: %s", func, cmd, e)
return False
def _MarkMark(self):
# Ironic powers off the computer before the entire debug log has
# been flushed out. Hack that here. :(
LOG.debug("MARKMARK")
import time
        time.sleep(30)
authors: ["hamzy@us.ibm.com"]
author_id: hamzy@us.ibm.com
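The lsblk parsing in list_all_block_devices is compact; here is the same split applied to one sample line of `lsblk -Pbdi` output (the device values are invented):

```python
import shlex

line = 'KNAME="sda" MODEL="ST1000DM003-1CH1" SIZE="1000204886016" ROTA="1" TYPE="disk"'
device = dict(pair.split('=', 1) for pair in shlex.split(line))
# shlex honors the quoting, so this yields:
# {'KNAME': 'sda', 'MODEL': 'ST1000DM003-1CH1', 'SIZE': '1000204886016',
#  'ROTA': '1', 'TYPE': 'disk'}
```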
---
blob_id: 708cfe325109af42808f25f39d043b2a0676c301 | directory_id: 1dbd4d92637c80d01a1f56c62b4871c7fe22a9f0
path: /analysis_scripts/1c-Plotting_patch_clamp_results.py
content_id: 34d817a659c819363eab778d74d25c11498d5c61
detected_licenses: [] | license_type: no_license
repo_name: nmarzano/peakfinding_patchclamping_calcium-imaging
snapshot_id: 4ecdb1b944ac7ac6b6d2d405a09a94683578ae49 | revision_id: 31194e2a338f14f221e1c12917213e94261a65e5
branch_name: refs/heads/master
visit_date: 2023-04-12T13:07:27.949766 | revision_date: 2022-12-20T05:44:06 | committer_date: 2022-12-20T05:44:06
github_id: 580261178 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 761 | extension: py
content:
from scipy.stats import norm
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
output_dir = 'directory_to_save'
filename = 'directory/cleaned_data.csv'
major_peaks = pd.read_csv(filename, header="infer")
def plot_cells(dfs, xmin, xmax):
sns.set(style = 'ticks', font_scale = 1)
for cell, df in dfs.groupby('cell'):
fig, ax = plt.subplots(2, 1)
sns.lineplot(data = df, x = "Time (s)", y = "Background corrected", color = "black", ax = ax[0])
sns.lineplot(data = df, x = "Time (s)", y = "Background corrected", color = "black", ax = ax[1])
ax[1].set_xlim(xmin, xmax)
fig.savefig(f'{output_dir}/raw_plot_cell{cell}.eps', dpi = 600)
plt.show()
plot_cells(major_peaks, 70.5, 71)
authors: ["noreply@github.com"]
author_id: nmarzano.noreply@github.com
---
blob_id: 1fa034f767ef9f88cf6992b4ac2982972c7b0b5f | directory_id: ca61296e18ae834628b6a4199bbd5a9379bdeff3
path: /worker/models.py
content_id: 0cd8b2b7d891bed8050b7ab22c805edc0417230a
detected_licenses: [] | license_type: no_license
repo_name: shashank-sharma/gdrs
snapshot_id: 8979361a21a01097ca9f5a9e969c55c8548fedfa | revision_id: b0cb17eade5049b5175dc78eb93b0385b72ac61a
branch_name: refs/heads/master
visit_date: 2020-03-29T22:41:18.934031 | revision_date: 2019-01-22T15:31:31 | committer_date: 2019-01-22T15:31:31
github_id: 150435096 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1181 | extension: py
content:
from django.db import models
from accounts.models import User
# Create your models here.
class Driver(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
driving_licence_number = models.CharField(max_length=20)
expiry_date = models.DateField()
working = models.BooleanField(default=False)
class Cab(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
driver = models.ForeignKey(Driver, on_delete=models.CASCADE)
license_plate = models.CharField(max_length=20)
car_model_id = models.CharField(max_length=20)
manufacturing_id = models.CharField(max_length=20)
active = models.BooleanField(default=True)
class CarModel(models.Model):
cab = models.ForeignKey(Cab, on_delete=models.CASCADE)
model_name = models.CharField(max_length=80)
model_description = models.CharField(max_length=100)
class shift(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
driver = models.ForeignKey(Driver, on_delete=models.CASCADE)
shift_start = models.DateField()
shift_end = models.DateField()
login_time = models.DateField()
logout_time = models.DateField()
authors: ["shashank.sharma98@gmail.com"]
author_id: shashank.sharma98@gmail.com
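A sketch of exercising these models from a Django shell (assumes migrations are applied and at least one accounts.models.User exists; all field values are placeholders):

```python
import datetime
from accounts.models import User
from worker.models import Driver, Cab

user = User.objects.first()
driver = Driver.objects.create(
    user=user,
    driving_licence_number="DL-0420110012345",
    expiry_date=datetime.date(2021, 1, 22),
    working=True,
)
cab = Cab.objects.create(
    user=user,
    driver=driver,
    license_plate="KA-01-AB-1234",
    car_model_id="42",
    manufacturing_id="7",
)
```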
---
blob_id: 5126cfeafdad3a6bee680a4dfae4380b7bea389c | directory_id: 381ba62f113dc74c4592bf4d2718cb3d1379bee1
path: /Mosh/Python/Variables/constructor_exercise.py
content_id: e30232d11f3d300087b054a2e7c2bf773b14c05a
detected_licenses: [] | license_type: no_license
repo_name: MaSanTM/Mosh
snapshot_id: 2926cfddb9cf7f0faef0ed80e55d29a9227b9a1e | revision_id: 129e2f0618c2026556396734220b6d32f69acdf3
branch_name: refs/heads/main
visit_date: 2023-07-22T05:31:55.159348 | revision_date: 2021-09-07T21:17:52 | committer_date: 2021-09-07T21:17:52
github_id: 404125064 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 187 | extension: py
content:
class Person:
def __init__(self, name):
self.name = name
def talk(self):
print(f"Hi, i'm {self.name}")
john = Person('SMITH John')
print(john.name)
john.talk()
authors: ["noreply@github.com"]
author_id: MaSanTM.noreply@github.com
---
blob_id: 1a33f5d587a589a07b4d735a0b1ae59f57d324be | directory_id: 9b91eb9e1bbd95b65452ac6dc00440e89a75232d
path: /basic_set.py
content_id: c2eeeaf7b35d85abb71868baf354610cf9c645a9
detected_licenses: [] | license_type: no_license
repo_name: s00149s/Python_Basic
snapshot_id: e376c3342cd114d641bc6e9e5a557baa7aece2a3 | revision_id: c63c595001f99f32c2dfe5e2f89dfb7716de5dd4
branch_name: refs/heads/master
visit_date: 2023-05-14T18:23:53.096514 | revision_date: 2021-05-31T02:13:59 | committer_date: 2021-05-31T02:13:59
github_id: 368452146 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 2990 | extension: py
content:
numbers = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}  # universal set
evens = {0, 2, 4, 6, 8}  # even numbers
odds = {1, 3, 5, 7, 9}  # odd numbers
mthree = {0, 3, 6, 9}  # multiples of 3


def define_set():
    """
    Practice defining sets:
    the set() function
    {} literals
    """
    empty = set()  # an empty set
    print(empty, type(empty))
    empty = {}  # caution: an empty dict, not an empty set
    print(empty, type(empty))
    # No order, no indexing, no slicing.
    # Only length (element count) and membership (in, not in) apply.
    print(numbers, "LENGTH : ", len(numbers))
    print("membership : ", 2 in numbers, 2 in evens, 2 in odds)
    # Casting: build a set from another sequence
    s = "Python Programming"  # how many distinct letters does the string use?
    chars = set(s.upper())
    print(s, chars)
    # Duplicates are not allowed,
    # which makes sets handy for deduplicating sequences such as lists
    lst = "Python Programming Java Programming".upper().split()  # uppercase, then split on whitespace
    print("lst : ", lst)
    words = set(lst)
    print(words, len(words))  # lst contains three distinct words


def set_method():
    """
    Set methods
    """
    print("universal set : ", numbers)
    # Adding elements
    numbers.add(10)  # add the element 10
    print(numbers)
    evens.add(10)
    print("evens : ", evens)
    evens.add(4)  # sets do not allow duplicates
    print("evens : ", evens)
    # Removal: discard, remove
    evens.discard(4)
    print("evens : ", evens)
    evens.discard(4)  # discard -> no error even if the element is absent
    # evens.remove(4)  # remove -> absent element -> raises KeyError
    # Bulk update
    evens.update({2, 4, 6})
    print("evens : ", evens)


def set_oper():
    """
    Predicate operations:
    superset and subset tests
    """
    # evens union odds == universal set
    print("evens union odds : ", evens.union(odds) == numbers)
    print("evens union odds : ", evens | odds == numbers)
    # Superset / subset tests
    print("numbers is a superset of evens? : ", numbers.issuperset(evens))
    print("odds is a subset of numbers? : ", odds.issubset(numbers))
    # Intersection
    print("evens intersect multiples of 3 : ", evens.intersection(mthree))
    print(mthree & odds == {3, 9})
    # Difference
    print("numbers minus evens : ", numbers.difference(evens))
    print("numbers minus evens == odds? : ", numbers - evens == odds)


def loop():
    # Iterate over numbers and print each element
    for item in numbers:
        print(item, end=" ")
    else:
        print()


if __name__ == "__main__":
    # define_set()
    # set_method()
    # set_oper()
    loop()
authors: ["s00149s@naver.com"]
author_id: s00149s@naver.com
---
blob_id: fa57571b286048def7b562388f0f18b35d946a36 | directory_id: 68e6fb66e17860638dd6fe2f06cc54c67f839d0b
path: /Book/ch2/linked_list.py
content_id: e77b24c59bccbd30e0f5087bf9e27181e61955be
detected_licenses: [] | license_type: no_license
repo_name: lsclike/Programming-Structure
snapshot_id: 7ba110878fad416caf1afca052743de3180ccbd8 | revision_id: 81c1c82fe0caf9be6c366b4bc66a731b42f37b94
branch_name: refs/heads/master
visit_date: 2023-07-16T02:37:15.963698 | revision_date: 2023-07-01T13:52:46 | committer_date: 2023-07-01T13:52:46
github_id: 167762218 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 516 | extension: py
content:
# The empty-list marker is never defined in this file; Composing Programs,
# which this code follows, uses the string 'empty' (assumed here).
empty = 'empty'


def is_link(s):
    return s == empty or (len(s) == 2 and is_link(s[1]))


def link(first, rest):
    assert is_link(rest), "rest must be a linked list."
    return [first, rest]


def first(s):
    assert is_link(s), "first only applies to linked lists."
    assert s != empty, "empty linked list has no first element."
    return s[0]


def rest(s):
    assert is_link(s), "rest only applies to linked lists."
    assert s != empty, "empty linked list has no rest."
    return s[1]
authors: ["lsclike2016@hotmail.com"]
author_id: lsclike2016@hotmail.com
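With the sentinel defined, the constructors and selectors compose as usual:

```python
# Build [1, [2, 'empty']] and walk it with the selectors above.
pair = link(1, link(2, empty))
assert first(pair) == 1
assert first(rest(pair)) == 2
assert rest(rest(pair)) == empty
```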
---
blob_id: 231ff724f7f04714476eecaea9b9e0203ed9cc01 | directory_id: ecde4736e27b275b37bf1ccd7f375e31c1faf733
path: /2006old/TestScript/bin/testgameroom.py
content_id: b877fec178ee25bb962dcd667e966dd168360670
detected_licenses: [] | license_type: no_license
repo_name: bahamut8348/xkcode
snapshot_id: 1d85ef9b806c13af7193c9fd2281c99f174357a3 | revision_id: 41665e5601d6d555ae2633ac0aa8cc1108a6b6bf
branch_name: refs/heads/master
visit_date: 2016-09-10T17:29:15.351377 | revision_date: 2013-05-17T02:37:30 | committer_date: 2013-05-17T02:37:30
github_id: 34992677 | star_events_count: 1 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 4034 | extension: py
content:
import PSClient2 as PS
class PSEvent:
def OnValidateResult(self, result ):
pass
def OnCreateSessionResult(self, result):
pass
def OnConnectIS(self, bConnect, is_id, Reason, desp, pislist):
pass
def OnAreaListRecv(self, Result, AreaID, areaList):
pass
def OnRegisterResult(self, errors, names):
pass
def OnRoomListRecv(self, Result, AreaID, RoomList, RoomType):
pass
def OnCreateRoomRecv(self, callback, result, pRoomInfo):
pass
def OnJoinRoomRecv(self, Result, pRoomInfo):
pass
def OnRecvChatMsg(self, datatype, roomid, serverid, toUser, fromUser, msg):
pass
def OnQuitRoomResult(self, callback, result, roomid, pRoomInfoo):
pass
def OnLoginMMSResult(self, Result):
pass
def OnDispatchGame(self, GameID, MaxUserNum):
pass
def OnLobbyServerConn(self, bShow, ls_id, dwServerType, ls_ver):
pass
def OnQueryNewsRecv(self, Callback, Result, msg, AddTime):
pass
def OnConfigUserSession(self, Callback, Result, SessionType, userPSUID):
pass
def OnQuerySessionResult(self, Callback, Result, SessionType, userPSUID, sessioninfo):
pass
def OnQuerySysinfoRecv(self, Callback, sessionNum, chatRoomNum):
pass
def OnQuerySysinfoRecv(self, callback, info):
pass
def OnUserInvited(self, roomid, roomserver, inviterUser, roomName, toroomid, toChatServerID, usepass, password, roomType, invitebuffer ):
pass
def OnInviteUserReturn(self, roomid, roomserver, inviteeUser, result, toroomid, toChatServerID, inviteebuffer ):
pass
def OnSetRoomInfoResult(self, callback, result, roomid, serverid):
pass
def OnRoomInfoChange(self, roomid, RoomInfo):
pass
def OnSetUserInfoResult(self, Callback, Result, RoomID):
pass
def OnRoomUserInfoChange(self, roomid, userid, UserInfo):
pass
def OnStartGameResult(self, callback, result, roomid):
pass
def OnStartGameRecv(self, roomid, startgameinfo):
pass
def OnMatchGameResult(self, result, roomid, gameid, token, saddr):
pass
def OnQuerySession(self, Callback, result, Psuid, SessionType, sessioninfo):
pass
def QueryBuddySession(self, Callback, BuddyPSUID, SessionType):
pass
def OnQueryAccountInfoRecv(self, dwCallback, Result, UserInfo):
pass
def OnModifyAccountInfoRecv(self, dwCallback, errors ):
pass
def OnPasswordChangeRecv(self, dwCallback, errors):
pass
def OnSendPacketErrorRecv(self, result ):
pass
def OnUserListRecv(self, roomid, syncType, userList):
pass
def OnISListRecv(self, Result, IsList):
pass
def OnFSListRecv(self, Callback, FsList):
pass
def OnUserInfoRecv(self, Callback, Result, RoomID, UserInfo, buffer ):
pass
def OnRoomInfoRecv(self, callback, result, roominfo, buffer):
pass
def OnDeleteBuddyRes(self, Callback, Result, BuddyAccoutID, desp):
pass
def OnAddBuddyRes(self, Callback, Result, BuddyAccoutID, desp):
pass
def OnBuddyListRecv(self, TotalNum, BuddyList):
pass
def OnBuddyStateChange(self, AccountID, ChangeType, Status, buffer):
pass
def OnAddBuddyReq(self, Callback, NickName, Reason):
pass
def OnConfigBuddyStatusRes(self, Callback, Result):
pass
def OnRoomUserInfoRecv(self, callback, result, roomid, serverid, userinfo, buffer ):
pass
def OnInit_Buddy(self, callback, result, buddyserverid):
pass
def l(x): print x  # Python 2 print statement: this 2013-era script targets Python 2
class MyClient( PSEvent ):
def __init__(self):
self.ps = PS.PSClient(2)
self.ps.SetEvent(self)
self.ps.SetAppInfo(1,3,0,0)
self.ps.SetLog(l)
self.ps.SetIS( PS.str2addr('tcp://192.168.1.13:7788') )
self.ps.ConnectToIS()
def OnConnectIS(self, bConnect, is_id, Reason, desp, pislist):
self.ps.Login("ps_xu.ke",'psps123')
def OnCreateSessionResult(self, result):
self.ps.CreateRoom(0, 3, "myname", "roomname", 1, 0,"",5)
def OnCreateRoomRecv(self, callback, result, pRoomInfo):
print "CreateRoom", result
def update(self):
self.ps.Update()
import time
if __name__ == "__main__":
c = MyClient()
while 1:
c.update()
time.sleep(0.01)
authors: ["netmud@gmail.com@3d467911-a1bc-f432-5061-063988b6f25e"]
author_id: netmud@gmail.com@3d467911-a1bc-f432-5061-063988b6f25e
---
blob_id: 7aa49d03b00df1969a441a334cfa985a4fe57e98 | directory_id: e87d793b3a5facc6e54e0263fbd67703e1fbb382
path: /duckietown-world-venv/lib/python3.6/site-packages/geometry/manifolds/tests/embedding_test.py
content_id: c6435354a8abc351527207b355f8785768e2ff0f
detected_licenses: [] | license_type: no_license
repo_name: llingg/behaviour-benchmarking
snapshot_id: a860bbe709309e13f3e1133d916944882199a40f | revision_id: 85bbf1a9c2c628ba74480fe7abac3804d6afdac4
branch_name: refs/heads/v1
visit_date: 2022-10-06T08:21:29.068329 | revision_date: 2020-06-11T07:02:46 | committer_date: 2020-06-11T07:02:46
github_id: 259622704 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2020-06-02T17:52:46 | gha_created_at: 2020-04-28T11:52:08 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1989 | extension: py
content:
# coding=utf-8
from geometry.manifolds import (SO3, SO2, R1, R2, R3, SE2, SE3, S2, S1, T1, T2,
T3, so2, so3, se2, se3, Tran3, Tran2, Tran1, tran2, tran1, tran3)
from nose.plugins.attrib import attr
def check_embed_relation_cond(A, B):
check_embed_relation_cond.description = 'Checking %s < %s' % (A, B)
msg = None
if not A.embeddable_in(B):
msg = '%s is not embeddable in %s' % (A, B)
if not B.can_represent(A):
msg = '%s cannot represent %s' % (B, A)
if msg:
raise Exception('%s;\n %s: %s\n %s: %s' %
(msg, A, A.relations_descriptions(),
B, B.relations_descriptions()))
def check_embed_relation(A, B):
check_embed_relation_cond(A, B)
points = list(A.interesting_points())
if not points:
msg = ('Cannot test because manifold %s does '
'not have interesting points' % A)
raise Exception(msg)
for a1 in points:
A.belongs(a1)
b = A.embed_in(B, a1)
B.belongs(b)
a2 = A.project_from(B, b)
A.belongs(a2)
a3 = B.project_to(A, b)
A.belongs(a3)
A.assert_close(a1, a2)
A.assert_close(a1, a3)
@attr('embed')
def test_embed_relations():
couples = []
def add(A, B):
couples.append((A, B))
add(R1, R2)
add(R2, R3)
add(R1, R3)
add(SO2, SO3)
add(SO2, SE3)
add(SO2, SE2)
add(SO3, SE3)
add(so3, se3)
add(so2, se2)
add(so2, se3)
add(S1, S2)
add(R1, SE2)
add(R2, SE2)
add(R1, SE3)
add(R2, SE3)
add(R3, SE3)
add(Tran1, SE2)
add(Tran2, SE2)
add(Tran1, SE3)
add(Tran2, SE3)
add(Tran3, SE3)
add(T1, T2)
add(T2, T3)
add(T1, T3)
add(T1, R1)
add(T2, R2)
add(T3, R3)
add(T3, SE3)
add(S1, SE3)
add(S2, SE3)
add(tran1, se3)
add(tran2, se3)
add(tran3, se3)
add(T1, S1)
for A, B in couples:
check_embed_relation(A, B)
authors: ["linggl@student.ethz.ch"]
author_id: linggl@student.ethz.ch
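The same round trip the test performs, written out for a single pair of manifolds (uses only API calls that appear in the test above; assumes the geometry package is importable):

```python
from geometry.manifolds import R1, R2

a = list(R1.interesting_points())[0]
b = R1.embed_in(R2, a)       # lift the point into R2
a2 = R1.project_from(R2, b)  # and project it back
R1.assert_close(a, a2)
```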
---
blob_id: a83aef36ff14f7c63007b1cf8d651c30aeb8ef94 | directory_id: 573f85b19a687b103bf0e70b48f1eab0fbed792a
path: /certbot/AliDNSCertbot.py
content_id: 716f19879cb1900387314795c22cde75a263459f
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: calllivecn/dockerbuild
snapshot_id: 7e240326743aaf88b7adc3637c8c643a8c4b7c41 | revision_id: e2c2c315677d2510f806e3dfa3fec062c58c7134
branch_name: refs/heads/master
visit_date: 2023-07-23T03:27:17.378620 | revision_date: 2023-03-13T04:19:54 | committer_date: 2023-03-13T04:19:54
github_id: 128035897 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 12289 | extension: py
content:
#!/usr/bin/env python3
# coding=utf-8
# date 2019-11-20 16:34:16
# author calllivecn <c-all@qq.com>
import os
import sys
import time
import base64
import hashlib
import hmac
import logging
#import configparser
import urllib
from urllib import request
from urllib import parse
# pip install alibabacloud_alidns20150109==2.0.2
from alibabacloud_alidns20150109.client import Client as Alidns20150109Client
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_alidns20150109 import models as alidns_20150109_models
def getlogger(level=logging.INFO):
logger = logging.getLogger("logger")
formatter = logging.Formatter("%(asctime)s %(filename)s:%(funcName)s:%(lineno)d %(levelname)s: %(message)s", datefmt="%Y-%m-%d-%H:%M:%S")
consoleHandler = logging.StreamHandler(stream=sys.stdout)
#logger.setLevel(logging.DEBUG)
consoleHandler.setFormatter(formatter)
# consoleHandler.setLevel(logging.DEBUG)
logger.addHandler(consoleHandler)
logger.setLevel(level)
return logger
logger = getlogger()
ALI_DDNS_URL = 'alidns.cn-zhangjiakou.aliyuncs.com'
LetsEncryptSubDomain = '_acme-challenge'
class AliDDNS:
def __init__(self, access_key_id, access_key_secret):
self.access_key_id = access_key_id
self.access_key_secret = access_key_secret
"""
使用AK&SK初始化账号Client
@param access_key_id:
@param access_key_secret:
@return: Client
@throws Exception
"""
config = open_api_models.Config(
# 您的AccessKey ID,
access_key_id=self.access_key_id,
# 您的AccessKey Secret,
access_key_secret=self.access_key_secret
)
# 访问的域名
config.endpoint = ALI_DDNS_URL
self.client = Alidns20150109Client(config)
def addDomainRecord(self, domain_name, rr, typ, value):
"""
        Args:
domain_name='calllive.cc',
type='AAAA',
rr='route'
value='240e:3b5:3013:f760:6edd:c591:41db:7a5d',
return:
{
"RequestId": "69698E87-A897-5FFA-B578-1001D5052D75",
"RecordId": "751818936343988224"
}
"""
add_domain_record_request = alidns_20150109_models.AddDomainRecordRequest(
domain_name=domain_name,
type=typ,
value=value,
rr=rr
)
        # print the API return value yourself when running this code
response = self.client.add_domain_record(add_domain_record_request)
return response.body.to_map()
    def updateDomainRecord(self, record_id, rr, typ, value):
"""
        Args:
record_id='751812982741233664',
rr='route',
type='AAAA',
value='240e:3b5:3013:f760:2292:83ab:872:2'
return:
{
"RequestId": "A997E4E6-C6BF-5A2B-85AE-01BE6E3AC1BE",
"RecordId": "751812982741233664"
}
"""
update_domain_record_request = alidns_20150109_models.UpdateDomainRecordRequest(
record_id=record_id,
rr=rr,
type=typ,
value=value
)
        # print the API return value yourself when running this code
response = self.client.update_domain_record(update_domain_record_request)
return response.body.to_map()
def describe_sub_domain(self, sub_domain, typ):
"""
return:
{
"TotalCount": 1,
"RequestId": "5AA5CC8A-4675-5B92-898A-5FBCC742E975",
"PageSize": 20,
"DomainRecords": {
"Record": [
{
"RR": "route",
"Line": "default",
"Status": "ENABLE",
"Locked": false,
"Type": "AAAA",
"DomainName": "calllive.cc",
"Value": "240e:3b5:3013:f760:7942:d2cd:5cc4:2aa1",
"RecordId": "751945591127363584",
"TTL": 600,
"Weight": 1
}
]
},
"PageNumber": 1
}
"""
describe_sub_domain_records_request = alidns_20150109_models.DescribeSubDomainRecordsRequest(
sub_domain=sub_domain,
type=typ
)
        # print the API return value yourself when running this code
response = self.client.describe_sub_domain_records(describe_sub_domain_records_request)
# logger.debug(f"response type: {type(response)}")
# logger.debug(f"response dir(): {dir(response)}")
# logger.debug(f"response to_map(): {response.to_map()}")
# logger.debug(f"response body: {response.body.to_map()}")
# logger.debug(f"response.body type: {type(response.body)}")
# jsondata = UtilClient.to_jsonstring(TeaCore.to_map(response))
return response.body.to_map()
    def describeDomainRecord(self, domain_name, rrkey_word, typ):
"""
domain_name='baidu.com',
rrkey_word='ditu',
typ='AAAA'
return:
{
"TotalCount": 1,
"RequestId": "06A55865-42D5-5453-B7D3-ECA434200584",
"PageSize": 20,
"DomainRecords": {
"Record": [
{
"RR": "route",
"Line": "default",
"Status": "ENABLE",
"Locked": false,
"Type": "AAAA",
"DomainName": "calllive.cc",
"Value": "240e:3b5:3013:f760:6edd:c591:41db:7a5d",
"RecordId": "751812982741233664",
"TTL": 600,
"Weight": 1
}
]
},
"PageNumber": 1
}
"""
describe_domain_records_request = alidns_20150109_models.DescribeDomainRecordsRequest(
domain_name=domain_name,
rrkey_word=rrkey_word,
type=typ
)
        # print the API return value yourself when running this code
response = self.client.describe_domain_records(describe_domain_records_request)
return response.body.to_map()
class AliyunDns:
__endpoint = 'https://alidns.aliyuncs.com'
__appid = ''
__appsecret = ''
__logger = logging.getLogger("logger")
def __init__(self, appid, appsecret):
self.__appid = appid
self.__appsecret = appsecret
def __getSignatureNonce(self):
return str(int(round(time.time() * 1000)))
    def __percentEncode(self, s):
        res = parse.quote_plus(s.encode('utf8'), '')
        res = res.replace('+', '%20')
        res = res.replace('*', '%2A')
        res = res.replace('%7E', '~')
        return res
def __signature(self, params):
sortedParams = sorted(params.items(), key=lambda params: params[0])
query = ''
for k, v in sortedParams:
query += '&' + self.__percentEncode(k) + '=' + self.__percentEncode(v)
        self.__logger.debug("Encoded parameter string: {}".format(query))
        stringToSign = 'GET&%2F&' + self.__percentEncode(query[1:])
        self.__logger.debug("String to sign: {}".format(stringToSign))
        try:
            h = hmac.new((self.__appsecret + "&").encode("utf-8"), stringToSign.encode("utf-8"), hashlib.sha1)
        except Exception as e:
            self.__logger.error("Signing failed...")
            self.__logger.error(e)
            raise
        signature = base64.encodebytes(h.digest()).strip()
        return signature
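    # Signing recap (as implemented above): sort the params by key, percent-encode
    # each key and value, join them as k=v pairs with '&', prepend 'GET&%2F&' to the
    # percent-encoded query string, HMAC-SHA1 it with '<AccessKeySecret>&' as the
    # key, and base64-encode the digest.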
def __request(self, params):
commonParams = {
'Format': 'JSON',
'Version': '2015-01-09',
'SignatureMethod': 'HMAC-SHA1',
'SignatureNonce': self.__getSignatureNonce(),
'SignatureVersion': '1.0',
'AccessKeyId': self.__appid,
'Timestamp': time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
}
# merge all params
finalParams = commonParams.copy()
finalParams.update(params)
self.__logger.debug("finalParams: {}".format(finalParams))
# signature
finalParams['Signature'] = self.__signature(finalParams)
self.__logger.info('Signature: {}'.format(finalParams['Signature']))
# get final url
url = "{}/?{}".format(self.__endpoint, parse.urlencode(finalParams))
# print(url)
req = request.Request(url)
self.__logger.debug(req.full_url)
self.__logger.debug(req.get_method())
try:
f = request.urlopen(req)
response = f.read()
self.__logger.info(response.decode('utf-8'))
except request.HTTPError as e:
self.__logger.info(e.read().strip().decode('utf-8'))
raise SystemExit(e)
def addDomainRecord(self, domain, rr, value):
params = {
'Action': 'AddDomainRecord',
'DomainName': domain,
'RR': rr,
'Type': 'TXT',
'Value': value
}
self.__request(params)
def deleteSubDomainRecord(self, domain, rr):
params = {
'Action': 'DeleteSubDomainRecords',
'DomainName': domain,
'RR': rr,
'Type': 'TXT'
}
self.__request(params)
    def addLetsencryptDomainRecord(self, domain, value):
        self.addDomainRecord(domain, LetsEncryptSubDomain, value)
    def deleteLetsencryptDomainRecord(self, domain):
        self.deleteSubDomainRecord(domain, LetsEncryptSubDomain)
def toString(self):
print('AliyunDns[appid=' + self.__appid + ', appsecret=' + self.__appsecret+']')
def auth(aliyunDns):
domain = os.environ.get('CERTBOT_DOMAIN')
value = os.environ.get('CERTBOT_VALIDATION')
if domain is None:
raise Exception('Environment variable CERTBOT_DOMAIN is empty.')
if value is None:
raise Exception('Environment variable CERTBOT_VALIDATION is empty.')
try:
logger.info('Start setting DNS')
logger.info('Domain:' + domain)
logger.info('Value:' + value)
# aliyunDns.toString()
# add letsencrypt domain record
aliyunDns.addLetsencryptDomainRecord(domain, value)
logger.debug("addDomainRecord()")
# wait for completion
logger.info('sleep 10 secs')
time.sleep(10)
logger.info('Success.')
logger.info('DNS setting end!')
except urllib.error.HTTPError as e:
logger.error(e)
sys.exit(1)
except Exception as e:
logger.error(e)
sys.exit(1)
def cleanup(aliyunDns):
domain = os.environ.get('CERTBOT_DOMAIN')
if domain is None:
raise Exception('Environment variable CERTBOT_DOMAIN is empty.')
try:
logger.info('Start to clean up')
logger.info('Domain:' + domain)
# aliyunDns.toString()
# delete letsencrypt domain record
aliyunDns.deleteLetsencryptDomainRecord(domain)
logger.info('Success.')
logger.info('Clean up end!')
except Exception as e:
logger.error(e)
sys.exit(1)
Usage="""\
Usage: {} <auth|cleanup> <appid> <secretkey>
And: set environment CERTBOT_DOMAIN CERTBOT_VALIDATION
""".format(sys.argv[0])
def main():
if len(sys.argv) == 1:
print(Usage)
sys.exit(1)
if len(sys.argv) == 4:
if "auth" == sys.argv[1] or "cleanup" == sys.argv[1]:
appid = sys.argv[2]
secretkey = sys.argv[3]
else:
logger.error(Usage)
sys.exit(1)
else:
logger.error("Usage: {} <auth|cleanup> <appid> <secretkey>".format(sys.argv[0]))
sys.exit(1)
if sys.argv[1] == "auth":
auth(AliyunDns(appid, secretkey))
elif sys.argv[1] == "cleanup":
cleanup(AliyunDns(appid, secretkey))
else:
logger.error(Usage)
if __name__ == '__main__':
main()
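# Example of wiring this script into certbot, which exports CERTBOT_DOMAIN and
# CERTBOT_VALIDATION for the hooks (the script name below is hypothetical):
#   certbot certonly --manual --preferred-challenges dns \
#       --manual-auth-hook "python3 aliyun_hook.py auth <appid> <secretkey>" \
#       --manual-cleanup-hook "python3 aliyun_hook.py cleanup <appid> <secretkey>" \
#       -d example.com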
|
[
"calllivecn@outlook.com"
] |
calllivecn@outlook.com
|
f4ee36d85f337be493ffa614eb246403c3fd37ca
|
2bd4392a0929bf294df65bf45338d62e22474a25
|
/expenses/utils.py
|
07e29fb03ca4ec007e93ca6c77e29ab631a28c23
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
DjangoGworls/django-expenses
|
c5c7825017884be1bd53d5d19ee15acfb7bafbbd
|
60f2c20c21a9f01d7efa169b501e3beb361795d1
|
refs/heads/master
| 2023-01-30T01:20:45.723489
| 2020-11-07T11:13:03
| 2020-11-07T11:13:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,995
|
py
|
# Django-Expenses
# Copyright © 2018-2020, Chris Warrick.
# All rights reserved.
# See /LICENSE for licensing information.
"""Assorted Expenses utilities."""
import babel.numbers
import datetime
import decimal
import iso8601
import itertools
import typing
from django.utils import timezone
from django.conf import settings
from django.utils.translation import get_language
def format_money(amount: typing.Union[int, float, decimal.Decimal]) -> str:
"""Format an amount of money for display."""
if amount is None:
amount = 0
return babel.numbers.format_currency(
amount, settings.EXPENSES_CURRENCY_CODE, locale=settings.EXPENSES_CURRENCY_LOCALE
)
def today_date() -> datetime.date:
"""Get today’s date."""
return timezone.now().date()
def revchron(qs):
"""Sort expenses in reverse-chronological order."""
return qs.order_by("-date", "-date_added")
def round_money(amount: decimal.Decimal) -> decimal.Decimal:
"""Round money in a way appropriate for money."""
return amount.quantize(decimal.Decimal(".01"), rounding=decimal.ROUND_HALF_UP)
def dict_overwrite(destdict: dict, destkey, srcdict: dict, srckey=None) -> None:
"""Override a dict key with one taken from another dict."""
destdict[destkey] = srcdict.get(srckey or destkey, destdict[destkey])
def serialize_date(date: datetime.date) -> str:
    """Serialize a date."""
return date.isoformat()
def serialize_dt(dt: datetime.datetime) -> str:
"""Serialize a datetime."""
return dt.isoformat()
def serialize_decimal(amount: decimal.Decimal) -> str:
"""Serialize a decimal value."""
return str(amount)
def parse_date(date_str: str) -> datetime.date:
"""Parse an ISO 8601 date."""
return iso8601.parse_date(date_str).date()
def parse_dt(dt_str: str) -> datetime.datetime:
"""Parse an ISO 8601 datetime."""
return iso8601.parse_date(dt_str)
def parse_decimal(amount_str: str) -> decimal.Decimal:
"""Parse a decimal object."""
return decimal.Decimal(amount_str)
def parse_amount_input(amount_str: str) -> typing.Optional[decimal.Decimal]:
"""Parse an amount in a human-forgiving way."""
try:
return decimal.Decimal(amount_str)
except decimal.InvalidOperation:
try:
return decimal.Decimal(amount_str.replace(",", "."))
        except decimal.InvalidOperation:
return None
def get_babel_locale() -> str:
"""Get a babel-friendly locale name."""
lang, _, region = get_language().partition("-")
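    # e.g. get_language() == "en-us" -> "en_US"; a bare "de" -> "de_DE"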
if not region:
region = lang.upper()
return f"{lang}_{region.upper()}"
T = typing.TypeVar("T")
def peek(iterable: typing.Iterable[T]) -> typing.Tuple[typing.Optional[T], typing.Optional[typing.Iterable[T]]]:
"""Peek at the first row of an iterable.
Returns (first row, iterable with first row)."""
iterator = iter(iterable)
try:
first_row = next(iterator)
except StopIteration:
return None, None
return first_row, itertools.chain([first_row], iterator)
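# A minimal usage sketch for peek() (not part of the original module):
# >>> first, rows = peek(iter([1, 2, 3]))
# >>> first
# 1
# >>> list(rows)
# [1, 2, 3]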
|
[
"kwpolska@gmail.com"
] |
kwpolska@gmail.com
|
0e409bfcafd82eb943b72bb7ba60857882323af3
|
4273fc9019cf3f85066614ff6ac62d2fe44cd6fc
|
/solutions/oop/old/shapescriptparser.py
|
a240df4b8741d27c8a008bd9e361eac40bfc4cad
|
[] |
no_license
|
jkh-code/data_science
|
c03cf23004a6e9e773109fdc0d56e94350ad8aad
|
d4d37bac4f2eb5b96593600a0c39f90c7521b434
|
refs/heads/main
| 2023-07-12T22:08:38.620029
| 2021-08-14T19:12:05
| 2021-08-14T19:12:05
| 395,725,301
| 0
| 0
| null | 2021-08-13T16:44:08
| 2021-08-13T16:44:07
| null |
UTF-8
|
Python
| false
| false
| 3,032
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from shapes import Circle, Rectangle
from operators import UnionShape, IntersectionShape, DiffShape
from collections import OrderedDict
class ShapeScriptParser():
"""A parser for a scripting language
that would generate shapes (as objects defined in shapes.py)
and display the result on screen (using SimpleGUI).
Parameters
----------
gui : {SimpleGUI} a gui object for drawing pixels.
Attributes
----------
gui : {SimpleGUI} object for drawing shapes
shape_register : {OrderedDict} of shapes to draw
"""
def __init__(self, gui):
"""Instantiates the parser, gui is provided for drawing.
"""
self.gui = gui
self.shape_register = OrderedDict()
def parse(self, filepath):
"""Parses a file line by line using parse_line().
Parameters
----------
filename : {str} the path to the file
Returns
-------
None
"""
self.shape_register = OrderedDict()
with open(filepath, 'r') as sfile:
for line in sfile:
self.parse_line(line.strip())
def parse_line(self, line):
"""Parses one line of the scripting language.
Creates the corresponding object.
Parameters
----------
line : {str} the line to parse
Returns
-------
None
"""
args = line.split(',')
key = args[0]
if args[1] == 'circle':
x, y, rad, r, g, b = map(int,args[2:])
self.shape_register[key] = Circle(x,y,rad,(r,g,b))
elif args[1] == 'rectangle':
x1, y1, x2, y2, r, g, b = map(int,args[2:])
self.shape_register[key] = Rectangle(x1,y1,x2,y2,(r,g,b))
elif args[1] in {'union', 'intersection', 'difference'}:
keyA, keyB = args[2:]
if not (keyA in self.shape_register and keyB in self.shape_register):
                print('error: shape {} or {} does not exist'.format(keyA, keyB))
                return
if args[1] == 'union':
self.shape_register[key] = UnionShape(self.shape_register[keyA],
self.shape_register[keyB])
if args[1] == 'intersection':
self.shape_register[key] = IntersectionShape(self.shape_register[keyA],
self.shape_register[keyB])
if args[1] == 'difference':
self.shape_register[key] = DiffShape(self.shape_register[keyA],
self.shape_register[keyB])
else:
print('error: shape {} unknown'.format(args[1]))
def draw(self):
"""Draws objects that have been created previously
with parse() or parse_line().
Parameters
----------
None
Returns
-------
None
"""
self.gui.draw(self.shape_register.values())
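# Hypothetical script lines accepted by ShapeScriptParser.parse_line() above:
#   c1,circle,100,100,50,255,0,0
#   r1,rectangle,10,10,200,150,0,255,0
#   u1,union,c1,r1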
|
[
"duhaimejosiah@gmail.com"
] |
duhaimejosiah@gmail.com
|
cfb405b18bd216514a3de5103fa8163a50939d5d
|
5b0ad97cb6924e487c0921965dacb3dd18f19a1a
|
/src/ImageCodec/CanonicalHuffman.py
|
0a8db2c0f411f4fcc36123282293d4ad5ad153ae
|
[] |
no_license
|
sinoyou/ImageProcess_and_PatternRecognition
|
bcde442b00a86f3dcb6c49f5f1e2857b48c41ad5
|
dffee2c21ac789066a3f4ceddf69172e417f84e7
|
refs/heads/master
| 2023-01-28T04:24:10.700353
| 2020-11-30T05:35:55
| 2020-11-30T05:35:55
| 247,293,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,762
|
py
|
class CanonicalHuffman:
def __init__(self, signs, sign_bit_width):
self.signs = signs
self.sign_bit_width = sign_bit_width
self.vanilla_huffman_table = dict()
self.canonical_huffman_table = dict()
# get vanilla huffman dict
self.vanilla_huffman_table = self.build_vanilla_table()
# get canonical huffman dict
self.canonical_huffman_table = self.build_canonical_table()
def build_vanilla_table(self):
"""
        Build the vanilla Huffman table using a binary tree.
:return: dict()
"""
# frequency analysis
frequency = dict()
for s in self.signs:
frequency.setdefault(s, 0)
frequency[s] += 1
        # run the Huffman binary-tree construction algorithm.
        # tree node format: (cost, left_child, right_child, sign (leaf nodes only))
nodes = list()
trees = list()
p = 0
# initial
for k, v in frequency.items():
node = (v, None, None, k)
nodes.insert(p, node)
trees.append(node)
p += 1
# combine
while len(trees) > 1:
trees = sorted(trees, key=lambda x: x[0])
a = trees.pop(0)
b = trees.pop(0)
new_node = (a[0] + b[0], a, b, None)
trees.append(new_node)
# generate vanilla huffman dictionary
vanilla_dict = dict()
root = trees[0]
def dfs(node, string):
if node[1] is None and node[2] is None:
vanilla_dict[node[3]] = string
elif node[1] is None or node[2] is None:
raise Exception('Middle node should have 2 sons in huffman trees.')
else:
dfs(node[1], string + '0')
dfs(node[2], string + '1')
dfs(root, '')
return vanilla_dict
def build_canonical_table(self):
"""
Build canonical table based on built vanilla table.
Canonical Huffman Coding enable saving files without binary codes.
:return: dict()
"""
vanilla_items = sorted(self.vanilla_huffman_table.items(), key=lambda x: len(x[1]))
canonical_table = dict()
count = 0
pre_length = 0
pre_value = 0
for k, v in vanilla_items:
count += 1
            # rule 1: the first (shortest) code is all zeros at its length
            if count == 1:
                pre_length = len(v)
                pre_value = 0
                canonical_table[k] = '0'.zfill(len(v))
            # rule 2: a code with the same length as the previous one is previous + 1
else:
if pre_length == len(v):
canonical_table[k] = '{:b}'.format(pre_value + 1).zfill(len(v))
pre_value += 1
                # rule 3: a longer code is (previous + 1) shifted left by (new_len - old_len)
else:
canonical_table[k] = '{:b}'.format((pre_value + 1) << (len(v) - pre_length)).zfill(len(v))
pre_value = (pre_value + 1) << (len(v) - pre_length)
pre_length = len(v)
return canonical_table
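    # Worked example of the rules above: for sorted code lengths 1, 2, 3, 3 the
    # assigned codes are '0', '10' ((0+1)<<1), '110' ((2+1)<<1) and '111' (6+1).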
def get_table(self, table='canonical'):
if table == 'canonical':
return self.canonical_huffman_table
elif table == 'vanilla':
return self.vanilla_huffman_table
else:
raise Exception('No such huffman table type = {}'.format(table))
def get_canonical_table_size(self):
size = 0
        # Canonical Huffman coding does not need to store each sign's bit string;
        # it only needs to store, in order, <sign, bit-string length> pairs.
# sign
size += len(self.canonical_huffman_table.keys()) * self.sign_bit_width
        # length: in the worst case the longest bit string equals the maximum value a sign
        # can take (the Huffman tree degenerates into a chain), so sign_bit_width bits suffice
size += len(self.canonical_huffman_table.keys()) * self.sign_bit_width
return size
def encode_sign(self, sign, table='canonical'):
if sign not in self.vanilla_huffman_table.keys():
raise Exception('No such sign = {} in table. '.format(sign))
if table == 'canonical':
return self.canonical_huffman_table[sign]
elif table == 'vanilla':
return self.vanilla_huffman_table[sign]
else:
raise Exception('No such table type = {}'.format(table))
if __name__ == '__main__':
arrays = [1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5, 5]
huffman = CanonicalHuffman(arrays, sign_bit_width=4)
print(huffman.get_table(table='vanilla'))
print(huffman.get_table(table='canonical'))
|
[
"youzn99@qq.com"
] |
youzn99@qq.com
|
018b478deaa34ef7036f428aa0a5ce8e3ee99722
|
7f3112bd1cb6d5831370f01db1bf4ef7b9d6aee6
|
/selenium/test_search_in_python_org_search.py
|
43a2016183284cf053b611255f753820858169f3
|
[] |
no_license
|
insta-code1/Python-Unittests
|
f8a2138ae457756d8897594eaa2745a40f908a7e
|
84d62edce5b929b1822d4d7a92c7edf3003ddf07
|
refs/heads/master
| 2020-12-25T14:59:08.705048
| 2016-09-04T12:11:22
| 2016-09-04T12:11:22
| 67,342,779
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class PythonOrgSearch(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
def test_search_in_python_org(self):
driver = self.driver
driver.get("http://www.python.org")
self.assertIn("Python", driver.title)
elem = driver.find_element_by_name("q")
elem.send_keys("pycon")
elem.send_keys(Keys.RETURN)
assert "No results found." not in driver.page_source
def tearDown(self):
self.driver.close()
if __name__ == "__main__":
unittest.main()
|
[
"eoinkane1@gmail.com"
] |
eoinkane1@gmail.com
|
3bb45f868041fdd2eef7e1579e0956513f0ae960
|
7f622971d347057bdfea90d84f6c64e4fdbee418
|
/news_crawl/guardian/middlewares.py
|
825f870d12cf8394663bd0ca28a00ea7c57d941d
|
[] |
no_license
|
beharasatya/News_Crawler_Guardian
|
a29730c812562572328a40a2266bc584db9946b3
|
bbd1d96989f03e49a6befdd5ac9589c0f92da648
|
refs/heads/master
| 2021-09-04T18:51:51.480775
| 2018-01-21T09:38:48
| 2018-01-21T09:38:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,906
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class GuardianSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
|
[
"noreply@github.com"
] |
beharasatya.noreply@github.com
|
4bac660cb1c22d45ca836f60605cb5cb8d27d99b
|
931812bda84afbd7823450eaf833deb35cc849f3
|
/chainladder/methods/base.py
|
5f44a384e053ebc3220cfe7e644ca8a6f98a35bf
|
[
"MIT"
] |
permissive
|
jiayiderekchen/chainladder-python
|
43164a3f546d60c28da427c5e0301ca336dac62c
|
c7d3f4f0a5333b6bd34922cc406f252ab9c47e10
|
refs/heads/master
| 2020-09-04T17:15:07.885237
| 2019-07-25T01:54:49
| 2019-07-25T01:54:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,061
|
py
|
import numpy as np
import copy
from sklearn.base import BaseEstimator
from chainladder.tails import TailConstant
from chainladder.development import Development
from chainladder.core import IO
class MethodBase(BaseEstimator, IO):
def __init__(self):
pass
def validate_X(self, X):
obj = copy.copy(X)
if 'ldf_' not in obj:
obj = Development().fit_transform(obj)
if len(obj.ddims) - len(obj.ldf_.ddims) == 1:
obj = TailConstant().fit_transform(obj)
for item in ['cdf_', 'ldf_', 'average_']:
setattr(self, item, getattr(obj, item, None))
return obj
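    # Note: validate_X fits a default Development (when ldf_ is missing) and a
    # TailConstant (when the ldf ladder is one column short), then copies the
    # cdf_/ldf_/average_ attributes onto this estimator.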
def fit(self, X, y=None, sample_weight=None):
"""Applies the chainladder technique to triangle **X**
Parameters
----------
X : Triangle
            The triangle to which the chainladder technique is applied.
y : Ignored
sample_weight : Ignored
Returns
-------
self : object
Returns the instance itself.
"""
self.X_ = self.validate_X(X)
return self
def predict(self, X, sample_weight=None):
"""Predicts the chainladder ultimate on a new triangle **X**
Parameters
----------
X : Triangle
            The triangle for which ultimates are predicted.
sample_weight : Triangle
For exposure-based methods, the exposure to be used for predictions
Returns
-------
X_new: Triangle
"""
obj = copy.copy(self)
obj.X_ = copy.copy(X)
obj.sample_weight = sample_weight
if np.unique(self.cdf_.values, axis=-2).shape[-2] == 1:
obj.cdf_.values = np.repeat(
np.unique(self.cdf_.values, axis=-2),
len(X.odims), -2)
obj.ldf_.values = np.repeat(
np.unique(self.ldf_.values, axis=-2),
len(X.odims), -2)
obj.cdf_.odims = obj.ldf_.odims = obj.X_.odims
obj.cdf_.valuation = obj.ldf_.valuation = \
Development().fit(X).cdf_.valuation
obj.cdf_.set_slicers()
obj.ldf_.set_slicers()
return obj
@property
def full_expectation_(self):
obj = copy.copy(self.X_)
obj.values = (self.ultimate_.values /
np.unique(self.cdf_.values, axis=-2))
obj.values = np.concatenate((obj.values,
self.ultimate_.values), -1)
ddims = [int(item[item.find('-')+1:]) for item in self.cdf_.ddims]
obj.ddims = np.array([obj.ddims[0]]+ddims)
obj.valuation = obj._valuation_triangle(obj.ddims)
obj.nan_override = True
obj.set_slicers()
return obj
def ultimate_(self):
raise NotImplementedError
@property
def ibnr_(self):
obj = copy.copy(self.ultimate_)
obj.values = self.ultimate_.values-self.X_.latest_diagonal.values
obj.ddims = ['IBNR']
obj.set_slicers()
return obj
def _get_full_triangle_(self):
obj = copy.copy(self.X_)
w = 1-np.nan_to_num(obj.nan_triangle())
extend = len(self.ldf_.ddims) - len(self.X_.ddims)
ones = np.ones((w.shape[-2], extend))
w = np.concatenate((w, ones), -1)
obj.nan_override = True
e_tri = \
np.repeat(self.ultimate_.values, self.cdf_.values.shape[3], 3) / \
np.unique(self.cdf_.values, axis=-2)
e_tri = e_tri * w
zeros = obj.expand_dims(ones - ones)
properties = self.full_expectation_
obj.valuation = properties.valuation
obj.ddims = properties.ddims
obj.values = \
np.concatenate((np.nan_to_num(obj.values), zeros), -1) + e_tri
obj.values = np.concatenate((obj.values,
self.ultimate_.values), 3)
obj.set_slicers()
return obj
|
[
"jbogaardt@gmail.com"
] |
jbogaardt@gmail.com
|
6bcac4d4c723244550d2e9fabe534d8c2f66cfab
|
4d24bafa0f9ed31a566774bdc055cff52057d13b
|
/Ngram_model.py
|
54fc4519e6d53a70f7905714d626a7207ea2d329
|
[] |
no_license
|
saviono/DH-202-Languague-Model
|
c00c118555dcfe0a47e0733fa818988bfdba57de
|
6d7856c43a0631f119b67499d668c994c7f30a37
|
refs/heads/master
| 2022-12-14T10:17:28.113851
| 2020-09-03T12:15:23
| 2020-09-03T12:15:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,241
|
py
|
import json
import os
from nltk import trigrams
from collections import defaultdict
import random
import settings
import pandas as pd
def create_trigram_model():
path = settings.ngram_dir
if not os.path.isdir(path):
os.mkdir(path)
if os.path.exists(settings.ngram_model):
return
print('----- Analyzing Trigram Model ------')
songs_list_as_tuples = []
df = pd.read_csv(settings.csv_file_name, encoding="utf-8-sig")
df = df[df['lyrics'].notna()]
# df = df[(df['year'] >= 80) & (df['year'] <= 80)]
# Convert lyrics to a list of words, while '\n' is a word too
for index, row in df.iterrows():
song_lines_list = row['lyrics'].split('\n')
for i, line in enumerate(song_lines_list):
song_lines_list[i] = line + ' \n'
song_lines_list = ' '.join(song_lines_list)
song_lines_list = song_lines_list.split(' ')
song_as_words = list(filter(''.__ne__, song_lines_list))
song_as_words = (song_as_words,row['weeks'])
songs_list_as_tuples.append(song_as_words)
# Create a placeholder for model
model = defaultdict(lambda: defaultdict(lambda: 0))
    # Count frequency of co-occurrence
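    # (each trigram is weighted by the song's chart "weeks" value, song[1], rather than a raw count)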
for song in songs_list_as_tuples:
for w1, w2, w3 in trigrams(['~~~','~~~']+song[0]+['^^^','^^^']):
model[w1+'@'+w2][w3] += song[1]
# Let's transform the counts to probabilities
for key in model:
total_count = float(sum(model[key].values()))
for w3 in model[key]:
model[key][w3] /= total_count
print('----- Storing Data ------')
    with open(settings.ngram_model, 'w') as json_file:
        json.dump(model, json_file, indent=4)
print('----- DONE ------')
def generate_song(model):
text_temp = ['~~~','~~~']
text = ['~~~','~~~']
sentence_finished = False
num_of_words = 700
while not sentence_finished:
# select a random probability threshold
if len(text_temp) > num_of_words:
break
r = random.random()
accumulator = .0
for word in model['@'.join(text_temp[-2:])].keys():
accumulator += model['@'.join(text_temp[-2:])][word]
# select words that are above the probability threshold
if accumulator >= r:
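                # a run of '%' characters is a placeholder for a random number with
                # that many digits (e.g. '%%' draws a value in [10, 90])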
counter = word.count('%')
if counter > 0:
pres = '%'*counter
start = int('1'+('0'*(counter-1)))
end = int('9'+('0'*(counter-1)))
new_word = word.replace(pres, str(random.randint(start, end)))
text.append(new_word)
text_temp.append(word)
else:
text.append(word)
text_temp.append(word)
break
if text_temp[-2:] == ['^^^', '^^^']:
sentence_finished = True
# Generate a song as list of words & handle '/n'
song_as_words = [t for t in text if t]
    for i in range(0, len(song_as_words)-1):
if song_as_words[i]=='\n':
song_as_words[i+1]='\n'+song_as_words[i+1]
song_as_words = list(filter('\n'.__ne__, song_as_words))
for i in range(0, len(song_as_words) - 1):
if song_as_words[i] == '\n\n':
song_as_words[i + 1] = '\n\n' + song_as_words[i + 1]
song_as_words = list(filter('\n\n'.__ne__, song_as_words))
song = ' '.join(song_as_words)
song = song[8:-8]
return song
# Generate a song with fixed statistics
# # Calculate Statistics
# with open(settings.statistics_json, 'r') as json_file:
# statistics = json.load(json_file)
# json_file.seek(0)
#
# words_per_line = statistics['average_words_per_line']
# lines_per_stanza = statistics['average_lines_per_stanza']
#
# # split lines every 'words_per_line' number
# song = song.split()
# song = '\n'.join([' '.join(song[i:i + words_per_line]) for i in range(0, len(song), words_per_line)])
# # split lines to stanzas
# song = song.split('\n')
# song = '\n\n'.join(['\n'.join(song[i:i + lines_per_stanza]) for i in range(0, len(song), lines_per_stanza)])
# # return song
# return song
|
[
"savion.o@gmail.com"
] |
savion.o@gmail.com
|
d4af463e395f99b77dfd5bb09f2291a4f5e5cd4c
|
85c8d2290b33c26fa73f0d3901f8f6dea6d907c1
|
/Crear_Muchos_Rectangulos.py
|
9a0fd7cdcf2c23d3f9a1f2727595cef0e1580239
|
[] |
no_license
|
cristianpl1996/Pygame
|
2e6fd8b0700643f8e9a830fd724b2f884f4dca71
|
8c892335b7467d38d8bcece972850e2a9adda0c1
|
refs/heads/master
| 2020-03-25T12:46:46.851347
| 2018-07-29T19:16:29
| 2018-07-29T19:16:29
| 143,793,091
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
import pygame, sys, random
from pygame.locals import *
def main():
pygame.init()
pantalla = pygame.display.set_mode((480,500))
pygame.display.set_caption("Crear Muchos Rectangulos")
reloj=pygame.time.Clock()
listarec=[]
    for x in range(15):
w=random.randrange(15,45)
h=random.randrange(20,60)
x=random.randrange(450)
y=random.randrange(450)
listarec.append(pygame.Rect(x,y,w,h))
while True:
for evento in pygame.event.get():
if evento.type == QUIT:
pygame.quit()
sys.exit()
reloj.tick(20)
pantalla.fill((0,0,0))
for recs in listarec:
pygame.draw.rect(pantalla,(0,0,255),recs)
pygame.display.update()
main()
|
[
"c.patino@utp.edu.co"
] |
c.patino@utp.edu.co
|
fcdcbb3dc629b15123b24d968288cd160f889ebb
|
b2cc8bb4c41b58ee039ed0d8518089029203b3e7
|
/WebFrame/WebFrame.py
|
adbd18da71524c75689d4b346235ceaad012cdef
|
[] |
no_license
|
bigtaylor1989/HttPServer
|
75ebc1b1320e2024a38a93db5a12e9b046ffccb7
|
0824767f93465b41b126170bdc606f426805b712
|
refs/heads/master
| 2020-03-30T06:25:35.572693
| 2018-09-29T12:14:01
| 2018-09-29T12:14:01
| 150,860,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,734
|
py
|
#coding=utf-8
from socket import *
from setting import *
import time
from urls import *
from views import *
class Application(object):
def __init__(self):
self.sockfd = socket()
self.sockfd.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
self.sockfd.bind(frame_addr)
def start(self):
self.sockfd.listen(5)
while True:
connfd,addr = self.sockfd.accept()
            # receive the request method
            method = connfd.recv(128).decode()
            # receive the request path
            path = connfd.recv(128).decode()
if method == 'GET':
if path == '/' or path[-5:] == '.html':
status,response_body = self.get_html(path)
else:
status,response_body = self.get_data(path)
                # send the result back to the HTTP server
connfd.send(status.encode())
time.sleep(0.1)
connfd.send(response_body.encode())
elif method == 'POST':
pass
def get_html(self,path):
if path == '/':
get_file = STATIC_DIR + '/index.html'
else:
get_file = STATIC_DIR + path
try:
f = open(get_file)
except IOError:
response = ('404','===Sorry not found the page===')
        else:
            response = ('200', f.read())
            f.close()
finally:
return response
def get_data(self,path):
for url,handler in urls:
if path == url:
response_body = handler()
return '200',response_body
return '404','Sorry,Not found the data'
if __name__ == '__main__':
app = Application()
    app.start()  # start the framework and wait for requests
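# Hypothetical urls.py expected by this framework: a list of (path, handler)
# pairs, e.g. urls = [('/time', show_time)], where each handler returns the
# response body string.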
|
[
"Lvze@tedu.cn"
] |
Lvze@tedu.cn
|
90a6b0bf6ba220f42d8a29c89dc93492396116ff
|
350f0a5e56c83b4db157fe06137e929ab0b07f75
|
/models/tf_Cifar_OC_NN_Models.py
|
ad5660b15bfd3541fd0a08df12d27286fc2d736d
|
[] |
no_license
|
LiTangqing/Cleaned-OC-NN
|
4869e2db22fae4ce9f53e296b020ac945904a617
|
4c814626f69225215d27f11e3e316a7e7b299199
|
refs/heads/master
| 2020-04-05T03:33:38.447519
| 2018-11-07T09:56:51
| 2018-11-07T09:56:51
| 156,519,328
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,546
|
py
|
import time
import csv
from itertools import zip_longest
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import os
RANDOM_SEED = 42
g = lambda x : 1/(1 + tf.exp(-x))
def nnScore(X, w, V, g):
return tf.matmul(g((tf.matmul(X, w))), V)
def relu(x):
y = x
y[y < 0] = 0
return y
def write_decisionScores2Csv(path, filename, positiveScores, negativeScores):
newfilePath = path+filename
print ("Writing file to ", path+filename)
poslist = positiveScores
neglist = negativeScores
# rows = zip(poslist, neglist)
d = [poslist, neglist]
export_data = zip_longest(*d, fillvalue='')
with open(newfilePath, 'w') as myfile:
wr = csv.writer(myfile)
wr.writerow(("Normal", "Anomaly"))
wr.writerows(export_data)
return
def tf_OneClass_NN_linear(data_train,data_test,nu, verbose=True):
tf.reset_default_graph()
tf.set_random_seed(RANDOM_SEED)
train_X = data_train
x_size = train_X.shape[1]
print ("Input Shape:",x_size)
h_size = 16
y_size = 1
D = x_size
K = h_size
theta = np.random.normal(0, 1, K + K*D + 1)
rvalue = np.random.normal(0,1,(len(train_X),y_size))
# nu = 0.1
def init_weights(shape):
""" Weight initialization """
weights = tf.random_normal(shape,mean=0, stddev=1)
return tf.Variable(weights)
def forwardprop(X, w_1, w_2):
"""
Forward-propagation.
IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
"""
X = tf.cast(X, tf.float32)
w_1 = tf.cast(w_1, tf.float32)
w_2 = tf.cast(w_2, tf.float32)
h = (tf.matmul(X, w_1)) #
yhat = tf.matmul(h, w_2) # The \varphi function
return yhat
g = lambda x : x
def nnScore(X, w, V, g):
X = tf.cast(X, tf.float32)
w = tf.cast(w, tf.float32)
V = tf.cast(V, tf.float32)
return tf.matmul(g((tf.matmul(X, w))), V)
def relu1(x):
y = x
y = tf.nn.relu(x)
return y
def relu(x):
with sess.as_default():
x = x.eval()
y = x
y[y < 0] = 0
return y
def ocnn_obj(theta, X, nu, w1, w2, g,r):
w = w1
V = w2
X = tf.cast(X, tf.float32)
w = tf.cast(w1, tf.float32)
V = tf.cast(w2, tf.float32)
term1 = 0.5 * tf.reduce_sum(w**2)
term2 = 0.5 * tf.reduce_sum(V**2)
term3 = 1/nu * tf.reduce_mean(tf.nn.relu(r - nnScore(X, w, V, g)))
term4 = -r
return term1 + term2 + term3 + term4
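    # The objective implemented above is the OC-NN loss:
    #   min_{w,V,r}  0.5*||w||^2 + 0.5*||V||^2 + (1/nu) * mean(relu(r - score(X))) - r
    # with score(X) = g(X @ w) @ V; r is re-estimated each epoch as the
    # nu-quantile of the training scores.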
# For testing the algorithm
test_X = data_test
# Symbols
X = tf.placeholder("float32", shape=[None, x_size])
r = tf.get_variable("r", dtype=tf.float32,shape=(),trainable=False)
# Weight initializations
w_1 = init_weights((x_size, h_size))
w_2 = init_weights((h_size, y_size))
cost = ocnn_obj(theta, X, nu, w_1, w_2, g,r)
updates = tf.train.AdamOptimizer(0.05).minimize(cost)
# Run optimization routine after initialization
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
rvalue = 0.1
start_time = time.time()
for epoch in range(100):
# Train with each example
sess.run(updates, feed_dict={X: train_X,r:rvalue})
rvalue = nnScore(train_X, w_1, w_2, g)
with sess.as_default():
rvalue = rvalue.eval()
rvalue = np.percentile(rvalue, q=100*nu)
if verbose:
print("Epoch = %d, r = %f" % (epoch + 1,rvalue))
trainTime = time.time() - start_time
### Get the optimized weights here
start_time = time.time()
train = nnScore(train_X, w_1, w_2, g)
test = nnScore(test_X, w_1, w_2, g)
testTime = time.time() - start_time
with sess.as_default():
arrayTrain = train.eval()
arrayTest = test.eval()
# rstar = r.eval()
rstar =rvalue
sess.close()
print ("====== Session Completed ======")
pos_decisionScore = arrayTrain-rstar
#pos_decisionScore[pos_decisionScore < 0] = 0 # why this?
neg_decisionScore = arrayTest-rstar
pos_decisionScore = pos_decisionScore.reshape(-1)
neg_decisionScore = neg_decisionScore.reshape(-1)
write_decisionScores2Csv(os.getcwd()+'/Decision_Scores/', 'oc_nn_linear_cifar.csv',
pos_decisionScore, neg_decisionScore)
# write_decisionScores2Csv(decision_scorePath, "OneClass_NN_linear.csv", pos_decisionScore, neg_decisionScore)
return [pos_decisionScore, neg_decisionScore,trainTime,testTime]
def tf_OneClass_NN_sigmoid(data_train,data_test,nu, verbose=True):
tf.reset_default_graph()
sess = tf.Session()
train_X = data_train
tf.set_random_seed(RANDOM_SEED)
# Layer's sizes
    x_size = train_X.shape[1]  # Number of input features
    print ("Input Shape:", x_size)
    h_size = 16  # Number of hidden nodes
    y_size = 1  # Single output score
D = x_size
K = h_size
theta = np.random.normal(0, 1, K + K*D + 1)
rvalue = np.random.normal(0,1,(len(train_X),y_size))
# nu = 0.1
# def getActivations(layer, stimuli):
# units = sess.run(layer, feed_dict={x: np.reshape(stimuli, [1, 784], order='F'), keep_prob: 1.0})
# plotNNFilter(units)
def init_weights(shape):
""" Weight initialization """
weights = tf.random_normal(shape,mean=0, stddev=0.00001)
return tf.Variable(weights)
def forwardprop(X, w_1, w_2):
"""
Forward-propagation.
IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
"""
X = tf.cast(X, tf.float32)
w_1 = tf.cast(w_1, tf.float32)
w_2 = tf.cast(w_2, tf.float32)
h = tf.nn.sigmoid(tf.matmul(X, w_1)) # The \sigma function
yhat = tf.matmul(h, w_2) # The \varphi function
return yhat
g = lambda x : 1/(1 + tf.exp(-x))
def nnScore(X, w, V, g):
X = tf.cast(X, tf.float32)
w = tf.cast(w, tf.float32)
V = tf.cast(V, tf.float32)
return tf.matmul(g((tf.matmul(X, w))), V)
def data_rep(X, w, V, g):
X = tf.cast(X, tf.float32)
w = tf.cast(w, tf.float32)
return g((tf.matmul(X, w)))
def relu(x):
y = tf.nn.relu(x)
return y
def ocnn_obj(theta, X, nu, w1, w2, g,r):
w = w1
V = w2
X = tf.cast(X, tf.float32)
w = tf.cast(w1, tf.float32)
V = tf.cast(w2, tf.float32)
term1 = 0.5 * tf.reduce_sum(w**2)
term2 = 0.5 * tf.reduce_sum(V**2)
term3 = 1/nu * tf.reduce_mean(relu(r - nnScore(X, w, V, g)))
term4 = -r
return term1 + term2 + term3 + term4
test_X = data_test
X = tf.placeholder("float32", shape=[None, x_size])
r = tf.get_variable("r", dtype=tf.float32,shape=(),trainable=False)
# Weight initializations
w_1 = init_weights((x_size, h_size))
w_2 = init_weights((h_size, y_size))
# Forward propagation
yhat = forwardprop(X, w_1, w_2)
predict = tf.argmax(yhat, axis=1)
# Backward propagation
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
cost = ocnn_obj(theta, X, nu, w_1, w_2, g,r)
updates = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
# Run SGD
init = tf.global_variables_initializer()
sess.run(init)
rvalue = 0.1
start_time = time.time()
for epoch in range(100):
# Train with each example
units = sess.run(updates, feed_dict={X: train_X,r:rvalue})
# plotNNFilter(units)
with sess.as_default():
w1 = w_1.eval()
w2 = w_2.eval()
rvalue = nnScore(train_X, w1, w2, g)
with sess.as_default():
rvalue = rvalue.eval()
rvalue = np.percentile(rvalue,q=100*nu)
if verbose:
print("Epoch = %d, r = %f" % (epoch + 1,rvalue))
trainTime = time.time() - start_time
with sess.as_default():
w1 = w_1.eval()
w2 = w_2.eval()
start_time = time.time()
train = nnScore(train_X, w1, w2, g)
test = nnScore(test_X, w1, w2, g)
train_rep = data_rep(train_X, w1, w2, g)
test_rep = data_rep(test_X, w1, w2, g)
testTime = time.time() - start_time
with sess.as_default():
arrayTrain = train.eval()
arrayTest = test.eval()
arraytrain_rep =train_rep.eval()
arraytest_rep= test_rep.eval()
# rstar = r.eval()
rstar =rvalue
sess.close()
print ("====== Session Completed ======")
pos_decisionScore = arrayTrain-rstar
# pos_decisionScore[pos_decisionScore< 0] = 0 ## Clip all the negative values to zero
neg_decisionScore = arrayTest-rstar
pos_decisionScore = pos_decisionScore.reshape(-1)
neg_decisionScore = neg_decisionScore.reshape(-1)
write_decisionScores2Csv(os.getcwd()+'/Decision_Scores/', 'oc_nn_sigmoid_cifar.csv',
pos_decisionScore, neg_decisionScore)
return [pos_decisionScore, neg_decisionScore,trainTime,testTime]
def tf_OneClass_NN_relu(data_train,data_test,nu, verbose=True):
tf.reset_default_graph()
sess = tf.Session()
tf.set_random_seed(RANDOM_SEED)
train_X = data_train
    x_size = train_X.shape[1]  # Number of input features
    print ("Input Shape:", x_size)
    h_size = 16  # Number of hidden nodes
    y_size = 1  # Single output score
D = x_size
K = h_size
theta = np.random.normal(0, 1, K + K*D + 1)
rvalue = np.random.normal(0,1,(len(train_X),y_size))
# nu = 0.1
def init_weights(shape):
""" Weight initialization """
weights = tf.random_normal(shape,mean=0, stddev=0.00001)
return tf.Variable(weights)
def forwardprop(X, w_1, w_2):
"""
Forward-propagation.
IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
"""
X = tf.cast(X, tf.float32)
w_1 = tf.cast(w_1, tf.float32)
w_2 = tf.cast(w_2, tf.float32)
h = tf.nn.sigmoid(tf.matmul(X, w_1)) # The \sigma function
yhat = tf.matmul(h, w_2) # The \varphi function
return yhat
g = lambda x : relu(x)
def nnScore(X, w, V, g):
X = tf.cast(X, tf.float32)
w = tf.cast(w, tf.float32)
V = tf.cast(V, tf.float32)
return tf.matmul(g((tf.matmul(X, w))), V)
def relu(x):
y = tf.nn.relu(x)
return y
def ocnn_obj(theta, X, nu, w1, w2, g,r):
w = w1
V = w2
X = tf.cast(X, tf.float32)
w = tf.cast(w1, tf.float32)
V = tf.cast(w2, tf.float32)
term1 = 0.5 * tf.reduce_sum(w**2)
term2 = 0.5 * tf.reduce_sum(V**2)
term3 = 1/nu * tf.reduce_mean(relu(r - nnScore(X, w, V, g)))
term4 = -r
return term1 + term2 + term3 + term4
# For testing the algorithm
test_X = data_test
# Symbols
X = tf.placeholder("float32", shape=[None, x_size])
r = tf.get_variable("r", dtype=tf.float32,shape=(),trainable=False)
# Weight initializations
w_1 = init_weights((x_size, h_size))
w_2 = init_weights((h_size, y_size))
# Forward propagation
yhat = forwardprop(X, w_1, w_2)
predict = tf.argmax(yhat, axis=1)
# Backward propagation
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
cost = ocnn_obj(theta, X, nu, w_1, w_2, g,r)
updates = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
# Run SGD
start_time = time.time()
init = tf.global_variables_initializer()
sess.run(init)
rvalue = 0.1
for epoch in range(100):
# Train with each example
sess.run(updates, feed_dict={X: train_X,r:rvalue})
with sess.as_default():
w1 = w_1.eval()
w2 = w_2.eval()
rvalue = nnScore(train_X, w1, w2, g)
with sess.as_default():
rvalue = rvalue.eval()
rvalue = np.percentile(rvalue,q=100*nu)
if verbose:
print("Epoch = %d, r = %f" % (epoch + 1,rvalue))
trainTime = time.time() - start_time
with sess.as_default():
w1 = w_1.eval()
w2 = w_2.eval()
start_time = time.time()
train = nnScore(train_X, w1, w2, g)
test = nnScore(test_X, w1, w2, g)
testTime = time.time() - start_time
with sess.as_default():
arrayTrain = train.eval()
arrayTest = test.eval()
rstar =rvalue
sess.close()
print ("====== Session Completed ======")
pos_decisionScore = arrayTrain-rstar
# pos_decisionScore[pos_decisionScore< 0] = 0 ## Clip all the negative values to zero
neg_decisionScore = arrayTest-rstar
pos_decisionScore = pos_decisionScore.reshape(-1)
neg_decisionScore = neg_decisionScore.reshape(-1)
    write_decisionScores2Csv(os.getcwd()+'/Decision_Scores/', 'oc_nn_relu_cifar.csv',
                             pos_decisionScore, neg_decisionScore)
return [pos_decisionScore, neg_decisionScore, trainTime, testTime]
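# A hypothetical usage sketch (assumes 2-D float arrays, rows = samples):
# pos_scores, neg_scores, train_time, test_time = tf_OneClass_NN_linear(X_train, X_test, nu=0.1)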
|
[
"noreply@github.com"
] |
LiTangqing.noreply@github.com
|
86c5a6de9d1a8c89705509b6f943d1dc8e51a68a
|
d64c8ada6b6f54662fd4846ad4d1eea51e01fe96
|
/dmlab_app/task/regression/knn_regressor.py
|
22ea93eb335068cd12da29d16a3a1157648c9600
|
[] |
no_license
|
lxk1997/dm-lab
|
be4a9ed156f5d0ec23b93c1049164c34e6b6188c
|
a8957f28bbe4875391977059032b4476e0eb9319
|
refs/heads/master
| 2023-07-14T23:53:05.740062
| 2021-09-05T12:52:24
| 2021-09-05T12:52:24
| 403,288,483
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,647
|
py
|
# coding=utf-8
import json
import logging
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from flask import url_for
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from ..base import Base
from ..dataset_utils import DatasetUtils
from ...db import get_db
from ...db.dao.evaluation import Evaluation
from ...db.dao.evaluation_file import EvaluationFile
from ...db.models import EvaluationFileModel
from ...extensions import get_file_client
from ...filesystem import get_fs
from ...utils import NpEncoder, numeric
matplotlib.use('Agg')
logging.basicConfig(
level=logging.ERROR,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class KNNRegressor(Base):
component_id = 1
menu_info_names = [{'name': "重命名"},
{'name': '删除'},
{'name': '全部运行'},
{'name': '运行到此处'},
{'name': '运行该节点'},
{'name': '从此节点运行'},
{'name': '查看数据'},
{'name': '查看日志'},
{'name': '查看报告'}]
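    # menu entries above (zh-CN): rename, delete, run all, run up to this node,
    # run this node, run from this node, view data, view logs, view report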
evaluation_info_names = [{'name': 'data', 'type': 'data'},
{'name': 'log', 'type': 'text'},
{'name': 'report', 'type': 'general'}]
def _get_evaluation_dir(self, evaluation_id):
return 'evaluations/%s' % evaluation_id
def _check_valid_params(self, logger, params=None):
if not params:
logger.exception('params is None')
return False
elif not params.get('parent_id'):
logger.exception('params has no attribute name "parent_id"')
return False
elif not params.get('selected_columns'):
logger.exception('params has no attribute name "selected_columns"')
return False
elif not params.get('target_column'):
logger.exception('params has no attribute name "target_column"')
return False
else:
return True
def execute(self, evaluation_id=None, params=None, item_id=None):
fs = get_fs()
file_client = get_file_client()
evaluation_dir = self._get_evaluation_dir(item_id)
evaluation_output_dir = fs.join(evaluation_dir, 'outputs')
if fs.isdir(fs.join(evaluation_dir, 'outputs')):
fs.rmtree(fs.join(evaluation_dir, 'outputs'))
fs.makedirs(evaluation_output_dir)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s: - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
log_path = fs.abs_path(fs.join(evaluation_output_dir, 'evaluation.log'))
result_path = fs.join(evaluation_output_dir, 'result.json')
data_path = fs.join(evaluation_output_dir, 'data.json')
overview_png = fs.join(evaluation_output_dir, 'overview.png')
fh = logging.FileHandler(log_path)
fh.setFormatter(formatter)
logger.addHandler(fh)
        success = self._check_valid_params(logger, params)
        if success:
            selected_columns = params['selected_columns']
            target_column = params['target_column']
            evaluation = Evaluation().query(item_id=params['parent_id'])[0]
evaluation_file = EvaluationFile().query(evaluation_id=evaluation['evaluation_id'],
file_path='outputs/data.json')
if evaluation_file:
parent_data_content = file_client.download(evaluation_file[0]['file_key'])
data_content = json.loads(parent_data_content)
try:
feature_rsts = self._get_feature(data_content, selected_columns)
target_rsts = self._get_target(data_content, target_column)
feature_train, feature_test, target_train, target_test = train_test_split(feature_rsts['content'],
target_rsts['content'],
test_size=0.33,
random_state=42)
dt_model = KNeighborsRegressor(n_neighbors=numeric(params['n_neighbors']),
weights=numeric(params['weights']),
p=numeric(params['p']),
algorithm=numeric(params['algorithm']),
leaf_size=numeric(params['leaf_size']),
metric=numeric(params['metric']))
dt_model.fit(feature_train, target_train)
prediction_test = dt_model.predict(feature_test)
mse = metrics.mean_squared_error(target_test, prediction_test)
rmse = np.sqrt(metrics.mean_squared_error(target_test, prediction_test))
mae = metrics.mean_absolute_error(target_test, prediction_test)
evs = metrics.explained_variance_score(target_test, prediction_test)
rsquared = metrics.r2_score(target_test, prediction_test)
json_data = {
'mse': mse,
'rmse': rmse,
'mae': mae,
'evs': evs,
'rsquared': rsquared,
'num_gt': len(target_train)
}
with fs.open(result_path, 'w') as fout:
json.dump(json_data, fout, indent=2, ensure_ascii=False)
merge_content = []
for idx, content in enumerate(feature_test):
new_content = content + [target_test[idx], prediction_test[idx]]
merge_content.append(new_content)
data = {'headers': feature_rsts['headers'] + [target_rsts['header'], 'pred'],
'content': merge_content}
with fs.open(data_path, 'w') as fout:
json.dump(data, fout, indent=2, cls=NpEncoder, ensure_ascii=False)
X_label = [str(i) for i in range(len(feature_test))]
plt.figure(figsize=(6.0, 4.0))
plt.style.use('ggplot')
plt.plot(X_label, target_test, marker='.', label='Groundtruth')
plt.plot(X_label, prediction_test, marker='.', alpha=0.7, label='Predict')
if len(feature_test) > 10 and (len(feature_test) - 1) % 10 < 5:
plt.xticks(np.linspace(0, np.ceil(len(feature_test) / 5) * 5 - 1, 5))
elif len(feature_test) > 10 and (len(feature_test) - 1) % 10 > 5:
plt.xticks(np.linspace(0, np.ceil(len(feature_test) / 10) * 10 - 1, 10))
else:
plt.xticks(np.linspace(0, len(feature_test) - 1, len(feature_test)))
plt.legend(loc='lower right', borderaxespad=0., fontsize='xx-small')
plt.title('Fitting Model(len=%d)' % len(feature_test))
plt.xlabel('Index')
plt.ylabel('Target')
plt.tight_layout()
plt.savefig(fs.abs_path(overview_png), bbox_inches='tight')
except Exception as e:
logger.exception(e)
success = False
else:
logger.exception(Exception('parent %s has no data.' % params['parent_id']))
success = False
logger.removeHandler(fh)
db = get_db()
try:
collection = file_client.get_collection()
file_paths = list()
for dirpath, dirnames, filenames in os.walk(fs.abs_path(evaluation_dir)):
for filename in filenames:
file_path = os.path.join(dirpath, filename)
r_path = os.path.relpath(file_path, fs.abs_path(evaluation_dir))
with open(file_path, 'rb') as fin:
collection.add(fin.read())
file_paths.append(r_path)
rets = file_client.upload_collection(collection)
for idx, ret in enumerate(rets):
evaluation_file = EvaluationFileModel(evaluation_id=evaluation_id,
file_path=file_paths[idx], file_key=ret.id, deleted=0)
db.add(evaluation_file)
collection.close()
# os.remove(fs.abs_path(evaluation_dir))
db.commit()
        except Exception:
db.rollback()
finally:
db.close()
return success
def get_evaluation_info_list(self, item_id, info_name, config=None, limit=None, offset=None):
assert info_name in map(lambda info_type: info_type['name'], self.evaluation_info_names)
if info_name == 'data':
return self._get_evaluation_data(item_id, limit=limit, offset=offset)
elif info_name == 'log':
return self._get_evaluation_log(item_id, limit=limit, offset=offset)
elif info_name == 'report':
return self._get_evaluation_report(item_id, limit=limit, offset=offset)
else:
raise NotImplementedError
def _get_evaluation_data(self, item_id, limit=None, offset=None):
file_client = get_file_client()
evaluation = Evaluation().query(item_id=item_id)[0]
evaluation_file = EvaluationFile().query(evaluation_id=evaluation['evaluation_id'],
file_path='outputs/data.json')
if evaluation_file:
data_content = file_client.download(evaluation_file[0]['file_key'])
datas = [{
'id': 1,
'name': 'data',
'type': 'json_str',
'data': str(data_content, encoding='utf-8')
}]
else:
datas = []
count = len(datas)
if limit is None:
limit = len(datas)
else:
limit = int(limit)
if offset is None:
offset = 0
else:
offset = int(offset)
return datas[offset:offset + limit], count, None
def _get_evaluation_log(self, item_id, limit=None, offset=None):
file_client = get_file_client()
evaluation = Evaluation().query(item_id=item_id)[0]
evaluation_file = EvaluationFile().query(evaluation_id=evaluation['evaluation_id'],
file_path='outputs/evaluation.log')
if evaluation_file:
log_content = file_client.download(evaluation_file[0]['file_key'])
logs = [{
'id': 1,
'name': 'evaluation.log',
'type': 'text',
'data': str(log_content, encoding='utf-8')
}]
else:
logs = []
count = len(logs)
if limit is None:
limit = len(logs)
else:
limit = int(limit)
if offset is None:
offset = 0
else:
offset = int(offset)
return logs[offset:offset + limit], count, None
def _get_evaluation_report(self, item_id, limit=None, offset=None):
fs = get_fs()
file_client = get_file_client()
cur = 1
reports = []
evaluation = Evaluation().query(item_id=item_id)[0]
report = EvaluationFile().query(evaluation_id=evaluation['evaluation_id'], file_path='outputs/result.json')
overview = EvaluationFile().query(evaluation_id=evaluation['evaluation_id'],
file_path='outputs/overview.png')
if report:
report_content = file_client.download(report[0]['file_key'])
json_report = json.loads(report_content)
datau = DatasetUtils()
datau.set_header(['MSE', 'RMSE', 'MAE', 'EVS', 'R-Squared', 'Num GT'])
datau.set_content([[round(json_report['mse'], 4), round(json_report['rmse'], 4),
round(json_report['mae'], 4), round(json_report['evs'], 4),
round(json_report['rsquared'], 4), json_report['num_gt']]])
reports.append({
'id': cur,
'name': 'Total',
'type': 'table',
'data': datau.format_dict()
})
cur += 1
if overview:
reports.append({
'id': cur,
'name': 'Overview',
'type': 'image',
'data': url_for('files_image.handle_get_info', path=overview[0]['file_key'])
})
cur += 1
count = len(reports)
if limit is None:
limit = len(reports)
else:
limit = int(limit)
if offset is None:
offset = 0
else:
offset = int(offset)
return reports[offset:offset + limit], count, None
def _get_feature(self, data, selected_columns):
selected_columns_idx = []
columns = []
for idx, item in enumerate(data['headers']):
if item in selected_columns:
selected_columns_idx.append(idx)
columns.append(item)
rsts = []
for idx, item in enumerate(data['content']):
rst = []
for idx1, item1 in enumerate(item):
if idx1 in selected_columns_idx:
item1 = numeric(item1)
rst.append(item1)
rsts.append(rst)
rsts = {'headers': columns, 'content': rsts}
return rsts
def _get_target(self, data, target_column):
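        # NOTE: distinct target values are mapped to sequential integer codes
        # (1, 2, ...) rather than parsed as numbers, so the regression target is
        # effectively a categorical encoding here.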
target_column_idx = 0
mp = {}
names = []
cur = 1
column = ''
for idx, item in enumerate(data['headers']):
if item == target_column:
target_column_idx = idx
column = item
rsts = []
for idx, item in enumerate(data['content']):
val = item[target_column_idx]
if mp.get(val):
rsts.append(mp.get(val))
else:
mp[val] = cur
cur += 1
names.append(val)
rsts.append(mp.get(val))
rsts = {'header': column, 'content': rsts, 'names': names}
return rsts
def get_score(self, item_id):
score_content = ''
file_client = get_file_client()
evaluation = Evaluation().query(item_id=item_id)[0]
evaluation_file = EvaluationFile().query(evaluation_id=evaluation['evaluation_id'],
file_path='outputs/result.json')
if evaluation_file:
report_content = file_client.download(evaluation_file[0]['file_key'])
json_report = json.loads(report_content)
            score_content = 'MSE: %s, MAE: %s, R2: %s' % (
                round(json_report['mse'], 2), round(json_report['mae'], 2), round(json_report['rsquared'], 2))
return score_content
def calc_score(self, score_field=None, item_id=None, cnt=None, time_value=None):
if not score_field or not item_id:
return None
else:
file_client = get_file_client()
evaluation = Evaluation().query(item_id=item_id)[0]
evaluation_file = EvaluationFile().query(evaluation_id=evaluation['evaluation_id'],
file_path='outputs/result.json')
if evaluation_file:
report_content = file_client.download(evaluation_file[0]['file_key'])
json_report = json.loads(report_content)
target = score_field['algorithm_target']
if json_report.get(target):
cur_score = json_report.get(target) * 100.0
finally_score = round((cur_score * score_field['result_ratio'] / 100.0 + ((cur_score * (100 - score_field['result_ratio']) / 100.0) ** (time_value))) * ((score_field['count_ratio'] / 100) ** (cnt+1)), 2)
return finally_score
else:
return 0
|
[
"xinkuan.liu@horizon.ai"
] |
xinkuan.liu@horizon.ai
|
c0edc0f8834f8a25754f1bca254c9c3812da98ed
|
ad1223cdadf0e7decce6f25a7676538389ebb505
|
/Sources/__init__.py
|
06a9dd8ffe260829d161c1fa1575adbff17d76fa
|
[] |
no_license
|
unix1010/rsimgroup.github.io
|
ebe978821ab3ae46f1625f8480790688bcfddc00
|
2cfbd2732329ac424b28b706fc7906cb2b57b53d
|
refs/heads/master
| 2020-03-18T11:37:14.983822
| 2018-05-03T20:16:49
| 2018-05-03T20:16:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49
|
py
|
# from . import utils
# from . import manage_main
|
[
"songhang2016@hotmail.com"
] |
songhang2016@hotmail.com
|
c8aa8df708fa14ee7771650c5ffd7b543d0c78ca
|
c66e9277898da27d9d56fab1ac5fcdc772f57f4a
|
/tests/test_modeling_flax_common.py
|
f6737d864930434b8f8d929c30a957f20f9aae28
|
[
"Apache-2.0"
] |
permissive
|
vumichien/transformers
|
47901c895cd3ce8a7c30f691dcb40bdfe7fc4030
|
75a208ef66c0176fc12a4c98922728ced5befbf9
|
refs/heads/main
| 2023-02-26T03:57:52.930111
| 2023-02-10T22:28:24
| 2023-02-10T22:28:24
| 238,600,337
| 1
| 0
|
Apache-2.0
| 2020-02-06T03:34:11
| 2020-02-06T03:34:10
| null |
UTF-8
|
Python
| false
| false
| 58,666
|
py
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import json
import random
import tempfile
import unittest
from typing import List, Tuple
import numpy as np
from huggingface_hub import HfFolder, delete_repo, set_access_token
from requests.exceptions import HTTPError
import transformers
from transformers import BertConfig, is_flax_available, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import (
TOKEN,
USER,
CaptureLogger,
is_pt_flax_cross_test,
is_staging_test,
require_flax,
torch_device,
)
from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, logging
from transformers.utils.generic import ModelOutput
if is_flax_available():
import os
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
from transformers import (
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
FLAX_MODEL_MAPPING,
FlaxAutoModel,
FlaxAutoModelForSequenceClassification,
FlaxBertModel,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.modeling_flax_utils import FLAX_WEIGHTS_INDEX_NAME, FLAX_WEIGHTS_NAME
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def _config_zero_init(config):
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key:
setattr(configs_no_init, key, 1e-10)
return configs_no_init
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values in [0, vocab_size)."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
output = np.array(values, dtype=jnp.int32).reshape(shape)
return output
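# e.g. ids_tensor((2, 5), vocab_size=100) -> int32 array of shape (2, 5), values in [0, 99]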
def floats_tensor(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.random() * scale)
return np.array(values, dtype=jnp.float32).reshape(shape)
def random_attention_mask(shape, rng=None):
attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
# make sure that at least one token is attended to for each batch
attn_mask[:, -1] = 1
return attn_mask
@require_flax
class FlaxModelTesterMixin:
model_tester = None
all_model_classes = ()
test_mismatched_shapes = True
is_encoder_decoder = False
test_head_masking = False
has_attentions = True
def _prepare_for_class(self, inputs_dict, model_class):
inputs_dict = copy.deepcopy(inputs_dict)
# hack for now until we have AutoModel classes
if "ForMultipleChoice" in model_class.__name__:
inputs_dict = {
k: jnp.broadcast_to(v[:, None], (v.shape[0], self.model_tester.num_choices, v.shape[-1]))
if isinstance(v, (jnp.ndarray, np.ndarray))
else v
for k, v in inputs_dict.items()
}
return inputs_dict
def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
diff = np.abs((a - b)).max()
self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assert_almost_equals(jnp.nan_to_num(tuple_object), jnp.nan_to_num(dict_object), 1e-5)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
# (Copied from tests.test_modeling_common.ModelTesterMixin.check_pt_flax_outputs)
def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
"""
Args:
            model_class: The class of the model that is currently being tested. For example, ..., etc.
                Currently unused, but it could make debugging easier and faster.
            name: A string specifying what `fx_outputs`/`pt_outputs` represent in the model outputs.
Currently unused, but in the future, we could use this information to make the error message clearer
by giving the name(s) of the output tensor(s) with large difference(s) between PT and Flax.
"""
self.assertEqual(type(name), str)
if attributes is not None:
self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`")
# Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`).
if isinstance(fx_outputs, ModelOutput):
self.assertTrue(
isinstance(pt_outputs, ModelOutput),
f"{name}: `pt_outputs` should an instance of `ModelOutput` when `fx_outputs` is",
)
fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None])
pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None])
self.assertEqual(fx_keys, pt_keys, f"{name}: Output keys differ between Flax and PyTorch")
# convert to the case of `tuple`
# appending each key to the current (string) `name`
attributes = tuple([f"{name}.{k}" for k in fx_keys])
self.check_pt_flax_outputs(
fx_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes
)
# Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.)
elif type(fx_outputs) in [tuple, list]:
self.assertEqual(
type(fx_outputs), type(pt_outputs), f"{name}: Output types differ between Flax and PyTorch"
)
self.assertEqual(
len(fx_outputs), len(pt_outputs), f"{name}: Output lengths differ between Flax and PyTorch"
)
if attributes is not None:
# case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`)
self.assertEqual(
len(attributes),
len(fx_outputs),
f"{name}: The tuple `attributes` should have the same length as `fx_outputs`",
)
else:
# case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name`
attributes = tuple([f"{name}_{idx}" for idx in range(len(fx_outputs))])
for fx_output, pt_output, attr in zip(fx_outputs, pt_outputs, attributes):
self.check_pt_flax_outputs(fx_output, pt_output, model_class, tol=tol, name=attr)
elif isinstance(fx_outputs, jnp.ndarray):
self.assertTrue(
                isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should be a tensor when `fx_outputs` is"
)
# Using `np.asarray` gives `ValueError: assignment destination is read-only` at the line `fx_outputs[fx_nans] = 0`.
fx_outputs = np.array(fx_outputs)
pt_outputs = pt_outputs.detach().to("cpu").numpy()
self.assertEqual(
fx_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between Flax and PyTorch"
)
# deal with NumPy's scalars to make replacing nan values by 0 work.
if np.isscalar(fx_outputs):
fx_outputs = np.array([fx_outputs])
pt_outputs = np.array([pt_outputs])
fx_nans = np.isnan(fx_outputs)
pt_nans = np.isnan(pt_outputs)
pt_outputs[fx_nans] = 0
fx_outputs[fx_nans] = 0
pt_outputs[pt_nans] = 0
fx_outputs[pt_nans] = 0
max_diff = np.amax(np.abs(fx_outputs - pt_outputs))
self.assertLessEqual(
max_diff, tol, f"{name}: Difference between PyTorch and Flax is {max_diff} (>= {tol})."
)
else:
raise ValueError(
"`fx_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `jnp.ndarray`. Got"
f" {type(fx_outputs)} instead."
)
@is_pt_flax_cross_test
def test_equivalence_pt_to_flax(self):
# It might be better to put this inside the for loop below (because we modify the config there).
# But logically, it is fine.
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# Output all for aggressive testing
config.output_hidden_states = True
config.output_attentions = self.has_attentions
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
pt_model = pt_model_class(config).eval()
# Flax models don't use the `use_cache` option and cache is not returned as a default.
# So we disable `use_cache` here for PyTorch model.
pt_model.config.use_cache = False
fx_model = model_class(config, dtype=jnp.float32)
fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
fx_model.params = fx_state
# send pytorch model to the correct device
pt_model.to(torch_device)
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs)
fx_outputs = fx_model(**prepared_inputs_dict)
fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None])
pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None])
self.assertEqual(fx_keys, pt_keys)
self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict)
fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None])
pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None])
self.assertEqual(fx_keys, pt_keys)
self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class)
@is_pt_flax_cross_test
def test_equivalence_flax_to_pt(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# Output all for aggressive testing
config.output_hidden_states = True
config.output_attentions = self.has_attentions
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
pt_model = pt_model_class(config).eval()
# Flax models don't use the `use_cache` option and cache is not returned as a default.
# So we disable `use_cache` here for PyTorch model.
pt_model.config.use_cache = False
fx_model = model_class(config, dtype=jnp.float32)
pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
# make sure weights are tied in PyTorch
pt_model.tie_weights()
# send pytorch model to the correct device
pt_model.to(torch_device)
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs)
fx_outputs = fx_model(**prepared_inputs_dict)
fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None])
pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None])
self.assertEqual(fx_keys, pt_keys)
self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(tmpdirname)
pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
# send pytorch model to the correct device
pt_model_loaded.to(torch_device)
pt_model_loaded.eval()
with torch.no_grad():
pt_outputs_loaded = pt_model_loaded(**pt_inputs)
fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None])
pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None])
self.assertEqual(fx_keys, pt_keys)
self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class)
def test_from_pretrained_save_pretrained(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
model = model_class(config)
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**prepared_inputs_dict).to_tuple()
# verify that normal save_pretrained works as expected
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
# the config file (and the generation config file, if it can generate) should be saved
self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME)))
self.assertEqual(
model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME))
)
model_loaded = model_class.from_pretrained(tmpdirname)
outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple()
for output_loaded, output in zip(outputs_loaded, outputs):
self.assert_almost_equals(output_loaded, output, 1e-3)
# verify that save_pretrained for distributed training
# with `params=params` works as expected
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, params=model.params)
model_loaded = model_class.from_pretrained(tmpdirname)
outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple()
for output_loaded, output in zip(outputs_loaded, outputs):
self.assert_almost_equals(output_loaded, output, 1e-3)
def test_save_load_from_base(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = base_class(config)
base_params = flatten_dict(unfreeze(model.params))
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
head_model = model_class.from_pretrained(tmpdirname)
base_param_from_head = flatten_dict(unfreeze(head_model.params[head_model.base_model_prefix]))
for key in base_param_from_head.keys():
max_diff = (base_params[key] - base_param_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def test_save_load_to_base(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = model_class(config)
base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix]))
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
base_model = base_class.from_pretrained(tmpdirname)
base_params = flatten_dict(unfreeze(base_model.params))
for key in base_params_from_head.keys():
max_diff = (base_params[key] - base_params_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
@is_pt_flax_cross_test
def test_save_load_from_base_pt(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = base_class(config)
base_params = flatten_dict(unfreeze(model.params))
# convert Flax model to PyTorch model
pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning
pt_model = pt_model_class(config).eval()
pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params)
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
# save pt model
pt_model.save_pretrained(tmpdirname)
head_model = model_class.from_pretrained(tmpdirname, from_pt=True)
base_param_from_head = flatten_dict(unfreeze(head_model.params[head_model.base_model_prefix]))
for key in base_param_from_head.keys():
max_diff = (base_params[key] - base_param_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
@is_pt_flax_cross_test
def test_save_load_to_base_pt(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = model_class(config)
base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix]))
# convert Flax model to PyTorch model
pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning
pt_model = pt_model_class(config).eval()
pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params)
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
base_model = base_class.from_pretrained(tmpdirname, from_pt=True)
base_params = flatten_dict(unfreeze(base_model.params))
for key in base_params_from_head.keys():
max_diff = (base_params[key] - base_params_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
@is_pt_flax_cross_test
def test_save_load_bf16_to_base_pt(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = model_class(config)
model.params = model.to_bf16(model.params)
base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix]))
# convert Flax model to PyTorch model
pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning
pt_model = pt_model_class(config).eval()
pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params)
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
base_model = base_class.from_pretrained(tmpdirname, from_pt=True)
base_params = flatten_dict(unfreeze(base_model.params))
for key in base_params_from_head.keys():
max_diff = (base_params[key] - base_params_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def test_jit_compilation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@jax.jit
def model_jitted(input_ids, attention_mask=None, **kwargs):
return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
with self.subTest("JIT Enabled"):
jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs, outputs):
self.assertEqual(jitted_output.shape, output.shape)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = [
"input_ids",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["input_ids", "attention_mask"]
self.assertListEqual(arg_names[:2], expected_arg_names)
def test_naming_convention(self):
for model_class in self.all_model_classes:
model_class_name = model_class.__name__
module_class_name = (
model_class_name[:-5] + "Module" if model_class_name[-5:] == "Model" else model_class_name + "Module"
)
bert_modeling_flax_module = __import__(model_class.__module__, fromlist=[module_class_name])
module_cls = getattr(bert_modeling_flax_module, module_class_name)
self.assertIsNotNone(module_cls)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
if hasattr(self.model_tester, "encoder_seq_length"):
seq_length = self.model_tester.encoder_seq_length
else:
seq_length = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
if config.is_encoder_decoder:
hidden_states = outputs.decoder_hidden_states
self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(hidden_states), expected_num_layers)
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[decoder_seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_attention_outputs(self):
if not self.has_attentions:
self.skipTest(reason="Model does not output attentions")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_length = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length)
decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
if self.is_encoder_decoder:
correct_outlen = 5
# Question Answering model returns start_logits and end_logits
if model_class in get_values(FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
correct_outlen += 1 # start_logits and end_logits instead of only 1 output
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_load_with_mismatched_shapes(self):
if not self.test_mismatched_shapes:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
if model_class not in get_values(FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
continue
with self.subTest(msg=f"Testing {model_class}"):
with tempfile.TemporaryDirectory() as tmp_dir:
model = model_class(config)
model.save_pretrained(tmp_dir)
# Fails when we don't set ignore_mismatched_sizes=True
with self.assertRaises(ValueError):
new_model = FlaxAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42)
with self.assertRaises(ValueError):
new_model_without_prefix = FlaxAutoModel.from_pretrained(tmp_dir, vocab_size=10)
logger = logging.get_logger("transformers.modeling_flax_utils")
with CaptureLogger(logger) as cl:
new_model = FlaxAutoModelForSequenceClassification.from_pretrained(
tmp_dir, num_labels=42, ignore_mismatched_sizes=True
)
self.assertIn("the shapes did not match", cl.out)
logits = new_model(**inputs_dict)["logits"]
self.assertEqual(logits.shape[1], 42)
with CaptureLogger(logger) as cl:
new_model_without_prefix = FlaxAutoModel.from_pretrained(
tmp_dir, vocab_size=10, ignore_mismatched_sizes=True
)
self.assertIn("the shapes did not match", cl.out)
input_ids = ids_tensor((2, 8), 10)
if self.is_encoder_decoder:
new_model_without_prefix(input_ids, decoder_input_ids=input_ids)
else:
new_model_without_prefix(input_ids)
def test_default_params_dtype(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# check if all params are still in float32 when dtype of computation is half-precision
model = model_class(config, dtype=jnp.float16)
types = jax.tree_util.tree_map(lambda x: x.dtype, model.params)
types = flatten_dict(types)
for name, type_ in types.items():
                self.assertEqual(type_, jnp.float32, msg=f"param {name} is not initialized in fp32.")
def test_to_bf16(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# cast all params to bf16
params = model.to_bf16(model.params)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
# test if all params are in bf16
for name, type_ in types.items():
self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.")
# test masking
flat_params = flatten_dict(params)
key = random.choice(list(flat_params.keys())) # choose a random param
mask = {path: path != key for path in flat_params} # don't cast the key
mask = unflatten_dict(mask)
params = model.to_bf16(model.params, mask)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
# test if all params are in bf16 except key
for name, type_ in types.items():
if name == key:
self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.")
else:
self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.")
def test_to_fp16(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# cast all params to fp16
params = model.to_fp16(model.params)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
# test if all params are in fp16
for name, type_ in types.items():
self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.")
# test masking
flat_params = flatten_dict(params)
key = random.choice(list(flat_params.keys())) # choose a random param
mask = {path: path != key for path in flat_params} # don't cast the key
mask = unflatten_dict(mask)
params = model.to_fp16(model.params, mask)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
# test if all params are in fp16 except key
for name, type_ in types.items():
if name == key:
self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.")
else:
self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.")
def test_to_fp32(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# cast all params to fp16 and back to fp32
params = model.to_fp16(model.params)
params = model.to_fp32(params)
# test if all params are in fp32
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
for name, type_ in types.items():
self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.")
# test masking
flat_params = flatten_dict(params)
key = random.choice(list(flat_params.keys())) # choose a random param
mask = {path: path != key for path in flat_params} # don't cast the key
mask = unflatten_dict(mask)
# cast to fp16 and back to fp32 with mask
params = model.to_fp16(model.params)
params = model.to_fp32(params, mask)
# test if all params are in fp32 except key
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
for name, type_ in types.items():
if name == key:
self.assertEqual(type_, jnp.float16, msg=f"param {name} should be in fp16.")
else:
self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.")
def test_save_load_in_fp16(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# convert weights to fp16 and save
params = model.to_fp16(model.params)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, params=params)
# load the weights again and check if they are still in fp16
model = model_class.from_pretrained(tmpdirname)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params))
for name, type_ in types.items():
self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.")
def test_save_load_in_bf16(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# convert weights to bf16 and save
params = model.to_bf16(model.params)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, params=params)
                # load the weights again and check if they are still in bf16
model = model_class.from_pretrained(tmpdirname)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params))
for name, type_ in types.items():
self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.")
def test_model_main_input_name(self):
for model_class in self.all_model_classes:
model_signature = inspect.signature(getattr(model_class, "__call__"))
# The main input is the name of the argument after `self`
observed_main_input_name = list(model_signature.parameters.keys())[1]
self.assertEqual(model_class.main_input_name, observed_main_input_name)
def test_headmasking(self):
if not self.test_head_masking:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
def _prepare_layer_head_mask(i, attention_heads, num_hidden_layers):
if i == 0:
return np.concatenate([np.zeros(1, dtype=jnp.int32), np.ones(attention_heads - 1, dtype=jnp.int32)])
if i == num_hidden_layers - 1:
return np.concatenate([np.zeros(attention_heads - 1, dtype=jnp.int32), np.ones(1, dtype=jnp.int32)])
return np.ones(attention_heads, dtype=jnp.int32)
for model_class in self.all_model_classes:
model = model_class(config)
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
inputs = self._prepare_for_class(inputs_dict, model_class).copy()
# Prepare head mask
inputs["head_mask"] = np.stack(
[
_prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers)
for i in range(config.num_hidden_layers)
]
)
outputs = model(**inputs)
def _check_attentions_validity(attentions):
# Remove NaN
for t in attentions:
# Check we don't have more than 25% nans (arbitrary)
self.assertLess(np.isnan(t).sum(), t.size / 4)
attentions = [np.where(np.isnan(t), 0.0, t) for t in attentions]
self.assertAlmostEqual(attentions[0][..., 0, :, :].sum(), 0.0)
self.assertNotEqual(attentions[0][..., -1, :, :].sum(), 0.0)
            if len(attentions) > 2:  # encoder-decoder models have only 2 layers in each module
self.assertNotEqual(attentions[1][..., 0, :, :].sum(), 0.0)
self.assertAlmostEqual(attentions[-1][..., -2, :, :].sum(), 0.0)
self.assertNotEqual(attentions[-1][..., -1, :, :].sum(), 0.0)
if model.config.is_encoder_decoder:
raise NotImplementedError("The test has not been implemented for encoder-decoder models yet.")
else:
_check_attentions_validity(outputs.attentions)
def test_no_automatic_init(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
model = model_class(config, _do_init=False)
            # Check that accessing params raises a ValueError when _do_init is False
with self.assertRaises(ValueError):
params = model.params
            # Check if params can be properly initialized when calling init_weights
params = model.init_weights(model.key, model.input_shape)
self.assertIsInstance(params, FrozenDict)
            # Check if all required params are initialized
keys = set(flatten_dict(unfreeze(params)).keys())
self.assertTrue(all(k in keys for k in model.required_params))
# Check if the shapes match
flat_params = flatten_dict(unfreeze(params))
for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items():
self.assertEqual(
v.shape,
flat_params[k].shape,
"Shapes of {} do not match. Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape),
)
            # Check that setting params raises a ValueError when _do_init is False
with self.assertRaises(ValueError):
model.params = params
# Check if we can do a forward pass
inputs_dict["output_hidden_states"] = True
inputs = self._prepare_for_class(inputs_dict, model_class).copy()
model(**inputs, params=params)
def test_from_pretrained_with_no_automatic_init(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
def _assert_all_params_initialised(model, params):
            # Check if all required params are loaded
keys = set(flatten_dict(unfreeze(params)).keys())
self.assertTrue(all(k in keys for k in model.required_params))
# Check if the shapes match
flat_params = flatten_dict(unfreeze(params))
for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items():
self.assertEqual(
v.shape,
flat_params[k].shape,
"Shapes of {} do not match. Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape),
)
for model_class in self.all_model_classes:
# init the model
model = model_class(config)
# save the model in the temporary directory
# load the saved model with _do_init=False
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model, params = model_class.from_pretrained(tmpdirname, _do_init=False)
            # Check that accessing params raises a ValueError when _do_init is False
with self.assertRaises(ValueError):
params = model.params
            # Check if all required params are loaded
_assert_all_params_initialised(model, params)
            # Check that setting params raises a ValueError when _do_init is False
with self.assertRaises(ValueError):
model.params = params
# Check if init_weights initializes missing keys from from_pretrained
flat_params = flatten_dict(unfreeze(params))
random_key = random.choice(list(flat_params.keys()))
flat_params.pop(random_key)
params = freeze(unflatten_dict(flat_params))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, params=params)
model, params = model_class.from_pretrained(tmpdirname, _do_init=False)
params = model.init_weights(model.key, model.input_shape, params=params)
                # Check if all required params are loaded
_assert_all_params_initialised(model, params)
def test_checkpoint_sharding_from_hub(self):
model = FlaxBertModel.from_pretrained("ArthurZ/flax-tiny-random-bert-sharded")
# the model above is the same as the model below, just a sharded version.
ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(ref_model.params).values()):
assert np.allclose(np.array(p1), np.array(p2))
def test_checkpoint_sharding_local(self):
model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
with tempfile.TemporaryDirectory() as tmp_dir:
# We use the same folder for various sizes to make sure a new save erases the old checkpoint.
for max_size in ["150kB", "150kiB", "200kB", "200kiB"]:
model.save_pretrained(tmp_dir, max_shard_size=max_size)
# Get each shard file and its size
shard_to_size = {}
for shard in os.listdir(tmp_dir):
if shard.endswith(".msgpack"):
shard_file = os.path.join(tmp_dir, shard)
shard_to_size[shard_file] = os.path.getsize(shard_file)
index_file = os.path.join(tmp_dir, FLAX_WEIGHTS_INDEX_NAME)
# Check there is an index but no regular weight file
self.assertTrue(os.path.isfile(index_file))
self.assertFalse(os.path.isfile(os.path.join(tmp_dir, FLAX_WEIGHTS_NAME)))
# Check a file is bigger than max_size only when it has a single weight
for shard_file, size in shard_to_size.items():
if max_size.endswith("kiB"):
max_size_int = int(max_size[:-3]) * 2**10
else:
max_size_int = int(max_size[:-2]) * 10**3
                    # Note: serialization adds some overhead, so a shard can end up slightly bigger
                    # than the size asked for (since we only count parameter bytes)
if size >= max_size_int + 50000:
with open(shard_file, "rb") as state_f:
state_file = from_bytes(FlaxBertModel, state_f.read())
self.assertEqual(len(state_file), 1)
# Check the index and the shard files found match
with open(index_file, "r", encoding="utf-8") as f:
index = json.loads(f.read())
all_shards = set(index["weight_map"].values())
shards_found = set(f for f in os.listdir(tmp_dir) if f.endswith(".msgpack"))
self.assertSetEqual(all_shards, shards_found)
# Finally, check the model can be reloaded
new_model = FlaxBertModel.from_pretrained(tmp_dir)
for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(new_model.params).values()):
self.assertTrue(np.allclose(np.array(p1), np.array(p2)))
@is_pt_flax_cross_test
def test_from_sharded_pt(self):
model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded", from_pt=True)
ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-fx-only")
for key, ref_val in flatten_dict(ref_model.params).items():
val = flatten_dict(model.params)[key]
assert np.allclose(np.array(val), np.array(ref_val))
def test_gradient_checkpointing(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
remat_model = model_class(config)
try:
remat_model.enable_gradient_checkpointing()
except NotImplementedError:
continue
outputs = model(**prepared_inputs_dict)
remat_outputs = remat_model(**prepared_inputs_dict)
# ensure that the dicts of outputs contain the same keys
self.assertEqual(outputs.keys(), remat_outputs.keys())
outputs = outputs.to_tuple()
remat_outputs = remat_outputs.to_tuple()
# ensure that the outputs remain precisely equal
for output, remat_output in zip(outputs, remat_outputs):
self.assertTrue((output == remat_output).all())
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._token = TOKEN
set_access_token(TOKEN)
HfFolder.save_token(TOKEN)
@classmethod
def tearDownClass(cls):
try:
delete_repo(token=cls._token, repo_id="test-model-flax")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
except HTTPError:
pass
def test_push_to_hub(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = FlaxBertModel(config)
model.push_to_hub("test-model-flax", use_auth_token=self._token)
new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
base_params = flatten_dict(unfreeze(model.params))
new_params = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
max_diff = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
# Reset repo
delete_repo(token=self._token, repo_id="test-model-flax")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)
new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
base_params = flatten_dict(unfreeze(model.params))
new_params = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
max_diff = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def test_push_to_hub_in_organization(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = FlaxBertModel(config)
model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)
new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
base_params = flatten_dict(unfreeze(model.params))
new_params = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
max_diff = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
)
new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
base_params = flatten_dict(unfreeze(model.params))
new_params = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
max_diff = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model1, model2):
models_are_equal = True
flat_params_1 = flatten_dict(model1.params)
flat_params_2 = flatten_dict(model2.params)
for key in flat_params_1.keys():
if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
models_are_equal = False
return models_are_equal
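# Illustrative usage (an assumption): check_models_equal compares two Flax models
# parameter by parameter with a 1e-4 absolute-sum tolerance, e.g.
#
#   a = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
#   b = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
#   assert check_models_equal(a, b)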
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
def test_model_from_pretrained_subfolder(self):
config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
model = FlaxBertModel(config)
subfolder = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(tmp_dir, subfolder))
with self.assertRaises(OSError):
_ = FlaxBertModel.from_pretrained(tmp_dir)
model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
self.assertTrue(check_models_equal(model, model_loaded))
def test_model_from_pretrained_subfolder_sharded(self):
config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
model = FlaxBertModel(config)
subfolder = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")
with self.assertRaises(OSError):
_ = FlaxBertModel.from_pretrained(tmp_dir)
model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
self.assertTrue(check_models_equal(model, model_loaded))
def test_model_from_pretrained_hub_subfolder(self):
subfolder = "bert"
model_id = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(OSError):
_ = FlaxBertModel.from_pretrained(model_id)
model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
self.assertIsNotNone(model)
def test_model_from_pretrained_hub_subfolder_sharded(self):
subfolder = "bert"
model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(OSError):
_ = FlaxBertModel.from_pretrained(model_id)
model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
self.assertIsNotNone(model)
|
[
"noreply@github.com"
] |
vumichien.noreply@github.com
|
d0684e191884794bcca60c9a003d3a736017998e
|
f8ece22d9e9e12e2cbca56d72a6b2728ba9a275a
|
/polyaxon/experiments/utils.py
|
50329e5e6fe312b3cb5120c878e85833117c63a9
|
[
"MIT"
] |
permissive
|
pparan/polyaxon
|
8c8912f9ba724e007357efcaefeab86fec2d5630
|
423199721e90431209b00c0f76caa6b4f9aa4b24
|
refs/heads/master
| 2021-04-15T07:15:19.701268
| 2018-03-21T11:59:12
| 2018-03-21T11:59:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 719
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from experiments.models import Experiment
def is_experiment_still_running(experiment_id=None, experiment_uuid=None):
if not any([experiment_id, experiment_uuid]) or all([experiment_id, experiment_uuid]):
raise ValueError('`is_experiment_still_running` function expects an experiment id or uuid.')
try:
if experiment_uuid:
experiment = Experiment.objects.get(uuid=experiment_uuid)
else:
experiment = Experiment.objects.get(id=experiment_id)
except Experiment.DoesNotExist:
return False
if not experiment.is_running:
return False
return True
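# Illustrative usage (an assumption, not part of the original module; the uuid below
# is hypothetical):
#
#   if is_experiment_still_running(experiment_uuid="some-experiment-uuid"):
#       pass  # e.g. skip rescheduling, the experiment is already running
#
# Passing both identifiers, or neither, raises ValueError by design.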
|
[
"mouradmourafiq@gmail.com"
] |
mouradmourafiq@gmail.com
|
70235bf4ad6c09ac4527f137fb5253197aebb19a
|
2cf46d2c5fbd886ee919f30d394aa659ec0cb955
|
/spider/Storage/storage.py
|
fd52a4135d7180e934a564c69c4a703bcd6887ac
|
[] |
no_license
|
tinice/spider
|
6476496ea69ecd99ff20631faca5f0330b8a1d34
|
443745d67997f9be087473725ac06e8a5e91e043
|
refs/heads/master
| 2021-01-20T03:41:41.727420
| 2017-09-15T07:44:29
| 2017-09-15T07:44:29
| 101,365,342
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
# -*- coding:utf-8 -*-
from DB import Db
db = Db()
def insertdb(sql):
'''
    Insert data into the database.
    :param sql: the SQL statement that inserts the data
    :return:
'''
db.insert(sql)
def inserttxt(path, info):
'''
    Write data to a txt file.
    :param path: path of the txt file
    :param info: the data to write to the file
:return:
'''
with open(path, 'a+') as f:
f.write(info + '\n')
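# Illustrative usage (an assumption; the SQL statement and path below are hypothetical):
#
#   insertdb("INSERT INTO items (title) VALUES ('example')")
#   inserttxt("./items.txt", "example line")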
|
[
"373826647@qq.com"
] |
373826647@qq.com
|
cb148cc1c8ad8192978a32dd0a8c43dadbfb98c9
|
939df41eecef155bdde01d001ab25a13dc3e47a7
|
/7. 문자열/7-9 크로아티아 알파벳.py
|
8f2cb79d0b22ae6c0cce6f2778b4b2c6eba1b329
|
[] |
no_license
|
m16khb/Baekjoon
|
533d3b7e2631cf60f2fba9fd3701c1ff66d95412
|
2f76d6ef19d05f91295b6876f9034b62183ed7be
|
refs/heads/master
| 2020-06-21T09:29:06.690856
| 2019-09-12T11:34:04
| 2019-09-12T11:34:04
| 197,408,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
croatia_alpha = ["c=", "c-", "dz=", "d-", "lj", "nj", "s=", "z="]
voca = input()
for i in croatia_alpha:
voca = voca.replace(i, '*')
print(len(voca))
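# Worked example (illustrative): for voca = "ljes=njak" the loop replaces
# "lj" -> "*", then "nj" -> "*", then "s=" -> "*", giving "*e**ak", so the
# program prints 6; each Croatian digraph counts as one letter.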
|
[
"43867832+m16khb@users.noreply.github.com"
] |
43867832+m16khb@users.noreply.github.com
|
0dc4ec6f3ce082513e780ac20a9984667130b011
|
efe1928f5f3a38e37e6127c38f3ddab4f3953b41
|
/artifacts/frozen_models/tf_run_const_folding.py
|
f9d7a069be842244a314a72c2d58d5cd8fefc680
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
xiezhq-hermann/nnfusion
|
8cdf0b6d03b6a43a781caa061e0b7b3e4caed45e
|
1b2c23b0732bee295e37b990811719e0c4c6e993
|
refs/heads/osdi20_artifact
| 2023-01-19T06:25:09.755500
| 2020-09-05T09:49:54
| 2020-09-05T09:49:54
| 293,049,048
| 0
| 0
|
MIT
| 2020-09-23T07:43:08
| 2020-09-05T10:01:08
| null |
UTF-8
|
Python
| false
| false
| 3,062
|
py
|
#!/usr/bin/env python3
import tensorflow as tf
import os, sys
import argparse
import numpy as np
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
from tensorflow.core.framework.tensor_pb2 import TensorProto
from tensorflow.core.framework.tensor_shape_pb2 import TensorShapeProto
from tensorflow.tools import graph_transforms
tf.reset_default_graph()
parser = argparse.ArgumentParser()
parser.add_argument('--file', type=str, default='./frozen_graph.pb', help='The file name of the frozen graph.')
args = parser.parse_args()
if not os.path.exists(args.file):
parser.exit(1, 'The specified file does not exist: {}'.format(args.file))
graph_def = None
graph = None
print('Loading graph definition ...', file=sys.stderr)
try:
with tf.gfile.GFile(args.file, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
except BaseException as e:
parser.exit(2, 'Error loading the graph definition: {}'.format(str(e)))
print('Importing graph ...', file=sys.stderr)
try:
assert graph_def is not None
with tf.Graph().as_default() as graph: # type: tf.Graph
tf.import_graph_def(
graph_def,
input_map=None,
return_elements=None,
name='',
op_dict=None,
producer_op_list=None
)
except BaseException as e:
parser.exit(2, 'Error importing the graph: {}'.format(str(e)))
print()
print('Placeholders:')
assert graph is not None
ops = graph.get_operations() # type: Iterable[tf.Operation]
input_nodes = []
last_nodes = []
for op in ops:
if op.type == 'Placeholder':
for tensor in op.outputs:
print('- {0:20s} {1}'.format("Tensor", tensor.name))
input_nodes.append(tensor.name)
print()
print('Sinks (operations without outputs):')
last_outputs = []
num_nodes = len(ops)
name2nodeIdx_map = {}
for i in range(num_nodes):
name2nodeIdx_map[ops[i].name] = i
node_outputs_ = [[] for i in range(num_nodes)]
for n in range(num_nodes):
# if len(ops[n].outputs) > 0:
# last_outputs.append(ops[n].outputs[0])
op = ops[n]
pending_count = len(op.inputs)
for i in range(pending_count):
input_name_id = op.inputs[i].name.split(':')
node_outputs_[name2nodeIdx_map[input_name_id[0]]].append(n)
for n in range(num_nodes):
if len(node_outputs_[n]) == 0 and ops[n].type != 'NoOp' and ops[n].type != 'Assert':
print('- {0:20s} {1}'.format(ops[n].type, ops[n].name))
last_outputs.append(ops[n].outputs[0].name)
g_def_const = graph_transforms.TransformGraph(graph_def, input_nodes, last_outputs, ["fold_constants", "strip_unused_nodes", "merge_duplicate_nodes", "sort_by_execution_order"])
print()
folded_graph = args.file[:-3] + ".const_folded.pb"
print("Saving Const-folded Graph... as " + folded_graph)
graph_io.write_graph(as_text=False, name=folded_graph, logdir="./", graph_or_graph_def=g_def_const)
print("Finished.")
|
[
"Wenxiang.Hu@microsoft.com"
] |
Wenxiang.Hu@microsoft.com
|
69a94fcce7180c19ab60d3cc3b9efd0c293cb623
|
8763ffa1e3319fa5cb454c4965d981a6b4784c16
|
/arvan.py
|
c057d4bf5088d668e494ae753860d3906833a4b8
|
[] |
no_license
|
alireza-roshanasan/arvan_vod
|
73d4a1f7d51b1b35cf4d9ea378c1e9a95e64ba34
|
0b553e1b4283cf608a219a333fa16344559f75f9
|
refs/heads/master
| 2022-12-30T06:46:38.075228
| 2020-10-16T12:33:17
| 2020-10-16T12:33:17
| 303,091,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,776
|
py
|
import argparse
import base64
import json
import os
import magic
import requests
from dotenv import load_dotenv
from requests.models import PreparedRequest
# TODO add description for arguments
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--channel", required=True)
parser.add_argument("-m", "--mode", required=False)
parser.add_argument("-cm", "--convert_mode", required=False)
parser.add_argument("-g", "--per_page", required=False)
parser.add_argument("-t", "--title", required=False)
parser.add_argument("-f", "--file", required=False)
parser.add_argument("-o", "--out", required=False)
parser.add_argument("-p", "--postfix", required=False)
parser.add_argument("-d", "--dir", required=False)
parser.add_argument("-s", "--second", required=False)
args = parser.parse_args()
load_dotenv()
class UploadVideo:
def __init__(self, channel, title=0, file_path=0, convert_mode=0, thumb_time=0):
self.key = os.getenv("key")
self.channel = channel
if args.mode == "list":
return
self.mode = convert_mode if convert_mode else "auto"
self.file_name = os.path.basename(file_path)
self.title = title if title else self.file_name.split(".")[0]
self.file_path = file_path
self.thumb_time = thumb_time
file_name = self.file_name.encode("utf-8")
base64_bytes = base64.b64encode(file_name)
self.filename_base64 = base64_bytes.decode("utf-8")
file_type = str(magic.from_file(self.file_path, mime=True)).encode("ascii")
base64_bytes = base64.b64encode(file_type)
self.filetype_base64 = base64_bytes.decode("ascii")
self.file_size = os.path.getsize(self.file_path)
def GetChannels(self):
url = "https://napi.arvancloud.com/vod/2.0/channels"
        res = requests.get(url=url, headers={"Authorization": self.key})
        return res.json()  # hand the channel list back to the caller
def GetChannelVideos(self):
url = f"https://napi.arvancloud.com/vod/2.0/channels/{self.channel}/videos"
headers = {
"Authorization": self.key,
}
params = {"per_page": args.per_page if args.per_page else 1000}
req = PreparedRequest()
req.prepare_url(url, params)
res = requests.get(req.url, headers=headers)
print(json.dumps(res.json(), indent=4, sort_keys=True, ensure_ascii=False))
if args.out:
with open(args.out, "w+", encoding="utf-8") as out:
out.write(
json.dumps(res.json(), indent=4, sort_keys=True, ensure_ascii=False)
)
def GetLink(self):
url = f"https://napi.arvancloud.com/vod/2.0/channels/{self.channel}/files"
headers = {
"Authorization": self.key,
"tus-resumable": "1.0.0",
"upload-length": str(self.file_size),
"upload-metadata": f"filename {self.filename_base64},filetype {self.filetype_base64}",
}
res = requests.post(url=url, headers=headers)
assert res.headers.get("Location") != None
self.upload_location = res.headers.get("Location")
return self.upload_location
def UploadFile(self):
upload_url = self.GetLink()
headers = {
"Authorization": self.key,
"tus-resumable": "1.0.0",
"upload-offset": "0",
"Content-Type": "application/offset+octet-stream",
}
with open(self.file_path, "rb") as upload_file:
print(f"start upload {self.file_path}")
res = requests.patch(url=upload_url, headers=headers, data=upload_file)
assert res.status_code == 204
def CreateVideo(self):
self.UploadFile()
url = f"https://napi.arvancloud.com/vod/2.0/channels/{self.channel}/videos"
headers = {
"Authorization": self.key,
}
data = {
"title": self.title.encode("utf-8"),
"file_id": self.upload_location.split("/")[-1],
"convert_mode": self.mode,
"parallel_convert": False,
"thumbnail_time": 1,
}
res = requests.post(url=url, headers=headers, json=data)
if res.status_code == 201:
print(f"{self.file_name} uploaded\n\n")
else:
print(res)
if args.mode == "list":
up = UploadVideo(args.channel)
up.GetChannelVideos()
elif args.dir:
for f in os.listdir(args.dir):
f = os.path.join("./", args.dir, f)
if f.endswith(args.postfix):
print(f)
up = UploadVideo(
args.channel, args.title, f, args.convert_mode, args.second
)
up.CreateVideo()
else:
up = UploadVideo(
args.channel, args.title, args.file, args.convert_mode, args.second
)
up.CreateVideo()
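# Example invocations (an assumption; the ArvanCloud API key must be provided as
# "key" in a .env file, picked up by load_dotenv above):
#
#   python arvan.py -c CHANNEL_ID -m list -o videos.json       # dump channel videos
#   python arvan.py -c CHANNEL_ID -f ./lecture.mp4 -t Lecture  # upload one file
#   python arvan.py -c CHANNEL_ID -d ./videos -p .mp4          # upload a directory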
|
[
"alireza.roshanasan@gmail.com"
] |
alireza.roshanasan@gmail.com
|
5162e0e76235f80446539cc0e35cdbaaad258084
|
c74c714007b0ab223af61787bcfd3efb51cd313b
|
/sequencer_orig.py
|
a47b096c69e904bf435c2d0ada401821d57ac77f
|
[] |
no_license
|
nonagonal/fs2017
|
e3814492f3e19d82e0f8119bb2b713f9e8dce7bb
|
d2c850dcff59cffed8e381347ceaaa0bd95c6439
|
refs/heads/master
| 2021-01-19T17:17:02.195544
| 2017-04-15T02:12:56
| 2017-04-15T02:12:56
| 88,317,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,710
|
py
|
"""Drum sequencer app."""
from __future__ import print_function
#import ipdb as pdb # pylint: disable=W0611
import pygame
import os
# pylint: disable=E0611
from pygame.constants import QUIT, KEYDOWN, K_ESCAPE, USEREVENT, MOUSEBUTTONDOWN
# This is the folder where this file is, we look for resources there too
MAIN_DIR = os.path.split(os.path.abspath(__file__))[0]
# This is the file where Mathematica writes our input
INPUT_PATH = '/tmp/seqinput.txt'
# Window size
WINDOW_WIDTH, WINDOW_HEIGHT = 730, 380
WINDOW_TITLE = 'FS2017 Sequencer'
# Sequencer width and height
STEPS, TRACKS = 16, 8
SAMPLES = 9
PITCHES = 4
PITCHED_SAMPLES = (8,)
# Tempo in bpm and ms per step
TEMPO_BPM = 120
TEMPO_MS_PER_STEP = int(round(60.0 * 1000 / TEMPO_BPM / 4))
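# e.g. at 120 bpm each 16th-note step lasts 60 * 1000 / 120 / 4 = 125 ms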
# Grid upper-left corner
GRID_LEFT, GRID_TOP = 30, 30
# Pixel size of a single step
STEP_EDGE = 40
# Colors for our display elements and sounds
GRID_COLOR = (140, 140, 140)
GRID_QUARTER_NOTE_COLOR = (255, 255, 255)
METRONOME_COLOR = (255, 0, 255)
RECORD_INACTIVE_COLOR = (100, 100, 100)
RECORD_ACTIVE_COLOR = (200, 40, 100)
COLORS = ((0, 0, 0),
(255, 0, 0), (0, 0, 255), (0, 255, 0), (255, 255, 0),
(128, 0, 0), (0, 0, 128), (0, 128, 0), (128, 128, 0),
(214, 137, 0))
# Timer events
EVENT_PLAY = USEREVENT
EVENT_CHECK_INPUT = USEREVENT + 1
def update_pattern(record_track, pattern, pitch_pattern, pos, button):
"""Update the pattern based on the click at the given position, return new record track."""
step = (pos[0] - GRID_LEFT) // STEP_EDGE
track = (pos[1] - GRID_TOP) // STEP_EDGE
if 0 <= step < STEPS and 0 <= track < TRACKS:
if button == 1:
# Cycle sample on left-click
pattern[track][step] = (pattern[track][step] + 1) % (SAMPLES + 1)
elif button == 3:
# Cycle pitch on right-click
pitch_pattern[track][step] = (pitch_pattern[track][step] + 1) % PITCHES
elif step >= STEPS and 0 <= track < TRACKS:
if button == 1:
            # Set record track on left-click
record_track = track
elif button == 3:
# Clear track on right-click
pattern[track] = [0] * STEPS
return record_track
def draw_pattern(screen, record_track, pattern, pitch_pattern, current_step):
"""Draw our pattern to the screen."""
# Draw metronome at the top
for step in range(STEPS):
color = METRONOME_COLOR if step == current_step else (0, 0, 0)
rect = (GRID_LEFT + step * STEP_EDGE, GRID_TOP - STEP_EDGE // 2, STEP_EDGE, STEP_EDGE // 2)
screen.fill(color, rect=rect)
# Draw a square for each cell in our pattern, fill it with the pattern's color
for track in range(TRACKS):
for step in range(STEPS):
sound = pattern[track][step]
sound_index = sound - 1
color = COLORS[sound]
if sound_index in PITCHED_SAMPLES:
pitch = pitch_pattern[track][step] + 1
else:
pitch = PITCHES
# Draw outline
outline = (GRID_LEFT + step * STEP_EDGE, GRID_TOP + track * STEP_EDGE,
STEP_EDGE, STEP_EDGE)
pygame.draw.rect(screen, GRID_COLOR, outline, 2)
# Erase, then draw fill based on pitch
fill = (outline[0] + 2, outline[1] + 2, outline[2] - 3, outline[3] - 3)
screen.fill(COLORS[0], rect=fill)
height = int(round(fill[3] * float(pitch) / PITCHES))
fill = (fill[0], fill[1] + fill[3] - height, fill[2], height)
screen.fill(color, rect=fill)
# Highlight quarter notes
for step in range(4, STEPS, 4):
left = GRID_LEFT + step * STEP_EDGE
top = GRID_TOP + 2
bottom = GRID_TOP + TRACKS * STEP_EDGE - 2
pygame.draw.line(screen, GRID_QUARTER_NOTE_COLOR, (left, top), (left, bottom), 3)
# Draw record buttons
for track in range(TRACKS):
left = GRID_LEFT + STEPS * STEP_EDGE + 5
top = GRID_TOP + track * STEP_EDGE + 3
rect = (left, top, STEP_EDGE - 6, STEP_EDGE - 6)
center = rect[0] + rect[2] // 2, rect[1] + rect[3] // 2
radius = rect[2] // 2 - 3
pygame.draw.circle(screen, (0, 0, 0), center, radius, 0)
pygame.draw.circle(screen, RECORD_INACTIVE_COLOR, center, radius, 1)
if track == record_track:
pygame.draw.circle(screen, RECORD_ACTIVE_COLOR, center, radius, 0)
def play(pattern, pitch_pattern, sounds, current_step):
"""Play any sounds that are enabled for the given step."""
for track in range(TRACKS):
sound = pattern[track][current_step]
if sound:
sound_index = sound - 1
if sound_index in PITCHED_SAMPLES:
pitch = pitch_pattern[track][current_step]
sounds[sound - 1][pitch].play()
else:
sounds[sound - 1].play()
def check_input(record_track, pattern):
"""Check for input from Mathematica, return whether we found an update."""
if os.path.exists(INPUT_PATH):
# Parse the input, update our pattern
with open(INPUT_PATH) as f:
vals = [int(round(float(x))) for x in f]
total_samples = vals[0]
onsets = vals[1:]
# Take first onset as start of recording, last as next measure, for now!
total_samples = onsets[-1] - onsets[0]
onsets = [x - onsets[0] for x in onsets][:-1]
parse_input(record_track, pattern, total_samples, onsets)
# Remove the input file now that we've parsed it
os.remove(INPUT_PATH)
return True
else:
return False
def parse_input(record_track, pattern, total_samples, onsets):
"""Parse the given input, update pattern."""
# Quantize to the nearest step
steps = [0] * STEPS
for onset in onsets:
step = int(round(float(onset) / total_samples * STEPS))
steps[step] = 1
pattern[record_track] = steps
def main():
"""Entry point."""
# Initialize game engine
pygame.mixer.init(buffer=512)
pygame.init()
# Create our window and set its caption
screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption(WINDOW_TITLE)
muted = False
# Load samples
sounds = []
for sample_index in range(SAMPLES):
if sample_index in PITCHED_SAMPLES:
sound = [pygame.mixer.Sound(os.path.join(MAIN_DIR, "{}{}.wav".format(sample_index, x)))
for x in range(PITCHES)]
else:
sound = pygame.mixer.Sound(os.path.join(MAIN_DIR, str(sample_index) + '.wav'))
sounds.append(sound)
    # Initialize our pattern, indexed [track][step]: 0 for no sound, 1..SAMPLES for a sound
current_step = 0
record_track = 0
pattern = [[0] * STEPS for _ in range(TRACKS)]
pitch_pattern = [[0] * STEPS for _ in range(TRACKS)]
draw_pattern(screen, record_track, pattern, pitch_pattern, current_step)
# Check for initial input
check_input(record_track, pattern)
# Start our step timer, this sets our tempo
pygame.time.set_timer(EVENT_PLAY, TEMPO_MS_PER_STEP)
pygame.time.set_timer(EVENT_CHECK_INPUT, 500)
# Run our event loop
running = True
while running:
# Handle Input Events
for event in pygame.event.get():
            if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
                # Stop our event loop on window close or ESC keypress
                running = False
elif event.type == EVENT_PLAY:
current_step = (current_step + 1) % STEPS
if not muted:
play(pattern, pitch_pattern, sounds, current_step)
draw_pattern(screen, record_track, pattern, pitch_pattern, current_step)
elif event.type == MOUSEBUTTONDOWN:
record_track = update_pattern(record_track, pattern, pitch_pattern, event.pos,
event.button)
draw_pattern(screen, record_track, pattern, pitch_pattern, current_step)
elif event.type == EVENT_CHECK_INPUT:
if check_input(record_track, pattern):
draw_pattern(screen, record_track, pattern, pitch_pattern, current_step)
elif event.type == KEYDOWN:
if event.key == pygame.K_m:
muted = not muted
pygame.display.set_caption(WINDOW_TITLE + (' (muted)' if muted else ''))
# Now draw any updates to the screen
pygame.display.flip()
pygame.quit()
# Call our entry point
if __name__ == '__main__':
main()
|
[
"jeremy@Lappykins.local"
] |
jeremy@Lappykins.local
|
9fbba12d321ad7bcae325cc7b8e8bc3d77faa827
|
b557781831f6345f36f5d35b9c5fa6cbdb4c4815
|
/billing/yup/views.py
|
eb97a8947f82f58dad204478f718bf8e1651efe5
|
[] |
no_license
|
komsihon/Project1
|
5c067bcc2f299a28163eccf27716ed092e070b78
|
e32c481ad358c2a8af52d95a9bbc2f9faebfd703
|
refs/heads/master
| 2021-06-03T23:52:21.555310
| 2021-01-13T10:53:24
| 2021-01-13T10:53:24
| 98,784,648
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,108
|
py
|
import json
import traceback
import requests
from django.conf import settings
from django.db import transaction
from django.http import HttpResponse
from django.http.response import HttpResponseRedirect
from django.shortcuts import render
from django.template.defaultfilters import slugify
from django.utils.module_loading import import_by_path
from ikwen.core.utils import get_service_instance
from ikwen.billing.models import PaymentMean, MoMoTransaction
import logging
logger = logging.getLogger('ikwen')
YUP = 'yup'
UNKNOWN_PHONE = '<Unknown>'
CURRENCY = "XAF"
def init_yup_web_payment(request, *args, **kwargs):
api_url = getattr(settings, 'YUP_API_URL', 'https://33027.tagpay.fr/online/online.php')
yup = json.loads(PaymentMean.objects.get(slug=YUP).credentials)
phone = UNKNOWN_PHONE
service = get_service_instance()
request.session['phone'] = phone
amount = int(request.session['amount'])
model_name = request.session['model_name']
object_id = request.session['object_id']
if request.user.is_authenticated():
username = request.user.username
language = request.user.language
else:
username = None
language = 'en'
# Request a session id
try:
params = {'merchantid': yup['merchant_id']}
session_id_request = requests.get(api_url, params=params, verify=False)
    except requests.exceptions.HTTPError as errh:
        logger.error("YUP: HTTP Error: %s", errh)
        return HttpResponseRedirect(request.session['cancel_url'])
    except requests.exceptions.ConnectionError as errc:
        logger.error("YUP: Error Connecting: %s", errc)
        return HttpResponseRedirect(request.session['cancel_url'])
    except requests.exceptions.Timeout as errt:
        logger.error("YUP: Timeout Error: %s", errt)
        return HttpResponseRedirect(request.session['cancel_url'])
    except requests.exceptions.RequestException as err:
        logger.error("YUP: Unexpected request error: %s", err)
        return HttpResponseRedirect(request.session['cancel_url'])
session_id_resp_message = session_id_request.text
if session_id_resp_message[:2] == "NO":
logger.debug("YUP: Unable to provide a session with %s as Merchand ID" % (yup['merchant_id']))
logger.debug("YUP: SERVER ERR TEXT is : %s" % session_id_resp_message)
return HttpResponse("Error, YUP: Unable to provide a session with %s as Merchand ID; Please check and restart" % (yup['merchant_id']))
else:
logger.debug("YUP: Session ID OK; ")
session_id = session_id_resp_message.replace('OK:', '')
payments_conf = getattr(settings, 'PAYMENTS', None)
if payments_conf:
conf = request.session['payment_conf']
path = payments_conf[conf]['after']
else:
path = getattr(settings, 'MOMO_AFTER_CASH_OUT')
with transaction.atomic(using='wallets'):
try:
momo_tx = MoMoTransaction.objects.using('wallets').get(object_id=object_id)
except MoMoTransaction.DoesNotExist:
momo_tx = MoMoTransaction.objects.using('wallets').create(service_id=service.id, type=MoMoTransaction.CASH_OUT,
phone=phone, amount=amount, model=model_name,
object_id=object_id, wallet=YUP, username=username,
callback=path)
except MoMoTransaction.MultipleObjectsReturned:
momo_tx = MoMoTransaction.objects.using('wallets').filter(object_id=object_id)[0]
request.session['tx_id'] = momo_tx.id
accept_url = request.session['return_url']
# accept_url += '/%d' % momo_tx.id
company_name = slugify(service.config.company_name).replace('-', ' ')
logger.debug("YUP: Initiating paymentof %dF with %s as Merchand ID" % (amount, yup['merchant_id']))
context = {
'api_url': api_url,
'sessionid': session_id,
'merchantid': yup['merchant_id'],
'amount': amount,
'currency': CURRENCY,
'purchaseref': object_id,
'phone': phone,
'brand': company_name,
'description': '',
'declineurl': request.session['cancel_url'],
'cancelurl': request.session['cancel_url'],
'accepturl': accept_url,
'text': '',
'language': language
}
return render(request, 'billing/yup/do_redirect.html', context)
def yup_process_notification(request, *args, **kwargs):
logger.debug("YUP: New incoming notification %s" % request.META['REQUEST_URI'])
amount = request.GET['amount']
object_id = request.GET['purchaseref']
paymentref = request.GET['paymentref']
error_text = request.GET.get('error')
status = request.GET['status']
try:
tx = MoMoTransaction.objects.using('wallets').get(object_id=object_id)
except:
logger.error("YUP: Failure while querying transaction status", exc_info=True)
return HttpResponse("OK")
logger.debug("YUP: Successful payment of %dF from %s" % (tx.amount, tx.username))
if status == "OK":
path = tx.callback
momo_after_checkout = import_by_path(path)
with transaction.atomic(using='wallets'):
try:
with transaction.atomic():
MoMoTransaction.objects.using('wallets').filter(object_id=object_id) \
.update(processor_tx_id=paymentref, message='OK', is_running=False,
status=MoMoTransaction.SUCCESS)
except:
logger.error("YUP: Could not mark transaction as Successful. User: %s, Amt: %d" % (tx.username, tx.amount), exc_info=True)
else:
try:
momo_after_checkout(request, transaction=tx)
except:
MoMoTransaction.objects.using('wallets').filter(object_id=object_id) \
.update(message=traceback.format_exc())
logger.error("YUP: Error while running callback. User: %s, Amt: %d" % (tx.username, tx.amount), exc_info=True)
elif error_text != 'AUTHENTICATION':
with transaction.atomic(using='wallets'):
try:
if "CANCEL" in error_text:
logger.debug("YUP: transaction canceled. User: %s, Amt: %d " % (tx.username, tx.amount))
MoMoTransaction.objects.using('wallets').filter(object_id=object_id) \
.update(message=error_text, is_running=False, status=MoMoTransaction.DROPPED)
else:
logger.debug("YUP: transaction failed. User: %s, Amt: %d " % (tx.username, tx.amount))
MoMoTransaction.objects.using('wallets').filter(object_id=object_id) \
.update(message=error_text, is_running=False, status=MoMoTransaction.FAILURE)
except:
logger.error("YUP: Could not mark transaction as Failed or Canceled. User: %s, Amt: %s" % (tx.username, tx.amount), exc_info=True)
return HttpResponse('OK')
|
[
"rsihon@gmail.com"
] |
rsihon@gmail.com
|
9812d4b96bf0a572295fd6c0ddb188d7e8343f0e
|
ae3e23956d8d831a6934570fc8ff3ec1218a8934
|
/hwk10_leapfrog.py
|
5f7d38024a53fa21522608e51b08a552391f59f9
|
[] |
no_license
|
Kay-Towner/Homework10
|
04cecd1686ff15543d858cb5e3f5a6a5336b2e94
|
2451c2b15ad9acaef9521e8da7ab330aa2ad1449
|
refs/heads/main
| 2023-04-10T12:15:51.209962
| 2021-04-15T02:02:53
| 2021-04-15T02:02:53
| 357,039,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
#By Kay Towner
import math
import numpy as np
import matplotlib.pyplot as plt
def dif(x, dxdt):
    """Differential equation to solve, rearranged for the acceleration:
    d2x/dt2 - (dx/dt)**2 + x + 5 = 0  =>  d2x/dt2 = (dx/dt)**2 - x - 5."""
    return dxdt**2 - x - 5
def frog(t, h, x, dxdt, f):
    """Leapfrog method to run on dif: positions at whole steps, velocities
    at half steps (kick-drift-kick); note it uses the half-step velocity
    inside f, an approximation when the force depends on velocity."""
    xs = np.empty(len(t))
    xs[0] = x
    v_half = dxdt + 0.5 * h * f(x, dxdt)    # initial half-step kick
    for i in range(1, len(t)):
        x = x + h * v_half                  # drift: advance position a full step
        v_half = v_half + h * f(x, v_half)  # kick: advance velocity a full step
        xs[i] = x
    return xs
if __name__ == "__main__":
    # VARIABLES:
    h = 0.001                  # step size
    t = np.arange(0, 50, h)    # time grid
    x = 1.0                    # initial condition (position)
    dxdt = 0.0                 # initial velocity
    leapfrog = frog(t=t, h=h, x=x, dxdt=dxdt, f=dif)
    print(leapfrog)
|
[
"noreply@github.com"
] |
Kay-Towner.noreply@github.com
|
8f20ae93a9a65dd48cd7f1faaa5b414c39f8fd57
|
0b8a060fdd29cd19857f61afe1853a4791953de2
|
/src/tickets/forms.py
|
0faa66e5f33b9a8ddb5cac0f169ad2795c5a76b5
|
[] |
no_license
|
nazarja/issue-tracker
|
ad5e4679567378977842150f6cbef2426c549128
|
a8e9939b6b590fc2affe02684264fad1c5e9b8cd
|
refs/heads/master
| 2023-04-14T22:54:51.881294
| 2023-04-08T17:52:16
| 2023-04-08T17:52:16
| 192,345,371
| 5
| 4
| null | 2023-04-08T17:53:16
| 2019-06-17T12:45:30
|
HTML
|
UTF-8
|
Python
| false
| false
| 506
|
py
|
from django import forms
from .models import Ticket
class TicketForm(forms.ModelForm):
"""
    Inherits from the Ticket model to create a form;
    most fields are left out and set on the view / serializer.
    The description field needs a custom textarea widget.
"""
description = forms.CharField(max_length=2000, required=True, widget=forms.Textarea)
class Meta:
model = Ticket
fields = [
'title',
'description',
'status',
]
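# Hedged usage sketch (added; not part of the original file): validating
# posted data in a view, with the remaining fields set there as the
# docstring describes.
#     form = TicketForm(request.POST)
#     if form.is_valid():
#         ticket = form.save(commit=False)  # other fields set by the view
#         ticket.save()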
|
[
"mail.seanmurphy@gmail.com"
] |
mail.seanmurphy@gmail.com
|
6a55569a68376c6732e321f34aada25ef8180167
|
7e5fd224594e90ebf9f235e8e996387f308d3c6a
|
/engine.py
|
6c0fa738c37916117a5458aa318795c0094b201f
|
[] |
no_license
|
aditya-singh-07/ecommerce
|
649e75dc844d5783bd5cedffe7862e0902a983a3
|
43cd0e4cc63ad28cb3b15b5bd51f639a265c8713
|
refs/heads/master
| 2023-05-02T19:27:12.279069
| 2021-05-28T16:44:56
| 2021-05-28T16:44:56
| 371,301,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
import pandas as pd
def get_recommendations(id):
orders = pd.read_csv("data/OrderProduct.csv")
    orders_for_product = orders[orders.product_id == id].order_id.unique()
relevant_orders = orders[orders.order_id.isin(orders_for_product)]
accompanying_products_by_order = relevant_orders[relevant_orders.product_id != id]
num_instance_by_accompanying_product = accompanying_products_by_order.groupby("product_id")["product_id"].count().reset_index(name="instances")
num_orders_for_product = orders_for_product.size
product_instances = pd.DataFrame(num_instance_by_accompanying_product)
product_instances["frequency"] = product_instances["instances"]/num_orders_for_product
recommended_products = pd.DataFrame(product_instances.sort_values("frequency", ascending=False).head(3))
products = pd.read_csv("data/Product.csv")
recommended_products = pd.merge(recommended_products, products, on="product_id")
return recommended_products.to_json(orient="table")
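# Minimal usage sketch (added; not part of the original file). It assumes
# data/OrderProduct.csv has order_id/product_id columns and data/Product.csv
# has a product_id column, as the function implies; product id 42 is made up.
if __name__ == "__main__":
    print(get_recommendations(42))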
|
[
"adityakusumlata@gmail.com"
] |
adityakusumlata@gmail.com
|
1ad1162ff17c839ef8ce55ab6bbee3cf0a93cf8e
|
5d86a45c0120bcaffb343493fefad8cb5e3dd996
|
/Preview/System/Filetools/bigpy-tree.py
|
163a1aea9b12829aef707983c5cc8adac825b59c
|
[] |
no_license
|
rhkzleek/PP4E
|
e2b2a5406930b33dd4416b517491d76f3fe479f4
|
c103bcc903bbdcac614189618ab4ed59fb359045
|
refs/heads/master
| 2020-04-13T11:32:54.016523
| 2019-01-07T12:32:56
| 2019-01-07T12:32:56
| 163,177,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
#author:DELL
#createTime:2018/12/24 19:29
#fileName:bigpy-tree.py
#tool:PyCharm
'''
TODO: find the largest Python source-code files in an entire directory tree.
Search a Python source tree and pretty-print the results with pprint.
'''
import sys,os, pprint
trace = False
if sys.platform[:3] == 'win':
    dirname = r'D:\GitHub\PP4E\Preview' # usable on Windows
else:
    dirname = '/usr/lib/python' # usable on Unix, Linux, Cygwin
allsizes = []
for (thisDir, subsHere, filesHere) in os.walk(dirname):
if trace:
print(thisDir)
for filename in filesHere:
if trace:
print('...', filename)
fullname = os.path.join(thisDir, filename)
fullsize = os.path.getsize(fullname)
allsizes.append((fullsize, fullname))
allsizes.sort()
pprint.pprint(allsizes[:2])
pprint.pprint(allsizes[-2:])
pprint.pprint('*'*50)
pprint.pprint(allsizes)
|
[
"978415719@qq.com"
] |
978415719@qq.com
|
ebccd5c718ea4837e92d374872257d1bf7b56b4b
|
18f865a906e817b62560404355f6dbbdde171cc8
|
/codewars.com/python/kyu_7/baby_shark_lyrics_generator.py
|
3e7c67fb936e1040401f3889fb24a809509f7558
|
[] |
no_license
|
Mietunteol/playground
|
fdfeffb2c78a37d902094ace36f2f75ef25bdb99
|
0c87c215deb32581b12d2481164d2832880ed10a
|
refs/heads/master
| 2020-07-07T03:42:58.898415
| 2019-10-02T12:24:02
| 2019-10-02T12:24:02
| 203,234,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
# https://www.codewars.com/kata/baby-shark-lyrics-generator
import base64
import zlib
def baby_shark_lyrics():
# EASY
return zlib.decompress(base64.b64decode(
'eJxzSkyqVCjOSCzK1lFIyc/HhrmcqK5Gkcs3PzeXoAaaKFLk'
'cklMSSGogyaKFLncixLzUnITCemhoTKoGwqI00UbZYpcPqkl6'
'sUK6fkKGaV5Jbh10VCZIldQaZ5CYnlipc6jhmUADlwJ3Q==')).decode()
|
[
"mietunteol@protonmail.com"
] |
mietunteol@protonmail.com
|
7d92eb6ee7101e674096f4e620feb32b4175097d
|
6dd64f2b04a1796fb7683d24bc7cb8102b3470e9
|
/python文件/python文件—读取方法.py
|
c06734222e5b043f0f3325b0a17462224a1df56e
|
[] |
no_license
|
lsifang/python
|
f616c5ee344cf5f6bd9c7522a8a615a0357febf1
|
69df369c0aa76097b22905ffc8b07ddb14db5f9f
|
refs/heads/master
| 2021-04-03T04:23:32.059614
| 2018-03-09T01:57:47
| 2018-03-09T01:57:47
| 124,472,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,264
|
py
|
#=========== Read methods ===========#
# 1. read([num_bytes]) reads everything by default (note: the byte count also includes newlines and tabs)
# 2. f.readline() reads one line at a time (a single line from the current position)
# 3. f.readlines() reads everything and stores each line (each segment split on newlines) as one element of a list
# Note: a file object is an iterator - no wonder traversal is pointer-like: every operation moves the file position
f=open('b.txt','r')
# t=f.read(9)
# print(f.tell())
# print(t)
# f.close()
# f.seek(9)
# x=f.readline()
# y=f.readline()
# print(x,end='')
# print(y)
# t=f.readlines()
# print(t)
# print(f.tell())
import collections.abc
if f.readable():
    print(isinstance(f, collections.abc.Iterator))
    print(next(f))
    print(next(f))
    print(next(f))
    print(f.__next__())
f = open('b.txt', 'r')
t = f.readlines()  # see, this returns an empty list if f is not reopened above
print(isinstance(t, collections.abc.Iterator))  # readlines() is not an iterator, because it returns a list
t = iter(t)
print(isinstance(t, collections.abc.Iterator))
print(next(t))
#——————— Checking method ———————#
# if f.readable():  # mainly for defensive error handling in the code
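# Added sketch (not in the original file): the idiomatic pattern is a
# with-statement, which closes the file automatically; 'b.txt' is the same
# sample file assumed above.
with open('b.txt', 'r') as fh:
    for line in fh:
        print(line, end='')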
|
[
"524582427@qq.com"
] |
524582427@qq.com
|
ad2d90a9cf6d6441b52603caf380efc303fd6b5f
|
1e697c49666a2098534098808916351f7faf6d7d
|
/shop/models.py
|
4c9a77c58bd74df3e2e346f2cfa9ee38f782c8f5
|
[] |
no_license
|
MunifMutasim/3-2-Term-Project
|
507520f0c9341feeaf110163b356ad203fed8702
|
09cc4d7ea8124410b3e1527c0fda31409573ad5d
|
refs/heads/master
| 2020-06-06T00:26:13.995447
| 2019-06-18T17:46:38
| 2019-06-18T17:46:38
| 192,586,908
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,317
|
py
|
from django.db import models
from django.core.urlresolvers import reverse
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField(max_length=50, unique=True)
class Meta:
ordering = ['name',]
verbose_name = 'category'
verbose_name_plural = 'categories'
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('shop:product_list_by_category',args=[self.slug])
class Product(models.Model):
category = models.ManyToManyField(Category)
name = models.CharField(max_length=100, unique=True)
slug = models.SlugField(max_length=50, unique=True)
image = models.ImageField(upload_to='images',blank=True)
description = models.TextField(blank=True)
price = models.DecimalField(max_digits=9,decimal_places=2)
stock = models.PositiveIntegerField()
available = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['-created_at']
index_together=["id","slug"]
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('shop:product_detail',args=[self.id,self.slug])
@property
def image_url(self):
if self.image and hasattr(self.image, 'url'):
            return self.image.url
        return ''  # no image attached: fall back to an empty URL
|
[
"smdmunif@gmail.com"
] |
smdmunif@gmail.com
|
0f86bc2bb49aeaa1ea80641c646f9bb2d8c08163
|
be9d900c1c4971d7fb3647ba39802ea1c63a0e7d
|
/baithicuoiki.1.1.py
|
122965e30eb08c1b3858e871858dc09d61b3e170
|
[] |
no_license
|
leduykhanh2001/KTPMUD_DV18CLC_18H1040047
|
1c7e2c8e52cb70b463f3d2c4c3298c4f6ca6a04b
|
4e6981791abca202b32061234b1894ed94c84cbb
|
refs/heads/main
| 2023-05-12T21:43:59.783862
| 2021-05-25T01:41:14
| 2021-05-25T01:41:14
| 370,515,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,075
|
py
|
class Complex:
    def __init__(self, complex_real, complex_image):
        self.complex_real = complex_real
        self.complex_image = complex_image
        # modulus derived from the real and imaginary parts
        self.complex_module = (complex_real ** 2 + complex_image ** 2) ** 0.5
class Person:
    def __init__(self, person_name, person_my_complex):
        self.person_name = person_name
        self.person_my_complex = person_my_complex
person_com1 = Person('le duy khanh', Complex(7.0, -2.0))   # 7 - 2j
person_com2 = Person('huynh pham que lam', Complex(5.0, 0.0))
person_com3 = Person('le duy khanh', Complex(2.0, 3.0))    # 2 + 3j
print(person_com1.person_name)
print(person_com2.person_my_complex.complex_module)
|
[
"noreply@github.com"
] |
leduykhanh2001.noreply@github.com
|
398d673931c3682dd400519627ba080e730d4991
|
3e2d30b169ae706a159f29833e3ef3860249695d
|
/documentation/cmdPlot.py
|
33529aad1a71f8d83c6389051b603c792fee4d47
|
[] |
no_license
|
robbyHuelsi/VolksbotLineDetection
|
e36d5051cb922b36c1f40f9225f48c5aedd0be62
|
1c084bb488193cc49523e34d6b0d36f46c47ff51
|
refs/heads/master
| 2020-03-11T14:17:52.174465
| 2018-09-12T17:00:41
| 2018-09-12T17:00:41
| 130,049,373
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,254
|
py
|
import os
import sys
import matplotlib.pyplot as plt
import itertools
from datetime import datetime
from builtins import str
sys.path.append("../train")
import inputFunctions as ifu
def plotCmd(imgAndCmdList):
start = 100
end = 126
# sort by dateTime
#imgAndCmdList = sorted(imgAndCmdList, key=lambda k: k['dateTime'])
#for imgAndCmdDict in imgAndCmdList:
# imgAndCmdDict["fullCmdList"] = sorted(imgAndCmdDict["fullCmdList"],
# key=lambda k: k['dateTime'])
# get delay
startTime = imgAndCmdList[start]['dateTime']
# add delay to list
for imgAndCmdDict in imgAndCmdList:
imgAndCmdDict["delay"] = (imgAndCmdDict["dateTime"] - startTime).total_seconds()
#print("IMG: " + str(imgAndCmdDict["delay"]))
for fullCmdDict in imgAndCmdDict["fullCmdList"]:
fullCmdDict["delay"] = (fullCmdDict["dateTime"] - startTime).total_seconds()
#print(" cmd: " + str(fullCmdDict["delay"]))
#input()
imgTimeList = []
cmdFullTimeList = []
velYawFullCmdList = []
velYawList = []
#colors
color = {"green": "#85be48", "gray": "#8a8b8a", "orange": "#ffa500", "light_orange": "#ffe0b5",
"blue": "#0fa3b1", "pink": "#6b2d5c"}
cc = itertools.cycle(color.values())
# set font
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# plot figure
fig = plt.figure(figsize=(4.5, 3.5))
ax = plt.subplot(111)
# set the y-spine (see below for more info on `set_position`)
ax.spines['bottom'].set_position('zero')
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# limit view
ax.set_xlim(0, 1.001)
ax.set_ylim(0, 0.5)
# y axis ticks
vals = ax.get_yticks()
vals = [str(int(x*100)) for x in vals]
ax.set_yticklabels(vals)
# axis labels
ax.set_xlabel('Zeit [s]')
ax.set_ylabel('Gierrate [\%]')
for i, imgAndCmdDict in enumerate(imgAndCmdList[start:end]):
imgTime = imgAndCmdDict["delay"]
imgTimeList.append(imgTime)
velYawList.append(float(imgAndCmdDict["velYaw"]))
#print(str(i) + ".pdf: " + str(imgTime) + " (mean: " + str(imgAndCmdDict["velYaw"]) + ")")
for fullCmdDict in imgAndCmdDict["fullCmdList"]:
cmdTime = fullCmdDict["delay"]
cmdFullTimeList.append(cmdTime)
velYawFullCmdList.append(float(fullCmdDict["velYaw"]))
#print(" - " + str(cmdTime) + ": " + str(fullCmdDict["velYaw"]))
ax.axvline(x=imgTimeList[0], color="#8a8b8a", linestyle=":", linewidth=0.6, label="Aufzeichnungszeitpunkte Bilder (Rate: 25 Hz)")
for t in imgTimeList[1:]:
ax.axvline(x=t, color=color["gray"], linestyle="--", linewidth=0.6)
ax.step(imgTimeList, velYawList, where="post", markevery=2, marker="o", markersize=4, color=color["orange"], label="Gemittelte Steuerbefehle")
ax.bar(cmdFullTimeList, velYawFullCmdList, color=color["green"], width=0.005, label="Aufgezeichnete Steuerbefehle")
# Shrink current axis's height by 10% on the bottom
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.3,
box.width, box.height * 0.7])
# Put a legend below current axis
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.23),
fancybox=True, shadow=True, ncol=1)
plt.savefig(os.path.join('cmds.pdf'))
if __name__ == "__main__":
recordingsFolder = os.path.join(os.path.expanduser("~"),
"volksbot", "data", "train_lane")
onlyUseSubfolder = os.path.join("straight_lane_angle_move_right_1",
"left_rect")
imgAndCmdList = ifu.getImgAndCommandList(recordingsFolder,
onlyUseSubfolder=onlyUseSubfolder,
filterZeros=False, getFullCmdList=True)
plotCmd(imgAndCmdList)
|
[
"robert@huelsi.de"
] |
robert@huelsi.de
|
748d81067ab239e7f7fe9b6000f2f94b32e04f99
|
5a74fbfb317c116a911e3e6d404ed050bdd2baab
|
/problems/bear.py
|
e43a3e4305255c5f52700ac1450f89e11c664a31
|
[] |
no_license
|
yorkypy/selise-intervie
|
39571fd14efca77240ee33d2ab770c86cb9103da
|
82da5fc6399652bb080cd3977d6a4c60c47db2a1
|
refs/heads/master
| 2023-01-23T05:14:52.903886
| 2020-12-05T10:50:53
| 2020-12-05T10:50:53
| 318,727,091
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
'''Competitive Programming'''
def bear(a,b):
ans=0
while a<=b:
        a *= 3
        b *= 2
        ans += 1
return ans
#Driver Code
if __name__ == "__main__":
print(bear(1,9))
|
[
"nimayonten@gmail.com"
] |
nimayonten@gmail.com
|
15cfb6b1f6bcf67247dbc1632662dc3b164d8bba
|
72e5ab95d0e63c9f4ffcb2e1537cbf050400382a
|
/sudoku/main-script.py
|
2eb1a6ce542d22ab1d3e8586a923a1ea3ec15c63
|
[] |
no_license
|
divyansh-gaur/Automation
|
905b2c74b4f5750742f9e3b7f987a41e575a242e
|
d3112239b01829893af26d3afc603dce7f60a94b
|
refs/heads/main
| 2023-03-30T17:11:03.521231
| 2021-03-30T09:59:34
| 2021-03-30T09:59:34
| 352,947,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,251
|
py
|
# import necessary libraries
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from pyautogui import alert
from collections import deque
class Cell:
def __init__(self):
self.webElement = None
self.was_empty = None
class Solve:
def __init__(self, grid):
self.grid = grid
self.ref = [0, 0]
def __locate_vacant(self):
for i in range(9):
for j in range(9):
if self.grid[i][j] == 0:
self.ref[0], self.ref[1] = i, j
return True
return False
def __is_valid(self, row, col, value):
for i in range(9):
if (self.grid[row][i] == value) or (self.grid[i][col] == value):
return False
row -= row % 3
col -= col % 3
for i in range(3):
for j in range(3):
if self.grid[row + i][col + j] == value:
return False
return True
def solve(self):
if not self.__locate_vacant():
return True
row, col = self.ref
for number in range(1, 10):
if self.__is_valid(row, col, number):
self.grid[row][col] = number
if self.solve():
return True
self.grid[row][col] = 0
return False
if __name__ == "__main__":
game_url = "https://en.sudoku-online.net/sudoku-easy/"
# game_url = "https://en.sudoku-online.net/"
# game_url = "https://en.sudoku-online.net/sudoku-difficult/"
# game_url = "https://en.sudoku-online.net/sudoku-very-difficult/"
grid = deque(deque(0 for _ in range(9)) for __ in range(9))
game_table = deque(deque(Cell() for _ in range(9)) for __ in range(9))
# load grid
solver = Solve(grid)
options = ChromeOptions()
options.add_argument("--start-maximized")
driver = Chrome(options=options)
driver.get(url=game_url)
try:
start = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, '#overlay-wrapper > div > div')))
except TimeoutException:
driver.close()
alert("Loading took too much time!")
quit(0)
start.click()
rows = driver.find_element_by_css_selector("#overlay-wrapper > table").find_elements_by_tag_name("tr")
for i in range(9):
cells = rows[i].find_elements_by_tag_name("td")
for j in range(9):
cell = cells[j]
if cell.text:
game_table[i][j].webElement = cell
game_table[i][j].was_empty = False
grid[i][j] = int(cell.text)
else:
game_table[i][j].webElement = cell
game_table[i][j].was_empty = True
resp = solver.solve()
if not resp:
alert("Grid is not solvable!")
driver.close()
quit(0)
for i in range(9):
for j in range(9):
if game_table[i][j].was_empty:
game_table[i][j].webElement.send_keys(grid[i][j])
|
[
"68742658+divyansh-gaur@users.noreply.github.com"
] |
68742658+divyansh-gaur@users.noreply.github.com
|
e5e8294b5d1f9dcb995f1696ac69e28901803e67
|
38c9925575815a97dd062d259160a3dd47149cb0
|
/src/boletin/migrations/0001_initial.py
|
adc921b456a8c0947eea15b59383e5a61a05efe0
|
[
"MIT"
] |
permissive
|
wunjo/Gypsy
|
ec35037beaa30ca4a967eb30043e32e8f0d4c184
|
2c54214c062b84837b5334cc06e4f205ef015434
|
refs/heads/master
| 2021-01-22T09:17:30.132706
| 2017-01-24T14:16:39
| 2017-01-24T14:16:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 714
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-06 14:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Registrado',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(blank=True, max_length=100, null=True)),
('email', models.EmailField(max_length=254)),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"benticarlos@gmail.com"
] |
benticarlos@gmail.com
|
c95a5c8502d45df00595d29173e7a01a5dc4f999
|
b150dcbc12ce410fba2577ec409923120c010e31
|
/autobook/serializers.py
|
ce53c47238ebb3dd579c528f4b37f29c107da959
|
[] |
no_license
|
KirtiKharb/Autobooking
|
bc10c4837d29bfdd447af4b66bb7b7af7dac2614
|
7fa6845a4c2a86438e0e481e2f3469e89447d170
|
refs/heads/master
| 2021-01-01T06:17:53.828428
| 2015-07-21T09:46:53
| 2015-07-21T09:46:53
| 39,436,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 716
|
py
|
from django.forms import widgets
from rest_framework import serializers
from autobook.models import *
class AirlineSerializer(serializers.ModelSerializer):
class Meta:
model = Airline
fields = ('airline_id', 'airline_name')
class FlightSerializer(serializers.ModelSerializer):
class Meta:
model = Flight
fields = ('flight_id', 'source', 'destination', 'airline', 'arrival_at', 'departure_at', 'travel_type', 'economy_seats', 'business_seats', 'first_class_seats', 'economy_fare', 'business_fare', 'first_class_fare')
class Autobook_requestSerializer(serializers.ModelSerializer):
class Meta:
model = Autobook_request
read_only_fields = ('autobook_request_id',)
|
[
"kirtikharb.4@gmail.com"
] |
kirtikharb.4@gmail.com
|
93e37e4ebcfacf110e0d4f0aa79aa17c43a9e7d0
|
bb95fa976a779819d8ea9f9e1968098aef9dfb5a
|
/src/lib/bindings/python/setup.py
|
078f2cee2db60ac44a51d9f9d5766cbb4fb8ebfc
|
[
"WTFPL"
] |
permissive
|
andrew-stevenson-sociomantic/faup
|
4f0067cab93b7d84bf02d14cc02e94d69a0ce840
|
88dbbe2378552c9753b4f1e938663484909a4940
|
refs/heads/master
| 2020-06-12T07:27:39.411297
| 2019-03-23T15:46:40
| 2019-03-23T15:46:40
| 194,232,443
| 0
| 0
|
WTFPL
| 2019-06-28T07:57:04
| 2019-06-28T07:57:04
| null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
from setuptools import setup
setup(
name='pyfaup',
version = '1.0',
description='Python bindings for the faup library',
author='Sebastien Tricaud',
author_email='sebastien@honeynet.org',
packages = ['pyfaup'],
use_2to3 = True,
)
|
[
"sebastien.tricaud@gmail.com"
] |
sebastien.tricaud@gmail.com
|
68579e6e8e06ae4012f7f6ce8dc97fd210b66e9b
|
fc8622157720e65cbda419d9d76d5d2197bcb382
|
/lcvSearch/wsgi.py
|
b41fdd66da6a04636d70fcecbd629495215269a1
|
[] |
no_license
|
Geronimomiao/lcvSearch
|
c653a5d7ed0447fcab75745a409ca26ba1c5480d
|
7c94be9feeff4fca344d497f13ce5c4a93930975
|
refs/heads/master
| 2020-04-17T10:10:54.454690
| 2019-01-20T04:16:31
| 2019-01-20T04:16:31
| 166,490,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
"""
WSGI config for lcvSearch project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lcvSearch.settings")
application = get_wsgi_application()
|
[
"1030057982@qq.com"
] |
1030057982@qq.com
|
7fd5cc644f629a66b0a37d3708a60aaa3f549445
|
155d61e41047acdab1f8b9a11953caf29e314e0f
|
/CameraCalibration.py
|
8db4e2da7262a1dd29f01d0518e4457ffc77ef52
|
[] |
no_license
|
mgrddsj/MEMS-OpenCV
|
9a2439882e7c74f939373355f504eadd0179d7a5
|
2b6f875868911e5c306debe7ce30290cd0a9548e
|
refs/heads/master
| 2023-05-05T19:01:16.318549
| 2021-05-28T06:57:13
| 2021-05-28T06:57:13
| 353,624,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,889
|
py
|
from functools import partial
import cv2
import streamlit as st
import numpy as np
import glob
import multiprocessing
import time
import stqdm
from PIL import Image
def processImages(file_path, CHESSBOARD, criteria, objpoints, imgpoints, objp):
image = cv2.imread(file_path)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # st.image(image, use_column_width=True, caption="Original image", channels="BGR")
ret, corners = cv2.findChessboardCorners(gray, CHESSBOARD, cv2.CALIB_CB_ADAPTIVE_THRESH+cv2.CALIB_CB_FAST_CHECK+cv2.CALIB_CB_NORMALIZE_IMAGE)
if ret == True:
objpoints.append(objp)
# refining pixel coordinates for given 2d points.
corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
imgpoints.append(corners2)
# Draw and display the corners
# corners_img = cv2.drawChessboardCorners(image, CHESSBOARD, corners2,ret)
if __name__ == '__main__':
st.set_page_config(layout="centered")
st.header("")
# file_list = glob.glob("camcalib/*.jpg")
file_list = glob.glob("camcalib3/*.jpg")
# file_list = ['camcalib\\1.jpg', 'camcalib\\10.jpg', 'camcalib\\11.jpg', 'camcalib\\13.jpg', 'camcalib\\14.jpg', 'camcalib\\15.jpg', 'camcalib\\3.jpg', 'camcalib\\4.jpg', 'camcalib\\5.jpg', 'camcalib\\6.jpg', 'camcalib\\8.jpg']
if st.button("Start camera calibration 开始相机矫正"):
CHESSBOARD = (7, 9)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) # 设置寻找亚像素角点的参数,采用的停止准则是最大循环次数30和最大误差容限0.001
manager = multiprocessing.Manager()
# Creating vector to store vectors of 3D points for each CHESSBOARD image
objpoints = manager.list()
# Creating vector to store vectors of 2D points for each CHESSBOARD image
imgpoints = manager.list()
# Defining the world coordinates for 3D points
objp = np.zeros((1, CHESSBOARD[0]*CHESSBOARD[1], 3), np.float32)
objp[0,:,:2] = np.mgrid[0:CHESSBOARD[0], 0:CHESSBOARD[1]].T.reshape(-1, 2)
# Multiprocess
start_time = time.time()
pool = multiprocessing.Pool()
func = partial(processImages, CHESSBOARD=CHESSBOARD, criteria=criteria, objpoints=objpoints, imgpoints=imgpoints, objp=objp)
for _ in stqdm.stqdm(pool.imap_unordered(func, file_list), total=len(file_list), unit="photo"):
pass
pool.close()
pool.join()
st.write("Number of image used to calibrate the camera:", len(objpoints))
st.write("Time used:", time.time()-start_time, "s")
        # Camera calibration
        image = cv2.imread("camcalib3/IMG_20210524_103804.jpg")
        # image = cv2.imread("camcalib2/IMG_20210524_081912.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
st.write("相机内参矩阵 mtx:")
st.write(mtx)
st.write("透镜畸变系数 dist:")
st.write(dist)
st.write("旋转向量 rvecs:")
st.write(rvecs[0])
st.write("位移向量 tvecs:")
st.write(tvecs[0])
undistorted = cv2.undistort(image, mtx, dist)
cv2.imwrite("undistorted.jpg", undistorted)
        st.image(undistorted, use_column_width=True, caption="Undistorted image", channels="BGR")
total_error = 0
for i in range(len(objpoints)):
img_points_repro, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
error = cv2.norm(imgpoints[i], img_points_repro, cv2.NORM_L2)/len(img_points_repro)
total_error += error
st.write(("精度 Average Error of Reproject: "), total_error/len(objpoints))
else:
st.write("Press the button to start")
|
[
"Jesse_Xu@live.com"
] |
Jesse_Xu@live.com
|
c24bd142a33242ce550b621b5f054a0ae066ddc7
|
f8faa223d8ba64caab5a732bc6d1d9a944b62aa7
|
/tests/integration/loss/test_multi_op.py
|
01ec80e9fdb4dfb7ee583f90ff5518ad7e4e45ab
|
[
"BSD-3-Clause"
] |
permissive
|
pystiche/pystiche
|
2f53e26f38b7fe96ec29259a084ba8ab2c2a9d36
|
71217c24557dfba05da5795547bf6f3034e7c66f
|
refs/heads/main
| 2023-04-13T04:01:40.275142
| 2022-03-18T21:59:12
| 2022-03-18T21:59:12
| 208,798,287
| 138
| 36
|
BSD-3-Clause
| 2023-04-11T12:31:29
| 2019-09-16T12:49:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,016
|
py
|
import pytorch_testing_utils as ptu
import torch
from torch import nn
import pystiche
from pystiche import enc, loss, ops
from tests.asserts import assert_named_modules_identical
from tests.utils import suppress_deprecation_warning
@suppress_deprecation_warning
def test_MultiOperatorLoss():
class TestOperator(ops.Operator):
def process_input_image(self, image):
pass
named_ops = [(str(idx), TestOperator()) for idx in range(3)]
multi_op_loss = loss.MultiOperatorLoss(named_ops)
actuals = multi_op_loss.named_children()
desireds = named_ops
assert_named_modules_identical(actuals, desireds)
@suppress_deprecation_warning
def test_MultiOperatorLoss_trim():
class TestOperator(ops.EncodingOperator):
def __init__(self, encoder, **kwargs):
super().__init__(**kwargs)
self._encoder = encoder
@property
def encoder(self):
return self._encoder
def forward(self, image):
pass
layers = [str(idx) for idx in range(3)]
modules = [(layer, nn.Module()) for layer in layers]
multi_layer_encoder = enc.MultiLayerEncoder(modules)
ops_ = (("op", TestOperator(multi_layer_encoder.extract_encoder(layers[0])),),)
loss.MultiOperatorLoss(ops_, trim=True)
assert layers[0] in multi_layer_encoder
assert all(layer not in multi_layer_encoder for layer in layers[1:])
@suppress_deprecation_warning
def test_MultiOperatorLoss_call():
class TestOperator(ops.Operator):
def __init__(self, bias):
super().__init__()
self.bias = bias
def process_input_image(self, image):
return image + self.bias
input = torch.tensor(0.0)
named_ops = [(str(idx), TestOperator(idx + 1.0)) for idx in range(3)]
multi_op_loss = loss.MultiOperatorLoss(named_ops)
actual = multi_op_loss(input)
desired = pystiche.LossDict([(name, input + op.bias) for name, op in named_ops])
ptu.assert_allclose(actual, desired)
@suppress_deprecation_warning
def test_MultiOperatorLoss_call_encode(forward_pass_counter):
class TestOperator(ops.EncodingOperator):
def __init__(self, encoder, **kwargs):
super().__init__(**kwargs)
self._encoder = encoder
@property
def encoder(self):
return self._encoder
def forward(self, image):
return torch.sum(self.encoder(image))
modules = (("count", forward_pass_counter),)
multi_layer_encoder = enc.MultiLayerEncoder(modules)
ops_ = [
(str(idx), TestOperator(multi_layer_encoder.extract_encoder("count")),)
for idx in range(3)
]
multi_op_loss = loss.MultiOperatorLoss(ops_)
torch.manual_seed(0)
input = torch.rand(1, 3, 128, 128)
multi_op_loss(input)
actual = forward_pass_counter.count
desired = 1
assert actual == desired
multi_op_loss(input)
actual = forward_pass_counter.count
desired = 2
assert actual == desired
|
[
"noreply@github.com"
] |
pystiche.noreply@github.com
|
c76f9fa47ef7d9750b71945c9a4c81a19eb15fc0
|
77c8b96d0f3c7c0a5a110d2eb3752e352bc2e162
|
/examples/views/OperacoesView.py
|
e4e460212774ef0b47d013fc762b5fa893d19b55
|
[] |
no_license
|
liipeandre/tutorial-flask
|
e225dc0781ffe74e9c065c481159312d45276157
|
7457748206f3bee7af95470cd432a19240e823ef
|
refs/heads/main
| 2023-08-13T21:29:34.497244
| 2021-10-05T15:43:46
| 2021-10-05T15:43:46
| 392,052,087
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
from app import app
from flask import request, render_template, url_for, redirect, flash, session
# Read a field from the request
campo = request.form.get('campo', '')
# Create a message to display in the template.
flash('Nova Messagem')
# Create a session.
session['logged'] = True
# Render a template (use with the return statement)
render_template('produtos/insert.html')
# Render a template with data (use with the return statement)
dados = [1, 2, 3, 4]
render_template('produtos/list.html', parametro1=dados)
# Render a template with the form data (use with the return statement)
render_template('produtos/edit.html', produto=request.form)
# Redirect to another view (use with the return statement)
redirect(url_for('produto.list'))
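# Added sketch (assumption, not in the original file): the snippets above only
# run inside a request context, i.e. inside a view function; the '/exemplo'
# route and view name below are made up for illustration.
@app.route('/exemplo', methods=['GET', 'POST'])
def exemplo():
    campo = request.form.get('campo', '')
    flash('Nova Messagem')
    return render_template('produtos/insert.html', parametro1=campo)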
|
[
"andrefelipe.pereira@yahoo.com.br"
] |
andrefelipe.pereira@yahoo.com.br
|
d5a85d36ce2ff99febea077850ade131d8763db9
|
83c6891e8fcde4a33ad7d0c1d7ac8436079556db
|
/rowingconditions/plotlydash/dashboard.py
|
9792be013aa7e17108e46c190558433433e7fb3a
|
[] |
no_license
|
zkilburn86/rowing-conditions
|
d2b8bc1c57b8ab97d86b23bc08132ae00aba7426
|
19634d83ddfb03c06035013f96a9336b0690da9f
|
refs/heads/master
| 2022-10-01T10:38:39.736524
| 2020-06-07T13:35:17
| 2020-06-07T13:35:17
| 269,249,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,530
|
py
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import plotly.graph_objects as go
from rowingconditions.resources.arkansas_data import last_30_days
df = last_30_days()
def create_dashboard(server):
dash_app = dash.Dash(server=server,
routes_pathname_prefix='/dashapp/',
external_stylesheets=['/static/css/styles.css']
)
trace1 = go.Scatter(x=df['date_time'],
y=df['gage_height'],
name='Gage Height',
mode='lines+markers',
yaxis='y1'
)
trace2 = go.Scatter(x=df['date_time'],
y=df['stream_flow'],
name='Stream Flow',
mode='lines+markers',
yaxis='y2'
)
data = [trace1, trace2]
layout = go.Layout(title='Stream Flow and Gage Height Last 30 Days',
yaxis=dict(title='Gage Height (ft)'),
yaxis2=dict(title='Stream Flow (cfs)',
overlaying='y',
side='right')
)
dash_app.layout = html.Div(children=[
html.H1(children='Arkansas River'),
html.P(children='''
Dash: A web application framework for Python.
'''),
dcc.Graph(figure=go.Figure(data=data, layout=layout))
])
return dash_app.server
|
[
"zachkilburn@Zach-Kilburns-MacBook-Pro.local"
] |
zachkilburn@Zach-Kilburns-MacBook-Pro.local
|
5eee522f1c71624e222b152f905e6ca8a07c2df5
|
b06bceb8fdc24e0c890fb2201c535cb660a94f86
|
/pretrain_module/mbart_deprecated.py
|
ac4b7e85270d93661fd4ae3d1554b48ad9738826
|
[
"MIT"
] |
permissive
|
quanpn90/NMTGMinor
|
7f294b40763b3f586d34ef4985799b851052f2ed
|
5e1e424d0d9c2135a456e372a2ea9ee49de5bd2c
|
refs/heads/master
| 2023-08-22T14:53:31.420276
| 2023-08-21T08:26:49
| 2023-08-21T08:26:49
| 116,663,163
| 92
| 39
|
NOASSERTION
| 2023-07-31T15:07:35
| 2018-01-08T10:33:56
|
HTML
|
UTF-8
|
Python
| false
| false
| 8,832
|
py
|
from typing import Optional, Tuple
import torch
from torch import nn
from .modeling_bart import MBartAttention
class MBartCrossAttentionSlow(MBartAttention):
def convert_fast_attention(self):
pass
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
lang=None, atb=None,
incremental=False, incremental_cache=None, **kwargs
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
# is_cross_attention = key_value_states is not None
assert key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
if incremental and ('c_k' in incremental_cache and 'c_v' in incremental_cache):
# these are stored
key_states = incremental_cache['c_k']
value_states = incremental_cache['c_v']
else:
key_states = self.k_proj(key_value_states)
value_states = self.v_proj(key_value_states)
if incremental:
incremental_cache['c_k'] = key_states
incremental_cache['c_v'] = value_states
# reshape into B x H x T x D ?
key_states = self._shape(key_states, -1, bsz)
value_states = self._shape(value_states, -1, bsz)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, incremental_cache
class MBartAutoRegressiveSelfAttentionSLow(MBartAttention):
def convert_fast_attention(self):
pass
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
incremental=False, incremental_cache=None, **kwargs
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
# is_cross_attention = key_value_states is not None
assert key_value_states is None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
if incremental:
if 'k' in incremental_cache and 'v' in incremental_cache:
key_states = torch.cat([incremental_cache['k'], key_states], dim=1) # time first
value_states = torch.cat([incremental_cache['v'], value_states], dim=1) # time first
incremental_cache['k'] = key_states
incremental_cache['v'] = value_states
else:
incremental_cache['k'] = key_states
incremental_cache['v'] = value_states
# reshape into B x H x T x D ?
key_states = self._shape(key_states, -1, bsz)
value_states = self._shape(value_states, -1, bsz)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, incremental_cache
|
[
"quanpn90@gmail.com"
] |
quanpn90@gmail.com
|
3b37848f68add020cd5d254cdc317cb60dc17c29
|
ba6105cbef80245d6a19215d343b2a7890a30271
|
/Unit 4- Data Structures/Ch 4.4- File Input and Output/Coding Problem 4.4.3.py
|
de5b01a50ecb947c881ef7f789133b04c345edd6
|
[] |
no_license
|
pyl135/Introduction-to-Computing-using-Python
|
a9a3674cd9088d50c9eef0f46ac6da5f084f9a2e
|
92e94ce2d7a23a45fa00a2907f69682e25e6ed48
|
refs/heads/master
| 2021-04-12T10:43:33.757692
| 2018-03-30T07:07:08
| 2018-03-30T07:07:08
| 126,672,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,264
|
py
|
#Write a function called "reader" that reads in a ".cs1301"
#file described in the previous problem. The function should
#return a list of tuples representing the lines in the file like so:
#
#[(line_1_number, line_1_assignment_name, line_1_grade, line_1_total, line_1_weight),
#(line_2_number, line_2_assignment_name, line_2_grade, line_2_total, line_2_weight)]
#
#All items should be of type int except for the name (string)
#and the weight (float). You can assume the file will be in the
#proper format.
#
#Hint: Although you could use readlines() to read in all
#the lines at once, they would all be strings, not a list.
#You still need to go line-by-line and convert each string
#to a list.
#Write your function here!
def reader(filename):
    grades = []
    with open(filename, "r") as infile:  # the with-statement closes the file automatically
        for line in infile:
            each = line.split()
            one = int(each[0])
            two = each[1]
            three = int(each[2])
            four = int(each[3])
            five = float(each[4])
            grades.append((one, two, three, four, five))
    return grades
#We have supplied the same sample.cs1301 from the previous
#exercise. Feel free to test your code with it to see if it
#works:
print(reader("sample.cs1301"))
|
[
"noreply@github.com"
] |
pyl135.noreply@github.com
|
2591cc81fd5627fc8a9f64a4682768c4fd98f5ce
|
90c4326a1adc57476aea35ec18ba35f303765065
|
/Stack.py
|
4141878319d12f05cd69321beed2ddce74bb1a08
|
[] |
no_license
|
shyamsundar7897/Automata
|
d09b4695fc9292a867d6eaece89a4e28268e4632
|
32d47484b108cd04434b77ab395e26c68c19e591
|
refs/heads/master
| 2020-03-17T16:23:28.773729
| 2018-05-17T02:40:46
| 2018-05-17T02:40:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
class Stack:
# Stack item for expression conversion
def __init__(self):
# Constructor for Stack
self.stack = []
self.top = -1
def push(self, val):
# Push item into stack
self.top += 1
self.stack.append(val)
def pop(self):
# Return item from stack
if self.top < 0:
raise Exception('Stack Empty => Enter a correct expression')
else:
self.top -= 1
return self.stack.pop()
def isEmpty(self):
# Check if stack is empty
if self.top == -1:
return True
return False
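# Minimal usage sketch (added; not part of the original file), mimicking the
# expression-conversion context the comments above mention:
if __name__ == '__main__':
    s = Stack()
    for token in ['(', '+']:
        s.push(token)
    while not s.isEmpty():
        print(s.pop())  # prints '+' then '('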
|
[
"noreply@github.com"
] |
shyamsundar7897.noreply@github.com
|
d20ce66c888253d4220e2251912e787e94f914e6
|
e17c8f129a81b0ff056e7149dc202303e0588aa0
|
/abp_app/myapp.py
|
d79d9667946159d1eda711723aaebd0f2f8a2673
|
[] |
no_license
|
CONDUITlab/abp
|
da58a5fe9bc369b92561534bfdc3188b70f39a74
|
f78d1a4ff8617b73a3af6ee21b28d2491b78bcf3
|
refs/heads/master
| 2021-09-16T04:21:14.592984
| 2018-06-16T09:41:03
| 2018-06-16T09:41:03
| 105,927,752
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,676
|
py
|
import pandas as pd
import numpy as np
from bokeh.models import ColumnDataSource, TapTool
from bokeh.plotting import figure
from bokeh.layouts import row
#from bokeh.plotting import show
from bokeh.io import curdoc
# data for plot 2
df2 = pd.DataFrame({"A" : np.linspace(10, 20, 10),
"B" : np.linspace(20, 30, 10),
"C" : np.linspace(30, 40, 10),
"D" : np.linspace(40, 50, 10),
"E" : np.linspace(50, 60, 10),})
source2 = ColumnDataSource(
data=dict(
x=list(df2.index.values),
y=list(df2.iloc[:,0].values)
)
)
# data for plot 1
df1 = np.mean(df2)
source1 = ColumnDataSource(
data=dict(
x=list(range(0,df1.shape[0])),
y=list(df1.values),
colnames = list(df1.index.values)
)
)
# Plot graph one with data from df1 and source 1 as barplot
plot1 = figure(plot_height=300, plot_width=400, tools="tap")
plot1.vbar(x='x',top='y',source=source1, bottom=0,width =0.5)
# Plot graph two with data from df2 and source 2 as line
plot2 = figure(plot_height=300, plot_width=400, title="myvalues",
tools="crosshair,box_zoom,reset,save,wheel_zoom,hover")
r1 = plot2.line(x='x',y='y',source =source2, line_alpha = 1, line_width=1)
# safe data from plot 2 for later change in subroutine
ds1 = r1.data_source
def update_plot2(mycolumn):
try:
ds1.data['y'] = df2[mycolumn].values
except:
pass
# add taptool to plot1: in a Bokeh server app the TapTool writes the tapped
# bar into the data source's selection, so listen to the selection indices
# (assigning a plain Python call to the tool would only run once, at startup)
def on_tap(attr, old, new):
    if new:
        update_plot2(source1.data['colnames'][new[0]])
source1.selected.on_change('indices', on_tap)
#show(row(plot1,plot2))
curdoc().add_root(row(plot1,plot2))
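# Note (added): as a Bokeh server app, this script is meant to be run with
#   bokeh serve --show myapp.py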
|
[
"stephen@peterkins.ca"
] |
stephen@peterkins.ca
|
b0cd844306784feeb3f0c8b18593d18f729e49f3
|
45b54b5063a548861a7971635679776dc13e5299
|
/bidnet.py
|
5164ee1ace7090f524a774070290513201425161
|
[] |
no_license
|
samsmusa/python-scraping
|
eb39274628f798c62e8099a40f5c1783b48d8bb4
|
b5788aac42652e59302ebf3dc6276f7ddcfa2bc9
|
refs/heads/main
| 2023-06-11T06:30:16.944977
| 2021-07-01T16:43:33
| 2021-07-01T16:43:33
| 382,097,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,540
|
py
|
import requests
from tqdm import tqdm
from bs4 import BeautifulSoup
import pandas as pd
headers1 = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36', "Upgrade-Insecure-Requests": "1","DNT": "1","Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8","Accept-Language": "en-US,en;q=0.5","Accept-Encoding": "gzip, deflate"}
#state list
# sate = ["texas","alabama" ]
#data variable list
name_bid = []
region_bid = []
published = []
end = []
#progressbar
pbar = tqdm(total = 100, desc= "Collecting...", unit= "num")
#url
base_url = "https://www.bidnetdirect.com"
# url = "https://www.bidnetdirect.com/alabama/solicitations/open-bids/page1"
url = "https://www.bidnetdirect.com/solicitations/open-bids?selectedContent=AGGREGATE"
#get source of page
def get_data(url):
html = requests.get(url, headers= headers1)
soup = BeautifulSoup(html.text, "lxml")
return soup
#collect data from page
def parse(soup, c):
content = soup.find('table', class_='mets-table')
for te in tqdm(content.find_all('tbody'), desc= f'site {c}'):
rows = te.find_all('tr')
for row in rows:
name = row.find('a', class_="solicitation-link mets-command-link")
region = row.find('td', class_='region')
s_date = row.find('td', class_='dates publication-date')
end_date = row.find('td', class_='dates closing-date')
try:
name_bid.append(name.text.strip())
region_bid.append(region.text.strip())
published.append(s_date.text.strip())
end.append(end_date.text.strip())
except:
pass
#go next page
def next_page(soup, base_url):
    next_link = soup.find("a", class_="next mets-pagination-page-icon")
    if next_link:
        return base_url + next_link["href"]
    return False
c = 1
#main loop = 1
while True:
soup = get_data(url)
parse(soup, c)
url = next_page(soup, base_url)
# print(url)
pbar.update(1)
c += 1
if not url:
break
#save data
bid = {
"name" : name_bid,
"region": region_bid,
"Published": published,
"End": end,
}
df = pd.DataFrame(bid)
# df.to_html(open('googl11e.html', 'w'),escape=False)
df.to_csv("bid_us.csv")
|
[
"noreply@github.com"
] |
samsmusa.noreply@github.com
|
4e97f20afd5476a92d425462b32e3b8dbd42c3a8
|
d4e6160ca6131c0f8681749cc23f2ccef86c1367
|
/test_cap.py
|
8c8cadc0bc95309d5ce821dd0dacd9a1fb787b54
|
[] |
no_license
|
jennifercliu/pythonBootCamp2020
|
56cfa740b3b8a60966564997a2783d82b1721765
|
4cee94478216bb635d677323ddf1b6280ce8380b
|
refs/heads/main
| 2023-04-26T17:47:21.715391
| 2021-05-20T01:31:50
| 2021-05-20T01:31:50
| 323,473,154
| 1
| 0
| null | 2021-05-20T01:31:51
| 2020-12-21T23:41:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 346
|
py
|
import unittest
import cap
class TestCap(unittest.TestCase):
def test_one_word(self):
text = 'python'
result = cap.cap_text(text)
self.assertEqual(result, 'Python')
def test_multiple_words(self):
text = 'monty python'
result = cap.cap_text(text)
self.assertEqual(result, 'Monty Python')
if __name__=='__main__':
unittest.main()
|
[
"jennifercliu94@gmail.com"
] |
jennifercliu94@gmail.com
|
3869b5dfe0c1d52b3ad24506b1770db5c4ac8044
|
db98c27fd25542e0e341f6acf32162564ea77312
|
/animalgogo/animalgogo/wsgi.py
|
aa8cc78a6c3fb016aec4f2a85d0717b70c4c4971
|
[] |
no_license
|
ShaunaMack/go-fund-she
|
d652bce061ac836d1c5e80d6c0d5f761853b2cd2
|
c4705093bbec81f21f2ecd08596db7611dbaca35
|
refs/heads/main
| 2023-08-24T09:52:51.673728
| 2021-09-25T01:46:52
| 2021-09-25T01:46:52
| 405,237,176
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for animalgogo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'animalgogo.settings')
application = get_wsgi_application()
|
[
"shaunakatemack@gmail.com"
] |
shaunakatemack@gmail.com
|
47cda6e3143dfdee7192c8060a5f9cf6e5ff6ffd
|
76ca62cd33781f812608b5dff989998df485566c
|
/multiplication_of_numbers.py
|
aa36a305e265041b7b7e94bf86a4ba6ce6cdd520
|
[] |
no_license
|
VictoriaKolesnyk/newrepository
|
6cc55e2f2d485e4bdca1b7af4a4333ca82358843
|
799f41c3f6065061cd1bbc7cdea1e1bb9b5e018d
|
refs/heads/master
| 2022-12-02T20:40:07.983725
| 2020-08-25T19:33:00
| 2020-08-25T19:33:00
| 289,306,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
def multiplication_of_number(number):
    s = str(number)
    res = sum(int(el) * idx for idx, el in enumerate(s, 1))
    return res
number =int(input('please enter a number '))
print(multiplication_of_number(number))
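# e.g. 123 -> 1*1 + 2*2 + 3*3 = 14 (each digit weighted by its 1-based position)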
|
[
"a.vika787@gmail.com"
] |
a.vika787@gmail.com
|
3ae5a44b48257791e208650dc401ec8f6fbc5c64
|
6842e3fe3b21215859df6a61fddfd7a9b65b1ce3
|
/Simple Server-Client/client.py
|
cb265c912446b8dfaac2dc18b9add077cbd6a891
|
[] |
no_license
|
SiriK404/Python-Socket-Programming
|
bc47c90ddb480787e6f9b35c7ccd393c27f93016
|
0c70ce0e8eae29a69ad8e4d6db972fdc4e56a4a2
|
refs/heads/master
| 2020-11-26T04:45:00.503136
| 2019-12-26T07:08:30
| 2019-12-26T07:08:30
| 228,966,591
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
#!/usr/bin/python3
import socket
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((socket.gethostname(),4444))
s.send(bytes("I am CLIENT", 'utf-8'))
msg=s.recv(1024)
s.close()
print(msg.decode('utf-8'))
|
[
"noreply@github.com"
] |
SiriK404.noreply@github.com
|
ea0d6e3a438b9ece04e26ddf4733d350905e9d39
|
45d0c1ec4f5e5ef2af3954d0c539e813081c985e
|
/cni/containers.py
|
02cf480596cefdf24535919258dfe1d14b86c78e
|
[] |
no_license
|
manotilla/manas
|
2e7a6bef7abdfaf2838f979c1ae8b6260d475924
|
4683d48fa6bce2b17ba6e1c99fca7338c856cf03
|
refs/heads/master
| 2023-02-26T21:33:38.864838
| 2021-02-01T17:31:37
| 2021-02-01T17:31:37
| 245,846,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,104
|
py
|
import docker
import logging
import os
try:
if os.environ["LOG_LEVEL"] == "DEBUG":
log_level = logging.DEBUG
else:
log_level = logging.INFO
except KeyError:
log_level = logging.INFO
logging.basicConfig(level=log_level)
class Containers(object):
def __init__(self):
self.container_client = docker.from_env()
def detect_host_ip(self, checked_ip):
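        # Scan all running containers for one whose bridge-network IP equals
        # checked_ip; return its metadata dict, or fall through to None.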
containers = self.container_client.containers.list()
for container in containers:
try:
source_ip = container.attrs["NetworkSettings"]["Networks"]["bridge"]["IPAddress"]
image_id = container.attrs['Config']['Image']
container_id = container.attrs['Id'][0:4]
if source_ip == checked_ip:
logging.info({"container_id": container_id, "image_id": image_id, "source_ip": checked_ip})
return {"container_id": container_id, "image_id": image_id , "source_ip": checked_ip}
else:
continue
except Exception as exp:
logging.error(exp)
|
[
"emirozbir@kloia.com"
] |
emirozbir@kloia.com
|
2ee92a6558dede684ac8a357377754bb9e6687bf
|
ae550a5e31ee6c070f903268d6b6830d352f9e4e
|
/addy/wsgi.py
|
dca5ae26616709207b33990b141f90835abfad81
|
[] |
no_license
|
Cdingram/Routing
|
a3af64cf1a78cae0d6bbb16491e561cacc05effd
|
60b8327446d39a109a853443396536dd76ebac4d
|
refs/heads/master
| 2021-01-11T10:15:07.039803
| 2016-11-12T20:10:20
| 2016-11-12T20:10:20
| 72,590,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
"""
WSGI config for addy project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "addy.settings")
application = get_wsgi_application()
|
[
"cdingram@ualberta.ca"
] |
cdingram@ualberta.ca
|
6cc0b40552a7b84b67654c5343748b10becaba83
|
8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a
|
/qbittorrent_examples/common.py
|
ddc95e8e8fe8667135cad88bfda306fb07fca849
|
[
"CC-BY-4.0"
] |
permissive
|
stepik/SimplePyScripts
|
01092eb1b2c1c33756427abb2debbd0c0abf533f
|
3259d88cb58b650549080d6f63b15910ae7e4779
|
refs/heads/master
| 2023-05-15T17:35:55.743164
| 2021-06-11T22:59:07
| 2021-06-11T22:59:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import sys
from typing import List, Dict
from pathlib import Path
# pip install tabulate
from tabulate import tabulate
# pip install python-qbittorrent
from qbittorrent import Client
from config import IP_HOST, USER, PASSWORD
sys.path.append(str(Path(__file__).resolve().parent.parent))
from human_byte_size import sizeof_fmt
def print_table(rows: List[List[str]], headers: List[str], show_index=True):
if show_index:
show_index = range(1, len(rows) + 1)
text = tabulate(rows, headers=headers, tablefmt="grid", showindex=show_index)
print(text)
def print_files_table(files: List[Dict]):
rows = [(file['name'], sizeof_fmt(file['size'])) for file in sorted(files, key=lambda x: x['name'])]
headers = ['#', 'File Name', 'Size']
print_table(rows, headers)
def print_torrents(torrents: List[Dict]):
total_size = 0
for i, torrent in enumerate(torrents, 1):
torrent_size = torrent['total_size']
total_size += torrent_size
print(f"{i:3}. {torrent['name']} ({sizeof_fmt(torrent_size)})")
print()
print(f'Total torrents: {len(torrents)}, total size: {sizeof_fmt(total_size)} ({total_size} bytes)')
def get_client() -> Client:
client = Client(IP_HOST)
client.login(USER, PASSWORD)
return client
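# Example usage (a sketch; Client.torrents() comes from python-qbittorrent):
#   client = get_client()
#   print_torrents(client.torrents())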
|
[
"ilya.petrash@inbox.ru"
] |
ilya.petrash@inbox.ru
|
f7fd42324d3169ef99b480627185da8318abf978
|
bd151bef8bca1eeb0c06cdc86fb3c7ef1d28954c
|
/venv/Scripts/pip3.8-script.py
|
c8fd068eb27f207d01564dcb3537c15877090cc8
|
[] |
no_license
|
ashleymazzonna/mac_changer
|
e7b1a273a3da79921f307fb7e2db733893a22362
|
c270fa424fed0500d70707b1c6c6835f6aa4350c
|
refs/heads/master
| 2022-05-22T22:08:37.597886
| 2020-04-25T03:03:11
| 2020-04-25T03:03:11
| 258,674,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
#!C:\Users\ashle\PycharmProjects\mac_changer\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
|
[
"ashleym2910@gmail.com"
] |
ashleym2910@gmail.com
|
fe045577682f1f630883572c9a3b3c893c5aa70f
|
6591209f10b83bd617a0f577ba659cb91f2b2ac2
|
/04_data structures/03_list.py
|
59e1de4b10b2f009afdb050dc02e94ff567483be
|
[] |
no_license
|
gabrielsule/cursopython
|
94c834462b1f1e371ad1f986713773a89cec2066
|
63e62dd0a7229d4ec665fc95e50751f0d29c05c9
|
refs/heads/master
| 2020-05-30T12:15:31.780473
| 2019-06-01T12:18:34
| 2019-06-01T12:18:34
| 189,728,583
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
data = [1, 2, 3, 4, 5, 6]
# add
data.append(7)
data.insert(0, 0)
print(data)
# remove
data.pop(0)
data.remove(5)
print(data)
|
[
"gabrielsule@gmail.com"
] |
gabrielsule@gmail.com
|
9e17efeaae7712f632dfc951b8c4faccf09300ea
|
3a85089c2498ff04d1b9bce17a4b8bf6cf2380c9
|
/EventFilter/Cosmics/python/__init__.py
|
8de5e10f583a8354f7bdce130bf75b64b564ba0f
|
[] |
no_license
|
sextonkennedy/cmssw-ib
|
c2e85b5ffa1269505597025e55db4ffee896a6c3
|
e04f4c26752e0775bd3cffd3a936b288ee7b0268
|
HEAD
| 2016-09-01T20:09:33.163593
| 2013-04-26T12:05:17
| 2013-04-29T16:40:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
#Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/EventFilter/Cosmics/',1)[0])+'/cfipython/slc6_amd64_gcc480/EventFilter/Cosmics')
|
[
"giulio.eulisse@cern.ch"
] |
giulio.eulisse@cern.ch
|
e79e058467598ed735112b6942be0f1b5e3269a6
|
5b93cdcbe4dd7e93ae3f14312c347a43953c2af4
|
/ArgTest/classes/attack.py
|
8f8e3117bf047c56d39b452456fef7ba2048143a
|
[] |
no_license
|
mszczot/argumentationSemantics
|
992941ebb8a0517195f9fb6b7f6d9e067f9839c0
|
8b205078fa015b0d03840f97969373ba88f53194
|
refs/heads/master
| 2021-04-27T00:24:10.966852
| 2018-03-15T21:27:42
| 2018-03-15T21:27:42
| 123,806,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
class Attack(object):
def __init__(self, attacker, attacked):
self.attacker = attacker
self.attacked = attacked
    def get_set(self):
        return {self.attacker, self.attacked}
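# e.g. Attack('a', 'b').get_set() == {'a', 'b'}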
|
[
"40180425@napier.ac.uk"
] |
40180425@napier.ac.uk
|
4da25535cadb102bd8f51971ed39cb23a040407d
|
0f152b4bcd3a901d69b230bc8499c84edc9565c9
|
/eatsmart/eatsmart/wsgi.py
|
3ae72e18daeea82a4d6a9834832ee623cb03e5cb
|
[] |
no_license
|
Kyoud/Django
|
2042bc1d8df9261ddfcd3ba56d7416c77461bb12
|
334d8fd4db49d154d58962ad1a199c5ece14e9a7
|
refs/heads/master
| 2021-01-13T03:26:52.857446
| 2016-12-28T17:15:34
| 2016-12-28T17:15:34
| 77,548,921
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
"""
WSGI config for eatsmart project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eatsmart.settings")
application = get_wsgi_application()
|
[
"hollenbeckjn@gmail.com"
] |
hollenbeckjn@gmail.com
|
4d066a1f3af37064dc6990b14a9a2e2baf54dc92
|
8f70b40ef1c657ee14accfe6e2f8b1ebb1bebb7e
|
/employeeform/migrations/0004_auto_20191206_1630.py
|
f84f3d3d85c39b082cf3985e9977f625ffe70444
|
[] |
no_license
|
TejashviVerma/School_ERP
|
e3d6f1aabe92167c2b55c0b1682dde505bb04edd
|
11406da8b1d8701b7ea55f75c76f1cbf44a72c53
|
refs/heads/master
| 2023-08-03T15:10:11.481306
| 2020-09-13T18:02:40
| 2020-09-13T18:02:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
# Generated by Django 2.2.5 on 2019-12-06 11:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('employeeform', '0003_auto_20191206_1619'),
]
operations = [
migrations.RenameField(
model_name='employeedocuments',
old_name='idProof',
new_name='IdProof',
),
]
|
[
"yashboura303@gmail.com"
] |
yashboura303@gmail.com
|
ce842671e59252a6925bc1c003e6d86e44fc1c98
|
1e2244b3f706642c25a4be1d7fb8c69c2c025975
|
/job/admin.py
|
a3edbcc7790e829a975ebb0de54becf0749b2218
|
[] |
no_license
|
Mahmoud-m-bahget/django-job-board
|
59e78476b6178ab37753d627204133257f63b04b
|
55cd5c2c98c9afba2d54ad4462765e75ab49caec
|
refs/heads/master
| 2022-11-30T11:54:43.470987
| 2020-08-08T20:18:11
| 2020-08-08T20:18:11
| 284,384,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
from django.contrib import admin
from .models import Job , Category , Apply
# Register your models here.
admin.site.register(Job)
admin.site.register(Apply)
admin.site.register(Category)
|
[
"mahmoodbebo.mb@gmail.com"
] |
mahmoodbebo.mb@gmail.com
|
4a8b6dd2ee2549d673882e65280fb707c8b7ef9d
|
f588ce8fa900935e24d53887c3afa52005ff8b9c
|
/env/bin/pip2.7
|
a80a97e5fdc58c4b1c7bb837419b40c2b42d694c
|
[] |
no_license
|
ajm/tetris
|
8b4ebcf50214ef849bde429ddc02fc2944f10c9b
|
c3a350aa93a321e69df608d1207226e84517eae5
|
refs/heads/master
| 2022-04-27T00:45:45.050675
| 2019-07-31T11:46:06
| 2019-07-31T11:46:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
7
|
#!/Users/risei/Desktop/tetrisGit/tetris/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"jing.li@helsinki.fi"
] |
jing.li@helsinki.fi
|
42306033a281b6350238b5fff7ae7cdab26766e9
|
23e21d8b3dca4991a2fcb8b708779a4e7b6c056d
|
/expenses/migrations/0001_initial.py
|
f24e5606dcf67ccc4ae9cfc4d2577ab1e3903b5b
|
[] |
no_license
|
tracepesa/tracepesa
|
4a9efc05d27751a399278bf3a6510d06f8e19f1a
|
094722de9d399dd765e78d39dd4355b684ef8b3a
|
refs/heads/master
| 2021-09-24T19:16:37.419479
| 2020-02-29T09:28:44
| 2020-02-29T09:28:44
| 243,740,782
| 0
| 0
| null | 2021-09-22T18:38:56
| 2020-02-28T10:52:53
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,455
|
py
|
# Generated by Django 3.0.3 on 2020-02-28 09:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=120, null=True)),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='expenses.Category')),
],
options={
'verbose_name_plural': 'categories',
},
),
migrations.CreateModel(
name='Expense',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=0, max_digits=20)),
('description', models.CharField(blank=True, max_length=120, null=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_updated', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='expenses.Category')),
],
),
]
|
[
"shirimas@gmail.com"
] |
shirimas@gmail.com
|
c589a73bdb953c385df8a3734ad9b98afacc6e90
|
72839718a4b47b1babd4ad895ecd503a0a0e14d2
|
/stembot/executor/ticket.py
|
fff953e6803693a1039328f71b3155b6a80807ac
|
[
"MIT"
] |
permissive
|
phnomcobra/stembot-python
|
6fb0d9a2874fc1bb8b8e5cf69e9f4d39c38dba5a
|
497dd782556d62eeb9e9301f9de37332d93207d7
|
refs/heads/master
| 2021-06-17T10:56:33.148454
| 2021-02-23T00:58:00
| 2021-02-23T00:58:00
| 174,921,135
| 0
| 0
|
MIT
| 2021-02-07T03:48:14
| 2019-03-11T03:44:21
|
Python
|
UTF-8
|
Python
| false
| false
| 13,314
|
py
|
#!/usr/bin/python3
ASYNC_TICKET_TIMEOUT = 3600
SYNC_TICKET_TIMEOUT = 15
import traceback
from base64 import b64encode, b64decode
from time import time, sleep
from threading import Thread, Timer
from stembot.dao.ramdocument import Collection as RAMCollection
from stembot.dao.document import Collection as SQLCollection
from stembot.adapter.agent import MPIClient
from stembot.model.peer import create_peer
from stembot.model.peer import delete_peer
from stembot.model.peer import delete_peers
from stembot.model.peer import get_peers
from stembot.model.peer import get_routes
from stembot.model import kvstore
from stembot.adapter.python import interpret
from stembot.adapter.file import create_file_handle
from stembot.adapter.file import close_file_handle
from stembot.adapter.file import file_handle_read
from stembot.adapter.file import file_handle_write
from stembot.adapter.file import file_handle_seek
from stembot.adapter.file import file_handle_tell
from stembot.adapter.file import file_handle_truncate
from stembot.adapter.process import create_process_handle
from stembot.adapter.process import process_handle_status
from stembot.adapter.process import process_handle_kill
from stembot.adapter.process import process_handle_terminate
from stembot.adapter.process import process_handle_wait
from stembot.adapter.process import process_handle_recv
from stembot.adapter.process import process_handle_send
from stembot.adapter.process import close_process_handle
from stembot.executor.cascade import create_cascade_request
from stembot.executor.cascade import create_anonymous_cascade_request
from stembot.executor.cascade import get_cascade_responses
from stembot.executor.cascade import pop_cascade_responses
from stembot.executor.cascade import wait_on_cascade_responses
from stembot.executor.counters import increment as ctr_increment
from stembot.executor.counters import get_all as ctr_get_all
from stembot.executor.timers import register_timer
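# Ticket lifecycle: create_ticket records a request and wraps it in a
# 'ticket request' message; process_ticket executes the request on the
# destination agent; service_ticket stores the response so that
# wait_on_ticket_response / get_ticket_response can return it to the caller.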
def create_ticket(request):
ctr_increment('tickets created')
tickets = RAMCollection('tickets')
ticket = tickets.get_object()
ticket.object['src'] = kvstore.get(name='agtuuid')
if 'dest' in request:
ticket.object['dest'] = request['dest']
else:
ticket.object['dest'] = kvstore.get(name='agtuuid')
ticket.object['timestamp'] = time()
ticket.object['request'] = request
ticket.object['response'] = None
ticket.set()
message = {}
message['type'] = 'ticket request'
message['src'] = ticket.object['src']
message['request'] = ticket.object['request']
message['dest'] = ticket.object['dest']
message['tckuuid'] = ticket.object['objuuid']
return message
def process_ticket(message):
ctr_increment('tickets processed')
message['type'] = 'ticket response'
message['src'], message['dest'] = message['dest'], message['src']
request = message['request']
response = {}
try:
if request['type'] == 'discover peer':
if 'ttl' in request:
ttl = request['ttl']
else:
ttl = None
            if 'polling' in request:
                polling = request['polling']
            else:
                polling = False
create_peer(
MPIClient(
request['url'],
kvstore.get(name='secret_digest')
).send_json({'type': 'create info event'})['dest'],
url=request['url'],
ttl=ttl,
polling=polling
)
response = request
elif request['type'] == 'create peer':
if 'url' in request:
url = request['url']
else:
url = None
if 'ttl' in request:
ttl = request['ttl']
else:
ttl = None
if 'polling' in request:
polling = request['polling']
else:
polling = False
create_peer(
request['agtuuid'],
url=url,
ttl=ttl,
polling=polling
)
response = request
elif request['type'] == 'delete peers':
delete_peers()
response = request
elif request['type'] == 'delete peer':
delete_peer(request['agtuuid'])
response = request
elif request['type'] == 'get peers':
response = get_peers()
elif request['type'] == 'get routes':
response = get_routes()
elif request['type'] == 'get counters':
response = ctr_get_all()
elif request['type'] == 'file handle open':
response['fhduuid'] = create_file_handle(
request['filename'],
request['mode']
)
response['type'] = request['type']
elif request['type'] == 'file handle close':
close_file_handle(request['fhduuid'])
response = request
elif request['type'] == 'file handle read':
if 'size' in request:
response['b64data'] = b64encode(
file_handle_read(
request['fhduuid'],
request['size']
)
).decode()
else:
response['b64data'] = b64encode(
file_handle_read(
request['fhduuid']
)
).decode()
response['type'] = request['type']
elif request['type'] == 'file handle write':
file_handle_write(
request['fhduuid'],
b64decode(request['b64data'])
)
response = request
elif request['type'] == 'file handle truncate':
file_handle_truncate(request['fhduuid'], request['size'])
response = request
elif request['type'] == 'file handle seek':
file_handle_seek(request['fhduuid'], request['position'])
response = request
elif request['type'] == 'file handle tell':
response['position'] = file_handle_tell(request['fhduuid'])
response['type'] = request['type']
elif request['type'] == 'process handle create':
response['phduuid'] = create_process_handle(request['command'])
response['type'] = request['type']
elif request['type'] == 'process handle status':
response['status'] = process_handle_status(request['phduuid'])
elif request['type'] == 'process handle kill':
process_handle_kill(request['phduuid'])
response = request
elif request['type'] == 'process handle terminate':
process_handle_terminate(request['phduuid'])
response = request
elif request['type'] == 'process handle wait':
process_handle_wait(request['phduuid'])
response = request
elif request['type'] == 'process handle close':
close_process_handle(request['phduuid'])
response = request
elif request['type'] == 'process handle send':
process_handle_send(request['phduuid'], b64decode(request['b64data']))
response = request
elif request['type'] == 'process handle recv':
stdout, stderr = process_handle_recv(request['phduuid'])
response['stdout b64data'] = b64encode(stdout).decode()
response['stderr b64data'] = b64encode(stderr).decode()
response['type'] = request['type']
elif request['type'] == 'create cascade async':
response = create_cascade_request(request)
elif request['type'] == 'create cascade anon':
create_anonymous_cascade_request(request)
response = request
elif request['type'] == 'create cascade sync':
if 'timeout' in request:
response = wait_on_cascade_responses(
create_cascade_request(request)['cscuuid'],
request['timeout']
)
else:
response = wait_on_cascade_responses(
create_cascade_request(request)['cscuuid']
)
elif request['type'] == 'get cascade responses':
response = get_cascade_responses(request['cscuuid'])
elif request['type'] == 'pull cascade responses':
response = pop_cascade_responses(request['cscuuid'])
elif request['type'] == 'delete collection':
SQLCollection(request['name']).destroy()
response = request
elif request['type'] == 'rename collection':
SQLCollection(request['name']).rename(request['new name'])
response = request
elif request['type'] == 'create collection attribute':
SQLCollection(request['name']).create_attribute(
request['attribute'],
request['path']
)
response = request
elif request['type'] == 'delete collection attribute':
SQLCollection(request['name']).delete_attribute(request['attribute'])
response = request
elif request['type'] == 'find collection objects':
response = []
for temp in SQLCollection(request['name']).find(**request['query']):
response.append(temp.object)
elif request['type'] == 'find collection object uuids':
response = SQLCollection(request['name']).find_objuuids(**request['query'])
elif request['type'] == 'get collection object':
if 'objuuid' in request:
response = SQLCollection(request['name']).get_object(request['objuuid']).object
else:
response = SQLCollection(request['name']).get_object().object
elif request['type'] == 'set collection object':
response = request
c = SQLCollection(request['name'])
o = c.get_object(request['object']['objuuid'])
o.object = request['object']
o.set()
elif request['type'] == 'delete collection object':
response = request
SQLCollection(request['name']).get_object(request['objuuid']).destroy()
elif request['type'] == 'list collection object uuids':
response = SQLCollection(request['name']).list_objuuids()
elif request['type'] == 'ping':
response = request
elif request['type'] == 'execute python':
response['status'], response['stdout'], response['stderr'] = interpret(request['body'])
else:
raise Exception('Unknown request type!')
except:
response['exception'] = traceback.format_exc()
message['response'] = response
return message
def service_ticket(message):
ctr_increment('tickets serviced')
tickets = RAMCollection('tickets')
ticket = tickets.get_object(message['tckuuid'])
ticket.object['response'] = message['response']
ticket.set()
def wait_on_ticket_response(tckuuid, timeout=None):
tickets = RAMCollection('tickets')
    if timeout is None:
        timeout = SYNC_TICKET_TIMEOUT
while True:
ticket = tickets.get_object(tckuuid)
if time() - ticket.object['timestamp'] > timeout:
ticket.destroy()
raise Exception('Ticket timeout period reached!')
        if ticket.object['response'] is not None:
response = ticket.object['response']
ticket.destroy()
break
sleep(1.0)
return response
def get_ticket_response(tckuuid):
tickets = RAMCollection('tickets')
ticket = tickets.get_object(tckuuid)
response = ticket.object['response']
return response
def delete_ticket(tckuuid):
RAMCollection('tickets').get_object(tckuuid).destroy()
def worker():
tickets = RAMCollection('tickets')
for objuuid in tickets.list_objuuids():
ticket = tickets.get_object(objuuid)
try:
if time() - ticket.object['timestamp'] > ASYNC_TICKET_TIMEOUT:
ticket.destroy()
ctr_increment('tickets expired')
except:
ticket.destroy()
register_timer(
name='ticket_worker',
target=worker,
timeout=ASYNC_TICKET_TIMEOUT
).start()
Thread(target=worker).start()
|
[
"phnomcobra@gmail.com"
] |
phnomcobra@gmail.com
|
8dac52cf3fed52bdd5d99dd766b36cef91ae7dc6
|
66ecea43786d6a487aa31ed5779cc8a2dd20d4fb
|
/assignment_3/Q4.py
|
f3f011fd7abedbe578a0531fba8fb2d020a45587
|
[] |
no_license
|
AndresArdila89/SCRIPTING-LANGUAGE
|
a18eee709b7b64927bf439f1cf4f7bceda4e8193
|
7487247ce83b17dc88427c8d8f3037edcc2907c0
|
refs/heads/master
| 2023-01-06T22:17:11.066584
| 2020-11-09T03:25:14
| 2020-11-09T03:25:14
| 298,015,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
'''
Question 4: Given an input string, count occurrences
of all characters within a string.
'''
string = "andres"
letterInString = {}
for i in string:
if i in letterInString:
letterInString[i] += 1
else:
letterInString[i] = 1
print(letterInString)
|
[
"andresardila@me.com"
] |
andresardila@me.com
|
69192c6ab4ee2b552ad6a32cd7ad4ec54844ebd7
|
a4e187eb26c926a72ee260d3eb4f07a57eb31af0
|
/src/aceinna/devices/openrtk/lan_provider.py
|
8ea2bf70d39ddc9359b6154897157531eb45a6e2
|
[
"Apache-2.0"
] |
permissive
|
BrunoScaglione/python-openimu
|
2cab6386a65dba3676b152ba4ed07e3579e47aa4
|
5653fad05b735a26c44e46c4ee023137e621e58e
|
refs/heads/master
| 2023-06-04T08:36:30.982960
| 2021-05-07T09:09:12
| 2021-05-07T09:09:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,756
|
py
|
import os
import time
import json
import datetime
import threading
import math
import re
from ..widgets import (
NTRIPClient, LanDataLogger, LanDebugDataLogger, LanRTCMDataLogger
)
from ...framework.utils import (
helper, resource
)
from ...framework.context import APP_CONTEXT
from ..base.provider_base import OpenDeviceBase
from ..configs.openrtk_predefine import (
APP_STR, get_openrtk_products, get_configuratin_file_mapping
)
from ..decorator import with_device_message
from ..parsers.open_field_parser import encode_value
from ...framework.utils.print import print_yellow
class Provider(OpenDeviceBase):
'''
OpenRTK LAN provider
'''
def __init__(self, communicator, *args):
super(Provider, self).__init__(communicator)
self.type = 'RTK'
self.server_update_rate = 100
self.sky_data = []
self.pS_data = []
self.app_config_folder = ''
self.device_info = None
self.app_info = None
self.parameters = None
self.setting_folder_path = None
self.data_folder = None
self.debug_serial_port = None
self.rtcm_serial_port = None
self.user_logf = None
self.debug_logf = None
self.rtcm_logf = None
self.debug_c_f = None
self.enable_data_log = False
self.is_app_matched = False
self.ntrip_client_enable = False
self.nmea_buffer = []
self.nmea_sync = 0
self.prepare_folders()
self.ntripClient = None
self.connected = True
self.rtk_log_file_name = ''
def prepare_folders(self):
'''
Prepare folders for data storage and configuration
'''
executor_path = resource.get_executor_path()
setting_folder_name = 'setting'
data_folder_path = os.path.join(executor_path, 'data')
if not os.path.isdir(data_folder_path):
os.makedirs(data_folder_path)
self.data_folder = data_folder_path
# copy contents of app_config under executor path
self.setting_folder_path = os.path.join(
executor_path, setting_folder_name, 'openrtk')
all_products = get_openrtk_products()
config_file_mapping = get_configuratin_file_mapping()
for product in all_products:
product_folder = os.path.join(self.setting_folder_path, product)
if not os.path.isdir(product_folder):
os.makedirs(product_folder)
for app_name in all_products[product]:
app_name_path = os.path.join(product_folder, app_name)
app_name_config_path = os.path.join(
app_name_path, config_file_mapping[product])
if not os.path.isfile(app_name_config_path):
if not os.path.isdir(app_name_path):
os.makedirs(app_name_path)
app_config_content = resource.get_content_from_bundle(
setting_folder_name, os.path.join(product, app_name, config_file_mapping[product]))
if app_config_content is None:
continue
with open(app_name_config_path, "wb") as code:
code.write(app_config_content)
def bind_device_info(self, device_access, device_info, app_info):
self._build_device_info(device_info)
self._build_app_info(app_info)
self.connected = True
self._device_info_string = '# Connected {0} with LAN #\n\rDevice: {1} \n\rFirmware: {2}'\
.format('OpenRTK', device_info, app_info)
return self._device_info_string
def _build_device_info(self, text):
'''
Build device info
'''
split_text = text.split(' ')
sn = split_text[4]
# remove the prefix of SN
if sn.find('SN:') == 0:
sn = sn[3:]
self.device_info = {
'name': split_text[0],
'imu': split_text[1],
'pn': split_text[2],
'firmware_version': split_text[3],
'sn': sn
}
def _build_app_info(self, text):
'''
Build app info
'''
app_version = text
split_text = app_version.split(' ')
app_name = next(
(item for item in APP_STR if item in split_text), None)
if not app_name:
app_name = 'RTK_INS'
self.is_app_matched = False
else:
self.is_app_matched = True
self.app_info = {
'app_name': app_name,
'version': text
}
def load_properties(self):
# Load config from user working path
local_config_file_path = os.path.join(os.getcwd(), 'openrtk.json')
if os.path.isfile(local_config_file_path):
with open(local_config_file_path) as json_data:
self.properties = json.load(json_data)
return
        # Load the openrtk.json based on its app
product_name = self.device_info['name']
app_name = self.app_info['app_name']
app_file_path = os.path.join(
self.setting_folder_path, product_name, app_name, 'openrtk.json')
with open(app_file_path) as json_data:
self.properties = json.load(json_data)
if not self.is_app_matched:
print_yellow(
'Failed to extract app version information from unit.' +
'\nThe supported application list is {0}.'.format(APP_STR) +
                '\nTo keep running, use the INS configuration as default.' +
                '\nYou can place your json file under the execution path if it is an unknown application.')
def ntrip_client_thread(self):
self.ntripClient = NTRIPClient(self.properties, self.communicator)
self.ntripClient.run()
def after_setup(self):
set_user_para = self.cli_options and self.cli_options.set_user_para
self.ntrip_client_enable = self.cli_options and self.cli_options.ntrip_client
# with_raw_log = self.cli_options and self.cli_options.with_raw_log
if set_user_para:
result = self.set_params(
self.properties["initial"]["userParameters"])
##print('set user para {0}'.format(result))
if result['packetType'] == 'success':
self.save_config()
if self.ntrip_client_enable:
t = threading.Thread(target=self.ntrip_client_thread)
t.start()
try:
if self.data_folder is not None:
dir_time = time.strftime("%Y%m%d_%H%M%S", time.localtime())
file_time = time.strftime(
"%Y_%m_%d_%H_%M_%S", time.localtime())
file_name = self.data_folder + '/' + 'openrtk_log_' + dir_time
os.mkdir(file_name)
self.rtk_log_file_name = file_name
self.user_logf = open(
file_name + '/' + 'user_' + file_time + '.bin', "wb")
self.debug_logf = open(
file_name + '/' + 'debug_' + file_time + '.bin', "wb")
self.rtcm_logf = open(
file_name + '/' + 'rtcm_' + file_time + '.bin', "wb")
# start a thread to log data
threading.Thread(target=self.thread_data_log).start()
threading.Thread(target=self.thread_debug_data_log).start()
threading.Thread(target=self.thread_rtcm_data_log).start()
self.save_device_info()
except Exception as e:
print(e)
return False
def nmea_checksum(self, data):
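        # NMEA checksum: XOR of every character between '$' and '*';
        # the two hex digits after '*' carry the expected value.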
data = data.replace("\r", "").replace("\n", "").replace("$", "")
        nmeadata, cksum = re.split(r'\*', data)
calc_cksum = 0
for s in nmeadata:
calc_cksum ^= ord(s)
return int(cksum, 16), calc_cksum
def on_read_raw(self, data):
for bytedata in data:
if bytedata == 0x24:
self.nmea_buffer = []
self.nmea_sync = 0
self.nmea_buffer.append(chr(bytedata))
else:
self.nmea_buffer.append(chr(bytedata))
if self.nmea_sync == 0:
if bytedata == 0x0D:
self.nmea_sync = 1
elif self.nmea_sync == 1:
if bytedata == 0x0A:
try:
str_nmea = ''.join(self.nmea_buffer)
cksum, calc_cksum = self.nmea_checksum(
str_nmea)
if cksum == calc_cksum:
if str_nmea.find("$GPGGA") != -1:
                                    if self.ntrip_client_enable and self.ntripClient is not None:
self.ntripClient.send(str_nmea)
print(str_nmea, end='')
# else:
# print("nmea checksum wrong {0} {1}".format(cksum, calc_cksum))
except Exception as e:
# print('NMEA fault:{0}'.format(e))
pass
self.nmea_buffer = []
self.nmea_sync = 0
# if self.user_logf is not None:
# self.user_logf.write(data)
def thread_data_log(self, *args, **kwargs):
self.lan_data_logger = LanDataLogger(
self.properties, self.communicator, self.user_logf)
self.lan_data_logger.run()
def thread_debug_data_log(self, *args, **kwargs):
self.lan_debug_data_logger = LanDebugDataLogger(
self.properties, self.communicator, self.debug_logf)
self.lan_debug_data_logger.run()
def thread_rtcm_data_log(self, *args, **kwargs):
self.lan_rtcm_data_logger = LanRTCMDataLogger(
self.properties, self.communicator, self.rtcm_logf)
self.lan_rtcm_data_logger.run()
def on_receive_output_packet(self, packet_type, data, error=None):
'''
Listener for getting output packet
'''
# $GPGGA,080319.00,3130.4858508,N,12024.0998832,E,4,25,0.5,12.459,M,0.000,M,2.0,*46
if packet_type == 'gN':
if self.ntrip_client_enable:
# $GPGGA
gpgga = '$GPGGA'
# time
timeOfWeek = float(data['GPS_TimeofWeek']) - 18
dsec = int(timeOfWeek)
msec = timeOfWeek - dsec
sec = dsec % 86400
hour = int(sec / 3600)
minute = int(sec % 3600 / 60)
second = sec % 60
gga_time = format(hour*10000 + minute*100 +
second + msec, '09.2f')
gpgga = gpgga + ',' + gga_time
# latitude
latitude = float(data['latitude']) * 180 / 2147483648.0
if latitude >= 0:
latflag = 'N'
else:
latflag = 'S'
latitude = math.fabs(latitude)
lat_d = int(latitude)
lat_m = (latitude-lat_d) * 60
lat_dm = format(lat_d*100 + lat_m, '012.7f')
gpgga = gpgga + ',' + lat_dm + ',' + latflag
# longitude
longitude = float(data['longitude']) * 180 / 2147483648.0
if longitude >= 0:
lonflag = 'E'
else:
lonflag = 'W'
longitude = math.fabs(longitude)
lon_d = int(longitude)
lon_m = (longitude-lon_d) * 60
lon_dm = format(lon_d*100 + lon_m, '013.7f')
gpgga = gpgga + ',' + lon_dm + ',' + lonflag
# positionMode
gpgga = gpgga + ',' + str(data['positionMode'])
# svs
gpgga = gpgga + ',' + str(data['numberOfSVs'])
                # hdop
gpgga = gpgga + ',' + format(float(data['hdop']), '03.1f')
# height
gpgga = gpgga + ',' + \
format(float(data['height']), '06.3f') + ',M'
#
gpgga = gpgga + ',0.000,M'
# diffage
gpgga = gpgga + ',' + \
format(float(data['diffage']), '03.1f') + ','
# ckm
checksum = 0
for i in range(1, len(gpgga)):
checksum = checksum ^ ord(gpgga[i])
str_checksum = hex(checksum)
if str_checksum.startswith("0x"):
str_checksum = str_checksum[2:]
gpgga = gpgga + '*' + str_checksum + '\r\n'
print(gpgga)
                if self.ntripClient is not None:
self.ntripClient.send(gpgga)
return
elif packet_type == 'pS':
try:
if data['latitude'] != 0.0 and data['longitude'] != 0.0:
if self.pS_data:
if self.pS_data['GPS_Week'] == data['GPS_Week']:
if data['GPS_TimeofWeek'] - self.pS_data['GPS_TimeofWeek'] >= 0.2:
self.add_output_packet('pos', data)
self.pS_data = data
if data['insStatus'] >= 3 and data['insStatus'] <= 5:
ins_status = 'INS_INACTIVE'
if data['insStatus'] == 3:
ins_status = 'INS_SOLUTION_GOOD'
elif data['insStatus'] == 4:
ins_status = 'INS_SOLUTION_FREE'
elif data['insStatus'] == 5:
ins_status = 'INS_ALIGNMENT_COMPLETE'
ins_pos_type = 'INS_INVALID'
if data['insPositionType'] == 1:
ins_pos_type = 'INS_SPP'
elif data['insPositionType'] == 4:
ins_pos_type = 'INS_RTKFIXED'
elif data['insPositionType'] == 5:
ins_pos_type = 'INS_RTKFLOAT'
inspva = '#INSPVA,%s,%10.2f, %s, %s,%12.8f,%13.8f,%8.3f,%9.3f,%9.3f,%9.3f,%9.3f,%9.3f,%9.3f' %\
(data['GPS_Week'], data['GPS_TimeofWeek'], ins_status, ins_pos_type,
data['latitude'], data['longitude'], data['height'],
data['velocityNorth'], data['velocityEast'], data['velocityUp'],
data['roll'], data['pitch'], data['heading'])
print(inspva)
else:
self.add_output_packet('pos', data)
self.pS_data = data
else:
self.add_output_packet('pos', data)
self.pS_data = data
except Exception as e:
# print(e)
pass
elif packet_type == 'sK':
if self.sky_data:
if self.sky_data[0]['timeOfWeek'] == data[0]['timeOfWeek']:
self.sky_data.extend(data)
else:
self.add_output_packet('skyview', self.sky_data)
self.add_output_packet('snr', self.sky_data)
self.sky_data = []
self.sky_data.extend(data)
else:
self.sky_data.extend(data)
else:
output_packet_config = next(
(x for x in self.properties['userMessages']['outputPackets']
if x['name'] == packet_type), None)
if output_packet_config and output_packet_config.__contains__('from') \
and output_packet_config['from'] == 'imu':
self.add_output_packet('imu', data)
def do_write_firmware(self, firmware_content):
raise Exception('It is not supported by connecting device with LAN')
# rules = [
# InternalCombineAppParseRule('rtk', 'rtk_start:', 4),
# InternalCombineAppParseRule('sdk', 'sdk_start:', 4),
# ]
# parsed_content = firmware_content_parser(firmware_content, rules)
# user_port_num, port_name = self.build_connected_serial_port_info()
# sdk_port = port_name + str(int(user_port_num) + 3)
# sdk_uart = serial.Serial(sdk_port, 115200, timeout=0.1)
# if not sdk_uart.isOpen():
# raise Exception('Cannot open SDK upgrade port')
# upgrade_center = UpgradeCenter()
# upgrade_center.register(
# FirmwareUpgradeWorker(self.communicator, parsed_content['rtk']))
# upgrade_center.register(
# SDKUpgradeWorker(sdk_uart, parsed_content['sdk']))
# upgrade_center.on('progress', self.handle_upgrade_process)
# upgrade_center.on('error', self.handle_upgrade_error)
# upgrade_center.on('finish', self.handle_upgrade_complete)
# upgrade_center.start()
def get_device_connection_info(self):
return {
'modelName': self.device_info['name'],
'deviceType': self.type,
'serialNumber': self.device_info['sn'],
'partNumber': self.device_info['pn'],
'firmware': self.device_info['firmware_version']
}
def get_operation_status(self):
if self.is_logging:
return 'LOGGING'
return 'IDLE'
def save_device_info(self):
if not self.rtk_log_file_name or not self._device_info_string:
return
local_time = time.localtime()
formatted_file_time = time.strftime("%Y_%m_%d_%H_%M_%S", local_time)
file_path = os.path.join(
self.rtk_log_file_name,
'device_info_{0}.txt'.format(formatted_file_time)
)
with open(file_path, 'w') as outfile:
outfile.write(self._device_info_string)
# command list
def server_status(self, *args): # pylint: disable=invalid-name
'''
Get server connection status
'''
return {
'packetType': 'ping',
'data': {'status': '1'}
}
def get_device_info(self, *args): # pylint: disable=invalid-name
'''
Get device information
'''
return {
'packetType': 'deviceInfo',
'data': [
{'name': 'Product Name', 'value': self.device_info['name']},
{'name': 'IMU', 'value': self.device_info['imu']},
{'name': 'PN', 'value': self.device_info['pn']},
{'name': 'Firmware Version',
'value': self.device_info['firmware_version']},
{'name': 'SN', 'value': self.device_info['sn']},
{'name': 'App Version', 'value': self.app_info['version']}
]
}
def get_log_info(self):
'''
Build information for log
'''
return {
"type": self.type,
"model": self.device_info['name'],
"logInfo": {
"pn": self.device_info['pn'],
"sn": self.device_info['sn'],
"rtkProperties": json.dumps(self.properties)
}
}
def get_conf(self, *args): # pylint: disable=unused-argument
'''
Get json configuration
'''
return {
'packetType': 'conf',
'data': {
'outputs': self.properties['userMessages']['outputPackets'],
'inputParams': self.properties['userConfiguration']
}
}
@with_device_message
def get_params(self, *args): # pylint: disable=unused-argument
'''
Get all parameters
'''
has_error = False
parameter_values = []
if self.app_info['app_name'] == 'INS':
conf_parameters = self.properties['userConfiguration']
conf_parameters_len = len(conf_parameters)-1
step = 10
for i in range(2, conf_parameters_len, step):
start_byte = i
end_byte = i+step-1 if i+step < conf_parameters_len else conf_parameters_len
command_line = helper.build_packet(
'gB', [start_byte, end_byte])
result = yield self._message_center.build(command=command_line, timeout=2)
if result['error']:
has_error = True
break
parameter_values.extend(result['data'])
else:
command_line = helper.build_input_packet('gA')
result = yield self._message_center.build(command=command_line, timeout=3)
if result['error']:
has_error = True
parameter_values = result['data']
if not has_error:
self.parameters = parameter_values
yield {
'packetType': 'inputParams',
'data': parameter_values
}
yield {
'packetType': 'error',
'data': 'No Response'
}
@with_device_message
def get_param(self, params, *args): # pylint: disable=unused-argument
'''
Update paramter value
'''
command_line = helper.build_input_packet(
'gP', properties=self.properties, param=params['paramId'])
# self.communicator.write(command_line)
# result = self.get_input_result('gP', timeout=1)
result = yield self._message_center.build(command=command_line)
data = result['data']
error = result['error']
if error:
yield {
'packetType': 'error',
'data': 'No Response'
}
if data:
self.parameters = data
yield {
'packetType': 'inputParam',
'data': data
}
yield {
'packetType': 'error',
'data': 'No Response'
}
@with_device_message
def set_params(self, params, *args): # pylint: disable=unused-argument
'''
Update paramters value
'''
input_parameters = self.properties['userConfiguration']
grouped_parameters = {}
for parameter in params:
exist_parameter = next(
(x for x in input_parameters if x['paramId'] == parameter['paramId']), None)
if exist_parameter:
has_group = grouped_parameters.__contains__(
exist_parameter['category'])
if not has_group:
grouped_parameters[exist_parameter['category']] = []
current_group = grouped_parameters[exist_parameter['category']]
current_group.append(
{'paramId': parameter['paramId'], 'value': parameter['value'], 'type': exist_parameter['type']})
for group in grouped_parameters.values():
message_bytes = []
for parameter in group:
message_bytes.extend(
encode_value('int8', parameter['paramId'])
)
message_bytes.extend(
encode_value(parameter['type'], parameter['value'])
)
# print('parameter type {0}, value {1}'.format(
# parameter['type'], parameter['value']))
# result = self.set_param(parameter)
command_line = helper.build_packet(
'uB', message_bytes)
# for s in command_line:
# print(hex(s))
result = yield self._message_center.build(command=command_line)
packet_type = result['packet_type']
data = result['data']
if packet_type == 'error':
yield {
'packetType': 'error',
'data': {
'error': data
}
}
break
if data > 0:
yield {
'packetType': 'error',
'data': {
'error': data
}
}
break
yield {
'packetType': 'success',
'data': {
'error': 0
}
}
@with_device_message
def set_param(self, params, *args): # pylint: disable=unused-argument
'''
Update paramter value
'''
command_line = helper.build_input_packet(
'uP', properties=self.properties, param=params['paramId'], value=params['value'])
# self.communicator.write(command_line)
# result = self.get_input_result('uP', timeout=1)
result = yield self._message_center.build(command=command_line)
error = result['error']
data = result['data']
if error:
yield {
'packetType': 'error',
'data': {
'error': data
}
}
yield {
'packetType': 'success',
'data': {
'error': data
}
}
@with_device_message
def save_config(self, *args): # pylint: disable=unused-argument
'''
Save configuration
'''
command_line = helper.build_input_packet('sC')
# self.communicator.write(command_line)
# result = self.get_input_result('sC', timeout=2)
result = yield self._message_center.build(command=command_line, timeout=2)
data = result['data']
error = result['error']
if data:
yield {
'packetType': 'success',
'data': error
}
yield {
'packetType': 'success',
'data': error
}
@with_device_message
def reset_params(self, params, *args): # pylint: disable=unused-argument
'''
Reset params to default
'''
command_line = helper.build_input_packet('rD')
result = yield self._message_center.build(command=command_line, timeout=2)
error = result['error']
data = result['data']
if error:
yield {
'packetType': 'error',
'data': {
'error': error
}
}
yield {
'packetType': 'success',
'data': data
}
def upgrade_framework(self, params, *args): # pylint: disable=unused-argument
'''
Upgrade framework
'''
file = ''
if isinstance(params, str):
file = params
if isinstance(params, dict):
file = params['file']
# start a thread to do upgrade
if not self.is_upgrading:
self.is_upgrading = True
self._message_center.pause()
if self._logger is not None:
self._logger.stop_user_log()
thread = threading.Thread(
target=self.thread_do_upgrade_framework, args=(file,))
thread.start()
print("Upgrade OpenRTK firmware started at:[{0}].".format(
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
return {
'packetType': 'success'
}
|
[
"ywsong@aceinna.com"
] |
ywsong@aceinna.com
|
25911f197ae6dcc2571df0328cbe82ec80518e6e
|
c3c10fde2fc25317099ee9f99c4287d04e8f9ef8
|
/server/packages/dbmanager/dbmanager.py
|
70a88ed3a1a15287939a3e82a260007fd75e0f0b
|
[] |
no_license
|
JonneDeurloo/WikiSearch
|
edc3968b02609635d3ee933b8b47a99389eb8ba4
|
4f368f48174bbd9ef97af1643dca4a99ac1349e9
|
refs/heads/master
| 2020-04-30T03:27:22.511590
| 2019-04-06T10:35:18
| 2019-04-06T10:35:18
| 176,586,576
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
"""
This folder contains all the different databases.
Connection to a database can be done by simply calling the
create_connection() function with the database name as parameter.
"""
import os
import sqlite3
from sqlite3 import Error
def create_connection(db_name):
""" Return a database connection to an SQLite database """
try:
dir_path = os.path.dirname(os.path.realpath(__file__))
db_path = os.path.join(dir_path, f"{db_name}.db")
return sqlite3.connect(db_path)
except Error as e:
print(e)
def table_exists(db, name):
""" Check if table exists """
cursor = db.cursor()
    cursor.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name=?", (name,))
table = cursor.fetchone()
    return table is not None
def close_connection(db):
""" Close a database connection """
db.close()
|
[
"jonnemdeurloo@gmail.com"
] |
jonnemdeurloo@gmail.com
|
275f8b6ac31792a9e4bb823b61366f868e45ef4e
|
6397692bade269ca38ee6f8a9f8d5a87ca7e8d3f
|
/app/api/v2/models/meetupsmodel.py
|
c8a99072560efbc57e830230b247dddf8dd606de
|
[] |
no_license
|
Philipotieno/Questioner-API
|
2a60df5b23f6e9de5aa1f21943ac5dd17ee1c022
|
51e5f479aba526959ac0b9f512e13c11ae282287
|
refs/heads/develop
| 2022-12-11T04:20:39.796022
| 2019-02-15T08:28:13
| 2019-02-15T08:28:13
| 164,468,288
| 1
| 0
| null | 2022-12-08T01:31:37
| 2019-01-07T17:37:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,839
|
py
|
import datetime
from app.api.v2.models.db import Database
now = datetime.datetime.now()
db = Database()
cur = db.cur
class Meetup():
#meetup constructor
def __init__(self, topic, location, tags, happening_on):
self.topic = topic
self.location = location
self.tags = tags
self.happening_on = happening_on
self.created_on = now
def check_if_meetup_exists(self, topic):
query = "SELECT topic from meetups WHERE topic=%s;"
cur.execute(query, (topic,))
        meetup = cur.fetchone()
        return meetup is not None
def create_meetup(self):
if self.check_if_meetup_exists(self.topic):
return False
query = "INSERT INTO meetups (topic, location, tags, happening_on, created_on) values (%s, %s, %s, %s, %s) \
RETURNING meetup_id, topic, location, tags, happening_on, created_on;"
cur.execute(
query,
(self.topic,
self.location,
self.tags,
self.happening_on,
self.created_on))
meetup = cur.fetchone()
db.conn.commit()
return meetup
    @staticmethod
    def delete_meetup(meetup_id):
        """Delete a single Meetup"""
        query = "DELETE FROM meetups WHERE meetup_id = %s;"
        cur.execute(query, (meetup_id,))
        db.conn.commit()
@staticmethod
def get_all_meetups():
'''Method to fetch all meetups'''
query = "SELECT * from meetups;"
cur.execute(query)
meetups = cur.fetchall()
return meetups
@staticmethod
def get_meetup_by_id(meetup_id):
""" Fetch a specific meetup using meetup_id"""
query = "SELECT * from meetups where meetup_id=%s;"
cur.execute(query, (meetup_id,))
meetup = cur.fetchone()
return meetup
|
[
"tmitchellb007@gmail.com"
] |
tmitchellb007@gmail.com
|
524bd89e5c1f7b8467818ce2131fcc559332c383
|
b9686987198daf03eef5dd03285da58a6e847b72
|
/20190910-add/add_using_zip.py
|
e6212a61c83131cf912ca88019923916eea53624
|
[] |
no_license
|
CompeterScience/pythonmorsels
|
f2646e31d3d393d37198b2e1c38bedcef75455c8
|
169b03fdac18ef5f60ca47128da50553fb4f0848
|
refs/heads/master
| 2022-05-23T20:04:38.097134
| 2020-04-23T21:35:37
| 2020-04-23T21:35:37
| 258,335,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
def add(m1, m2):
m3 = []
for r1,r2 in zip(m1,m2):
m3.append([c1 + c2
for c1,c2
in zip(r1,r2)])
return m3
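# e.g. add([[1, 2], [3, 4]], [[5, 6], [7, 8]]) -> [[6, 8], [10, 12]]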
|
[
"competerscience@outlook.com"
] |
competerscience@outlook.com
|
bea73cbe5c0c43a72e96cbe6ea639c0aea8a8f57
|
60f1c250e2186a101917e72608e95806f66f5445
|
/config.py
|
c1c8c0f46b70574bf6af796b677245b691eda268
|
[] |
no_license
|
AndrySar/jarvis
|
19a25385652a08ff5d79ac7586a918ed65b524b3
|
d062dbdb49edced94e7bd947dc3a405708518f50
|
refs/heads/master
| 2023-02-07T23:59:31.925605
| 2021-01-03T09:12:52
| 2021-01-03T09:12:52
| 326,367,421
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31
|
py
|
import os

TOKEN = str(os.getenv("TOKEN"))
|
[
"moisandrew@yandex-team.ru"
] |
moisandrew@yandex-team.ru
|
f9dddda89e769203df88f8d4b6ca71d093a8ed82
|
67ee0395522ccc13ad78012845dd8f29317905bc
|
/venv/bin/pip
|
5324e018377757e57dc622b1e9eb542b044775f7
|
[] |
no_license
|
pawankhandal52/movie_website
|
2b717942e0c689a0fa10fcd3b45f5eb7f1726262
|
6f0e668f3e99e790e9f4441c97adbb812fd72901
|
refs/heads/master
| 2020-03-27T10:38:56.464189
| 2018-08-29T07:28:01
| 2018-08-29T07:28:01
| 146,435,262
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
#!/Users/stemdot/Desktop/movie_website/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
|
[
"pksharama@stemdot.com"
] |
pksharama@stemdot.com
|
|
482a9c23b8b78c4c068f2a92b69400934aa9d8fd
|
5f06ea565f6d0d555a0034de591c1948b925a7e7
|
/blog/views.py
|
1cae3ad2d74213e99b7c23fb9a3da2f424d190bb
|
[] |
no_license
|
cement-hools/blog_by_molchanov
|
82ef3385080320b74a1cd9c4c21446d8f0ae60e4
|
da0a4c2c083c5c1da0d720a631ae1253792b32be
|
refs/heads/main
| 2023-03-30T08:51:41.100697
| 2021-03-28T02:09:49
| 2021-03-28T02:09:49
| 350,162,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,702
|
py
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.paginator import Paginator
from django.db.models import Q
from django.shortcuts import render
from django.views.generic import View
from blog.forms import TagForm, PostForm
from blog.models import Post, Tag
from blog.utils import (ObjectDetailMixin, ObjectCreateMixin,
ObjectUpdateMixin, ObjectDelete)
OBJ_IN_PAGE = 3
def posts_list(request):
search_query = request.GET.get('search')
if search_query:
posts = Post.objects.filter(
Q(title__icontains=search_query) |
Q(body__icontains=search_query)
)
else:
posts = Post.objects.all()
paginator = Paginator(posts, OBJ_IN_PAGE)
page_number = request.GET.get('page', 1)
page = paginator.get_page(page_number)
is_paginated = page.has_other_pages()
if page.has_previous():
prev_url = f'?page={page.previous_page_number()}'
else:
prev_url = ''
if page.has_next():
next_url = f'?page={page.next_page_number()}'
else:
next_url = ''
context = {
'page_object': page,
'is_paginated': is_paginated,
'next_url': next_url,
'prev_url': prev_url,
}
return render(request, 'blog/index.html', context)
class PostDetail(ObjectDetailMixin, View):
model = Post
template = 'blog/post_detail.html'
class PostCreate(LoginRequiredMixin, ObjectCreateMixin, View):
model_form = PostForm
template = 'blog/post_create_form.html'
raise_exception = True
class PostUpdate(LoginRequiredMixin, ObjectUpdateMixin, View):
model = Post
model_form = PostForm
template = 'blog/post_update_form.html'
raise_exception = True
class PostDelete(LoginRequiredMixin, ObjectDelete, View):
model = Post
template = 'blog/post_delete_form.html'
redirect_url = 'posts_list_url'
raise_exception = True
class TagDetail(ObjectDetailMixin, View):
model = Tag
template = 'blog/tag_detail.html'
class TagCreate(LoginRequiredMixin, ObjectCreateMixin, View):
model_form = TagForm
template = 'blog/tag_create_form.html'
raise_exception = True
class TagUpdate(LoginRequiredMixin, ObjectUpdateMixin, View):
model = Tag
model_form = TagForm
template = 'blog/tag_update_form.html'
raise_exception = True
class TagDelete(LoginRequiredMixin, ObjectDelete, View):
model = Tag
template = 'blog/tag_delete_form.html'
redirect_url = 'tags_list_url'
raise_exception = True
def tags_list(request):
tags = Tag.objects.all()
context = {
'tags': tags,
}
return render(request, 'blog/tags_list.html', context)
|
[
"cement-fan@ya.ru"
] |
cement-fan@ya.ru
|
378a396c00663d3f8648e37c9914cc247a876c70
|
8759a40c90d600ffb846e491caef02389e001253
|
/12.py
|
f321fbfd5cf6ceea6e65da1dd67a59ff3744223f
|
[] |
no_license
|
e185725/opencv_practice
|
e9f1862d588a3084363fdb235abb6665caf9c339
|
a9914fde756bdec5d8a1228409327de56ce4c647
|
refs/heads/main
| 2023-01-15T11:17:49.042291
| 2020-11-30T03:19:33
| 2020-11-30T03:19:33
| 317,097,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
"""
モーションフィルタ
motion filter
モーションフィルタ(3x3)を実装せよ。
モーションフィルタとは対角方向の平均値を取るフィルタであり、次式で定義される。
"""
import cv2
import numpy as np
def motion_filter(img , K_size = 3):
H,W,C = img.shape
    K = np.diag([1] * K_size).astype(float)  # np.float was removed in NumPy 1.24
K /= K_size
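    # e.g. K_size=3 gives K = [[1/3, 0, 0], [0, 1/3, 0], [0, 0, 1/3]]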
# zero padding
pad = K_size // 2
    out = np.zeros((H + pad * 2, W + pad * 2, C), dtype=float)
    out[pad: pad + H, pad: pad + W] = img.copy().astype(float)
tmp = out.copy()
for y in range(H):
for x in range(W):
for c in range(C):
out[pad + y, pad + x, c] = np.sum(K*tmp[y:y + K_size, x: x + K_size,c])
out = out[pad: pad + H, pad:pad + W].astype(np.uint8)
return out
# Read image
img = cv2.imread("imori.jpg")
# motion filtering
out = motion_filter(img, K_size=3)
# Save result
#cv2.imwrite("out.jpg", out)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"e185725@ie.u-ryukyu.ac.jp"
] |
e185725@ie.u-ryukyu.ac.jp
|
93c6b0118efa5bfb83374e27dca773d5a9eaf6f8
|
ff8768e9ff484c9a1392029c0d89b7d75c635109
|
/w2.py
|
a646fbf144331bf4c158486f8ac042b7bc0b6eec
|
[] |
no_license
|
shiyujiucsb/wmdnew
|
77f2af4b4efc25507895d19fda6f8e87dda2d673
|
9f751202f780147fcfb6e0fb8d00ad86fa6bf152
|
refs/heads/master
| 2021-01-11T23:28:26.260917
| 2017-01-16T10:07:46
| 2017-01-16T10:07:46
| 78,588,141
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,965
|
py
|
#start
import sys
import pickle
import multiprocessing as mp
import numpy as np
load_file = sys.argv[1]
save_file = sys.argv[2]
with open(load_file, 'rb') as f:
    [X, BOW_X, y, C, words] = pickle.load(f)
n = np.shape(X)
n = n[0]
D = np.zeros((n,n))
for i in range(n):
bow_i = BOW_X[i]
bow_i = bow_i / np.sum(bow_i)
bow_i = bow_i.tolist()
BOW_X[i] = bow_i
X_i = X[i].T
X_i = X_i.tolist()
X[i] = X_i
nBuckets = 20
nDim = len(X[0][0])
import random as r
r.seed(10)
LSH = []
for i in range(nBuckets):
vec = []
for j in range(nDim):
vec.append(r.uniform(-1,1))
LSH.append(vec)
def dot(v, u):
res = 0.0
for i in range(len(v)):
res += v[i] * u[i]
return res
def sign(V):
sig = ""
for i in range(len(LSH)):
if dot(V, LSH[i]) > 0:
sig+="1"
else:
sig+="0"
return int(sig, 2)
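# Assumption from reading the code: helper() below replaces the exact EMD call
# (commented out in get_wmd()) with an LSH approximation. Each word vector is
# hashed to a 20-bit signature, one bit per random hyperplane, and documents
# are scored by the dot product of their per-bucket BOW-weight histograms.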
def helper(X1, BOW1, X2, BOW2):
slots1 = {}
slots2 = {}
for i in range(len(X1)):
s = sign(X1[i])
if s in slots1:
slots1[s] += BOW1[i]
else:
slots1[s] = BOW1[i]
for i in range(len(X2)):
s = sign(X2[i])
if s in slots2:
slots2[s] += BOW2[i]
else:
slots2[s] = BOW2[i]
res = 0.0
for k in slots1.keys():
if k in slots2:
res += slots1[k] * slots2[k]
return res
def get_wmd(ix):
n = np.shape(X)
n = n[0]
Di = np.zeros((1,n))
i = ix
#print '%d out of %d' % (i, n)
    for j in range(i):
#Di[0,j] = emd( (X[i], BOW_X[i]), (X[j], BOW_X[j]), distance)
Di[0,j] = helper(X[i], BOW_X[i], X[j], BOW_X[j])
#if Di[0,j]>0.4 and min(len(X[i]), len(X[j])) >10:
#print(ix," and ",j,": ", Di[0, j])
return Di
def kNN(D, C, k):
test = []
train = []
for i in range(len(C)):
        p = r.random()
if (p>0.5):
test.append(i)
else:
train.append(i)
k = min(k, len(train))
success = 0
for i in test:
res = train[:]
res.sort(key=lambda j: (-D[i][j] if i<j else -D[j][i]))
votes = {}
for j in range(k):
            if C[res[j]].split()[0] in votes:
votes[C[res[j]].split()[0]] += 1
else:
votes[C[res[j]].split()[0]] = 1
maxVotes = 0
maxClass = ""
for key in votes.keys():
if maxVotes < votes[key]:
maxVotes = votes[key]
maxClass = key
if maxClass == C[i].split()[0]:
success += 1
print(success*1.0/len(test))
def main():
n = np.shape(X)
n = n[0]
pool = mp.Pool(processes=8)
pool_outputs = pool.map(get_wmd, list(range(n)))
pool.close()
pool.join()
WMD_D = np.zeros((n,n))
    for i in range(n):
WMD_D[:,i] = pool_outputs[i]
kNN(WMD_D, C, 10)
    with open(save_file, 'wb') as f:
pickle.dump(WMD_D, f)
if __name__ == "__main__":
main()
|
[
"shiyu@cs.ucsb.edu"
] |
shiyu@cs.ucsb.edu
|
c7224b78c1a6f736145512b1515152716e084fb0
|
7a63ce94e1806a959c9c445c2e0bae95afb760c8
|
/tests/user/test_update_credentials.py
|
0525fc1882db4236ea941f77e653a698474a366a
|
[
"MIT"
] |
permissive
|
pklauke/pycamunda
|
20b54ceb4a40e836148e84912afd04d78d6ba0ec
|
3faac4037212df139d415ee1a54a6594ae5e9ac5
|
refs/heads/master
| 2023-08-18T10:23:30.503737
| 2022-04-17T18:34:40
| 2022-04-17T18:34:40
| 240,333,835
| 40
| 16
|
MIT
| 2023-09-12T13:29:08
| 2020-02-13T18:37:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,128
|
py
|
# -*- coding: utf-8 -*-
import unittest.mock
import pytest
import pycamunda.base
import pycamunda.user
import pycamunda.resource
from tests.mock import raise_requests_exception_mock, not_ok_response_mock
def test_update_credentials_params(engine_url, update_credentials_input):
update_credentials = pycamunda.user.UpdateCredentials(
url=engine_url, **update_credentials_input
)
assert update_credentials.url == engine_url + '/user/janedoe/credentials'
assert update_credentials.query_parameters() == {}
assert update_credentials.body_parameters() == {
'password': 'password',
'authenticatedUserPassword': 'password'
}
@unittest.mock.patch('requests.Session.request')
def test_update_credentials_calls_requests(mock, engine_url, update_credentials_input):
update_credentials = pycamunda.user.UpdateCredentials(
url=engine_url, **update_credentials_input
)
update_credentials()
assert mock.called
assert mock.call_args[1]['method'].upper() == 'PUT'
@unittest.mock.patch('requests.Session.request', raise_requests_exception_mock)
def test_update_credentials_raises_pycamunda_exception(engine_url, update_credentials_input):
update_credentials = pycamunda.user.UpdateCredentials(
url=engine_url, **update_credentials_input
)
with pytest.raises(pycamunda.PyCamundaException):
update_credentials()
@unittest.mock.patch('requests.Session.request', not_ok_response_mock)
@unittest.mock.patch('pycamunda.base._raise_for_status')
def test_update_credentials_raises_for_status(mock, engine_url, update_credentials_input):
update_credentials = pycamunda.user.UpdateCredentials(
url=engine_url, **update_credentials_input
)
update_credentials()
assert mock.called
@unittest.mock.patch('requests.Session.request', unittest.mock.MagicMock())
def test_update_credentials_returns_none(engine_url, update_credentials_input):
update_credentials = pycamunda.user.UpdateCredentials(
url=engine_url, **update_credentials_input
)
result = update_credentials()
assert result is None
|
[
"peter.klauke@tu-dortmund.de"
] |
peter.klauke@tu-dortmund.de
|
e7d1aa876bce599d3252b04fd4e5cbed615be91c
|
35b78703e93dcd621a06308ff889339a18230898
|
/data/graph_loading.py
|
c24a902097d904cf9c67a56a9cdf487a548256c0
|
[] |
no_license
|
mbajaj01/CS534L-Social-Networks-Project
|
6b22986927cef602a077acb0e26f4c6b101dfcef
|
9dd8dbcf57257dba94f1390a7808f7bb63396f7e
|
refs/heads/master
| 2020-03-21T06:44:37.286410
| 2018-01-20T16:50:47
| 2018-01-20T16:50:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,114
|
py
|
import csv
import random
import networkx as nx
import matplotlib.pyplot as plt
# Only the last assignment takes effect; the earlier paths are kept for reference.
csv_path = "digg_friends.csv"
csv_path = "digg_friends_pruned.csv"
csv_path = "digg_simar.csv"
def load_graph(csv_path):
    G = nx.DiGraph()
    with open(csv_path, "r") as f_obj:
        reader = csv.reader(f_obj)
        next(reader, None)  # skip the header row
        for row in reader:
mutual = int(row[0].strip())
timestamp = row[1]
user_id = int(row[2])
friend_id = int(row[3])
prob1 = random.random()
prob2 = random.random()
if mutual==0:
G.add_edge(user_id, friend_id,prob=prob1)
elif mutual==1:
G.add_edge(user_id, friend_id,prob = prob1)
G.add_edge(friend_id, user_id,prob = prob2)
else:
                print(mutual)
                print(user_id)
                print(friend_id)
                print('mutual value out of the expected range')
return G
if __name__ == "__main__":
    G = load_graph(csv_path)
    nx.draw(G, with_labels=True, font_weight='bold')
    plt.show()
|
[
"simar.i3r@gmail.com"
] |
simar.i3r@gmail.com
|
e5c0db606e3376fbcaeddcc9fd05a7df98f0dc5d
|
e03f5e95619f7ea81a8990371f7859064fab9987
|
/regulaFalsiModificado.py
|
3efd6422e8fa834f73d41e42339ac4a93d04e0a0
|
[] |
no_license
|
josera21/numericalProgramming
|
b4e741978ce7a625bde24b22880fea2e4f739848
|
f1dc0a128f6a3806310d86b661758aa73289b259
|
refs/heads/master
| 2020-04-17T10:04:38.024009
| 2019-01-29T05:25:05
| 2019-01-29T05:25:05
| 166,486,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,187
|
py
|
# -*- coding: utf-8 -*-
def regulaFalsiModificado(a, b, tol, n):
fa = function(a)
fb = function(b)
fc = fa
c = a
ant = float("inf") # Para la primera iteracion
i = 0
ia = 0
ib = 0
while i < n and abs(c - ant) > tol:
i += 1
ant = c
c = b - fb*((b - a) / (fb - fa))
fc = function(c)
if ((fa * fc) > 0):
a = c
fa = fc
ia = 0
ib += 1
if ib >= 2:
fb = fb / 2
else:
b = c
fb = fc
ib = 0
ia += 1
if ia >= 2:
fa = fa / 2
    if abs(c - ant) <= tol:
        print("Converged in {0} iterations".format(i))
        print("Solution: {0}".format(c))
    else:
        print("Error: no convergence within the iteration limit")
def function(x):
    # Comment or uncomment depending on the function you want to use
return (-0.4)*(pow(x, 2)) + (2.2)*(x) + 4.7
def capturarDatos():
    print("-- Enter the required values --")
    a = float(input("Value of a: "))
    b = float(input("Value of b: "))
    tol = float(input("Tolerance: "))
    n = int(input("Maximum number of iterations: "))
    regulaFalsiModificado(a, b, tol, n)
if __name__ == "__main__":
    resp = input("Do you want to enter your own values? Y/N: ")
    if resp.lower() == "y":
        capturarDatos()
    else:
        regulaFalsiModificado(-2, 1.5, 0.05, 30)
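# Hand-checked expectation for the default run (a=-2, b=1.5, tol=0.05): the
# method should converge to x ~ -1.645, the negative root of
# -0.4*x**2 + 2.2*x + 4.7, obtained from the quadratic formula
# x = (2.2 - sqrt(2.2**2 + 4*0.4*4.7)) / (2*0.4).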
|
[
"jochix21@gmail.com"
] |
jochix21@gmail.com
|
65da08b0f3c75f793eca363ec016e0441370c495
|
a47ac7c64cb6bb1f181eadff8e4b24735c19080a
|
/PythonStudy/9-Tkinter/4-Entry.py
|
fc6d9a973f75667cf9bcbae7cca69b495df559b5
|
[
"MIT"
] |
permissive
|
CoderTitan/PythonDemo
|
6dcc88496b181df959a9d43b963fe43a6e4cb032
|
feb5ef8be91451b4622764027ac684972c64f2e0
|
refs/heads/master
| 2020-03-09T09:15:28.299827
| 2018-08-21T03:43:25
| 2018-08-21T03:43:25
| 128,708,650
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,321
|
py
|
# Main window
import tkinter
# Validate the entered text
def varileText():
    text = entry4.get()
    if text != '1':
        print('correct')
        return True
    print('wrong')
    return False
#
def testInvaild():
    print('invalidcommand was called')
    return True
# Create the main window
window = tkinter.Tk()
# Set the title
window.title('Titanjun')
# Set the window size
window.geometry('400x400')
button = tkinter.Button(window, text='Titan', bg='#ff4040')
button.pack()
'''
Entry widget
Used to display simple text content
'''
vari = tkinter.Variable()
entry = tkinter.Entry(window, textvariable=vari)
entry.pack()
# Set the value
vari.set('very good')
# Get the value
print(vari.get())
print(entry.get())
# Read-only entry
vari2 = tkinter.Variable()
entry2 = tkinter.Entry(window, textvariable=vari2, state='disabled')
entry2.pack()
# Set the value
vari2.set('very bad')
print(vari2.get())
# Password entry: whatever is typed is displayed masked
vari3 = tkinter.Variable()
entry3 = tkinter.Entry(window, textvariable=vari3, show='@', bg='red', fg='white')
entry3.pack()
# Validate on every keystroke whether the input meets the requirement
vari4 = tkinter.Variable()
entry4 = tkinter.Entry(window, textvariable=vari4, validate='key', validatecommand=varileText, invalidcommand=testInvaild)
entry4.pack()
# Enter the main event loop
window.mainloop()
|
[
"quanjunt@163.com"
] |
quanjunt@163.com
|
71b3f01b9a0d842a63ee19689ad6994894a5ef8c
|
e47747c2ec6061ff19f5c4bd7ac63c2bbcfad018
|
/test_bangalore_slots_finder.py
|
fc975e51f7a2a6f61d89ff57a744ce76f02d5707
|
[] |
no_license
|
shivamsachit/setu-api
|
0ef72f890d63bc6a4e1e75c9728e66d0b2283f27
|
90df25407de04ada7f87121993ca9f875fad4a29
|
refs/heads/main
| 2023-06-29T12:05:52.028716
| 2021-07-31T13:49:38
| 2021-07-31T13:49:38
| 390,598,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,586
|
py
|
'''
Tests for COVID vaccination slots finder
This module contains tests for the COVID vaccination slots finder
Contains the following classes:
TestAllSuccess:
func test_no_slots
func test_one_slot
func test_three_slots
TestSomeTimeouts:
func test_no_slots_in_successful_calls
func test_three_slots_in_successful_calls
TestSomeErrors:
func test_no_slots_in_successful_calls
func test_three_slots_in_successful_calls
TestAllFailures:
func test_failure
TestAllTimeouts:
func test_timeouts
Can be run by invoking `pytest`
'''
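# Illustrative invocation (hypothetical date): the date under test defaults to
# tomorrow, but the data fixture lets you pin it via the DATE environment
# variable in %d-%m-%Y format, e.g.
#   DATE=31-12-2021 pytest test_bangalore_slots_finder.py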
import os
from datetime import datetime, timedelta
from unittest import mock
import pytest
import requests
from bangalore_slots_finder import get_bangalore_vaccine_slots
from fixtures import SUCCESS_WITH_1_SLOT, SUCCESS_WITH_3_SLOTS, SUCCESS_WITH_NO_SLOT
ERROR_STATUS_CODES = [500, 502, 503, 504, 520, 522, 524]
def mocked_get_bangalore_vaccine_slots(*args, **kwargs):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
CASE_MAPPING = {
'1': ([MockResponse(SUCCESS_WITH_NO_SLOT, 200)], [], []),
'2': ([MockResponse(SUCCESS_WITH_1_SLOT, 200)], [], []),
'3': ([MockResponse(SUCCESS_WITH_3_SLOTS, 200)], [], []),
'4': ([MockResponse(SUCCESS_WITH_NO_SLOT, 200)], [], [requests.Timeout('timeout')]),
'5': ([MockResponse(SUCCESS_WITH_3_SLOTS, 200)], [], [requests.Timeout('timeout')]),
'6': (
[MockResponse(SUCCESS_WITH_NO_SLOT, 200)],
[requests.RequestException('exception')],
[],
),
'7': (
[MockResponse(SUCCESS_WITH_3_SLOTS, 200)],
[requests.RequestException('exception')],
[],
),
'8': (
[
MockResponse({}, 500),
MockResponse({}, 500),
MockResponse({}, 500),
],
[requests.RequestException('exception'), requests.RequestException('exception')],
[],
),
'9': (
[],
[],
[requests.Timeout('timeout'), requests.Timeout('timeout'), requests.Timeout('timeout')],
),
}
return CASE_MAPPING.get(kwargs['case'])
@pytest.fixture(scope='class')
def data(request):
'''
PyTest fixture for using the same date across all tests
'''
request.cls.date = os.getenv(
'DATE', (datetime.today() + timedelta(days=1)).strftime('%d-%m-%Y')
)
@pytest.mark.usefixtures("data")
class TestAllSuccess:
"""
    Class containing all methods intended to run tests for when all requests are completed successfully
"""
@mock.patch(
'test_bangalore_slots_finder.get_bangalore_vaccine_slots',
side_effect=mocked_get_bangalore_vaccine_slots,
)
def test_no_slots(self, mock_get):
'''
Function to run test to check if no slots were found
'''
# mock_get.return_value = {"sessions": [{'slots': []}]}
results, exceptions, timeouts = get_bangalore_vaccine_slots(self.date, '530068', case='1')
assert bool(results) == True and bool(exceptions) == False and bool(timeouts) == False
slots = []
for response in results:
print(response)
assert response.status_code == 200
response_json = response.json()
locations = response_json.get('sessions', [])
for location in locations:
slots.extend(location.get('slots', []))
assert len(slots) == 0
@mock.patch(
'test_bangalore_slots_finder.get_bangalore_vaccine_slots',
side_effect=mocked_get_bangalore_vaccine_slots,
)
def test_one_slot(self, mock_get):
'''
Function to run test to check if 1 slot was found
'''
results, exceptions, timeouts = get_bangalore_vaccine_slots(self.date, '560034', case='2')
assert bool(results) == True and bool(exceptions) == False and bool(timeouts) == False
slots = []
for response in results:
assert response.status_code == 200
response_json = response.json()
locations = response_json.get('sessions', [])
for location in locations:
slots.extend(location.get('slots', []))
assert len(slots) == 1
@mock.patch(
'test_bangalore_slots_finder.get_bangalore_vaccine_slots',
side_effect=mocked_get_bangalore_vaccine_slots,
)
def test_three_slots(self, mock_get):
'''
Function to run test to check if 3 slots were found
'''
results, exceptions, timeouts = get_bangalore_vaccine_slots(self.date, '560034', case='3')
assert bool(results) == True and bool(exceptions) == False and bool(timeouts) == False
slots = []
for response in results:
assert response.status_code == 200
response_json = response.json()
locations = response_json.get('sessions', [])
for location in locations:
slots.extend(location.get('slots', []))
assert len(slots) == 3
@pytest.mark.usefixtures("data")
class TestSomeTimeouts:
"""
Class containing all methods intended to run tests for when some of the requests timeout
"""
@mock.patch(
'test_bangalore_slots_finder.get_bangalore_vaccine_slots',
side_effect=mocked_get_bangalore_vaccine_slots,
)
def test_no_slots_in_successful_calls(self, mock_get):
'''
Function to run test to check if no slots were found in any successful calls, along with some requests that timed out
'''
results, exceptions, timeouts = get_bangalore_vaccine_slots(self.date, '530068', case='4')
assert bool(results) == True and bool(exceptions) == False and bool(timeouts) == True
slots = []
for response in results:
assert response.status_code == 200
response_json = response.json()
locations = response_json.get('sessions', [])
for location in locations:
slots.extend(location.get('slots', []))
assert len(slots) == 0
@mock.patch(
'test_bangalore_slots_finder.get_bangalore_vaccine_slots',
side_effect=mocked_get_bangalore_vaccine_slots,
)
def test_three_slots_in_successful_calls(self, mock_get):
'''
Function to run test to check if 3 slots were found in any successful calls, along with some requests that timed out
'''
results, exceptions, timeouts = get_bangalore_vaccine_slots(self.date, '560034', case='5')
assert bool(results) == True and bool(exceptions) == False and bool(timeouts) == True
slots = []
for response in results:
assert response.status_code == 200
response_json = response.json()
locations = response_json.get('sessions', [])
for location in locations:
slots.extend(location.get('slots', []))
assert len(slots) == 3
@pytest.mark.usefixtures("data")
class TestSomeErrors:
"""
Class containing all methods intended to run tests for when some of the requests return a 5xx status code
"""
@mock.patch(
'test_bangalore_slots_finder.get_bangalore_vaccine_slots',
side_effect=mocked_get_bangalore_vaccine_slots,
)
def test_no_slots_in_successful_calls(self, mock_get):
'''
Function to run test to check if no slots were found in any successful calls, along with some requests that threw a 5xx status code
'''
results, exceptions, timeouts = get_bangalore_vaccine_slots(self.date, '530068', case='6')
assert bool(results) == True and bool(exceptions) == True and bool(timeouts) == False
slots = []
for response in results:
if response.status_code == 200:
response_json = response.json()
locations = response_json.get('sessions', [])
for location in locations:
slots.extend(location.get('slots', []))
else:
assert response.status_code in ERROR_STATUS_CODES
assert len(slots) == 0
@mock.patch(
'test_bangalore_slots_finder.get_bangalore_vaccine_slots',
side_effect=mocked_get_bangalore_vaccine_slots,
)
def test_three_slots_in_successful_calls(self, mock_get):
'''
Function to run test to check if 3 slots were found in any successful calls, along with some requests that threw a 5xx status code
'''
results, exceptions, timeouts = get_bangalore_vaccine_slots(self.date, '560034', case='7')
assert bool(results) == True and bool(exceptions) == True and bool(timeouts) == False
slots = []
for response in results:
if response.status_code == 200:
response_json = response.json()
locations = response_json.get('sessions', [])
for location in locations:
slots.extend(location.get('slots', []))
else:
assert response.status_code in ERROR_STATUS_CODES
assert len(slots) == 3
@pytest.mark.usefixtures("data")
class TestAllFailures:
"""
Class containing all methods intended to run tests for when all of the requests return a 5xx status code
"""
@mock.patch(
'test_bangalore_slots_finder.get_bangalore_vaccine_slots',
side_effect=mocked_get_bangalore_vaccine_slots,
)
def test_failure(self, mock_get):
results, exceptions, timeouts = get_bangalore_vaccine_slots(self.date, '530068', case='8')
assert bool(exceptions) == True and bool(timeouts) == False
for response in results:
assert response.status_code in ERROR_STATUS_CODES
@pytest.mark.usefixtures("data")
class TestAllTimeouts:
"""
Class containing all methods intended to run tests for when all of the requests timeout
"""
@mock.patch(
'test_bangalore_slots_finder.get_bangalore_vaccine_slots',
side_effect=mocked_get_bangalore_vaccine_slots,
)
def test_timeouts(self, mock_get):
results, exceptions, timeouts = get_bangalore_vaccine_slots(self.date, '530068', case='9')
assert bool(results) == False and bool(exceptions) == False and bool(timeouts) == True
|
[
"sachit.shivam@codemonk.in"
] |
sachit.shivam@codemonk.in
|
7a281220e87feece6d79b633cfd9a907a2a6ad8b
|
e8493c6f2eebb021ea77f28a21fa19d84a6088cd
|
/four_number_game/lib/utilities.py
|
df345050df29c861605876f1b9043c63a3fc01ca
|
[] |
no_license
|
facu077/four-number-game
|
606c93b5b9397f1c0d00e6d9651f124d747dfba7
|
0ad9a7fa2789bc33842d042794a17a9bfc61ce5f
|
refs/heads/master
| 2020-05-20T17:17:51.674634
| 2019-05-27T19:40:49
| 2019-05-27T19:40:49
| 185,685,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,263
|
py
|
import random
class Comparator:
def __init__(self):
self.regular = 0
self.good = 0
def compare_numbers(self, first_number, second_number):
self.regular = 0
self.good = 0
        # We are going to use lists since they are easier to manipulate
first_number_list = list(first_number)
second_number_list = list(second_number)
# First we go through the two numbers at the same time
# and remove the matches from the lists to prevent duplicates
for first_digit, second_digit in zip(first_number, second_number):
if first_digit == second_digit:
self.good += 1
first_number_list.remove(first_digit)
second_number_list.remove(first_digit)
        # Then we go through the rest of the list looking for regulars, removing each match to prevent duplicates
for digit in first_number_list:
if digit in second_number_list:
self.regular += 1
second_number_list.remove(digit)
def generate_number():
number = '0'
    # We have to check that the number doesn't start with '0'
while number[0] == '0':
number = ''.join(random.sample("0123456789", 4))
return number
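# Illustrative usage (hand-checked against the rules above): '1234' vs '1342'
# has one digit in the right place and three digits present but misplaced.
if __name__ == '__main__':
    comparator = Comparator()
    comparator.compare_numbers('1234', '1342')
    print(comparator.good, comparator.regular)  # expected: 1 3
    print(generate_number())  # four distinct digits, never starting with '0'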
|
[
"facu.m077@gmail.com"
] |
facu.m077@gmail.com
|
7c45869cc9da04563cf20da243c4ea1b766b1fd7
|
d571331f8cbcae66fdb5d9b3314d61d27b8e5cb3
|
/mysite/article/migrations/0004_article_is_delete.py
|
4d2989b677a114e2c34de1af25f608b32deac709
|
[] |
no_license
|
ScnuWang/Django_Bilibili
|
cfb066018f9a1a20bd86e7529b3ac1b093feb9ae
|
495fb37e6c7139f700e1d7434c1ac29b0aa75490
|
refs/heads/master
| 2020-03-20T23:31:16.342638
| 2018-06-29T08:48:40
| 2018-06-29T08:48:40
| 137,850,235
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
# Generated by Django 2.0.6 on 2018-06-19 16:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('article', '0003_article_auther'),
]
operations = [
migrations.AddField(
model_name='article',
name='is_delete',
field=models.BooleanField(default=False),
),
]
|
[
"scnu_wang@163.com"
] |
scnu_wang@163.com
|
d3d2478915380b6f8d4f5778c5babd647003d786
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-dataartsstudio/huaweicloudsdkdataartsstudio/v1/model/show_instance_result_response.py
|
89a066b6d19712691fb0599b6d0fc736ad86c3d5
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,168
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowInstanceResultResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'count': 'int',
'resources': 'list[SubInstanceResult]'
}
attribute_map = {
'count': 'count',
'resources': 'resources'
}
def __init__(self, count=None, resources=None):
"""ShowInstanceResultResponse
The model defined in huaweicloud sdk
        :param count: Total count
:type count: int
:param resources: resources
:type resources: list[:class:`huaweicloudsdkdataartsstudio.v1.SubInstanceResult`]
"""
super(ShowInstanceResultResponse, self).__init__()
self._count = None
self._resources = None
self.discriminator = None
if count is not None:
self.count = count
if resources is not None:
self.resources = resources
@property
def count(self):
"""Gets the count of this ShowInstanceResultResponse.
        Total count
:return: The count of this ShowInstanceResultResponse.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this ShowInstanceResultResponse.
        Total count
:param count: The count of this ShowInstanceResultResponse.
:type count: int
"""
self._count = count
@property
def resources(self):
"""Gets the resources of this ShowInstanceResultResponse.
resources
:return: The resources of this ShowInstanceResultResponse.
:rtype: list[:class:`huaweicloudsdkdataartsstudio.v1.SubInstanceResult`]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this ShowInstanceResultResponse.
resources
:param resources: The resources of this ShowInstanceResultResponse.
:type resources: list[:class:`huaweicloudsdkdataartsstudio.v1.SubInstanceResult`]
"""
self._resources = resources
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowInstanceResultResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
98c07bc117e7b12342e061b4f05594cc973f2a69
|
a5ac50432e169e39d9854a045da6f7ec8b29a997
|
/MongoTest/settings.py
|
2dbe180fb431b80a6d9801d574cef17fdeb44240
|
[] |
no_license
|
burritorepo/Django-MongoDB
|
4c8516d2c24b6644c7b2f53156ff9124a9032f46
|
4200bac5f8437d6260924038f2f5968a3d87e5f7
|
refs/heads/master
| 2020-06-16T11:30:44.109009
| 2019-05-20T04:59:09
| 2019-05-20T04:59:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,416
|
py
|
"""
Django settings for MongoTest project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import mongoengine
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'c4!mc+h+m)u2=#z#ef1f=+*5^yxrwnr+q1^9df3r0hl!@oz9qq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
#'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework_mongoengine'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
#'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
#'django.contrib.auth.middleware.AuthenticationMiddleware',
#'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MongoTest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'MongoTest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.dummy'
}
}
_MONGODB_USER = 'hacodewr'
_MONGODB_PASSWD = 'abc123'
_MONGODB_HOST = 'localhost'
_MONGODB_NAME = 'admin'
_MONGODB_DATABASE_HOST = \
'mongodb://%s:%s@%s/%s' \
% (_MONGODB_USER, _MONGODB_PASSWD, _MONGODB_HOST, _MONGODB_NAME)
mongoengine.connect(_MONGODB_NAME, host= _MONGODB_DATABASE_HOST)
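# With the values above, the formatted connection URI resolves to:
#   mongodb://hacodewr:abc123@localhost/admin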
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"hacodeml@gmail.com"
] |
hacodeml@gmail.com
|
b2741fa2aa47d2ca507a4a587d78662b490be852
|
b47f2e3f3298388b1bcab3213bef42682985135e
|
/experiments/jacobi-2d/tmp_files/4634.py
|
598e8470565aa941811dde2f95b33c4baece406f
|
[
"BSD-2-Clause"
] |
permissive
|
LoopTilingBenchmark/benchmark
|
29cc9f845d323431e3d40e878cbfc6d1aad1f260
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
refs/heads/master
| 2020-09-25T09:45:31.299046
| 2019-12-04T23:25:06
| 2019-12-04T23:25:06
| 225,975,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/jacobi-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/jacobi-2d/tmp_files/4634.c')
procedure('kernel_jacobi_2d')
loop(0)
known(' n > 2 ')
tile(0,2,16,2)
tile(0,4,64,4)
tile(1,2,16,2)
tile(1,4,64,4)
|
[
"nashenruoyang@163.com"
] |
nashenruoyang@163.com
|
b85e6af344facb6e0df6e9ed8dff20da26f7144a
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/merge-strings-alternately.py
|
107572aa3949742adfc4813ca836790e9dbcd7cc
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 471
|
py
|
# Time: O(m + n)
# Space: O(1)
class Solution(object):
def mergeAlternately(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: str
"""
result = []
i = 0
while i < len(word1) or i < len(word2):
if i < len(word1):
result.append(word1[i])
if i < len(word2):
result.append(word2[i])
i += 1
return "".join(result)
|
[
"noreply@github.com"
] |
kamyu104.noreply@github.com
|
1a62fead2b8972b791603ecd96b75643fdc06101
|
2da14d080bf2e54b13b8b1b23aface9b755f94f0
|
/zhichen/items.py
|
559c21abeb923bdac3a8d22ccfe79ecef1bbb05c
|
[] |
no_license
|
efdssdf/zhichen
|
82bf6fe38d59b8ebc4f90e7c84709a3e6fee59d1
|
0a76d507af85b1efd7676aeafac6d2cba48b5e5f
|
refs/heads/master
| 2020-04-08T07:36:23.988586
| 2018-11-26T09:40:28
| 2018-11-26T09:40:28
| 159,144,769
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ZhichenItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
|
[
"efdssdf@gmail.com"
] |
efdssdf@gmail.com
|
ca17fee06b16873c1bf01a9602a2b6e6347d8b01
|
f675a690b62250847b514ace399c2bb7860528f9
|
/ZIFS.py
|
b0e5818588dee37abcd7d781d37fcfa637c0c83b
|
[] |
no_license
|
adkingston/final-project-programs
|
a30b5bb5abcfbb4e95d19030c1e4ab2ec05c5034
|
dd7db1a4484194162f756ae702743a05f7c7cd53
|
refs/heads/master
| 2021-01-13T10:14:31.507196
| 2017-08-18T16:56:21
| 2017-08-18T16:56:21
| 69,599,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,456
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import pylab as pyl
D, R = np.arange(0.0, 1.0+1e-7, 0.1), np.arange(0.0, 2.0+1.e-7, 0.11)
A = [[a, b] for a in D for b in R]
def z1(s):
return [[0.25*x[0], 0.5*x[1]] for x in s]
def z2(s):
return [[-0.25*x[0]+0.5, -0.5*x[1]+2] for x in s]
def z3(s):
return [[-0.25*x[0] + 0.75, 0.5*x[1] + 1] for x in s]
def z4(s):
return [[0.25*x[0] + 0.75, 0.5*x[1] + 1] for x in s]
def iterations(ifs, seed, steps):
assert isinstance(ifs, list)
if steps < 1:
return seed
else:
next_step = []
for func in ifs:
next_step += func(seed)
next_step = iterations(ifs, next_step, steps-1)
return next_step
a = [[2., 3.]]
A1 = iterations([z1, z2, z3, z4], a, 7)
X1 = [z[0] for z in A1]
Y1 = [z[1] for z in A1]
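# Each of the four affine maps is applied to every point at each step, so 7
# iterations of the single seed yield 4**7 = 16384 points sampling the attractor.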
# fig = plt.figure()
plt.plot(X1, Y1, 'bo', markersize=1, markeredgewidth=0.1)
pyl.show()
# fig.savefig("C:\\Users\\Alexander\\OneDrive\\Documents\\School
# \\University of St. Andrews\\Year 4\\MT4599
# Dissertation\\Main Document\\images\\A6.png")
# def hausdorff_dist(A, B):
# dists = []
# temp = []
# for a in A:
# for b in B:
# d = math.sqrt(abs(a[0] - b[0])**2 + abs(a[1] - b[1])**2)
# temp.append(d)
# dists.append(min(temp))
# temp = []
# return max(dists)
|
[
"noreply@github.com"
] |
adkingston.noreply@github.com
|