//
// CLIP_ImageEncoder.swift
//
// This file was automatically generated and should not be edited.
//

import CoreML
/// Model Prediction Input Type
@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, visionOS 1.0, *)
public class CLIP_ImageEncoderInput : MLFeatureProvider {

    /// image to be encoded, supplied as a pixel buffer.
    /// The CGImage/URL convenience initializers resize to 224x224, 32ARGB.
    public var image: CVPixelBuffer

    /// The single feature name exposed by this provider.
    public var featureNames: Set<String> { ["image"] }

    /// Returns the feature value for "image"; nil for any other name.
    public func featureValue(for featureName: String) -> MLFeatureValue? {
        if featureName == "image" {
            return MLFeatureValue(pixelBuffer: image)
        }
        return nil
    }

    public init(image: CVPixelBuffer) {
        self.image = image
    }

    /// Creates the input from a CGImage, resized to 224x224 (32ARGB).
    /// - Throws: if the image cannot be converted to a pixel buffer.
    public convenience init(imageWith image: CGImage) throws {
        self.init(image: try MLFeatureValue(cgImage: image, pixelsWide: 224, pixelsHigh: 224, pixelFormatType: kCVPixelFormatType_32ARGB, options: nil).imageBufferValue!)
    }

    /// Creates the input from an image file URL, resized to 224x224 (32ARGB).
    /// - Throws: if the file cannot be loaded or converted to a pixel buffer.
    public convenience init(imageAt image: URL) throws {
        self.init(image: try MLFeatureValue(imageAt: image, pixelsWide: 224, pixelsHigh: 224, pixelFormatType: kCVPixelFormatType_32ARGB, options: nil).imageBufferValue!)
    }

    /// Replaces `image` with a pixel buffer rendered from the given CGImage (224x224, 32ARGB).
    func setImage(with image: CGImage) throws  {
        self.image = try MLFeatureValue(cgImage: image, pixelsWide: 224, pixelsHigh: 224, pixelFormatType: kCVPixelFormatType_32ARGB, options: nil).imageBufferValue!
    }

    /// Replaces `image` with a pixel buffer loaded from the given file URL (224x224, 32ARGB).
    func setImage(with image: URL) throws  {
        self.image = try MLFeatureValue(imageAt: image, pixelsWide: 224, pixelsHigh: 224, pixelFormatType: kCVPixelFormatType_32ARGB, options: nil).imageBufferValue!
    }

}
/// Model Prediction Output Type
@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, visionOS 1.0, *)
public class CLIP_ImageEncoderOutput : MLFeatureProvider {

    /// Source provider created by the CoreML framework (or a dictionary provider for the designated init).
    private let provider : MLFeatureProvider

    /// var_1240 (the model's output embedding) as a multidimensional array of floats.
    public var var_1240: MLMultiArray {
        provider.featureValue(for: "var_1240")!.multiArrayValue!
    }

    /// var_1240 as a typed shaped array of floats.
    public var var_1240ShapedArray: MLShapedArray<Float> {
        MLShapedArray<Float>(var_1240)
    }

    /// Names of all features carried by the underlying provider.
    public var featureNames: Set<String> {
        provider.featureNames
    }

    /// Forwards feature lookup to the underlying provider.
    public func featureValue(for featureName: String) -> MLFeatureValue? {
        provider.featureValue(for: featureName)
    }

    /// Wraps a caller-supplied multiarray in a dictionary provider.
    /// `try!` is safe here: the dictionary is built from a single valid MLFeatureValue.
    public init(var_1240: MLMultiArray) {
        self.provider = try! MLDictionaryFeatureProvider(dictionary: ["var_1240" : MLFeatureValue(multiArray: var_1240)])
    }

    /// Wraps the feature provider returned by a prediction call.
    public init(features: MLFeatureProvider) {
        self.provider = features
    }
}
/// Class for model loading and prediction
@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, visionOS 1.0, *)
public class CLIP_ImageEncoder {
    /// The underlying Core ML model instance used for all predictions.
    public let model: MLModel

    /// URL of the compiled model resource ("CLIP_ImageEncoder.mlmodelc") in this class's bundle.
    /// Force-unwrap is intentional: a missing bundled resource is a packaging error.
    public class var urlOfModelInThisBundle : URL {
        let bundle = Bundle(for: self)
        return bundle.url(forResource: "CLIP_ImageEncoder", withExtension:"mlmodelc")!
    }

    /**
        Construct CLIP_ImageEncoder instance with an existing MLModel object.

        Usually the application does not use this initializer unless it makes a subclass of CLIP_ImageEncoder.
        Such application may want to use `MLModel(contentsOfURL:configuration:)` and `CLIP_ImageEncoder.urlOfModelInThisBundle` to create a MLModel object to pass-in.

        - parameters:
          - model: MLModel object
    */
    public init(model: MLModel) {
        self.model = model
    }

    /**
        Construct CLIP_ImageEncoder instance with explicit configuration

        - parameters:
           - configuration: the desired model configuration

        - throws: an NSError object that describes the problem
    */
    public convenience init(configuration: MLModelConfiguration = MLModelConfiguration()) throws {
        try self.init(contentsOf: type(of:self).urlOfModelInThisBundle, configuration: configuration)
    }

    /**
        Construct CLIP_ImageEncoder instance from the model URL

        - parameters:
           - modelURL: the file url of the model

        - throws: an NSError object that describes the problem
    */
    public convenience init(contentsOf modelURL: URL) throws {
        try self.init(model: MLModel(contentsOf: modelURL))
    }

    /**
        Construct CLIP_ImageEncoder instance from the model URL with explicit configuration

        - parameters:
           - modelURL: the file url of the model
           - configuration: the desired model configuration

        - throws: an NSError object that describes the problem
    */
    public convenience init(contentsOf modelURL: URL, configuration: MLModelConfiguration) throws {
        try self.init(model: MLModel(contentsOf: modelURL, configuration: configuration))
    }

    /**
        Construct CLIP_ImageEncoder instance asynchronously with optional configuration.

        - parameters:
          - configuration: the desired model configuration
          - handler: the completion handler to be called when the model loading completes successfully or unsuccessfully
    */
    public class func load(configuration: MLModelConfiguration = MLModelConfiguration(), completionHandler handler: @escaping (Swift.Result<CLIP_ImageEncoder, Error>) -> Void) {
        load(contentsOf: self.urlOfModelInThisBundle, configuration: configuration, completionHandler: handler)
    }

    /**
        Construct CLIP_ImageEncoder instance asynchronously with optional configuration.

        - parameters:
          - configuration: the desired model configuration
    */
    public class func load(configuration: MLModelConfiguration = MLModelConfiguration()) async throws -> CLIP_ImageEncoder {
        try await load(contentsOf: self.urlOfModelInThisBundle, configuration: configuration)
    }

    /**
        Construct CLIP_ImageEncoder instance asynchronously from the model URL with optional configuration.

        - parameters:
          - modelURL: the URL of the model
          - configuration: the desired model configuration
          - handler: the completion handler to be called when the model loading completes successfully or unsuccessfully
    */
    public class func load(contentsOf modelURL: URL, configuration: MLModelConfiguration = MLModelConfiguration(), completionHandler handler: @escaping (Swift.Result<CLIP_ImageEncoder, Error>) -> Void) {
        MLModel.load(contentsOf: modelURL, configuration: configuration) { result in
            switch result {
            case .failure(let error):
                handler(.failure(error))
            case .success(let model):
                handler(.success(CLIP_ImageEncoder(model: model)))
            }
        }
    }

    /**
        Construct CLIP_ImageEncoder instance asynchronously from the model URL with optional configuration.

        - parameters:
          - modelURL: the URL of the model
          - configuration: the desired model configuration
    */
    public class func load(contentsOf modelURL: URL, configuration: MLModelConfiguration = MLModelConfiguration()) async throws -> CLIP_ImageEncoder {
        let model = try await MLModel.load(contentsOf: modelURL, configuration: configuration)
        return CLIP_ImageEncoder(model: model)
    }

    /**
        Make a prediction using the structured interface

        - parameters:
           - input: the input to the prediction as CLIP_ImageEncoderInput

        - throws: an NSError object that describes the problem

        - returns: the result of the prediction as CLIP_ImageEncoderOutput
    */
    public func prediction(input: CLIP_ImageEncoderInput) throws -> CLIP_ImageEncoderOutput {
        try prediction(input: input, options: MLPredictionOptions())
    }

    /**
        Make a prediction using the structured interface

        - parameters:
           - input: the input to the prediction as CLIP_ImageEncoderInput
           - options: prediction options

        - throws: an NSError object that describes the problem

        - returns: the result of the prediction as CLIP_ImageEncoderOutput
    */
    public func prediction(input: CLIP_ImageEncoderInput, options: MLPredictionOptions) throws -> CLIP_ImageEncoderOutput {
        let outFeatures = try model.prediction(from: input, options: options)
        return CLIP_ImageEncoderOutput(features: outFeatures)
    }

    /**
        Make an asynchronous prediction using the structured interface

        - parameters:
           - input: the input to the prediction as CLIP_ImageEncoderInput
           - options: prediction options

        - throws: an NSError object that describes the problem

        - returns: the result of the prediction as CLIP_ImageEncoderOutput
    */
    @available(macOS 14.0, iOS 17.0, tvOS 17.0, watchOS 10.0, visionOS 1.0, *)
    public func prediction(input: CLIP_ImageEncoderInput, options: MLPredictionOptions = MLPredictionOptions()) async throws -> CLIP_ImageEncoderOutput {
        let outFeatures = try await model.prediction(from: input, options: options)
        return CLIP_ImageEncoderOutput(features: outFeatures)
    }

    /**
        Make a prediction using the convenience interface

        - parameters:
            - image: the input image as CVPixelBuffer

        - throws: an NSError object that describes the problem

        - returns: the result of the prediction as CLIP_ImageEncoderOutput
    */
    public func prediction(image: CVPixelBuffer) throws -> CLIP_ImageEncoderOutput {
        let input_ = CLIP_ImageEncoderInput(image: image)
        return try prediction(input: input_)
    }

    /**
        Make a batch prediction using the structured interface

        - parameters:
           - inputs: the inputs to the prediction as [CLIP_ImageEncoderInput]
           - options: prediction options

        - throws: an NSError object that describes the problem

        - returns: the result of the prediction as [CLIP_ImageEncoderOutput]
    */
    public func predictions(inputs: [CLIP_ImageEncoderInput], options: MLPredictionOptions = MLPredictionOptions()) throws -> [CLIP_ImageEncoderOutput] {
        let batchIn = MLArrayBatchProvider(array: inputs)
        let batchOut = try model.predictions(from: batchIn, options: options)
        var results : [CLIP_ImageEncoderOutput] = []
        results.reserveCapacity(inputs.count)
        for i in 0..<batchOut.count {
            let outProvider = batchOut.features(at: i)
            let result =  CLIP_ImageEncoderOutput(features: outProvider)
            results.append(result)
        }
        return results
    }
}