I have trained a model to differentiate between malignant and benign skin lesions to potentially detect if a patient has skin cancer, and have converted my keras model to coreML. Now I am trying to apply my model to an ios app using swift (through Xcode) which I have no experience in at all (still learning through trial and error).
Currently I am trying to get the model working through a simple app that just takes an image from the phone's camera to get a predicted label as output, but I am quite stuck in getting the camera to actually work to do just that.
import UIKit
import CoreML
import Vision
import Social
@UIApplicationMain
class ViewControl: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate, UIApplicationDelegate {
@IBOutlet weak var imageView: UIImageView!
var classificationResults : [VNClassificationObservation] = []
let imagePicker = UIImagePickerController()
override func viewDidLoad() {
super.viewDidLoad()
imagePicker.delegate = self
}
func detect(image: CIImage) {
// Load the ML model through its generated class
guard let model = try? VNCoreMLModel(for: weights_skin_cancer().model) else {
fatalError("can't load ML model")
}
let request = VNCoreMLRequest(model: model) { request, error in
guard let results = request.results as? [VNClassificationObservation],
let topResult = results.first
else {
fatalError("unexpected result type from VNCoreMLRequest")
}
if topResult.identifier.contains("malignant") {
DispatchQueue.main.async {
self.navigationItem.title = "mal!"
self.navigationController?.navigationBar.barTintColor = UIColor.green
self.navigationController?.navigationBar.isTranslucent = false
}
}
else {
DispatchQueue.main.async {
self.navigationItem.title = "benign!"
self.navigationController?.navigationBar.barTintColor = UIColor.red
self.navigationController?.navigationBar.isTranslucent = false
}
}
}
let handler = VNImageRequestHandler(ciImage: image)
do { try handler.perform([request]) }
catch { print(error) }
}
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey : Any]) {
if let image = info[UIImagePickerController.InfoKey.originalImage] as? UIImage {
imageView.image = image
imagePicker.dismiss(animated: true, completion: nil)
guard let ciImage = CIImage(image: image) else {
fatalError("couldn't convert uiimage to CIImage")
}
detect(image: ciImage)
}
}
@IBAction func cameraTapped(_ sender: Any) {
imagePicker.sourceType = .camera
imagePicker.allowsEditing = false
present(imagePicker, animated: true, completion: nil)
}
}
Here's also the code used to convert my model to coreML for reference:
import coremltools
output_labels = ['benign', 'malignant']
scale = 1/255.
coreml_model = coremltools.converters.keras.convert('/Users/Grampun/Desktop/ISIC-Archive-Downloader-master/trained_models/lr_0.00006-400_DS-20_epochs/weights.best.from_scratch.6.hdf5',
input_names='image',
image_input_names='image',
output_names='output',
class_labels=output_labels,
image_scale=scale)
coreml_model.author = 'Jack Bugeja'
coreml_model.short_description = 'Model used to identify between benign and malignant skin lesions'
coreml_model.input_description['image'] = 'Dermascopic image of skin lesion to evaluate'
coreml_model.input_description['output'] = 'Malignant/Benign'
coreml_model.save(
'/Users/Grampun/Desktop/ISIC-Archive-Downloader-master/trained_models/model_for_ios/lr_0.00006-400_DS-20_epochs/weights_skin_cancer.mlmodel')
Any help in general would be highly appreciate. Thanks!