# Convert a frozen TensorFlow graph (.pb) into a TensorFlow Lite model (.tflite).
# NOTE: original snippet used curly quotes (“ ”), which is a Python syntax error.
graph_def_file = "model_name.pb"

# Each list can hold more than one tensor name if the model has
# multiple inputs / multiple outputs, respectively.
input_arrays = ["input_tensor_name"]
output_arrays = ["output_tensor_name"]

converter = lite.TFLiteConverter.from_frozen_graph(
    graph_def_file, input_arrays, output_arrays)
tflite_model = converter.convert()

# Use a context manager so the file handle is closed even if write() raises.
with open("converted_model.tflite", "wb") as f:
    f.write(tflite_model)
// create a model interpreter for local model (bundled with app) FirebaseModelOptions modelOptions = new FirebaseModelOptions.Builder() .setLocalModelName(“model_name”) .build(); modelInterpreter = FirebaseModelInterpreter.getInstance(modelOptions); // specify input output details for the model // SqueezeNet architecture uses 227 x 227 image as input modelInputOutputOptions = new FirebaseModelInputOutputOptions.Builder() .setInputFormat(0, FirebaseModelDataType.FLOAT32, new int[]{1, 227, 227, 3}) .setOutputFormat(0, FirebaseModelDataType.FLOAT32, new int[]{1, numLabels}) .build(); // create input data FirebaseModelInputs input = new FirebaseModelInputs.Builder().add(imgDataArray).build(); // imgDataArray is a float[][][][] array of (1, 227, 227, 3) // run inference modelInterpreter.run(input, modelInputOutputOptions);