[
  {
    "metadataOutputVersion" : "3.0",
    "storagePrecision" : "Mixed (Float16, Palettized (4 bits), Sparse)",
    "outputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 768 × 1 × 1500)",
        "shortDescription" : "",
        "shape" : "[1, 768, 1, 1500]",
        "name" : "encoder_output_embeds",
        "type" : "MultiArray"
      }
    ],
    "modelParameters" : [

    ],
    "specificationVersion" : 7,
    "mlProgramOperationTypeHistogram" : {
      "Ios16.softmax" : 12,
      "Ios16.add" : 99,
      "Ios16.mul" : 12,
      "Ios16.constexprSparseToDense" : 72,
      "Ios16.constexprLutToDense" : 74,
      "Ios16.batchNorm" : 25,
      "Ios16.gelu" : 14,
      "Ios16.reshape" : 48,
      "Ios16.matmul" : 24,
      "Ios16.layerNorm" : 25,
      "Ios16.conv" : 148
    },
    "computePrecision" : "Mixed (Float16, Int32)",
    "isUpdatable" : "0",
    "stateSchema" : [

    ],
    "availability" : {
      "macOS" : "13.0",
      "tvOS" : "16.0",
      "visionOS" : "1.0",
      "watchOS" : "9.0",
      "iOS" : "16.0",
      "macCatalyst" : "16.0"
    },
    "modelType" : {
      "name" : "MLModelType_mlProgram"
    },
    "userDefinedMetadata" : {
      "com.github.apple.coremltools.source_dialect" : "TorchScript",
      "com.github.apple.coremltools.source" : "torch==2.6.0",
      "com.github.apple.coremltools.version" : "8.2"
    },
    "inputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
        "shortDescription" : "",
        "shape" : "[1, 80, 1, 3000]",
        "name" : "melspectrogram_features",
        "type" : "MultiArray"
      }
    ],
    "generatedClassName" : "AudioEncoder_mixedBitPalettized_4_bit",
    "method" : "predict"
  }
]