openai_whisper-tiny.en/AudioEncoder.mlmodelc/metadata.json
1.8 KB · 67 lines · json Raw
[
  {
    "metadataOutputVersion" : "3.0",
    "storagePrecision" : "Float16",
    "outputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 384 × 1 × 1500)",
        "shortDescription" : "",
        "shape" : "[1, 384, 1, 1500]",
        "name" : "encoder_output_embeds",
        "type" : "MultiArray"
      }
    ],
    "modelParameters" : [

    ],
    "specificationVersion" : 7,
    "mlProgramOperationTypeHistogram" : {
      "Concat" : 28,
      "Ios16.add" : 9,
      "Ios16.mul" : 96,
      "SliceByIndex" : 168,
      "Transpose" : 4,
      "Ios16.batchNorm" : 9,
      "Ios16.einsum" : 192,
      "Ios16.gelu" : 6,
      "Ios16.softmax" : 96,
      "Ios16.layerNorm" : 9,
      "Ios16.conv" : 26
    },
    "computePrecision" : "Mixed (Float16, Int32)",
    "isUpdatable" : "0",
    "availability" : {
      "macOS" : "13.0",
      "tvOS" : "16.0",
      "visionOS" : "1.0",
      "watchOS" : "9.0",
      "iOS" : "16.0",
      "macCatalyst" : "16.0"
    },
    "modelType" : {
      "name" : "MLModelType_mlProgram"
    },
    "userDefinedMetadata" : {
      "com.github.apple.coremltools.source_dialect" : "TorchScript",
      "com.github.apple.coremltools.version" : "8.0",
      "com.github.apple.coremltools.source" : "torch==2.4.1"
    },
    "inputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
        "shortDescription" : "",
        "shape" : "[1, 80, 1, 3000]",
        "name" : "melspectrogram_features",
        "type" : "MultiArray"
      }
    ],
    "generatedClassName" : "AudioEncoder",
    "method" : "predict"
  }
]