[
  {
    "metadataOutputVersion" : "3.0",
    "storagePrecision" : "Float16",
    "outputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 1024 × 1 × 1500)",
        "shortDescription" : "",
        "shape" : "[1, 1024, 1, 1500]",
        "name" : "encoder_output_embeds",
        "type" : "MultiArray"
      }
    ],
    "modelParameters" : [

    ],
    "specificationVersion" : 7,
    "mlProgramOperationTypeHistogram" : {
      "Ios16.softmax" : 24,
      "Ios16.add" : 49,
      "Ios16.mul" : 24,
      "Ios16.batchNorm" : 49,
      "Ios16.gelu" : 26,
      "Ios16.reshape" : 96,
      "Ios16.matmul" : 48,
      "Ios16.layerNorm" : 49,
      "Ios16.conv" : 146
    },
    "computePrecision" : "Mixed (Float16, Int32)",
    "isUpdatable" : "0",
    "stateSchema" : [

    ],
    "availability" : {
      "macOS" : "13.0",
      "tvOS" : "16.0",
      "visionOS" : "1.0",
      "watchOS" : "9.0",
      "iOS" : "16.0",
      "macCatalyst" : "16.0"
    },
    "modelType" : {
      "name" : "MLModelType_mlProgram"
    },
    "userDefinedMetadata" : {
      "com.github.apple.coremltools.version" : "8.2",
      "com.github.apple.coremltools.source" : "torch==2.5.0",
      "com.github.apple.coremltools.source_dialect" : "TorchScript"
    },
    "inputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
        "shortDescription" : "",
        "shape" : "[1, 80, 1, 3000]",
        "name" : "melspectrogram_features",
        "type" : "MultiArray"
      }
    ],
    "generatedClassName" : "AudioEncoder",
    "method" : "predict"
  }
]