[
  {
    "metadataOutputVersion" : "3.0",
    "storagePrecision" : "Float16",
    "outputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 1280 × 1 × 1500)",
        "shortDescription" : "",
        "shape" : "[1, 1280, 1, 1500]",
        "name" : "encoder_output_embeds",
        "type" : "MultiArray"
      }
    ],
    "modelParameters" : [

    ],
    "specificationVersion" : 7,
    "mlProgramOperationTypeHistogram" : {
      "Ios16.softmax" : 32,
      "Ios16.add" : 65,
      "Ios16.mul" : 32,
      "Ios16.batchNorm" : 65,
      "Ios16.gelu" : 34,
      "Ios16.reshape" : 128,
      "Ios16.matmul" : 64,
      "Ios16.layerNorm" : 65,
      "Ios16.conv" : 194
    },
    "computePrecision" : "Mixed (Float16, Int32)",
    "isUpdatable" : "0",
    "availability" : {
      "macOS" : "13.0",
      "tvOS" : "16.0",
      "visionOS" : "1.0",
      "watchOS" : "9.0",
      "iOS" : "16.0",
      "macCatalyst" : "16.0"
    },
    "modelType" : {
      "name" : "MLModelType_mlProgram"
    },
    "userDefinedMetadata" : {
      "com.github.apple.coremltools.source_dialect" : "TorchScript",
      "com.github.apple.coremltools.version" : "8.0",
      "com.github.apple.coremltools.source" : "torch==2.4.1"
    },
    "inputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 128 × 1 × 3000)",
        "shortDescription" : "",
        "shape" : "[1, 128, 1, 3000]",
        "name" : "melspectrogram_features",
        "type" : "MultiArray"
      }
    ],
    "generatedClassName" : "AudioEncoder",
    "method" : "predict"
  }
]