-/*
-    All bytes are in big endian order.
-
-    It looks like milo files were replaced with this. Max Block Size = 0x10000 (2^16)
-
-    BYTES(4) - "CHNK"
-    INT32 - Unknown - Always 255?
-    INT32 - Block Count
-    INT32 - Largest Block (Uncompressed)
-    INT16 - Always 1
-    INT16 - Always 2
-    BlockDetails[Block Count]
-
-    * ----Block Details----
-    * =====================
-    * INT32 - Size
-    * INT32 - Decompressed Size
-    * Bool? - If "01 00 00 00", then it's compressed.
-    * INT32 - Offset
-
-    Begin ZLib'd blocks!
-*/
-
-namespace Mackiloha.Chunk;
-
-// Successor to Milo container (Used in FME/RBVR)
+namespace Mackiloha.Chunk;
+
+// Successor to Milo container (Used in FME/DCS/RBVR)
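Note: the removed header comment was the only documentation of the CHNK layout, so here is a minimal sketch of it for reference. Struct and field names are illustrative, not actual Mackiloha types; sizes and values come from the removed comment and the writer code below.

    // Illustrative sketch only (not Mackiloha types).
    struct ChunkHeader                 // 20 bytes; magic and flags are big endian
    {
        public uint Magic;             // "CHNK" (0x43484E4B)
        public uint Flags;             // 0xFF, or 0x1FF on Durango (bit 0x100 = little endian body)
        public int BlockCount;
        public int MaxBlockSize;       // Largest uncompressed block
        public short Unknown1;         // Always 1
        public short Unknown2;         // 2, or 5 on Durango
    }

    struct BlockDetails                // 16 bytes per block, BlockCount entries
    {
        public int Size;
        public int DecompressedSize;
        public uint CompressedFlag;    // 0x01_00_00_00 when the block is zlib-compressed
        public int Offset;             // Swapped with CompressedFlag on Durango
    }
    // The zlib'd (or raw) block data follows the details table.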
 public class Chunk
 {
     private const uint CHNK_MAGIC = 0x43484E4B; // "CHNK"
+    private const uint IS_COMPRESSED = 0x01_00_00_00;
 
     public Chunk()
     {
         Entries = new List<ChunkEntry>();
     }
 
-    public void WriteToFile(string outPath, bool noHeader = false)
+    public void WriteToFile(string outPath, bool writeHeader = true)
     {
-        using (FileStream fs = File.OpenWrite(outPath))
-        {
-            WriteToStream(fs, noHeader);
-        }
+        using var fs = File.OpenWrite(outPath);
+        WriteToStream(fs, writeHeader);
     }
 
-    public void WriteToStream(Stream stream, bool noHeader)
+    public void WriteToStream(Stream stream, bool writeHeader = true)
     {
         AwesomeWriter aw = new AwesomeWriter(stream, true);
 
-        if (!noHeader)
+        if (writeHeader)
         {
+            int endianFlag = 0xFF;
+            short extraShort = 2;
+
+            if (IsDurango)
+            {
+                endianFlag = 0x1FF;
+                extraShort = 5;
+            }
+
             aw.Write(CHNK_MAGIC);
-            aw.Write((int)255);
+            aw.Write((int)endianFlag);
+
+            aw.BigEndian = !IsDurango;
+
             aw.Write(Entries.Count);
             aw.Write(Entries.Max(x => x.Data.Length));
             aw.Write((short)1);
-            aw.Write((short)2);
+            aw.Write((short)extraShort);
 
-            int currentIdx = 20 + (Entries.Count << 2);
+            int currentIdx = 20 + (Entries.Count * 16);
 
             // Writes block details
             foreach (ChunkEntry entry in Entries)
             {
                 aw.Write(entry.Data.Length);
                 aw.Write(entry.Data.Length);
 
-                aw.Write((int)(entry.Compressed ? 1 : 0));
-                aw.Write(currentIdx);
+                if (IsDurango)
+                {
+                    aw.Write(currentIdx);
+                    aw.Write((int)(entry.Compressed ? IS_COMPRESSED : 0));
+                }
+                else
+                {
+                    aw.Write((int)(entry.Compressed ? IS_COMPRESSED : 0));
+                    aw.Write(currentIdx);
+                }
 
                 currentIdx += entry.Data.Length;
             }
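The offset arithmetic here is worth spelling out: the header is 20 bytes (4 magic + 4 flags + 4 block count + 4 max block size + 2 + 2) and each block-details record is 16 bytes, so the first block's data begins at 20 + BlockCount * 16. The old `Entries.Count << 2` reserved only 4 bytes per record, which appears to have left every stored offset 12 bytes short per entry. A sketch of the corrected math:

    // Sketch: offset of the first block's data when a header is written.
    int headerSize = 20;                    // 4 + 4 + 4 + 4 + 2 + 2
    int detailsSize = Entries.Count * 16;   // 4 size + 4 decompressed + 4 flag + 4 offset
    int firstBlockOffset = headerSize + detailsSize;  // e.g. 3 blocks -> 20 + 48 = 68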
@@ -73,13 +69,15 @@ public void WriteToStream(Stream stream, bool noHeader)
         Entries.ForEach(x => aw.Write(x.Data));
     }
 
-    public static void DecompressChunkFile(string inPath, string outPath, bool noHeader)
+    public static void DecompressChunkFile(string inPath, string outPath, bool writeHeader = true)
     {
-        using (FileStream fs = File.OpenRead(inPath))
+        Chunk chunk;
+        using (var fs = File.OpenRead(inPath))
         {
-            Chunk chunk = ReadFromStream(fs);
-            chunk.WriteToFile(outPath, noHeader);
+            chunk = ReadFromStream(fs);
         }
+
+        chunk.WriteToFile(outPath, writeHeader);
     }
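A quick usage sketch for the round-trip above (file names are hypothetical):

    // Read a CHNK file, inflate its blocks, and write the result back out.
    // Pass writeHeader: false to emit only the raw concatenated block data.
    Chunk.DecompressChunkFile("input.chnk", "output.chnk", writeHeader: true);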
 
     private static Chunk ReadFromStream(Stream stream)
@@ -89,9 +87,15 @@ private static Chunk ReadFromStream(Stream stream)
 
         if (ar.ReadUInt32() != CHNK_MAGIC) return chunk;
 
-        ar.BaseStream.Position += 4; // Always 255?
+        var flag = ar.ReadUInt32();
+        if ((flag & 0x100) != 0)
+        {
+            chunk.IsDurango = true;
+            ar.BigEndian = false;
+        }
+
         int blockCount = ar.ReadInt32();
-        ar.BaseStream.Position += 8; // Skips 1, 2 (16-bits)
+        ar.BaseStream.Position += 8; // Skips largest block size and 1, 2/5 (16-bits)
 
         int[] blockSize = new int[blockCount];
         bool[] compressed = new bool[blockCount]; // Uncompressed by default
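So the second header word doubles as an endianness/platform flag: 0xFF for the original big endian files and 0x1FF on Durango (Xbox One), where everything after the flag is little endian. Restated as a standalone check:

    // The flag test above, restated; values observed in this diff.
    uint flag = ar.ReadUInt32();            // 0x000000FF or 0x000001FF
    bool isDurango = (flag & 0x100) != 0;   // bit 8 set -> Durango, little endian body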
@@ -102,10 +106,17 @@ private static Chunk ReadFromStream(Stream stream)
             blockSize[i] = ar.ReadInt32();
             ar.BaseStream.Position += 4; // Decompressed size (Not needed)
 
-            // Sets as compressed if it meets the requirement
-            compressed[i] = ar.ReadInt32() == 0x1000000;
-
-            ar.BaseStream.Position += 4; // Offset (Not needed)
+            // Fields are swapped depending on platform
+            if (chunk.IsDurango)
+            {
+                ar.BaseStream.Position += 4; // Offset (Not needed)
+                compressed[i] = ar.ReadInt32() == IS_COMPRESSED;
+            }
+            else
+            {
+                compressed[i] = ar.ReadInt32() == IS_COMPRESSED;
+                ar.BaseStream.Position += 4; // Offset (Not needed)
+            }
         }
 
         for (int i = 0; i < blockCount; i++)
@@ -133,7 +144,8 @@ private static Chunk ReadFromStream(Stream stream)
         return chunk;
     }
 
-    public List<ChunkEntry> Entries;
+    public bool IsDurango { get; set; }
+    public List<ChunkEntry> Entries { get; set; }
 }
 
 public class ChunkEntry